From de300df7750453c1ba20d138e9897e883b194d9f Mon Sep 17 00:00:00 2001
From: Julian Tescher
Date: Fri, 10 Jun 2022 23:17:39 -0700
Subject: [PATCH] Update metrics API and SDK

This change aligns metrics with the spec. Changes include:

* Rename `MeterProvider::meter` to `MeterProvider::versioned_meter` for
  consistency with the `TracerProvider` trait.
* Move metrics SDK API types to `opentelemetry-sdk`.
* Consolidate instrument builders into `InstrumentBuilder`.
* Remove value observers and add gauges.
* Move from batch observers to registered callbacks.
* Rename `ExportKindFor` to `TemporalitySelector`.
* Consolidate `PushController` and `PullController` into `BasicController`.
* Remove `MinMaxSumCountAggregator` and `ArrayAggregator`.
* Update examples and exporters for the new API and SDK.

A short sketch of how the new pieces compose follows the diff below.
---
 Cargo.toml | 1 -
 examples/basic-otlp-with-selector/Cargo.toml | 13 -
 examples/basic-otlp-with-selector/README.md | 7 -
 examples/basic-otlp-with-selector/src/main.rs | 157 ----
 examples/basic-otlp/src/main.rs | 72 +-
 examples/basic/src/main.rs | 62 +-
 examples/dynatrace/src/main.rs | 85 +-
 .../external-otlp-grpcio-async-std/Cargo.toml | 2 +-
 examples/hyper-prometheus/src/main.rs | 56 +-
 opentelemetry-api/src/global/metrics.rs | 17 +-
 opentelemetry-api/src/global/mod.rs | 10 +-
 opentelemetry-api/src/metrics/config.rs | 59 --
 opentelemetry-api/src/metrics/counter.rs | 107 ---
 opentelemetry-api/src/metrics/histogram.rs | 107 ---
 .../src/metrics/instruments/counter.rs | 130 +++
 .../src/metrics/instruments/gauge.rs | 87 ++
 .../src/metrics/instruments/histogram.rs | 74 ++
 .../src/metrics/instruments/mod.rs | 72 ++
 .../metrics/instruments/up_down_counter.rs | 133 +++
 opentelemetry-api/src/metrics/meter.rs | 420 +++------
 opentelemetry-api/src/metrics/mod.rs | 209 ++++-
 opentelemetry-api/src/metrics/noop.rs | 96 +-
 opentelemetry-api/src/metrics/observer.rs | 352 -------
 opentelemetry-api/src/metrics/registry.rs | 174 ----
 opentelemetry-api/src/metrics/sdk_api.rs | 65 --
 .../src/metrics/up_down_counter.rs | 107 ---
 .../src/metrics/value_recorder.rs | 107 ---
 opentelemetry-dynatrace/Cargo.toml | 2 +-
 opentelemetry-dynatrace/src/lib.rs | 32 +-
 opentelemetry-dynatrace/src/metric.rs | 195 ++--
 .../src/transform/metrics.rs | 324 ++-----
 opentelemetry-dynatrace/tests/http_test.rs | 53 +-
 opentelemetry-jaeger/Cargo.toml | 2 +-
 opentelemetry-otlp/src/metric.rs | 226 ++---
 opentelemetry-otlp/src/transform/metrics.rs | 171 +---
 opentelemetry-prometheus/src/lib.rs | 339 ++-----
 .../tests/integration_test.rs | 143 +--
 opentelemetry-proto/src/transform/metrics.rs | 26 +-
 opentelemetry-sdk/Cargo.toml | 7 +-
 opentelemetry-sdk/benches/ddsketch.rs | 71 --
 opentelemetry-sdk/benches/metric.rs | 69 +-
 .../src/export/metrics/aggregation.rs | 81 --
 .../src/export/metrics/aggregation/mod.rs | 110 +++
 .../export/metrics/aggregation/temporality.rs | 92 ++
 opentelemetry-sdk/src/export/metrics/mod.rs | 561 +++++------
 .../src/export/metrics/stdout.rs | 302 +++---
 .../src/metrics/aggregators/array.rs | 156 ----
 .../src/metrics/aggregators/ddsketch.rs | 877 ------------------
 .../src/metrics/aggregators/histogram.rs | 27 +-
 .../src/metrics/aggregators/last_value.rs | 22 +-
 .../metrics/aggregators/min_max_sum_count.rs | 165 ----
 .../src/metrics/aggregators/mod.rs | 75 +-
 .../src/metrics/aggregators/sum.rs | 45 +-
 .../src/metrics/controllers/basic.rs | 468 ++++++++++
 .../src/metrics/controllers/mod.rs | 6 +-
 .../src/metrics/controllers/pull.rs | 163 ----
 .../src/metrics/controllers/push.rs | 201 ----
 opentelemetry-sdk/src/metrics/mod.rs | 678 +++++---------
 .../src/metrics/processors/basic.rs | 176 ++--
 .../src/metrics/processors/mod.rs | 2 +-
 opentelemetry-sdk/src/metrics/registry.rs | 129 +++
 .../src/metrics/sdk_api}/async_instrument.rs | 57 --
 .../src/metrics/sdk_api}/descriptor.rs | 49 +-
 .../src/metrics/sdk_api/instrument_kind.rs | 21 +-
 opentelemetry-sdk/src/metrics/sdk_api/mod.rs | 81 ++
 .../src/metrics/sdk_api}/number.rs | 0
 .../src/metrics/sdk_api}/sync_instrument.rs | 0
 opentelemetry-sdk/src/metrics/sdk_api/wrap.rs | 319 +++++++
 .../src/metrics/selectors/mod.rs | 2 +-
 .../src/metrics/selectors/simple.rs | 86 +-
 opentelemetry-sdk/src/testing/metric.rs | 39 +-
 opentelemetry/src/lib.rs | 6 +-
 72 files changed, 3572 insertions(+), 5865 deletions(-)
 delete mode 100644 examples/basic-otlp-with-selector/Cargo.toml
 delete mode 100644 examples/basic-otlp-with-selector/README.md
 delete mode 100644 examples/basic-otlp-with-selector/src/main.rs
 delete mode 100644 opentelemetry-api/src/metrics/config.rs
 delete mode 100644 opentelemetry-api/src/metrics/counter.rs
 delete mode 100644 opentelemetry-api/src/metrics/histogram.rs
 create mode 100644 opentelemetry-api/src/metrics/instruments/counter.rs
 create mode 100644 opentelemetry-api/src/metrics/instruments/gauge.rs
 create mode 100644 opentelemetry-api/src/metrics/instruments/histogram.rs
 create mode 100644 opentelemetry-api/src/metrics/instruments/mod.rs
 create mode 100644 opentelemetry-api/src/metrics/instruments/up_down_counter.rs
 delete mode 100644 opentelemetry-api/src/metrics/observer.rs
 delete mode 100644 opentelemetry-api/src/metrics/registry.rs
 delete mode 100644 opentelemetry-api/src/metrics/sdk_api.rs
 delete mode 100644 opentelemetry-api/src/metrics/up_down_counter.rs
 delete mode 100644 opentelemetry-api/src/metrics/value_recorder.rs
 delete mode 100644 opentelemetry-sdk/benches/ddsketch.rs
 delete mode 100644 opentelemetry-sdk/src/export/metrics/aggregation.rs
 create mode 100644 opentelemetry-sdk/src/export/metrics/aggregation/mod.rs
 create mode 100644 opentelemetry-sdk/src/export/metrics/aggregation/temporality.rs
 delete mode 100644 opentelemetry-sdk/src/metrics/aggregators/array.rs
 delete mode 100644 opentelemetry-sdk/src/metrics/aggregators/ddsketch.rs
 delete mode 100644 opentelemetry-sdk/src/metrics/aggregators/min_max_sum_count.rs
 create mode 100644 opentelemetry-sdk/src/metrics/controllers/basic.rs
 delete mode 100644 opentelemetry-sdk/src/metrics/controllers/pull.rs
 delete mode 100644 opentelemetry-sdk/src/metrics/controllers/push.rs
 create mode 100644 opentelemetry-sdk/src/metrics/registry.rs
 rename {opentelemetry-api/src/metrics => opentelemetry-sdk/src/metrics/sdk_api}/async_instrument.rs (63%)
 rename {opentelemetry-api/src/metrics => opentelemetry-sdk/src/metrics/sdk_api}/descriptor.rs (56%)
 rename opentelemetry-api/src/metrics/kind.rs => opentelemetry-sdk/src/metrics/sdk_api/instrument_kind.rs (83%)
 create mode 100644 opentelemetry-sdk/src/metrics/sdk_api/mod.rs
 rename {opentelemetry-api/src/metrics => opentelemetry-sdk/src/metrics/sdk_api}/number.rs (100%)
 rename {opentelemetry-api/src/metrics => opentelemetry-sdk/src/metrics/sdk_api}/sync_instrument.rs (100%)
 create mode 100644 opentelemetry-sdk/src/metrics/sdk_api/wrap.rs

diff --git a/Cargo.toml b/Cargo.toml
index b7eaef93f6..1bee77f0d3 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -23,7 +23,6 @@ members = [
     "examples/aws-xray",
    "examples/basic",
    "examples/basic-otlp",
-    "examples/basic-otlp-with-selector",
    "examples/basic-otlp-http",
    "examples/datadog",
    "examples/dynatrace",
diff --git a/examples/basic-otlp-with-selector/Cargo.toml b/examples/basic-otlp-with-selector/Cargo.toml
deleted file mode 100644
index a7f61234c3..0000000000
--- a/examples/basic-otlp-with-selector/Cargo.toml
+++ /dev/null
@@ -1,13 +0,0 @@
-[package]
-name = "basic-otlp-with-selector"
-version = "0.1.0"
-edition = "2018"
-publish = false
-
-[dependencies]
-futures-util = { version = "0.3", default-features = false, features = ["std"] }
-lazy_static = "1.4"
-opentelemetry = { path = "../../opentelemetry", features = ["rt-tokio", "metrics"] }
-opentelemetry-otlp = { path = "../../opentelemetry-otlp", features = ["tonic", "metrics"] }
-serde_json = "1.0"
-tokio = { version = "1.0", features = ["full"] }
diff --git a/examples/basic-otlp-with-selector/README.md b/examples/basic-otlp-with-selector/README.md
deleted file mode 100644
index 977096586a..0000000000
--- a/examples/basic-otlp-with-selector/README.md
+++ /dev/null
@@ -1,7 +0,0 @@
-# Basic OTLP exporter Example
-
-This example shows how to configure OTLP metrics exporter to use custom aggregator selectors and custom export kind selectors.
-
-## Prerequisite
-You should first start a `opentelemetry-collector` on localhost using the default configuration.
-
diff --git a/examples/basic-otlp-with-selector/src/main.rs b/examples/basic-otlp-with-selector/src/main.rs
deleted file mode 100644
index 8810cb4f01..0000000000
--- a/examples/basic-otlp-with-selector/src/main.rs
+++ /dev/null
@@ -1,157 +0,0 @@
-use futures_util::{Stream, StreamExt as _};
-use opentelemetry::global::shutdown_tracer_provider;
-use opentelemetry::sdk::export::metrics::{ExportKind, ExportKindFor};
-use opentelemetry::sdk::{
-    export::metrics::{Aggregator, AggregatorSelector},
-    metrics::{aggregators, PushController},
-};
-use opentelemetry::trace::TraceError;
-use opentelemetry::{
-    baggage::BaggageExt,
-    metrics::{self, Descriptor, ObserverResult},
-    trace::{TraceContextExt, Tracer},
-    Context, Key, KeyValue,
-};
-use opentelemetry::{global, sdk::trace as sdktrace};
-use opentelemetry_otlp::Protocol;
-use opentelemetry_otlp::{ExportConfig, WithExportConfig};
-use std::error::Error;
-use std::sync::Arc;
-use std::time::Duration;
-
-fn init_tracer() -> Result {
-    opentelemetry_otlp::new_pipeline()
-        .tracing()
-        .with_exporter(
-            opentelemetry_otlp::new_exporter()
-                .tonic()
-                .with_endpoint("http://localhost:4317"),
-        )
-        .install_batch(opentelemetry::runtime::Tokio)
-}
-
-// Skip first immediate tick from tokio, not needed for async_std.
-fn delayed_interval(duration: Duration) -> impl Stream {
-    opentelemetry::sdk::util::tokio_interval_stream(duration).skip(1)
-}
-
-#[derive(Debug)]
-struct CustomAggregator();
-
-impl AggregatorSelector for CustomAggregator {
-    fn aggregator_for(
-        &self,
-        descriptor: &Descriptor,
-    ) -> Option> {
-        match descriptor.name() {
-            "ex.com.one" => Some(Arc::new(aggregators::last_value())),
-            "ex.com.two" => Some(Arc::new(aggregators::histogram(
-                descriptor,
-                &[0.0, 0.5, 1.0, 10.0],
-            ))),
-            _ => Some(Arc::new(aggregators::sum())),
-        }
-    }
-}
-
-#[derive(Debug, Clone)]
-struct CustomExportKindFor();
-
-impl ExportKindFor for CustomExportKindFor {
-    fn export_kind_for(&self, _descriptor: &Descriptor) -> ExportKind {
-        ExportKind::Delta
-    }
-}
-
-fn init_meter() -> metrics::Result {
-    let exporter_config = ExportConfig {
-        endpoint: "http://localhost:4317".to_string(),
-        protocol: Protocol::Grpc,
-        ..ExportConfig::default()
-    };
-    opentelemetry_otlp::new_pipeline()
-        .metrics(tokio::spawn, delayed_interval)
-        .with_exporter(
-            opentelemetry_otlp::new_exporter()
-                .tonic()
-                .with_export_config(exporter_config),
-        )
-        .with_export_kind(CustomExportKindFor())
-        .with_aggregator_selector(CustomAggregator())
-        .build()
-}
-
-const FOO_KEY: Key = Key::from_static_str("ex.com/foo");
-const BAR_KEY: Key = Key::from_static_str("ex.com/bar");
-const LEMONS_KEY: Key = Key::from_static_str("lemons");
-const ANOTHER_KEY: Key = Key::from_static_str("ex.com/another");
-
-lazy_static::lazy_static! {
-    static ref COMMON_ATTRIBUTES: [KeyValue; 4] = [
-        LEMONS_KEY.i64(10),
-        KeyValue::new("A", "1"),
-        KeyValue::new("B", "2"),
-        KeyValue::new("C", "3"),
-    ];
-}
-
-#[tokio::main]
-async fn main() -> Result<(), Box> {
-    // By binding the result to an unused variable, the lifetime of the variable
-    // matches the containing block, reporting traces and metrics during the whole
-    // execution.
-    let _ = init_tracer()?;
-    let _started = init_meter()?;
-
-    let tracer = global::tracer("ex.com/basic");
-    let meter = global::meter("ex.com/basic");
-
-    let one_metric_callback =
-        |res: ObserverResult| res.observe(1.0, COMMON_ATTRIBUTES.as_ref());
-    let _ = meter
-        .f64_value_observer("ex.com.one", one_metric_callback)
-        .with_description("A ValueObserver set to 1.0")
-        .init();
-
-    let histogram_two = meter.f64_histogram("ex.com.two").init();
-
-    let another_recorder = meter.f64_histogram("ex.com.two").init();
-    another_recorder.record(5.5, COMMON_ATTRIBUTES.as_ref());
-
-    let _baggage =
-        Context::current_with_baggage(vec![FOO_KEY.string("foo1"), BAR_KEY.string("bar1")])
-            .attach();
-
-    let histogram = histogram_two.bind(COMMON_ATTRIBUTES.as_ref());
-    tracer.in_span("operation", |cx| {
-        let span = cx.span();
-        span.add_event(
-            "Nice operation!".to_string(),
-            vec![Key::new("bogons").i64(100)],
-        );
-        span.set_attribute(ANOTHER_KEY.string("yes"));
-
-        meter.record_batch_with_context(
-            // Note: call-site variables added as context Entries:
-            &Context::current_with_baggage(vec![ANOTHER_KEY.string("xyz")]),
-            COMMON_ATTRIBUTES.as_ref(),
-            vec![histogram_two.measurement(2.0)],
-        );
-
-        tracer.in_span("Sub operation...", |cx| {
-            let span = cx.span();
-            span.set_attribute(LEMONS_KEY.string("five"));
-
-            span.add_event("Sub span event", vec![]);
-
-            histogram.record(1.3);
-        });
-    });
-
-    // wait for 1 minutes so that we could see metrics being pushed via OTLP every 10 seconds.
- tokio::time::sleep(Duration::from_secs(60)).await; - - shutdown_tracer_provider(); - - Ok(()) -} diff --git a/examples/basic-otlp/src/main.rs b/examples/basic-otlp/src/main.rs index 0aa26ee41f..ac50e0b147 100644 --- a/examples/basic-otlp/src/main.rs +++ b/examples/basic-otlp/src/main.rs @@ -1,15 +1,16 @@ -use futures_util::{Stream, StreamExt as _}; use opentelemetry::global::shutdown_tracer_provider; -use opentelemetry::sdk::metrics::{selectors, PushController}; +use opentelemetry::runtime; +use opentelemetry::sdk::export::metrics::aggregation::cumulative_temporality_selector; +use opentelemetry::sdk::metrics::controllers::BasicController; +use opentelemetry::sdk::metrics::selectors; use opentelemetry::sdk::Resource; use opentelemetry::trace::TraceError; +use opentelemetry::{global, sdk::trace as sdktrace}; use opentelemetry::{ - baggage::BaggageExt, - metrics::{self, ObserverResult}, + metrics, trace::{TraceContextExt, Tracer}, Context, Key, KeyValue, }; -use opentelemetry::{global, sdk::trace as sdktrace}; use opentelemetry_otlp::{ExportConfig, WithExportConfig}; use std::error::Error; use std::time::Duration; @@ -31,29 +32,25 @@ fn init_tracer() -> Result { .install_batch(opentelemetry::runtime::Tokio) } -// Skip first immediate tick from tokio, not needed for async_std. -fn delayed_interval(duration: Duration) -> impl Stream { - opentelemetry::sdk::util::tokio_interval_stream(duration).skip(1) -} - -fn init_meter() -> metrics::Result { +fn init_metrics() -> metrics::Result { let export_config = ExportConfig { endpoint: "http://localhost:4317".to_string(), ..ExportConfig::default() }; opentelemetry_otlp::new_pipeline() - .metrics(tokio::spawn, delayed_interval) + .metrics( + selectors::simple::inexpensive(), + cumulative_temporality_selector(), + runtime::Tokio, + ) .with_exporter( opentelemetry_otlp::new_exporter() .tonic() .with_export_config(export_config), ) - .with_aggregator_selector(selectors::simple::Selector::Exact) .build() } -const FOO_KEY: Key = Key::from_static_str("ex.com/foo"); -const BAR_KEY: Key = Key::from_static_str("ex.com/bar"); const LEMONS_KEY: Key = Key::from_static_str("lemons"); const ANOTHER_KEY: Key = Key::from_static_str("ex.com/another"); @@ -72,39 +69,20 @@ async fn main() -> Result<(), Box> { // matches the containing block, reporting traces and metrics during the whole // execution. let _ = init_tracer()?; - let _started = init_meter()?; + let metrics_controller = init_metrics()?; + let cx = Context::new(); let tracer = global::tracer("ex.com/basic"); let meter = global::meter("ex.com/basic"); - let one_metric_callback = - |res: ObserverResult| res.observe(1.0, COMMON_ATTRIBUTES.as_ref()); - let _ = meter - .f64_value_observer("ex.com.one", one_metric_callback) - .with_description("A ValueObserver set to 1.0") + let gauge = meter + .f64_observable_gauge("ex.com.one") + .with_description("A gauge set to 1.0") .init(); + meter.register_callback(move |cx| gauge.observe(cx, 1.0, COMMON_ATTRIBUTES.as_ref()))?; - let histogram_two = meter.f64_histogram("ex.com.two").init(); - - // Needed for code coverage reasons. 
- #[allow(deprecated)] - let a_recorder = meter.f64_value_recorder("ex.recorder.a").init(); - a_recorder.record(5.5, COMMON_ATTRIBUTES.as_ref()); - #[allow(deprecated)] - let b_recorder = meter.u64_value_recorder("ex.recorder.b").init(); - b_recorder.record(5, COMMON_ATTRIBUTES.as_ref()); - #[allow(deprecated)] - let c_recorder = meter.i64_value_recorder("ex.recorder.c").init(); - c_recorder.record(5, COMMON_ATTRIBUTES.as_ref()); - - let another_histogram = meter.f64_histogram("ex.com.two").init(); - another_histogram.record(5.5, COMMON_ATTRIBUTES.as_ref()); - - let _baggage = - Context::current_with_baggage(vec![FOO_KEY.string("foo1"), BAR_KEY.string("bar1")]) - .attach(); - - let histogram = histogram_two.bind(COMMON_ATTRIBUTES.as_ref()); + let histogram = meter.f64_histogram("ex.com.two").init(); + histogram.record(&cx, 5.5, COMMON_ATTRIBUTES.as_ref()); tracer.in_span("operation", |cx| { let span = cx.span(); @@ -114,20 +92,13 @@ async fn main() -> Result<(), Box> { ); span.set_attribute(ANOTHER_KEY.string("yes")); - meter.record_batch_with_context( - // Note: call-site variables added as context Entries: - &Context::current_with_baggage(vec![ANOTHER_KEY.string("xyz")]), - COMMON_ATTRIBUTES.as_ref(), - vec![histogram_two.measurement(2.0)], - ); - tracer.in_span("Sub operation...", |cx| { let span = cx.span(); span.set_attribute(LEMONS_KEY.string("five")); span.add_event("Sub span event", vec![]); - histogram.record(1.3); + histogram.record(&cx, 1.3, &[]); }); }); @@ -135,6 +106,7 @@ async fn main() -> Result<(), Box> { tokio::time::sleep(Duration::from_secs(60)).await; shutdown_tracer_provider(); + metrics_controller.stop(&cx)?; Ok(()) } diff --git a/examples/basic/src/main.rs b/examples/basic/src/main.rs index 4ee0e9b93b..0bff3578c5 100644 --- a/examples/basic/src/main.rs +++ b/examples/basic/src/main.rs @@ -1,17 +1,17 @@ -use futures_util::{Stream, StreamExt as _}; -use opentelemetry::global; use opentelemetry::global::shutdown_tracer_provider; +use opentelemetry::metrics::MetricsError; +use opentelemetry::sdk::metrics::controllers::BasicController; +use opentelemetry::sdk::metrics::{controllers, processors, selectors}; use opentelemetry::sdk::trace::Config; -use opentelemetry::sdk::{metrics::PushController, trace as sdktrace, Resource}; +use opentelemetry::sdk::{export, trace as sdktrace, Resource}; use opentelemetry::trace::TraceError; use opentelemetry::{ baggage::BaggageExt, - metrics::ObserverResult, trace::{TraceContextExt, Tracer}, Context, Key, KeyValue, }; +use opentelemetry::{global, runtime}; use std::error::Error; -use std::time::Duration; fn init_tracer() -> Result { opentelemetry_jaeger::new_agent_pipeline() @@ -20,16 +20,24 @@ fn init_tracer() -> Result { KeyValue::new("service.name", "new_service"), KeyValue::new("exporter", "otlp-jaeger"), ]))) - .install_batch(opentelemetry::runtime::Tokio) + .install_batch(runtime::Tokio) } -// Skip first immediate tick from tokio, not needed for async_std. 
-fn delayed_interval(duration: Duration) -> impl Stream { - opentelemetry::sdk::util::tokio_interval_stream(duration).skip(1) -} +fn init_metrics() -> Result { + let exporter = export::metrics::stdout().build()?; + let pusher = controllers::basic(processors::factory( + selectors::simple::inexpensive(), + exporter.temporality_selector(), + )) + .with_exporter(exporter) + .build(); + + let cx = Context::new(); + pusher.start(&cx, runtime::Tokio)?; -fn init_meter() -> PushController { - opentelemetry::sdk::export::metrics::stdout(tokio::spawn, delayed_interval).init() + global::set_meter_provider(pusher.clone()); + + Ok(pusher) } const FOO_KEY: Key = Key::from_static_str("ex.com/foo"); @@ -52,26 +60,25 @@ async fn main() -> Result<(), Box> { // matches the containing block, reporting traces and metrics during the whole // execution. let _tracer = init_tracer()?; - let _started = init_meter(); + let controller = init_metrics()?; + let cx = Context::new(); let tracer = global::tracer("ex.com/basic"); let meter = global::meter("ex.com/basic"); - let one_metric_callback = - |res: ObserverResult| res.observe(1.0, COMMON_ATTRIBUTES.as_ref()); - let _ = meter - .f64_value_observer("ex.com.one", one_metric_callback) - .with_description("A ValueObserver set to 1.0") + let observable_counter = meter + .u64_observable_counter("ex.com.one") + .with_description("An observable counter set to 1.0") .init(); - let histogram_two = meter.f64_histogram("ex.com.two").init(); + let histogram = meter.f64_histogram("ex.com.three").init(); + + let observable_gauge = meter.f64_observable_gauge("ex.com.two").init(); let _baggage = Context::current_with_baggage(vec![FOO_KEY.string("foo1"), BAR_KEY.string("bar1")]) .attach(); - let histogram = histogram_two.bind(COMMON_ATTRIBUTES.as_ref()); - tracer.in_span("operation", |cx| { let span = cx.span(); span.add_event( @@ -80,12 +87,10 @@ async fn main() -> Result<(), Box> { ); span.set_attribute(ANOTHER_KEY.string("yes")); - meter.record_batch_with_context( - // Note: call-site variables added as context Entries: - &Context::current_with_baggage(vec![ANOTHER_KEY.string("xyz")]), - COMMON_ATTRIBUTES.as_ref(), - vec![histogram_two.measurement(2.0)], - ); + let _ = meter.register_callback(move |cx| { + observable_counter.observe(cx, 1, &[]); + observable_gauge.observe(cx, 2.0, &[]); + }); tracer.in_span("Sub operation...", |cx| { let span = cx.span(); @@ -93,11 +98,12 @@ async fn main() -> Result<(), Box> { span.add_event("Sub span event", vec![]); - histogram.record(1.3); + histogram.record(&cx, 1.3, &[]); }); }); shutdown_tracer_provider(); // sending remaining spans. + controller.stop(&cx)?; // send remaining metrics. 
Ok(()) } diff --git a/examples/dynatrace/src/main.rs b/examples/dynatrace/src/main.rs index 942be78ebb..17905d54ef 100644 --- a/examples/dynatrace/src/main.rs +++ b/examples/dynatrace/src/main.rs @@ -1,21 +1,22 @@ -use futures::stream::Stream; -use futures::StreamExt; use opentelemetry::global::shutdown_tracer_provider; -use opentelemetry::sdk::{ - export::metrics::{Aggregator, AggregatorSelector, ExportKind, ExportKindFor}, - metrics::{aggregators, PushController}, +use opentelemetry::runtime; +use opentelemetry::sdk::export::metrics::aggregation::{ + AggregationKind, Temporality, TemporalitySelector, }; +use opentelemetry::sdk::metrics::aggregators::Aggregator; +use opentelemetry::sdk::metrics::controllers::BasicController; +use opentelemetry::sdk::metrics::sdk_api::Descriptor; +use opentelemetry::sdk::{export::metrics::AggregatorSelector, metrics::aggregators}; use opentelemetry::trace::TraceError; -use opentelemetry::{ - baggage::BaggageExt, - metrics::{self, Descriptor, ObserverResult}, - trace::{TraceContextExt, Tracer}, - Context, Key, KeyValue, -}; use opentelemetry::{ global, sdk::{propagation::TraceContextPropagator, trace as sdktrace, Resource}, }; +use opentelemetry::{ + metrics, + trace::{TraceContextExt, Tracer}, + Context, Key, KeyValue, +}; use opentelemetry_dynatrace::transform::DimensionSet; use opentelemetry_dynatrace::ExportConfig; use opentelemetry_otlp::WithExportConfig; @@ -50,14 +51,13 @@ fn init_tracer() -> Result { .install_batch(opentelemetry::runtime::Tokio) } -// Skip first immediate tick from tokio, not needed for async_std. -fn delayed_interval(duration: Duration) -> impl Stream { - opentelemetry::sdk::util::tokio_interval_stream(duration).skip(1) -} - -fn init_meter() -> metrics::Result { +fn init_metrics() -> metrics::Result { opentelemetry_dynatrace::new_pipeline() - .metrics(tokio::spawn, delayed_interval) + .metrics( + CustomAggregator(), + CustomTemporalitySelector(), + runtime::Tokio, + ) .with_exporter( opentelemetry_dynatrace::new_exporter().with_export_config( ExportConfig::default() @@ -75,12 +75,10 @@ fn init_meter() -> metrics::Result { KeyValue::new(semcov::resource::SERVICE_NAME, "rust-quickstart"), KeyValue::new(semcov::resource::SERVICE_VERSION, env!("CARGO_PKG_VERSION")), ])) - .with_export_kind(CustomExportKindFor()) - .with_aggregator_selector(CustomAggregator()) .build() } -#[derive(Debug)] +#[derive(Debug, Clone)] struct CustomAggregator(); impl AggregatorSelector for CustomAggregator { @@ -90,26 +88,21 @@ impl AggregatorSelector for CustomAggregator { ) -> Option> { match descriptor.name() { "ex.com.one" => Some(Arc::new(aggregators::last_value())), - "ex.com.two" => Some(Arc::new(aggregators::histogram( - descriptor, - &[0.0, 0.5, 1.0, 10.0], - ))), + "ex.com.two" => Some(Arc::new(aggregators::histogram(&[0.0, 0.5, 1.0, 10.0]))), _ => Some(Arc::new(aggregators::sum())), } } } #[derive(Debug, Clone)] -struct CustomExportKindFor(); +struct CustomTemporalitySelector(); -impl ExportKindFor for CustomExportKindFor { - fn export_kind_for(&self, _descriptor: &Descriptor) -> ExportKind { - ExportKind::Delta +impl TemporalitySelector for CustomTemporalitySelector { + fn temporality_for(&self, _descriptor: &Descriptor, _kind: &AggregationKind) -> Temporality { + Temporality::Delta } } -const FOO_KEY: Key = Key::from_static_str("ex.com/foo"); -const BAR_KEY: Key = Key::from_static_str("ex.com/bar"); const LEMONS_KEY: Key = Key::from_static_str("lemons"); const ANOTHER_KEY: Key = Key::from_static_str("ex.com/another"); @@ -128,28 +121,22 @@ async 
fn main() -> Result<(), Box> { // matches the containing block, reporting traces and metrics during the whole // execution. let _init_tracer = init_tracer()?; - let _init_meter = init_meter()?; + let metrics_controller = init_metrics()?; + let cx = Context::new(); let tracer = global::tracer("ex.com/basic"); let meter = global::meter("ex.com/basic"); - let one_metric_callback = - |res: ObserverResult| res.observe(1.0, COMMON_ATTRIBUTES.as_ref()); - let _ = meter - .f64_value_observer("ex.com.one", one_metric_callback) + let gauge = meter + .f64_observable_gauge("ex.com.one") .with_description("A ValueObserver set to 1.0") .init(); + meter.register_callback(move |cx| gauge.observe(cx, 1.0, COMMON_ATTRIBUTES.as_ref()))?; - let histogram_two = meter.f64_histogram("ex.com.two").init(); + let histogram = meter.f64_histogram("ex.com.two").init(); let another_recorder = meter.f64_histogram("ex.com.two").init(); - another_recorder.record(5.5, COMMON_ATTRIBUTES.as_ref()); - - let _baggage = - Context::current_with_baggage(vec![FOO_KEY.string("foo1"), BAR_KEY.string("bar1")]) - .attach(); - - let histogram = histogram_two.bind(COMMON_ATTRIBUTES.as_ref()); + another_recorder.record(&cx, 5.5, COMMON_ATTRIBUTES.as_ref()); tracer.in_span("operation", |cx| { let span = cx.span(); @@ -159,20 +146,13 @@ async fn main() -> Result<(), Box> { ); span.set_attribute(ANOTHER_KEY.string("yes")); - meter.record_batch_with_context( - // Note: call-site variables added as context Entries: - &Context::current_with_baggage(vec![ANOTHER_KEY.string("xyz")]), - COMMON_ATTRIBUTES.as_ref(), - vec![histogram_two.measurement(2.0)], - ); - tracer.in_span("Sub operation...", |cx| { let span = cx.span(); span.set_attribute(LEMONS_KEY.string("five")); span.add_event("Sub span event", vec![]); - histogram.record(1.3); + histogram.record(&cx, 1.3, &[]); }); }); @@ -180,6 +160,7 @@ async fn main() -> Result<(), Box> { tokio::time::sleep(Duration::from_secs(60)).await; shutdown_tracer_provider(); + metrics_controller.stop(&cx)?; Ok(()) } diff --git a/examples/external-otlp-grpcio-async-std/Cargo.toml b/examples/external-otlp-grpcio-async-std/Cargo.toml index 43dcbf475a..3b70e775e6 100644 --- a/examples/external-otlp-grpcio-async-std/Cargo.toml +++ b/examples/external-otlp-grpcio-async-std/Cargo.toml @@ -5,7 +5,7 @@ edition = "2018" publish = false [dependencies] -async-std = { version = "= 1.8.0", features = ["attributes"] } +async-std = { version = "= 1.10.0", features = ["attributes"] } env_logger = "0.8.2" opentelemetry = { path = "../../opentelemetry", features = ["rt-async-std"] } opentelemetry-otlp = { path = "../../opentelemetry-otlp", features = [ diff --git a/examples/hyper-prometheus/src/main.rs b/examples/hyper-prometheus/src/main.rs index 7d27675c1a..15461b7453 100644 --- a/examples/hyper-prometheus/src/main.rs +++ b/examples/hyper-prometheus/src/main.rs @@ -8,8 +8,12 @@ use hyper::{ }; use opentelemetry::{ global, - metrics::{BoundCounter, BoundHistogram}, - KeyValue, + metrics::{Counter, Histogram}, + sdk::{ + export::metrics::aggregation, + metrics::{controllers, processors, selectors}, + }, + Context, KeyValue, }; use opentelemetry_prometheus::PrometheusExporter; use prometheus::{Encoder, TextEncoder}; @@ -22,13 +26,14 @@ lazy_static! 
{ } async fn serve_req( + cx: Context, req: Request, state: Arc, ) -> Result, hyper::Error> { println!("Receiving request at path {}", req.uri()); let request_start = SystemTime::now(); - state.http_counter.add(1); + state.http_counter.add(&cx, 1, &[]); let response = match (req.method(), req.uri().path()) { (&Method::GET, "/metrics") => { @@ -36,7 +41,7 @@ async fn serve_req( let encoder = TextEncoder::new(); let metric_families = state.exporter.registry().gather(); encoder.encode(&metric_families, &mut buffer).unwrap(); - state.http_body_gauge.record(buffer.len() as u64); + state.http_body_gauge.record(&cx, buffer.len() as u64, &[]); Response::builder() .status(200) @@ -54,22 +59,35 @@ async fn serve_req( .unwrap(), }; - state - .http_req_histogram - .record(request_start.elapsed().map_or(0.0, |d| d.as_secs_f64())); + state.http_req_histogram.record( + &cx, + request_start.elapsed().map_or(0.0, |d| d.as_secs_f64()), + &[], + ); Ok(response) } struct AppState { exporter: PrometheusExporter, - http_counter: BoundCounter, - http_body_gauge: BoundHistogram, - http_req_histogram: BoundHistogram, + http_counter: Counter, + http_body_gauge: Histogram, + http_req_histogram: Histogram, } #[tokio::main] pub async fn main() -> Result<(), Box> { - let exporter = opentelemetry_prometheus::exporter().init(); + let controller = controllers::basic( + processors::factory( + selectors::simple::histogram([1.0, 2.0, 5.0, 10.0, 20.0, 50.0]), + aggregation::cumulative_temporality_selector(), + ) + .with_memory(true), + ) + .build(); + + let exporter = opentelemetry_prometheus::exporter(controller).init(); + global::set_meter_provider(exporter.meter_provider()?); + let cx = Context::new(); let meter = global::meter("ex.com/hyper"); let state = Arc::new(AppState { @@ -77,28 +95,30 @@ pub async fn main() -> Result<(), Box> { http_counter: meter .u64_counter("example.http_requests_total") .with_description("Total number of HTTP requests made.") - .init() - .bind(HANDLER_ALL.as_ref()), + .init(), http_body_gauge: meter .u64_histogram("example.http_response_size_bytes") .with_description("The metrics HTTP response sizes in bytes.") - .init() - .bind(HANDLER_ALL.as_ref()), + .init(), http_req_histogram: meter .f64_histogram("example.http_request_duration_seconds") .with_description("The HTTP request latencies in seconds.") - .init() - .bind(HANDLER_ALL.as_ref()), + .init(), }); // For every connection, we must make a `Service` to handle all // incoming HTTP requests on said connection. let make_svc = make_service_fn(move |_conn| { let state = state.clone(); + let cx = cx.clone(); // This is the `Service` that will handle the connection. // `service_fn` is a helper to convert a function that // returns a Response into a `Service`. - async move { Ok::<_, Infallible>(service_fn(move |req| serve_req(req, state.clone()))) } + async move { + Ok::<_, Infallible>(service_fn(move |req| { + serve_req(cx.clone(), req, state.clone()) + })) + } }); let addr = ([127, 0, 0, 1], 3000).into(); diff --git a/opentelemetry-api/src/global/metrics.rs b/opentelemetry-api/src/global/metrics.rs index 0761a1332e..0d3f7a9344 100644 --- a/opentelemetry-api/src/global/metrics.rs +++ b/opentelemetry-api/src/global/metrics.rs @@ -1,4 +1,5 @@ use crate::metrics::{self, Meter, MeterProvider}; +use core::fmt; use std::sync::{Arc, RwLock}; lazy_static::lazy_static! { @@ -8,19 +9,25 @@ lazy_static::lazy_static! { /// Represents the globally configured [`MeterProvider`] instance for this /// application. 
-#[derive(Debug, Clone)] +#[derive(Clone)] pub struct GlobalMeterProvider { provider: Arc, } +impl fmt::Debug for GlobalMeterProvider { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("GlobalMeterProvider").finish() + } +} + impl MeterProvider for GlobalMeterProvider { - fn meter( + fn versioned_meter( &self, name: &'static str, version: Option<&'static str>, schema_url: Option<&'static str>, ) -> Meter { - self.provider.meter(name, version, schema_url) + self.provider.versioned_meter(name, version, schema_url) } } @@ -63,7 +70,7 @@ pub fn meter_provider() -> GlobalMeterProvider { /// /// This is a more convenient way of expressing `global::meter_provider().meter(name, None, None)`. pub fn meter(name: &'static str) -> Meter { - meter_provider().meter(name, None, None) + meter_provider().versioned_meter(name, None, None) } /// Creates a [`Meter`] with the name, version and schema url. @@ -84,5 +91,5 @@ pub fn meter_with_version( version: Option<&'static str>, schema_url: Option<&'static str>, ) -> Meter { - meter_provider().meter(name, version, schema_url) + meter_provider().versioned_meter(name, version, schema_url) } diff --git a/opentelemetry-api/src/global/mod.rs b/opentelemetry-api/src/global/mod.rs index a153a92e19..89a94e7524 100644 --- a/opentelemetry-api/src/global/mod.rs +++ b/opentelemetry-api/src/global/mod.rs @@ -91,7 +91,7 @@ //! # #[cfg(feature="metrics")] //! # { //! use opentelemetry_api::metrics::{Meter, noop::NoopMeterProvider}; -//! use opentelemetry_api::{global, KeyValue}; +//! use opentelemetry_api::{global, Context, KeyValue}; //! //! fn init_meter() { //! let provider = NoopMeterProvider::new(); @@ -105,9 +105,10 @@ //! // Then you can get a named tracer instance anywhere in your codebase. //! let meter = global::meter("my-component"); //! let counter = meter.u64_counter("my_counter").init(); +//! let cx = Context::current(); //! //! // record metrics -//! counter.add(1, &[KeyValue::new("mykey", "myvalue")]); +//! counter.add(&cx, 1, &[KeyValue::new("mykey", "myvalue")]); //! } //! //! // in main or other app start @@ -121,16 +122,17 @@ //! ``` //! # #[cfg(feature="metrics")] //! # { -//! use opentelemetry_api::{global, KeyValue}; +//! use opentelemetry_api::{global, Context, KeyValue}; //! //! pub fn my_traced_library_function() { //! // End users of your library will configure their global meter provider //! // so you can use the global meter without any setup //! let tracer = global::meter("my-library-name"); //! let counter = tracer.u64_counter("my_counter").init(); +//! let cx = Context::current(); //! //! // record metrics -//! counter.add(1, &[KeyValue::new("mykey", "myvalue")]); +//! counter.add(&cx, 1, &[KeyValue::new("mykey", "myvalue")]); //! } //! # } //! ``` diff --git a/opentelemetry-api/src/metrics/config.rs b/opentelemetry-api/src/metrics/config.rs deleted file mode 100644 index 710c3c2381..0000000000 --- a/opentelemetry-api/src/metrics/config.rs +++ /dev/null @@ -1,59 +0,0 @@ -use crate::metrics::Unit; -use crate::InstrumentationLibrary; -use std::borrow::Cow; - -/// Config contains some options for metrics of any kind. 
-#[derive(Clone, Debug, PartialEq, Hash)] -pub struct InstrumentConfig { - pub(crate) description: Option, - pub(crate) unit: Option, - pub(crate) instrumentation_library: InstrumentationLibrary, -} - -impl InstrumentConfig { - /// Create a new config from instrumentation name - pub fn with_instrumentation_name(instrumentation_name: &'static str) -> Self { - InstrumentConfig { - description: None, - unit: None, - instrumentation_library: InstrumentationLibrary::new(instrumentation_name, None, None), - } - } - - /// Create a new config with instrumentation name and optional version - pub fn with_instrumentation>>( - instrumentation_name: T, - instrumentation_version: Option, - schema_url: Option, - ) -> Self { - InstrumentConfig { - description: None, - unit: None, - instrumentation_library: InstrumentationLibrary::new( - instrumentation_name, - instrumentation_version, - schema_url, - ), - } - } - - /// Description is an optional field describing the metric instrument. - pub fn description(&self) -> Option<&String> { - self.description.as_ref() - } - - /// Unit is an optional field describing the metric instrument data. - pub fn unit(&self) -> Option<&Unit> { - self.unit.as_ref() - } - - /// Instrumentation name is the name given to the Meter that created this instrument. - pub fn instrumentation_name(&self) -> Cow<'static, str> { - self.instrumentation_library.name.clone() - } - - /// Instrumentation version returns the version of instrumentation - pub fn instrumentation_version(&self) -> Option> { - self.instrumentation_library.version.clone() - } -} diff --git a/opentelemetry-api/src/metrics/counter.rs b/opentelemetry-api/src/metrics/counter.rs deleted file mode 100644 index 8a584c9eac..0000000000 --- a/opentelemetry-api/src/metrics/counter.rs +++ /dev/null @@ -1,107 +0,0 @@ -use crate::{ - metrics::{ - sync_instrument::{SyncBoundInstrument, SyncInstrument}, - Descriptor, InstrumentKind, Measurement, Meter, Number, NumberKind, Result, Unit, - }, - KeyValue, -}; -use std::marker; - -/// A metric that accumulates values. -#[derive(Clone, Debug)] -pub struct Counter(SyncInstrument); - -impl Counter -where - T: Into, -{ - /// Creates a bound instrument for this counter. The attributes are associated with - /// values recorded via subsequent calls to record. - pub fn bind(&self, attributes: &[KeyValue]) -> BoundCounter { - let bound_instrument = self.0.bind(attributes); - - BoundCounter { bound_instrument } - } - - /// Increment this counter by a given T - pub fn add(&self, value: T, attributes: &[KeyValue]) { - self.0.direct_record(value.into(), attributes) - } - - /// Creates a Measurement for use with batch recording. - pub fn measurement(&self, value: T) -> Measurement { - Measurement::new(value.into(), self.0.instrument().clone()) - } -} - -/// BoundCounter is a bound instrument for counters. -#[derive(Clone, Debug)] -pub struct BoundCounter { - bound_instrument: SyncBoundInstrument, -} - -impl BoundCounter -where - T: Into, -{ - /// Increment this counter by a given T - pub fn add(&self, value: T) { - self.bound_instrument.direct_record(value.into()) - } -} - -/// Configuration for building a counter. 
-#[derive(Debug)] -pub struct CounterBuilder<'a, T> { - meter: &'a Meter, - descriptor: Descriptor, - _marker: marker::PhantomData, -} - -impl<'a, T> CounterBuilder<'a, T> { - /// Create a new counter builder - pub(crate) fn new(meter: &'a Meter, name: String, number_kind: NumberKind) -> Self { - CounterBuilder { - meter, - descriptor: Descriptor::new( - name, - meter.instrumentation_library().name, - meter.instrumentation_library().version, - meter.instrumentation_library().schema_url, - InstrumentKind::Counter, - number_kind, - ), - _marker: marker::PhantomData, - } - } - - /// Set the description for this counter - pub fn with_description>(mut self, description: S) -> Self { - self.descriptor.set_description(description.into()); - self - } - - /// Set the unit for this counter. - pub fn with_unit(mut self, unit: Unit) -> Self { - self.descriptor.config.unit = Some(unit); - self - } - - /// Creates a new counter instrument. - pub fn try_init(self) -> Result> { - let instrument = self.meter.new_sync_instrument(self.descriptor)?; - Ok(Counter(SyncInstrument::new(instrument))) - } - - /// Creates a new counter instrument. - /// - /// # Panics - /// - /// This function panics if the instrument cannot be created. Use try_init if you want to - /// handle errors. - pub fn init(self) -> Counter { - Counter(SyncInstrument::new( - self.meter.new_sync_instrument(self.descriptor).unwrap(), - )) - } -} diff --git a/opentelemetry-api/src/metrics/histogram.rs b/opentelemetry-api/src/metrics/histogram.rs deleted file mode 100644 index 461ec26778..0000000000 --- a/opentelemetry-api/src/metrics/histogram.rs +++ /dev/null @@ -1,107 +0,0 @@ -use crate::metrics::{ - sync_instrument::{SyncBoundInstrument, SyncInstrument}, - Descriptor, InstrumentKind, Measurement, Meter, Number, NumberKind, Result, Unit, -}; -use crate::KeyValue; -use std::marker; - -/// Histogram is a metric that records per-request non-additive values. -#[derive(Clone, Debug)] -pub struct Histogram(SyncInstrument); - -impl Histogram -where - T: Into, -{ - /// Creates a bound instrument for this Histogram. The attributes are - /// associated with values recorded via subsequent calls to record. - pub fn bind(&self, attributes: &[KeyValue]) -> BoundHistogram { - let bound_instrument = self.0.bind(attributes); - BoundHistogram { bound_instrument } - } - - /// Record a new metric value - pub fn record(&self, value: T, attributes: &[KeyValue]) { - self.0.direct_record(value.into(), attributes) - } - - /// Creates a `Measurement` object to use with batch recording. - pub fn measurement(&self, value: T) -> Measurement { - Measurement::new(value.into(), self.0.instrument().clone()) - } -} - -/// BoundHistogram is a bound instrument for recording per-request -/// non-additive values. -/// -/// It inherits the Unbind function from syncBoundInstrument. -#[derive(Clone, Debug)] -pub struct BoundHistogram { - bound_instrument: SyncBoundInstrument, -} - -impl BoundHistogram -where - T: Into, -{ - /// Adds a new value to the list of Histogram's records. The attributes - /// should contain the keys and values to be associated with this value. - pub fn record(&self, value: T) { - self.bound_instrument.direct_record(value.into()) - } -} - -/// Initialization configuration for a given `Histogram`. 
-#[derive(Debug)] -pub struct HistogramBuilder<'a, T> { - meter: &'a Meter, - descriptor: Descriptor, - _marker: marker::PhantomData, -} - -impl<'a, T> HistogramBuilder<'a, T> { - pub(crate) fn new(meter: &'a Meter, name: String, number_kind: NumberKind) -> Self { - HistogramBuilder { - meter, - descriptor: Descriptor::new( - name, - meter.instrumentation_library().name, - meter.instrumentation_library().version, - meter.instrumentation_library().schema_url, - InstrumentKind::Histogram, - number_kind, - ), - _marker: marker::PhantomData, - } - } - - /// Set the description for this `Histogram` - pub fn with_description>(mut self, description: S) -> Self { - self.descriptor.set_description(description.into()); - self - } - - /// Set the unit for this `Histogram`. - pub fn with_unit(mut self, unit: Unit) -> Self { - self.descriptor.config.unit = Some(unit); - self - } - - /// Tries to create a new `Histogram`. - pub fn try_init(self) -> Result> { - let instrument = self.meter.new_sync_instrument(self.descriptor)?; - Ok(Histogram(SyncInstrument::new(instrument))) - } - - /// Creates a new `Histogram`. - /// - /// # Panics - /// - /// This function panics if the instrument cannot be created. Use try_init if you want to - /// handle errors. - pub fn init(self) -> Histogram { - Histogram(SyncInstrument::new( - self.meter.new_sync_instrument(self.descriptor).unwrap(), - )) - } -} diff --git a/opentelemetry-api/src/metrics/instruments/counter.rs b/opentelemetry-api/src/metrics/instruments/counter.rs new file mode 100644 index 0000000000..903dd0d5ed --- /dev/null +++ b/opentelemetry-api/src/metrics/instruments/counter.rs @@ -0,0 +1,130 @@ +use crate::{ + metrics::{InstrumentBuilder, MetricsError}, + Context, KeyValue, +}; +use core::fmt; +use std::convert::TryFrom; +use std::sync::Arc; + +/// An SDK implemented instrument that records increasing values. +pub trait SyncCounter { + /// Records an increment to the counter. + fn add(&self, cx: &Context, value: T, attributes: &[KeyValue]); +} + +/// An instrument that records increasing values. +#[derive(Clone)] +pub struct Counter(Arc + Send + Sync>); + +impl fmt::Debug for Counter +where + T: fmt::Debug, +{ + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.write_fmt(format_args!("Counter<{}>", std::any::type_name::())) + } +} + +impl Counter { + /// Create a new counter. + pub fn new(inner: Arc + Send + Sync>) -> Self { + Counter(inner) + } + + /// Records an increment to the counter. + pub fn add(&self, cx: &Context, value: T, attributes: &[KeyValue]) { + self.0.add(cx, value, attributes) + } +} + +impl TryFrom>> for Counter { + type Error = MetricsError; + + fn try_from(builder: InstrumentBuilder<'_, Counter>) -> Result { + builder.meter.instrument_provider.u64_counter( + builder.name, + builder.description, + builder.unit, + ) + } +} + +impl TryFrom>> for Counter { + type Error = MetricsError; + + fn try_from(builder: InstrumentBuilder<'_, Counter>) -> Result { + builder.meter.instrument_provider.f64_counter( + builder.name, + builder.description, + builder.unit, + ) + } +} + +/// An SDK implemented async instrument that records increasing values. +pub trait AsyncCounter { + /// Records an increment to the counter. + /// + /// It is only valid to call this within a callback. If called outside of the + /// registered callback it should have no effect on the instrument, and an + /// error will be reported via the error handler. 
+ fn observe(&self, cx: &Context, value: T, attributes: &[KeyValue]); +} + +/// An async instrument that records increasing values. +pub struct ObservableCounter(Arc + Send + Sync>); + +impl ObservableCounter { + /// Create a new observable counter. + pub fn new(inner: Arc + Send + Sync>) -> Self { + ObservableCounter(inner) + } +} + +impl fmt::Debug for ObservableCounter { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.write_fmt(format_args!( + "ObservableCounter<{}>", + std::any::type_name::() + )) + } +} + +impl ObservableCounter { + /// Records an increment to the counter. + /// + /// It is only valid to call this within a callback. If called outside of the + /// registered callback it should have no effect on the instrument, and an + /// error will be reported via the error handler. + pub fn observe(&self, cx: &Context, value: T, attributes: &[KeyValue]) { + self.0.observe(cx, value, attributes) + } +} + +impl TryFrom>> for ObservableCounter { + type Error = MetricsError; + + fn try_from( + builder: InstrumentBuilder<'_, ObservableCounter>, + ) -> Result { + builder.meter.instrument_provider.u64_observable_counter( + builder.name, + builder.description, + builder.unit, + ) + } +} + +impl TryFrom>> for ObservableCounter { + type Error = MetricsError; + + fn try_from( + builder: InstrumentBuilder<'_, ObservableCounter>, + ) -> Result { + builder.meter.instrument_provider.f64_observable_counter( + builder.name, + builder.description, + builder.unit, + ) + } +} diff --git a/opentelemetry-api/src/metrics/instruments/gauge.rs b/opentelemetry-api/src/metrics/instruments/gauge.rs new file mode 100644 index 0000000000..ad3c7945ac --- /dev/null +++ b/opentelemetry-api/src/metrics/instruments/gauge.rs @@ -0,0 +1,87 @@ +use crate::{ + metrics::{InstrumentBuilder, MetricsError}, + Context, KeyValue, +}; +use core::fmt; +use std::convert::TryFrom; +use std::sync::Arc; + +/// An SDK implemented instrument that records independent readings. +pub trait AsyncGauge: Send + Sync { + /// Records the state of the instrument. + /// + /// It is only valid to call this within a callback. If called outside of the + /// registered callback it should have no effect on the instrument, and an + /// error will be reported via the error handler. + fn observe(&self, cx: &Context, value: T, attributes: &[KeyValue]); +} + +/// An instrument that records independent readings. +#[derive(Clone)] +pub struct ObservableGauge(Arc>); + +impl fmt::Debug for ObservableGauge +where + T: fmt::Debug, +{ + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.write_fmt(format_args!( + "ObservableGauge<{}>", + std::any::type_name::() + )) + } +} + +impl ObservableGauge { + /// Records the state of the instrument. + /// + /// It is only valid to call this within a callback. If called outside of the + /// registered callback it should have no effect on the instrument, and an + /// error will be reported via the error handler. 
+ pub fn observe(&self, cx: &Context, value: T, attributes: &[KeyValue]) { + self.0.observe(cx, value, attributes) + } +} + +impl ObservableGauge { + /// Create a new gauge + pub fn new(inner: Arc>) -> Self { + ObservableGauge(inner) + } +} + +impl TryFrom>> for ObservableGauge { + type Error = MetricsError; + + fn try_from(builder: InstrumentBuilder<'_, ObservableGauge>) -> Result { + builder.meter.instrument_provider.u64_observable_gauge( + builder.name, + builder.description, + builder.unit, + ) + } +} + +impl TryFrom>> for ObservableGauge { + type Error = MetricsError; + + fn try_from(builder: InstrumentBuilder<'_, ObservableGauge>) -> Result { + builder.meter.instrument_provider.f64_observable_gauge( + builder.name, + builder.description, + builder.unit, + ) + } +} + +impl TryFrom>> for ObservableGauge { + type Error = MetricsError; + + fn try_from(builder: InstrumentBuilder<'_, ObservableGauge>) -> Result { + builder.meter.instrument_provider.i64_observable_gauge( + builder.name, + builder.description, + builder.unit, + ) + } +} diff --git a/opentelemetry-api/src/metrics/instruments/histogram.rs b/opentelemetry-api/src/metrics/instruments/histogram.rs new file mode 100644 index 0000000000..c460575623 --- /dev/null +++ b/opentelemetry-api/src/metrics/instruments/histogram.rs @@ -0,0 +1,74 @@ +use crate::{ + metrics::{InstrumentBuilder, MetricsError}, + Context, KeyValue, +}; +use core::fmt; +use std::convert::TryFrom; +use std::sync::Arc; + +/// An SDK implemented instrument that records a distribution of values. +pub trait SyncHistogram { + /// Adds an additional value to the distribution. + fn record(&self, cx: &Context, value: T, attributes: &[KeyValue]); +} + +/// An instrument that records a distribution of values. +#[derive(Clone)] +pub struct Histogram(Arc + Send + Sync>); + +impl fmt::Debug for Histogram +where + T: fmt::Debug, +{ + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.write_fmt(format_args!("Histogram<{}>", std::any::type_name::())) + } +} + +impl Histogram { + /// Create a new histogram. + pub fn new(inner: Arc + Send + Sync>) -> Self { + Histogram(inner) + } + + /// Adds an additional value to the distribution. 
+ pub fn record(&self, cx: &Context, value: T, attributes: &[KeyValue]) { + self.0.record(cx, value, attributes) + } +} + +impl TryFrom>> for Histogram { + type Error = MetricsError; + + fn try_from(builder: InstrumentBuilder<'_, Histogram>) -> Result { + builder.meter.instrument_provider.f64_histogram( + builder.name, + builder.description, + builder.unit, + ) + } +} + +impl TryFrom>> for Histogram { + type Error = MetricsError; + + fn try_from(builder: InstrumentBuilder<'_, Histogram>) -> Result { + builder.meter.instrument_provider.u64_histogram( + builder.name, + builder.description, + builder.unit, + ) + } +} + +impl TryFrom>> for Histogram { + type Error = MetricsError; + + fn try_from(builder: InstrumentBuilder<'_, Histogram>) -> Result { + builder.meter.instrument_provider.i64_histogram( + builder.name, + builder.description, + builder.unit, + ) + } +} diff --git a/opentelemetry-api/src/metrics/instruments/mod.rs b/opentelemetry-api/src/metrics/instruments/mod.rs new file mode 100644 index 0000000000..e8d2d74b40 --- /dev/null +++ b/opentelemetry-api/src/metrics/instruments/mod.rs @@ -0,0 +1,72 @@ +use crate::metrics::{Meter, MetricsError, Result, Unit}; +use core::fmt; +use std::convert::TryFrom; +use std::marker; + +pub(super) mod counter; +pub(super) mod gauge; +pub(super) mod histogram; +pub(super) mod up_down_counter; + +/// Configuration for building an instrument. +pub struct InstrumentBuilder<'a, T> { + meter: &'a Meter, + name: String, + description: Option, + unit: Option, + _marker: marker::PhantomData, +} + +impl<'a, T> InstrumentBuilder<'a, T> +where + T: TryFrom, +{ + /// Create a new counter builder + pub(crate) fn new(meter: &'a Meter, name: String) -> Self { + InstrumentBuilder { + meter, + name, + description: None, + unit: None, + _marker: marker::PhantomData, + } + } + + /// Set the description for this counter + pub fn with_description>(mut self, description: S) -> Self { + self.description = Some(description.into()); + self + } + + /// Set the unit for this counter. + pub fn with_unit(mut self, unit: Unit) -> Self { + self.unit = Some(unit); + self + } + + /// Creates a new counter instrument. + pub fn try_init(self) -> Result { + T::try_from(self) + } + + /// Creates a new counter instrument. + /// + /// # Panics + /// + /// This function panics if the instrument cannot be created. Use try_init if you want to + /// handle errors. + pub fn init(self) -> T { + self.try_init().unwrap() + } +} + +impl fmt::Debug for InstrumentBuilder<'_, T> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("InstrumentBuilder") + .field("name", &self.name) + .field("description", &self.description) + .field("unit", &self.unit) + .field("kind", &std::any::type_name::()) + .finish() + } +} diff --git a/opentelemetry-api/src/metrics/instruments/up_down_counter.rs b/opentelemetry-api/src/metrics/instruments/up_down_counter.rs new file mode 100644 index 0000000000..40605bccaa --- /dev/null +++ b/opentelemetry-api/src/metrics/instruments/up_down_counter.rs @@ -0,0 +1,133 @@ +use crate::{ + metrics::{InstrumentBuilder, MetricsError}, + Context, KeyValue, +}; +use core::fmt; +use std::convert::TryFrom; +use std::sync::Arc; + +/// An SDK implemented instrument that records increasing or decreasing values. +pub trait SyncUpDownCounter { + /// Records an increment or decrement to the counter. + fn add(&self, cx: &Context, value: T, attributes: &[KeyValue]); +} + +/// An instrument that records increasing or decreasing values. 
+#[derive(Clone)] +pub struct UpDownCounter(Arc + Send + Sync>); + +impl fmt::Debug for UpDownCounter +where + T: fmt::Debug, +{ + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.write_fmt(format_args!( + "UpDownCounter<{}>", + std::any::type_name::() + )) + } +} + +impl UpDownCounter { + /// Create a new up down counter. + pub fn new(inner: Arc + Send + Sync>) -> Self { + UpDownCounter(inner) + } + + /// Records an increment or decrement to the counter. + pub fn add(&self, cx: &Context, value: T, attributes: &[KeyValue]) { + self.0.add(cx, value, attributes) + } +} + +impl TryFrom>> for UpDownCounter { + type Error = MetricsError; + + fn try_from(builder: InstrumentBuilder<'_, UpDownCounter>) -> Result { + builder.meter.instrument_provider.i64_up_down_counter( + builder.name, + builder.description, + builder.unit, + ) + } +} + +impl TryFrom>> for UpDownCounter { + type Error = MetricsError; + + fn try_from(builder: InstrumentBuilder<'_, UpDownCounter>) -> Result { + builder.meter.instrument_provider.f64_up_down_counter( + builder.name, + builder.description, + builder.unit, + ) + } +} + +/// An SDK implemented async instrument that records increasing or decreasing values. +pub trait AsyncUpDownCounter { + /// Records the increment or decrement to the counter. + /// + /// It is only valid to call this within a callback. If called outside of the + /// registered callback it should have no effect on the instrument, and an + /// error will be reported via the error handler. + fn observe(&self, cx: &Context, value: T, attributes: &[KeyValue]); +} + +/// An async instrument that records increasing or decreasing values. +#[derive(Clone)] +pub struct ObservableUpDownCounter(Arc + Send + Sync>); + +impl fmt::Debug for ObservableUpDownCounter +where + T: fmt::Debug, +{ + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.write_fmt(format_args!( + "ObservableUpDownCounter<{}>", + std::any::type_name::() + )) + } +} + +impl ObservableUpDownCounter { + /// Create a new observable up down counter. + pub fn new(inner: Arc + Send + Sync>) -> Self { + ObservableUpDownCounter(inner) + } + + /// Records the increment or decrement to the counter. + /// + /// It is only valid to call this within a callback. If called outside of the + /// registered callback it should have no effect on the instrument, and an + /// error will be reported via the error handler. 
+ pub fn observe(&self, cx: &Context, value: T, attributes: &[KeyValue]) { + self.0.observe(cx, value, attributes) + } +} + +impl TryFrom>> for ObservableUpDownCounter { + type Error = MetricsError; + + fn try_from( + builder: InstrumentBuilder<'_, ObservableUpDownCounter>, + ) -> Result { + builder + .meter + .instrument_provider + .i64_observable_up_down_counter(builder.name, builder.description, builder.unit) + } +} + +impl TryFrom>> for ObservableUpDownCounter { + type Error = MetricsError; + + fn try_from( + builder: InstrumentBuilder<'_, ObservableUpDownCounter>, + ) -> Result { + builder + .meter + .instrument_provider + .f64_observable_up_down_counter(builder.name, builder.description, builder.unit) + } +} diff --git a/opentelemetry-api/src/metrics/meter.rs b/opentelemetry-api/src/metrics/meter.rs index be58c37532..969b06e597 100644 --- a/opentelemetry-api/src/metrics/meter.rs +++ b/opentelemetry-api/src/metrics/meter.rs @@ -1,369 +1,165 @@ -use std::borrow::Cow; -use std::fmt; +use core::fmt; use std::sync::Arc; -use crate::InstrumentationLibrary; -use crate::{ - metrics::{ - sdk_api, AsyncRunner, BatchObserver, BatchObserverResult, CounterBuilder, Descriptor, - HistogramBuilder, Measurement, NumberKind, ObserverResult, Result, SumObserverBuilder, - UpDownCounterBuilder, UpDownSumObserverBuilder, ValueObserverBuilder, ValueRecorderBuilder, - }, - Context, KeyValue, +use crate::metrics::{ + Counter, Histogram, InstrumentBuilder, InstrumentProvider, MetricsError, ObservableCounter, + ObservableGauge, ObservableUpDownCounter, UpDownCounter, }; +use crate::{Context, InstrumentationLibrary}; /// Returns named meter instances -pub trait MeterProvider: fmt::Debug { - /// Creates an implementation of the [`Meter`] interface. The - /// instrumentation name must be the name of the library providing - /// instrumentation. This name may be the same as the instrumented code only if - /// that code provides built-in instrumentation. If the instrumentation name is - /// empty, then a implementation defined default name will be used instead. +pub trait MeterProvider { + /// Creates a named [`Meter`] instance. + fn meter(&self, name: &'static str) -> Meter { + self.versioned_meter(name, None, None) + } + + /// Creates an implementation of the [`Meter`] interface. /// - fn meter( + /// The instrumentation name must be the name of the library providing instrumentation. This + /// name may be the same as the instrumented code only if that code provides built-in + /// instrumentation. If the instrumentation name is empty, then a implementation defined + /// default name will be used instead. + fn versioned_meter( &self, - instrumentation_name: &'static str, - instrumentation_version: Option<&'static str>, + name: &'static str, + version: Option<&'static str>, schema_url: Option<&'static str>, ) -> Meter; } -/// Meter is the OpenTelemetry metric API, based on a sdk-defined `MeterCore` -/// implementation and the `Meter` library name. 
diff --git a/opentelemetry-api/src/metrics/meter.rs b/opentelemetry-api/src/metrics/meter.rs
index be58c37532..969b06e597 100644
--- a/opentelemetry-api/src/metrics/meter.rs
+++ b/opentelemetry-api/src/metrics/meter.rs
@@ -1,369 +1,165 @@
-use std::borrow::Cow;
-use std::fmt;
+use core::fmt;
 use std::sync::Arc;
 
-use crate::InstrumentationLibrary;
-use crate::{
-    metrics::{
-        sdk_api, AsyncRunner, BatchObserver, BatchObserverResult, CounterBuilder, Descriptor,
-        HistogramBuilder, Measurement, NumberKind, ObserverResult, Result, SumObserverBuilder,
-        UpDownCounterBuilder, UpDownSumObserverBuilder, ValueObserverBuilder, ValueRecorderBuilder,
-    },
-    Context, KeyValue,
+use crate::metrics::{
+    Counter, Histogram, InstrumentBuilder, InstrumentProvider, MetricsError, ObservableCounter,
+    ObservableGauge, ObservableUpDownCounter, UpDownCounter,
 };
+use crate::{Context, InstrumentationLibrary};
 
 /// Returns named meter instances
-pub trait MeterProvider: fmt::Debug {
-    /// Creates an implementation of the [`Meter`] interface. The
-    /// instrumentation name must be the name of the library providing
-    /// instrumentation. This name may be the same as the instrumented code only if
-    /// that code provides built-in instrumentation. If the instrumentation name is
-    /// empty, then a implementation defined default name will be used instead.
+pub trait MeterProvider {
+    /// Creates a named [`Meter`] instance.
+    fn meter(&self, name: &'static str) -> Meter {
+        self.versioned_meter(name, None, None)
+    }
+
+    /// Creates an implementation of the [`Meter`] interface.
     ///
-    fn meter(
+    /// The instrumentation name must be the name of the library providing instrumentation. This
+    /// name may be the same as the instrumented code only if that code provides built-in
+    /// instrumentation. If the instrumentation name is empty, an implementation-defined
+    /// default name will be used instead.
+    fn versioned_meter(
         &self,
-        instrumentation_name: &'static str,
-        instrumentation_version: Option<&'static str>,
+        name: &'static str,
+        version: Option<&'static str>,
         schema_url: Option<&'static str>,
     ) -> Meter;
 }
 
-/// Meter is the OpenTelemetry metric API, based on a sdk-defined `MeterCore`
-/// implementation and the `Meter` library name.
-///
-/// # Instruments
-///
-/// | **Name** | Instrument kind | Function(argument) | Default aggregation | Notes |
-/// | ----------------------- | ----- | --------- | ------------- | --- |
-/// | **Counter** | Synchronous adding monotonic | Add(increment) | Sum | Per-request, part of a monotonic sum |
-/// | **UpDownCounter** | Synchronous adding | Add(increment) | Sum | Per-request, part of a non-monotonic sum |
-/// | **Histogram** | Synchronous | Record(value) | Histogram Aggregation | Per-request, any grouping measurement |
-/// | **ValueRecorder** | Synchronous | Record(value) | [TBD issue 636](https://github.com/open-telemetry/opentelemetry-specification/issues/636) | Per-request, any grouping measurement |
-/// | **SumObserver** | Asynchronous adding monotonic | Observe(sum) | Sum | Per-interval, reporting a monotonic sum |
-/// | **UpDownSumObserver** | Asynchronous adding | Observe(sum) | Sum | Per-interval, reporting a non-monotonic sum |
-/// | **ValueObserver** | Asynchronous | Observe(value) | LastValue | Per-interval, any grouping measurement |
-#[derive(Debug)]
+/// Provides access to instrument instances for recording metrics.
+#[derive(Clone)]
 pub struct Meter {
-    instrumentation_library: InstrumentationLibrary,
-    core: Arc<dyn sdk_api::MeterCore + Send + Sync>,
+    pub(crate) instrumentation_library: InstrumentationLibrary,
+    pub(crate) instrument_provider: Arc<dyn InstrumentProvider + Send + Sync>,
 }
 
 impl Meter {
-    /// Create a new named meter from a sdk implemented core
-    pub fn new<T: Into<Cow<'static, str>>>(
-        instrumentation_name: T,
-        instrumentation_version: Option<T>,
-        schema_url: Option<T>,
-        core: Arc<dyn sdk_api::MeterCore + Send + Sync>,
+    /// Create a new named meter from an instrumentation provider.
+    pub fn new(
+        instrumentation_library: InstrumentationLibrary,
+        instrument_provider: Arc<dyn InstrumentProvider + Send + Sync>,
    ) -> Self {
         Meter {
-            instrumentation_library: InstrumentationLibrary::new(
-                instrumentation_name.into(),
-                instrumentation_version.map(Into::into),
-                schema_url.map(Into::into),
-            ),
-            core,
+            instrumentation_library,
+            instrument_provider,
         }
     }
 
-    pub(crate) fn instrumentation_library(&self) -> InstrumentationLibrary {
-        self.instrumentation_library.clone()
-    }
-
-    /// Creates a new integer `CounterBuilder` for `u64` values with the given name.
-    pub fn u64_counter<T>(&self, name: T) -> CounterBuilder<'_, u64>
-    where
-        T: Into<String>,
-    {
-        CounterBuilder::new(self, name.into(), NumberKind::U64)
-    }
-
-    /// Creates a new floating point `CounterBuilder` for `f64` values with the given name.
-    pub fn f64_counter<T>(&self, name: T) -> CounterBuilder<'_, f64>
-    where
-        T: Into<String>,
-    {
-        CounterBuilder::new(self, name.into(), NumberKind::F64)
-    }
-
-    /// Creates a new integer `UpDownCounterBuilder` for an `i64` up down counter with the given name.
-    pub fn i64_up_down_counter<T>(&self, name: T) -> UpDownCounterBuilder<'_, i64>
-    where
-        T: Into<String>,
-    {
-        UpDownCounterBuilder::new(self, name.into(), NumberKind::I64)
-    }
-
-    /// Creates a new floating point `UpDownCounterBuilder` for an `f64` up down counter with the given name.
-    pub fn f64_up_down_counter<T>(&self, name: T) -> UpDownCounterBuilder<'_, f64>
-    where
-        T: Into<String>,
-    {
-        UpDownCounterBuilder::new(self, name.into(), NumberKind::F64)
+    /// Creates an instrument for recording increasing values.
+    pub fn u64_counter(&self, name: impl Into<String>) -> InstrumentBuilder<'_, Counter<u64>> {
+        InstrumentBuilder::new(self, name.into())
    }
 
-    /// Creates a new `ValueRecorderBuilder` for `i64` values with the given name.
-    #[deprecated(since = "0.18.0", note = "Please use i64_histogram instead.")]
-    pub fn i64_value_recorder<T>(&self, name: T) -> ValueRecorderBuilder<'_, i64>
-    where
-        T: Into<String>,
-    {
-        ValueRecorderBuilder::new(self, name.into(), NumberKind::I64)
+    /// Creates an instrument for recording increasing values.
+    pub fn f64_counter(&self, name: impl Into<String>) -> InstrumentBuilder<'_, Counter<f64>> {
+        InstrumentBuilder::new(self, name.into())
     }
 
-    /// Creates a new `ValueRecorderBuilder` for `u64` values with the given name.
-    #[deprecated(since = "0.18.0", note = "Please use u64_histogram instead.")]
-    pub fn u64_value_recorder<T>(&self, name: T) -> ValueRecorderBuilder<'_, u64>
-    where
-        T: Into<String>,
-    {
-        ValueRecorderBuilder::new(self, name.into(), NumberKind::U64)
-    }
-
-    /// Creates a new `ValueRecorderBuilder` for `f64` values with the given name.
-    #[deprecated(since = "0.18.0", note = "Please use f64_histogram instead.")]
-    pub fn f64_value_recorder<T>(&self, name: T) -> ValueRecorderBuilder<'_, f64>
-    where
-        T: Into<String>,
-    {
-        ValueRecorderBuilder::new(self, name.into(), NumberKind::F64)
+    /// Creates an instrument for recording increasing values via callback.
+    pub fn u64_observable_counter(
+        &self,
+        name: impl Into<String>,
+    ) -> InstrumentBuilder<'_, ObservableCounter<u64>> {
+        InstrumentBuilder::new(self, name.into())
    }
 
-    /// Creates a new `HistogramBuilder` for `i64` values with the given name.
-    pub fn i64_histogram<T>(&self, name: T) -> HistogramBuilder<'_, i64>
-    where
-        T: Into<String>,
-    {
-        HistogramBuilder::new(self, name.into(), NumberKind::I64)
+    /// Creates an instrument for recording increasing values via callback.
+    pub fn f64_observable_counter(
+        &self,
+        name: impl Into<String>,
+    ) -> InstrumentBuilder<'_, ObservableCounter<f64>> {
+        InstrumentBuilder::new(self, name.into())
    }
 
-    /// Creates a new `HistogramBuilder` for `u64` values with the given name.
-    pub fn u64_histogram<T>(&self, name: T) -> HistogramBuilder<'_, u64>
-    where
-        T: Into<String>,
-    {
-        HistogramBuilder::new(self, name.into(), NumberKind::U64)
+    /// Creates an instrument for recording changes of a value.
+    pub fn i64_up_down_counter(
+        &self,
+        name: impl Into<String>,
+    ) -> InstrumentBuilder<'_, UpDownCounter<i64>> {
+        InstrumentBuilder::new(self, name.into())
    }
 
-    /// Creates a new `HistogramBuilder` for `f64` values with the given name.
-    pub fn f64_histogram<T>(&self, name: T) -> HistogramBuilder<'_, f64>
-    where
-        T: Into<String>,
-    {
-        HistogramBuilder::new(self, name.into(), NumberKind::F64)
+    /// Creates an instrument for recording changes of a value.
+    pub fn f64_up_down_counter(
+        &self,
+        name: impl Into<String>,
+    ) -> InstrumentBuilder<'_, UpDownCounter<f64>> {
+        InstrumentBuilder::new(self, name.into())
    }
 
-    /// Creates a new integer `SumObserverBuilder` for `u64` values with the given
-    /// name and callback
-    pub fn u64_sum_observer<T, F>(&self, name: T, callback: F) -> SumObserverBuilder<'_, u64>
-    where
-        T: Into<String>,
-        F: Fn(ObserverResult<u64>) + Send + Sync + 'static,
-    {
-        SumObserverBuilder::new(
-            self,
-            name.into(),
-            Some(AsyncRunner::U64(Box::new(callback))),
-            NumberKind::U64,
-        )
+    /// Creates an instrument for recording changes of a value via callback.
+    pub fn i64_observable_up_down_counter(
+        &self,
+        name: impl Into<String>,
+    ) -> InstrumentBuilder<'_, ObservableUpDownCounter<i64>> {
+        InstrumentBuilder::new(self, name.into())
    }
 
-    /// Creates a new floating point `SumObserverBuilder` for `f64` values with the
-    /// given name and callback
-    pub fn f64_sum_observer<T, F>(&self, name: T, callback: F) -> SumObserverBuilder<'_, f64>
-    where
-        T: Into<String>,
-        F: Fn(ObserverResult<f64>) + Send + Sync + 'static,
-    {
-        SumObserverBuilder::new(
-            self,
-            name.into(),
-            Some(AsyncRunner::F64(Box::new(callback))),
-            NumberKind::F64,
-        )
+    /// Creates an instrument for recording changes of a value via callback.
+    pub fn f64_observable_up_down_counter(
+        &self,
+        name: impl Into<String>,
+    ) -> InstrumentBuilder<'_, ObservableUpDownCounter<f64>> {
+        InstrumentBuilder::new(self, name.into())
    }
 
-    /// Creates a new integer `UpDownSumObserverBuilder` for `i64` values with the
-    /// given name and callback.
-    pub fn i64_up_down_sum_observer<T, F>(
+    /// Creates an instrument for recording the current value via callback.
+    pub fn u64_observable_gauge(
         &self,
-        name: T,
-        callback: F,
-    ) -> UpDownSumObserverBuilder<'_, i64>
-    where
-        T: Into<String>,
-        F: Fn(ObserverResult<i64>) + Send + Sync + 'static,
-    {
-        UpDownSumObserverBuilder::new(
-            self,
-            name.into(),
-            Some(AsyncRunner::I64(Box::new(callback))),
-            NumberKind::I64,
-        )
+        name: impl Into<String>,
+    ) -> InstrumentBuilder<'_, ObservableGauge<u64>> {
+        InstrumentBuilder::new(self, name.into())
    }
 
-    /// Creates a new floating point `UpDownSumObserverBuilder` for `f64` values
-    /// with the given name and callback
-    pub fn f64_up_down_sum_observer<T, F>(
+    /// Creates an instrument for recording the current value via callback.
+    pub fn i64_observable_gauge(
         &self,
-        name: T,
-        callback: F,
-    ) -> UpDownSumObserverBuilder<'_, f64>
-    where
-        T: Into<String>,
-        F: Fn(ObserverResult<f64>) + Send + Sync + 'static,
-    {
-        UpDownSumObserverBuilder::new(
-            self,
-            name.into(),
-            Some(AsyncRunner::F64(Box::new(callback))),
-            NumberKind::F64,
-        )
+        name: impl Into<String>,
+    ) -> InstrumentBuilder<'_, ObservableGauge<i64>> {
+        InstrumentBuilder::new(self, name.into())
    }
 
-    /// Creates a new integer `ValueObserverBuilder` for `u64` values with the given
-    /// name and callback
-    pub fn u64_value_observer<T, F>(&self, name: T, callback: F) -> ValueObserverBuilder<'_, u64>
-    where
-        T: Into<String>,
-        F: Fn(ObserverResult<u64>) + Send + Sync + 'static,
-    {
-        ValueObserverBuilder::new(
-            self,
-            name.into(),
-            Some(AsyncRunner::U64(Box::new(callback))),
-            NumberKind::U64,
-        )
+    /// Creates an instrument for recording the current value via callback.
+    pub fn f64_observable_gauge(
+        &self,
+        name: impl Into<String>,
+    ) -> InstrumentBuilder<'_, ObservableGauge<f64>> {
+        InstrumentBuilder::new(self, name.into())
    }
 
-    /// Creates a new integer `ValueObserverBuilder` for `i64` values with the given
-    /// name and callback
-    pub fn i64_value_observer<T, F>(&self, name: T, callback: F) -> ValueObserverBuilder<'_, i64>
-    where
-        T: Into<String>,
-        F: Fn(ObserverResult<i64>) + Send + Sync + 'static,
-    {
-        ValueObserverBuilder::new(
-            self,
-            name.into(),
-            Some(AsyncRunner::I64(Box::new(callback))),
-            NumberKind::I64,
-        )
+    /// Creates an instrument for recording a distribution of values.
+    pub fn f64_histogram(&self, name: impl Into<String>) -> InstrumentBuilder<'_, Histogram<f64>> {
+        InstrumentBuilder::new(self, name.into())
    }
 
-    /// Creates a new floating point `ValueObserverBuilder` for `f64` values with
-    /// the given name and callback
-    pub fn f64_value_observer<T, F>(&self, name: T, callback: F) -> ValueObserverBuilder<'_, f64>
-    where
-        T: Into<String>,
-        F: Fn(ObserverResult<f64>) + Send + Sync + 'static,
-    {
-        ValueObserverBuilder::new(
-            self,
-            name.into(),
-            Some(AsyncRunner::F64(Box::new(callback))),
-            NumberKind::F64,
-        )
+    /// Creates an instrument for recording a distribution of values.
+    pub fn u64_histogram(&self, name: impl Into<String>) -> InstrumentBuilder<'_, Histogram<u64>> {
+        InstrumentBuilder::new(self, name.into())
    }
 
-    /// Creates a new `BatchObserver` that supports making batches of observations
-    /// for multiple instruments or returns an error if instrument initialization
-    /// fails.
-    ///
-    /// # Examples
-    ///
-    /// ```
-    /// use opentelemetry_api::{global, metrics::BatchObserverResult, KeyValue};
-    ///
-    /// # fn init_observer() -> opentelemetry_api::metrics::Result<()> {
-    /// let meter = global::meter("test");
-    ///
-    /// meter.build_batch_observer(|batch| {
-    ///     let instrument = batch.u64_value_observer("test_instrument").try_init()?;
-    ///
-    ///     Ok(move |result: BatchObserverResult| {
-    ///         result.observe(&[KeyValue::new("my-key", "my-value")], &[instrument.observation(1)]);
-    ///     })
-    /// })?;
-    /// # Ok(())
-    /// # }
-    /// ```
-    pub fn build_batch_observer<B, F>(&self, builder: B) -> Result<()>
-    where
-        B: Fn(BatchObserver<'_>) -> Result<F>,
-        F: Fn(BatchObserverResult) + Send + Sync + 'static,
-    {
-        let observer = builder(BatchObserver::new(self))?;
-        self.core
-            .new_batch_observer(AsyncRunner::Batch(Box::new(observer)))
+    /// Creates an instrument for recording a distribution of values.
+    pub fn i64_histogram(&self, name: impl Into<String>) -> InstrumentBuilder<'_, Histogram<i64>> {
+        InstrumentBuilder::new(self, name.into())
    }
 
-    /// Creates a new `BatchObserver` that supports making batches of observations
-    /// for multiple instruments.
-    ///
-    /// # Panics
-    ///
-    /// Panics if instrument initialization or observer registration returns an
-    /// error.
-    ///
-    /// # Examples
+    /// Captures the function that will be called during data collection.
     ///
-    /// ```
-    /// use opentelemetry_api::{global, metrics::BatchObserverResult, KeyValue};
-    ///
-    /// let meter = global::meter("test");
-    ///
-    /// meter.batch_observer(|batch| {
-    ///     let instrument = batch.u64_value_observer("test_instrument").init();
-    ///
-    ///     move |result: BatchObserverResult| {
-    ///         result.observe(&[KeyValue::new("my-key", "my-value")], &[instrument.observation(1)]);
-    ///     }
-    /// });
-    /// ```
-    pub fn batch_observer<B, F>(&self, builder: B)
+    /// It is only valid to call `observe` within the scope of the passed function.
+    pub fn register_callback<F>(&self, callback: F) -> Result<(), MetricsError>
     where
-        B: Fn(BatchObserver<'_>) -> F,
-        F: Fn(BatchObserverResult) + Send + Sync + 'static,
+        F: Fn(&Context) + Send + Sync + 'static,
    {
-        let observer = builder(BatchObserver::new(self));
-        self.core
-            .new_batch_observer(AsyncRunner::Batch(Box::new(observer)))
-            .unwrap()
-    }
-
-    /// Atomically record a batch of measurements.
-    pub fn record_batch<T: IntoIterator<Item = Measurement>>(
-        &self,
-        attributes: &[KeyValue],
-        measurements: T,
-    ) {
-        self.record_batch_with_context(&Context::current(), attributes, measurements)
-    }
-
-    /// Atomically record a batch of measurements with a given context
-    pub fn record_batch_with_context<T: IntoIterator<Item = Measurement>>(
-        &self,
-        cx: &Context,
-        attributes: &[KeyValue],
-        measurements: T,
-    ) {
-        self.core
-            .record_batch_with_context(cx, attributes, measurements.into_iter().collect())
-    }
-
-    pub(crate) fn new_sync_instrument(
-        &self,
-        descriptor: Descriptor,
-    ) -> Result<Arc<dyn sdk_api::SyncInstrumentCore + Send + Sync>> {
-        self.core.new_sync_instrument(descriptor)
+        self.instrument_provider
+            .register_callback(Box::new(callback))
    }
+}
 
-    pub(crate) fn new_async_instrument(
-        &self,
-        descriptor: Descriptor,
-        runner: Option<AsyncRunner>,
-    ) -> Result<Arc<dyn sdk_api::AsyncInstrumentCore + Send + Sync>> {
-        self.core.new_async_instrument(descriptor, runner)
+impl fmt::Debug for Meter {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        f.debug_struct("Meter")
+            .field("instrumentation_library", &self.instrumentation_library)
+            .finish()
     }
 }
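The reworked `Meter` surface above reduces every instrument to an `InstrumentBuilder` plus a typed instrument. A short sketch of the resulting flow; `with_description` and `with_unit` are assumed to be the builder options defined on `InstrumentBuilder` elsewhere in this patch, and the instrument name follows semantic-convention style for illustration only:

```rust
use opentelemetry_api::{global, metrics::Unit, Context, KeyValue};

fn record_request(latency_ms: f64) {
    let meter = global::meter("http-server");

    // `init` panics on registration failure; `try_init` returns a Result.
    let latency = meter
        .f64_histogram("http.server.duration")
        .with_description("Inbound HTTP request duration.")
        .with_unit(Unit::new("ms"))
        .init();

    latency.record(
        &Context::current(),
        latency_ms,
        &[KeyValue::new("http.method", "GET")],
    );
}
```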
diff --git a/opentelemetry-api/src/metrics/mod.rs b/opentelemetry-api/src/metrics/mod.rs
index d35de0fde4..bb6e2ca40b 100644
--- a/opentelemetry-api/src/metrics/mod.rs
+++ b/opentelemetry-api/src/metrics/mod.rs
@@ -1,42 +1,25 @@
 //! # OpenTelemetry Metrics API
-use std::borrow::Cow;
 use std::result;
 use std::sync::PoisonError;
+use std::{borrow::Cow, sync::Arc};
 use thiserror::Error;
 
-mod async_instrument;
-mod config;
-mod counter;
-mod descriptor;
-mod histogram;
-mod kind;
+mod instruments;
 mod meter;
 pub mod noop;
-mod number;
-mod observer;
-pub mod registry;
-pub mod sdk_api;
-mod sync_instrument;
-mod up_down_counter;
-mod value_recorder;
-
-use crate::ExportError;
-pub use async_instrument::{AsyncRunner, BatchObserverResult, Observation, ObserverResult};
-pub use config::InstrumentConfig;
-pub use counter::{BoundCounter, Counter, CounterBuilder};
-pub use descriptor::Descriptor;
-pub use histogram::{BoundHistogram, Histogram, HistogramBuilder};
-pub use kind::InstrumentKind;
-pub use meter::{Meter, MeterProvider};
-pub use number::{AtomicNumber, Number, NumberKind};
-pub use observer::{
-    BatchObserver, SumObserver, SumObserverBuilder, UpDownSumObserver, UpDownSumObserverBuilder,
-    ValueObserver, ValueObserverBuilder,
+
+use crate::{Context, ExportError};
+pub use instruments::{
+    counter::{AsyncCounter, Counter, ObservableCounter, SyncCounter},
+    gauge::{AsyncGauge, ObservableGauge},
+    histogram::{Histogram, SyncHistogram},
+    up_down_counter::{
+        AsyncUpDownCounter, ObservableUpDownCounter, SyncUpDownCounter, UpDownCounter,
+    },
+    InstrumentBuilder,
 };
-pub use sync_instrument::Measurement;
-pub use up_down_counter::{BoundUpDownCounter, UpDownCounter, UpDownCounterBuilder};
-pub use value_recorder::{BoundValueRecorder, ValueRecorder, ValueRecorderBuilder};
+pub use meter::{Meter, MeterProvider};
 
 /// A specialized `Result` type for metric operations.
 pub type Result<T> = result::Result<T, MetricsError>;
@@ -114,3 +97,169 @@ impl AsRef<str> for Unit {
         self.0.as_ref()
     }
 }
+
+/// SDK-implemented trait for creating instruments.
+pub trait InstrumentProvider {
+    /// Creates an instrument for recording increasing values.
+    fn u64_counter(
+        &self,
+        _name: String,
+        _description: Option<String>,
+        _unit: Option<Unit>,
+    ) -> Result<Counter<u64>> {
+        Ok(Counter::new(Arc::new(noop::NoopSyncInstrument::new())))
+    }
+
+    /// Creates an instrument for recording increasing values.
+    fn f64_counter(
+        &self,
+        _name: String,
+        _description: Option<String>,
+        _unit: Option<Unit>,
+    ) -> Result<Counter<f64>> {
+        Ok(Counter::new(Arc::new(noop::NoopSyncInstrument::new())))
+    }
+
+    /// Creates an instrument for recording increasing values via callback.
+    fn u64_observable_counter(
+        &self,
+        _name: String,
+        _description: Option<String>,
+        _unit: Option<Unit>,
+    ) -> Result<ObservableCounter<u64>> {
+        Ok(ObservableCounter::new(Arc::new(
+            noop::NoopAsyncInstrument::new(),
+        )))
+    }
+
+    /// Creates an instrument for recording increasing values via callback.
+    fn f64_observable_counter(
+        &self,
+        _name: String,
+        _description: Option<String>,
+        _unit: Option<Unit>,
+    ) -> Result<ObservableCounter<f64>> {
+        Ok(ObservableCounter::new(Arc::new(
+            noop::NoopAsyncInstrument::new(),
+        )))
+    }
+
+    /// Creates an instrument for recording changes of a value.
+    fn i64_up_down_counter(
+        &self,
+        _name: String,
+        _description: Option<String>,
+        _unit: Option<Unit>,
+    ) -> Result<UpDownCounter<i64>> {
+        Ok(UpDownCounter::new(
+            Arc::new(noop::NoopSyncInstrument::new()),
+        ))
+    }
+
+    /// Creates an instrument for recording changes of a value.
+    fn f64_up_down_counter(
+        &self,
+        _name: String,
+        _description: Option<String>,
+        _unit: Option<Unit>,
+    ) -> Result<UpDownCounter<f64>> {
+        Ok(UpDownCounter::new(
+            Arc::new(noop::NoopSyncInstrument::new()),
+        ))
+    }
+
+    /// Creates an instrument for recording changes of a value via callback.
+    fn i64_observable_up_down_counter(
+        &self,
+        _name: String,
+        _description: Option<String>,
+        _unit: Option<Unit>,
+    ) -> Result<ObservableUpDownCounter<i64>> {
+        Ok(ObservableUpDownCounter::new(Arc::new(
+            noop::NoopAsyncInstrument::new(),
+        )))
+    }
+
+    /// Creates an instrument for recording changes of a value via callback.
+    fn f64_observable_up_down_counter(
+        &self,
+        _name: String,
+        _description: Option<String>,
+        _unit: Option<Unit>,
+    ) -> Result<ObservableUpDownCounter<f64>> {
+        Ok(ObservableUpDownCounter::new(Arc::new(
+            noop::NoopAsyncInstrument::new(),
+        )))
+    }
+
+    /// Creates an instrument for recording the current value via callback.
+    fn u64_observable_gauge(
+        &self,
+        _name: String,
+        _description: Option<String>,
+        _unit: Option<Unit>,
+    ) -> Result<ObservableGauge<u64>> {
+        Ok(ObservableGauge::new(Arc::new(
+            noop::NoopAsyncInstrument::new(),
+        )))
+    }
+
+    /// Creates an instrument for recording the current value via callback.
+    fn i64_observable_gauge(
+        &self,
+        _name: String,
+        _description: Option<String>,
+        _unit: Option<Unit>,
+    ) -> Result<ObservableGauge<i64>> {
+        Ok(ObservableGauge::new(Arc::new(
+            noop::NoopAsyncInstrument::new(),
+        )))
+    }
+
+    /// Creates an instrument for recording the current value via callback.
+    fn f64_observable_gauge(
+        &self,
+        _name: String,
+        _description: Option<String>,
+        _unit: Option<Unit>,
+    ) -> Result<ObservableGauge<f64>> {
+        Ok(ObservableGauge::new(Arc::new(
+            noop::NoopAsyncInstrument::new(),
+        )))
+    }
+
+    /// Creates an instrument for recording a distribution of values.
+    fn f64_histogram(
+        &self,
+        _name: String,
+        _description: Option<String>,
+        _unit: Option<Unit>,
+    ) -> Result<Histogram<f64>> {
+        Ok(Histogram::new(Arc::new(noop::NoopSyncInstrument::new())))
+    }
+
+    /// Creates an instrument for recording a distribution of values.
+    fn u64_histogram(
+        &self,
+        _name: String,
+        _description: Option<String>,
+        _unit: Option<Unit>,
+    ) -> Result<Histogram<u64>> {
+        Ok(Histogram::new(Arc::new(noop::NoopSyncInstrument::new())))
+    }
+
+    /// Creates an instrument for recording a distribution of values.
+    fn i64_histogram(
+        &self,
+        _name: String,
+        _description: Option<String>,
+        _unit: Option<Unit>,
+    ) -> Result<Histogram<i64>> {
+        Ok(Histogram::new(Arc::new(noop::NoopSyncInstrument::new())))
+    }
+
+    /// Captures the function that will be called during data collection.
+    ///
+    /// It is only valid to call `observe` within the scope of the passed function.
+    fn register_callback(&self, callback: Box<dyn Fn(&Context) + Send + Sync>) -> Result<()>;
+}
diff --git a/opentelemetry-api/src/metrics/noop.rs b/opentelemetry-api/src/metrics/noop.rs
index 14aa71310a..5b1165e37c 100644
--- a/opentelemetry-api/src/metrics/noop.rs
+++ b/opentelemetry-api/src/metrics/noop.rs
@@ -5,22 +5,13 @@
 //! to have minimal resource utilization and runtime impact.
 use crate::{
     metrics::{
-        sdk_api::{
-            AsyncInstrumentCore, InstrumentCore, MeterCore, SyncBoundInstrumentCore,
-            SyncInstrumentCore,
-        },
-        AsyncRunner, Descriptor, InstrumentKind, Measurement, Meter, MeterProvider, Number,
-        NumberKind, Result,
+        AsyncCounter, AsyncGauge, AsyncUpDownCounter, InstrumentProvider, Meter, MeterProvider,
+        Result, SyncCounter, SyncHistogram, SyncUpDownCounter,
     },
-    Context, KeyValue,
+    Context, InstrumentationLibrary, KeyValue,
 };
-use std::any::Any;
 use std::sync::Arc;
 
-lazy_static::lazy_static! {
-    static ref NOOP_DESCRIPTOR: Descriptor = Descriptor::new(String::new(), "noop", None, None, InstrumentKind::Counter, NumberKind::U64);
-}
-
 /// A no-op instance of a `MetricProvider`
 #[derive(Debug, Default)]
 pub struct NoopMeterProvider {
@@ -35,13 +26,14 @@ impl NoopMeterProvider {
 }
 
 impl MeterProvider for NoopMeterProvider {
-    fn meter(
+    fn versioned_meter(
         &self,
         name: &'static str,
         version: Option<&'static str>,
         schema_url: Option<&'static str>,
     ) -> Meter {
-        Meter::new(name, version, schema_url, Arc::new(NoopMeterCore::new()))
+        let library = InstrumentationLibrary::new(name, version, schema_url);
+        Meter::new(library, Arc::new(NoopMeterCore::new()))
     }
 }
 
@@ -58,29 +50,8 @@ impl NoopMeterCore {
     }
 }
 
-impl MeterCore for NoopMeterCore {
-    fn new_sync_instrument(&self, _descriptor: Descriptor) -> Result<Arc<dyn SyncInstrumentCore + Send + Sync>> {
-        Ok(Arc::new(NoopSyncInstrument::new()))
-    }
-
-    fn new_async_instrument(
-        &self,
-        _descriptor: Descriptor,
-        _runner: Option<AsyncRunner>,
-    ) -> Result<Arc<dyn AsyncInstrumentCore + Send + Sync>> {
-        Ok(Arc::new(NoopAsyncInstrument::new()))
-    }
-
-    fn record_batch_with_context(
-        &self,
-        _cx: &Context,
-        _attributes: &[KeyValue],
-        _measurements: Vec<Measurement>,
-    ) {
-        // Ignored
-    }
-
-    fn new_batch_observer(&self, _runner: AsyncRunner) -> Result<()> {
+impl InstrumentProvider for NoopMeterCore {
+    fn register_callback(&self, _callback: Box<dyn Fn(&Context) + Send + Sync>) -> Result<()> {
         Ok(())
     }
 }
@@ -98,39 +69,20 @@ impl NoopSyncInstrument {
     }
 }
 
-impl InstrumentCore for NoopSyncInstrument {
-    fn descriptor(&self) -> &Descriptor {
-        &NOOP_DESCRIPTOR
-    }
-}
-
-impl SyncInstrumentCore for NoopSyncInstrument {
-    fn bind(&self, _attributes: &'_ [KeyValue]) -> Arc<dyn SyncBoundInstrumentCore> {
-        Arc::new(NoopBoundSyncInstrument::new())
-    }
-    fn record_one(&self, _number: Number, _attributes: &'_ [KeyValue]) {
+impl<T> SyncCounter<T> for NoopSyncInstrument {
+    fn add(&self, _cx: &Context, _value: T, _attributes: &[KeyValue]) {
         // Ignored
     }
-    fn as_any(&self) -> &dyn Any {
-        self
-    }
 }
 
-/// A no-op bound sync instrument
-#[derive(Debug, Default)]
-pub struct NoopBoundSyncInstrument {
-    _private: (),
-}
-
-impl NoopBoundSyncInstrument {
-    /// Create a new no-op bound sync instrument
-    pub fn new() -> Self {
-        NoopBoundSyncInstrument { _private: () }
+impl<T> SyncUpDownCounter<T> for NoopSyncInstrument {
+    fn add(&self, _cx: &Context, _value: T, _attributes: &[KeyValue]) {
+        // Ignored
     }
 }
 
-impl SyncBoundInstrumentCore for NoopBoundSyncInstrument {
-    fn record_one(&self, _number: Number) {
+impl<T> SyncHistogram<T> for NoopSyncInstrument {
+    fn record(&self, _cx: &Context, _value: T, _attributes: &[KeyValue]) {
         // Ignored
     }
 }
@@ -148,14 +100,20 @@ impl NoopAsyncInstrument {
     }
 }
 
-impl InstrumentCore for NoopAsyncInstrument {
-    fn descriptor(&self) -> &Descriptor {
-        &NOOP_DESCRIPTOR
+impl<T> AsyncGauge<T> for NoopAsyncInstrument {
+    fn observe(&self, _cx: &Context, _value: T, _attributes: &[KeyValue]) {
+        // Ignored
     }
 }
 
-impl AsyncInstrumentCore for NoopAsyncInstrument {
-    fn as_any(&self) -> &dyn Any {
-        self
+impl<T> AsyncCounter<T> for NoopAsyncInstrument {
+    fn observe(&self, _cx: &Context, _value: T, _attributes: &[KeyValue]) {
+        // Ignored
+    }
+}
+
+impl<T> AsyncUpDownCounter<T> for NoopAsyncInstrument {
+    fn observe(&self, _cx: &Context, _value: T, _attributes: &[KeyValue]) {
+        // Ignored
     }
 }
diff --git a/opentelemetry-api/src/metrics/observer.rs b/opentelemetry-api/src/metrics/observer.rs
deleted file mode 100644
index 34144b3798..0000000000
--- a/opentelemetry-api/src/metrics/observer.rs
+++ /dev/null
@@ -1,352 +0,0 @@
-use crate::metrics::{
-    sdk_api, AsyncRunner, Descriptor, InstrumentKind, Meter, Number, NumberKind, Observation,
-    Result, Unit,
-};
-use std::sync::Arc;
-
-/// An Observer callback that can report observations for multiple instruments.
-#[derive(Debug)]
-pub struct BatchObserver<'a> {
-    meter: &'a Meter,
-}
-
-impl<'a> BatchObserver<'a> {
-    pub(crate) fn new(meter: &'a Meter) -> Self {
-        BatchObserver { meter }
-    }
-
-    /// Creates a new integer `SumObserverBuilder` for `u64` values with the given name.
-    pub fn u64_sum_observer<T>(&self, name: T) -> SumObserverBuilder<'_, u64>
-    where
-        T: Into<String>,
-    {
-        SumObserverBuilder::new(self.meter, name.into(), None, NumberKind::U64)
-    }
-
-    /// Creates a new floating point `SumObserverBuilder` for `f64` values with the given name.
-    pub fn f64_sum_observer<T>(&self, name: T) -> SumObserverBuilder<'_, f64>
-    where
-        T: Into<String>,
-    {
-        SumObserverBuilder::new(self.meter, name.into(), None, NumberKind::F64)
-    }
-
-    /// Creates a new integer `UpDownSumObserverBuilder` for `i64` values with the given name.
-    pub fn i64_up_down_sum_observer<T>(&self, name: T) -> UpDownSumObserverBuilder<'_, i64>
-    where
-        T: Into<String>,
-    {
-        UpDownSumObserverBuilder::new(self.meter, name.into(), None, NumberKind::I64)
-    }
-
-    /// Creates a new floating point `UpDownSumObserverBuilder` for `f64` values with the given name.
-    pub fn f64_up_down_sum_observer<T>(&self, name: T) -> UpDownSumObserverBuilder<'_, f64>
-    where
-        T: Into<String>,
-    {
-        UpDownSumObserverBuilder::new(self.meter, name.into(), None, NumberKind::F64)
-    }
-
-    /// Creates a new integer `ValueObserverBuilder` for `u64` values with the given name.
-    pub fn u64_value_observer<T>(&self, name: T) -> ValueObserverBuilder<'_, u64>
-    where
-        T: Into<String>,
-    {
-        ValueObserverBuilder::new(self.meter, name.into(), None, NumberKind::U64)
-    }
-
-    /// Creates a new integer `ValueObserverBuilder` for `i64` values with the given name.
-    pub fn i64_value_observer<T>(&self, name: T) -> ValueObserverBuilder<'_, i64>
-    where
-        T: Into<String>,
-    {
-        ValueObserverBuilder::new(self.meter, name.into(), None, NumberKind::I64)
-    }
-
-    /// Creates a new floating point `ValueObserverBuilder` for `f64` values with the given name.
-    pub fn f64_value_observer<T>(&self, name: T) -> ValueObserverBuilder<'_, f64>
-    where
-        T: Into<String>,
-    {
-        ValueObserverBuilder::new(self.meter, name.into(), None, NumberKind::F64)
-    }
-}
-
-/// A metric that captures a precomputed sum of values at a point in time.
-#[derive(Debug)]
-pub struct SumObserver<T> {
-    instrument: Arc<dyn sdk_api::AsyncInstrumentCore + Send + Sync>,
-    _marker: std::marker::PhantomData<T>,
-}
-
-impl<T> SumObserver<T>
-where
-    T: Into<Number>,
-{
-    /// Returns an `Observation`: a `BatchObserverCallback` argument, for an
-    /// asynchronous instrument.
This returns an implementation-level - /// object for use by the SDK, users should not refer to this. - pub fn observation(&self, value: T) -> Observation { - Observation::new(value.into(), self.instrument.clone()) - } -} - -/// Configuration options for building a `SumObserver` -#[derive(Debug)] -pub struct SumObserverBuilder<'a, T> { - meter: &'a Meter, - descriptor: Descriptor, - runner: Option, - _marker: std::marker::PhantomData, -} - -impl<'a, T> SumObserverBuilder<'a, T> { - pub(crate) fn new( - meter: &'a Meter, - name: String, - runner: Option, - number_kind: NumberKind, - ) -> Self { - SumObserverBuilder { - meter, - descriptor: Descriptor::new( - name, - meter.instrumentation_library().name, - meter.instrumentation_library().version, - meter.instrumentation_library().schema_url, - InstrumentKind::SumObserver, - number_kind, - ), - runner, - _marker: std::marker::PhantomData, - } - } - - /// Set the description of this `SumObserver` - pub fn with_description>(mut self, description: S) -> Self { - self.descriptor.set_description(description.into()); - self - } - - /// Set the unit for this `SumObserver`. - pub fn with_unit(mut self, unit: Unit) -> Self { - self.descriptor.config.unit = Some(unit); - self - } - - /// Create a `SumObserver` from this configuration. - pub fn try_init(self) -> Result> { - let instrument = self - .meter - .new_async_instrument(self.descriptor, self.runner)?; - - Ok(SumObserver { - instrument, - _marker: std::marker::PhantomData, - }) - } - - /// Create a `SumObserver` from this configuration. - /// - /// # Panics - /// - /// This method panics if it cannot create an instrument with the provided - /// config. If you want to handle results instead, use [`try_init`] - /// - /// [`try_init`]: SumObserverBuilder::try_init() - pub fn init(self) -> SumObserver { - SumObserver { - instrument: self - .meter - .new_async_instrument(self.descriptor, self.runner) - .unwrap(), - _marker: std::marker::PhantomData, - } - } -} - -/// A metric that captures a precomputed non-monotonic sum of values at a point -/// in time. -#[derive(Debug)] -pub struct UpDownSumObserver { - instrument: Arc, - _marker: std::marker::PhantomData, -} - -impl UpDownSumObserver -where - T: Into, -{ - /// Returns an `Observation`: a `BatchObserverCallback` argument, for an - /// asynchronous instrument. This returns an implementation-level - /// object for use by the SDK, users should not refer to this. - pub fn observation(&self, value: T) -> Observation { - Observation::new(value.into(), self.instrument.clone()) - } -} - -/// Configuration options for building a `UpDownSumObserver` -#[derive(Debug)] -pub struct UpDownSumObserverBuilder<'a, T> { - meter: &'a Meter, - descriptor: Descriptor, - runner: Option, - _marker: std::marker::PhantomData, -} - -impl<'a, T> UpDownSumObserverBuilder<'a, T> { - pub(crate) fn new( - meter: &'a Meter, - name: String, - runner: Option, - number_kind: NumberKind, - ) -> Self { - UpDownSumObserverBuilder { - meter, - descriptor: Descriptor::new( - name, - meter.instrumentation_library().name, - meter.instrumentation_library().version, - meter.instrumentation_library().schema_url, - InstrumentKind::UpDownSumObserver, - number_kind, - ), - runner, - _marker: std::marker::PhantomData, - } - } - - /// Set the description of this `UpDownSumObserver` - pub fn with_description>(mut self, description: S) -> Self { - self.descriptor.set_description(description.into()); - self - } - - /// Set the unit for this `UpDownSumObserver`. 
- pub fn with_unit(mut self, unit: Unit) -> Self { - self.descriptor.config.unit = Some(unit); - self - } - - /// Create a `UpDownSumObserver` from this configuration. - pub fn try_init(self) -> Result> { - let instrument = self - .meter - .new_async_instrument(self.descriptor, self.runner)?; - - Ok(UpDownSumObserver { - instrument, - _marker: std::marker::PhantomData, - }) - } - - /// Create a `UpDownSumObserver` from this configuration. - /// - /// # Panics - /// - /// This method panics if it cannot create an instrument with the provided - /// config. If you want to handle results instead, use [`try_init`] - /// - /// [`try_init`]: UpDownSumObserverBuilder::try_init() - pub fn init(self) -> UpDownSumObserver { - UpDownSumObserver { - instrument: self - .meter - .new_async_instrument(self.descriptor, self.runner) - .unwrap(), - _marker: std::marker::PhantomData, - } - } -} - -/// A metric that captures a set of values at a point in time. -#[derive(Debug)] -pub struct ValueObserver { - instrument: Arc, - _marker: std::marker::PhantomData, -} - -impl ValueObserver -where - T: Into, -{ - /// Returns an `Observation`: a `BatchObserverCallback` argument, for an - /// asynchronous instrument. This returns an implementation-level - /// object for use by the SDK, users should not refer to this. - pub fn observation(&self, value: T) -> Observation { - Observation::new(value.into(), self.instrument.clone()) - } -} - -/// Configuration options for building a `ValueObserver` -#[derive(Debug)] -pub struct ValueObserverBuilder<'a, T> { - meter: &'a Meter, - descriptor: Descriptor, - runner: Option, - _marker: std::marker::PhantomData, -} - -impl<'a, T> ValueObserverBuilder<'a, T> { - pub(crate) fn new( - meter: &'a Meter, - name: String, - runner: Option, - number_kind: NumberKind, - ) -> Self { - ValueObserverBuilder { - meter, - descriptor: Descriptor::new( - name, - meter.instrumentation_library().name, - meter.instrumentation_library().version, - meter.instrumentation_library().schema_url, - InstrumentKind::ValueObserver, - number_kind, - ), - runner, - _marker: std::marker::PhantomData, - } - } - /// Set the description of this `ValueObserver` - pub fn with_description>(mut self, description: S) -> Self { - self.descriptor.set_description(description.into()); - self - } - - /// Set the unit for this `ValueObserver`. - pub fn with_unit(mut self, unit: Unit) -> Self { - self.descriptor.config.unit = Some(unit); - self - } - - /// Create a `ValueObserver` from this configuration. - pub fn try_init(self) -> Result> { - let instrument = self - .meter - .new_async_instrument(self.descriptor, self.runner)?; - - Ok(ValueObserver { - instrument, - _marker: std::marker::PhantomData, - }) - } - - /// Create a `ValueObserver` from this configuration. - /// - /// # Panics - /// - /// This method panics if it cannot create an instrument with the provided - /// config. If you want to handle results instead, use [`try_init`] - /// - /// [`try_init`]: ValueObserverBuilder::try_init() - pub fn init(self) -> ValueObserver { - ValueObserver { - instrument: self - .meter - .new_async_instrument(self.descriptor, self.runner) - .unwrap(), - _marker: std::marker::PhantomData, - } - } -} diff --git a/opentelemetry-api/src/metrics/registry.rs b/opentelemetry-api/src/metrics/registry.rs deleted file mode 100644 index 023664a8b9..0000000000 --- a/opentelemetry-api/src/metrics/registry.rs +++ /dev/null @@ -1,174 +0,0 @@ -//! 
Metrics Registry API -use crate::{ - metrics::{ - sdk_api::{AsyncInstrumentCore, MeterCore, SyncInstrumentCore}, - Meter, MeterProvider, - }, - metrics::{AsyncRunner, Descriptor, Measurement, MetricsError, Result}, - Context, KeyValue, -}; -use std::collections::HashMap; -use std::sync::{Arc, Mutex}; - -/// Create a new `RegistryMeterProvider` from a `MeterCore`. -pub fn meter_provider(core: Arc) -> RegistryMeterProvider { - RegistryMeterProvider(Arc::new(UniqueInstrumentMeterCore::wrap(core))) -} - -/// A standard `MeterProvider` for wrapping a `MeterCore`. -#[derive(Debug, Clone)] -pub struct RegistryMeterProvider(Arc); - -impl MeterProvider for RegistryMeterProvider { - fn meter( - &self, - name: &'static str, - version: Option<&'static str>, - schema_url: Option<&'static str>, - ) -> Meter { - Meter::new(name, version, schema_url, self.0.clone()) - } -} - -#[derive(Debug)] -struct UniqueInstrumentMeterCore { - inner: Arc, - sync_state: Mutex>, - async_state: Mutex>, -} - -impl UniqueInstrumentMeterCore { - fn wrap(inner: Arc) -> Self { - UniqueInstrumentMeterCore { - inner, - sync_state: Mutex::new(HashMap::default()), - async_state: Mutex::new(HashMap::default()), - } - } -} - -impl MeterCore for UniqueInstrumentMeterCore { - fn record_batch_with_context( - &self, - cx: &Context, - attributes: &[KeyValue], - measurements: Vec, - ) { - self.inner - .record_batch_with_context(cx, attributes, measurements) - } - - fn new_sync_instrument(&self, descriptor: Descriptor) -> Result { - self.sync_state - .lock() - .map_err(Into::into) - .and_then(|mut state| { - let key = UniqueInstrumentKey::from(&descriptor); - check_sync_uniqueness(&state, &descriptor, &key).and_then(|instrument| { - match instrument { - Some(instrument) => Ok(instrument), - None => { - let instrument = self.inner.new_sync_instrument(descriptor)?; - state.insert(key, instrument.clone()); - - Ok(instrument) - } - } - }) - }) - } - - fn new_async_instrument( - &self, - descriptor: Descriptor, - runner: Option, - ) -> super::Result { - self.async_state - .lock() - .map_err(Into::into) - .and_then(|mut state| { - let key = UniqueInstrumentKey::from(&descriptor); - check_async_uniqueness(&state, &descriptor, &key).and_then(|instrument| { - match instrument { - Some(instrument) => Ok(instrument), - None => { - let instrument = self.inner.new_async_instrument(descriptor, runner)?; - state.insert(key, instrument.clone()); - - Ok(instrument) - } - } - }) - }) - } - - fn new_batch_observer(&self, runner: AsyncRunner) -> Result<()> { - self.inner.new_batch_observer(runner) - } -} - -fn check_sync_uniqueness( - instruments: &HashMap, - desc: &Descriptor, - key: &UniqueInstrumentKey, -) -> Result> { - if let Some(instrument) = instruments.get(key) { - if is_equal(instrument.descriptor(), desc) { - Ok(Some(instrument.clone())) - } else { - Err(MetricsError::MetricKindMismatch(format!( - "metric was {} ({}), registered as a {:?} {:?}", - desc.name(), - desc.instrumentation_name(), - desc.number_kind(), - desc.instrument_kind() - ))) - } - } else { - Ok(None) - } -} - -fn check_async_uniqueness( - instruments: &HashMap, - desc: &Descriptor, - key: &UniqueInstrumentKey, -) -> Result> { - if let Some(instrument) = instruments.get(key) { - if is_equal(instrument.descriptor(), desc) { - Ok(Some(instrument.clone())) - } else { - Err(MetricsError::MetricKindMismatch(format!( - "metric was {} ({}), registered as a {:?} {:?}", - desc.name(), - desc.instrumentation_name(), - desc.number_kind(), - desc.instrument_kind() - ))) - } - } else { - 
Ok(None) - } -} - -fn is_equal(a: &Descriptor, b: &Descriptor) -> bool { - a.instrument_kind() == b.instrument_kind() && a.number_kind() == b.number_kind() -} - -#[derive(Debug, PartialEq, Eq, Hash)] -struct UniqueInstrumentKey { - instrument_name: String, - instrumentation_name: String, -} - -impl From<&Descriptor> for UniqueInstrumentKey { - fn from(desc: &Descriptor) -> Self { - UniqueInstrumentKey { - instrument_name: desc.name().to_string(), - instrumentation_name: desc.instrumentation_name().to_string(), - } - } -} - -type UniqueSyncInstrument = Arc; -type UniqueAsyncInstrument = Arc; diff --git a/opentelemetry-api/src/metrics/sdk_api.rs b/opentelemetry-api/src/metrics/sdk_api.rs deleted file mode 100644 index b8b4fb1d2c..0000000000 --- a/opentelemetry-api/src/metrics/sdk_api.rs +++ /dev/null @@ -1,65 +0,0 @@ -//! Metrics SDK API -use crate::metrics::{AsyncRunner, Descriptor, Measurement, Number, Result}; -use crate::{Context, KeyValue}; -use std::any::Any; -use std::fmt; -use std::sync::Arc; - -/// The interface an SDK must implement to supply a Meter implementation. -pub trait MeterCore: fmt::Debug { - /// Atomically record a batch of measurements. - fn record_batch_with_context( - &self, - cx: &Context, - attributes: &[KeyValue], - measurements: Vec, - ); - - /// Create a new synchronous instrument implementation. - fn new_sync_instrument(&self, descriptor: Descriptor) -> Result>; - - /// Create a new asynchronous instrument implementation. - /// - /// Runner is `None` if used in batch as the batch runner is registered separately. - fn new_async_instrument( - &self, - descriptor: Descriptor, - runner: Option, - ) -> Result>; - - /// Register a batch observer - fn new_batch_observer(&self, runner: AsyncRunner) -> Result<()>; -} - -/// A common interface for synchronous and asynchronous instruments. -pub trait InstrumentCore: fmt::Debug + Send + Sync { - /// Description of the instrument's descriptor - fn descriptor(&self) -> &Descriptor; -} - -/// The implementation-level interface to a generic synchronous instrument -/// (e.g., ValueRecorder and Counter instruments). -pub trait SyncInstrumentCore: InstrumentCore { - /// Creates an implementation-level bound instrument, binding an attribute set - /// with this instrument implementation. - fn bind(&self, attributes: &'_ [KeyValue]) -> Arc; - - /// Capture a single synchronous metric event. - fn record_one(&self, number: Number, attributes: &'_ [KeyValue]); - - /// Returns self as any - fn as_any(&self) -> &dyn Any; -} - -/// The implementation-level interface to a generic synchronous bound instrument -pub trait SyncBoundInstrumentCore: fmt::Debug + Send + Sync { - /// Capture a single synchronous metric event. - fn record_one(&self, number: Number); -} - -/// An implementation-level interface to an asynchronous instrument (e.g., -/// Observer instruments). -pub trait AsyncInstrumentCore: InstrumentCore { - /// The underlying type as `Any` to support downcasting. - fn as_any(&self) -> &dyn Any; -} diff --git a/opentelemetry-api/src/metrics/up_down_counter.rs b/opentelemetry-api/src/metrics/up_down_counter.rs deleted file mode 100644 index 39811ebaf2..0000000000 --- a/opentelemetry-api/src/metrics/up_down_counter.rs +++ /dev/null @@ -1,107 +0,0 @@ -use crate::{ - metrics::{ - sync_instrument::{SyncBoundInstrument, SyncInstrument}, - Descriptor, InstrumentKind, Measurement, Meter, Number, NumberKind, Result, Unit, - }, - KeyValue, -}; -use std::marker; - -/// A metric instrument that sums non-monotonic values. 
-#[derive(Clone, Debug)] -pub struct UpDownCounter(SyncInstrument); - -impl UpDownCounter -where - T: Into, -{ - /// Creates a bound instrument for this counter. The attributes are associated with - /// values recorded via subsequent calls to record. - pub fn bind(&self, attributes: &[KeyValue]) -> BoundUpDownCounter { - let bound_instrument = self.0.bind(attributes); - - BoundUpDownCounter { bound_instrument } - } - - /// Increment this counter by a given T - pub fn add(&self, value: T, attributes: &[KeyValue]) { - self.0.direct_record(value.into(), attributes) - } - - /// Creates a Measurement for use with batch recording. - pub fn measurement(&self, value: T) -> Measurement { - Measurement::new(value.into(), self.0.instrument().clone()) - } -} - -/// BoundUpDownCounter is a bound instrument for counters. -#[derive(Clone, Debug)] -pub struct BoundUpDownCounter { - bound_instrument: SyncBoundInstrument, -} - -impl BoundUpDownCounter -where - T: Into, -{ - /// Increment this counter by a given T - pub fn add(&self, value: T) { - self.bound_instrument.direct_record(value.into()) - } -} - -/// Configuration for a new up down counter. -#[derive(Debug)] -pub struct UpDownCounterBuilder<'a, T> { - meter: &'a Meter, - descriptor: Descriptor, - _marker: marker::PhantomData, -} - -impl<'a, T> UpDownCounterBuilder<'a, T> { - /// Create a new counter builder - pub(crate) fn new(meter: &'a Meter, name: String, number_kind: NumberKind) -> Self { - UpDownCounterBuilder { - meter, - descriptor: Descriptor::new( - name, - meter.instrumentation_library().name, - meter.instrumentation_library().version, - meter.instrumentation_library().schema_url, - InstrumentKind::UpDownCounter, - number_kind, - ), - _marker: marker::PhantomData, - } - } - - /// Set the description for this counter - pub fn with_description>(mut self, description: S) -> Self { - self.descriptor.set_description(description.into()); - self - } - - /// Set the unit for this counter. - pub fn with_unit(mut self, unit: Unit) -> Self { - self.descriptor.config.unit = Some(unit); - self - } - - /// Creates a new counter instrument. - pub fn try_init(self) -> Result> { - let instrument = self.meter.new_sync_instrument(self.descriptor)?; - Ok(UpDownCounter(SyncInstrument::new(instrument))) - } - - /// Creates a new counter instrument. - /// - /// # Panics - /// - /// This function panics if the instrument cannot be created. Use try_init if you want to - /// handle errors. - pub fn init(self) -> UpDownCounter { - UpDownCounter(SyncInstrument::new( - self.meter.new_sync_instrument(self.descriptor).unwrap(), - )) - } -} diff --git a/opentelemetry-api/src/metrics/value_recorder.rs b/opentelemetry-api/src/metrics/value_recorder.rs deleted file mode 100644 index 7ef383b4d3..0000000000 --- a/opentelemetry-api/src/metrics/value_recorder.rs +++ /dev/null @@ -1,107 +0,0 @@ -use crate::metrics::{ - sync_instrument::{SyncBoundInstrument, SyncInstrument}, - Descriptor, InstrumentKind, Measurement, Meter, Number, NumberKind, Result, Unit, -}; -use crate::KeyValue; -use std::marker; - -/// ValueRecorder is a metric that records per-request non-additive values. -#[derive(Clone, Debug)] -pub struct ValueRecorder(SyncInstrument); - -impl ValueRecorder -where - T: Into, -{ - /// Creates a bound instrument for this ValueRecorder. The attributes are - /// associated with values recorded via subsequent calls to record. 
- pub fn bind(&self, attributes: &[KeyValue]) -> BoundValueRecorder { - let bound_instrument = self.0.bind(attributes); - BoundValueRecorder { bound_instrument } - } - - /// Record a new metric value - pub fn record(&self, value: T, attributes: &[KeyValue]) { - self.0.direct_record(value.into(), attributes) - } - - /// Creates a `Measurement` object to use with batch recording. - pub fn measurement(&self, value: T) -> Measurement { - Measurement::new(value.into(), self.0.instrument().clone()) - } -} - -/// BoundValueRecorder is a bound instrument for recording per-request -/// non-additive values. -/// -/// It inherits the Unbind function from syncBoundInstrument. -#[derive(Clone, Debug)] -pub struct BoundValueRecorder { - bound_instrument: SyncBoundInstrument, -} - -impl BoundValueRecorder -where - T: Into, -{ - /// Adds a new value to the list of ValueRecorder's records. The attributes - /// should contain the keys and values to be associated with this value. - pub fn record(&self, value: T) { - self.bound_instrument.direct_record(value.into()) - } -} - -/// Initialization configuration for a given `ValueRecorder`. -#[derive(Debug)] -pub struct ValueRecorderBuilder<'a, T> { - meter: &'a Meter, - descriptor: Descriptor, - _marker: marker::PhantomData, -} - -impl<'a, T> ValueRecorderBuilder<'a, T> { - pub(crate) fn new(meter: &'a Meter, name: String, number_kind: NumberKind) -> Self { - ValueRecorderBuilder { - meter, - descriptor: Descriptor::new( - name, - meter.instrumentation_library().name, - meter.instrumentation_library().version, - meter.instrumentation_library().schema_url, - InstrumentKind::Histogram, - number_kind, - ), - _marker: marker::PhantomData, - } - } - - /// Set the description for this `ValueRecorder` - pub fn with_description>(mut self, description: S) -> Self { - self.descriptor.set_description(description.into()); - self - } - - /// Set the unit for this `ValueRecorder`. - pub fn with_unit(mut self, unit: Unit) -> Self { - self.descriptor.config.unit = Some(unit); - self - } - - /// Creates a new value recorder. - pub fn try_init(self) -> Result> { - let instrument = self.meter.new_sync_instrument(self.descriptor)?; - Ok(ValueRecorder(SyncInstrument::new(instrument))) - } - - /// Creates a new value recorder. - /// - /// # Panics - /// - /// This function panics if the instrument cannot be created. Use try_init if you want to - /// handle errors. - pub fn init(self) -> ValueRecorder { - ValueRecorder(SyncInstrument::new( - self.meter.new_sync_instrument(self.descriptor).unwrap(), - )) - } -} diff --git a/opentelemetry-dynatrace/Cargo.toml b/opentelemetry-dynatrace/Cargo.toml index 99dd343639..a5b122b408 100644 --- a/opentelemetry-dynatrace/Cargo.toml +++ b/opentelemetry-dynatrace/Cargo.toml @@ -49,7 +49,7 @@ wasm = [ ] [dependencies] -async-std = { version = "= 1.8.0", features = ["unstable"], optional = true } +async-std = { version = "= 1.10.0", features = ["unstable"], optional = true } base64 = { version = "0.13", optional = true } futures = "0.3" futures-util = { version = "0.3", optional = true } diff --git a/opentelemetry-dynatrace/src/lib.rs b/opentelemetry-dynatrace/src/lib.rs index 3cbac9d4d9..a050841598 100644 --- a/opentelemetry-dynatrace/src/lib.rs +++ b/opentelemetry-dynatrace/src/lib.rs @@ -8,7 +8,9 @@ //! You can start a new Dynatrace metrics pipeline by using [`DynatracePipelineBuilder::metrics()`]. //! //! ```no_run -//! use opentelemetry::sdk::metrics::{selectors, PushController}; +//! use opentelemetry::runtime; +//! 
use opentelemetry::sdk::export::metrics::aggregation::cumulative_temporality_selector;
+//! use opentelemetry::sdk::metrics::selectors;
 //! use opentelemetry::sdk::util::tokio_interval_stream;
 //! use opentelemetry_dynatrace::ExportConfig;
 //!
@@ -19,7 +21,11 @@
 //!
 //!     // Then pass the exporter into pipeline builder
 //!     let meter = opentelemetry_dynatrace::new_pipeline()
-//!         .metrics(tokio::spawn, tokio_interval_stream)
+//!         .metrics(
+//!             selectors::simple::inexpensive(),
+//!             cumulative_temporality_selector(),
+//!             runtime::Tokio,
+//!         )
 //!         .with_exporter(dynatrace_exporter)
 //!         .build();
 //!
@@ -37,8 +43,9 @@
 //!
 //! ```
 //! # #[cfg(feature = "reqwest-client")] {
-//! use opentelemetry::sdk::metrics::{selectors, PushController};
-//! use opentelemetry::sdk::util::tokio_interval_stream;
+//! use opentelemetry::runtime;
+//! use opentelemetry::sdk::metrics::selectors;
+//! use opentelemetry::sdk::export::metrics::aggregation::cumulative_temporality_selector;
 //! use opentelemetry::KeyValue;
 //! use opentelemetry_dynatrace::transform::DimensionSet;
 //! use opentelemetry_dynatrace::ExportConfig;
@@ -52,7 +59,11 @@
 //! };
 //!
 //! let meter = opentelemetry_dynatrace::new_pipeline()
-//!     .metrics(tokio::spawn, tokio_interval_stream)
+//!     .metrics(
+//!         selectors::simple::inexpensive(),
+//!         cumulative_temporality_selector(),
+//!         runtime::Tokio,
+//!     )
 //!     .with_exporter(
 //!         opentelemetry_dynatrace::new_exporter()
 //!             .with_export_config(
@@ -74,7 +85,6 @@
 //!     .with_default_dimensions(DimensionSet::from(vec![
 //!         KeyValue::new("version", env!("CARGO_PKG_VERSION")),
 //!     ]))
-//!     .with_aggregator_selector(selectors::simple::Selector::Exact)
 //!     .build();
 //!
 //!     Ok(())
@@ -226,10 +236,16 @@ impl DynatraceExporterBuilder {
 /// ## Examples
 ///
 /// ```no_run
-/// use opentelemetry::sdk::util::tokio_interval_stream;
+/// use opentelemetry::runtime;
+/// use opentelemetry::sdk::export::metrics::aggregation::cumulative_temporality_selector;
+/// use opentelemetry::sdk::metrics::selectors;
 /// # fn main() -> Result<(), Box<dyn std::error::Error>> {
 /// let meter = opentelemetry_dynatrace::new_pipeline()
-///     .metrics(tokio::spawn, tokio_interval_stream);
+///     .metrics(
+///         selectors::simple::inexpensive(),
+///         cumulative_temporality_selector(),
+///         runtime::Tokio,
+///     );
 /// # Ok(())
 /// # }
 /// ```
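Condensing the doc examples above into one runnable sketch of the new wiring: `build()` now starts the basic controller and installs it as the global meter provider (per the `metric.rs` changes below), and the `stop` call is assumed to be the basic controller's shutdown API. Instrument names and the error type are illustrative.

```rust
use opentelemetry::sdk::export::metrics::aggregation::cumulative_temporality_selector;
use opentelemetry::sdk::metrics::selectors;
use opentelemetry::{global, runtime, Context, KeyValue};

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
    // Build, start, and globally register the metrics pipeline.
    let controller = opentelemetry_dynatrace::new_pipeline()
        .metrics(
            selectors::simple::inexpensive(),
            cumulative_temporality_selector(),
            runtime::Tokio,
        )
        .with_exporter(opentelemetry_dynatrace::new_exporter())
        .build()?;

    let meter = global::meter("app");
    let requests = meter.u64_counter("requests").init();
    requests.add(&Context::current(), 1, &[KeyValue::new("route", "/")]);

    // Flush remaining data and stop collection before exit (assumed API).
    controller.stop(&Context::current())?;
    Ok(())
}
```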
diff --git a/opentelemetry-dynatrace/src/metric.rs b/opentelemetry-dynatrace/src/metric.rs
index d2c51c6224..266160b682 100644
--- a/opentelemetry-dynatrace/src/metric.rs
+++ b/opentelemetry-dynatrace/src/metric.rs
@@ -8,20 +8,21 @@ use crate::exporter::ExportConfig;
 use crate::transform::record_to_metric_line;
 use crate::transform::{DimensionSet, MetricLine};
 use crate::{DynatraceExporterBuilder, DynatracePipelineBuilder, Error};
-use futures::Stream;
 use http::{
     header::{HeaderName, HeaderValue, AUTHORIZATION, CONTENT_TYPE, USER_AGENT},
     Method, Uri, Version,
 };
-use opentelemetry::metrics::{Descriptor, Result};
-use opentelemetry::sdk::export::metrics::{AggregatorSelector, ExportKindSelector};
-use opentelemetry::sdk::metrics::{PushController, PushControllerWorker};
-use opentelemetry::sdk::{
-    export::metrics::{CheckpointSet, ExportKind, ExportKindFor, Exporter},
-    metrics::selectors,
-    Resource,
+use opentelemetry::metrics::Result;
+use opentelemetry::runtime::Runtime;
+use opentelemetry::sdk::export::metrics::aggregation::{
+    AggregationKind, Temporality, TemporalitySelector,
 };
-use opentelemetry::{global, KeyValue};
+use opentelemetry::sdk::export::metrics::{AggregatorSelector, InstrumentationLibraryReader};
+use opentelemetry::sdk::metrics::controllers::BasicController;
+use opentelemetry::sdk::metrics::sdk_api::Descriptor;
+use opentelemetry::sdk::metrics::{controllers, processors};
+use opentelemetry::sdk::{export::metrics, Resource};
+use opentelemetry::{global, Context};
 use opentelemetry_http::HttpClient;
 use std::collections::HashMap;
 use std::convert::TryFrom;
@@ -39,20 +40,21 @@ const DEFAULT_USER_AGENT: &str = "opentelemetry-metric-rust";
 
 impl DynatracePipelineBuilder {
     /// Create a Dynatrace metrics pipeline.
-    pub fn metrics<SP, SO, I, IO>(
+    pub fn metrics<AS, TS, RT>(
         self,
-        spawn: SP,
-        interval: I,
-    ) -> DynatraceMetricsPipeline<selectors::simple::Selector, ExportKindSelector, SP, SO, I, IO>
+        aggregator_selector: AS,
+        temporality_selector: TS,
+        rt: RT,
+    ) -> DynatraceMetricsPipeline<AS, TS, RT>
     where
-        SP: Fn(PushControllerWorker) -> SO,
-        I: Fn(time::Duration) -> IO,
+        AS: AggregatorSelector + Send + Sync,
+        TS: TemporalitySelector + Clone + Send + Sync,
+        RT: Runtime,
    {
         DynatraceMetricsPipeline {
-            aggregator_selector: selectors::simple::Selector::Inexpensive,
-            export_selector: ExportKindSelector::Cumulative,
-            spawn,
-            interval,
+            rt,
+            aggregator_selector,
+            temporality_selector,
             exporter_pipeline: None,
             resource: None,
             period: None,
@@ -72,24 +74,24 @@ pub struct MetricsExporterBuilder {
 
 impl MetricsExporterBuilder {
     /// Build a Dynatrace metrics exporter with given configuration.
-    fn build_metrics_exporter<ES>(
+    fn build_metrics_exporter<TS>(
         self,
-        export_selector: ES,
+        temporality_selector: TS,
         prefix: Option<String>,
         default_dimensions: Option<DimensionSet>,
         timestamp: bool,
     ) -> Result<MetricsExporter>
     where
-        ES: ExportKindFor + Sync + Send + 'static,
+        TS: TemporalitySelector + Clone + Sync + Send + 'static,
    {
-        MetricsExporter::new::<ES>(
+        MetricsExporter::new::<TS>(
             self.builder.export_config,
             self.builder.http_config.client.unwrap(),
             self.builder.http_config.headers,
             prefix,
             default_dimensions,
             timestamp,
-            export_selector,
+            temporality_selector,
         )
     }
 }
@@ -102,17 +104,15 @@ impl From<DynatraceExporterBuilder> for MetricsExporterBuilder {
 
 /// Pipeline to build Dynatrace metrics exporter.
 #[derive(Debug)]
-pub struct DynatraceMetricsPipeline<AS, ES, SP, SO, I, IO>
+pub struct DynatraceMetricsPipeline<AS, TS, RT>
 where
     AS: AggregatorSelector + Send + Sync + 'static,
-    ES: ExportKindFor + Send + Sync + Clone + 'static,
-    SP: Fn(PushControllerWorker) -> SO,
-    I: Fn(time::Duration) -> IO,
+    TS: TemporalitySelector + Clone + Send + Sync + 'static,
+    RT: Runtime,
 {
+    rt: RT,
     aggregator_selector: AS,
-    export_selector: ES,
-    spawn: SP,
-    interval: I,
+    temporality_selector: TS,
     exporter_pipeline: Option<MetricsExporterBuilder>,
     resource: Option<Resource>,
     period: Option<time::Duration>,
@@ -122,18 +122,16 @@ where
     timestamp: bool,
 }
 
-impl<AS, ES, SP, SO, I, IO> DynatraceMetricsPipeline<AS, ES, SP, SO, I, IO>
+impl<AS, TS, RT> DynatraceMetricsPipeline<AS, TS, RT>
 where
     AS: AggregatorSelector + Send + Sync + 'static,
-    ES: ExportKindFor + Send + Sync + Clone + 'static,
-    SP: Fn(PushControllerWorker) -> SO,
-    I: Fn(time::Duration) -> IO,
-    IO: Stream + Send + 'static,
+    TS: TemporalitySelector + Clone + Send + Sync + 'static,
+    RT: Runtime,
 {
     /// Build with resource key value pairs.
-    pub fn with_resource<T: IntoIterator<Item = R>, R: Into<KeyValue>>(self, resource: T) -> Self {
+    pub fn with_resource(self, resource: Resource) -> Self {
         DynatraceMetricsPipeline {
-            resource: Some(Resource::new(resource.into_iter().map(Into::into))),
+            resource: Some(resource),
             ..self
         }
     }
@@ -146,34 +144,6 @@ where
         }
     }
 
-    /// Build with an aggregator selector.
-    pub fn with_aggregator_selector<T>(
-        self,
-        aggregator_selector: T,
-    ) -> DynatraceMetricsPipeline<T, ES, SP, SO, I, IO>
-    where
-        T: AggregatorSelector + Send + Sync + 'static,
-    {
-        DynatraceMetricsPipeline {
-            aggregator_selector,
-            export_selector: self.export_selector,
-            spawn: self.spawn,
-            interval: self.interval,
-            exporter_pipeline: self.exporter_pipeline,
-            resource: self.resource,
-            period: self.period,
-            timeout: self.timeout,
-            prefix: self.prefix,
-            default_dimensions: self.default_dimensions,
-            timestamp: self.timestamp,
-        }
-    }
-
-    /// Build with a spawn function.
-    pub fn with_spawn(self, spawn: SP) -> Self {
-        DynatraceMetricsPipeline { spawn, ..self }
-    }
-
     /// Build with a timeout.
     pub fn with_timeout(self, timeout: time::Duration) -> Self {
         DynatraceMetricsPipeline {
@@ -190,34 +160,6 @@ where
         }
     }
 
-    /// Build with an interval function.
-    pub fn with_interval(self, interval: I) -> Self {
-        DynatraceMetricsPipeline { interval, ..self }
-    }
-
-    /// Build with an export kind selector.
-    pub fn with_export_kind<E>(
-        self,
-        export_selector: E,
-    ) -> DynatraceMetricsPipeline<AS, E, SP, SO, I, IO>
-    where
-        E: ExportKindFor + Send + Sync + Clone + 'static,
-    {
-        DynatraceMetricsPipeline {
-            aggregator_selector: self.aggregator_selector,
-            export_selector,
-            spawn: self.spawn,
-            interval: self.interval,
-            exporter_pipeline: self.exporter_pipeline,
-            resource: self.resource,
-            period: self.period,
-            timeout: self.timeout,
-            prefix: self.prefix,
-            default_dimensions: self.default_dimensions,
-            timestamp: self.timestamp,
-        }
-    }
-
     /// Set the prefix to prepend to all metric data.
     pub fn with_prefix(self, prefix: String) -> Self {
         DynatraceMetricsPipeline {
@@ -245,35 +187,37 @@ where
     }
 
     /// Build the push controller.
-    pub fn build(self) -> Result<PushController> {
+    pub fn build(self) -> Result<BasicController> {
         let exporter = self
             .exporter_pipeline
             .ok_or(Error::NoExporterBuilder)?
             .build_metrics_exporter(
-                self.export_selector.clone(),
+                self.temporality_selector.clone(),
                 self.prefix,
                 self.default_dimensions,
                 self.timestamp,
             )?;
 
-        let mut builder = opentelemetry::sdk::metrics::controllers::push(
+        let mut builder = controllers::basic(processors::factory(
             self.aggregator_selector,
-            self.export_selector,
-            exporter,
-            self.spawn,
-            self.interval,
-        );
+            self.temporality_selector,
+        ))
+        .with_exporter(exporter);
+
         if let Some(period) = self.period {
-            builder = builder.with_period(period);
+            builder = builder.with_collect_period(period);
+        }
+        if let Some(timeout) = self.timeout {
+            builder = builder.with_collect_timeout(timeout)
         }
         if let Some(resource) = self.resource {
             builder = builder.with_resource(resource);
         }
-        if let Some(timeout) = self.timeout {
-            builder = builder.with_timeout(timeout)
-        }
         let controller = builder.build();
-        global::set_meter_provider(controller.provider());
+        controller.start(&Context::current(), self.rt)?;
+
+        global::set_meter_provider(controller.clone());
+
         Ok(controller)
     }
 }
@@ -303,7 +247,7 @@ pub struct MetricsExporter {
 
     timestamp: bool,
 
-    export_kind_selector: Arc<dyn ExportKindFor + Send + Sync>,
+    temporality_selector: Arc<dyn TemporalitySelector + Send + Sync>,
 }
 
 impl Debug for MetricsExporter {
@@ -312,22 +256,16 @@ impl Debug for MetricsExporter {
     }
 }
 
-impl ExportKindFor for MetricsExporter {
-    fn export_kind_for(&self, descriptor: &Descriptor) -> ExportKind {
-        self.export_kind_selector.export_kind_for(descriptor)
-    }
-}
-
 impl MetricsExporter {
     /// Create a new `MetricsExporter`.
- pub fn new( + pub fn new( export_config: ExportConfig, client: Box, headers: Option>, prefix: Option, default_dimensions: Option, timestamp: bool, - export_kind_selector: T, + temporality_selector: T, ) -> Result { let uri: Uri = if let Some(endpoint) = export_config.endpoint { endpoint.parse() @@ -382,20 +320,31 @@ impl MetricsExporter { prefix, default_dimensions, timestamp, - export_kind_selector: Arc::new(export_kind_selector), + temporality_selector: Arc::new(temporality_selector), }) } } -impl Exporter for MetricsExporter { +impl TemporalitySelector for MetricsExporter { + fn temporality_for(&self, descriptor: &Descriptor, kind: &AggregationKind) -> Temporality { + self.temporality_selector.temporality_for(descriptor, kind) + } +} + +impl metrics::MetricsExporter for MetricsExporter { /// Export metric data to Dynatrace /// - fn export(&self, checkpoint_set: &mut dyn CheckpointSet) -> Result<()> { + fn export( + &self, + _cx: &Context, + _res: &Resource, + reader: &dyn InstrumentationLibraryReader, + ) -> Result<()> { let mut metric_line_data: Vec = Vec::default(); - checkpoint_set.try_for_each(self.export_kind_selector.as_ref(), &mut |record| { - match record_to_metric_line( + reader.try_for_each(&mut |_lib, reader| { + reader.try_for_each(self, &mut |record| match record_to_metric_line( record, - self.export_kind_selector.as_ref(), + self.temporality_selector.as_ref(), self.prefix.clone(), self.default_dimensions.clone(), self.timestamp, @@ -405,7 +354,7 @@ impl Exporter for MetricsExporter { Ok(()) } Err(err) => Err(err), - } + }) })?; if metric_line_data.is_empty() { diff --git a/opentelemetry-dynatrace/src/transform/metrics.rs b/opentelemetry-dynatrace/src/transform/metrics.rs index f0aca6ffdf..87bb819496 100644 --- a/opentelemetry-dynatrace/src/transform/metrics.rs +++ b/opentelemetry-dynatrace/src/transform/metrics.rs @@ -1,16 +1,19 @@ //! OpenTelemetry Dynatrace Metrics use crate::transform::common::get_time; use opentelemetry::attributes::merge_iters; -use opentelemetry::metrics::{MetricsError, Number, NumberKind}; -use opentelemetry::sdk::export::metrics::{ - Count, ExportKind, ExportKindFor, Histogram as SdkHistogram, LastValue, Max, Min, Points, - Record, Sum as SdkSum, -}; +use opentelemetry::metrics::MetricsError; +use opentelemetry::sdk::export::metrics::aggregation::{Count, Temporality, TemporalitySelector}; use opentelemetry::sdk::metrics::aggregators::{ - ArrayAggregator, HistogramAggregator, LastValueAggregator, MinMaxSumCountAggregator, - SumAggregator, + HistogramAggregator, LastValueAggregator, SumAggregator, +}; +use opentelemetry::sdk::{ + export::metrics::{ + aggregation::{Histogram as SdkHistogram, LastValue, Sum as SdkSum}, + Record, + }, + metrics::sdk_api::{Number, NumberKind}, }; -use opentelemetry::{Key, KeyValue, Value}; +use opentelemetry::{global, Key, KeyValue, Value}; use std::borrow::Cow; use std::cmp; use std::collections::{btree_map, BTreeMap}; @@ -441,7 +444,7 @@ impl PartialEq for MetricLine { /// Transform a record to a Dynatrace metrics ingestion protocol metric line. 
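Before the per-record line-protocol conversion below, it is worth restating the shape of the new export path introduced above: exporters now receive the resource and an `InstrumentationLibraryReader` instead of a `CheckpointSet`, and walk it in two nested passes. A sketch with signatures paraphrased from the hunks above; the record handling is a placeholder:

```rust
use opentelemetry::metrics::Result;
use opentelemetry::sdk::export::metrics::aggregation::TemporalitySelector;
use opentelemetry::sdk::export::metrics::InstrumentationLibraryReader;

fn walk(
    selector: &dyn TemporalitySelector,
    reader: &dyn InstrumentationLibraryReader,
) -> Result<()> {
    // Outer pass: one entry per instrumentation library.
    reader.try_for_each(&mut |_library, records| {
        // Inner pass: the exporter supplies itself as the temporality selector.
        records.try_for_each(selector, &mut |record| {
            let _ = record.descriptor().name(); // convert the record here
            Ok(())
        })
    })
}
```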
pub(crate) fn record_to_metric_line( record: &Record, - export_selector: &dyn ExportKindFor, + temporality_selector: &dyn TemporalitySelector, prefix: Option, default_dimensions: Option, timestamp: bool, @@ -472,36 +475,12 @@ pub(crate) fn record_to_metric_line( DimensionSet::from_iter(iter.map(|(k, v)| (k.to_owned(), v.to_owned()))) }; - let temporality = export_selector.export_kind_for(descriptor); + let temporality = + temporality_selector.temporality_for(descriptor, aggregator.aggregation().kind()); let mut metric_line_data: Vec = Vec::with_capacity(1); - if let Some(array) = aggregator.as_any().downcast_ref::() { - if let Ok(points) = array.points() { - let timestamp = if timestamp { - Some(get_time(record.end_time().to_owned())) - } else { - None - }; - - metric_line_data.reserve(points.len()); - - points.iter().for_each(|val| { - metric_line_data.push(MetricLine { - kind: kind.clone(), - key: key.clone(), - dimensions: Some(dimensions.clone()), - min: None, - max: None, - sum: None, - count: None, - delta: None, - gauge: Some(val.to_owned()), - timestamp, - }) - }) - } - } else if let Some(last_value) = aggregator.as_any().downcast_ref::() { + if let Some(last_value) = aggregator.as_any().downcast_ref::() { let (val, sample_time) = last_value.last_value()?; let timestamp = if timestamp { Some(get_time(sample_time)) @@ -543,39 +522,15 @@ pub(crate) fn record_to_metric_line( }; match temporality { - ExportKind::Cumulative => metric_line.gauge = Some(val), - ExportKind::Delta => metric_line.delta = Some(val), + Temporality::Cumulative => metric_line.gauge = Some(val), + Temporality::Delta => metric_line.delta = Some(val), + other => global::handle_error(MetricsError::Other(format!( + "Unsupported temporality {:?}", + other + ))), }; metric_line_data.push(metric_line); - } else if let Some(min_max_sum_count) = aggregator - .as_any() - .downcast_ref::() - { - let (min, max, sum, count) = ( - min_max_sum_count.min()?, - min_max_sum_count.max()?, - min_max_sum_count.sum()?, - min_max_sum_count.count()?, - ); - let timestamp = if timestamp { - Some(get_time(record.end_time().to_owned())) - } else { - None - }; - - metric_line_data.push(MetricLine { - kind: kind.to_owned(), - key, - dimensions: Some(dimensions), - min: Some(min), - max: Some(max), - sum: Some(sum), - count: Some(count), - delta: None, - gauge: None, - timestamp, - }); } else if let Some(histogram) = aggregator.as_any().downcast_ref::() { let (sum, count, buckets) = (histogram.sum()?, histogram.count()?, histogram.histogram()?); let (counts, boundaries) = (buckets.counts(), buckets.boundaries()); @@ -645,14 +600,16 @@ mod tests { use crate::transform::common::get_time; use crate::transform::metrics::MetricLine; use crate::transform::record_to_metric_line; - use opentelemetry::attributes::AttributeSet; - use opentelemetry::metrics::{Descriptor, InstrumentKind, MetricsError, Number, NumberKind}; - use opentelemetry::sdk::export::metrics::{record, Aggregator, ExportKindSelector}; + use opentelemetry::sdk::export::metrics::aggregation::{ + cumulative_temporality_selector, delta_temporality_selector, + }; + use opentelemetry::sdk::export::metrics::record; use opentelemetry::sdk::metrics::aggregators::{ - histogram, last_value, min_max_sum_count, SumAggregator, + histogram, last_value, Aggregator, SumAggregator, }; - use opentelemetry::sdk::Resource; - use opentelemetry::KeyValue; + use opentelemetry::sdk::metrics::sdk_api::{Descriptor, InstrumentKind, Number, NumberKind}; + use opentelemetry::{attributes::AttributeSet, 
metrics::MetricsError}; + use opentelemetry::{Context, KeyValue}; use std::borrow::Cow; use std::sync::Arc; use std::time::{Duration, SystemTime}; @@ -763,131 +720,39 @@ mod tests { .cloned() .map(|(k, v)| opentelemetry::KeyValue::new(k, v)), ); - let resource = Resource::new(vec![ - opentelemetry::KeyValue::new("process", "rust"), - opentelemetry::KeyValue::new("runtime", "sync"), - ]); let start_time = SystemTime::now(); let end_time = SystemTime::now().checked_add(Duration::new(30, 0)).unwrap(); - - // Array - { - let descriptor = Descriptor::new( - "test_array".to_string(), - "test", - None, - None, - InstrumentKind::Counter, - NumberKind::I64, - ); - let aggregator = ArrayAggregator::default(); - let val = Number::from(12_i64); - aggregator.update(&val, &descriptor)?; - let val = Number::from(24_i64); - aggregator.update(&val, &descriptor)?; - let wrapped_aggregator: Arc = Arc::new(aggregator); - let record = record( - &descriptor, - &attribute_set, - &resource, - Some(&wrapped_aggregator), - start_time, - end_time, - ); - - let metric_line_data = - record_to_metric_line(&record, &ExportKindSelector::Cumulative, None, None, true)?; - - let dimensions = DimensionSet::from(vec![ - KeyValue::new("KEY", "VALUE"), - KeyValue::new("test.abc_123-", "value.123_foo-bar"), - KeyValue::new(METRICS_SOURCE, "opentelemetry".to_string()), - ]); - - let expect = vec![ - MetricLine { - key: MetricKey::new("test_array"), - kind: NumberKind::I64, - dimensions: Some(dimensions.clone()), - min: None, - max: None, - sum: None, - count: None, - delta: None, - gauge: Some(Number::from(12_i64)), - timestamp: Some(get_time(end_time)), - }, - MetricLine { - key: MetricKey::new("test_array"), - kind: NumberKind::I64, - dimensions: Some(dimensions), - min: None, - max: None, - sum: None, - count: None, - delta: None, - gauge: Some(Number::from(24_i64)), - timestamp: Some(get_time(end_time)), - }, - ]; - - assert_eq!(expect, metric_line_data); - - let mut metric_lines: Vec = metric_line_data - .iter() - .map(|export_line| format!("{}", export_line)) - .collect(); - metric_lines.sort_unstable(); - - let mut iter = metric_lines.iter(); - - assert_eq!( - Some(&format!( - "test_array,key=VALUE,{}={},test.abc_123-=value.123_foo-bar gauge,12 {}", - METRICS_SOURCE, - "opentelemetry", - get_time(end_time), - )), - iter.next() - ); - assert_eq!( - Some(&format!( - "test_array,key=VALUE,{}={},test.abc_123-=value.123_foo-bar gauge,24 {}", - METRICS_SOURCE, - "opentelemetry", - get_time(end_time), - )), - iter.next() - ); - assert_eq!(None, iter.next()); - } + let cx = Context::new(); // Sum { let descriptor = Descriptor::new( "test_sum".to_string(), - "test", - None, - None, InstrumentKind::Counter, NumberKind::I64, + None, + None, ); let aggregator = SumAggregator::default(); let val = Number::from(12_i64); - aggregator.update(&val, &descriptor)?; + aggregator.update(&cx, &val, &descriptor)?; let wrapped_aggregator: Arc = Arc::new(aggregator); let record = record( &descriptor, &attribute_set, - &resource, Some(&wrapped_aggregator), start_time, end_time, ); // ExportKindSelector::Cumulative - let metric_line_data = - record_to_metric_line(&record, &ExportKindSelector::Cumulative, None, None, true)?; + let metric_line_data = record_to_metric_line( + &record, + &cumulative_temporality_selector(), + None, + None, + true, + )?; let dimensions = DimensionSet::from(vec![ KeyValue::new("KEY", "VALUE"), @@ -931,7 +796,7 @@ mod tests { // ExportKindSelector::Delta let metric_line_data = - record_to_metric_line(&record, 
&ExportKindSelector::Delta, None, None, true)?; + record_to_metric_line(&record, &delta_temporality_selector(), None, None, true)?; let dimensions = DimensionSet::from(vec![ KeyValue::new("KEY", "VALUE"), @@ -978,29 +843,32 @@ mod tests { { let descriptor = Descriptor::new( "test_last_value".to_string(), - "test", + InstrumentKind::GaugeObserver, + NumberKind::I64, None, None, - InstrumentKind::ValueObserver, - NumberKind::I64, ); let aggregator = last_value(); let val1 = Number::from(12_i64); let val2 = Number::from(14_i64); - aggregator.update(&val1, &descriptor)?; - aggregator.update(&val2, &descriptor)?; + aggregator.update(&cx, &val1, &descriptor)?; + aggregator.update(&cx, &val2, &descriptor)?; let wrapped_aggregator: Arc = Arc::new(aggregator); let record = record( &descriptor, &attribute_set, - &resource, Some(&wrapped_aggregator), start_time, end_time, ); - let metric_line_data = - record_to_metric_line(&record, &ExportKindSelector::Cumulative, None, None, false)?; + let metric_line_data = record_to_metric_line( + &record, + &cumulative_temporality_selector(), + None, + None, + false, + )?; let dimensions = DimensionSet::from(vec![ KeyValue::new("KEY", "VALUE"), @@ -1041,103 +909,37 @@ mod tests { assert_eq!(None, iter.next()); } - // MinMaxSumCount - { - let descriptor = Descriptor::new( - "test_min_max_sum_count".to_string(), - "test", - None, - None, - InstrumentKind::UpDownSumObserver, - NumberKind::I64, - ); - let aggregator = min_max_sum_count(&descriptor); - let vals = vec![1i64.into(), 2i64.into(), 3i64.into()]; - for val in vals.iter() { - aggregator.update(val, &descriptor)?; - } - let wrapped_aggregator: Arc = Arc::new(aggregator); - let record = record( - &descriptor, - &attribute_set, - &resource, - Some(&wrapped_aggregator), - start_time, - end_time, - ); - - let metric_line_data = - record_to_metric_line(&record, &ExportKindSelector::Cumulative, None, None, true)?; - - let dimensions = DimensionSet::from(vec![ - KeyValue::new("KEY", "VALUE"), - KeyValue::new("test.abc_123-", "value.123_foo-bar"), - KeyValue::new(METRICS_SOURCE, "opentelemetry".to_string()), - ]); - - let expect = vec![MetricLine { - key: MetricKey::new("test_min_max_sum_count"), - kind: NumberKind::I64, - dimensions: Some(dimensions), - min: Some(Number::from(1_i64)), - max: Some(Number::from(3_i64)), - sum: Some(Number::from(6_i64)), - count: Some(3), - delta: None, - gauge: None, - timestamp: Some(get_time(end_time)), - }]; - - assert_eq!(expect, metric_line_data); - - let mut metric_lines: Vec = metric_line_data - .iter() - .map(|export_line| format!("{}", export_line)) - .collect(); - metric_lines.sort_unstable(); - - let mut iter = metric_lines.iter(); - - assert_eq!( - Some(&format!( - "test_min_max_sum_count,key=VALUE,{}={},test.abc_123-=value.123_foo-bar gauge,min=1,max=3,sum=6,count=3 {}", - METRICS_SOURCE, - "opentelemetry", - get_time(end_time), - )), - iter.next() - ); - assert_eq!(None, iter.next()); - } - // Histogram { let descriptor = Descriptor::new( "test_histogram".to_string(), - "test", - None, - None, InstrumentKind::Histogram, NumberKind::I64, + None, + None, ); let bound = [0.1, 0.2, 0.3]; - let aggregator = histogram(&descriptor, &bound); + let aggregator = histogram(&bound); let vals = vec![1i64.into(), 2i64.into(), 3i64.into()]; for val in vals.iter() { - aggregator.update(val, &descriptor)?; + aggregator.update(&cx, val, &descriptor)?; } let wrapped_aggregator: Arc = Arc::new(aggregator); let record = record( &descriptor, &attribute_set, - &resource, 
Some(&wrapped_aggregator), start_time, end_time, ); - let metric_line_data = - record_to_metric_line(&record, &ExportKindSelector::Cumulative, None, None, true)?; + let metric_line_data = record_to_metric_line( + &record, + &cumulative_temporality_selector(), + None, + None, + true, + )?; let dimensions = DimensionSet::from(vec![ KeyValue::new("KEY", "VALUE"), diff --git a/opentelemetry-dynatrace/tests/http_test.rs b/opentelemetry-dynatrace/tests/http_test.rs index 23d1b7474d..8e464fc614 100644 --- a/opentelemetry-dynatrace/tests/http_test.rs +++ b/opentelemetry-dynatrace/tests/http_test.rs @@ -1,14 +1,44 @@ #[cfg(all(feature = "metrics", feature = "rt-tokio"))] mod test { + use futures::future::BoxFuture; use http::header::{HeaderValue, AUTHORIZATION, USER_AGENT}; use hyper::{ body, service::{make_service_fn, service_fn}, Body, Method, Request, Response, Server, }; - use opentelemetry::{global, Key, KeyValue}; - use std::net::SocketAddr; + use opentelemetry::{ + global, runtime, + sdk::{export::metrics::aggregation::cumulative_temporality_selector, metrics::selectors}, + Context, Key, KeyValue, + }; use std::time::Duration; + use std::{net::SocketAddr, pin::Pin}; + + #[derive(Clone)] + struct TestRuntime { + tick_rx: tokio::sync::watch::Receiver, + } + impl runtime::Runtime for TestRuntime { + type Interval = futures::stream::Once>; + + type Delay = Pin>; + + fn interval(&self, _duration: Duration) -> Self::Interval { + let mut tick_rx = self.tick_rx.clone(); + futures::stream::once(Box::pin(async move { + let _ = tick_rx.changed().await.is_ok(); + })) + } + + fn spawn(&self, future: futures::future::BoxFuture<'static, ()>) { + tokio::spawn(future); + } + + fn delay(&self, duration: Duration) -> Self::Delay { + Box::pin(tokio::time::sleep(duration)) + } + } #[tokio::test(flavor = "multi_thread")] async fn integration_test() { @@ -16,6 +46,7 @@ mod test { let (req_tx, mut req_rx) = tokio::sync::mpsc::channel(1); let (tick_tx, tick_rx) = tokio::sync::watch::channel(0); let (shutdown_tx, shutdown_rx) = tokio::sync::oneshot::channel(); + let cx = Context::new(); let addr: SocketAddr = "[::1]:0".parse().unwrap(); @@ -63,13 +94,12 @@ mod test { let addr = addr_rx.await.unwrap(); - let _meter = opentelemetry_dynatrace::new_pipeline() - .metrics(tokio::spawn, move |_: Duration| { - let mut tick_rx = tick_rx.clone(); - futures::stream::once(async move { - let _ = tick_rx.changed().await.is_ok(); - }) - }) + let pipeline = opentelemetry_dynatrace::new_pipeline().metrics( + selectors::simple::inexpensive(), + cumulative_temporality_selector(), + TestRuntime { tick_rx }, + ); + pipeline .with_exporter(opentelemetry_dynatrace::new_exporter().with_export_config( opentelemetry_dynatrace::ExportConfig { endpoint: Some(format!("http://{}/test/a/b/c", addr)), @@ -87,6 +117,7 @@ mod test { let recorder = meter.u64_counter("test1").init(); recorder.add( + &cx, 90, &[ KeyValue::new("A", "test1"), @@ -96,10 +127,10 @@ mod test { ); let recorder = meter.f64_counter("test2").init(); - recorder.add(1e10 + 0.123, &[KeyValue::new("foo", "bar")]); + recorder.add(&cx, 1e10 + 0.123, &[KeyValue::new("foo", "bar")]); let recorder = meter.i64_histogram("test3").init(); - recorder.record(-999, &[Key::new("foo").i64(-123)]); + recorder.record(&cx, -999, &[Key::new("foo").i64(-123)]); let _ = tick_tx.send(1); }); diff --git a/opentelemetry-jaeger/Cargo.toml b/opentelemetry-jaeger/Cargo.toml index 686dba584c..085cc7ca32 100644 --- a/opentelemetry-jaeger/Cargo.toml +++ b/opentelemetry-jaeger/Cargo.toml @@ -19,7 +19,7 @@ 
all-features = true
rustdoc-args = ["--cfg", "docsrs"]

[dependencies]
-async-std = { version = "= 1.8.0", optional = true }
+async-std = { version = "= 1.10.0", optional = true }
 async-trait = "0.1"
 base64 = { version = "0.13", optional = true }
 futures = "0.3"
diff --git a/opentelemetry-otlp/src/metric.rs b/opentelemetry-otlp/src/metric.rs
index a3efd7065b..582223dd5d 100644
--- a/opentelemetry-otlp/src/metric.rs
+++ b/opentelemetry-otlp/src/metric.rs
@@ -10,16 +10,24 @@ use crate::exporter::{
 };
 use crate::transform::{record_to_metric, sink, CheckpointedMetrics};
 use crate::{Error, OtlpPipeline};
-use futures_util::Stream;
-use opentelemetry::metrics::{Descriptor, Result};
-use opentelemetry::sdk::{
-    export::metrics::{
-        AggregatorSelector, CheckpointSet, ExportKind, ExportKindFor, ExportKindSelector, Exporter,
+use core::fmt;
+use opentelemetry::{global, metrics::Result, runtime::Runtime};
+use opentelemetry::{
+    sdk::{
+        export::metrics::{
+            self,
+            aggregation::{AggregationKind, Temporality, TemporalitySelector},
+            AggregatorSelector, InstrumentationLibraryReader,
+        },
+        metrics::{
+            controllers::{self, BasicController},
+            processors,
+            sdk_api::Descriptor,
+        },
+        Resource,
     },
-    metrics::{selectors, PushController, PushControllerWorker},
-    InstrumentationLibrary, Resource,
+    Context,
 };
-use opentelemetry::{global, KeyValue};
 #[cfg(feature = "grpc-tonic")]
 use opentelemetry_proto::tonic::collector::metrics::v1::{
     metrics_service_client::MetricsServiceClient, ExportMetricsServiceRequest,
@@ -27,7 +35,6 @@ use opentelemetry_proto::tonic::collector::metrics::v1::{
 use std::fmt::{Debug, Formatter};
 #[cfg(feature = "grpc-tonic")]
 use std::str::FromStr;
-use std::sync::Arc;
 use std::sync::Mutex;
 use std::time;
 use std::time::Duration;
@@ -46,20 +53,21 @@ pub const OTEL_EXPORTER_OTLP_METRICS_TIMEOUT: &str = "OTEL_EXPORTER_OTLP_METRICS_TIMEOUT";

 impl OtlpPipeline {
     /// Create an OTLP metrics pipeline.
-    pub fn metrics<SP, SO, I, IO>(
+    pub fn metrics<AS, TS, RT>(
         self,
-        spawn: SP,
-        interval: I,
-    ) -> OtlpMetricPipeline<selectors::simple::Selector, ExportKindSelector, SP, SO, I, IO>
+        aggregator_selector: AS,
+        temporality_selector: TS,
+        rt: RT,
+    ) -> OtlpMetricPipeline<AS, TS, RT>
     where
-        SP: Fn(PushControllerWorker) -> SO,
-        I: Fn(time::Duration) -> IO,
+        AS: AggregatorSelector,
+        TS: TemporalitySelector + Clone,
+        RT: Runtime,
     {
         OtlpMetricPipeline {
-            aggregator_selector: selectors::simple::Selector::Inexpensive,
-            export_selector: ExportKindSelector::Cumulative,
-            spawn,
-            interval,
+            rt,
+            aggregator_selector,
+            temporality_selector,
             exporter_pipeline: None,
             resource: None,
             period: None,
@@ -77,16 +85,16 @@ pub enum MetricsExporterBuilder {

 impl MetricsExporterBuilder {
     /// Build an OTLP metrics exporter with given configuration.
-    fn build_metrics_exporter<ES>(self, export_selector: ES) -> Result<MetricsExporter>
-    where
-        ES: ExportKindFor + Sync + Send + 'static,
-    {
+    fn build_metrics_exporter(
+        self,
+        temporality_selector: Box<dyn TemporalitySelector + Send + Sync>,
+    ) -> Result<MetricsExporter> {
         match self {
             #[cfg(feature = "grpc-tonic")]
             MetricsExporterBuilder::Tonic(builder) => Ok(MetricsExporter::new(
                 builder.exporter_config,
                 builder.tonic_config,
-                export_selector,
+                temporality_selector,
             )?),
         }
     }
@@ -102,36 +110,26 @@ impl From<TonicExporterBuilder> for MetricsExporterBuilder {
 ///
 /// Note that currently the OTLP metrics exporter only supports tonic as its gRPC layer and tokio as
 /// runtime.
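Putting the new pieces together before the pipeline type that follows: construction now takes both selectors and a runtime, and the exporter is attached afterwards. A hypothetical end-to-end setup assuming the `grpc-tonic` and `rt-tokio` features; the period is illustrative, and `with_period`/`tonic()` are assumed from surrounding, unchanged context:

```rust
use opentelemetry::runtime;
use opentelemetry::sdk::export::metrics::aggregation::cumulative_temporality_selector;
use opentelemetry::sdk::metrics::{controllers::BasicController, selectors};
use std::time::Duration;

fn init_otlp_metrics() -> opentelemetry::metrics::Result<BasicController> {
    opentelemetry_otlp::new_pipeline()
        .metrics(
            selectors::simple::inexpensive(),
            cumulative_temporality_selector(),
            runtime::Tokio,
        )
        .with_exporter(opentelemetry_otlp::new_exporter().tonic())
        .with_period(Duration::from_secs(10)) // how often the controller collects and exports
        .build()
}
```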
-#[derive(Debug)] -pub struct OtlpMetricPipeline -where - AS: AggregatorSelector + Send + Sync + 'static, - ES: ExportKindFor + Send + Sync + Clone + 'static, - SP: Fn(PushControllerWorker) -> SO, - I: Fn(time::Duration) -> IO, -{ +pub struct OtlpMetricPipeline { + rt: RT, aggregator_selector: AS, - export_selector: ES, - spawn: SP, - interval: I, + temporality_selector: TS, exporter_pipeline: Option, resource: Option, period: Option, timeout: Option, } -impl OtlpMetricPipeline +impl OtlpMetricPipeline where AS: AggregatorSelector + Send + Sync + 'static, - ES: ExportKindFor + Send + Sync + Clone + 'static, - SP: Fn(PushControllerWorker) -> SO, - I: Fn(time::Duration) -> IO, - IO: Stream + Send + 'static, + TS: TemporalitySelector + Clone + Send + Sync + 'static, + RT: Runtime, { /// Build with resource key value pairs. - pub fn with_resource, R: Into>(self, resource: T) -> Self { + pub fn with_resource(self, resource: Resource) -> Self { OtlpMetricPipeline { - resource: Some(Resource::new(resource.into_iter().map(Into::into))), + resource: Some(resource), ..self } } @@ -144,31 +142,6 @@ where } } - /// Build with the aggregator selector - pub fn with_aggregator_selector( - self, - aggregator_selector: T, - ) -> OtlpMetricPipeline - where - T: AggregatorSelector + Send + Sync + 'static, - { - OtlpMetricPipeline { - aggregator_selector, - export_selector: self.export_selector, - spawn: self.spawn, - interval: self.interval, - exporter_pipeline: self.exporter_pipeline, - resource: self.resource, - period: self.period, - timeout: self.timeout, - } - } - - /// Build with spawn function - pub fn with_spawn(self, spawn: SP) -> Self { - OtlpMetricPipeline { spawn, ..self } - } - /// Build with timeout pub fn with_timeout(self, timeout: time::Duration) -> Self { OtlpMetricPipeline { @@ -185,57 +158,48 @@ where } } - /// Build with interval function - pub fn with_interval(self, interval: I) -> Self { - OtlpMetricPipeline { interval, ..self } - } - - /// Build with export kind selector - pub fn with_export_kind(self, export_selector: E) -> OtlpMetricPipeline - where - E: ExportKindFor + Send + Sync + Clone + 'static, - { - OtlpMetricPipeline { - aggregator_selector: self.aggregator_selector, - export_selector, - spawn: self.spawn, - interval: self.interval, - exporter_pipeline: self.exporter_pipeline, - resource: self.resource, - period: self.period, - timeout: self.timeout, - } - } - /// Build push controller. - pub fn build(self) -> Result { + pub fn build(self) -> Result { let exporter = self .exporter_pipeline .ok_or(Error::NoExporterBuilder)? 
- .build_metrics_exporter(self.export_selector.clone())?; + .build_metrics_exporter(Box::new(self.temporality_selector.clone()))?; - let mut builder = opentelemetry::sdk::metrics::controllers::push( + let mut builder = controllers::basic(processors::factory( self.aggregator_selector, - self.export_selector, - exporter, - self.spawn, - self.interval, - ); + self.temporality_selector, + )) + .with_exporter(exporter); if let Some(period) = self.period { - builder = builder.with_period(period); + builder = builder.with_collect_period(period); + } + if let Some(timeout) = self.timeout { + builder = builder.with_collect_timeout(timeout) } if let Some(resource) = self.resource { builder = builder.with_resource(resource); } - if let Some(timeout) = self.timeout { - builder = builder.with_timeout(timeout) - } + let controller = builder.build(); - global::set_meter_provider(controller.provider()); + controller.start(&Context::current(), self.rt)?; + + global::set_meter_provider(controller.clone()); + Ok(controller) } } +impl fmt::Debug for OtlpMetricPipeline { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + f.debug_struct("OtlpMetricPipeline") + .field("exporter_pipeline", &self.exporter_pipeline) + .field("resource", &self.resource) + .field("period", &self.period) + .field("timeout", &self.timeout) + .finish() + } +} + enum ExportMsg { #[cfg(feature = "grpc-tonic")] Export(tonic::Request), @@ -245,8 +209,8 @@ enum ExportMsg { /// Export metrics in OTEL format. pub struct MetricsExporter { #[cfg(feature = "tokio")] - sender: Arc>>, - export_kind_selector: Arc, + sender: Mutex>, + temporality_selector: Box, metadata: Option, } @@ -259,19 +223,19 @@ impl Debug for MetricsExporter { } } -impl ExportKindFor for MetricsExporter { - fn export_kind_for(&self, descriptor: &Descriptor) -> ExportKind { - self.export_kind_selector.export_kind_for(descriptor) +impl TemporalitySelector for MetricsExporter { + fn temporality_for(&self, descriptor: &Descriptor, kind: &AggregationKind) -> Temporality { + self.temporality_selector.temporality_for(descriptor, kind) } } impl MetricsExporter { /// Create a new OTLP metrics exporter. #[cfg(feature = "grpc-tonic")] - pub fn new( + pub fn new( config: ExportConfig, mut tonic_config: TonicConfig, - export_selector: T, + temporality_selector: Box, ) -> Result { let endpoint = match std::env::var(OTEL_EXPORTER_OTLP_METRICS_ENDPOINT) { Ok(val) => val, @@ -318,42 +282,28 @@ impl MetricsExporter { })); Ok(MetricsExporter { - sender: Arc::new(Mutex::new(sender)), - export_kind_selector: Arc::new(export_selector), + sender: Mutex::new(sender), + temporality_selector, metadata: tonic_config.metadata.take(), }) } } -impl Exporter for MetricsExporter { - fn export(&self, checkpoint_set: &mut dyn CheckpointSet) -> Result<()> { +impl metrics::MetricsExporter for MetricsExporter { + fn export( + &self, + _cx: &Context, + res: &Resource, + reader: &dyn InstrumentationLibraryReader, + ) -> Result<()> { let mut resource_metrics: Vec = Vec::default(); // transform the metrics into proto. Append the resource and instrumentation library information into it. 
- checkpoint_set.try_for_each(self.export_kind_selector.as_ref(), &mut |record| { - let metric_result = record_to_metric(record, self.export_kind_selector.as_ref()); - match metric_result { - Ok(metrics) => { - resource_metrics.push(( - record.resource().clone().into(), - InstrumentationLibrary::new( - record.descriptor().instrumentation_name(), - record - .descriptor() - .instrumentation_library() - .version - .clone(), - record - .descriptor() - .instrumentation_library() - .schema_url - .clone(), - ), - metrics, - )); - Ok(()) - } - Err(err) => Err(err), - } + reader.try_for_each(&mut |library, record| { + record.try_for_each(self, &mut |record| { + let metrics = record_to_metric(record, self.temporality_selector.as_ref())?; + resource_metrics.push((res.clone().into(), library.clone(), metrics)); + Ok(()) + }) })?; let mut request = Request::new(sink(resource_metrics)); if let Some(metadata) = &self.metadata { diff --git a/opentelemetry-otlp/src/transform/metrics.rs b/opentelemetry-otlp/src/transform/metrics.rs index 093b96c206..0aa91f37a7 100644 --- a/opentelemetry-otlp/src/transform/metrics.rs +++ b/opentelemetry-otlp/src/transform/metrics.rs @@ -7,12 +7,13 @@ pub(crate) mod tonic { use opentelemetry::metrics::MetricsError; use opentelemetry::sdk::export::metrics::{ - Count, ExportKindFor, Histogram as SdkHistogram, LastValue, Max, Min, Points, Record, - Sum as SdkSum, + aggregation::{ + Count, Histogram as SdkHistogram, LastValue, Sum as SdkSum, TemporalitySelector, + }, + Record, }; use opentelemetry::sdk::metrics::aggregators::{ - ArrayAggregator, HistogramAggregator, LastValueAggregator, MinMaxSumCountAggregator, - SumAggregator, + HistogramAggregator, LastValueAggregator, SumAggregator, }; use opentelemetry::sdk::InstrumentationLibrary; use opentelemetry_proto::tonic::metrics::v1::DataPointFlags; @@ -33,7 +34,7 @@ pub(crate) mod tonic { pub(crate) fn record_to_metric( record: &Record, - export_selector: &dyn ExportKindFor, + temporality_selector: &dyn TemporalitySelector, ) -> Result { let descriptor = record.descriptor(); let aggregator = record.aggregator().ok_or(MetricsError::NoDataCollected)?; @@ -42,37 +43,16 @@ pub(crate) mod tonic { .iter() .map(|kv| kv.into()) .collect::>(); - let temporality: AggregationTemporality = - export_selector.export_kind_for(descriptor).into(); + let temporality: AggregationTemporality = temporality_selector + .temporality_for(descriptor, aggregator.aggregation().kind()) + .into(); let kind = descriptor.number_kind(); Ok(Metric { name: descriptor.name().to_string(), - description: descriptor - .description() - .cloned() - .unwrap_or_else(|| "".to_string()), + description: descriptor.description().cloned().unwrap_or_default(), unit: descriptor.unit().unwrap_or("").to_string(), data: { - if let Some(array) = aggregator.as_any().downcast_ref::() { - if let Ok(points) = array.points() { - Some(Data::Gauge(Gauge { - data_points: points - .into_iter() - .map(|val| NumberDataPoint { - flags: DataPointFlags::FlagNone as u32, - attributes: attributes.clone(), - start_time_unix_nano: to_nanos(*record.start_time()), - time_unix_nano: to_nanos(*record.end_time()), - value: Some(number_data_point::Value::from_number(val, kind)), - exemplars: Vec::default(), - }) - .collect(), - })) - } else { - None - } - } else if let Some(last_value) = - aggregator.as_any().downcast_ref::() + if let Some(last_value) = aggregator.as_any().downcast_ref::() { Some({ let (val, sample_time) = last_value.last_value()?; @@ -129,34 +109,6 @@ pub(crate) mod tonic { 
aggregation_temporality: temporality as i32, }) }) - } else if let Some(min_max_sum_count) = aggregator - .as_any() - .downcast_ref::() - { - Some({ - let (min, max, sum, count) = ( - min_max_sum_count.min()?, - min_max_sum_count.max()?, - min_max_sum_count.sum()?, - min_max_sum_count.count()?, - ); - let buckets = vec![min.to_u64(kind), max.to_u64(kind)]; - let bounds = vec![0.0, 100.0]; - Data::Histogram(Histogram { - data_points: vec![HistogramDataPoint { - flags: DataPointFlags::FlagNone as u32, - attributes, - start_time_unix_nano: to_nanos(*record.start_time()), - time_unix_nano: to_nanos(*record.end_time()), - count, - sum: sum.to_f64(kind), - bucket_counts: buckets, - explicit_bounds: bounds, - exemplars: Vec::default(), - }], - aggregation_temporality: temporality as i32, - }) - }) } else { None } @@ -288,14 +240,17 @@ mod tests { use crate::transform::metrics::tonic::merge; use crate::transform::{record_to_metric, sink, ResourceWrapper}; use opentelemetry::attributes::AttributeSet; - use opentelemetry::metrics::{ - Descriptor, InstrumentKind, MetricsError, Number, NumberKind, - }; - use opentelemetry::sdk::export::metrics::{record, Aggregator, ExportKindSelector}; + use opentelemetry::metrics::MetricsError; + use opentelemetry::sdk::export::metrics::aggregation::cumulative_temporality_selector; + use opentelemetry::sdk::export::metrics::record; use opentelemetry::sdk::metrics::aggregators::{ - histogram, last_value, min_max_sum_count, SumAggregator, + histogram, last_value, Aggregator, SumAggregator, + }; + use opentelemetry::sdk::metrics::sdk_api::{ + Descriptor, InstrumentKind, Number, NumberKind, }; use opentelemetry::sdk::{InstrumentationLibrary, Resource}; + use opentelemetry::Context; use opentelemetry_proto::tonic::metrics::v1::DataPointFlags; use opentelemetry_proto::tonic::{ common::v1::{any_value, AnyValue, KeyValue}, @@ -476,6 +431,7 @@ mod tests { #[test] fn test_record_to_metric() -> Result<(), MetricsError> { + let cx = Context::new(); let attributes = vec![("test1", "value1"), ("test2", "value2")]; let str_kv_attributes = attributes .iter() @@ -488,10 +444,6 @@ mod tests { .cloned() .map(|(k, v)| opentelemetry::KeyValue::new(k, v)), ); - let resource = Resource::new(vec![ - opentelemetry::KeyValue::new("process", "rust"), - opentelemetry::KeyValue::new("runtime", "sync"), - ]); let start_time = datetime!(2020-12-25 10:10:0 UTC); // unit nano 1608891000000000000 let end_time = datetime!(2020-12-25 10:10:30 UTC); // unix nano 1608891030000000000 @@ -499,25 +451,23 @@ mod tests { { let descriptor = Descriptor::new( "test".to_string(), - "test", - None, - None, InstrumentKind::Counter, NumberKind::I64, + None, + None, ); let aggregator = SumAggregator::default(); let val = Number::from(12_i64); - aggregator.update(&val, &descriptor)?; + aggregator.update(&cx, &val, &descriptor)?; let wrapped_aggregator: Arc = Arc::new(aggregator); let record = record( &descriptor, &attribute_set, - &resource, Some(&wrapped_aggregator), start_time.into(), end_time.into(), ); - let metric = record_to_metric(&record, &ExportKindSelector::Cumulative)?; + let metric = record_to_metric(&record, &cumulative_temporality_selector())?; let expect = Metric { name: "test".to_string(), @@ -544,27 +494,25 @@ mod tests { { let descriptor = Descriptor::new( "test".to_string(), - "test", + InstrumentKind::GaugeObserver, + NumberKind::I64, None, None, - InstrumentKind::ValueObserver, - NumberKind::I64, ); let aggregator = last_value(); let val1 = Number::from(12_i64); let val2 = Number::from(14_i64); - 
aggregator.update(&val1, &descriptor)?; - aggregator.update(&val2, &descriptor)?; + aggregator.update(&cx, &val1, &descriptor)?; + aggregator.update(&cx, &val2, &descriptor)?; let wrapped_aggregator: Arc = Arc::new(aggregator); let record = record( &descriptor, &attribute_set, - &resource, Some(&wrapped_aggregator), start_time.into(), end_time.into(), ); - let metric = record_to_metric(&record, &ExportKindSelector::Cumulative)?; + let metric = record_to_metric(&record, &cumulative_temporality_selector())?; let expect = Metric { name: "test".to_string(), @@ -592,81 +540,30 @@ mod tests { assert_eq!(expect, metric); } - // MinMaxSumCount - { - let descriptor = Descriptor::new( - "test".to_string(), - "test", - None, - None, - InstrumentKind::UpDownSumObserver, - NumberKind::I64, - ); - let aggregator = min_max_sum_count(&descriptor); - let vals = vec![1i64.into(), 2i64.into(), 3i64.into()]; - for val in vals.iter() { - aggregator.update(val, &descriptor)?; - } - let wrapped_aggregator: Arc = Arc::new(aggregator); - let record = record( - &descriptor, - &attribute_set, - &resource, - Some(&wrapped_aggregator), - start_time.into(), - end_time.into(), - ); - let metric = record_to_metric(&record, &ExportKindSelector::Cumulative)?; - - let expect = Metric { - name: "test".to_string(), - description: "".to_string(), - unit: "".to_string(), - data: Some(Data::Histogram(Histogram { - data_points: vec![HistogramDataPoint { - flags: DataPointFlags::FlagNone as u32, - attributes: str_kv_attributes.clone(), - start_time_unix_nano: 1608891000000000000, - time_unix_nano: 1608891030000000000, - count: 3, - sum: 6f64, - bucket_counts: vec![1, 3], - explicit_bounds: vec![0.0, 100.0], - exemplars: vec![], - }], - aggregation_temporality: 2, - })), - }; - - assert_eq!(expect, metric); - } - // Histogram { let descriptor = Descriptor::new( "test".to_string(), - "test", - None, - None, InstrumentKind::Histogram, NumberKind::I64, + None, + None, ); let bound = [0.1, 0.2, 0.3]; - let aggregator = histogram(&descriptor, &bound); + let aggregator = histogram(&bound); let vals = vec![1i64.into(), 2i64.into(), 3i64.into()]; for val in vals.iter() { - aggregator.update(val, &descriptor)?; + aggregator.update(&cx, val, &descriptor)?; } let wrapped_aggregator: Arc = Arc::new(aggregator); let record = record( &descriptor, &attribute_set, - &resource, Some(&wrapped_aggregator), start_time.into(), end_time.into(), ); - let metric = record_to_metric(&record, &ExportKindSelector::Cumulative)?; + let metric = record_to_metric(&record, &cumulative_temporality_selector())?; let expect = Metric { name: "test".to_string(), diff --git a/opentelemetry-prometheus/src/lib.rs b/opentelemetry-prometheus/src/lib.rs index 5aafd25ffc..91d68b2ae7 100644 --- a/opentelemetry-prometheus/src/lib.rs +++ b/opentelemetry-prometheus/src/lib.rs @@ -3,16 +3,26 @@ //! ### Prometheus Exporter Example //! //! ```rust -//! use opentelemetry::{global, KeyValue, sdk::Resource}; +//! use opentelemetry::{global, Context, KeyValue, sdk::Resource}; +//! use opentelemetry::sdk::export::metrics::aggregation; +//! use opentelemetry::sdk::metrics::{controllers, processors, selectors}; //! use opentelemetry_prometheus::PrometheusExporter; //! use prometheus::{TextEncoder, Encoder}; //! //! fn init_meter() -> PrometheusExporter { -//! opentelemetry_prometheus::exporter() -//! .with_resource(Resource::new(vec![KeyValue::new("R", "V")])) -//! .init() +//! let controller = controllers::basic( +//! processors::factory( +//! 
selectors::simple::histogram([1.0, 2.0, 5.0, 10.0, 20.0, 50.0]), +//! aggregation::cumulative_temporality_selector(), +//! ) +//! .with_memory(true), +//! ) +//! .build(); +//! +//! opentelemetry_prometheus::exporter(controller).init() //! } //! +//! let cx = Context::current(); //! let exporter = init_meter(); //! let meter = global::meter("my-app"); //! @@ -26,8 +36,8 @@ //! .with_description("Records values") //! .init(); //! -//! counter.add(100, &[KeyValue::new("key", "value")]); -//! recorder.record(100, &[KeyValue::new("key", "value")]); +//! counter.add(&cx, 100, &[KeyValue::new("key", "value")]); +//! recorder.record(&cx, 100, &[KeyValue::new("key", "value")]); //! //! // Encode data as text or protobuf //! let encoder = TextEncoder::new(); @@ -68,177 +78,58 @@ )] #![cfg_attr(test, deny(warnings))] +use opentelemetry::metrics::MeterProvider; +use opentelemetry::sdk::export::metrics::aggregation::{ + self, AggregationKind, Temporality, TemporalitySelector, +}; +use opentelemetry::sdk::export::metrics::InstrumentationLibraryReader; +use opentelemetry::sdk::metrics::sdk_api::Descriptor; #[cfg(feature = "prometheus-encoding")] pub use prometheus::{Encoder, TextEncoder}; use opentelemetry::global; use opentelemetry::sdk::{ export::metrics::{ - AggregatorSelector, CheckpointSet, ExportKindSelector, Histogram, LastValue, Record, Sum, + aggregation::{Histogram, LastValue, Sum}, + Record, }, metrics::{ aggregators::{HistogramAggregator, LastValueAggregator, SumAggregator}, - controllers, - selectors::simple::Selector, - PullController, + controllers::BasicController, + sdk_api::NumberKind, }, Resource, }; -use opentelemetry::{ - attributes, - metrics::{registry::RegistryMeterProvider, MetricsError, NumberKind}, - Key, Value, -}; -use std::env; -use std::num::ParseIntError; +use opentelemetry::{attributes, metrics::MetricsError, Context, Key, Value}; use std::sync::{Arc, Mutex}; -use std::time::Duration; mod sanitize; use sanitize::sanitize; -/// Cache disabled by default. -const DEFAULT_CACHE_PERIOD: Duration = Duration::from_secs(0); - -const EXPORT_KIND_SELECTOR: ExportKindSelector = ExportKindSelector::Cumulative; - -/// Default host used by the Prometheus Exporter when env variable not found -const DEFAULT_EXPORTER_HOST: &str = "0.0.0.0"; - -/// Default port used by the Prometheus Exporter when env variable not found -const DEFAULT_EXPORTER_PORT: u16 = 9464; - -/// The hostname for the Promtheus Exporter -const ENV_EXPORTER_HOST: &str = "OTEL_EXPORTER_PROMETHEUS_HOST"; - -/// The port for the Prometheus Exporter -const ENV_EXPORTER_PORT: &str = "OTEL_EXPORTER_PROMETHEUS_PORT"; - /// Create a new prometheus exporter builder. -pub fn exporter() -> ExporterBuilder { - ExporterBuilder::default() +pub fn exporter(controller: BasicController) -> ExporterBuilder { + ExporterBuilder::new(controller) } /// Configuration for the prometheus exporter. #[derive(Debug)] pub struct ExporterBuilder { - /// The OpenTelemetry `Resource` associated with all Meters - /// created by the pull controller. - resource: Option, - - /// The period which a recently-computed result will be returned without - /// gathering metric data again. - /// - /// If the period is zero, caching of the result is disabled, which is the - /// prometheus default. - cache_period: Option, - - /// The default summary quantiles to use. Use nil to specify the system-default - /// summary quantiles. - default_summary_quantiles: Option>, - - /// Defines the default histogram bucket boundaries. 
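The builder defaults removed just below have no one-to-one replacement; histogram bucket boundaries now travel with the aggregator selector handed to `processors::factory`, as in the module docs above (values illustrative):

```rust
use opentelemetry::sdk::metrics::selectors;

// Previously: ExporterBuilder::with_default_histogram_boundaries(vec![0.5, 0.9, 0.99]).
let selector = selectors::simple::histogram(vec![0.5, 0.9, 0.99]);
// `selector` is then passed to processors::factory(..) when building the controller.
```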
- default_histogram_boundaries: Option>, - /// The prometheus registry that will be used to register instruments. /// /// If not set a new empty `Registry` is created. registry: Option, - /// The host used by the prometheus exporter - /// - /// If not set it will be defaulted to all addresses "0.0.0.0" - host: Option, - - /// The port used by the prometheus exporter - /// - /// If not set it will be defaulted to port 9464 - port: Option, - - /// The aggregator selector used by the prometheus exporter. - aggegator_selector: Option>, -} - -impl Default for ExporterBuilder { - fn default() -> Self { - let port: Option = match env::var(ENV_EXPORTER_PORT) { - Err(_) => None, - Ok(p_str) => p_str - .parse() - .map_err(|err: ParseIntError| { - let err_msg = format!( - "Unable to parse environment variable {}=\"{}\" - {}. Falling back to default port {}. ", - ENV_EXPORTER_PORT, p_str, err, DEFAULT_EXPORTER_PORT - ); - global::handle_error(global::Error::Other(err_msg)); - err - }) - .ok(), - }; - - ExporterBuilder { - resource: None, - cache_period: None, - default_histogram_boundaries: None, - default_summary_quantiles: None, - registry: None, - host: env::var(ENV_EXPORTER_HOST).ok().filter(|s| !s.is_empty()), - port, - aggegator_selector: None, - } - } + /// The metrics controller + controller: BasicController, } impl ExporterBuilder { - /// Set the resource to be associated with all `Meter`s for this exporter - pub fn with_resource(self, resource: Resource) -> Self { - ExporterBuilder { - resource: Some(resource), - ..self - } - } - - /// Set the period which a recently-computed result will be returned without - /// gathering metric data again. - /// - /// If the period is zero, caching of the result is disabled (default). - pub fn with_cache_period(self, period: Duration) -> Self { - ExporterBuilder { - cache_period: Some(period), - ..self - } - } - - /// Set the default summary quantiles to be used by exported prometheus histograms - pub fn with_default_summary_quantiles(self, quantiles: Vec) -> Self { + /// Create a new exporter builder with a given controller + pub fn new(controller: BasicController) -> Self { ExporterBuilder { - default_summary_quantiles: Some(quantiles), - ..self - } - } - - /// Set the default boundaries to be used by exported prometheus histograms - pub fn with_default_histogram_boundaries(self, boundaries: Vec) -> Self { - ExporterBuilder { - default_histogram_boundaries: Some(boundaries), - ..self - } - } - - /// Set the host for the prometheus exporter - pub fn with_host(self, host: String) -> Self { - ExporterBuilder { - host: Some(host), - ..self - } - } - - /// Set the port for the prometheus exporter - pub fn with_port(self, port: u16) -> Self { - ExporterBuilder { - port: Some(port), - ..self + registry: None, + controller, } } @@ -250,47 +141,12 @@ impl ExporterBuilder { } } - /// Set the aggregation selector for the prometheus exporter - pub fn with_aggregator_selector(self, aggregator_selector: T) -> Self - where - T: AggregatorSelector + Send + Sync + 'static, - { - ExporterBuilder { - aggegator_selector: Some(Box::new(aggregator_selector)), - ..self - } - } - /// Sets up a complete export pipeline with the recommended setup, using the /// recommended selector and standard processor. 
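Since the exporter no longer knows about a host or port, serving the scrape endpoint is the application's responsibility; `try_init` below only registers the collector with the (possibly user-supplied) registry. A sketch of producing a scrape response body, assuming `with_registry` and `registry()` keep the shapes shown in this file:

```rust
use opentelemetry::sdk::metrics::controllers::BasicController;
use prometheus::{Encoder, TextEncoder};

fn scrape_body(controller: BasicController) -> Vec<u8> {
    // Reuse an application-owned registry rather than a fresh one.
    let registry = prometheus::Registry::new();
    let exporter = opentelemetry_prometheus::exporter(controller)
        .with_registry(registry)
        .init();

    // Serve this buffer from any HTTP handler; collection happens during gather().
    let mut buffer = Vec::new();
    TextEncoder::new()
        .encode(&exporter.registry().gather(), &mut buffer)
        .expect("text encoding succeeds");
    buffer
}
```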
pub fn try_init(self) -> Result { let registry = self.registry.unwrap_or_else(prometheus::Registry::new); - // reserved for future use cases - let _default_summary_quantiles = self - .default_summary_quantiles - .unwrap_or_else(|| vec![0.5, 0.9, 0.99]); - let default_histogram_boundaries = self - .default_histogram_boundaries - .unwrap_or_else(|| vec![0.5, 0.9, 0.99]); - let selector = self - .aggegator_selector - .unwrap_or_else(|| Box::new(Selector::Histogram(default_histogram_boundaries))); - let mut controller_builder = controllers::pull(selector, Box::new(EXPORT_KIND_SELECTOR)) - .with_cache_period(self.cache_period.unwrap_or(DEFAULT_CACHE_PERIOD)) - .with_memory(true); - if let Some(resource) = self.resource { - controller_builder = controller_builder.with_resource(resource); - } - let controller = controller_builder.build(); - global::set_meter_provider(controller.provider()); - - let host = self - .host - .unwrap_or_else(|| DEFAULT_EXPORTER_HOST.to_string()); - let port = self.port.unwrap_or(DEFAULT_EXPORTER_PORT); - - let controller = Arc::new(Mutex::new(controller)); + let controller = Arc::new(Mutex::new(self.controller)); let collector = Collector::with_controller(controller.clone()); registry .register(Box::new(collector)) @@ -299,8 +155,6 @@ impl ExporterBuilder { Ok(PrometheusExporter { registry, controller, - host, - port, }) } @@ -322,9 +176,7 @@ impl ExporterBuilder { #[derive(Clone, Debug)] pub struct PrometheusExporter { registry: prometheus::Registry, - controller: Arc>, - host: String, - port: u16, + controller: Arc>, } impl PrometheusExporter { @@ -334,31 +186,27 @@ impl PrometheusExporter { } /// Get this exporter's provider. - pub fn provider(&self) -> Result { + pub fn meter_provider(&self) -> Result { self.controller .lock() .map_err(Into::into) - .map(|locked| locked.provider()) - } - - /// Get the exporters host for prometheus. - pub fn host(&self) -> &str { - self.host.as_str() - } - - /// Get the exporters port for prometheus. - pub fn port(&self) -> u16 { - self.port + .map(|locked| locked.clone()) } } #[derive(Debug)] struct Collector { - controller: Arc>, + controller: Arc>, +} + +impl TemporalitySelector for Collector { + fn temporality_for(&self, descriptor: &Descriptor, kind: &AggregationKind) -> Temporality { + aggregation::cumulative_temporality_selector().temporality_for(descriptor, kind) + } } impl Collector { - fn with_controller(controller: Arc>) -> Self { + fn with_controller(controller: Arc>) -> Self { Collector { controller } } } @@ -371,37 +219,39 @@ impl prometheus::core::Collector for Collector { /// Collect all otel metrics and convert to prometheus metrics. 
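The rewritten `collect` below locks the shared controller, runs a collection pass, and then dispatches each checkpointed aggregator by downcasting. That dispatch, reduced to its skeleton (a sketch; the string labels are stand-ins for the metric-family constructors used below):

```rust
use std::any::Any;

use opentelemetry::sdk::metrics::aggregators::{
    HistogramAggregator, LastValueAggregator, SumAggregator,
};

fn family_for(agg: &dyn Any) -> &'static str {
    if agg.downcast_ref::<HistogramAggregator>().is_some() {
        "histogram"
    } else if agg.downcast_ref::<SumAggregator>().is_some() {
        "counter" // monotonic or not decides counter vs. untyped
    } else if agg.downcast_ref::<LastValueAggregator>().is_some() {
        "gauge"
    } else {
        "skipped" // unknown aggregators are ignored
    }
}
```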
fn collect(&self) -> Vec { - if let Ok(mut controller) = self.controller.lock() { + if let Ok(controller) = self.controller.lock() { let mut metrics = Vec::new(); - if let Err(err) = controller.collect() { + if let Err(err) = controller.collect(&Context::current()) { global::handle_error(err); return metrics; } - if let Err(err) = controller.try_for_each(&EXPORT_KIND_SELECTOR, &mut |record| { - let agg = record.aggregator().ok_or(MetricsError::NoDataCollected)?; - let number_kind = record.descriptor().number_kind(); - let instrument_kind = record.descriptor().instrument_kind(); - - let desc = get_metric_desc(record); - let labels = get_metric_labels(record); - - if let Some(hist) = agg.as_any().downcast_ref::() { - metrics.push(build_histogram(hist, number_kind, desc, labels)?); - } else if let Some(sum) = agg.as_any().downcast_ref::() { - let counter = if instrument_kind.monotonic() { - build_monotonic_counter(sum, number_kind, desc, labels)? - } else { - build_non_monotonic_counter(sum, number_kind, desc, labels)? - }; - - metrics.push(counter); - } else if let Some(last) = agg.as_any().downcast_ref::() { - metrics.push(build_last_value(last, number_kind, desc, labels)?); - } - - Ok(()) + if let Err(err) = controller.try_for_each(&mut |_library, reader| { + reader.try_for_each(self, &mut |record| { + let agg = record.aggregator().ok_or(MetricsError::NoDataCollected)?; + let number_kind = record.descriptor().number_kind(); + let instrument_kind = record.descriptor().instrument_kind(); + + let desc = get_metric_desc(record); + let labels = get_metric_labels(record, controller.resource()); + + if let Some(hist) = agg.as_any().downcast_ref::() { + metrics.push(build_histogram(hist, number_kind, desc, labels)?); + } else if let Some(sum) = agg.as_any().downcast_ref::() { + let counter = if instrument_kind.monotonic() { + build_monotonic_counter(sum, number_kind, desc, labels)? + } else { + build_non_monotonic_counter(sum, number_kind, desc, labels)? + }; + + metrics.push(counter); + } else if let Some(last) = agg.as_any().downcast_ref::() { + metrics.push(build_last_value(last, number_kind, desc, labels)?); + } + + Ok(()) + }) }) { global::handle_error(err); } @@ -532,10 +382,13 @@ fn build_label_pair(key: &Key, value: &Value) -> prometheus::proto::LabelPair { lp } -fn get_metric_labels(record: &Record<'_>) -> Vec { +fn get_metric_labels( + record: &Record<'_>, + resource: &Resource, +) -> Vec { // Duplicate keys are resolved by taking the record label value over // the resource value. 
- let iter = attributes::merge_iters(record.attributes().iter(), record.resource().iter()); + let iter = attributes::merge_iters(record.attributes().iter(), resource.iter()); iter.map(|(key, value)| build_label_pair(key, value)) .collect() } @@ -554,37 +407,3 @@ fn get_metric_desc(record: &Record<'_>) -> PrometheusMetricDesc { .unwrap_or_else(|| desc.name().to_string()); PrometheusMetricDesc { name, help } } - -#[cfg(test)] -mod tests { - use std::env; - - use super::*; - - #[test] - fn test_exporter_builder_default() { - env::remove_var(ENV_EXPORTER_HOST); - env::remove_var(ENV_EXPORTER_PORT); - let exporter = ExporterBuilder::default().init(); - assert_eq!(exporter.host(), "0.0.0.0"); - assert_eq!(exporter.port(), 9464); - - env::set_var(ENV_EXPORTER_HOST, "prometheus-test"); - env::set_var(ENV_EXPORTER_PORT, "9000"); - let exporter = ExporterBuilder::default().init(); - assert_eq!(exporter.host(), "prometheus-test"); - assert_eq!(exporter.port(), 9000); - - env::set_var(ENV_EXPORTER_HOST, ""); - env::set_var(ENV_EXPORTER_PORT, ""); - let exporter = ExporterBuilder::default().init(); - assert_eq!(exporter.host(), "0.0.0.0"); - assert_eq!(exporter.port(), 9464); - - env::set_var(ENV_EXPORTER_HOST, ""); - env::set_var(ENV_EXPORTER_PORT, "not_a_number"); - let exporter = ExporterBuilder::default().init(); - assert_eq!(exporter.host(), "0.0.0.0"); - assert_eq!(exporter.port(), 9464); - } -} diff --git a/opentelemetry-prometheus/tests/integration_test.rs b/opentelemetry-prometheus/tests/integration_test.rs index fc951edc83..0fae5e3ab1 100644 --- a/opentelemetry-prometheus/tests/integration_test.rs +++ b/opentelemetry-prometheus/tests/integration_test.rs @@ -1,27 +1,37 @@ +use opentelemetry::sdk::export::metrics::aggregation; +use opentelemetry::sdk::metrics::{controllers, processors, selectors}; use opentelemetry::sdk::Resource; -use opentelemetry::{ - metrics::{BatchObserverResult, MeterProvider, ObserverResult}, - KeyValue, -}; +use opentelemetry::Context; +use opentelemetry::{metrics::MeterProvider, KeyValue}; use opentelemetry_prometheus::PrometheusExporter; use prometheus::{Encoder, TextEncoder}; #[test] fn free_unused_instruments() { - let exporter = opentelemetry_prometheus::exporter() - .with_default_histogram_boundaries(vec![-0.5, 1.0]) - .with_resource(Resource::new(vec![KeyValue::new("R", "V")])) - .init(); + let cx = Context::new(); + let controller = controllers::basic( + processors::factory( + selectors::simple::histogram(vec![-0.5, 1.0]), + aggregation::cumulative_temporality_selector(), + ) + .with_memory(true), + ) + .with_resource(Resource::new(vec![KeyValue::new("R", "V")])) + .build(); + let exporter = opentelemetry_prometheus::exporter(controller).init(); let mut expected = Vec::new(); { - let meter = exporter.provider().unwrap().meter("test", None, None); + let meter = exporter + .meter_provider() + .unwrap() + .versioned_meter("test", None, None); let counter = meter.f64_counter("counter").init(); let attributes = vec![KeyValue::new("A", "B"), KeyValue::new("C", "D")]; - counter.add(10.0, &attributes); - counter.add(5.3, &attributes); + counter.add(&cx, 10.0, &attributes); + counter.add(&cx, 5.3, &attributes); expected.push(r#"counter{A="B",C="D",R="V"} 15.3"#); } @@ -33,42 +43,24 @@ fn free_unused_instruments() { compare_export(&exporter, expected); } -#[test] -fn batch() { - let exporter = opentelemetry_prometheus::exporter() - .with_resource(Resource::new(vec![KeyValue::new("R", "V")])) - .init(); - let meter = exporter.provider().unwrap().meter("test", None, None); 
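The `batch()` test deleted here exercised the removed batch-observer API; its modern equivalent is an observable instrument plus `register_callback`, exactly as the updated `test_add` below does. A sketch reusing this test's instrument name (hypothetical outside the test):

```rust
use opentelemetry::metrics::{Meter, Result};
use opentelemetry::{Context, KeyValue};

fn register_gauge(meter: &Meter) -> Result<()> {
    // The observable gauge is moved into the callback and reported on each collection.
    let gauge = meter.u64_observable_gauge("uint_observer").init();
    meter.register_callback(move |cx: &Context| {
        gauge.observe(cx, 2, &[KeyValue::new("A", "B")]);
    })
}
```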
- let mut expected = Vec::new(); - - meter.batch_observer(|batch| { - let uint_observer = batch.u64_value_observer("uint_observer").init(); - let float_observer = batch.f64_value_observer("float_observer").init(); - - move |result: BatchObserverResult| { - result.observe( - &[KeyValue::new("A", "B")], - &[ - uint_observer.observation(2), - float_observer.observation(3.1), - ], - ); - } - }); - - expected.push(r#"uint_observer{A="B",R="V"} 2"#); - expected.push(r#"float_observer{A="B",R="V"} 3.1"#); - compare_export(&exporter, expected); -} - #[test] fn test_add() { - let exporter = opentelemetry_prometheus::exporter() - .with_default_histogram_boundaries(vec![-0.5, 1.0]) - .with_resource(Resource::new(vec![KeyValue::new("R", "V")])) - .init(); - - let meter = exporter.provider().unwrap().meter("test", None, None); + let cx = Context::new(); + let controller = controllers::basic( + processors::factory( + selectors::simple::histogram(vec![-0.5, 1.0]), + aggregation::cumulative_temporality_selector(), + ) + .with_memory(true), + ) + .with_resource(Resource::new(vec![KeyValue::new("R", "V")])) + .build(); + let exporter = opentelemetry_prometheus::exporter(controller).init(); + + let meter = exporter + .meter_provider() + .unwrap() + .versioned_meter("test", None, None); let up_down_counter = meter.f64_up_down_counter("updowncounter").init(); let counter = meter.f64_counter("counter").init(); @@ -78,24 +70,23 @@ fn test_add() { let mut expected = Vec::new(); - counter.add(10.0, &attributes); - counter.add(5.3, &attributes); + counter.add(&cx, 10.0, &attributes); + counter.add(&cx, 5.3, &attributes); expected.push(r#"counter{A="B",C="D",R="V"} 15.3"#); let cb_attributes = attributes.clone(); - let _observer = meter - .i64_value_observer("intobserver", move |result: ObserverResult| { - result.observe(1, cb_attributes.as_ref()) - }) - .init(); + let gauge = meter.i64_observable_gauge("intgauge").init(); + meter + .register_callback(move |cx| gauge.observe(cx, 1, cb_attributes.as_ref())) + .unwrap(); - expected.push(r#"intobserver{A="B",C="D",R="V"} 1"#); + expected.push(r#"intgauge{A="B",C="D",R="V"} 1"#); - histogram.record(-0.6, &attributes); - histogram.record(-0.4, &attributes); - histogram.record(0.6, &attributes); - histogram.record(20.0, &attributes); + histogram.record(&cx, -0.6, &attributes); + histogram.record(&cx, -0.4, &attributes); + histogram.record(&cx, 0.6, &attributes); + histogram.record(&cx, 20.0, &attributes); expected.push(r#"my_histogram_bucket{A="B",C="D",R="V",le="+Inf"} 4"#); expected.push(r#"my_histogram_bucket{A="B",C="D",R="V",le="-0.5"} 1"#); @@ -103,8 +94,8 @@ fn test_add() { expected.push(r#"my_histogram_count{A="B",C="D",R="V"} 4"#); expected.push(r#"my_histogram_sum{A="B",C="D",R="V"} 19.6"#); - up_down_counter.add(10.0, &attributes); - up_down_counter.add(-3.2, &attributes); + up_down_counter.add(&cx, 10.0, &attributes); + up_down_counter.add(&cx, -3.2, &attributes); expected.push(r#"updowncounter{A="B",C="D",R="V"} 6.8"#); @@ -113,24 +104,34 @@ fn test_add() { #[test] fn test_sanitization() { - let exporter = opentelemetry_prometheus::exporter() - .with_default_histogram_boundaries(vec![-0.5, 1.0]) - .with_resource(Resource::new(vec![KeyValue::new( - "service.name", - "Test Service", - )])) - .init(); - let meter = exporter.provider().unwrap().meter("test", None, None); + let cx = Context::new(); + let controller = controllers::basic( + processors::factory( + selectors::simple::histogram(vec![-0.5, 1.0]), + aggregation::cumulative_temporality_selector(), + ) + 
.with_memory(true), + ) + .with_resource(Resource::new(vec![KeyValue::new( + "service.name", + "Test Service", + )])) + .build(); + let exporter = opentelemetry_prometheus::exporter(controller).init(); + let meter = exporter + .meter_provider() + .unwrap() + .versioned_meter("test", None, None); let histogram = meter.f64_histogram("http.server.duration").init(); let attributes = vec![ KeyValue::new("http.method", "GET"), KeyValue::new("http.host", "server"), ]; - histogram.record(-0.6, &attributes); - histogram.record(-0.4, &attributes); - histogram.record(0.6, &attributes); - histogram.record(20.0, &attributes); + histogram.record(&cx, -0.6, &attributes); + histogram.record(&cx, -0.4, &attributes); + histogram.record(&cx, 0.6, &attributes); + histogram.record(&cx, 20.0, &attributes); let expected = vec![ r#"http_server_duration_bucket{http_host="server",http_method="GET",service_name="Test Service",le="+Inf"} 4"#, diff --git a/opentelemetry-proto/src/transform/metrics.rs b/opentelemetry-proto/src/transform/metrics.rs index 7c34d32876..1c64139a2d 100644 --- a/opentelemetry-proto/src/transform/metrics.rs +++ b/opentelemetry-proto/src/transform/metrics.rs @@ -9,8 +9,13 @@ pub mod tonic { common::v1::KeyValue, metrics::v1::{number_data_point, AggregationTemporality}, }; - use opentelemetry::metrics::{Number, NumberKind}; - use opentelemetry::sdk::export::metrics::ExportKind; + use opentelemetry::{ + metrics::MetricsError, + sdk::{ + export::metrics::aggregation::Temporality, + metrics::sdk_api::{Number, NumberKind}, + }, + }; use opentelemetry::{Key, Value}; @@ -40,11 +45,18 @@ pub mod tonic { } } - impl From for AggregationTemporality { - fn from(kind: ExportKind) -> Self { - match kind { - ExportKind::Cumulative => AggregationTemporality::Cumulative, - ExportKind::Delta => AggregationTemporality::Delta, + impl From for AggregationTemporality { + fn from(temporality: Temporality) -> Self { + match temporality { + Temporality::Cumulative => AggregationTemporality::Cumulative, + Temporality::Delta => AggregationTemporality::Delta, + other => { + opentelemetry::global::handle_error(MetricsError::Other(format!( + "Unknown temporality {:?}, using default instead.", + other + ))); + AggregationTemporality::Cumulative + } } } } diff --git a/opentelemetry-sdk/Cargo.toml b/opentelemetry-sdk/Cargo.toml index d9077436bd..01474e56cd 100644 --- a/opentelemetry-sdk/Cargo.toml +++ b/opentelemetry-sdk/Cargo.toml @@ -4,7 +4,7 @@ version = "0.1.0" edition = "2018" [dependencies] -async-std = { version = "= 1.8.0", features = ["unstable"], optional = true } +async-std = { version = "= 1.10.0", features = ["unstable"], optional = true } async-trait = { version = "0.1", optional = true } crossbeam-channel = { version = "0.5", optional = true } dashmap = { version = "4.0.1", optional = true } @@ -54,8 +54,3 @@ required-features = ["rt-tokio", "testing"] name = "metric" harness = false required-features = ["metrics"] - -[[bench]] -name = "ddsketch" -harness = false -required-features = ["metrics"] diff --git a/opentelemetry-sdk/benches/ddsketch.rs b/opentelemetry-sdk/benches/ddsketch.rs deleted file mode 100644 index ec64c79b8b..0000000000 --- a/opentelemetry-sdk/benches/ddsketch.rs +++ /dev/null @@ -1,71 +0,0 @@ -use criterion::{criterion_group, criterion_main, Criterion}; -use opentelemetry_api::metrics::{Descriptor, InstrumentKind, Number, NumberKind}; -use opentelemetry_sdk::{ - export::metrics::Aggregator, - metrics::aggregators::{ArrayAggregator, DdSketchAggregator, DdSketchConfig}, -}; -use rand::Rng; -use 
std::sync::Arc; - -fn generate_normal_data(num: usize) -> Vec { - let mut data = Vec::with_capacity(num); - for _ in 0..num { - data.push(rand::thread_rng().gen_range(-100..10000) as f64); - } - data -} - -fn ddsketch(data: Vec) { - let aggregator = - DdSketchAggregator::new(&DdSketchConfig::new(0.001, 2048, 1e-9), NumberKind::F64); - let descriptor = Descriptor::new( - "test".to_string(), - "test", - None, - None, - InstrumentKind::Histogram, - NumberKind::F64, - ); - for f in data { - aggregator.update(&Number::from(f), &descriptor).unwrap(); - } - let new_aggregator: Arc<(dyn Aggregator + Send + Sync)> = Arc::new(DdSketchAggregator::new( - &DdSketchConfig::new(0.001, 2048, 1e-9), - NumberKind::F64, - )); - aggregator - .synchronized_move(&new_aggregator, &descriptor) - .unwrap(); -} - -fn array(data: Vec) { - let aggregator = ArrayAggregator::default(); - let descriptor = Descriptor::new( - "test".to_string(), - "test", - None, - None, - InstrumentKind::Histogram, - NumberKind::F64, - ); - for f in data { - aggregator.update(&Number::from(f), &descriptor).unwrap(); - } - let new_aggregator: Arc<(dyn Aggregator + Send + Sync)> = Arc::new(ArrayAggregator::default()); - aggregator - .synchronized_move(&new_aggregator, &descriptor) - .unwrap(); -} - -pub fn histogram(c: &mut Criterion) { - let data = generate_normal_data(5000); - c.bench_function("ddsketch", |b| { - b.iter(|| { - ddsketch(data.clone()); - }) - }); - c.bench_function("array", |b| b.iter(|| array(data.clone()))); -} - -criterion_group!(benches, histogram); -criterion_main!(benches); diff --git a/opentelemetry-sdk/benches/metric.rs b/opentelemetry-sdk/benches/metric.rs index aab933acfc..2551734f30 100644 --- a/opentelemetry-sdk/benches/metric.rs +++ b/opentelemetry-sdk/benches/metric.rs @@ -2,13 +2,14 @@ use criterion::{ criterion_group, criterion_main, measurement::Measurement, BenchmarkGroup, BenchmarkId, Criterion, }; -use opentelemetry_api::{ - metrics::{Descriptor, Meter}, - Key, KeyValue, -}; +use opentelemetry_api::{metrics::Meter, Context, InstrumentationLibrary, Key, KeyValue}; use opentelemetry_sdk::{ export::metrics::{AggregatorSelector, Processor}, - metrics::{accumulator, aggregators}, + metrics::{ + accumulator, + aggregators::{self, Aggregator}, + sdk_api::{wrap_meter_core, Descriptor}, + }, }; use rand::{rngs, Rng}; use std::cell::RefCell; @@ -18,42 +19,16 @@ pub fn counters(c: &mut Criterion) { let meter = build_meter(); let mut g = c.benchmark_group("Counter"); + let cx = Context::new(); // unbound u64 - let counter = meter.u64_counter("u64_unbound.sum").init(); - benchmark_unbound_metric("u64_unbound", &mut g, |attributes| { - counter.add(1, attributes) - }); - - // bound u64 - g.bench_with_input( - BenchmarkId::new("u64_bound", 1), - &meter - .u64_counter("u64_bound.sum") - .init() - .bind(build_kv(1).as_ref()), - |b, counter| b.iter(|| counter.add(1)), - ); + let counter = meter.u64_counter("u64.sum").init(); + benchmark_unbound_metric("u64", &mut g, |attributes| counter.add(&cx, 1, attributes)); // unbound f64 - let counter = meter.f64_counter("f64_unbound.sum").init(); - benchmark_unbound_metric("f64_unbound", &mut g, |attributes| { - counter.add(1.0, attributes) - }); - - // bound f64 - g.bench_with_input( - BenchmarkId::new("f64_bound", 1.0), - &meter - .f64_counter("f64_bound.sum") - .init() - .bind(build_kv(1).as_ref()), - |b, counter| b.iter(|| counter.add(1.0)), - ); - - // acquire handle - benchmark_unbound_metric("f64_bind", &mut g, |attributes| { - let _ = counter.bind(attributes); + let counter 
= meter.f64_counter("f64.sum").init(); + benchmark_unbound_metric("f64", &mut g, |attributes| { + counter.add(&cx, 1.0, attributes) }); g.finish(); @@ -98,21 +73,12 @@ thread_local! { struct BenchAggregatorSelector; impl AggregatorSelector for BenchAggregatorSelector { - fn aggregator_for( - &self, - descriptor: &Descriptor, - ) -> Option> { + fn aggregator_for(&self, descriptor: &Descriptor) -> Option> { match descriptor.name() { name if name.ends_with(".disabled") => None, name if name.ends_with(".sum") => Some(Arc::new(aggregators::sum())), - name if name.ends_with(".minmaxsumcount") => { - Some(Arc::new(aggregators::min_max_sum_count(descriptor))) - } name if name.ends_with(".lastvalue") => Some(Arc::new(aggregators::last_value())), - name if name.ends_with(".histogram") => { - Some(Arc::new(aggregators::histogram(descriptor, &[]))) - } - name if name.ends_with(".exact") => Some(Arc::new(aggregators::array())), + name if name.ends_with(".histogram") => Some(Arc::new(aggregators::histogram(&[]))), _ => panic!( "Invalid instrument name for test AggregatorSelector: {}", descriptor.name() @@ -127,15 +93,16 @@ struct BenchProcessor { } impl Processor for BenchProcessor { - fn aggregation_selector(&self) -> &dyn AggregatorSelector { + fn aggregator_selector(&self) -> &dyn AggregatorSelector { &self.aggregation_selector } } fn build_meter() -> Meter { let processor = Arc::new(BenchProcessor::default()); - let core = accumulator(processor).build(); - Meter::new("benches", None, None, Arc::new(core)) + let core = accumulator(processor); + let library = InstrumentationLibrary::new("benches", None, None); + wrap_meter_core(Arc::new(core), library) } criterion_group!(benches, counters); diff --git a/opentelemetry-sdk/src/export/metrics/aggregation.rs b/opentelemetry-sdk/src/export/metrics/aggregation.rs deleted file mode 100644 index dc0bab48f4..0000000000 --- a/opentelemetry-sdk/src/export/metrics/aggregation.rs +++ /dev/null @@ -1,81 +0,0 @@ -//! Metrics SDK Aggregator export API -use opentelemetry_api::metrics::{Number, Result}; -use std::time::SystemTime; - -/// Sum returns an aggregated sum. -pub trait Sum { - /// The sum of the currently aggregated metrics - fn sum(&self) -> Result; -} - -/// Count returns the number of values that were aggregated. -pub trait Count { - /// The count of the currently aggregated metrics - fn count(&self) -> Result; -} - -/// Min returns the minimum value over the set of values that were aggregated. -pub trait Min { - /// The min of the currently aggregated metrics - fn min(&self) -> Result; -} - -/// Max returns the maximum value over the set of values that were aggregated. -pub trait Max { - /// The max of the currently aggregated metrics - fn max(&self) -> Result; -} - -/// LastValue returns the latest value that was aggregated. -pub trait LastValue { - /// The last value of the currently aggregated metrics - fn last_value(&self) -> Result<(Number, SystemTime)>; -} - -/// Points return the raw set of values that were aggregated. -pub trait Points { - /// The raw set of points currently aggregated - fn points(&self) -> Result>; -} - -/// Buckets represent histogram buckets boundaries and counts. -/// -/// For a Histogram with N defined boundaries, e.g, [x, y, z]. -/// There are N+1 counts: [-inf, x), [x, y), [y, z), [z, +inf] -#[derive(Debug)] -pub struct Buckets { - /// Boundaries are floating point numbers, even when - /// aggregating integers. 
- boundaries: Vec, - - /// Counts are floating point numbers to account for - /// the possibility of sampling which allows for - /// non-integer count values. - counts: Vec, -} - -impl Buckets { - /// Create new buckets - pub fn new(boundaries: Vec, counts: Vec) -> Self { - Buckets { boundaries, counts } - } - - /// Boundaries of the histogram buckets - pub fn boundaries(&self) -> &Vec { - &self.boundaries - } - - /// Counts of the histogram buckets - pub fn counts(&self) -> &Vec { - &self.counts - } -} - -/// Histogram returns the count of events in pre-determined buckets. -pub trait Histogram: Sum + Count { - /// Buckets for this histogram. - fn histogram(&self) -> Result; -} - -/// MinMaxSumCount supports the Min, Max, Sum, and Count interfaces. -pub trait MinMaxSumCount: Min + Max + Sum + Count {} diff --git a/opentelemetry-sdk/src/export/metrics/aggregation/mod.rs b/opentelemetry-sdk/src/export/metrics/aggregation/mod.rs new file mode 100644 index 0000000000..d854b367bf --- /dev/null +++ b/opentelemetry-sdk/src/export/metrics/aggregation/mod.rs @@ -0,0 +1,110 @@ +//! Metrics aggregation +use std::time::SystemTime; + +use crate::metrics::sdk_api::Number; +use opentelemetry_api::metrics::Result; + +mod temporality; + +pub use temporality::*; + +/// An interface returned by an [`Aggregator`] containing an interval of metric +/// data. +/// +/// [`Aggregator`]: crate::metrics::aggregators::Aggregator +pub trait Aggregation { + /// A short identifying string to identify the [`Aggregator`] that was used to + /// produce the aggregation (e.g., [`AggregationKind::SUM`]). + /// + /// [`Aggregator`]: crate::metrics::aggregators::Aggregator + /// [`AggregationKind`]: crate::export::metrics::aggregation::AggregationKind + fn kind(&self) -> &AggregationKind; +} + +/// Sum returns an aggregated sum. +pub trait Sum: Aggregation { + /// The sum of the currently aggregated metrics + fn sum(&self) -> Result; +} + +/// Count returns the number of values that were aggregated. +pub trait Count: Aggregation { + /// The count of the currently aggregated metrics + fn count(&self) -> Result; +} + +/// LastValue returns the latest value that was aggregated. +pub trait LastValue: Aggregation { + /// The last value of the currently aggregated metrics + fn last_value(&self) -> Result<(Number, SystemTime)>; +} + +/// Buckets represent histogram buckets boundaries and counts. +/// +/// For a Histogram with N defined boundaries, e.g, [x, y, z]. There are N+1 +/// counts: [-inf, x), [x, y), [y, z), [z, +inf] +#[derive(Debug)] +pub struct Buckets { + /// Boundaries are floating point numbers, even when + /// aggregating integers. + boundaries: Vec, + + /// Counts are floating point numbers to account for + /// the possibility of sampling which allows for + /// non-integer count values. + counts: Vec, +} + +impl Buckets { + /// Create new buckets + pub fn new(boundaries: Vec, counts: Vec) -> Self { + Buckets { boundaries, counts } + } + + /// Boundaries of the histogram buckets + pub fn boundaries(&self) -> &Vec { + &self.boundaries + } + + /// Counts of the histogram buckets + pub fn counts(&self) -> &Vec { + &self.counts + } +} + +/// Histogram returns the count of events in pre-determined buckets. +pub trait Histogram: Sum + Count + Aggregation { + /// Buckets for this histogram. + fn histogram(&self) -> Result; +} + +/// A short name for the [`Aggregator`] that produces an [`Aggregation`]. +/// +/// Kind is a string to allow user-defined Aggregators. 
+///
+/// When deciding how to handle an Aggregation, Exporters are encouraged to
+/// test for conversion to the above interfaces, from strongest to weakest,
+/// rather than matching on the Kind value. This enables user-supplied
+/// Aggregators to replace builtin Aggregators.
+///
+/// For example, test for a Histogram before testing for a Sum, and so on.
+///
+/// [`Aggregator`]: crate::metrics::aggregators::Aggregator
+#[derive(Debug, Clone, PartialEq)]
+pub struct AggregationKind(&'static str);
+
+impl AggregationKind {
+    /// Aggregations that return an aggregated sum.
+    pub const SUM: Self = AggregationKind("SUM");
+
+    /// Aggregations that return a distribution
+    pub const HISTOGRAM: Self = AggregationKind("HISTOGRAM");
+
+    /// Aggregations that return only the latest value.
+    pub const LAST_VALUE: AggregationKind = AggregationKind("LAST_VALUE");
+
+    /// Create a new custom aggregation kind
+    pub const fn new(name: &'static str) -> Self {
+        AggregationKind(name)
+    }
+}
diff --git a/opentelemetry-sdk/src/export/metrics/aggregation/temporality.rs b/opentelemetry-sdk/src/export/metrics/aggregation/temporality.rs
new file mode 100644
index 0000000000..f3c570fdcc
--- /dev/null
+++ b/opentelemetry-sdk/src/export/metrics/aggregation/temporality.rs
@@ -0,0 +1,92 @@
+use crate::export::metrics::aggregation::AggregationKind;
+use crate::metrics::sdk_api::{Descriptor, InstrumentKind};
+
+#[derive(Clone)]
+struct ConstantTemporalitySelector(Temporality);
+
+impl TemporalitySelector for ConstantTemporalitySelector {
+    fn temporality_for(&self, _descriptor: &Descriptor, _kind: &AggregationKind) -> Temporality {
+        self.0
+    }
+}
+
+/// Returns a [`TemporalitySelector`] that returns a constant [`Temporality`].
+pub fn constant_temporality_selector(temporality: Temporality) -> impl TemporalitySelector + Clone {
+    ConstantTemporalitySelector(temporality)
+}
+
+/// Returns a [`TemporalitySelector`] that always returns [`Temporality::Cumulative`].
+pub fn cumulative_temporality_selector() -> impl TemporalitySelector + Clone {
+    constant_temporality_selector(Temporality::Cumulative)
+}
+
+/// Returns a [`TemporalitySelector`] that always returns [`Temporality::Delta`].
+pub fn delta_temporality_selector() -> impl TemporalitySelector + Clone {
+    constant_temporality_selector(Temporality::Delta)
+}
+
+/// Returns a [`TemporalitySelector`] that returns cumulative for
+/// precomputed-sum instruments and delta for everything else, avoiding
+/// long-term memory requirements.
+pub fn stateless_temporality_selector() -> impl TemporalitySelector + Clone {
+    StatelessTemporalitySelector
+}
+
+#[derive(Clone)]
+struct StatelessTemporalitySelector;
+
+impl TemporalitySelector for StatelessTemporalitySelector {
+    fn temporality_for(&self, descriptor: &Descriptor, kind: &AggregationKind) -> Temporality {
+        if kind == &AggregationKind::SUM && descriptor.instrument_kind().precomputed_sum() {
+            Temporality::Cumulative
+        } else {
+            Temporality::Delta
+        }
+    }
+}
+
+/// Temporality indicates the temporal aggregation exported by an exporter.
+/// These bits may be OR-d together when multiple exporters are in use.
+#[derive(Copy, Clone, Debug, PartialEq)]
+#[non_exhaustive]
+pub enum Temporality {
+    /// Indicates that an Exporter expects a Cumulative Aggregation.
+    Cumulative = 1,
+
+    /// Indicates that an Exporter expects a Delta Aggregation.
+    Delta = 2,
+}
+
+impl Temporality {
+    /// Tests whether `other` is included in this temporality.
+    pub fn includes(&self, other: &Self) -> bool {
+        (*self as u32) & (*other as u32) != 0
+    }
+
+    /// Returns whether a temporality of this kind requires memory to export correctly.
+    pub fn memory_required(&self, kind: &InstrumentKind) -> bool {
+        match kind {
+            InstrumentKind::Histogram
+            | InstrumentKind::GaugeObserver
+            | InstrumentKind::Counter
+            | InstrumentKind::UpDownCounter => {
+                // Delta-oriented instruments:
+                self.includes(&Temporality::Cumulative)
+            }
+
+            InstrumentKind::CounterObserver | InstrumentKind::UpDownCounterObserver => {
+                // Cumulative-oriented instruments:
+                self.includes(&Temporality::Delta)
+            }
+        }
+    }
+}
+
+/// TemporalitySelector is a sub-interface of Exporter used to indicate
+/// whether the Processor should compute Delta or Cumulative
+/// Aggregations.
+pub trait TemporalitySelector {
+    /// TemporalityFor should return the correct Temporality that
+    /// should be used when exporting data for the given metric
+    /// instrument and Aggregator kind.
+    fn temporality_for(&self, descriptor: &Descriptor, kind: &AggregationKind) -> Temporality;
+}
diff --git a/opentelemetry-sdk/src/export/metrics/mod.rs b/opentelemetry-sdk/src/export/metrics/mod.rs
index 66219ba97e..d90a2e302b 100644
--- a/opentelemetry-sdk/src/export/metrics/mod.rs
+++ b/opentelemetry-sdk/src/export/metrics/mod.rs
@@ -1,71 +1,25 @@
 //! Metrics Export
-use crate::resource::Resource;
-use opentelemetry_api::{
-    attributes,
-    metrics::{Descriptor, InstrumentKind, Number, Result},
-};
-use std::any::Any;
-use std::fmt;
-use std::sync::Arc;
-use std::time::SystemTime;
-mod aggregation;
-pub mod stdout;
+use core::fmt;
+use std::{sync::Arc, time::SystemTime};
+
+use opentelemetry_api::{attributes, metrics::Result, Context, InstrumentationLibrary};

-pub use aggregation::{
-    Buckets, Count, Histogram, LastValue, Max, Min, MinMaxSumCount, Points, Sum,
+use crate::{
+    metrics::{aggregators::Aggregator, sdk_api::Descriptor},
+    Resource,
 };
-pub use stdout::stdout;

-/// Processor is responsible for deciding which kind of aggregation to use (via
-/// `aggregation_selector`), gathering exported results from the SDK during
-/// collection, and deciding over which dimensions to group the exported data.
-///
-/// The SDK supports binding only one of these interfaces, as it has the sole
-/// responsibility of determining which Aggregator to use for each record.
-///
-/// The embedded AggregatorSelector interface is called (concurrently) in
-/// instrumentation context to select the appropriate Aggregator for an
-/// instrument.
-pub trait Processor: fmt::Debug {
-    /// AggregatorSelector is responsible for selecting the
-    /// concrete type of Aggregator used for a metric in the SDK.
-    ///
-    /// This may be a static decision based on fields of the
-    /// Descriptor, or it could use an external configuration
-    /// source to customize the treatment of each metric
-    /// instrument.
-    ///
-    /// The result from AggregatorSelector.AggregatorFor should be
-    /// the same type for a given Descriptor or else nil. The same
-    /// type should be returned for a given descriptor, because
-    /// Aggregators only know how to Merge with their own type. If
-    /// the result is nil, the metric instrument will be disabled.
-    ///
-    /// Note that the SDK only calls AggregatorFor when new records
-    /// require an Aggregator. This does not provide a way to
-    /// disable metrics with active records.
-    fn aggregation_selector(&self) -> &dyn AggregatorSelector;
-}
+use self::aggregation::TemporalitySelector;

-/// A locked processor.
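// Editor's note: a small usage sketch, not part of this patch, for the
// `Temporality` type introduced in temporality.rs above. The discriminants
// (Cumulative = 1, Delta = 2) act as bit flags, so `includes` reduces to a
// bitwise AND test. The import path assumes the module layout added here.
#[test]
fn temporality_includes_is_a_bit_test() {
    use opentelemetry_sdk::export::metrics::aggregation::Temporality;

    assert!(Temporality::Cumulative.includes(&Temporality::Cumulative));
    assert!(!Temporality::Cumulative.includes(&Temporality::Delta));
    assert!(Temporality::Delta.includes(&Temporality::Delta));
}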
-/// -/// The `Process` method is called during collection in a single-threaded -/// context from the SDK, after the aggregator is checkpointed, allowing the -/// processor to build the set of metrics currently being exported. -pub trait LockedProcessor { - /// Process is called by the SDK once per internal record, passing the export - /// Accumulation (a Descriptor, the corresponding Attributes, and the checkpointed - /// Aggregator). - /// - /// The Context argument originates from the controller that orchestrates - /// collection. - fn process(&mut self, accumulation: Accumulation<'_>) -> Result<()>; -} +pub mod aggregation; +mod stdout; + +pub use stdout::{stdout, ExportLine, ExportNumeric, StdoutExporter, StdoutExporterBuilder}; /// AggregatorSelector supports selecting the kind of `Aggregator` to use at /// runtime for a specific metric instrument. -pub trait AggregatorSelector: fmt::Debug { +pub trait AggregatorSelector { /// This allocates a variable number of aggregators of a kind suitable for /// the requested export. /// @@ -79,131 +33,34 @@ pub trait AggregatorSelector: fmt::Debug { fn aggregator_for(&self, descriptor: &Descriptor) -> Option>; } -/// The interface used by a `Controller` to coordinate the `Processor` with -/// `Accumulator`(s) and `Exporter`(s). The `start_collection` and -/// `finish_collection` methods start and finish a collection interval. -/// `Controller`s call the `Accumulator`(s) during collection to process -/// `Accumulation`s. -pub trait Checkpointer: LockedProcessor { - /// A checkpoint of the current data set. This may be called before and after - /// collection. The implementation is required to return the same value - /// throughout its lifetime. - fn checkpoint_set(&mut self) -> &mut dyn CheckpointSet; - - /// Logic to be run at the start of a collection interval. - fn start_collection(&mut self); - - /// Cleanup logic or other behavior that needs to be run after a collection - /// interval is complete. - fn finish_collection(&mut self) -> Result<()>; -} - -/// Aggregator implements a specific aggregation behavior, i.e., a behavior to -/// track a sequence of updates to an instrument. Sum-only instruments commonly -/// use a simple Sum aggregator, but for the distribution instruments -/// (Histogram, ValueObserver) there are a number of possible aggregators -/// with different cost and accuracy tradeoffs. -/// -/// Note that any Aggregator may be attached to any instrument--this is the -/// result of the OpenTelemetry API/SDK separation. It is possible to attach a -/// Sum aggregator to a Histogram instrument or a MinMaxSumCount aggregator -/// to a Counter instrument. -pub trait Aggregator: fmt::Debug { - /// Update receives a new measured value and incorporates it into the - /// aggregation. Update calls may be called concurrently. - /// - /// `Descriptor::number_kind` should be consulted to determine whether the - /// provided number is an `i64`, `u64` or `f64`. - /// - /// The current Context could be inspected for a `Baggage` or - /// `SpanContext`. - fn update(&self, number: &Number, descriptor: &Descriptor) -> Result<()>; - - /// This method is called during collection to finish one period of aggregation - /// by atomically saving the currently-updating state into the argument - /// Aggregator. - /// - /// `synchronized_move` is called concurrently with `update`. These two methods - /// must be synchronized with respect to each other, for correctness. 
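// Editor's note: a sketch, not part of this patch, of implementing the new
// `AggregatorSelector` trait above. It mirrors the bench selector from
// benches/metric.rs: pick an aggregator by instrument name suffix, and
// return `None` to disable an instrument. Import paths assume this patch.
use std::sync::Arc;

use opentelemetry_sdk::{
    export::metrics::AggregatorSelector,
    metrics::{
        aggregators::{self, Aggregator},
        sdk_api::Descriptor,
    },
};

#[derive(Debug)]
struct SuffixSelector;

impl AggregatorSelector for SuffixSelector {
    fn aggregator_for(&self, descriptor: &Descriptor) -> Option<Arc<dyn Aggregator + Send + Sync>> {
        match descriptor.name() {
            // returning None disables the instrument entirely
            name if name.ends_with(".disabled") => None,
            name if name.ends_with(".lastvalue") => Some(Arc::new(aggregators::last_value())),
            _ => Some(Arc::new(aggregators::sum())),
        }
    }
}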
- /// - /// This method will return an `InconsistentAggregator` error if this - /// `Aggregator` cannot be copied into the destination due to an incompatible - /// type. - /// - /// This call has no `Context` argument because it is expected to perform only - /// computation. - fn synchronized_move( - &self, - destination: &Arc, - descriptor: &Descriptor, - ) -> Result<()>; - - /// This combines the checkpointed state from the argument `Aggregator` into this - /// `Aggregator`. `merge` is not synchronized with respect to `update` or - /// `synchronized_move`. - /// - /// The owner of an `Aggregator` being merged is responsible for synchronization - /// of both `Aggregator` states. - fn merge(&self, other: &(dyn Aggregator + Send + Sync), descriptor: &Descriptor) -> Result<()>; - - /// Returns the implementing aggregator as `Any` for downcasting. - fn as_any(&self) -> &dyn Any; -} - -/// An optional interface implemented by some Aggregators. An Aggregator must -/// support `subtract()` in order to be configured for a Precomputed-Sum -/// instrument (SumObserver, UpDownSumObserver) using a DeltaExporter. -pub trait Subtractor { - /// Subtract subtracts the `operand` from this Aggregator and outputs the value - /// in `result`. - fn subtract( - &self, - operand: &(dyn Aggregator + Send + Sync), - result: &(dyn Aggregator + Send + Sync), - descriptor: &Descriptor, - ) -> Result<()>; +/// A container for the common elements for exported metric data that are shared +/// by the `Accumulator`->`Processor` and `Processor`->`Exporter` steps. +#[derive(Debug)] +pub struct Metadata<'a> { + descriptor: &'a Descriptor, + attributes: &'a attributes::AttributeSet, } -/// Exporter handles presentation of the checkpoint of aggregate metrics. This -/// is the final stage of a metrics export pipeline, where metric data are -/// formatted for a specific system. -pub trait Exporter: ExportKindFor { - /// Export is called immediately after completing a collection pass in the SDK. - /// - /// The CheckpointSet interface refers to the Processor that just completed - /// collection. - fn export(&self, checkpoint_set: &mut dyn CheckpointSet) -> Result<()>; -} +impl<'a> Metadata<'a> { + /// Create a new `Metadata` instance. + pub fn new(descriptor: &'a Descriptor, attributes: &'a attributes::AttributeSet) -> Self { + { + Metadata { + descriptor, + attributes, + } + } + } -/// ExportKindSelector is a sub-interface of Exporter used to indicate -/// whether the Processor should compute Delta or Cumulative -/// Aggregations. -pub trait ExportKindFor: fmt::Debug { - /// Determines the correct `ExportKind` that should be used when exporting data - /// for the given metric instrument. - fn export_kind_for(&self, descriptor: &Descriptor) -> ExportKind; -} + /// A description of the metric instrument being exported. + pub fn descriptor(&self) -> &Descriptor { + self.descriptor + } -/// CheckpointSet allows a controller to access a complete checkpoint of -/// aggregated metrics from the Processor. This is passed to the `Exporter` -/// which may then use `try_for_each` to iterate over the collection of -/// aggregated metrics. -pub trait CheckpointSet: fmt::Debug { - /// This iterates over aggregated checkpoints for all metrics that were updated - /// during the last collection period. Each aggregated checkpoint returned by - /// the function parameter may return an error. - /// - /// The `ExportKindSelector` argument is used to determine whether the `Record` - /// is computed using delta or cumulative aggregation. 
-    ///
-    /// ForEach tolerates `MetricsError::NoData` silently, as this is expected from
-    /// the Meter implementation. Any other kind of error will immediately halt and
-    /// return the error to the caller.
-    fn try_for_each(
-        &mut self,
-        export_selector: &dyn ExportKindFor,
-        f: &mut dyn FnMut(&Record<'_>) -> Result<()>,
-    ) -> Result<()>;
+    /// The attributes associated with the instrument and the aggregated data.
+    pub fn attributes(&self) -> &attributes::AttributeSet {
+        self.attributes
+    }
 }

 /// Allows `Accumulator` implementations to construct new `Accumulation`s to
@@ -213,83 +70,77 @@ pub trait CheckpointSet: fmt::Debug {
 pub fn accumulation<'a>(
     descriptor: &'a Descriptor,
     attributes: &'a attributes::AttributeSet,
-    resource: &'a Resource,
     aggregator: &'a Arc<dyn Aggregator + Send + Sync>,
 ) -> Accumulation<'a> {
-    Accumulation::new(descriptor, attributes, resource, aggregator)
-}
-
-/// Allows `Processor` implementations to construct export records. The
-/// `Descriptor`, `Attributes`, and `Aggregator` represent aggregate metric events
-/// received over a single collection period.
-pub fn record<'a>(
-    descriptor: &'a Descriptor,
-    attributes: &'a attributes::AttributeSet,
-    resource: &'a Resource,
-    aggregator: Option<&'a Arc<dyn Aggregator + Send + Sync>>,
-    start: SystemTime,
-    end: SystemTime,
-) -> Record<'a> {
-    Record {
-        metadata: Metadata::new(descriptor, attributes, resource),
-        aggregator,
-        start,
-        end,
-    }
+    Accumulation::new(descriptor, attributes, aggregator)
 }

-impl Record<'_> {
-    /// The aggregator for this metric
-    pub fn aggregator(&self) -> Option<&Arc<dyn Aggregator + Send + Sync>> {
-        self.aggregator
-    }
-}
-
-/// A container for the common elements for exported metric data that are shared
-/// by the `Accumulator`->`Processor` and `Processor`->`Exporter` steps.
-#[derive(Debug)]
-pub struct Metadata<'a> {
-    descriptor: &'a Descriptor,
-    attributes: &'a attributes::AttributeSet,
-    resource: &'a Resource,
+/// A container for the exported data for a single metric instrument and attribute
+/// set, as prepared by an `Accumulator` for the `Processor`.
+pub struct Accumulation<'a> {
+    metadata: Metadata<'a>,
+    aggregator: &'a Arc<dyn Aggregator + Send + Sync>,
 }

-impl<'a> Metadata<'a> {
-    /// Create a new `Metadata` instance.
+impl<'a> Accumulation<'a> {
+    /// Create a new `Record` instance.
     pub fn new(
         descriptor: &'a Descriptor,
         attributes: &'a attributes::AttributeSet,
-        resource: &'a Resource,
+        aggregator: &'a Arc<dyn Aggregator + Send + Sync>,
     ) -> Self {
-        {
-            Metadata {
-                descriptor,
-                attributes,
-                resource,
-            }
+        Accumulation {
+            metadata: Metadata::new(descriptor, attributes),
+            aggregator,
         }
     }

     /// A description of the metric instrument being exported.
     pub fn descriptor(&self) -> &Descriptor {
-        self.descriptor
+        self.metadata.descriptor
     }

     /// The attributes associated with the instrument and the aggregated data.
     pub fn attributes(&self) -> &attributes::AttributeSet {
-        self.attributes
+        self.metadata.attributes
     }

-    /// Common attributes that apply to this metric event.
-    pub fn resource(&self) -> &Resource {
-        self.resource
+    /// The checkpointed aggregator for this metric.
+    pub fn aggregator(&self) -> &Arc<dyn Aggregator + Send + Sync> {
+        self.aggregator
+    }
+}
+
+impl<'a> fmt::Debug for Accumulation<'a> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        f.debug_struct("Accumulation")
+            .field("metadata", &self.metadata)
+            .finish()
     }
 }

+/// Metric data processor.
+///
+/// Locked processors are responsible for gathering exported results from the SDK
+/// during collection, and deciding over which dimensions to group the exported data.
+///
+/// The `process` method is called during collection in a single-threaded
+/// context from the SDK, after the aggregator is checkpointed, allowing the
+/// processor to build the set of metrics currently being exported.
+pub trait LockedProcessor {
+    /// Process is called by the SDK once per internal record, passing the export
+    /// [`Accumulation`] (a Descriptor, the corresponding attributes, and the
+    /// checkpointed aggregator).
+    ///
+    /// This call has no [`Context`] argument because it is expected to perform only
+    /// computation. An SDK is not expected to call exporters from within `process`;
+    /// use a controller for that.
+    fn process(&mut self, accumulation: Accumulation<'_>) -> Result<()>;
+}
+
 /// A container for the exported data for a single metric instrument and attribute
 /// set, as prepared by the `Processor` for the `Exporter`. This includes the
 /// effective start and end time for the aggregation.
-#[derive(Debug)]
 pub struct Record<'a> {
     metadata: Metadata<'a>,
     aggregator: Option<&'a Arc<dyn Aggregator + Send + Sync>>,
@@ -308,9 +159,9 @@ impl Record<'_> {
         self.metadata.attributes
     }

-    /// Common attributes that apply to this metric event.
-    pub fn resource(&self) -> &Resource {
-        self.metadata.resource
+    /// The aggregator for this metric
+    pub fn aggregator(&self) -> Option<&Arc<dyn Aggregator + Send + Sync>> {
+        self.aggregator
     }

     /// The start time of the interval covered by this aggregation.
@@ -324,109 +175,181 @@ impl Record<'_> {
     }
 }

-/// A container for the exported data for a single metric instrument and attribute
-/// set, as prepared by an `Accumulator` for the `Processor`.
-#[derive(Debug)]
-pub struct Accumulation<'a> {
-    metadata: Metadata<'a>,
-    aggregator: &'a Arc<dyn Aggregator + Send + Sync>,
+/// Exporter handles presentation of the checkpoint of aggregate
+/// metrics. This is the final stage of a metrics export pipeline,
+/// where metric data are formatted for a specific system.
+pub trait MetricsExporter: TemporalitySelector {
+    /// Export is called immediately after completing a collection
+    /// pass in the SDK.
+    ///
+    /// The Context comes from the controller that initiated
+    /// collection.
+    ///
+    /// The InstrumentationLibraryReader interface refers to the
+    /// Processor that just completed collection.
+    fn export(
+        &self,
+        cx: &Context,
+        res: &Resource,
+        reader: &dyn InstrumentationLibraryReader,
+    ) -> Result<()>;
 }

-impl<'a> Accumulation<'a> {
-    /// Create a new `Record` instance.
-    pub fn new(
-        descriptor: &'a Descriptor,
-        attributes: &'a attributes::AttributeSet,
-        resource: &'a Resource,
-        aggregator: &'a Arc<dyn Aggregator + Send + Sync>,
-    ) -> Self {
-        Accumulation {
-            metadata: Metadata::new(descriptor, attributes, resource),
-            aggregator,
-        }
-    }
-
-    /// A description of the metric instrument being exported.
-    pub fn descriptor(&self) -> &Descriptor {
-        self.metadata.descriptor
-    }
+/// InstrumentationLibraryReader is an interface for exporters to iterate
+/// over one instrumentation library of metric data at a time.
+pub trait InstrumentationLibraryReader {
+    /// ForEach calls the passed function once per instrumentation library,
+    /// allowing the caller to emit metrics grouped by the library that
+    /// produced them.
+    fn try_for_each(
+        &self,
+        f: &mut dyn FnMut(&InstrumentationLibrary, &mut dyn Reader) -> Result<()>,
+    ) -> Result<()>;
+}

-    /// The attributes associated with the instrument and the aggregated data.
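// Editor's note: a sketch, not part of this patch, of the smallest possible
// `LockedProcessor` defined above: it just counts the accumulations the SDK
// hands it during a collection pass. Import paths assume this patch.
use opentelemetry_api::metrics::Result;
use opentelemetry_sdk::export::metrics::{Accumulation, LockedProcessor};

#[derive(Default)]
struct CountingProcessor {
    records_seen: usize,
}

impl LockedProcessor for CountingProcessor {
    fn process(&mut self, accumulation: Accumulation<'_>) -> Result<()> {
        // Each accumulation carries a descriptor, an attribute set, and the
        // checkpointed aggregator for one instrument + attribute combination.
        let _instrument = accumulation.descriptor().name();
        self.records_seen += 1;
        Ok(())
    }
}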
- pub fn attributes(&self) -> &attributes::AttributeSet { - self.metadata.attributes - } +/// Reader allows a controller to access a complete checkpoint of +/// aggregated metrics from the Processor for a single library of +/// metric data. This is passed to the Exporter which may then use +/// ForEach to iterate over the collection of aggregated metrics. +pub trait Reader { + /// ForEach iterates over aggregated checkpoints for all + /// metrics that were updated during the last collection + /// period. Each aggregated checkpoint returned by the + /// function parameter may return an error. + /// + /// The TemporalitySelector argument is used to determine + /// whether the Record is computed using Delta or Cumulative + /// aggregation. + /// + /// ForEach tolerates ErrNoData silently, as this is + /// expected from the Meter implementation. Any other kind + /// of error will immediately halt ForEach and return + /// the error to the caller. + fn try_for_each( + &mut self, + temp_selector: &dyn TemporalitySelector, + f: &mut dyn FnMut(&Record<'_>) -> Result<()>, + ) -> Result<()>; +} - /// Common attributes that apply to this metric event. - pub fn resource(&self) -> &Resource { - self.metadata.resource +impl fmt::Debug for Record<'_> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("Record") + .field("metadata", &self.metadata) + .field("start", &self.start) + .field("end", &self.end) + .finish() } +} - /// The checkpointed aggregator for this metric. - pub fn aggregator(&self) -> &Arc { - self.aggregator - } +/// The interface used to create checkpoints. +pub trait Checkpointer: Processor { + /// Synchronizes the checkpoint process and allows a single locked + /// checkpoint to be accessed at a time. + fn checkpoint( + &self, + f: &mut dyn FnMut(&mut dyn LockedCheckpointer) -> Result<()>, + ) -> Result<()>; } -/// Indicates the kind of data exported by an exporter. -/// These bits may be OR-d together when multiple exporters are in use. -#[derive(Clone, Debug)] -pub enum ExportKind { - /// Indicates that the `Exporter` expects a cumulative `Aggregation`. - Cumulative = 1, +/// The interface used by a controller to coordinate the processor with +/// accumulator(s) and exporter(s). +/// +/// The StartCollection() and FinishCollection() methods start and finish a +/// collection interval. Controllers call the Accumulator(s) during collection +/// to process Accumulations. +pub trait LockedCheckpointer { + /// Processes metric data for export. + /// + /// The `process` method is bracketed by `start_collection` and + /// `finish_collection` calls. + fn processor(&mut self) -> &mut dyn LockedProcessor; + + /// Reader returns the current data set. + /// + /// This may be called before and after collection. The implementation is + /// required to return the same value throughout its lifetime. + fn reader(&mut self) -> &mut dyn Reader; + + /// begins a collection interval. + fn start_collection(&mut self); - /// Indicates that the `Exporter` expects a delta `Aggregation`. - Delta = 2, + /// ends a collection interval. + fn finish_collection(&mut self) -> Result<()>; } -/// Strategies for selecting which export kind is used for an instrument. -#[derive(Debug, Clone)] -pub enum ExportKindSelector { - /// A selector that always returns [`ExportKind::Cumulative`]. - Cumulative, - /// A selector that always returns [`ExportKind::Delta`]. - Delta, - /// A selector that returns cumulative or delta based on a given instrument - /// kind. 
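// Editor's note: a sketch, not part of this patch, of how a controller is
// expected to drive the `Checkpointer`/`LockedCheckpointer` pair above:
// bracket each pass with start/finish_collection, feeding the locked
// processor in between. Import paths assume this patch.
use opentelemetry_api::metrics::Result;
use opentelemetry_sdk::export::metrics::Checkpointer;

fn run_collection_pass(checkpointer: &dyn Checkpointer) -> Result<()> {
    checkpointer.checkpoint(&mut |locked| {
        locked.start_collection();
        // ... the controller calls the accumulator(s) here, which push
        // `Accumulation`s into `locked.processor()` ...
        locked.finish_collection()
    })
}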
-    Stateless,
+/// An interface for producing configured [`Checkpointer`] instances.
+pub trait CheckpointerFactory {
+    /// Creates a new configured checkpointer.
+    fn checkpointer(&self) -> Arc<dyn Checkpointer + Send + Sync>;
 }

-impl ExportKind {
-    /// Tests whether `kind` includes a specific kind of exporter.
-    pub fn includes(&self, has: &ExportKind) -> bool {
-        (self.clone() as u32) & (has.clone() as u32) != 0
+/// Allows `Processor` implementations to construct export records. The
+/// `Descriptor`, `Attributes`, and `Aggregator` represent aggregate metric events
+/// received over a single collection period.
+pub fn record<'a>(
+    descriptor: &'a Descriptor,
+    attributes: &'a attributes::AttributeSet,
+    aggregator: Option<&'a Arc<dyn Aggregator + Send + Sync>>,
+    start: SystemTime,
+    end: SystemTime,
+) -> Record<'a> {
+    Record {
+        metadata: Metadata::new(descriptor, attributes),
+        aggregator,
+        start,
+        end,
     }
+}

-    /// Returns whether an exporter of this kind requires memory to export correctly.
-    pub fn memory_required(&self, kind: &InstrumentKind) -> bool {
-        match kind {
-            InstrumentKind::Histogram
-            | InstrumentKind::ValueObserver
-            | InstrumentKind::Counter
-            | InstrumentKind::UpDownCounter => {
-                // Delta-oriented instruments:
-                self.includes(&ExportKind::Cumulative)
-            }
+/// A utility extension to allow upcasting.
+///
+/// Can be removed once [trait_upcasting] is stabilized.
+///
+/// [trait_upcasting]: https://doc.rust-lang.org/unstable-book/language-features/trait-upcasting.html
+pub trait AsDynProcessor {
+    /// Create an `Arc<dyn Processor>` from an impl of [`Processor`].
+    fn as_dyn_processor<'a>(self: Arc<Self>) -> Arc<dyn Processor + Send + Sync + 'a>
+    where
+        Self: 'a;
+}

-            InstrumentKind::SumObserver | InstrumentKind::UpDownSumObserver => {
-                // Cumulative-oriented instruments:
-                self.includes(&ExportKind::Delta)
-            }
-        }
+impl<T: Processor + Send + Sync> AsDynProcessor for T {
+    fn as_dyn_processor<'a>(self: Arc<Self>) -> Arc<dyn Processor + Send + Sync + 'a>
+    where
+        Self: 'a,
+    {
+        self
     }
 }

-impl ExportKindFor for ExportKindSelector {
-    fn export_kind_for(&self, descriptor: &Descriptor) -> ExportKind {
-        match self {
-            ExportKindSelector::Cumulative => ExportKind::Cumulative,
-            ExportKindSelector::Delta => ExportKind::Delta,
-            ExportKindSelector::Stateless => {
-                if descriptor.instrument_kind().precomputed_sum() {
-                    ExportKind::Cumulative
-                } else {
-                    ExportKind::Delta
-                }
-            }
-        }
-    }
+/// Processor is responsible for deciding which kind of aggregation to use (via
+/// `aggregation_selector`), gathering exported results from the SDK during
+/// collection, and deciding over which dimensions to group the exported data.
+///
+/// The SDK supports binding only one of these interfaces, as it has the sole
+/// responsibility of determining which Aggregator to use for each record.
+///
+/// The embedded AggregatorSelector interface is called (concurrently) in
+/// instrumentation context to select the appropriate Aggregator for an
+/// instrument.
+pub trait Processor: AsDynProcessor {
+    /// AggregatorSelector is responsible for selecting the
+    /// concrete type of Aggregator used for a metric in the SDK.
+    ///
+    /// This may be a static decision based on fields of the
+    /// Descriptor, or it could use an external configuration
+    /// source to customize the treatment of each metric
+    /// instrument.
+    ///
+    /// The result from AggregatorSelector.AggregatorFor should be
+    /// the same type for a given Descriptor or else nil. The same
+    /// type should be returned for a given descriptor, because
+    /// Aggregators only know how to Merge with their own type. If
+    /// the result is nil, the metric instrument will be disabled.
+    ///
+    /// Note that the SDK only calls AggregatorFor when new records
+    /// require an Aggregator. This does not provide a way to
+    /// disable metrics with active records.
+    fn aggregator_selector(&self) -> &dyn AggregatorSelector;
 }
diff --git a/opentelemetry-sdk/src/export/metrics/stdout.rs b/opentelemetry-sdk/src/export/metrics/stdout.rs
index aaf1ae722d..1263ea289e 100644
--- a/opentelemetry-sdk/src/export/metrics/stdout.rs
+++ b/opentelemetry-sdk/src/export/metrics/stdout.rs
@@ -1,71 +1,60 @@
 //! Stdout Metrics Exporter
 use crate::{
     export::metrics::{
-        CheckpointSet, Count, ExportKind, ExportKindFor, ExportKindSelector, Exporter, LastValue,
-        Max, Min, Sum,
-    },
-    metrics::{
-        aggregators::{
-            ArrayAggregator, HistogramAggregator, LastValueAggregator, MinMaxSumCountAggregator,
-            SumAggregator,
-        },
-        controllers::{self, PushController, PushControllerWorker},
-        selectors::simple,
+        aggregation::{stateless_temporality_selector, LastValue, Sum, TemporalitySelector},
+        InstrumentationLibraryReader, MetricsExporter,
     },
+    metrics::aggregators::{LastValueAggregator, SumAggregator},
+    Resource,
 };
-use futures_util::stream::Stream;
-use opentelemetry_api::global;
 use opentelemetry_api::{
     attributes::{default_encoder, AttributeSet, Encoder},
-    metrics::{Descriptor, MetricsError, Result},
-    KeyValue,
+    metrics::{MetricsError, Result},
+    Context, KeyValue,
 };
 use std::fmt;
 use std::io;
-use std::iter;
 use std::sync::Mutex;
-use std::time::{Duration, SystemTime};
+use std::time::SystemTime;

 /// Create a new stdout exporter builder with the configuration for a stdout exporter.
-pub fn stdout<S, SO, I, IS>(spawn: S, interval: I) -> StdoutExporterBuilder<io::Stdout, S, I>
-where
-    S: Fn(PushControllerWorker) -> SO,
-    I: Fn(Duration) -> IS,
-    IS: Stream + Send + 'static,
-{
-    StdoutExporterBuilder::<io::Stdout, S, I>::builder(spawn, interval)
+pub fn stdout() -> StdoutExporterBuilder<io::Stdout> {
+    StdoutExporterBuilder::<io::Stdout>::builder()
 }

-///
+/// An OpenTelemetry metric exporter that transmits telemetry to
+/// the local STDOUT or via the registered implementation of `Write`.
 #[derive(Debug)]
 pub struct StdoutExporter<W> {
     /// Writer is the destination. If not set, `Stdout` is used.
     writer: Mutex<W>,
-    /// Suppresses timestamp printing. This is useful to create deterministic test
-    /// conditions.
-    do_not_print_time: bool,
+
+    /// Specifies if timestamps should be printed
+    timestamps: bool,
+
     /// Encodes the attributes.
     attribute_encoder: Box<dyn Encoder + Send + Sync>,
+
     /// An optional user-defined function to format a given export batch.
     formatter: Option<Formatter>,
 }

-/// A collection of exported lines
+/// Individually exported metric
+///
+/// Can be formatted using [`StdoutExporterBuilder::with_formatter`].
#[derive(Default, Debug)] -pub struct ExportBatch { - timestamp: Option, - lines: Vec, -} +pub struct ExportLine { + /// metric name + pub name: String, -#[derive(Default, Debug)] -struct ExportLine { - name: String, - min: Option, - max: Option, - sum: Option, - count: u64, - last_value: Option, - timestamp: Option, + /// populated if using sum aggregator + pub sum: Option, + + /// populated if using last value aggregator + pub last_value: Option, + + /// metric timestamp + pub timestamp: Option, } /// A number exported as debug for serialization @@ -77,97 +66,105 @@ impl fmt::Debug for ExportNumeric { } } -impl Exporter for StdoutExporter +impl StdoutExporter { + /// The temporality selector for this exporter + pub fn temporality_selector(&self) -> impl TemporalitySelector { + stateless_temporality_selector() + } +} + +impl TemporalitySelector for StdoutExporter { + fn temporality_for( + &self, + descriptor: &crate::metrics::sdk_api::Descriptor, + kind: &super::aggregation::AggregationKind, + ) -> super::aggregation::Temporality { + stateless_temporality_selector().temporality_for(descriptor, kind) + } +} + +impl MetricsExporter for StdoutExporter where W: fmt::Debug + io::Write, { - fn export(&self, checkpoint_set: &mut dyn CheckpointSet) -> Result<()> { - let mut batch = ExportBatch::default(); - if !self.do_not_print_time { - batch.timestamp = Some(opentelemetry_api::time::now()); - } - checkpoint_set.try_for_each(self, &mut |record| { - let agg = record.aggregator().ok_or(MetricsError::NoDataCollected)?; - let desc = record.descriptor(); - let kind = desc.number_kind(); - let encoded_resource = record.resource().encoded(self.attribute_encoder.as_ref()); - let encoded_inst_attributes = if !desc.instrumentation_name().is_empty() { - let inst_attributes = AttributeSet::from_attributes(iter::once(KeyValue::new( - "instrumentation.name", - desc.instrumentation_name().to_owned(), - ))); - inst_attributes.encoded(Some(self.attribute_encoder.as_ref())) - } else { - String::new() - }; - - let mut expose = ExportLine::default(); - - if let Some(array) = agg.as_any().downcast_ref::() { - expose.count = array.count()?; - } - - if let Some(last_value) = agg.as_any().downcast_ref::() { - let (value, timestamp) = last_value.last_value()?; - expose.last_value = Some(ExportNumeric(value.to_debug(kind))); - - if !self.do_not_print_time { - expose.timestamp = Some(timestamp); - } + fn export( + &self, + _cx: &Context, + res: &Resource, + reader: &dyn InstrumentationLibraryReader, + ) -> Result<()> { + let mut batch = Vec::new(); + reader.try_for_each(&mut |library, reader| { + let mut attributes = Vec::new(); + if !library.name.is_empty() { + attributes.push(KeyValue::new("instrumentation.name", library.name.clone())); } - - if let Some(histogram) = agg.as_any().downcast_ref::() { - expose.sum = Some(ExportNumeric(histogram.sum()?.to_debug(kind))); - expose.count = histogram.count()?; - // TODO expose buckets + if let Some(version) = &library.version { + attributes.push(KeyValue::new("instrumentation.version", version.clone())); } - - if let Some(mmsc) = agg.as_any().downcast_ref::() { - expose.min = Some(ExportNumeric(mmsc.min()?.to_debug(kind))); - expose.max = Some(ExportNumeric(mmsc.max()?.to_debug(kind))); - expose.sum = Some(ExportNumeric(mmsc.sum()?.to_debug(kind))); - expose.count = mmsc.count()?; + if let Some(schema) = &library.schema_url { + attributes.push(KeyValue::new("instrumentation.schema_url", schema.clone())); } + let inst_attributes = 
AttributeSet::from_attributes(attributes.into_iter()); + let encoded_inst_attributes = + inst_attributes.encoded(Some(self.attribute_encoder.as_ref())); - if let Some(sum) = agg.as_any().downcast_ref::() { - expose.sum = Some(ExportNumeric(sum.sum()?.to_debug(kind))); - } + reader.try_for_each(self, &mut |record| { + let desc = record.descriptor(); + let agg = record.aggregator().ok_or(MetricsError::NoDataCollected)?; + let kind = desc.number_kind(); - let mut encoded_attributes = String::new(); - let iter = record.attributes().iter(); - if let (0, _) = iter.size_hint() { - encoded_attributes = record - .attributes() - .encoded(Some(self.attribute_encoder.as_ref())); - } + let encoded_resource = res.encoded(self.attribute_encoder.as_ref()); - let mut sb = String::new(); + let mut expose = ExportLine::default(); + if let Some(sum) = agg.as_any().downcast_ref::() { + expose.sum = Some(ExportNumeric(sum.sum()?.to_debug(kind))); + } else if let Some(last_value) = agg.as_any().downcast_ref::() + { + let (value, timestamp) = last_value.last_value()?; + expose.last_value = Some(ExportNumeric(value.to_debug(kind))); - sb.push_str(desc.name()); + if self.timestamps { + expose.timestamp = Some(timestamp); + } + } - if !encoded_attributes.is_empty() - || !encoded_resource.is_empty() - || !encoded_inst_attributes.is_empty() - { - sb.push('{'); - sb.push_str(&encoded_resource); - if !encoded_inst_attributes.is_empty() && !encoded_resource.is_empty() { - sb.push(','); + let mut encoded_attributes = String::new(); + let iter = record.attributes().iter(); + if let (0, _) = iter.size_hint() { + encoded_attributes = record + .attributes() + .encoded(Some(self.attribute_encoder.as_ref())); } - sb.push_str(&encoded_inst_attributes); + + let mut sb = String::new(); + + sb.push_str(desc.name()); + if !encoded_attributes.is_empty() - && (!encoded_inst_attributes.is_empty() || !encoded_resource.is_empty()) + || !encoded_resource.is_empty() + || !encoded_inst_attributes.is_empty() { - sb.push(','); + sb.push('{'); + sb.push_str(&encoded_resource); + if !encoded_inst_attributes.is_empty() && !encoded_resource.is_empty() { + sb.push(','); + } + sb.push_str(&encoded_inst_attributes); + if !encoded_attributes.is_empty() + && (!encoded_inst_attributes.is_empty() || !encoded_resource.is_empty()) + { + sb.push(','); + } + sb.push_str(&encoded_attributes); + sb.push('}'); } - sb.push_str(&encoded_attributes); - sb.push('}'); - } - expose.name = sb; + expose.name = sb; - batch.lines.push(expose); - Ok(()) + batch.push(expose); + Ok(()) + }) })?; self.writer.lock().map_err(From::from).and_then(|mut w| { @@ -181,17 +178,8 @@ where } } -impl ExportKindFor for StdoutExporter -where - W: fmt::Debug + io::Write, -{ - fn export_kind_for(&self, descriptor: &Descriptor) -> ExportKind { - ExportKindSelector::Stateless.export_kind_for(descriptor) - } -} - /// A formatter for user-defined batch serialization. -pub struct Formatter(Box Result + Send + Sync>); +struct Formatter(Box) -> Result + Send + Sync>); impl fmt::Debug for Formatter { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!(f, "Formatter(closure)") @@ -200,46 +188,31 @@ impl fmt::Debug for Formatter { /// Configuration for a given stdout exporter. 
 #[derive(Debug)]
-pub struct StdoutExporterBuilder<W, S, I> {
-    spawn: S,
-    interval: I,
+pub struct StdoutExporterBuilder<W> {
     writer: Mutex<W>,
-    do_not_print_time: bool,
-    quantiles: Option<Vec<f64>>,
+    timestamps: bool,
     attribute_encoder: Option<Box<dyn Encoder + Send + Sync>>,
-    period: Option<Duration>,
     formatter: Option<Formatter>,
 }

-impl<W, S, SO, I, IS> StdoutExporterBuilder<W, S, I>
+impl<W> StdoutExporterBuilder<W>
 where
     W: io::Write + fmt::Debug + Send + Sync + 'static,
-    S: Fn(PushControllerWorker) -> SO,
-    I: Fn(Duration) -> IS,
-    IS: Stream + Send + 'static,
 {
-    fn builder(spawn: S, interval: I) -> StdoutExporterBuilder<io::Stdout, S, I> {
+    fn builder() -> StdoutExporterBuilder<io::Stdout> {
         StdoutExporterBuilder {
-            spawn,
-            interval,
             writer: Mutex::new(io::stdout()),
-            do_not_print_time: false,
-            quantiles: None,
+            timestamps: true,
             attribute_encoder: None,
-            period: None,
             formatter: None,
         }
     }

     /// Set the writer that this exporter will use.
-    pub fn with_writer<W2: io::Write>(self, writer: W2) -> StdoutExporterBuilder<W2, S, I> {
+    pub fn with_writer<W2: io::Write>(self, writer: W2) -> StdoutExporterBuilder<W2> {
         StdoutExporterBuilder {
-            spawn: self.spawn,
-            interval: self.interval,
             writer: Mutex::new(writer),
-            do_not_print_time: self.do_not_print_time,
-            quantiles: self.quantiles,
+            timestamps: self.timestamps,
             attribute_encoder: self.attribute_encoder,
-            period: self.period,
             formatter: self.formatter,
         }
     }

     /// Hide the timestamps from exported results
     pub fn with_do_not_print_time(self, do_not_print_time: bool) -> Self {
         StdoutExporterBuilder {
-            do_not_print_time,
+            timestamps: !do_not_print_time,
             ..self
         }
     }
@@ -263,18 +236,10 @@ where
         }
     }

-    /// Set the frequency in which metrics are exported.
-    pub fn with_period(self, period: Duration) -> Self {
-        StdoutExporterBuilder {
-            period: Some(period),
-            ..self
-        }
-    }
-
     /// Set a formatter for serializing export batch data
     pub fn with_formatter<T>(self, formatter: T) -> Self
     where
-        T: Fn(ExportBatch) -> Result<String> + Send + Sync + 'static,
+        T: Fn(Vec<ExportLine>) -> Result<String> + Send + Sync + 'static,
     {
         StdoutExporterBuilder {
             formatter: Some(Formatter(Box::new(formatter))),
@@ -283,27 +248,12 @@ where
         }
     }

-    /// Build a new push controller, returning errors if they arise.
-    pub fn init(mut self) -> PushController {
-        let period = self.period.take();
-        let exporter = StdoutExporter {
+    /// Build a new stdout exporter, returning errors if they arise.
+    pub fn build(self) -> Result<StdoutExporter<W>> {
+        Ok(StdoutExporter {
             writer: self.writer,
-            do_not_print_time: self.do_not_print_time,
+            timestamps: self.timestamps,
             attribute_encoder: self.attribute_encoder.unwrap_or_else(default_encoder),
             formatter: self.formatter,
-        };
-        let mut push_builder = controllers::push(
-            simple::Selector::Exact,
-            ExportKindSelector::Stateless,
-            exporter,
-            self.spawn,
-            self.interval,
-        );
-        if let Some(period) = period {
-            push_builder = push_builder.with_period(period);
-        }
-
-        let controller = push_builder.build();
-        global::set_meter_provider(controller.provider());
-        controller
+        })
     }
 }
diff --git a/opentelemetry-sdk/src/metrics/aggregators/array.rs b/opentelemetry-sdk/src/metrics/aggregators/array.rs
deleted file mode 100644
index b36996ee31..0000000000
--- a/opentelemetry-sdk/src/metrics/aggregators/array.rs
+++ /dev/null
@@ -1,156 +0,0 @@
-use crate::{
-    export::metrics::{Count, Points},
-    metrics::Aggregator,
-};
-use opentelemetry_api::metrics::{
-    AtomicNumber, Descriptor, MetricsError, Number, NumberKind, Result,
-};
-use std::any::Any;
-use std::mem;
-use std::sync::{Arc, Mutex};
-
-/// Create a new default `ArrayAggregator`
-pub fn array() -> ArrayAggregator {
-    ArrayAggregator::default()
-}
-
-/// An aggregator which stores metrics in an array.
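// Editor's note: a sketch, not part of this patch, showing the reworked
// stdout pipeline from the builder above. `build()` now simply returns the
// exporter instead of the old `init()` spinning up a push controller; wiring
// it into the new basic controller is assumed and elided here.
use opentelemetry_api::metrics::Result;
use opentelemetry_sdk::export::metrics::stdout;

fn build_stdout_exporter() -> Result<()> {
    let exporter = stdout()
        .with_do_not_print_time(true) // deterministic output, e.g. for tests
        .build()?;
    // hand `exporter` (a `MetricsExporter` + `TemporalitySelector`) to a
    // controller for periodic collection and export
    drop(exporter);
    Ok(())
}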
-#[derive(Debug, Default)] -pub struct ArrayAggregator { - inner: Mutex, -} - -impl Count for ArrayAggregator { - fn count(&self) -> Result { - self.inner - .lock() - .map_err(Into::into) - .map(|inner| inner.points.as_ref().map_or(0, |p| p.len() as u64)) - } -} - -impl Points for ArrayAggregator { - fn points(&self) -> Result> { - self.inner - .lock() - .map_err(Into::into) - .map(|inner| inner.points.as_ref().map_or_else(Vec::new, |p| p.0.clone())) - } -} - -impl Aggregator for ArrayAggregator { - fn update(&self, number: &Number, descriptor: &Descriptor) -> Result<()> { - self.inner.lock().map_err(Into::into).map(|mut inner| { - if let Some(points) = inner.points.as_mut() { - points.push(number.clone()); - } else { - inner.points = Some(PointsData::with_number(number.clone())); - } - inner.sum.fetch_add(descriptor.number_kind(), number) - }) - } - - fn synchronized_move( - &self, - other: &Arc, - descriptor: &Descriptor, - ) -> Result<()> { - if let Some(other) = other.as_any().downcast_ref::() { - other - .inner - .lock() - .map_err(Into::into) - .and_then(|mut other| { - self.inner.lock().map_err(Into::into).map(|mut inner| { - other.points = mem::take(&mut inner.points); - other.sum = mem::replace( - &mut inner.sum, - descriptor.number_kind().zero().to_atomic(), - ); - - // TODO: This sort should be done lazily, only when quantiles are - // requested. The SDK specification says you can use this aggregator to - // simply list values in the order they were received as an alternative to - // requesting quantile information. - if let Some(points) = &mut other.points { - points.sort(descriptor.number_kind()); - } - }) - }) - } else { - Err(MetricsError::InconsistentAggregator(format!( - "Expected {:?}, got: {:?}", - self, other - ))) - } - } - fn merge(&self, other: &(dyn Aggregator + Send + Sync), desc: &Descriptor) -> Result<()> { - if let Some(other) = other.as_any().downcast_ref::() { - self.inner.lock().map_err(Into::into).and_then(|mut inner| { - other.inner.lock().map_err(From::from).map(|other_inner| { - // Note: Current assumption is that `o` was checkpointed, - // therefore is already sorted. See the TODO above, since - // this is an open question. - inner - .sum - .fetch_add(desc.number_kind(), &other_inner.sum.load()); - match (inner.points.as_mut(), other_inner.points.as_ref()) { - (Some(points), Some(other_points)) => { - points.combine(desc.number_kind(), other_points) - } - (None, Some(other_points)) => inner.points = Some(other_points.clone()), - _ => (), - } - }) - }) - } else { - Err(MetricsError::InconsistentAggregator(format!( - "Expected {:?}, got: {:?}", - self, other - ))) - } - } - - fn as_any(&self) -> &dyn Any { - self - } -} - -#[derive(Debug, Default)] -struct Inner { - sum: AtomicNumber, - points: Option, -} - -#[derive(Clone, Debug, Default)] -struct PointsData(Vec); - -impl PointsData { - fn with_number(number: Number) -> Self { - PointsData(vec![number]) - } - - fn len(&self) -> usize { - self.0.len() - } - - fn push(&mut self, number: Number) { - self.0.push(number) - } - - fn sort(&mut self, kind: &NumberKind) { - match kind { - NumberKind::I64 => self.0.sort_by_key(|a| a.to_u64(kind)), - NumberKind::F64 => self.0.sort_by(|a, b| { - a.to_f64(kind) - .partial_cmp(&b.to_f64(kind)) - .expect("nan values should be rejected. 
This is a bug.") - }), - NumberKind::U64 => self.0.sort_by_key(|a| a.to_u64(kind)), - } - } - fn combine(&mut self, kind: &NumberKind, other: &PointsData) { - self.0.append(&mut other.0.clone()); - self.sort(kind) - } -} diff --git a/opentelemetry-sdk/src/metrics/aggregators/ddsketch.rs b/opentelemetry-sdk/src/metrics/aggregators/ddsketch.rs deleted file mode 100644 index 11401c1b44..0000000000 --- a/opentelemetry-sdk/src/metrics/aggregators/ddsketch.rs +++ /dev/null @@ -1,877 +0,0 @@ -//! DDSketch quantile sketch with relative-error guarantees. -//! DDSketch is a fast and fully-mergeable quantile sketch with relative-error guarantees. -//! -//! The main difference between this approach and previous art is DDSKetch employ a new method to -//! compute the error. Traditionally, the error rate of one sketch is evaluated by rank accuracy, -//! which can still generate a relative large variance if the dataset has long tail. -//! -//! DDSKetch, on the contrary, employs relative error rate that could work well on long tail dataset. -//! -//! The detail of this algorithm can be found in - -use std::{ - any::Any, - cmp::Ordering, - mem, - ops::AddAssign, - sync::{Arc, RwLock}, -}; - -use crate::export::metrics::{Aggregator, Count, Max, Min, MinMaxSumCount, Sum}; -use opentelemetry_api::metrics::{Descriptor, MetricsError, Number, NumberKind, Result}; - -const INITIAL_NUM_BINS: usize = 128; -const GROW_LEFT_BY: i64 = 128; - -const DEFAULT_MAX_NUM_BINS: i64 = 2048; -const DEFAULT_ALPHA: f64 = 0.01; -const DEFAULT_MIN_BOUNDARY: f64 = 1.0e-9; - -/// An aggregator to calculate quantile -pub fn ddsketch(config: &DdSketchConfig, kind: NumberKind) -> DdSketchAggregator { - DdSketchAggregator::new(config, kind) -} - -/// DDSKetch quantile sketch algorithm -/// -/// It can give q-quantiles with α-accurate for any 0<=q<=1. -/// -/// Here the accurate is calculated based on relative-error rate. Thus, the error guarantee adapts the scale of the output data. With relative error guarantee, the histogram can be more accurate in the area of low data density. For example, the long tail of response time data. -/// -/// For example, if the actual percentile is 1 second, and relative-error guarantee -/// is 2%, then the value should within the range of 0.98 to 1.02 -/// second. But if the actual percentile is 1 millisecond, with the same relative-error -/// guarantee, the value returned should within the range of 0.98 to 1.02 millisecond. -/// -/// In order to support both negative and positive inputs, DDSketchAggregator has two DDSketch store within itself to store the negative and positive inputs. 
-#[derive(Debug)] -pub struct DdSketchAggregator { - inner: RwLock, -} - -impl DdSketchAggregator { - /// Create a new DDSKetchAggregator that would yield a quantile with relative error rate less - /// than `alpha` - /// - /// The input should have a granularity larger than `key_epsilon` - pub fn new(config: &DdSketchConfig, kind: NumberKind) -> DdSketchAggregator { - DdSketchAggregator { - inner: RwLock::new(Inner::new(config, kind)), - } - } -} - -impl Default for DdSketchAggregator { - fn default() -> Self { - DdSketchAggregator::new( - &DdSketchConfig::new(DEFAULT_ALPHA, DEFAULT_MAX_NUM_BINS, DEFAULT_MIN_BOUNDARY), - NumberKind::F64, - ) - } -} - -impl Sum for DdSketchAggregator { - fn sum(&self) -> Result { - self.inner - .read() - .map_err(From::from) - .map(|inner| inner.sum.clone()) - } -} - -impl Min for DdSketchAggregator { - fn min(&self) -> Result { - self.inner - .read() - .map_err(From::from) - .map(|inner| inner.min_value.clone()) - } -} - -impl Max for DdSketchAggregator { - fn max(&self) -> Result { - self.inner - .read() - .map_err(From::from) - .map(|inner| inner.max_value.clone()) - } -} - -impl Count for DdSketchAggregator { - fn count(&self) -> Result { - self.inner - .read() - .map_err(From::from) - .map(|inner| inner.count()) - } -} - -impl MinMaxSumCount for DdSketchAggregator {} - -impl Aggregator for DdSketchAggregator { - fn update(&self, number: &Number, descriptor: &Descriptor) -> Result<()> { - self.inner - .write() - .map_err(From::from) - .map(|mut inner| inner.add(number, descriptor.number_kind())) - } - - fn synchronized_move( - &self, - destination: &Arc<(dyn Aggregator + Send + Sync)>, - descriptor: &Descriptor, - ) -> Result<()> { - if let Some(other) = destination.as_any().downcast_ref::() { - other - .inner - .write() - .map_err(From::from) - .and_then(|mut other| { - self.inner.write().map_err(From::from).map(|mut inner| { - let kind = descriptor.number_kind(); - other.max_value = mem::replace(&mut inner.max_value, kind.zero()); - other.min_value = mem::replace(&mut inner.min_value, kind.zero()); - other.key_epsilon = mem::take(&mut inner.key_epsilon); - other.offset = mem::take(&mut inner.offset); - other.gamma = mem::take(&mut inner.gamma); - other.gamma_ln = mem::take(&mut inner.gamma_ln); - other.positive_store = mem::take(&mut inner.positive_store); - other.negative_store = mem::take(&mut inner.negative_store); - other.sum = mem::replace(&mut inner.sum, kind.zero()); - }) - }) - } else { - Err(MetricsError::InconsistentAggregator(format!( - "Expected {:?}, got: {:?}", - self, destination - ))) - } - } - - fn merge( - &self, - other: &(dyn Aggregator + Send + Sync), - _descriptor: &Descriptor, - ) -> Result<()> { - if let Some(other) = other.as_any().downcast_ref::() { - self.inner.write() - .map_err(From::from) - .and_then(|mut inner| { - other.inner.read() - .map_err(From::from) - .and_then(|other| { - // assert that it can merge - if inner.positive_store.max_num_bins != other.positive_store.max_num_bins { - return Err(MetricsError::InconsistentAggregator(format!( - "When merging two DDSKetchAggregators, their max number of bins must be the same. Expect max number of bins to be {:?}, but get {:?}", inner.positive_store.max_num_bins, other.positive_store.max_num_bins - ))); - } - if inner.negative_store.max_num_bins != other.negative_store.max_num_bins { - return Err(MetricsError::InconsistentAggregator(format!( - "When merging two DDSKetchAggregators, their max number of bins must be the same. 
Expect max number of bins to be {:?}, but get {:?}", inner.negative_store.max_num_bins, other.negative_store.max_num_bins - ))); - } - - - if (inner.gamma - other.gamma).abs() > std::f64::EPSILON { - return Err(MetricsError::InconsistentAggregator(format!( - "When merging two DDSKetchAggregators, their gamma must be the same. Expect max number of bins to be {:?}, but get {:?}", inner.gamma, other.gamma - ))); - } - - if other.count() == 0 { - return Ok(()); - } - - if inner.count() == 0 { - inner.positive_store.merge(&other.positive_store); - inner.negative_store.merge(&other.negative_store); - inner.sum = other.sum.clone(); - inner.min_value = other.min_value.clone(); - inner.max_value = other.max_value.clone(); - return Ok(()); - } - - inner.positive_store.merge(&other.positive_store); - inner.negative_store.merge(&other.negative_store); - - inner.sum = match inner.kind { - NumberKind::F64 => - Number::from(inner.sum.to_f64(&inner.kind) + other.sum.to_f64(&other.kind)), - NumberKind::U64 => Number::from(inner.sum.to_u64(&inner.kind) + other.sum.to_u64(&other.kind)), - NumberKind::I64 => Number::from(inner.sum.to_i64(&inner.kind) + other.sum.to_i64(&other.kind)) - }; - - if inner.min_value.partial_cmp(&inner.kind, &other.min_value) == Some(Ordering::Greater) { - inner.min_value = other.min_value.clone(); - }; - - if inner.max_value.partial_cmp(&inner.kind, &other.max_value) == Some(Ordering::Less) { - inner.max_value = other.max_value.clone(); - } - - Ok(()) - }) - }) - } else { - Err(MetricsError::InconsistentAggregator(format!( - "Expected {:?}, got: {:?}", - self, other - ))) - } - } - - fn as_any(&self) -> &dyn Any { - self - } -} - -/// DDSKetch Configuration. -#[derive(Debug)] -pub struct DdSketchConfig { - alpha: f64, - max_num_bins: i64, - key_epsilon: f64, -} - -impl DdSketchConfig { - /// Create a new DDSKetch config - pub fn new(alpha: f64, max_num_bins: i64, key_epsilon: f64) -> Self { - DdSketchConfig { - alpha, - max_num_bins, - key_epsilon, - } - } -} - -/// DDSKetch implementation. -/// -/// Note that Inner is not thread-safe. All operation should be protected by a lock or other -/// synchronization. -/// -/// Inner will also convert all Number into actual primitive type and back. -/// -/// According to the paper, the DDSKetch only support positive number. Inner support -/// either positive or negative number. But cannot yield actual result when input has -/// both positive and negative number. -#[derive(Debug)] -struct Inner { - positive_store: Store, - negative_store: Store, - kind: NumberKind, - // sum of all value within store - sum: Number, - // γ = (1 + α)/(1 - α) - gamma: f64, - // ln(γ) - gamma_ln: f64, - // The epsilon when map value to bin key. Any value between [-key_epsilon, key_epsilon] will - // be mapped to bin key 0. Must be a positive number. - key_epsilon: f64, - // offset is here to ensure that keys for positive numbers that are larger than min_value are - // greater than or equal to 1 while the keys for negative numbers are less than or equal to -1. - offset: i64, - - // minimum number that in store. - min_value: Number, - // maximum number that in store. 
- max_value: Number, -} - -impl Inner { - fn new(config: &DdSketchConfig, kind: NumberKind) -> Inner { - let gamma: f64 = 1.0 + 2.0 * config.alpha / (1.0 - config.alpha); - let mut inner = Inner { - positive_store: Store::new(config.max_num_bins / 2), - negative_store: Store::new(config.max_num_bins / 2), - min_value: kind.max(), - max_value: kind.min(), - sum: kind.zero(), - gamma, - gamma_ln: gamma.ln(), - key_epsilon: config.key_epsilon, - offset: 0, - kind, - }; - // reset offset based on key_epsilon - inner.offset = -(inner.log_gamma(inner.key_epsilon)).ceil() as i64 + 1i64; - inner - } - - fn add(&mut self, v: &Number, kind: &NumberKind) { - let key = self.key(v, kind); - match v.partial_cmp(kind, &Number::from(0.0)) { - Some(Ordering::Greater) | Some(Ordering::Equal) => { - self.positive_store.add(key); - } - Some(Ordering::Less) => { - self.negative_store.add(key); - } - _ => { - // if return none. Do nothing and return - return; - } - } - - // update min and max - if self.min_value.partial_cmp(&self.kind, v) == Some(Ordering::Greater) { - self.min_value = v.clone(); - } - - if self.max_value.partial_cmp(&self.kind, v) == Some(Ordering::Less) { - self.max_value = v.clone(); - } - - match &self.kind { - NumberKind::I64 => { - self.sum = Number::from(self.sum.to_i64(&self.kind) + v.to_i64(kind)); - } - NumberKind::U64 => { - self.sum = Number::from(self.sum.to_u64(&self.kind) + v.to_u64(kind)); - } - NumberKind::F64 => { - self.sum = Number::from(self.sum.to_f64(&self.kind) + v.to_f64(kind)); - } - } - } - - fn key(&self, num: &Number, kind: &NumberKind) -> i64 { - if num.to_f64(kind) < -self.key_epsilon { - let positive_num = match kind { - NumberKind::F64 => Number::from(-num.to_f64(kind)), - NumberKind::U64 => Number::from(num.to_u64(kind)), - NumberKind::I64 => Number::from(-num.to_i64(kind)), - }; - (-self.log_gamma(positive_num.to_f64(kind)).ceil()) as i64 - self.offset - } else if num.to_f64(kind) > self.key_epsilon { - self.log_gamma(num.to_f64(kind)).ceil() as i64 + self.offset - } else { - 0i64 - } - } - - /// get the index of the bucket based on num - fn log_gamma(&self, num: f64) -> f64 { - num.ln() / self.gamma_ln - } - - fn count(&self) -> u64 { - self.negative_store.count + self.positive_store.count - } -} - -#[derive(Debug)] -struct Store { - bins: Vec, - count: u64, - min_key: i64, - max_key: i64, - // maximum number of bins Store can have. - // In the worst case, the bucket can grow as large as the number of the elements inserted into. - // max_num_bins helps control the number of bins. - max_num_bins: i64, -} - -impl Default for Store { - fn default() -> Self { - Store { - bins: vec![0; INITIAL_NUM_BINS], - count: 0, - min_key: 0, - max_key: 0, - max_num_bins: DEFAULT_MAX_NUM_BINS, - } - } -} - -/// DDSKetchInner stores the data -impl Store { - fn new(max_num_bins: i64) -> Store { - Store { - bins: vec![ - 0; - if max_num_bins as usize > INITIAL_NUM_BINS { - INITIAL_NUM_BINS - } else { - max_num_bins as usize - } - ], - count: 0u64, - min_key: 0i64, - max_key: 0i64, - max_num_bins, - } - } - - /// Add count based on key. - /// - /// If key is not in [min_key, max_key], we will expand to left or right - /// - /// - /// The bins are essentially working in a round-robin fashion where we can use all space in bins - /// to represent any continuous space within length. That's why we need to offset the key - /// with `min_key` so that we get the actual bin index. 
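The key-to-index translation described above is a subtraction against `min_key`, clamped at zero; a tiny standalone sketch with hypothetical values, not the `Store`'s real state:

    // Mirrors the index computation in `Store::add` below: keys are offset
    // by `min_key`, and anything left of the window maps to bin 0.
    fn bin_index(key: i64, min_key: i64) -> usize {
        (key - min_key).max(0) as usize
    }

    fn main() {
        let min_key = -10;
        assert_eq!(bin_index(-10, min_key), 0); // left edge of the window
        assert_eq!(bin_index(0, min_key), 10);
        assert_eq!(bin_index(-30, min_key), 0); // clamped, as in `add`
    }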
- fn add(&mut self, key: i64) { - if self.count == 0 { - self.max_key = key; - self.min_key = key - self.bins.len() as i64 + 1 - } - - if key < self.min_key { - self.grow_left(key) - } else if key > self.max_key { - self.grow_right(key) - } - let idx = if key - self.min_key < 0 { - 0 - } else { - key - self.min_key - }; - // we unwrap here because grow_left or grow_right will make sure the idx is less than vector size - let bin_count = self.bins.get_mut(idx as usize).unwrap(); - *bin_count += 1; - self.count += 1; - } - - fn grow_left(&mut self, key: i64) { - if self.min_key < key || self.bins.len() >= self.max_num_bins as usize { - return; - } - - let min_key = if self.max_key - key >= self.max_num_bins { - self.max_key - self.max_num_bins + 1 - } else { - let mut min_key = self.min_key; - while min_key > key { - min_key -= GROW_LEFT_BY; - } - min_key - }; - - // The new vector will contain three parts. - // First part is all 0, which is the part expended - // Second part is from existing bins. - // Third part is what's left. - let expected_len = (self.max_key - min_key + 1) as usize; - let mut new_bins = vec![0u64; expected_len]; - let old_bin_slice = &mut new_bins[(self.min_key - min_key) as usize..]; - old_bin_slice.copy_from_slice(&self.bins); - - self.bins = new_bins; - self.min_key = min_key; - } - - fn grow_right(&mut self, key: i64) { - if self.max_key > key { - return; - } - - if key - self.max_key >= self.max_num_bins { - // if currently key minus currently max key is larger than maximum number of bins. - // Move all elements in current bins into the first bin - self.bins = vec![0; self.max_num_bins as usize]; - self.max_key = key; - self.min_key = key - self.max_num_bins + 1; - self.bins.get_mut(0).unwrap().add_assign(self.count); - } else if key - self.min_key >= self.max_num_bins { - let min_key = key - self.max_num_bins + 1; - let upper_bound = if min_key < self.max_key + 1 { - min_key - } else { - self.max_key + 1 - } - self.min_key; - let n = self.bins.iter().take(upper_bound as usize).sum::(); - - if self.bins.len() < self.max_num_bins as usize { - let mut new_bins = vec![0; self.max_num_bins as usize]; - new_bins[0..self.bins.len() - (min_key - self.min_key) as usize] - .as_mut() - .copy_from_slice(&self.bins[(min_key - self.min_key) as usize..]); - self.bins = new_bins; - } else { - // bins length is equal to max number of bins - self.bins.drain(0..(min_key - self.min_key) as usize); - if self.max_num_bins > self.max_key - min_key + 1 { - self.bins.resize( - self.bins.len() - + (self.max_num_bins - (self.max_key - min_key + 1)) as usize, - 0, - ) - } - } - self.max_key = key; - self.min_key = min_key; - self.bins.get_mut(0).unwrap().add_assign(n); - } else { - let mut new_bin = vec![0; (key - self.min_key + 1) as usize]; - new_bin[0..self.bins.len()] - .as_mut() - .copy_from_slice(&self.bins); - self.bins = new_bin; - self.max_key = key; - } - } - - /// Merge two stores - fn merge(&mut self, other: &Store) { - if self.count == 0 { - return; - } - if other.count == 0 { - self.bins = other.bins.clone(); - self.min_key = other.min_key; - self.max_key = other.max_key; - self.count = other.count; - } - - if self.max_key > other.max_key { - if other.min_key < self.min_key { - self.grow_left(other.min_key); - } - let start = if other.min_key > self.min_key { - other.min_key - } else { - self.min_key - } as usize; - for i in start..other.max_key as usize { - self.bins[i - self.min_key as usize] = other.bins[i - other.min_key as usize]; - } - let mut n = 0; - for i in 
other.min_key as usize..self.min_key as usize { - n += other.bins[i - other.min_key as usize] - } - self.bins[0] += n; - } else if other.min_key < self.min_key { - let mut tmp_bins = vec![0u64; other.bins.len()]; - tmp_bins.as_mut_slice().copy_from_slice(&other.bins); - - for i in self.min_key as usize..self.max_key as usize { - tmp_bins[i - other.min_key as usize] += self.bins[i - self.min_key as usize]; - } - - self.bins = tmp_bins; - self.max_key = other.max_key; - self.min_key = other.min_key; - } else { - self.grow_right(other.max_key); - for i in other.min_key as usize..(other.max_key + 1) as usize { - self.bins[i - self.min_key as usize] += other.bins[i - other.min_key as usize]; - } - } - - self.count += other.count; - } -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::export::metrics::{Aggregator, Count, Max, Min, Sum}; - use opentelemetry_api::metrics::{Descriptor, InstrumentKind, Number, NumberKind}; - use rand_distr::{Distribution, Exp, LogNormal, Normal}; - use std::cmp::Ordering; - use std::sync::Arc; - - const TEST_MAX_BINS: i64 = 1024; - const TEST_ALPHA: f64 = 0.01; - const TEST_KEY_EPSILON: f64 = 1.0e-9; - - // Test utils - - struct Dataset { - data: Vec, - kind: NumberKind, - } - - impl Dataset { - fn from_f64_vec(data: Vec) -> Dataset { - Dataset { - data: data.into_iter().map(Number::from).collect::>(), - kind: NumberKind::F64, - } - } - - fn from_u64_vec(data: Vec) -> Dataset { - Dataset { - data: data.into_iter().map(Number::from).collect::>(), - kind: NumberKind::U64, - } - } - - fn from_i64_vec(data: Vec) -> Dataset { - Dataset { - data: data.into_iter().map(Number::from).collect::>(), - kind: NumberKind::I64, - } - } - - fn sum(&self) -> Number { - match self.kind { - NumberKind::F64 => { - Number::from(self.data.iter().map(|e| e.to_f64(&self.kind)).sum::()) - } - NumberKind::U64 => { - Number::from(self.data.iter().map(|e| e.to_u64(&self.kind)).sum::()) - } - NumberKind::I64 => { - Number::from(self.data.iter().map(|e| e.to_i64(&self.kind)).sum::()) - } - } - } - } - - fn generate_linear_dataset_f64(start: f64, step: f64, num: usize) -> Vec { - let mut vec = Vec::with_capacity(num); - for i in 0..num { - vec.push((start + i as f64 * step) as f64); - } - vec - } - - fn generate_linear_dataset_u64(start: u64, step: u64, num: usize) -> Vec { - let mut vec = Vec::with_capacity(num); - for i in 0..num { - vec.push(start + i as u64 * step); - } - vec - } - - fn generate_linear_dataset_i64(start: i64, step: i64, num: usize) -> Vec { - let mut vec = Vec::with_capacity(num); - for i in 0..num { - vec.push(start + i as i64 * step); - } - vec - } - - /// generate a dataset with normal distribution. Return sorted dataset. - fn generate_normal_dataset(mean: f64, stddev: f64, num: usize) -> Vec { - let normal = Normal::new(mean, stddev).unwrap(); - let mut data = Vec::with_capacity(num); - for _ in 0..num { - data.push(normal.sample(&mut rand::thread_rng())); - } - data.as_mut_slice() - .sort_by(|a, b| a.partial_cmp(b).unwrap()); - data - } - - /// generate a dataset with log normal distribution. Return sorted dataset. 
- fn generate_log_normal_dataset(mean: f64, stddev: f64, num: usize) -> Vec { - let normal = LogNormal::new(mean, stddev).unwrap(); - let mut data = Vec::with_capacity(num); - for _ in 0..num { - data.push(normal.sample(&mut rand::thread_rng())); - } - data.as_mut_slice() - .sort_by(|a, b| a.partial_cmp(b).unwrap()); - data - } - - fn generate_exponential_dataset(rate: f64, num: usize) -> Vec { - let exponential = Exp::new(rate).unwrap(); - let mut data = Vec::with_capacity(num); - for _ in 0..num { - data.push(exponential.sample(&mut rand::thread_rng())); - } - data.as_mut_slice() - .sort_by(|a, b| a.partial_cmp(b).unwrap()); - data - } - - /// Insert all element of data into ddsketch and assert the quantile result is within the error range. - /// Note that data must be sorted. - fn evaluate_sketch(dataset: Dataset) { - let kind = &dataset.kind; - let ddsketch = DdSketchAggregator::new( - &DdSketchConfig::new(TEST_ALPHA, TEST_MAX_BINS, TEST_KEY_EPSILON), - kind.clone(), - ); - let descriptor = Descriptor::new( - "test".to_string(), - "test", - None, - None, - InstrumentKind::Histogram, - kind.clone(), - ); - - for i in &dataset.data { - let _ = ddsketch.update(i, &descriptor); - } - - assert_eq!( - ddsketch - .min() - .unwrap() - .partial_cmp(kind, dataset.data.get(0).unwrap()), - Some(Ordering::Equal) - ); - assert_eq!( - ddsketch - .max() - .unwrap() - .partial_cmp(kind, dataset.data.last().unwrap()), - Some(Ordering::Equal) - ); - assert_eq!( - ddsketch.sum().unwrap().partial_cmp(kind, &dataset.sum()), - Some(Ordering::Equal) - ); - assert_eq!(ddsketch.count().unwrap(), dataset.data.len() as u64); - } - - // Test basic operation of Store - - /// First set max_num_bins < number of keys, test to see if the store will collapse to left - /// most bin instead of expending beyond the max_num_bins - #[test] - fn test_insert_into_store() { - let mut store = Store::new(200); - for i in -100..1300 { - store.add(i) - } - assert_eq!(store.count, 1400); - assert_eq!(store.bins.len(), 200); - } - - /// Test to see if copy_from_slice will panic because the range size is different in left and right - #[test] - fn test_grow_right() { - let mut store = Store::new(150); - for i in &[-100, -50, 150, -20, 10] { - store.add(*i) - } - assert_eq!(store.count, 5); - } - - /// Test to see if copy_from_slice will panic because the range size is different in left and right - #[test] - fn test_grow_left() { - let mut store = Store::new(150); - for i in &[500, 150, 10] { - store.add(*i) - } - assert_eq!(store.count, 3); - } - - /// Before merge, store1 should hold 300 bins that looks like [201,1,1,1,...], - /// store 2 should hold 200 bins looks like [301,1,1,...] 
- /// After merge, store 1 should still hold 300 bins with following distribution - /// - /// index [0,0] -> 201 - /// - /// index [1,99] -> 1 - /// - /// index [100, 100] -> 302 - /// - /// index [101, 299] -> 2 - #[test] - fn test_merge_stores() { - let mut store1 = Store::new(300); - let mut store2 = Store::new(200); - for i in 500..1000 { - store1.add(i); - store2.add(i); - } - store1.merge(&store2); - assert_eq!(store1.bins.get(0), Some(&201)); - assert_eq!(&store1.bins[1..100], vec![1u64; 99].as_slice()); - assert_eq!(store1.bins[100], 302); - assert_eq!(&store1.bins[101..], vec![2u64; 199].as_slice()); - assert_eq!(store1.count, 1000); - } - - // Test ddsketch with different distribution - - #[test] - fn test_linear_distribution() { - // test u64 - let mut dataset = Dataset::from_u64_vec(generate_linear_dataset_u64(12, 3, 5000)); - evaluate_sketch(dataset); - - // test i64 - dataset = Dataset::from_i64_vec(generate_linear_dataset_i64(-12, 3, 5000)); - evaluate_sketch(dataset); - - // test f64 - dataset = Dataset::from_f64_vec(generate_linear_dataset_f64(-12.0, 3.0, 5000)); - evaluate_sketch(dataset); - } - - #[test] - fn test_normal_distribution() { - let mut dataset = Dataset::from_f64_vec(generate_normal_dataset(150.0, 1.2, 100)); - evaluate_sketch(dataset); - - dataset = Dataset::from_f64_vec(generate_normal_dataset(-30.0, 4.4, 100)); - evaluate_sketch(dataset); - } - - #[test] - fn test_log_normal_distribution() { - let dataset = Dataset::from_f64_vec(generate_log_normal_dataset(120.0, 0.5, 100)); - evaluate_sketch(dataset); - } - - #[test] - fn test_exponential_distribution() { - let dataset = Dataset::from_f64_vec(generate_exponential_dataset(2.0, 500)); - evaluate_sketch(dataset); - } - - // Test Aggregator operation of DDSketch - #[test] - fn test_synchronized_move() { - let dataset = Dataset::from_f64_vec(generate_normal_dataset(1.0, 3.5, 100)); - let kind = &dataset.kind; - let ddsketch = DdSketchAggregator::new( - &DdSketchConfig::new(TEST_ALPHA, TEST_MAX_BINS, TEST_KEY_EPSILON), - kind.clone(), - ); - let descriptor = Descriptor::new( - "test".to_string(), - "test", - None, - None, - InstrumentKind::Histogram, - kind.clone(), - ); - for i in &dataset.data { - let _ = ddsketch.update(i, &descriptor); - } - let expected_sum = ddsketch.sum().unwrap().to_f64(&NumberKind::F64); - let expected_count = ddsketch.count().unwrap(); - let expected_min = ddsketch.min().unwrap().to_f64(&NumberKind::F64); - let expected_max = ddsketch.max().unwrap().to_f64(&NumberKind::F64); - - let moved_ddsketch: Arc<(dyn Aggregator + Send + Sync)> = - Arc::new(DdSketchAggregator::new( - &DdSketchConfig::new(TEST_ALPHA, TEST_MAX_BINS, TEST_KEY_EPSILON), - NumberKind::F64, - )); - let _ = ddsketch - .synchronized_move(&moved_ddsketch, &descriptor) - .expect("Fail to sync move"); - let moved_ddsketch = moved_ddsketch - .as_any() - .downcast_ref::() - .expect("Fail to cast dyn Aggregator down to DDSketchAggregator"); - - // assert sum, max, min and count - assert!( - (moved_ddsketch.max().unwrap().to_f64(&NumberKind::F64) - expected_max).abs() - < std::f64::EPSILON - ); - assert!( - (moved_ddsketch.min().unwrap().to_f64(&NumberKind::F64) - expected_min).abs() - < std::f64::EPSILON - ); - assert!( - (moved_ddsketch.sum().unwrap().to_f64(&NumberKind::F64) - expected_sum).abs() - < std::f64::EPSILON - ); - assert_eq!(moved_ddsketch.count().unwrap(), expected_count); - } -} diff --git a/opentelemetry-sdk/src/metrics/aggregators/histogram.rs b/opentelemetry-sdk/src/metrics/aggregators/histogram.rs index 
fd7484e9e6..f6ad571fc9 100644
--- a/opentelemetry-sdk/src/metrics/aggregators/histogram.rs
+++ b/opentelemetry-sdk/src/metrics/aggregators/histogram.rs
@@ -1,13 +1,17 @@
-use crate::export::metrics::{Buckets, Count, Histogram, Sum};
-use crate::metrics::export::metrics::Aggregator;
-use opentelemetry_api::metrics::{
-    AtomicNumber, Descriptor, MetricsError, Number, NumberKind, Result,
+use crate::export::metrics::aggregation::{
+    Aggregation, AggregationKind, Buckets, Count, Histogram, Sum,
 };
+use crate::metrics::{
+    aggregators::Aggregator,
+    sdk_api::{AtomicNumber, Descriptor, Number, NumberKind},
+};
+use opentelemetry_api::metrics::{MetricsError, Result};
+use opentelemetry_api::Context;
 use std::mem;
 use std::sync::{Arc, RwLock};

 /// Create a new histogram for the given descriptor with the given boundaries
-pub fn histogram(_desc: &Descriptor, boundaries: &[f64]) -> HistogramAggregator {
+pub fn histogram(boundaries: &[f64]) -> HistogramAggregator {
     let mut sorted_boundaries = boundaries.to_owned();
     sorted_boundaries.sort_by(|a, b| a.partial_cmp(b).unwrap());
     let state = State::empty(&sorted_boundaries);
@@ -77,8 +81,17 @@ impl Histogram for HistogramAggregator {
     }
 }

+impl Aggregation for HistogramAggregator {
+    fn kind(&self) -> &AggregationKind {
+        &AggregationKind::HISTOGRAM
+    }
+}
+
 impl Aggregator for HistogramAggregator {
-    fn update(&self, number: &Number, descriptor: &Descriptor) -> Result<()> {
+    fn aggregation(&self) -> &dyn Aggregation {
+        self
+    }
+    fn update(&self, _cx: &Context, number: &Number, descriptor: &Descriptor) -> Result<()> {
         self.inner.write().map_err(From::from).map(|mut inner| {
             let kind = descriptor.number_kind();
             let as_float = number.to_f64(kind);
@@ -100,7 +113,7 @@ impl Aggregator for HistogramAggregator {
     fn synchronized_move(
         &self,
         other: &Arc,
-        _descriptor: &crate::metrics::Descriptor,
+        _descriptor: &Descriptor,
     ) -> Result<()> {
         if let Some(other) = other.as_any().downcast_ref::() {
             self.inner
diff --git a/opentelemetry-sdk/src/metrics/aggregators/last_value.rs b/opentelemetry-sdk/src/metrics/aggregators/last_value.rs
index 20047714aa..81de9d086a 100644
--- a/opentelemetry-sdk/src/metrics/aggregators/last_value.rs
+++ b/opentelemetry-sdk/src/metrics/aggregators/last_value.rs
@@ -1,5 +1,10 @@
-use crate::export::metrics::{Aggregator, LastValue};
-use opentelemetry_api::metrics::{Descriptor, MetricsError, Number, Result};
+use crate::export::metrics::aggregation::{Aggregation, AggregationKind, LastValue};
+use crate::metrics::{
+    aggregators::Aggregator,
+    sdk_api::{Descriptor, Number},
+};
+use opentelemetry_api::metrics::{MetricsError, Result};
+use opentelemetry_api::Context;
 use std::any::Any;
 use std::sync::{Arc, Mutex};
 use std::time::SystemTime;
@@ -17,8 +22,18 @@ pub struct LastValueAggregator {
     inner: Mutex,
 }

+impl Aggregation for LastValueAggregator {
+    fn kind(&self) -> &AggregationKind {
+        &AggregationKind::LAST_VALUE
+    }
+}
+
 impl Aggregator for LastValueAggregator {
-    fn update(&self, number: &Number, _descriptor: &Descriptor) -> Result<()> {
+    fn aggregation(&self) -> &dyn Aggregation {
+        self
+    }
+
+    fn update(&self, _cx: &Context, number: &Number, _descriptor: &Descriptor) -> Result<()> {
         self.inner.lock().map_err(Into::into).map(|mut inner| {
             inner.state = Some(LastValueData {
                 value: number.clone(),
@@ -26,6 +41,7 @@ impl Aggregator for LastValueAggregator {
             });
         })
     }
+
     fn synchronized_move(
         &self,
         other: &Arc,
diff --git a/opentelemetry-sdk/src/metrics/aggregators/min_max_sum_count.rs
b/opentelemetry-sdk/src/metrics/aggregators/min_max_sum_count.rs deleted file mode 100644 index f8346db043..0000000000 --- a/opentelemetry-sdk/src/metrics/aggregators/min_max_sum_count.rs +++ /dev/null @@ -1,165 +0,0 @@ -use crate::export::metrics::{Aggregator, Count, Max, Min, MinMaxSumCount, Sum}; -use opentelemetry_api::metrics::{AtomicNumber, Descriptor, MetricsError, Number, Result}; -use std::any::Any; -use std::cmp::Ordering; -use std::sync::{Arc, Mutex}; - -/// Create a new `MinMaxSumCountAggregator` -pub fn min_max_sum_count(_descriptor: &Descriptor) -> MinMaxSumCountAggregator { - MinMaxSumCountAggregator { - inner: Mutex::new(Inner { state: None }), - } -} - -#[derive(Debug)] -struct Inner { - state: Option, -} - -/// An `Aggregator` that aggregates events that form a distribution, keeping -/// only the min, max, sum, and count. -#[derive(Debug)] -pub struct MinMaxSumCountAggregator { - inner: Mutex, -} - -impl Min for MinMaxSumCountAggregator { - fn min(&self) -> Result { - self.inner.lock().map_err(From::from).map(|inner| { - inner - .state - .as_ref() - .map_or(0u64.into(), |state| state.min.load()) - }) - } -} - -impl Max for MinMaxSumCountAggregator { - fn max(&self) -> Result { - self.inner.lock().map_err(From::from).map(|inner| { - inner - .state - .as_ref() - .map_or(0u64.into(), |state| state.max.load()) - }) - } -} - -impl Sum for MinMaxSumCountAggregator { - fn sum(&self) -> Result { - self.inner.lock().map_err(From::from).map(|inner| { - inner - .state - .as_ref() - .map_or(0u64.into(), |state| state.sum.load()) - }) - } -} - -impl Count for MinMaxSumCountAggregator { - fn count(&self) -> Result { - self.inner - .lock() - .map_err(From::from) - .map(|inner| inner.state.as_ref().map_or(0u64, |state| state.count)) - } -} - -impl MinMaxSumCount for MinMaxSumCountAggregator {} - -impl Aggregator for MinMaxSumCountAggregator { - fn update(&self, number: &Number, descriptor: &Descriptor) -> Result<()> { - self.inner - .lock() - .map(|mut inner| { - if let Some(state) = &mut inner.state { - let kind = descriptor.number_kind(); - - state.count = state.count.saturating_add(1); - state.sum.fetch_add(kind, number); - if number.partial_cmp(kind, &state.min.load()) == Some(Ordering::Less) { - state.min = number.to_atomic(); - } - if number.partial_cmp(kind, &state.max.load()) == Some(Ordering::Greater) { - state.max = number.to_atomic(); - } - } else { - inner.state = Some(State { - count: 1, - sum: number.to_atomic(), - min: number.to_atomic(), - max: number.to_atomic(), - }) - } - }) - .map_err(From::from) - } - - fn synchronized_move( - &self, - other: &Arc, - _descriptor: &Descriptor, - ) -> Result<()> { - if let Some(other) = other.as_any().downcast_ref::() { - self.inner.lock().map_err(From::from).and_then(|mut inner| { - other.inner.lock().map_err(From::from).map(|mut oi| { - oi.state = inner.state.take(); - }) - }) - } else { - Err(MetricsError::InconsistentAggregator(format!( - "Expected {:?}, got: {:?}", - self, other - ))) - } - } - - fn merge(&self, aggregator: &(dyn Aggregator + Send + Sync), desc: &Descriptor) -> Result<()> { - if let Some(other) = aggregator.as_any().downcast_ref::() { - self.inner.lock().map_err(From::from).and_then(|mut inner| { - other.inner.lock().map_err(From::from).map(|oi| { - match (inner.state.as_mut(), oi.state.as_ref()) { - (None, Some(other_checkpoint)) => { - inner.state = Some(other_checkpoint.clone()); - } - (Some(_), None) | (None, None) => (), - (Some(state), Some(other)) => { - state.count = 
state.count.saturating_add(other.count); - state.sum.fetch_add(desc.number_kind(), &other.sum.load()); - - let other_min = other.min.load(); - let other_max = other.max.load(); - if state.min.load().partial_cmp(desc.number_kind(), &other_min) - == Some(Ordering::Greater) - { - state.min.store(&other_min); - } - if state.max.load().partial_cmp(desc.number_kind(), &other_max) - == Some(Ordering::Less) - { - state.max.store(&other_max); - } - } - } - }) - }) - } else { - Err(MetricsError::InconsistentAggregator(format!( - "Expected {:?}, got: {:?}", - self, aggregator - ))) - } - } - - fn as_any(&self) -> &dyn Any { - self - } -} - -#[derive(Clone, Debug)] -struct State { - count: u64, - sum: AtomicNumber, - min: AtomicNumber, - max: AtomicNumber, -} diff --git a/opentelemetry-sdk/src/metrics/aggregators/mod.rs b/opentelemetry-sdk/src/metrics/aggregators/mod.rs index 3f03d01ed6..6ddddc5f47 100644 --- a/opentelemetry-sdk/src/metrics/aggregators/mod.rs +++ b/opentelemetry-sdk/src/metrics/aggregators/mod.rs @@ -1,20 +1,22 @@ //! Metric Aggregators -use opentelemetry_api::metrics::{ - Descriptor, InstrumentKind, MetricsError, Number, NumberKind, Result, +use core::fmt; +use std::{any::Any, sync::Arc}; + +use crate::{ + export::metrics::aggregation::Aggregation, + metrics::sdk_api::{Descriptor, InstrumentKind, Number, NumberKind}, +}; +use opentelemetry_api::{ + metrics::{MetricsError, Result}, + Context, }; -mod array; -mod ddsketch; mod histogram; mod last_value; -mod min_max_sum_count; mod sum; -pub use array::{array, ArrayAggregator}; -pub use ddsketch::{ddsketch, DdSketchAggregator, DdSketchConfig}; pub use histogram::{histogram, HistogramAggregator}; pub use last_value::{last_value, LastValueAggregator}; -pub use min_max_sum_count::{min_max_sum_count, MinMaxSumCountAggregator}; pub use sum::{sum, SumAggregator}; /// RangeTest is a common routine for testing for valid input values. This @@ -27,7 +29,7 @@ pub fn range_test(number: &Number, descriptor: &Descriptor) -> Result<()> { } match descriptor.instrument_kind() { - InstrumentKind::Counter | InstrumentKind::SumObserver + InstrumentKind::Counter | InstrumentKind::CounterObserver if descriptor.number_kind() == &NumberKind::F64 => { if number.is_negative(descriptor.number_kind()) { @@ -38,3 +40,58 @@ pub fn range_test(number: &Number, descriptor: &Descriptor) -> Result<()> { }; Ok(()) } + +/// Aggregator implements a specific aggregation behavior, i.e., a behavior to +/// track a sequence of updates to an instrument. Sum-only instruments commonly +/// use a simple Sum aggregator, but for the distribution instruments +/// (Histogram, ValueObserver) there are a number of possible aggregators +/// with different cost and accuracy tradeoffs. +/// +/// Note that any Aggregator may be attached to any instrument--this is the +/// result of the OpenTelemetry API/SDK separation. It is possible to attach a +/// Sum aggregator to a Histogram instrument or a MinMaxSumCount aggregator +/// to a Counter instrument. +pub trait Aggregator: fmt::Debug { + /// The interface to access the current state of this Aggregator. + fn aggregation(&self) -> &dyn Aggregation; + + /// Update receives a new measured value and incorporates it into the + /// aggregation. Update calls may be called concurrently. + /// + /// `Descriptor::number_kind` should be consulted to determine whether the + /// provided number is an `i64`, `u64` or `f64`. + /// + /// The current Context could be inspected for a `Baggage` or + /// `SpanContext`. 
+ fn update(&self, context: &Context, number: &Number, descriptor: &Descriptor) -> Result<()>; + + /// This method is called during collection to finish one period of aggregation + /// by atomically saving the currently-updating state into the argument + /// Aggregator. + /// + /// `synchronized_move` is called concurrently with `update`. These two methods + /// must be synchronized with respect to each other, for correctness. + /// + /// This method will return an `InconsistentAggregator` error if this + /// `Aggregator` cannot be copied into the destination due to an incompatible + /// type. + /// + /// This call has no `Context` argument because it is expected to perform only + /// computation. + fn synchronized_move( + &self, + destination: &Arc, + descriptor: &Descriptor, + ) -> Result<()>; + + /// This combines the checkpointed state from the argument `Aggregator` into this + /// `Aggregator`. `merge` is not synchronized with respect to `update` or + /// `synchronized_move`. + /// + /// The owner of an `Aggregator` being merged is responsible for synchronization + /// of both `Aggregator` states. + fn merge(&self, other: &(dyn Aggregator + Send + Sync), descriptor: &Descriptor) -> Result<()>; + + /// Returns the implementing aggregator as `Any` for downcasting. + fn as_any(&self) -> &dyn Any; +} diff --git a/opentelemetry-sdk/src/metrics/aggregators/sum.rs b/opentelemetry-sdk/src/metrics/aggregators/sum.rs index 752551373e..60c7147a83 100644 --- a/opentelemetry-sdk/src/metrics/aggregators/sum.rs +++ b/opentelemetry-sdk/src/metrics/aggregators/sum.rs @@ -1,10 +1,15 @@ -use crate::export::metrics::{Aggregator, Subtractor, Sum}; -use opentelemetry_api::metrics::{AtomicNumber, Descriptor, MetricsError, Number, Result}; +use crate::export::metrics::aggregation::{Aggregation, AggregationKind, Sum}; +use crate::metrics::{ + aggregators::Aggregator, + sdk_api::{AtomicNumber, Descriptor, Number}, +}; +use opentelemetry_api::metrics::{MetricsError, Result}; +use opentelemetry_api::Context; use std::any::Any; use std::sync::Arc; /// Create a new sum aggregator. 
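Before the `SumAggregator` changes below, here is a self-contained model of the `update` / `synchronized_move` / `merge` lifecycle that the `Aggregator` trait above defines. It uses a plain `AtomicU64` instead of the SDK's `Number` machinery, so it is a sketch of the contract, not of any real aggregator:

    use std::sync::atomic::{AtomicU64, Ordering};

    #[derive(Default, Debug)]
    struct MiniSum(AtomicU64);

    impl MiniSum {
        // Hot path, safe to call concurrently, like `Aggregator::update`.
        fn update(&self, v: u64) {
            self.0.fetch_add(v, Ordering::Relaxed);
        }
        // Checkpoint: atomically drain the current state into `dest`.
        fn synchronized_move(&self, dest: &MiniSum) {
            dest.0.store(self.0.swap(0, Ordering::SeqCst), Ordering::SeqCst);
        }
        // Fold a checkpoint into longer-lived (e.g. cumulative) state.
        fn merge(&self, checkpoint: &MiniSum) {
            self.0.fetch_add(checkpoint.0.load(Ordering::SeqCst), Ordering::SeqCst);
        }
    }

    fn main() {
        let hot = MiniSum::default();
        let (checkpoint, cumulative) = (MiniSum::default(), MiniSum::default());
        hot.update(3);
        hot.update(4);
        hot.synchronized_move(&checkpoint); // one collection period ends
        cumulative.merge(&checkpoint);      // processor accumulates it
        assert_eq!(cumulative.0.load(Ordering::SeqCst), 7);
    }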
-pub fn sum() -> SumAggregator { +pub fn sum() -> impl Aggregator { SumAggregator::default() } @@ -20,36 +25,22 @@ impl Sum for SumAggregator { } } -impl Subtractor for SumAggregator { - fn subtract( - &self, - operand: &(dyn Aggregator + Send + Sync), - result: &(dyn Aggregator + Send + Sync), - descriptor: &Descriptor, - ) -> Result<()> { - match ( - operand.as_any().downcast_ref::(), - result.as_any().downcast_ref::(), - ) { - (Some(op), Some(res)) => { - res.value.store(&self.value.load()); - res.value - .fetch_add(descriptor.number_kind(), &op.value.load()); - Ok(()) - } - _ => Err(MetricsError::InconsistentAggregator(format!( - "Expected {:?}, got: {:?} and {:?}", - self, operand, result - ))), - } +impl Aggregation for SumAggregator { + fn kind(&self) -> &AggregationKind { + &AggregationKind::SUM } } impl Aggregator for SumAggregator { - fn update(&self, number: &Number, descriptor: &Descriptor) -> Result<()> { + fn aggregation(&self) -> &dyn Aggregation { + self + } + + fn update(&self, _cx: &Context, number: &Number, descriptor: &Descriptor) -> Result<()> { self.value.fetch_add(descriptor.number_kind(), number); Ok(()) } + fn synchronized_move( &self, other: &Arc, @@ -67,6 +58,7 @@ impl Aggregator for SumAggregator { ))) } } + fn merge(&self, other: &(dyn Aggregator + Send + Sync), descriptor: &Descriptor) -> Result<()> { if let Some(other_sum) = other.as_any().downcast_ref::() { self.value @@ -75,6 +67,7 @@ impl Aggregator for SumAggregator { Ok(()) } + fn as_any(&self) -> &dyn Any { self } diff --git a/opentelemetry-sdk/src/metrics/controllers/basic.rs b/opentelemetry-sdk/src/metrics/controllers/basic.rs new file mode 100644 index 0000000000..f071533244 --- /dev/null +++ b/opentelemetry-sdk/src/metrics/controllers/basic.rs @@ -0,0 +1,468 @@ +use std::{ + collections::HashMap, + fmt, + sync::{Arc, Mutex}, + time::{Duration, SystemTime}, +}; + +use futures_channel::{mpsc, oneshot}; +use futures_util::{stream, StreamExt}; +use opentelemetry_api::{ + global, + metrics::{noop, Meter, MeterProvider, MetricsError, Result}, + Context, InstrumentationLibrary, +}; + +use crate::{ + export::metrics::{ + Checkpointer, CheckpointerFactory, InstrumentationLibraryReader, LockedCheckpointer, + MetricsExporter, Reader, + }, + metrics::{ + accumulator, + registry::{self, UniqueInstrumentMeterCore}, + sdk_api::{ + wrap_meter_core, AsyncInstrumentCore, Descriptor, MeterCore, SyncInstrumentCore, + }, + Accumulator, + }, + runtime::Runtime, + Resource, +}; + +/// DefaultPeriod is used for: +/// +/// - the minimum time between calls to `collect`. +/// - the timeout for `export`. +/// - the timeout for `collect`. +const DEFAULT_PERIOD: Duration = Duration::from_secs(10); + +/// Returns a new builder using the provided checkpointer factory. +/// +/// Use builder options (including optional exporter) to configure a metric +/// export pipeline. +pub fn basic(factory: T) -> BasicControllerBuilder +where + T: CheckpointerFactory + Send + Sync + 'static, +{ + BasicControllerBuilder { + checkpointer_factory: Box::new(factory), + resource: None, + exporter: None, + collect_period: None, + collect_timeout: None, + push_timeout: None, + } +} + +/// Organizes and synchronizes collection of metric data in both "pull" and +/// "push" configurations. +/// +/// This supports two distinct modes: +/// +/// - Push and Pull: `start` must be called to begin calling the exporter; +/// `collect` is called periodically after starting the controller. 
+/// - Pull-Only: `start` is optional in this case, to call `collect` +/// periodically. If `start` is not called, `collect` can be called manually to +/// initiate collection. +/// +/// The controller supports mixing push and pull access to metric data using the +/// `InstrumentationLibraryReader` interface. +#[derive(Clone)] +pub struct BasicController(Arc); + +struct ControllerInner { + meters: Mutex>>, + checkpointer_factory: Box, + resource: Resource, + exporter: Mutex>>, + worker_channel: Mutex>>, + collect_period: Duration, + collect_timeout: Duration, + push_timeout: Duration, + collected_time: Mutex>, +} + +enum WorkerMessage { + Tick, + Shutdown((Context, oneshot::Sender<()>)), +} + +impl BasicController { + /// This begins a ticker that periodically collects and exports metrics with the + /// configured interval. + /// + /// This is required for calling a configured [`MetricsExporter`] (see + /// [`BasicControllerBuilder::with_exporter`]) and is otherwise optional when + /// only pulling metric data. + /// + /// The passed in context is passed to `collect` and subsequently to + /// asynchronous instrument callbacks. Returns an error when the controller was + /// already started. + /// + /// Note that it is not necessary to start a controller when only pulling data; + /// use the `collect` and `try_for_each` methods directly in this case. + pub fn start(&self, cx: &Context, rt: T) -> Result<()> { + let (message_sender, message_receiver) = mpsc::channel(8); + let ticker = rt + .interval(self.0.collect_period) + .map(|_| WorkerMessage::Tick); + + let exporter = self + .0 + .exporter + .lock() + .map(|mut ex| ex.take()) + .unwrap_or_default(); + let resource = self.resource().clone(); + let reader = self.clone(); + let cx = cx.clone(); + // Spawn worker process via user-defined spawn function. + rt.spawn(Box::pin(async move { + let mut messages = Box::pin(stream::select(message_receiver, ticker)); + while let Some(message) = messages.next().await { + match message { + WorkerMessage::Tick => { + match reader.checkpoint(&cx) { + Ok(_) => { + if let Some(exporter) = &exporter { + // TODO timeout + if let Err(err) = exporter.export(&cx, &resource, &reader) { + global::handle_error(err); + } + } + } + Err(err) => global::handle_error(err), + }; + } + WorkerMessage::Shutdown((cx, channel)) => { + let _ = reader.checkpoint(&cx); + if let Some(exporter) = &exporter { + let _ = exporter.export(&cx, &resource, &reader); + } + let _ = channel.send(()); + break; + } + } + } + })); + + *self.0.worker_channel.lock()? = Some(message_sender); + + Ok(()) + } + + /// This waits for the background worker to return and then collects + /// and exports metrics one last time before returning. + /// + /// The passed context is passed to the final `collect` and subsequently to the + /// final asynchronous instruments. + /// + /// Note that `stop` will not cancel an ongoing collection or export. + pub fn stop(&self, cx: &Context) -> Result<()> { + self.0 + .worker_channel + .lock() + .map_err(Into::into) + .and_then(|mut worker| { + if let Some(mut worker) = worker.take() { + let (res_sender, res_receiver) = oneshot::channel(); + if worker + .try_send(WorkerMessage::Shutdown((cx.clone(), res_sender))) + .is_ok() + { + futures_executor::block_on(res_receiver) + .map_err(|err| MetricsError::Other(err.to_string())) + } else { + Ok(()) + } + } else { + Ok(()) + } + }) + } + + /// true if the controller was started via `start`, indicating that the + /// current `Reader` is being kept up-to-date. 
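For orientation, a push-mode usage sketch of the `start`/`stop` pair above. The `processors::factory` and `selectors::simple::histogram` helpers and the `runtime::Tokio` value are assumed from elsewhere in this patch (and require the corresponding runtime feature); exact paths and signatures may differ slightly from the final API:

    use opentelemetry_api::{metrics::MeterProvider as _, Context};
    use opentelemetry_sdk::{
        export::metrics::aggregation,
        metrics::{controllers, processors, selectors},
        runtime,
    };

    #[tokio::main]
    async fn main() -> opentelemetry_api::metrics::Result<()> {
        let controller = controllers::basic(processors::factory(
            selectors::simple::histogram(vec![5.0, 10.0]),
            aggregation::cumulative_temporality_selector(),
        ))
        .build();

        let cx = Context::current();
        controller.start(&cx, runtime::Tokio)?; // begins the collect/export ticker

        let meter = controller.versioned_meter("example", None, None);
        let requests = meter.u64_counter("requests").init();
        requests.add(&cx, 1, &[]);

        controller.stop(&cx) // final collect (and export, if configured)
    }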
+ pub fn is_running(&self) -> bool { + self.0 + .worker_channel + .lock() + .map(|wc| wc.is_some()) + .unwrap_or(false) + } + + /// `true` if the collector should collect now, based on the current time, the + /// last collection time, and the configured period. + fn should_collect(&self) -> bool { + self.0 + .collected_time + .lock() + .map(|mut collected_time| { + if self.0.collect_period.is_zero() { + return true; + } + let now = SystemTime::now(); + if let Some(collected_time) = *collected_time { + if now.duration_since(collected_time).unwrap_or_default() + < self.0.collect_period + { + return false; + } + } + + *collected_time = Some(now); + true + }) + .unwrap_or(false) + } + + /// Requests a collection. + /// + /// The collection will be skipped if the last collection is aged less than the + /// configured collection period. + pub fn collect(&self, cx: &Context) -> Result<()> { + if self.is_running() { + // When the ticker is `Some`, there's a component + // computing checkpoints with the collection period. + return Err(MetricsError::Other("controller already started".into())); + } + + if !self.should_collect() { + return Ok(()); + } + + self.checkpoint(cx) + } + + /// Get a reference to the current resource. + pub fn resource(&self) -> &Resource { + &self.0.resource + } + + /// Returns a snapshot of current accumulators registered to this controller. + /// + /// This briefly locks the controller. + fn with_accumulator_list(&self, mut f: F) -> Result + where + F: FnMut(&[&AccumulatorCheckpointer]) -> Result, + { + self.0.meters.lock().map_err(Into::into).and_then(|meters| { + let accs = meters + .values() + .filter_map(|unique| { + unique + .meter_core() + .downcast_ref::() + }) + .collect::>(); + f(&accs) + }) + } + + /// Calls the accumulator and checkpointer interfaces to + /// compute the reader. 
+    fn checkpoint(&self, cx: &Context) -> Result<()> {
+        self.with_accumulator_list(|accs| {
+            for acc in accs {
+                self.checkpoint_single_accumulator(cx, acc)?;
+            }
+
+            Ok(())
+        })
+    }
+
+    fn checkpoint_single_accumulator(
+        &self,
+        cx: &Context,
+        ac: &AccumulatorCheckpointer,
+    ) -> Result<()> {
+        ac.checkpointer
+            .checkpoint(&mut |ckpt: &mut dyn LockedCheckpointer| {
+                ckpt.start_collection();
+                if !self.0.collect_timeout.is_zero() {
+                    // TODO timeouts
+                }
+
+                ac.accumulator.collect(cx, ckpt.processor());
+
+                ckpt.finish_collection()
+            })
+    }
+}
+
+impl MeterProvider for BasicController {
+    fn versioned_meter(
+        &self,
+        name: &'static str,
+        version: Option<&'static str>,
+        schema_url: Option<&'static str>,
+    ) -> Meter {
+        self.0
+            .meters
+            .lock()
+            .map(|mut meters| {
+                let library = InstrumentationLibrary::new(name, version, schema_url);
+                let meter_core = meters.entry(library.clone()).or_insert_with(|| {
+                    let checkpointer = self.0.checkpointer_factory.checkpointer();
+                    Arc::new(registry::unique_instrument_meter_core(
+                        AccumulatorCheckpointer {
+                            accumulator: accumulator(checkpointer.clone().as_dyn_processor()),
+                            checkpointer,
+                            library: library.clone(),
+                        },
+                    ))
+                });
+                wrap_meter_core(meter_core.clone(), library)
+            })
+            .unwrap_or_else(|_| {
+                noop::NoopMeterProvider::new().versioned_meter(name, version, schema_url)
+            })
+    }
+}
+
+struct AccumulatorCheckpointer {
+    accumulator: Accumulator,
+    checkpointer: Arc,
+    library: InstrumentationLibrary,
+}
+
+impl MeterCore for AccumulatorCheckpointer {
+    fn new_sync_instrument(
+        &self,
+        descriptor: Descriptor,
+    ) -> Result> {
+        self.accumulator.new_sync_instrument(descriptor)
+    }
+
+    fn new_async_instrument(
+        &self,
+        descriptor: Descriptor,
+    ) -> Result> {
+        self.accumulator.new_async_instrument(descriptor)
+    }
+
+    fn register_callback(&self, f: Box) -> Result<()> {
+        self.accumulator.register_callback(f)
+    }
+}
+
+impl InstrumentationLibraryReader for BasicController {
+    fn try_for_each(
+        &self,
+        f: &mut dyn FnMut(&InstrumentationLibrary, &mut dyn Reader) -> Result<()>,
+    ) -> Result<()> {
+        let mut res = Ok(());
+        self.with_accumulator_list(|acs| {
+            for ac_pair in acs {
+                if res.is_err() {
+                    continue;
+                }
+
+                res = ac_pair
+                    .checkpointer
+                    .checkpoint(&mut |locked| f(&ac_pair.library, locked.reader()))
+            }
+
+            Ok(())
+        })?;
+
+        res
+    }
+}
+
+impl fmt::Debug for BasicController {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        f.debug_struct("BasicController")
+            .field("resource", &self.0.resource)
+            .field("collect_period", &self.0.collect_period)
+            .field("collect_timeout", &self.0.collect_timeout)
+            .field("push_timeout", &self.0.push_timeout)
+            .field("collected_time", &self.0.collected_time)
+            .finish()
+    }
+}
+
+/// Options for configuring a [`BasicController`]
+pub struct BasicControllerBuilder {
+    checkpointer_factory: Box,
+    resource: Option,
+    exporter: Option>,
+    collect_period: Option,
+    collect_timeout: Option,
+    push_timeout: Option,
+}
+
+impl BasicControllerBuilder {
+    /// Sets the [`Resource`] used for this controller.
+    pub fn with_resource(mut self, resource: Resource) -> Self {
+        self.resource = Some(resource);
+        self
+    }
+
+    /// Sets the exporter used for exporting metric data.
+    ///
+    /// Note: Exporters such as Prometheus that pull data do not implement
+    /// [`MetricsExporter`]. They will directly call `collect` and `try_for_each`.
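As that note says, pull-based exporters skip `start` and drive collection themselves through `collect` and the `InstrumentationLibraryReader` implementation shown above. A hedged sketch of that read path; the `Reader`'s record-iteration API is defined elsewhere in this patch, so the closure only inspects the instrumentation scope here:

    use opentelemetry_api::Context;
    use opentelemetry_sdk::{
        export::metrics::InstrumentationLibraryReader,
        metrics::controllers::BasicController,
    };

    fn scrape(controller: &BasicController) {
        let cx = Context::current();
        // No-op if the last checkpoint is younger than the collect period.
        if let Err(err) = controller.collect(&cx) {
            eprintln!("collect failed: {err}");
            return;
        }
        let _ = controller.try_for_each(&mut |library, _reader| {
            // A real exporter would walk `_reader`'s checkpointed records here.
            println!("collected scope: {}", library.name);
            Ok(())
        });
    }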
+    pub fn with_exporter(mut self, exporter: impl MetricsExporter + Send + Sync + 'static) -> Self {
+        self.exporter = Some(Box::new(exporter));
+        self
+    }
+
+    /// Sets the interval between calls to `collect` for computing a checkpoint.
+    ///
+    /// When pulling metrics and not exporting, this is the minimum time between
+    /// calls to `collect`. In a pull-only configuration, collection is performed on
+    /// demand; set this to `0` to always recompute the export record set.
+    ///
+    /// When exporting metrics, this must be > 0.
+    ///
+    /// Default value is 10s.
+    pub fn with_collect_period(mut self, collect_period: Duration) -> Self {
+        self.collect_period = Some(collect_period);
+        self
+    }
+
+    /// Sets the timeout of the `collect` and subsequent observer instrument
+    /// callbacks.
+    ///
+    /// Default value is 10s. If zero or none, no collect timeout is applied.
+    pub fn with_collect_timeout(mut self, collect_timeout: Duration) -> Self {
+        self.collect_timeout = Some(collect_timeout);
+        self
+    }
+
+    /// Sets the push controller timeout when an exporter is configured.
+    ///
+    /// Default value is 10s. If zero, no export timeout is applied.
+    pub fn with_push_timeout(mut self, push_timeout: Duration) -> Self {
+        self.push_timeout = Some(push_timeout);
+        self
+    }
+
+    /// Creates a new basic controller.
+    pub fn build(self) -> BasicController {
+        BasicController(Arc::new(ControllerInner {
+            meters: Default::default(),
+            checkpointer_factory: self.checkpointer_factory,
+            resource: self.resource.unwrap_or_default(),
+            exporter: Mutex::new(self.exporter),
+            worker_channel: Mutex::new(None),
+            collect_period: self.collect_period.unwrap_or(DEFAULT_PERIOD),
+            collect_timeout: self.collect_timeout.unwrap_or(DEFAULT_PERIOD),
+            push_timeout: self.push_timeout.unwrap_or(DEFAULT_PERIOD),
+            collected_time: Default::default(),
+        }))
+    }
+}
+
+impl fmt::Debug for BasicControllerBuilder {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        f.debug_struct("BasicControllerBuilder")
+            .field("resource", &self.resource)
+            .field("collect_period", &self.collect_period)
+            .field("collect_timeout", &self.collect_timeout)
+            .field("push_timeout", &self.push_timeout)
+            .finish()
+    }
+}
diff --git a/opentelemetry-sdk/src/metrics/controllers/mod.rs b/opentelemetry-sdk/src/metrics/controllers/mod.rs
index a7e8bc4949..c7326193ad 100644
--- a/opentelemetry-sdk/src/metrics/controllers/mod.rs
+++ b/opentelemetry-sdk/src/metrics/controllers/mod.rs
@@ -1,6 +1,4 @@
 //! SDK Metrics Controllers
-mod pull;
-mod push;
+mod basic;

-pub use pull::{pull, PullController};
-pub use push::{push, PushController, PushControllerWorker};
+pub use basic::{basic, BasicController, BasicControllerBuilder};
diff --git a/opentelemetry-sdk/src/metrics/controllers/pull.rs b/opentelemetry-sdk/src/metrics/controllers/pull.rs
deleted file mode 100644
index f3a2829b04..0000000000
--- a/opentelemetry-sdk/src/metrics/controllers/pull.rs
+++ /dev/null
@@ -1,163 +0,0 @@
-use crate::{
-    export::metrics::{AggregatorSelector, CheckpointSet, Checkpointer, ExportKindFor, Record},
-    metrics::{
-        accumulator,
-        processors::{self, BasicProcessor},
-        Accumulator,
-    },
-    Resource,
-};
-use opentelemetry_api::metrics::{registry, Result};
-use std::sync::Arc;
-use std::time::{Duration, SystemTime};
-
-const DEFAULT_CACHE_DURATION: Duration = Duration::from_secs(10);
-
-/// Returns a builder for creating a `PullController` with the configured and options.
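The removed `PullController` below had its own builder; under the new API the equivalent knobs live on `BasicControllerBuilder` above. A sketch tying those options together, accepting any checkpointer factory (for example the one returned by `processors::factory`, assumed from elsewhere in this patch):

    use std::time::Duration;
    use opentelemetry_sdk::{
        export::metrics::CheckpointerFactory,
        metrics::controllers::{self, BasicController},
        Resource,
    };

    // Hypothetical helper; the option values are arbitrary examples.
    fn build_controller<T>(factory: T) -> BasicController
    where
        T: CheckpointerFactory + Send + Sync + 'static,
    {
        controllers::basic(factory)
            .with_collect_period(Duration::from_secs(30)) // ticker / min collect interval
            .with_collect_timeout(Duration::from_secs(5)) // bounds collect callbacks
            .with_push_timeout(Duration::from_secs(5))    // bounds exporter calls
            .with_resource(Resource::default())
            .build()
    }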
-pub fn pull( - aggregator_selector: Box, - export_selector: Box, -) -> PullControllerBuilder { - PullControllerBuilder::with_selectors(aggregator_selector, export_selector) -} - -/// Pull controllers are typically used in an environment where there are -/// multiple readers. It is common, therefore, when configuring a -/// `BasicProcessor` for use with this controller, to use a -/// `ExportKind::Cumulative` strategy and the `with_memory(true)` builder -/// option, which ensures that every `CheckpointSet` includes full state. -#[derive(Debug)] -pub struct PullController { - accumulator: Accumulator, - processor: Arc, - provider: registry::RegistryMeterProvider, - period: Duration, - last_collect: SystemTime, -} - -impl PullController { - /// The provider for this controller - pub fn provider(&self) -> registry::RegistryMeterProvider { - self.provider.clone() - } - - /// Collects all metrics if the last collected at time is past the current period - pub fn collect(&mut self) -> Result<()> { - if self - .last_collect - .elapsed() - .map_or(true, |elapsed| elapsed > self.period) - { - self.last_collect = opentelemetry_api::time::now(); - self.processor.lock().and_then(|mut checkpointer| { - checkpointer.start_collection(); - self.accumulator.0.collect(&mut checkpointer); - checkpointer.finish_collection() - }) - } else { - Ok(()) - } - } -} - -impl CheckpointSet for PullController { - fn try_for_each( - &mut self, - export_selector: &dyn ExportKindFor, - f: &mut dyn FnMut(&Record<'_>) -> Result<()>, - ) -> Result<()> { - self.processor.lock().and_then(|mut locked_processor| { - locked_processor - .checkpoint_set() - .try_for_each(export_selector, f) - }) - } -} - -/// Configuration for a `PullController`. -#[derive(Debug)] -pub struct PullControllerBuilder { - /// The aggregator selector used by the controller - aggregator_selector: Box, - - /// The export kind selector used by this controller - export_selector: Box, - - /// Resource is the OpenTelemetry resource associated with all Meters created by - /// the controller. - resource: Option, - - /// CachePeriod is the period which a recently-computed result will be returned - /// without gathering metric data again. - /// - /// If the period is zero, caching of the result is disabled. The default value - /// is 10 seconds. - cache_period: Option, - - /// Memory controls whether the controller's processor remembers metric - /// instruments and attribute sets that were previously reported. When memory is - /// `true`, `CheckpointSet::try_for_each` will visit metrics that were not - /// updated in the most recent interval. Default true. - memory: bool, -} - -impl PullControllerBuilder { - /// Configure the sectors for this controller - pub fn with_selectors( - aggregator_selector: Box, - export_selector: Box, - ) -> Self { - PullControllerBuilder { - aggregator_selector, - export_selector, - resource: None, - cache_period: None, - memory: true, - } - } - - /// Configure the resource for this controller - pub fn with_resource(self, resource: Resource) -> Self { - PullControllerBuilder { - resource: Some(resource), - ..self - } - } - - /// Configure the cache period for this controller - pub fn with_cache_period(self, period: Duration) -> Self { - PullControllerBuilder { - cache_period: Some(period), - ..self - } - } - - /// Sets the memory behavior of the controller's `Processor`. 
If this is - /// `true`, the processor will report metric instruments and attribute sets that - /// were previously reported but not updated in the most recent interval. - pub fn with_memory(self, memory: bool) -> Self { - PullControllerBuilder { memory, ..self } - } - - /// Build a new `PullController` from the current configuration. - pub fn build(self) -> PullController { - let processor = Arc::new(processors::basic( - self.aggregator_selector, - self.export_selector, - self.memory, - )); - - let accumulator = accumulator(processor.clone()) - .with_resource(self.resource.unwrap_or_default()) - .build(); - let provider = registry::meter_provider(Arc::new(accumulator.clone())); - - PullController { - accumulator, - processor, - provider, - period: self.cache_period.unwrap_or(DEFAULT_CACHE_DURATION), - last_collect: opentelemetry_api::time::now(), - } - } -} diff --git a/opentelemetry-sdk/src/metrics/controllers/push.rs b/opentelemetry-sdk/src/metrics/controllers/push.rs deleted file mode 100644 index 39a83e795c..0000000000 --- a/opentelemetry-sdk/src/metrics/controllers/push.rs +++ /dev/null @@ -1,201 +0,0 @@ -use crate::{ - export::metrics::{AggregatorSelector, Checkpointer, ExportKindFor, Exporter}, - metrics::{ - self, - processors::{self, BasicProcessor}, - Accumulator, - }, - Resource, -}; -use futures_channel::mpsc; -use futures_util::{ - future::Future, - stream::{select, Stream, StreamExt as _}, - task, -}; -use opentelemetry_api::global; -use opentelemetry_api::metrics::registry; -use std::pin::Pin; -use std::sync::{Arc, Mutex}; -use std::time; - -lazy_static::lazy_static! { - static ref DEFAULT_PUSH_PERIOD: time::Duration = time::Duration::from_secs(10); -} - -/// Create a new `PushControllerBuilder`. -pub fn push( - aggregator_selector: AS, - export_selector: ES, - exporter: E, - spawn: SP, - interval: I, -) -> PushControllerBuilder -where - AS: AggregatorSelector + Send + Sync + 'static, - ES: ExportKindFor + Send + Sync + 'static, - E: Exporter + Send + Sync + 'static, - SP: Fn(PushControllerWorker) -> SO, - I: Fn(time::Duration) -> IO, -{ - PushControllerBuilder { - aggregator_selector: Box::new(aggregator_selector), - export_selector: Box::new(export_selector), - exporter: Box::new(exporter), - spawn, - interval, - resource: None, - period: None, - timeout: None, - } -} - -/// Organizes a periodic push of metric data. -#[derive(Debug)] -pub struct PushController { - message_sender: Mutex>, - provider: registry::RegistryMeterProvider, -} - -#[derive(Debug)] -enum PushMessage { - Tick, - Shutdown, -} - -/// The future which executes push controller work periodically. Can be run on a -/// passed in executor. -#[allow(missing_debug_implementations)] -pub struct PushControllerWorker { - messages: Pin + Send>>, - accumulator: Accumulator, - processor: Arc, - exporter: Box, - _timeout: time::Duration, -} - -impl PushControllerWorker { - fn on_tick(&mut self) { - // TODO handle timeout - if let Err(err) = self.processor.lock().and_then(|mut checkpointer| { - checkpointer.start_collection(); - self.accumulator.0.collect(&mut checkpointer); - checkpointer.finish_collection()?; - self.exporter.export(checkpointer.checkpoint_set()) - }) { - global::handle_error(err) - } - } -} - -impl Future for PushControllerWorker { - type Output = (); - fn poll(mut self: Pin<&mut Self>, cx: &mut task::Context<'_>) -> task::Poll { - loop { - match futures_util::ready!(self.messages.poll_next_unpin(cx)) { - // Span batch interval time reached, export current spans. 
- Some(PushMessage::Tick) => self.on_tick(), - // Stream has terminated or processor is shutdown, return to finish execution. - None | Some(PushMessage::Shutdown) => { - return task::Poll::Ready(()); - } - } - } - } -} - -impl Drop for PushControllerWorker { - fn drop(&mut self) { - // Try to push data one last time - self.on_tick() - } -} - -impl PushController { - /// The controller's meter provider. - pub fn provider(&self) -> registry::RegistryMeterProvider { - self.provider.clone() - } -} - -impl Drop for PushController { - fn drop(&mut self) { - if let Ok(mut sender) = self.message_sender.lock() { - let _ = sender.try_send(PushMessage::Shutdown); - } - } -} - -/// Configuration for building a new `PushController`. -#[derive(Debug)] -pub struct PushControllerBuilder { - aggregator_selector: Box, - export_selector: Box, - exporter: Box, - spawn: S, - interval: I, - resource: Option, - period: Option, - timeout: Option, -} - -impl PushControllerBuilder -where - S: Fn(PushControllerWorker) -> SO, - I: Fn(time::Duration) -> IS, - IS: Stream + Send + 'static, -{ - /// Configure the period of this controller - pub fn with_period(self, period: time::Duration) -> Self { - PushControllerBuilder { - period: Some(period), - ..self - } - } - - /// Configure the resource used by this controller - pub fn with_resource(self, resource: Resource) -> Self { - PushControllerBuilder { - resource: Some(resource), - ..self - } - } - - /// Config the timeout of one request. - pub fn with_timeout(self, duration: time::Duration) -> Self { - PushControllerBuilder { - timeout: Some(duration), - ..self - } - } - - /// Build a new `PushController` with this configuration. - pub fn build(self) -> PushController { - let processor = processors::basic(self.aggregator_selector, self.export_selector, false); - let processor = Arc::new(processor); - let mut accumulator = metrics::accumulator(processor.clone()); - - if let Some(resource) = self.resource { - accumulator = accumulator.with_resource(resource); - } - let accumulator = accumulator.build(); - let provider = registry::meter_provider(Arc::new(accumulator.clone())); - - let (message_sender, message_receiver) = mpsc::channel(256); - let ticker = - (self.interval)(self.period.unwrap_or(*DEFAULT_PUSH_PERIOD)).map(|_| PushMessage::Tick); - - (self.spawn)(PushControllerWorker { - messages: Box::pin(select(message_receiver, ticker)), - accumulator, - processor, - exporter: self.exporter, - _timeout: self.timeout.unwrap_or(*DEFAULT_PUSH_PERIOD), - }); - - PushController { - message_sender: Mutex::new(message_sender), - provider, - } - } -} diff --git a/opentelemetry-sdk/src/metrics/mod.rs b/opentelemetry-sdk/src/metrics/mod.rs index 7859771cb0..2c242631e9 100644 --- a/opentelemetry-sdk/src/metrics/mod.rs +++ b/opentelemetry-sdk/src/metrics/mod.rs @@ -1,68 +1,37 @@ //! 
# OpenTelemetry Metrics SDK -use crate::{ - export::{ - self, - metrics::{Aggregator, LockedProcessor, Processor}, +use crate::export; +use crate::export::metrics::{LockedProcessor, Processor}; +use crate::metrics::{ + aggregators::Aggregator, + sdk_api::{ + AsyncInstrumentCore, AtomicNumber, Descriptor, InstrumentCore, MeterCore, Number, + NumberKind, SyncInstrumentCore, }, - resource::Resource, }; use fnv::FnvHasher; -use opentelemetry_api::metrics::{ - sdk_api::{self, InstrumentCore as _, SyncBoundInstrumentCore as _}, - AsyncRunner, AtomicNumber, Descriptor, Measurement, Number, NumberKind, Observation, Result, -}; use opentelemetry_api::{ attributes::{hash_attributes, AttributeSet}, - global, Context, KeyValue, + global, + metrics::Result, + Context, KeyValue, +}; +use std::{ + any::Any, + cmp::Ordering, + fmt, + hash::{Hash, Hasher}, + sync::{Arc, Mutex}, }; -use std::any::Any; -use std::cmp::Ordering; -use std::collections::HashMap; -use std::hash::{Hash, Hasher}; -use std::sync::{Arc, Mutex}; - pub mod aggregators; pub mod controllers; pub mod processors; +pub mod registry; +pub mod sdk_api; pub mod selectors; -use crate::resource::SdkProvidedResourceDetector; -pub use controllers::{PullController, PushController, PushControllerWorker}; -use std::time::Duration; - /// Creates a new accumulator builder -pub fn accumulator(processor: Arc) -> AccumulatorBuilder { - AccumulatorBuilder { - processor, - resource: None, - } -} - -/// Configuration for an accumulator -#[derive(Debug)] -pub struct AccumulatorBuilder { - processor: Arc, - resource: Option, -} - -impl AccumulatorBuilder { - /// The resource that will be applied to all records in this accumulator. - pub fn with_resource(self, resource: Resource) -> Self { - AccumulatorBuilder { - resource: Some(resource), - ..self - } - } - - /// Create a new accumulator from this configuration - pub fn build(self) -> Accumulator { - let sdk_provided_resource = Resource::from_detectors( - Duration::from_secs(0), - vec![Box::new(SdkProvidedResourceDetector)], - ); - let resource = self.resource.unwrap_or(sdk_provided_resource); - Accumulator(Arc::new(AccumulatorCore::new(self.processor, resource))) - } +pub fn accumulator(processor: Arc) -> Accumulator { + Accumulator(Arc::new(AccumulatorCore::new(processor))) } /// Accumulator implements the OpenTelemetry Meter API. The Accumulator is bound @@ -75,128 +44,108 @@ impl AccumulatorBuilder { #[derive(Debug, Clone)] pub struct Accumulator(Arc); -#[derive(Clone, Debug, PartialEq, Eq, Hash, PartialOrd, Ord)] -struct MapKey { - instrument_hash: u64, -} - -type AsyncRunnerPair = (AsyncRunner, Option>); - -#[derive(Default, Debug)] -struct AsyncInstrumentState { - /// The set of runners in the order they were registered that will run each - /// collection interval. +impl Accumulator { + /// Traverses the list of active records and observers and + /// exports data for each active instrument. /// - /// Non-batch observers are entered with an instrument, batch observers are - /// entered without an instrument, each is called once allowing both batch and - /// individual observations to be collected. - runners: Vec, - - /// The set of instruments in the order they were registered. - instruments: Vec>, + /// During the collection pass, the [`LockedProcessor`] will receive + /// one `export` call per current aggregation. + /// + /// Returns the number of records that were checkpointed. 
+ pub fn collect(&self, cx: &Context, locked_processor: &mut dyn LockedProcessor) -> usize { + self.0.collect(cx, locked_processor) + } } -fn collect_async(attributes: &[KeyValue], observations: &[Observation]) { - let attributes = AttributeSet::from_attributes(attributes.iter().cloned()); +impl MeterCore for Accumulator { + fn new_sync_instrument( + &self, + descriptor: Descriptor, + ) -> Result> { + Ok(Arc::new(SyncInstrument { + instrument: Arc::new(BaseInstrument { + meter: self.clone(), + descriptor, + }), + })) + } + + fn new_async_instrument( + &self, + descriptor: Descriptor, + ) -> Result> { + Ok(Arc::new(AsyncInstrument { + instrument: Arc::new(BaseInstrument { + meter: self.clone(), + descriptor, + }), + })) + } - for observation in observations { - if let Some(instrument) = observation - .instrument() - .as_any() - .downcast_ref::() - { - instrument.observe(observation.number(), &attributes) - } + fn register_callback(&self, f: Box) -> Result<()> { + self.0 + .callbacks + .lock() + .map_err(Into::into) + .map(|mut callbacks| callbacks.push(f)) } } -impl AsyncInstrumentState { - /// Executes the complete set of observer callbacks. - fn run(&self) { - for (runner, instrument) in self.runners.iter() { - runner.run(instrument, collect_async) - } - } +#[derive(Clone, Debug, PartialEq, Eq, Hash, PartialOrd, Ord)] +struct MapKey { + instrument_hash: u64, } #[derive(Debug)] +struct AsyncContextKey; + +type Callback = Box; + struct AccumulatorCore { /// A concurrent map of current sync instrument state. current: dashmap::DashMap>, - /// A collection of async instrument state - async_instruments: Mutex, + + /// Async instrument callbacks + callbacks: Mutex>, /// The current epoch number. It is incremented in `collect`. current_epoch: AtomicNumber, + /// The configured processor. processor: Arc, - /// The resource applied to all records in this Accumulator. 
- resource: Resource, } impl AccumulatorCore { - fn new(processor: Arc, resource: Resource) -> Self { + fn new(processor: Arc) -> Self { AccumulatorCore { current: dashmap::DashMap::new(), - async_instruments: Mutex::new(AsyncInstrumentState::default()), current_epoch: NumberKind::U64.zero().to_atomic(), processor, - resource, + callbacks: Default::default(), } } - fn register( - &self, - instrument: Arc, - runner: Option, - ) -> Result<()> { - self.async_instruments - .lock() - .map_err(Into::into) - .map(|mut async_instruments| { - if let Some(runner) = runner { - async_instruments - .runners - .push((runner, Some(instrument.clone()))); - }; - async_instruments.instruments.push(instrument); - }) - } - - fn register_runner(&self, runner: AsyncRunner) -> Result<()> { - self.async_instruments - .lock() - .map_err(Into::into) - .map(|mut async_instruments| async_instruments.runners.push((runner, None))) - } - - fn collect(&self, locked_processor: &mut dyn LockedProcessor) -> usize { - let mut checkpointed = self.observe_async_instruments(locked_processor); - checkpointed += self.collect_sync_instruments(locked_processor); + fn collect(&self, cx: &Context, locked_processor: &mut dyn LockedProcessor) -> usize { + self.run_async_callbacks(cx); + let checkpointed = self.collect_instruments(locked_processor); self.current_epoch.fetch_add(&NumberKind::U64, &1u64.into()); checkpointed } - fn observe_async_instruments(&self, locked_processor: &mut dyn LockedProcessor) -> usize { - self.async_instruments - .lock() - .map_or(0, |async_instruments| { - let mut async_collected = 0; - - async_instruments.run(); - - for instrument in &async_instruments.instruments { - if let Some(a) = instrument.as_any().downcast_ref::() { - async_collected += self.checkpoint_async(a, locked_processor); - } + fn run_async_callbacks(&self, cx: &Context) { + match self.callbacks.lock() { + Ok(callbacks) => { + let cx = cx.with_value(AsyncContextKey); + for f in callbacks.iter() { + f(&cx) } - - async_collected - }) + } + Err(err) => global::handle_error(err), + } } - fn collect_sync_instruments(&self, locked_processor: &mut dyn LockedProcessor) -> usize { + fn collect_instruments(&self, locked_processor: &mut dyn LockedProcessor) -> usize { let mut checkpointed = 0; self.current.retain(|_key, value| { @@ -243,7 +192,6 @@ impl AccumulatorCore { let accumulation = export::metrics::accumulation( record.instrument.descriptor(), &record.attributes, - &self.resource, checkpoint, ); if let Err(err) = locked_processor.process(accumulation) { @@ -256,101 +204,66 @@ impl AccumulatorCore { } } - fn checkpoint_async( - &self, - instrument: &AsyncInstrument, - locked_processor: &mut dyn LockedProcessor, - ) -> usize { - instrument.recorders.lock().map_or(0, |mut recorders| { - let mut checkpointed = 0; - match recorders.as_mut() { - None => return checkpointed, - Some(recorders) => { - recorders.retain(|_key, attribute_recorder| { - let epoch_diff = self.current_epoch.load().partial_cmp( - &NumberKind::U64, - &attribute_recorder.observed_epoch.into(), - ); - if epoch_diff == Some(Ordering::Equal) { - if let Some(observed) = &attribute_recorder.observed { - let accumulation = export::metrics::accumulation( - instrument.descriptor(), - &attribute_recorder.attributes, - &self.resource, - observed, - ); - - if let Err(err) = locked_processor.process(accumulation) { - global::handle_error(err); - } - checkpointed += 1; - } - } - - // Retain if this is not second collection cycle with no - // observations for this AttributeSet. 
- epoch_diff == Some(Ordering::Greater) - }); - } - } - if recorders.as_ref().map_or(false, |map| map.is_empty()) { - *recorders = None; - } + // fn checkpoint_async( + // &self, + // instrument: &AsyncInstrument, + // locked_processor: &mut dyn LockedProcessor, + // ) -> usize { + // instrument.recorders.lock().map_or(0, |mut recorders| { + // let mut checkpointed = 0; + // match recorders.as_mut() { + // None => return checkpointed, + // Some(recorders) => { + // recorders.retain(|_key, attribute_recorder| { + // let epoch_diff = self.current_epoch.load().partial_cmp( + // &NumberKind::U64, + // &attribute_recorder.observed_epoch.into(), + // ); + // if epoch_diff == Some(Ordering::Equal) { + // if let Some(observed) = &attribute_recorder.observed { + // let accumulation = export::metrics::accumulation( + // instrument.descriptor(), + // &attribute_recorder.attributes, + // &self.resource, + // observed, + // ); + // + // if let Err(err) = locked_processor.process(accumulation) { + // global::handle_error(err); + // } + // checkpointed += 1; + // } + // } + // + // // Retain if this is not second collection cycle with no + // // observations for this AttributeSet. + // epoch_diff == Some(Ordering::Greater) + // }); + // } + // } + // if recorders.as_ref().map_or(false, |map| map.is_empty()) { + // *recorders = None; + // } + // + // checkpointed + // }) + // } +} - checkpointed - }) +impl fmt::Debug for AccumulatorCore { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("AccumulatorCore").finish() } } #[derive(Debug, Clone)] struct SyncInstrument { - instrument: Arc, + instrument: Arc, } -impl SyncInstrument { - fn acquire_handle(&self, attributes: &[KeyValue]) -> Arc { - let mut hasher = FnvHasher::default(); - self.instrument - .descriptor - .attribute_hash() - .hash(&mut hasher); - - hash_attributes( - &mut hasher, - attributes.iter().map(|kv| (&kv.key, &kv.value)), - ); - - let map_key = MapKey { - instrument_hash: hasher.finish(), - }; - let current = &self.instrument.meter.0.current; - if let Some(existing_record) = current.get(&map_key) { - return existing_record.value().clone(); - } - - let record = Arc::new(Record { - update_count: NumberKind::U64.zero().to_atomic(), - collected_count: NumberKind::U64.zero().to_atomic(), - attributes: AttributeSet::from_attributes(attributes.iter().cloned()), - instrument: self.clone(), - current: self - .instrument - .meter - .0 - .processor - .aggregation_selector() - .aggregator_for(&self.instrument.descriptor), - checkpoint: self - .instrument - .meter - .0 - .processor - .aggregation_selector() - .aggregator_for(&self.instrument.descriptor), - }); - current.insert(map_key, record.clone()); - - record +impl SyncInstrumentCore for SyncInstrument { + fn record_one(&self, cx: &Context, number: sdk_api::Number, kvs: &'_ [KeyValue]) { + self.instrument.acquire_handle(kvs).capture_one(cx, number) } } @@ -358,119 +271,85 @@ impl sdk_api::InstrumentCore for SyncInstrument { fn descriptor(&self) -> &Descriptor { self.instrument.descriptor() } + + fn as_any(&self) -> &dyn Any { + self + } } -impl sdk_api::SyncInstrumentCore for SyncInstrument { - fn bind(&self, attributes: &'_ [KeyValue]) -> Arc { - self.acquire_handle(attributes) +#[derive(Debug, Clone)] +struct AsyncInstrument { + instrument: Arc, +} + +impl AsyncInstrumentCore for AsyncInstrument { + fn observe_one(&self, cx: &Context, number: Number, kvs: &'_ [KeyValue]) { + self.instrument.acquire_handle(kvs).capture_one(cx, number) } - fn record_one(&self, number: 
Number, attributes: &'_ [KeyValue]) { - let handle = self.acquire_handle(attributes); - handle.record_one(number) +} + +impl sdk_api::InstrumentCore for AsyncInstrument { + fn descriptor(&self) -> &Descriptor { + self.instrument.descriptor() } + fn as_any(&self) -> &dyn Any { self } } -#[derive(Debug)] -struct AttributedRecorder { - observed_epoch: u64, - attributes: AttributeSet, - observed: Option>, -} - #[derive(Debug, Clone)] -struct AsyncInstrument { - instrument: Arc, - recorders: Arc>>>, +struct BaseInstrument { + meter: Accumulator, + descriptor: Descriptor, } -impl AsyncInstrument { - fn observe(&self, number: &Number, attributes: &AttributeSet) { - if let Err(err) = aggregators::range_test(number, &self.instrument.descriptor) { - global::handle_error(err); - } - if let Some(recorder) = self.get_recorder(attributes) { - if let Err(err) = recorder.update(number, &self.instrument.descriptor) { - global::handle_error(err) - } - } - } +impl BaseInstrument { + // acquireHandle gets or creates a `*record` corresponding to `kvs`, + // the input attributes. + fn acquire_handle(&self, kvs: &[KeyValue]) -> Arc { + let mut hasher = FnvHasher::default(); + self.descriptor.attribute_hash().hash(&mut hasher); - fn get_recorder(&self, attributes: &AttributeSet) -> Option> { - self.recorders.lock().map_or(None, |mut recorders| { - let mut hasher = FnvHasher::default(); - hash_attributes(&mut hasher, attributes.into_iter()); - let attribute_hash = hasher.finish(); - if let Some(recorder) = recorders - .as_mut() - .and_then(|rec| rec.get_mut(&attribute_hash)) - { - let current_epoch = self - .instrument - .meter - .0 - .current_epoch - .load() - .to_u64(&NumberKind::U64); - if recorder.observed_epoch == current_epoch { - // last value wins for Observers, so if we see the same attributes - // in the current epoch, we replace the old recorder - return self - .instrument - .meter - .0 - .processor - .aggregation_selector() - .aggregator_for(&self.instrument.descriptor); - } else { - recorder.observed_epoch = current_epoch; - } - return recorder.observed.clone(); - } + hash_attributes(&mut hasher, kvs.iter().map(|kv| (&kv.key, &kv.value))); - let recorder = self - .instrument + let map_key = MapKey { + instrument_hash: hasher.finish(), + }; + let current = &self.meter.0.current; + if let Some(existing_record) = current.get(&map_key) { + return existing_record.value().clone(); + } + + let record = Arc::new(Record { + update_count: NumberKind::U64.zero().to_atomic(), + collected_count: NumberKind::U64.zero().to_atomic(), + attributes: AttributeSet::from_attributes(kvs.iter().cloned()), + instrument: self.clone(), + current: self .meter .0 .processor - .aggregation_selector() - .aggregator_for(&self.instrument.descriptor); - if recorders.is_none() { - *recorders = Some(HashMap::new()); - } - // This may store a recorder with no aggregator in the map, thus disabling the - // async_instrument for the AttributeSet for good. This is intentional, but will be - // revisited later. 
- let observed_epoch = self - .instrument + .aggregator_selector() + .aggregator_for(&self.descriptor), + checkpoint: self .meter .0 - .current_epoch - .load() - .to_u64(&NumberKind::U64); - recorders.as_mut().unwrap().insert( - attribute_hash, - AttributedRecorder { - observed: recorder.clone(), - attributes: attributes.clone(), - observed_epoch, - }, - ); + .processor + .aggregator_selector() + .aggregator_for(&self.descriptor), + }); + current.insert(map_key, record.clone()); - recorder - }) + record } } -impl sdk_api::InstrumentCore for AsyncInstrument { +impl InstrumentCore for BaseInstrument { fn descriptor(&self) -> &Descriptor { - self.instrument.descriptor() + &self.descriptor } -} -impl sdk_api::AsyncInstrumentCore for AsyncInstrument { fn as_any(&self) -> &dyn Any { self } @@ -495,7 +374,7 @@ struct Record { attributes: AttributeSet, /// The corresponding instrument. - instrument: SyncInstrument, + instrument: BaseInstrument, /// current implements the actual `record_one` API, depending on the type of /// aggregation. If `None`, the metric was disabled by the exporter. @@ -503,163 +382,22 @@ struct Record { checkpoint: Option>, } -impl sdk_api::SyncBoundInstrumentCore for Record { - fn record_one<'a>(&self, number: Number) { - // check if the instrument is disabled according to the AggregatorSelector. - if let Some(recorder) = &self.current { - if let Err(err) = - aggregators::range_test(&number, &self.instrument.instrument.descriptor) - .and_then(|_| recorder.update(&number, &self.instrument.instrument.descriptor)) - { - global::handle_error(err); - return; - } - - // Record was modified, inform the collect() that things need - // to be collected while the record is still mapped. - self.update_count.fetch_add(&NumberKind::U64, &1u64.into()); - } - } -} - -struct Instrument { - descriptor: Descriptor, - meter: Accumulator, -} - -impl std::fmt::Debug for Instrument { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - f.debug_struct("Instrument") - .field("descriptor", &self.descriptor) - .field("meter", &"Accumulator") - .finish() - } -} - -impl sdk_api::InstrumentCore for Instrument { - fn descriptor(&self) -> &Descriptor { - &self.descriptor - } -} - -impl sdk_api::MeterCore for Accumulator { - fn new_sync_instrument( - &self, - descriptor: Descriptor, - ) -> Result> { - Ok(Arc::new(SyncInstrument { - instrument: Arc::new(Instrument { - descriptor, - meter: self.clone(), - }), - })) - } - - fn record_batch_with_context( - &self, - _cx: &Context, - attributes: &[KeyValue], - measurements: Vec, - ) { - for measure in measurements.into_iter() { - if let Some(instrument) = measure - .instrument() - .as_any() - .downcast_ref::() - { - let handle = instrument.acquire_handle(attributes); - - handle.record_one(measure.into_number()); - } +impl Record { + fn capture_one(&self, cx: &Context, number: Number) { + let current = match &self.current { + Some(current) => current, + // The instrument is disabled according to the AggregatorSelector. 
+ None => return, + }; + if let Err(err) = aggregators::range_test(&number, &self.instrument.descriptor) + .and_then(|_| current.update(cx, &number, &self.instrument.descriptor)) + { + global::handle_error(err); + return; } - } - - fn new_async_instrument( - &self, - descriptor: Descriptor, - runner: Option, - ) -> Result> { - let instrument = Arc::new(AsyncInstrument { - instrument: Arc::new(Instrument { - descriptor, - meter: self.clone(), - }), - recorders: Arc::new(Mutex::new(None)), - }); - self.0.register(instrument.clone(), runner)?; - - Ok(instrument) - } - - fn new_batch_observer(&self, runner: AsyncRunner) -> Result<()> { - self.0.register_runner(runner) - } -} - -#[cfg(test)] -mod tests { - use crate::export::metrics::ExportKindSelector; - use crate::metrics::accumulator; - use crate::metrics::controllers::pull; - use crate::metrics::selectors::simple::Selector; - use crate::testing::metric::NoopProcessor; - use crate::Resource; - use opentelemetry_api::metrics::MeterProvider; - use opentelemetry_api::{Key, KeyValue}; - use std::sync::Arc; - - // Prevent the debug message to get into loop - #[test] - fn test_debug_message() { - let controller = pull( - Box::new(Selector::Exact), - Box::new(ExportKindSelector::Delta), - ) - .build(); - let meter = controller.provider().meter("test", None, None); - let counter = meter.f64_counter("test").init(); - println!("{:?}, {:?}, {:?}", controller, meter, counter); - } - - #[test] - fn test_sdk_provided_resource_in_accumulator() { - let default_service_name = accumulator(Arc::new(NoopProcessor)).build(); - assert_eq!( - default_service_name - .0 - .resource - .get(Key::from_static_str("service.name")) - .map(|v| v.to_string()), - Some("unknown_service".to_string()) - ); - - let custom_service_name = accumulator(Arc::new(NoopProcessor)) - .with_resource(Resource::new(vec![KeyValue::new( - "service.name", - "test_service", - )])) - .build(); - assert_eq!( - custom_service_name - .0 - .resource - .get(Key::from_static_str("service.name")) - .map(|v| v.to_string()), - Some("test_service".to_string()) - ); - - let no_service_name = accumulator(Arc::new(NoopProcessor)) - .with_resource(Resource::empty()) - .build(); - - assert_eq!( - no_service_name - .0 - .resource - .get(Key::from_static_str("service.name")) - .map(|v| v.to_string()), - None - ) + // Record was modified, inform the collect() that things need + // to be collected while the record is still mapped. 
+ self.update_count.fetch_add(&NumberKind::U64, &1u64.into()); } } diff --git a/opentelemetry-sdk/src/metrics/processors/basic.rs b/opentelemetry-sdk/src/metrics/processors/basic.rs index 06df88cb30..dcd55d5093 100644 --- a/opentelemetry-sdk/src/metrics/processors/basic.rs +++ b/opentelemetry-sdk/src/metrics/processors/basic.rs @@ -1,15 +1,17 @@ use crate::{ export::metrics::{ - self, Accumulation, Aggregator, AggregatorSelector, CheckpointSet, Checkpointer, - ExportKind, ExportKindFor, LockedProcessor, Processor, Record, Subtractor, + self, + aggregation::{Temporality, TemporalitySelector}, + Accumulation, AggregatorSelector, Checkpointer, CheckpointerFactory, LockedCheckpointer, + LockedProcessor, Processor, Reader, Record, }, - metrics::aggregators::SumAggregator, - Resource, + metrics::{aggregators::Aggregator, sdk_api::Descriptor}, }; +use core::fmt; use fnv::FnvHasher; use opentelemetry_api::{ attributes::{hash_attributes, AttributeSet}, - metrics::{Descriptor, MetricsError, Result}, + metrics::{MetricsError, Result}, }; use std::collections::HashMap; use std::hash::{Hash, Hasher}; @@ -17,48 +19,92 @@ use std::sync::{Arc, Mutex, MutexGuard}; use std::time::SystemTime; /// Create a new basic processor -pub fn basic( - aggregator_selector: Box, - export_selector: Box, +pub fn factory(aggregator_selector: A, temporality_selector: T) -> BasicProcessorBuilder +where + A: AggregatorSelector + Send + Sync + 'static, + T: TemporalitySelector + Send + Sync + 'static, +{ + BasicProcessorBuilder { + aggregator_selector: Arc::new(aggregator_selector), + temporality_selector: Arc::new(temporality_selector), + memory: false, + } +} + +pub struct BasicProcessorBuilder { + aggregator_selector: Arc, + temporality_selector: Arc, memory: bool, -) -> BasicProcessor { - BasicProcessor { - aggregator_selector, - export_selector, - state: Mutex::new(BasicProcessorState::with_memory(memory)), +} + +impl BasicProcessorBuilder { + /// Memory controls whether the processor remembers metric instruments and + /// attribute sets that were previously reported. + /// + /// When Memory is `true`, [`Reader::try_for_each`] will visit metrics that were + /// not updated in the most recent interval. 
+ pub fn with_memory(mut self, memory: bool) -> Self { + self.memory = memory; + self + } +} + +impl fmt::Debug for BasicProcessorBuilder { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("BasicProcessorBuilder") + .field("memory", &self.memory) + .finish() + } +} + +impl CheckpointerFactory for BasicProcessorBuilder { + fn checkpointer(&self) -> Arc { + Arc::new(BasicProcessor { + aggregator_selector: Arc::clone(&self.aggregator_selector), + temporality_selector: Arc::clone(&self.temporality_selector), + state: Mutex::new(BasicProcessorState::with_memory(self.memory)), + }) } } /// Basic metric integration strategy -#[derive(Debug)] pub struct BasicProcessor { - aggregator_selector: Box, - export_selector: Box, + aggregator_selector: Arc, + temporality_selector: Arc, state: Mutex, } -impl BasicProcessor { - /// Lock this processor to return a mutable locked processor - pub fn lock(&self) -> Result> { - self.state - .lock() - .map_err(From::from) - .map(|locked| BasicLockedProcessor { +impl Processor for BasicProcessor { + fn aggregator_selector(&self) -> &dyn AggregatorSelector { + self.aggregator_selector.as_ref() + } +} + +impl Checkpointer for BasicProcessor { + fn checkpoint( + &self, + f: &mut dyn FnMut(&mut dyn LockedCheckpointer) -> Result<()>, + ) -> Result<()> { + self.state.lock().map_err(From::from).and_then(|locked| { + f(&mut BasicLockedProcessor { parent: self, state: locked, }) + }) } } -impl Processor for BasicProcessor { - fn aggregation_selector(&self) -> &dyn AggregatorSelector { - self.aggregator_selector.as_ref() +impl fmt::Debug for BasicProcessor { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("BasicProcessor") + .field("state", &self.state) + .finish() } } /// A locked representation of the processor used where mutable references are necessary. #[derive(Debug)] -pub struct BasicLockedProcessor<'a> { +struct BasicLockedProcessor<'a> { parent: &'a BasicProcessor, state: MutexGuard<'a, BasicProcessorState>, } @@ -73,7 +119,6 @@ impl<'a> LockedProcessor for BasicLockedProcessor<'a> { let mut hasher = FnvHasher::default(); desc.attribute_hash().hash(&mut hasher); hash_attributes(&mut hasher, accumulation.attributes().into_iter()); - hash_attributes(&mut hasher, accumulation.resource().into_iter()); let key = StateKey(hasher.finish()); let agg = accumulation.aggregator(); let finished_collection = self.state.finished_collection; @@ -127,7 +172,7 @@ impl<'a> LockedProcessor for BasicLockedProcessor<'a> { // before merging below. if !value.current_owned { let tmp = value.current.clone(); - if let Some(current) = self.parent.aggregation_selector().aggregator_for(desc) { + if let Some(current) = self.parent.aggregator_selector.aggregator_for(desc) { value.current = current; value.current_owned = true; tmp.synchronized_move(&value.current, desc)?; @@ -140,18 +185,17 @@ impl<'a> LockedProcessor for BasicLockedProcessor<'a> { let stateful = self .parent - .export_selector - .export_kind_for(desc) + .temporality_selector + .temporality_for(desc, agg.aggregation().kind()) .memory_required(desc.instrument_kind()); - let mut delta = None; let cumulative = if stateful { if desc.instrument_kind().precomputed_sum() { // If we know we need to compute deltas, allocate one. 
- delta = self.parent.aggregation_selector().aggregator_for(desc); + return Err(MetricsError::Other("No cumulative to sum support".into())); } // Always allocate a cumulative aggregator if stateful - self.parent.aggregation_selector().aggregator_for(desc) + self.parent.aggregator_selector.aggregator_for(desc) } else { None }; @@ -161,10 +205,8 @@ impl<'a> LockedProcessor for BasicLockedProcessor<'a> { StateValue { descriptor: desc.clone(), attributes: accumulation.attributes().clone(), - resource: accumulation.resource().clone(), current_owned: false, current: agg.clone(), - delta, cumulative, stateful, updated: finished_collection, @@ -175,8 +217,12 @@ impl<'a> LockedProcessor for BasicLockedProcessor<'a> { } } -impl Checkpointer for BasicLockedProcessor<'_> { - fn checkpoint_set(&mut self) -> &mut dyn CheckpointSet { +impl LockedCheckpointer for BasicLockedProcessor<'_> { + fn processor(&mut self) -> &mut dyn LockedProcessor { + self + } + + fn reader(&mut self) -> &mut dyn Reader { &mut *self.state } @@ -221,29 +267,10 @@ impl Checkpointer for BasicLockedProcessor<'_> { return true; } - // Update Aggregator state to support exporting either a - // delta or a cumulative aggregation. - if mkind.precomputed_sum() { - if let Some(current_subtractor) = - value.current.as_any().downcast_ref::() - { - // This line is equivalent to: - // value.delta = currentSubtractor - value.cumulative - if let (Some(cumulative), Some(delta)) = - (value.cumulative.as_ref(), value.delta.as_ref()) - { - result = current_subtractor - .subtract(cumulative.as_ref(), delta.as_ref(), &value.descriptor) - .and_then(|_| { - value - .current - .synchronized_move(cumulative, &value.descriptor) - }); - } - } else { - result = Err(MetricsError::NoSubtraction); - } - } else { + // The only kind of aggregators that are not stateless + // are the ones needing delta to cumulative + // conversion. Merge aggregator state in this case. + if !mkind.precomputed_sum() { // This line is equivalent to: // value.cumulative = value.cumulative + value.delta if let Some(cumulative) = value.cumulative.as_ref() { @@ -301,10 +328,10 @@ impl Default for BasicProcessorState { } } -impl CheckpointSet for BasicProcessorState { +impl Reader for BasicProcessorState { fn try_for_each( &mut self, - exporter: &dyn ExportKindFor, + temporality_selector: &dyn TemporalitySelector, f: &mut dyn FnMut(&Record<'_>) -> Result<()>, ) -> Result<()> { if self.started_collection != self.finished_collection { @@ -323,8 +350,10 @@ impl CheckpointSet for BasicProcessorState { return Ok(()); } - match exporter.export_kind_for(&value.descriptor) { - ExportKind::Cumulative => { + match temporality_selector + .temporality_for(&value.descriptor, value.current.aggregation().kind()) + { + Temporality::Cumulative => { // If stateful, the sum has been computed. If stateless, the // input was already cumulative. Either way, use the // checkpointed value: @@ -336,15 +365,13 @@ impl CheckpointSet for BasicProcessorState { start = self.process_start; } - - ExportKind::Delta => { + Temporality::Delta => { // Precomputed sums are a special case. 
if instrument_kind.precomputed_sum() { - agg = value.delta.as_ref(); - } else { - agg = Some(&value.current); + return Err(MetricsError::Other("No cumulative to delta".into())); } + agg = Some(&value.current); start = self.interval_start; } } @@ -352,11 +379,11 @@ impl CheckpointSet for BasicProcessorState { let res = f(&metrics::record( &value.descriptor, &value.attributes, - &value.resource, agg, start, self.interval_end, )); + if let Err(MetricsError::NoDataCollected) = res { Ok(()) } else { @@ -377,9 +404,6 @@ struct StateValue { /// Instrument attributes attributes: AttributeSet, - /// Resource that created the instrument - resource: Resource, - /// Indicates the last sequence number when this value had process called by an /// accumulator. updated: u64, @@ -402,10 +426,6 @@ struct StateValue { /// single collection round. current: Arc, - /// If `Some`, refers to an `Aggregator` owned by the processor used to compute - /// deltas between precomputed sums. - delta: Option>, - /// If `Some`, refers to an `Aggregator` owned by the processor used to store /// the last cumulative value. cumulative: Option>, diff --git a/opentelemetry-sdk/src/metrics/processors/mod.rs b/opentelemetry-sdk/src/metrics/processors/mod.rs index bb3bd423eb..5267c631de 100644 --- a/opentelemetry-sdk/src/metrics/processors/mod.rs +++ b/opentelemetry-sdk/src/metrics/processors/mod.rs @@ -1,4 +1,4 @@ //! Metric Processors mod basic; -pub use basic::{basic, BasicProcessor}; +pub use basic::{factory, BasicProcessor}; diff --git a/opentelemetry-sdk/src/metrics/registry.rs b/opentelemetry-sdk/src/metrics/registry.rs new file mode 100644 index 0000000000..6c48849f6a --- /dev/null +++ b/opentelemetry-sdk/src/metrics/registry.rs @@ -0,0 +1,129 @@ +//! Metrics Registry API +use crate::metrics::sdk_api::{Descriptor, SyncInstrumentCore}; +use core::fmt; +use opentelemetry_api::{ + metrics::{MetricsError, Result}, + Context, +}; +use std::sync::{Arc, Mutex}; +use std::{any::Any, collections::HashMap}; + +use super::sdk_api::{AsyncInstrumentCore, InstrumentCore, MeterCore}; + +/// Create a new `UniqueInstrumentMeterCore` from a `InstrumentProvider`. +pub fn unique_instrument_meter_core(core: T) -> UniqueInstrumentMeterCore +where + T: AnyMeterCore + Send + Sync + 'static, +{ + UniqueInstrumentMeterCore::wrap(core) +} + +/// An extension trait that allows meters to be downcast +pub trait AnyMeterCore: MeterCore { + /// Returns the current type as [`Any`] + fn as_any(&self) -> &dyn Any; +} + +impl AnyMeterCore for T { + fn as_any(&self) -> &dyn Any { + self + } +} + +/// Implements the [`MeterCore`] interface, adding uniqueness checking for +/// instrument descriptors. 
+pub struct UniqueInstrumentMeterCore { + inner: Box, + state: Mutex>>, +} + +impl fmt::Debug for UniqueInstrumentMeterCore { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.write_str("UniqueInstrumentMeterCore") + } +} + +impl UniqueInstrumentMeterCore { + fn wrap(inner: T) -> Self + where + T: AnyMeterCore + Send + Sync + 'static, + { + UniqueInstrumentMeterCore { + inner: Box::new(inner), + state: Mutex::new(HashMap::default()), + } + } + + pub(crate) fn meter_core(&self) -> &dyn Any { + self.inner.as_any() + } +} + +impl MeterCore for UniqueInstrumentMeterCore { + fn new_sync_instrument( + &self, + descriptor: Descriptor, + ) -> Result> { + self.state.lock().map_err(Into::into).and_then(|mut state| { + let instrument = check_uniqueness(&state, &descriptor)?; + match instrument { + Some(instrument) => Ok(instrument), + None => { + let instrument = self.inner.new_sync_instrument(descriptor.clone())?; + state.insert(descriptor.name().into(), instrument.clone().as_dyn_core()); + + Ok(instrument) + } + } + }) + } + + fn new_async_instrument( + &self, + descriptor: Descriptor, + ) -> Result> { + self.state.lock().map_err(Into::into).and_then(|mut state| { + let instrument = check_uniqueness(&state, &descriptor)?; + match instrument { + Some(instrument) => Ok(instrument), + None => { + let instrument = self.inner.new_async_instrument(descriptor)?; + state.insert( + instrument.descriptor().name().into(), + instrument.clone().as_dyn_core(), + ); + + Ok(instrument) + } + } + }) + } + + fn register_callback(&self, f: Box) -> Result<()> { + self.inner.register_callback(f) + } +} + +fn check_uniqueness( + instruments: &HashMap>, + descriptor: &Descriptor, +) -> Result> { + if let Some(instrument) = instruments.get(descriptor.name()) { + if is_equal(instrument.descriptor(), descriptor) { + Ok(instrument.as_any().downcast_ref::().cloned()) + } else { + Err(MetricsError::MetricKindMismatch(format!( + "metric {} registered as a {:?} {:?}", + descriptor.name(), + descriptor.number_kind(), + descriptor.instrument_kind() + ))) + } + } else { + Ok(None) + } +} + +fn is_equal(a: &Descriptor, b: &Descriptor) -> bool { + a.instrument_kind() == b.instrument_kind() && a.number_kind() == b.number_kind() +} diff --git a/opentelemetry-api/src/metrics/async_instrument.rs b/opentelemetry-sdk/src/metrics/sdk_api/async_instrument.rs similarity index 63% rename from opentelemetry-api/src/metrics/async_instrument.rs rename to opentelemetry-sdk/src/metrics/sdk_api/async_instrument.rs index 3061491f47..cf80e4933b 100644 --- a/opentelemetry-api/src/metrics/async_instrument.rs +++ b/opentelemetry-sdk/src/metrics/sdk_api/async_instrument.rs @@ -120,60 +120,3 @@ impl BatchObserverResult { (self.f)(attributes, observations) } } - -/// Called when collecting async instruments -pub enum AsyncRunner { - /// Callback for `f64` observed values - F64(F64ObserverCallback), - /// Callback for `i64` observed values - I64(I64ObserverCallback), - /// Callback for `u64` observed values - U64(U64ObserverCallback), - /// Callback for batch observed values - Batch(BatchObserverCallback), -} - -impl AsyncRunner { - /// Run accepts a single instrument and function for capturing observations - /// of that instrument. Each call to the function receives one captured - /// observation. (The function accepts multiple observations so the same - /// implementation can be used for batch runners.) 
- pub fn run( - &self, - instrument: &Option>, - f: fn(&[KeyValue], &[Observation]), - ) { - match (instrument, self) { - (Some(i), AsyncRunner::F64(run)) => run(ObserverResult::new(i.clone(), f)), - (Some(i), AsyncRunner::I64(run)) => run(ObserverResult::new(i.clone(), f)), - (Some(i), AsyncRunner::U64(run)) => run(ObserverResult::new(i.clone(), f)), - (None, AsyncRunner::Batch(run)) => run(BatchObserverResult::new(f)), - _ => global::handle_error(MetricsError::Other( - "Invalid async runner / instrument pair".into(), - )), - } - } -} - -impl fmt::Debug for AsyncRunner { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - match self { - AsyncRunner::F64(_) => f - .debug_struct("AsyncRunner::F64") - .field("closure", &"Fn(ObserverResult)") - .finish(), - AsyncRunner::I64(_) => f - .debug_struct("AsyncRunner::I64") - .field("closure", &"Fn(ObserverResult)") - .finish(), - AsyncRunner::U64(_) => f - .debug_struct("AsyncRunner::U64") - .field("closure", &"Fn(ObserverResult)") - .finish(), - AsyncRunner::Batch(_) => f - .debug_struct("AsyncRunner::Batch") - .field("closure", &"Fn(BatchObserverResult)") - .finish(), - } - } -} diff --git a/opentelemetry-api/src/metrics/descriptor.rs b/opentelemetry-sdk/src/metrics/sdk_api/descriptor.rs similarity index 56% rename from opentelemetry-api/src/metrics/descriptor.rs rename to opentelemetry-sdk/src/metrics/sdk_api/descriptor.rs index 14bed98152..dc324ce11e 100644 --- a/opentelemetry-api/src/metrics/descriptor.rs +++ b/opentelemetry-sdk/src/metrics/sdk_api/descriptor.rs @@ -1,7 +1,6 @@ -use crate::metrics::{InstrumentConfig, InstrumentKind, NumberKind}; -use crate::InstrumentationLibrary; +use crate::metrics::sdk_api::{InstrumentKind, NumberKind}; use fnv::FnvHasher; -use std::borrow::Cow; +use opentelemetry_api::metrics::Unit; use std::hash::{Hash, Hasher}; /// Descriptor contains all the settings that describe an instrument, including @@ -11,39 +10,37 @@ pub struct Descriptor { name: String, instrument_kind: InstrumentKind, number_kind: NumberKind, - pub(crate) config: InstrumentConfig, + description: Option, + unit: Option, attribute_hash: u64, } impl Descriptor { /// Create a new descriptor - pub fn new>>( + pub fn new( name: String, - instrumentation_name: T, - instrumentation_version: Option, - schema_url: Option, instrument_kind: InstrumentKind, number_kind: NumberKind, + description: Option, + unit: Option, ) -> Self { let mut hasher = FnvHasher::default(); name.hash(&mut hasher); - let instrumentation_name = instrumentation_name.into(); - let instrumentation_version = instrumentation_version.map(Into::>::into); - instrumentation_name.as_ref().hash(&mut hasher); - instrumentation_version.as_ref().hash(&mut hasher); instrument_kind.hash(&mut hasher); number_kind.hash(&mut hasher); - let config = InstrumentConfig::with_instrumentation( - instrumentation_name, - instrumentation_version.map(Into::into), - schema_url.map(Into::into), - ); + if let Some(description) = &description { + description.hash(&mut hasher); + } + if let Some(unit) = &unit { + unit.hash(&mut hasher); + } Descriptor { name, instrument_kind, number_kind, - config, + description, + unit, attribute_hash: hasher.finish(), } } @@ -66,27 +63,17 @@ impl Descriptor { /// A human-readable description of the metric instrument. 
pub fn description(&self) -> Option<&String> {
-        self.config.description.as_ref()
+        self.description.as_ref()
     }
 
     /// Assign a new description
     pub fn set_description(&mut self, description: String) {
-        self.config.description = Some(description);
+        self.description = Some(description);
     }
 
     /// Unit describes the units of the metric instrument.
     pub fn unit(&self) -> Option<&str> {
-        self.config.unit.as_ref().map(|unit| unit.as_ref())
-    }
-
-    /// The name of the library that provided instrumentation for this instrument.
-    pub fn instrumentation_name(&self) -> Cow<'static, str> {
-        self.config.instrumentation_name()
-    }
-
-    /// Instrumentation library reference
-    pub fn instrumentation_library(&self) -> &InstrumentationLibrary {
-        &self.config.instrumentation_library
+        self.unit.as_ref().map(|unit| unit.as_ref())
     }
 
     /// The pre-computed hash of the descriptor data
diff --git a/opentelemetry-api/src/metrics/kind.rs b/opentelemetry-sdk/src/metrics/sdk_api/instrument_kind.rs
similarity index 83%
rename from opentelemetry-api/src/metrics/kind.rs
rename to opentelemetry-sdk/src/metrics/sdk_api/instrument_kind.rs
index f27316496e..824ee1d162 100644
--- a/opentelemetry-api/src/metrics/kind.rs
+++ b/opentelemetry-sdk/src/metrics/sdk_api/instrument_kind.rs
@@ -3,7 +3,7 @@
 /// | **Name** | Instrument kind | Function(argument) | Default aggregation | Notes |
 /// | ----------------------- | ----- | --------- | ------------- | --- |
 /// | **Histogram** | Synchronous | Record(value) | MinMaxSumCount | Per-request, any non-additive measurement |
-/// | **ValueRecorder** | Synchronous | Record(value) | MinMaxSumCount | Depreated. Use Histogram. |
+/// | **Gauge** | Synchronous | Record(value) | MinMaxSumCount | Deprecated. Use Histogram. |
 /// | **ValueObserver** | Asynchronous | Observe(value) | MinMaxSumCount | Per-interval, any non-additive measurement |
 /// | **Counter** | Synchronous additive monotonic | Add(increment) | Sum | Per-request, part of a monotonic sum |
 /// | **UpDownCounter** | Synchronous additive | Add(increment) | Sum | Per-request, part of a non-monotonic sum |
@@ -11,18 +11,18 @@
 /// | **UpDownSumObserver** | Asynchronous additive | Observe(sum) | Sum | Per-interval, reporting a non-monotonic sum |
 #[derive(Clone, Debug, PartialEq, Hash)]
 pub enum InstrumentKind {
-    /// A synchronous per-request recorder of non-additive measurements.
+    /// A histogram instrument
     Histogram,
-    /// An asynchronous per-interval recorder of non-additive measurements.
-    ValueObserver,
+    /// A gauge observer instrument
+    GaugeObserver,
     /// A synchronous per-request part of a monotonic sum.
     Counter,
     /// A synchronous per-request part of a non-monotonic sum.
     UpDownCounter,
     /// An asynchronous per-interval recorder of a monotonic sum.
-    SumObserver,
+    CounterObserver,
     /// An asynchronous per-interval recorder of a non-monotonic sum.
-    UpDownSumObserver,
+    UpDownCounterObserver,
 }
 
 impl InstrumentKind {
@@ -45,8 +45,8 @@ impl InstrumentKind {
             self,
             InstrumentKind::Counter
                 | InstrumentKind::UpDownCounter
-                | InstrumentKind::SumObserver
-                | InstrumentKind::UpDownSumObserver
+                | InstrumentKind::CounterObserver
+                | InstrumentKind::UpDownCounterObserver
         )
     }
 
@@ -57,7 +57,10 @@ impl InstrumentKind {
 
     /// Whether this kind of instrument exposes a non-decreasing sum.
     pub fn monotonic(&self) -> bool {
-        matches!(self, InstrumentKind::Counter | InstrumentKind::SumObserver)
+        matches!(
+            self,
+            InstrumentKind::Counter | InstrumentKind::CounterObserver
+        )
     }
 
     /// Whether this kind of instrument receives precomputed sums.
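A minimal sketch (not part of the diff) of how the renamed kinds behave under the predicates above; the `adding` name for the additive predicate is assumed here, since the hunk context elides its signature:

```rust
use opentelemetry_sdk::metrics::sdk_api::InstrumentKind;

fn main() {
    // CounterObserver (formerly SumObserver) remains monotonic and additive.
    assert!(InstrumentKind::CounterObserver.monotonic());
    assert!(InstrumentKind::CounterObserver.adding());

    // UpDownCounterObserver (formerly UpDownSumObserver) is additive but not monotonic.
    assert!(InstrumentKind::UpDownCounterObserver.adding());
    assert!(!InstrumentKind::UpDownCounterObserver.monotonic());

    // GaugeObserver (formerly ValueObserver) is a non-additive kind.
    assert!(!InstrumentKind::GaugeObserver.adding());
}
```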
diff --git a/opentelemetry-sdk/src/metrics/sdk_api/mod.rs b/opentelemetry-sdk/src/metrics/sdk_api/mod.rs
new file mode 100644
index 0000000000..388670eca7
--- /dev/null
+++ b/opentelemetry-sdk/src/metrics/sdk_api/mod.rs
@@ -0,0 +1,81 @@
+//! SDK API
+
+// mod async_instrument;
+mod descriptor;
+mod instrument_kind;
+mod number;
+mod wrap;
+// mod sync_instrument;
+
+use std::any::Any;
+use std::sync::Arc;
+
+pub use descriptor::*;
+pub use instrument_kind::*;
+pub use number::*;
+use opentelemetry_api::{metrics::Result, Context, KeyValue};
+pub use wrap::wrap_meter_core;
+
+/// The interface an SDK must implement to supply a Meter implementation.
+pub trait MeterCore {
+    /// Create a new synchronous instrument implementation.
+    fn new_sync_instrument(
+        &self,
+        descriptor: Descriptor,
+    ) -> Result<Arc<dyn SyncInstrumentCore + Send + Sync>>;
+
+    /// Create a new asynchronous instrument implementation.
+    ///
+    /// Callbacks for asynchronous instruments are registered separately via `register_callback`.
+    fn new_async_instrument(
+        &self,
+        descriptor: Descriptor,
+    ) -> Result<Arc<dyn AsyncInstrumentCore + Send + Sync>>;
+
+    /// Register a callback to be run during collection.
+    fn register_callback(&self, f: Box<dyn Fn(&Context) + Send + Sync>) -> Result<()>;
+}
+
+/// A utility extension to allow upcasting.
+///
+/// Can be removed once [trait_upcasting] is stabilized.
+///
+/// [trait_upcasting]: https://doc.rust-lang.org/unstable-book/language-features/trait-upcasting.html
+pub trait AsDynInstrumentCore {
+    /// Create an `Arc<dyn InstrumentCore>` from an impl of `InstrumentCore`.
+    fn as_dyn_core<'a>(self: Arc<Self>) -> Arc<dyn InstrumentCore + Send + Sync + 'a>
+    where
+        Self: 'a;
+}
+
+impl<T: InstrumentCore + Send + Sync> AsDynInstrumentCore for T {
+    fn as_dyn_core<'a>(self: Arc<Self>) -> Arc<dyn InstrumentCore + Send + Sync + 'a>
+    where
+        Self: 'a,
+    {
+        self
+    }
+}
+
+/// A common interface for synchronous and asynchronous instruments.
+pub trait InstrumentCore: AsDynInstrumentCore {
+    /// The instrument's descriptor.
+    fn descriptor(&self) -> &Descriptor;
+
+    /// Returns self as any
+    fn as_any(&self) -> &dyn Any;
+}
+
+/// The implementation-level interface to a generic synchronous instrument
+/// (e.g., Histogram and Counter instruments).
+pub trait SyncInstrumentCore: InstrumentCore {
+    /// Capture a single synchronous metric event.
+    fn record_one(&self, cx: &Context, number: Number, attributes: &'_ [KeyValue]);
+}
+
+/// An implementation-level interface to an asynchronous instrument (e.g.,
+/// Observable instruments).
+pub trait AsyncInstrumentCore: InstrumentCore {
+    /// Captures a single asynchronous metric event.
+ fn observe_one(&self, cx: &Context, number: Number, attributes: &'_ [KeyValue]); +} diff --git a/opentelemetry-api/src/metrics/number.rs b/opentelemetry-sdk/src/metrics/sdk_api/number.rs similarity index 100% rename from opentelemetry-api/src/metrics/number.rs rename to opentelemetry-sdk/src/metrics/sdk_api/number.rs diff --git a/opentelemetry-api/src/metrics/sync_instrument.rs b/opentelemetry-sdk/src/metrics/sdk_api/sync_instrument.rs similarity index 100% rename from opentelemetry-api/src/metrics/sync_instrument.rs rename to opentelemetry-sdk/src/metrics/sdk_api/sync_instrument.rs diff --git a/opentelemetry-sdk/src/metrics/sdk_api/wrap.rs b/opentelemetry-sdk/src/metrics/sdk_api/wrap.rs new file mode 100644 index 0000000000..636e1579b5 --- /dev/null +++ b/opentelemetry-sdk/src/metrics/sdk_api/wrap.rs @@ -0,0 +1,319 @@ +use crate::metrics::sdk_api::MeterCore; +use crate::metrics::sdk_api::{ + AsyncInstrumentCore, Descriptor, InstrumentKind, Number, NumberKind, SyncInstrumentCore, +}; +use opentelemetry_api::metrics::{ + AsyncCounter, AsyncUpDownCounter, ObservableUpDownCounter, SyncCounter, SyncHistogram, + SyncUpDownCounter, UpDownCounter, +}; +use opentelemetry_api::KeyValue; +use opentelemetry_api::{ + metrics::{ + AsyncGauge, Counter, Histogram, InstrumentProvider, Meter, ObservableCounter, + ObservableGauge, Result, Unit, + }, + Context, InstrumentationLibrary, +}; +use std::sync::Arc; + +/// wraps impl to be a full implementation of a Meter. +pub fn wrap_meter_core( + core: Arc, + library: InstrumentationLibrary, +) -> Meter { + Meter::new(library, Arc::new(MeterImpl(core))) +} + +struct MeterImpl(Arc); + +struct SyncInstrument(Arc); + +impl> SyncCounter for SyncInstrument { + fn add(&self, cx: &Context, value: T, attributes: &[KeyValue]) { + self.0.record_one(cx, value.into(), attributes) + } +} + +impl> SyncUpDownCounter for SyncInstrument { + fn add(&self, cx: &Context, value: T, attributes: &[KeyValue]) { + self.0.record_one(cx, value.into(), attributes) + } +} + +impl> SyncHistogram for SyncInstrument { + fn record(&self, cx: &Context, value: T, attributes: &[KeyValue]) { + self.0.record_one(cx, value.into(), attributes) + } +} + +struct AsyncInstrument(Arc); + +impl> AsyncCounter for AsyncInstrument { + fn observe(&self, cx: &Context, value: T, attributes: &[KeyValue]) { + self.0.observe_one(cx, value.into(), attributes) + } +} + +impl> AsyncUpDownCounter for AsyncInstrument { + fn observe(&self, cx: &Context, value: T, attributes: &[KeyValue]) { + self.0.observe_one(cx, value.into(), attributes) + } +} + +impl> AsyncGauge for AsyncInstrument { + fn observe(&self, cx: &Context, value: T, attributes: &[KeyValue]) { + self.0.observe_one(cx, value.into(), attributes) + } +} + +impl InstrumentProvider for MeterImpl { + fn u64_counter( + &self, + name: String, + description: Option, + unit: Option, + ) -> Result> { + let instrument = self.0.new_sync_instrument(Descriptor::new( + name, + InstrumentKind::Counter, + NumberKind::U64, + description, + unit, + ))?; + + Ok(Counter::new(Arc::new(SyncInstrument(instrument)))) + } + + fn f64_counter( + &self, + name: String, + description: Option, + unit: Option, + ) -> Result> { + let instrument = self.0.new_sync_instrument(Descriptor::new( + name, + InstrumentKind::Counter, + NumberKind::F64, + description, + unit, + ))?; + + Ok(Counter::new(Arc::new(SyncInstrument(instrument)))) + } + + fn u64_observable_counter( + &self, + name: String, + description: Option, + unit: Option, + ) -> Result> { + let instrument = 
self.0.new_async_instrument(Descriptor::new( + name, + InstrumentKind::Counter, + NumberKind::U64, + description, + unit, + ))?; + + Ok(ObservableCounter::new(Arc::new(AsyncInstrument( + instrument, + )))) + } + + fn f64_observable_counter( + &self, + name: String, + description: Option, + unit: Option, + ) -> Result> { + let instrument = self.0.new_async_instrument(Descriptor::new( + name, + InstrumentKind::Counter, + NumberKind::F64, + description, + unit, + ))?; + + Ok(ObservableCounter::new(Arc::new(AsyncInstrument( + instrument, + )))) + } + + fn i64_up_down_counter( + &self, + name: String, + description: Option, + unit: Option, + ) -> Result> { + let instrument = self.0.new_sync_instrument(Descriptor::new( + name, + InstrumentKind::UpDownCounter, + NumberKind::I64, + description, + unit, + ))?; + + Ok(UpDownCounter::new(Arc::new(SyncInstrument(instrument)))) + } + + fn f64_up_down_counter( + &self, + name: String, + description: Option, + unit: Option, + ) -> Result> { + let instrument = self.0.new_sync_instrument(Descriptor::new( + name, + InstrumentKind::UpDownCounter, + NumberKind::F64, + description, + unit, + ))?; + + Ok(UpDownCounter::new(Arc::new(SyncInstrument(instrument)))) + } + + fn i64_observable_up_down_counter( + &self, + name: String, + description: Option, + unit: Option, + ) -> Result> { + let instrument = self.0.new_async_instrument(Descriptor::new( + name, + InstrumentKind::UpDownCounterObserver, + NumberKind::I64, + description, + unit, + ))?; + + Ok(ObservableUpDownCounter::new(Arc::new(AsyncInstrument( + instrument, + )))) + } + + fn f64_observable_up_down_counter( + &self, + name: String, + description: Option, + unit: Option, + ) -> Result> { + let instrument = self.0.new_async_instrument(Descriptor::new( + name, + InstrumentKind::UpDownCounterObserver, + NumberKind::F64, + description, + unit, + ))?; + + Ok(ObservableUpDownCounter::new(Arc::new(AsyncInstrument( + instrument, + )))) + } + + fn u64_observable_gauge( + &self, + name: String, + description: Option, + unit: Option, + ) -> Result> { + let instrument = self.0.new_async_instrument(Descriptor::new( + name, + InstrumentKind::GaugeObserver, + NumberKind::U64, + description, + unit, + ))?; + + Ok(ObservableGauge::new(Arc::new(AsyncInstrument(instrument)))) + } + + fn i64_observable_gauge( + &self, + name: String, + description: Option, + unit: Option, + ) -> Result> { + let instrument = self.0.new_async_instrument(Descriptor::new( + name, + InstrumentKind::GaugeObserver, + NumberKind::I64, + description, + unit, + ))?; + + Ok(ObservableGauge::new(Arc::new(AsyncInstrument(instrument)))) + } + + fn f64_observable_gauge( + &self, + name: String, + description: Option, + unit: Option, + ) -> Result> { + let instrument = self.0.new_async_instrument(Descriptor::new( + name, + InstrumentKind::GaugeObserver, + NumberKind::F64, + description, + unit, + ))?; + + Ok(ObservableGauge::new(Arc::new(AsyncInstrument(instrument)))) + } + + fn f64_histogram( + &self, + name: String, + description: Option, + unit: Option, + ) -> Result> { + let instrument = self.0.new_sync_instrument(Descriptor::new( + name, + InstrumentKind::Histogram, + NumberKind::F64, + description, + unit, + ))?; + + Ok(Histogram::new(Arc::new(SyncInstrument(instrument)))) + } + + fn u64_histogram( + &self, + name: String, + description: Option, + unit: Option, + ) -> Result> { + let instrument = self.0.new_sync_instrument(Descriptor::new( + name, + InstrumentKind::Histogram, + NumberKind::U64, + description, + unit, + ))?; + + 
Ok(Histogram::new(Arc::new(SyncInstrument(instrument)))) + } + + fn i64_histogram( + &self, + name: String, + description: Option, + unit: Option, + ) -> Result> { + let instrument = self.0.new_sync_instrument(Descriptor::new( + name, + InstrumentKind::Histogram, + NumberKind::I64, + description, + unit, + ))?; + + Ok(Histogram::new(Arc::new(SyncInstrument(instrument)))) + } + + fn register_callback(&self, callback: Box) -> Result<()> { + self.0.register_callback(callback) + } +} diff --git a/opentelemetry-sdk/src/metrics/selectors/mod.rs b/opentelemetry-sdk/src/metrics/selectors/mod.rs index b7a0d4f547..5aab06f72e 100644 --- a/opentelemetry-sdk/src/metrics/selectors/mod.rs +++ b/opentelemetry-sdk/src/metrics/selectors/mod.rs @@ -1,2 +1,2 @@ -//! Metric Selectors +//! Aggregator Selectors pub mod simple; diff --git a/opentelemetry-sdk/src/metrics/selectors/simple.rs b/opentelemetry-sdk/src/metrics/selectors/simple.rs index 83fd5c9e8f..550ce34643 100644 --- a/opentelemetry-sdk/src/metrics/selectors/simple.rs +++ b/opentelemetry-sdk/src/metrics/selectors/simple.rs @@ -1,61 +1,43 @@ //! Simple Metric Selectors -use crate::export::metrics::{Aggregator, AggregatorSelector}; -use crate::metrics::aggregators; -use opentelemetry_api::metrics::{Descriptor, InstrumentKind}; +use crate::export::metrics::AggregatorSelector; +use crate::metrics::aggregators::{self, Aggregator}; +use crate::metrics::sdk_api::{Descriptor, InstrumentKind}; use std::sync::Arc; -/// Aggregation selection strategies. -#[derive(Debug)] -pub enum Selector { - /// A simple aggregation selector that uses counter, ddsketch, and ddsketch - /// aggregators for the three kinds of metric. This selector uses more cpu - /// and memory than the NewWithInexpensiveDistribution because it uses one - /// DDSketch per distinct instrument and attribute set. - Sketch(aggregators::DdSketchConfig), - /// A simple aggregation selector that uses last_value, sum, and - /// minmaxsumcount aggregators for metrics. This selector is faster and uses - /// less memory than the others because minmaxsumcount does not aggregate - /// quantile information. - Inexpensive, - /// A simple aggregation selector that uses sum and array aggregators for - /// metrics. This selector is able to compute exact quantiles. - Exact, - /// A simple aggregation selector that uses sum, and histogram aggregators - /// for metrics. This selector uses more memory than `Inexpensive` because - /// it uses a counter per bucket. - Histogram(Vec), +/// This selector is faster and uses less memory than the others in this package. +pub fn inexpensive() -> impl AggregatorSelector { + InexpensiveSelector } -impl AggregatorSelector for Selector { +#[derive(Debug, Clone)] +struct InexpensiveSelector; + +impl AggregatorSelector for InexpensiveSelector { + fn aggregator_for(&self, descriptor: &Descriptor) -> Option> { + match descriptor.instrument_kind() { + InstrumentKind::GaugeObserver => Some(Arc::new(aggregators::last_value())), + _ => Some(Arc::new(aggregators::sum())), + } + } +} + +/// A simple aggregator selector that uses histogram aggregators for `Histogram` +/// instruments. +/// +/// This selector is a good default choice for most metric exporters. 
+pub fn histogram(boundaries: impl Into<Vec<f64>>) -> impl AggregatorSelector {
+    HistogramSelector(boundaries.into())
+}
+
+#[derive(Debug, Clone)]
+struct HistogramSelector(Vec<f64>);
+
+impl AggregatorSelector for HistogramSelector {
     fn aggregator_for(&self, descriptor: &Descriptor) -> Option<Arc<dyn Aggregator + Send + Sync>> {
-        match self {
-            Selector::Sketch(config) => match descriptor.instrument_kind() {
-                InstrumentKind::ValueObserver => Some(Arc::new(aggregators::last_value())),
-                InstrumentKind::Histogram => Some(Arc::new(aggregators::ddsketch(
-                    config,
-                    descriptor.number_kind().clone(),
-                ))),
-                _ => Some(Arc::new(aggregators::sum())),
-            },
-            Selector::Inexpensive => match descriptor.instrument_kind() {
-                InstrumentKind::ValueObserver => Some(Arc::new(aggregators::last_value())),
-                InstrumentKind::Histogram => {
-                    Some(Arc::new(aggregators::min_max_sum_count(descriptor)))
-                }
-                _ => Some(Arc::new(aggregators::sum())),
-            },
-            Selector::Exact => match descriptor.instrument_kind() {
-                InstrumentKind::ValueObserver => Some(Arc::new(aggregators::last_value())),
-                InstrumentKind::Histogram => Some(Arc::new(aggregators::array())),
-                _ => Some(Arc::new(aggregators::sum())),
-            },
-            Selector::Histogram(boundaries) => match descriptor.instrument_kind() {
-                InstrumentKind::ValueObserver => Some(Arc::new(aggregators::last_value())),
-                InstrumentKind::Histogram => {
-                    Some(Arc::new(aggregators::histogram(descriptor, boundaries)))
-                }
-                _ => Some(Arc::new(aggregators::sum())),
-            },
-        }
+        match descriptor.instrument_kind() {
+            InstrumentKind::GaugeObserver => Some(Arc::new(aggregators::last_value())),
+            InstrumentKind::Histogram => Some(Arc::new(aggregators::histogram(&self.0))),
+            _ => Some(Arc::new(aggregators::sum())),
+        }
     }
 }
diff --git a/opentelemetry-sdk/src/testing/metric.rs b/opentelemetry-sdk/src/testing/metric.rs
index a1c5293c7f..37e526bfbf 100644
--- a/opentelemetry-sdk/src/testing/metric.rs
+++ b/opentelemetry-sdk/src/testing/metric.rs
@@ -1,11 +1,38 @@
-use crate::export::metrics::{AggregatorSelector, Processor};
-use crate::metrics::selectors::simple::Selector;
+use std::sync::Arc;
+
+use opentelemetry_api::metrics::Result;
+
+use crate::{
+    export::metrics::{AggregatorSelector, Checkpointer, LockedCheckpointer, Processor},
+    metrics::{aggregators::Aggregator, sdk_api::Descriptor},
+};
+
+#[derive(Debug)]
+struct NoopAggregatorSelector;
+
+impl AggregatorSelector for NoopAggregatorSelector {
+    fn aggregator_for(
+        &self,
+        _descriptor: &Descriptor,
+    ) -> Option<Arc<dyn Aggregator + Send + Sync>> {
+        None
+    }
+}
 
 #[derive(Debug)]
-pub struct NoopProcessor;
+pub struct NoopCheckpointer;
+
+impl Processor for NoopCheckpointer {
+    fn aggregator_selector(&self) -> &dyn AggregatorSelector {
+        &NoopAggregatorSelector
+    }
+}
 
-impl Processor for NoopProcessor {
-    fn aggregation_selector(&self) -> &dyn AggregatorSelector {
-        &Selector::Exact
+impl Checkpointer for NoopCheckpointer {
+    fn checkpoint(
+        &self,
+        _f: &mut dyn FnMut(&mut dyn LockedCheckpointer) -> Result<()>,
+    ) -> Result<()> {
+        Ok(())
     }
 }
diff --git a/opentelemetry/src/lib.rs b/opentelemetry/src/lib.rs
index b9bc2ebaa8..4926e15a09 100644
--- a/opentelemetry/src/lib.rs
+++ b/opentelemetry/src/lib.rs
@@ -82,7 +82,9 @@
 //! ```
 //! # #[cfg(feature = "metrics")]
 //! # {
-//! use opentelemetry::{global, KeyValue};
+//! use opentelemetry::{global, Context, KeyValue};
+//!
+//! let cx = Context::current();
 //!
 //! // get a meter from a provider
 //! let meter = global::meter("my_service");
@@ -91,7 +93,7 @@
 //! let counter = meter.u64_counter("my_counter").init();
 //!
 //! // record a measurement
-//! counter.add(1, &[KeyValue::new("http.client_ip", "83.164.160.102")]);
+//! counter.add(&cx, 1, &[KeyValue::new("http.client_ip", "83.164.160.102")]);
 //! # }
 //! ```
 //!
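
A minimal end-to-end sketch of how the pieces above fit together. This is
illustrative only: the instrument names, attribute values, and bucket
boundaries are invented, the controller is installed as the global
`MeterProvider` the way the updated examples do it, and a real program would
also attach an exporter and start the controller before recording.

    use opentelemetry::sdk::export::metrics::aggregation;
    use opentelemetry::sdk::metrics::{controllers, processors, selectors};
    use opentelemetry::{global, Context, KeyValue};

    fn main() -> opentelemetry::metrics::Result<()> {
        // The enum-based `Selector` is gone: aggregator selectors are now
        // plain constructor functions, and the push/pull controllers are
        // merged into the single `BasicController`.
        let controller = controllers::basic(processors::factory(
            selectors::simple::histogram([5.0, 10.0, 25.0]),
            aggregation::cumulative_temporality_selector(),
        ))
        .build();
        global::set_meter_provider(controller);

        let cx = Context::current();
        let meter = global::meter("my_service");

        // Synchronous instruments now record against an explicit `Context`.
        let histogram = meter.f64_histogram("http.server.duration").init();
        histogram.record(&cx, 13.7, &[KeyValue::new("http.method", "GET")]);

        // Gauges replace the removed value observers, and asynchronous
        // instruments report through registered callbacks instead of the
        // removed batch observers.
        let gauge = meter.u64_observable_gauge("process.memory.usage").init();
        meter.register_callback(move |cx| gauge.observe(cx, 42, &[]))?;

        Ok(())
    }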