From b1e5e35b5c34dfef19c7b1f55b0f3d497be4d960 Mon Sep 17 00:00:00 2001 From: Tyler Yahn Date: Thu, 15 Sep 2022 18:41:24 -0700 Subject: [PATCH] Merge metric SDK development branch "new_sdk/main" into "main" (#3175) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Remove Old SDK and dependent code on that SDK (#2802) * Remove prometheus example code * Remove prometheus exporter code * Remove stdoutmetric code * Remove sdk/metric/* packages * Remove opencensus example code * Remove otlpmetric exporter code * Remove OpenCensus bridge code * go mod tidy * Remove empty modules * Remove the number and aggregator from the metric SDK (#2840) * Add MeterProvider/meter structure to new SDK (#2822) * Remove prometheus example code * Remove prometheus exporter code * Remove stdoutmetric code * Remove sdk/metric/* packages * Remove opencensus example code * Remove otlpmetric exporter code * Remove OpenCensus bridge code * go mod tidy * Remove empty modules * Add MeterProvider/meter structure to new SDK * Add vanity imports * go mod tidy * Add MeterProvider Flush/Shutdown required by spec * Cast nil ptr instead of alloc for comp time check * Apply suggestions from code review Co-authored-by: Aaron Clawson <3766680+MadVikingGod@users.noreply.github.com> * Apply suggested Shutdown comment * Apply fixes from feedback Co-authored-by: Aaron Clawson <3766680+MadVikingGod@users.noreply.github.com> * Add sdk/metric/view structure (#2838) * Add sdk/metric/view package structure * Vanity import * Define the reader interface, and create a manual reader (#2885) * Add the manual reader to the sdk. * Incorporate feedback from PR. * additional PR comments * Fix lint * Fixes for PR. * Unexport ManualReader fix a few comments * Refactor reader testing into a harness (#2910) * Refactor reader testing into a harness * Run lint * Removed merge leftover * Use opentracing bridge from main * go mod tidy * crosslink * Remove Prometheus exporter from README for now * Run make with new tool set * Replace testReaderHarness with testify suite (#2915) * Add the periodic reader (#2909) * Add the metric.Exporter interface * Move the reader errors to reader.go * Update Reader.Collect docs Remove TODO being addressed in this PR and restate purpose of method. * Initial draft of the periodic reader * Refer to correct config in periodic reader opts * Refactor reader testing into a harness * Move wait group handling out of run * Refactor ticker creation to allow testing * Honor export timeout in run * Fix wait group wait bug * Add periodic reader tests * Fix lint * Update periodic reader comments * Add concurrency test for readers * Simplify the ticker stop deferral * Only register once * Restrict build of metric sdk to Go>1.16 * Clean up ShutdownBeforeRegister test * Test duplicate Reader registration (#2914) The specification requires the SDK prevent duplicate registrations for readers. This adds a test for that and fixes this for the manualReader. * Add WithReader and WithResource Options (#2905) * Add WithReader and WithResource Options * Run lint * Update WithReader fn signature based on feedback * crosslink * Remove zero-len check in unify * Restrict build to Go > 1.16 * Add bench test for reader collect methods (#2922) * Unify reader implementations (#2923) * Unify reader implementations Use an atomic.Value to manage concurrency without a lock.
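A minimal sketch of that atomic.Value pattern, with illustrative names (producer, produceHolder, register) rather than the SDK's actual identifiers:

    package main

    import (
        "errors"
        "fmt"
        "sync/atomic"
    )

    // producer stands in for whatever an SDK registers with a reader.
    type producer interface{ produce() string }

    type sdkProducer struct{}

    func (sdkProducer) produce() string { return "metrics" }

    // produceHolder wraps the interface so every value stored in the
    // atomic.Value has the same concrete type, as atomic.Value requires.
    type produceHolder struct{ p producer }

    type reader struct{ prod atomic.Value } // holds a produceHolder

    func newReader() *reader {
        r := &reader{}
        r.prod.Store(produceHolder{}) // seed the zero value for CompareAndSwap
        return r
    }

    // register records the producer exactly once; a duplicate registration
    // is rejected without the reader ever taking a lock.
    func (r *reader) register(p producer) error {
        if r.prod.CompareAndSwap(produceHolder{}, produceHolder{p: p}) {
            return nil
        }
        return errors.New("reader already registered")
    }

    func main() {
        r := newReader()
        fmt.Println(r.register(sdkProducer{})) // <nil>
        fmt.Println(r.register(sdkProducer{})) // reader already registered
    }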
* Lint * Merge main into new sdk main (#2925) * Use already enabled revive linter and add depguard (#2883) * Refactor golangci-lint conf Order settings alphabetically. * Add revive settings to golangci conf * Check blank imports * Check bool-literal-in-expr * Check constant-logical-expr * Check context-as-argument * Check context-key-type * Check deep-exit * Check defer * Check dot-imports * Check duplicated-imports * Check early-return * Check empty-block * Check empty-lines * Check error-naming * Check error-return * Check error-strings * Check errorf * Stop ignoring context first arg in tests * Check exported comments * Check flag-parameter * Check identical branches * Check if-return * Check increment-decrement * Check indent-error-flow * Check deny list of go imports * Check import shadowing * Check package comments * Check range * Check range val in closure * Check range val address * Check redefines builtin id * Check string-format * Check struct tag * Check superfluous else * Check time equal * Check var naming * Check var declaration * Check unconditional recursion * Check unexported return * Check unhandled errors * Check unnecessary stmt * Check unnecessary break * Check waitgroup by value * Exclude deep-exit check in example*_test.go files * Move the minimum version to go 1.17 (#2917) * Move the minimum version to go 1.17 * Update readme and changelog Co-authored-by: Chester Cheung * Make lint Co-authored-by: Aaron Clawson <3766680+MadVikingGod@users.noreply.github.com> Co-authored-by: Chester Cheung * Add view to metrics. (#2926) * WIP views public * Add attribute filters and comments. * Fixes for lint * Address comments * Fix lint * Changed view matching to expand end Removed the descriptor, it was moved in a previous patch * change wildcards into regex * Update comments * address comments. * Address more PR comments * renamed WithDescription to WithSetDescription. Co-authored-by: Chester Cheung * Implement MeterProvider's Meter method (#2945) * Implement stubbed meter create method * Rename return value to avoid comment * Encapsulate meterRegistry tests with identifying name * Run lint fix * Comment meterRegistry being concurrent safe * Remove ordered meter tracking in the meterRegistry * Test range completeness instead of order * Remove provider field from meter * Initialize MeterProvider readers field for new (#2948) * Introduce Temporality, WithTemporality Reader options and InstrumentKind (#2949) * Introduce Temporality and InstrumentKind Because Temporality is the responsibility of the Reader, additional methods are added to the Reader interface, and new options are created to configure the temporality selector. * Addresses comments, and adds tests.
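That selector-option pattern (a selector function type, a default, and a Selector-suffixed option, as #2967 below also describes) can be sketched with stand-in types; the names here are illustrative, not the SDK's exact signatures:

    package main

    import "fmt"

    type InstrumentKind int

    const (
        SyncCounter InstrumentKind = iota
        SyncHistogram
    )

    type Temporality int

    const (
        CumulativeTemporality Temporality = iota + 1
        DeltaTemporality
    )

    // TemporalitySelector chooses a Temporality per instrument kind, making
    // temporality the responsibility of the Reader configured with it.
    type TemporalitySelector func(InstrumentKind) Temporality

    // DefaultTemporalitySelector returns cumulative temporality for all kinds.
    func DefaultTemporalitySelector(InstrumentKind) Temporality { return CumulativeTemporality }

    type readerConfig struct{ temporality TemporalitySelector }

    type ReaderOption func(readerConfig) readerConfig

    // WithTemporalitySelector overrides the default selector.
    func WithTemporalitySelector(s TemporalitySelector) ReaderOption {
        return func(c readerConfig) readerConfig {
            c.temporality = s
            return c
        }
    }

    func newReaderConfig(opts ...ReaderOption) readerConfig {
        c := readerConfig{temporality: DefaultTemporalitySelector}
        for _, o := range opts {
            c = o(c)
        }
        return c
    }

    func main() {
        delta := func(InstrumentKind) Temporality { return DeltaTemporality }
        c := newReaderConfig(WithTemporalitySelector(delta))
        fmt.Println(c.temporality(SyncCounter) == DeltaTemporality) // true
    }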
* Fix additional PR comment * Add aggregation package and reader/view options (#2958) * Add aggregation pkg and options * Update documentation for the aggregation pkg * Test Aggregation.Err * Fix aggregation pkg comment * Add WithAggregation comment * Add default aggregation * Rename WithAggregation and add AggregationSelector * Fix DefaultAggregationSelector use and decl * Replace Aggregation struct with iface * Add Copy method to hist and fix Err method * Additional test for monotonic bounds * Add aggregation method to Reader * Use AggregationSelector instead of inline func type * Switch RecordMinMax to NoMinMax * Deep copy and validate in options * Test the DefaultAggregationSelector * nolint for import-shadow of method * Fix Default aggregation comment * Test the explicit bucket histogram deep copy * Update temporality selector option (#2967) Match the WithAggregationSelector option pattern: define a TemporalitySelector type, export the DefaultTemporalitySelector function, and name the option with a Selector suffix. * Minor NewMeterProvider and producer docs fix (#2983) * Add internal package structure for aggregation (#2954) * Add the aggtor package * Restrict to Go 1.18 * Add missing build block to view_test.go * Comment Aggregator iface * Use Go 1.18 as the default ci version * Update Aggregator iface from feedback * Accept hist conf * Flatten aggtor into just internal * Add Cycler interface Separate the duties of aggregation and maintaining state across aggregation periods. * Remove build flags for doc.go * Clarify Cycler documentation * Remove aggregation fold logic * Rename Number to Atomic * Add tests for Atomic impls * Remove unneeded Atomic implementation Add back when filling in structures. * Fix article in Float64 docs * Remove Atomic This is an implementation detail. * Add aggregator_example_test * Fix hist example * Add issue numbers to all TODO and FIXME * Remove zero parameter comment * Combine the cycler into the aggregators * Remove the drop aggregator * Fix lint * Use attribute.Set instead of ptr to it Co-authored-by: Anthony Mirabella * Merge main into new_sdk/main (#2996) * Use already enabled revive linter and add depguard (#2883) * Refactor golangci-lint conf Order settings alphabetically.
* Add revive settings to golangci conf * Check blank imports * Check bool-literal-in-expr * Check constant-logical-expr * Check context-as-argument * Check context-key-type * Check deep-exit * Check defer * Check dot-imports * Check duplicated-imports * Check early-return * Check empty-block * Check empty-lines * Check error-naming * Check error-return * Check error-strings * Check errorf * Stop ignoring context first arg in tests * Check exported comments * Check flag-parameter * Check identical branches * Check if-return * Check increment-decrement * Check indent-error-flow * Check deny list of go imports * Check import shadowing * Check package comments * Check range * Check range val in closure * Check range val address * Check redefines builtin id * Check string-format * Check struct tag * Check superfluous else * Check time equal * Check var naming * Check var declaration * Check unconditional recursion * Check unexported return * Check unhandled errors * Check unnecessary stmt * Check unnecessary break * Check waitgroup by value * Exclude deep-exit check in example*_test.go files * Move the minimum version to go 1.17 (#2917) * Move the minimum version to go 1.17 * Update readme and changelog Co-authored-by: Chester Cheung * Use ByteSliceToString from golang.org/x/sys/unix (#2924) Use unix.ByteSliceToString to convert Utsname []byte fields to strings. This also allows dropping the charsToString helper, which serves the same purpose and matches ByteSliceToString's implementation. Signed-off-by: Tobias Klauser Co-authored-by: Tyler Yahn * docs: fix typo (#2935) * add timeout to grpc connection in otel-collector example (#2939) * Closes: #2951 (#2952) This PR updates the example listed in the getting started doc so that it will compile without error. It also makes this example consistent with the code found in https://github.com/open-telemetry/opentelemetry-go/blob/main/example/fib/main.go Signed-off-by: Brad Topol * fix data-model link (#2955) * Bump go.opentelemetry.io/proto/otlp from v0.16.0 to v0.18.0 (#2960) * Move to using Instrumentation Scope (#2976) * Move to using Instrumentation Scope * Use type alias, not definition * Add a changelog entry * docs(website_docs): fix exporting_data.md and getting-started.md toc (#2930) * docs(website_docs): fix toc * docs(website_docs): fix toc * update exporting_data.md for rerun check-links * update exporting_data.md for rerun check-links Co-authored-by: Chester Cheung Co-authored-by: Tyler Yahn * Update getting-started.md (#2984) grammar edit for line 175 of readme * fix typo (#2986) * fix typo * spell fix * typo fix (#2991) * added traces.txt to gitignore for fib (#2993) * Deprecate Library and move all uses to Scope (#2977) * Deprecate Library and move all uses to Scope * Add PR number to changelog * Don't change signatures in stable modules * Revert some changes * Rename internal struct names * A bit more renaming * Update sdk/trace/span.go Co-authored-by: Tyler Yahn * Update based on feedback * Revert change Co-authored-by: Tyler Yahn Co-authored-by: Anthony Mirabella * Feat/bridge support text map (#2911) * feat: support TextMap * doc: add comment * test: support for ot.TextMap * Retrieve lost code due to merge * fix: retrieve lost code due to merge.
test: support for ot.HTTPHeaders * go mod tidy * Optimized code style, add changelog * doc: Restore comments * wip: add test cases * test: fix args error * delete empty line * Fix syntax and changelog errors * Fix formatting errors Co-authored-by: Chester Cheung Co-authored-by: Anthony Mirabella * Add a release template (#2863) * Add a release template * Update the about field Co-authored-by: Damien Mathieu <42@dmathieu.com> * Fix linting Issues * Add ignore for template link Co-authored-by: Damien Mathieu <42@dmathieu.com> Co-authored-by: Chester Cheung Co-authored-by: Anthony Mirabella * Fix merge of CHANGELOG.md Co-authored-by: Aaron Clawson <3766680+MadVikingGod@users.noreply.github.com> Co-authored-by: Chester Cheung Co-authored-by: Tobias Klauser Co-authored-by: petrie <244236866@qq.com> Co-authored-by: Damien Mathieu <42@dmathieu.com> Co-authored-by: Brad Topol Co-authored-by: Craig Pastro Co-authored-by: Kshitija Murudi Co-authored-by: Petrie Liu Co-authored-by: Guangya Liu Co-authored-by: Craig Pastro Co-authored-by: Anthony Mirabella Co-authored-by: ttoad * Add structure to the export data. (#2961) * Add structure to the export data. * Fix comments. * Apply suggestions from code review Co-authored-by: Tyler Yahn * Address PR comments. * Updated optional histogram parameters. * Address PR comments. Co-authored-by: Tyler Yahn Co-authored-by: Chester Cheung * Use export.Aggregation instead of internal.Aggregation (#3007) * Use export.Aggregation instead of internal * Return an export.Aggregation instead of a slice * Use attribute Sets instead of KeyValues for export data (#3012) Attribute Sets have stronger guarantees about the uniqueness of their keys and more functionality. We already ensure attributes are stored as Sets by the aggregator which will produce these data types. Instead of converting to a KeyValue slice, keep the data as a Set. Any user of the data can always call the ToSlice method to use the data as a slice of KeyValues. * Change Instrument Library to Instrument Scope (#3016) Co-authored-by: Tyler Yahn * move temporality to export/temporality (#3017) * move temporality to export/temporality * fix lint errors Co-authored-by: Tyler Yahn * Rename Package sdk/metric/export into sdk/metric/metricdata (#3014) * fix unrelated changes * fix quote code * fix format * rebase pr * rebase pr * change usage of export to metricdata * Add metricdatatest package (#3025) * Use export.Aggregation instead of internal * Return an export.Aggregation instead of a slice * Use attribute Sets instead of KeyValues for export data Attribute Sets have stronger guarantees about the uniqueness of their keys and more functionality. We already ensure attributes are stored as Sets by the aggregator which will produce these data types. Instead of converting to a KeyValue slice, keep the data as a Set. Any user of the data can always call the ToSlice method to use the data as a slice of KeyValues. * Add export data type comparison testing API * Add Aggregation and Value comparison funcs * Move export testing to own pkg * Move exporttest to metricdatatest * Add license headers to files missing them * Use metricdata instead of export Fix merge of new_sdk/main * Rename exporttest pkg to metricdatatest * Fix spelling errors * Fix lint issues * Use testing pkg to error directly Include Helper() method calls to correct the call-stack.
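Those Helper() calls work like this; assertEqual here is an illustrative stand-in for the metricdatatest assertion functions:

    package metricdatatest_test // illustrative package name

    import "testing"

    // assertEqual errors through the testing package directly. The t.Helper()
    // call marks this function as a test helper, so a failure is reported at
    // the caller's line rather than inside this function.
    func assertEqual(t *testing.T, want, got int) {
        t.Helper()
        if want != got {
            t.Errorf("want %d, got %d", want, got)
        }
    }

    func TestDataPoint(t *testing.T) {
        assertEqual(t, 2, 1+1) // a failure would be reported on this line
    }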
* Fix CompareAggregations Set equal to true by default * Generalize assertions and unexport equal checks * Abstract assert tests * Rename all exp var to r * Test AssertAggregationsEqual * Comment why Value and Aggregation are separate * Test AssertValuesEqual * Revert changes to metricdata/temporality.go * Expand pkg doc sentence * Add license header to assertion.go * Update assertion docs * Consolidate comparisons funcs into one file * Consolidate and fix docs * Consolidate assertion.go * Consolidate comparisons.go * make lint * Test with relatively static times * Update sdk/metric/metricdata/metricdatatest/comparisons.go Co-authored-by: Aaron Clawson <3766680+MadVikingGod@users.noreply.github.com> * Drop equal return from comparison funcs * Refactor AssertEqual * Remove reasN from testDatatype func params * Consolidate AssertEqual type conversions * Fix assertion error message * Add assertion failure tests * Remove unneeded strings join * Make comment include a possessive Co-authored-by: Aaron Clawson <3766680+MadVikingGod@users.noreply.github.com> * Adds a pipeline for creating reader's output (#3026) * Adds a pipeline for creating reader's output * fix metricdata move * fix lint * Apply suggestions from code review Co-authored-by: Tyler Yahn * Address PR comments * Added resource test Co-authored-by: Tyler Yahn * Use generic Sum, Gauge, and DataPoint value removing Value, Int64, and Float64 from metricdata (#3036) * Use generic DataPoint value * Fix assertion_fail_test.go * Declare Sum and DataPoints type in pipeline_test * Add MatchInstrumentKind filter for Views. (#3037) * Move InstrumentKind to view, Add view filter * remove TODO * Add the Option function, fix lint * use local var over 0 * Fix missing undefinedInstrument Co-authored-by: Tyler Yahn * Change View Attribute Filter to detect if not set. (#3039) * Change View Attribute Filter to detect if not set. * Fix PR comments. * Rework test for no filter logic. * Add implementation of last-value aggregator (#3008) * Add last-value aggregator * Add test of last-value reset of unseen attrs * Add benchmark * Use generic DataPoint value * Fix assertion_fail_test.go * Fix tests * Remove unused test increment values * View.New() misses InstrumentKind check (#3043) Signed-off-by: liupengfei Co-authored-by: Tyler Yahn * Add delta/cumulative histogram implementation (#3045) * Add delta/cumulative histogram implementation * Add histogram unit tests * Fix histValues Aggregate Store the new buckets value back to the values map. Ensure min/max are measured values, not zero values. * Fix lint * Add benchmarks * Test histograms internal functionality * Fix lint * Add TODO to look at memory use for cumu hist * Update sdk/metric/internal/histogram.go Co-authored-by: Chester Cheung Co-authored-by: Chester Cheung * use TemporalitySelector (#3050) Signed-off-by: Petrie Co-authored-by: Aaron Clawson <3766680+MadVikingGod@users.noreply.github.com> * Add implementation of Sum aggregators (#3000) * Implement the sum aggregators * Add unit tests for delta/cumulative sums * Add benchmarks * Merge sum tests into one * Remove unused start time from cumulative sum * Refactor benchmark tests Split benchmarks for the Aggregations and Aggregate methods so computational resource use can be determined.
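One way to structure that split, sketched with a toy cumulative sum standing in for the SDK's aggregators, so CPU and allocations are attributed to Aggregate and Aggregation separately:

    package internal_test // illustrative

    import "testing"

    type cumulativeSum struct{ total float64 }

    func (s *cumulativeSum) Aggregate(v float64)  { s.total += v }
    func (s *cumulativeSum) Aggregation() float64 { return s.total }

    func BenchmarkCumulativeSum(b *testing.B) {
        // Sub-benchmarks isolate the cost of recording measurements from
        // the cost of producing the aggregation snapshot.
        b.Run("Aggregate", func(b *testing.B) {
            agg := &cumulativeSum{}
            b.ReportAllocs()
            b.ResetTimer()
            for i := 0; i < b.N; i++ {
                agg.Aggregate(float64(i))
            }
        })
        b.Run("Aggregation", func(b *testing.B) {
            agg := &cumulativeSum{}
            agg.Aggregate(1)
            b.ReportAllocs()
            b.ResetTimer()
            for i := 0; i < b.N; i++ {
                _ = agg.Aggregation()
            }
        })
    }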
* goimports * Move timestamp out of lock * Refactor testing * Fix spelling mistake * Name param of expectFunc * Reset delta sum to zero instead of delete * Revert to deleting unused attr sets * Refactor testing to allow use across other aggs * Add TODO to bound cumulative sum mem usage * Fix misspelling * Unify aggregator benchmark code in aggregator_test * Use generic DataPoint value * Fix assertion_fail_test.go * Use generic metricdata types * Fix tests * Fix benchmarks * Fix lint * Update sum documentation * Remove leftover encapsulating test run * Use t.Cleanup to mock time * Consolidate expecter logic into funcs * Move errNegVal closer to use * Run the agg test * Add tests for monotonic sum Aggregate err * Run make lint * Make monotonic an arg of creation funcs * Remove Aggregate monotonic validation * Rename sum to valueMap The term sum is a good variable name that we do not want to take, and valueMap better describes the type as the storage of the aggregator. * Adds a filter Aggregator. (#3040) * Adds a filter Aggregator. * Add lock and tests * Add Concurrency tests * fix lint errors * Add memory constrained todo. * Update filter comment. Co-authored-by: Tyler Yahn Co-authored-by: Chester Cheung Co-authored-by: Tyler Yahn * Add back the stdoutmetric exporter (#3057) * PoC stdoutmetric exporter * Use stringer to generate String for Temporality * Add vanity imports * Update Temporality string expected output * Do not return error from newConfig * Add shutdown unit tests * Fix spelling error * Unify testing of ctx errors and test ForceFlush * Add unit test for Export handle of ctx errs * Clarify documentation about alt OTLP exporter * Remove unused ErrUnrecognized A third party encoder can produce its own errors. This code does nothing unique with this error, therefore, it is removed. * Lint exporter_test.go * Refactor example_test.go removing FIXME * Add test for Export shutdown err * Add a discard encoder for testing * Acknowledged error is returned from Shutdown * Remove unexpected SchemaURL from stdouttrace test * Remove unneeded *testing.T arg from testEncoderOption * Fix the location of now * Revise and edit docs Co-authored-by: Aaron Clawson <3766680+MadVikingGod@users.noreply.github.com> * Remove stale TODO from metricdata/data.go (#3064) * Merge main into new_sdk/main (#3082) * Use already enabled revive linter and add depguard (#2883) * Refactor golangci-lint conf Order settings alphabetically.
* Add revive settings to golangci conf * Check blank imports * Check bool-literal-in-expr * Check constant-logical-expr * Check context-as-argument * Check context-key-type * Check deep-exit * Check defer * Check dot-imports * Check duplicated-imports * Check early-return * Check empty-block * Check empty-lines * Check error-naming * Check error-return * Check error-strings * Check errorf * Stop ignoring context first arg in tests * Check exported comments * Check flag-parameter * Check identical branches * Check if-return * Check increment-decrement * Check indent-error-flow * Check deny list of go imports * Check import shadowing * Check package comments * Check range * Check range val in closure * Check range val address * Check redefines builtin id * Check string-format * Check struct tag * Check superfluous else * Check time equal * Check var naming * Check var declaration * Check unconditional recursion * Check unexported return * Check unhandled errors * Check unnecessary stmt * Check unnecessary break * Check waitgroup by value * Exclude deep-exit check in example*_test.go files * Move the minimum version to go 1.17 (#2917) * Move the minimum version to go 1.17 * Update readme and changelog Co-authored-by: Chester Cheung * Use ByteSliceToString from golang.org/x/sys/unix (#2924) Use unix.ByteSliceToString to convert Utsname []byte fields to strings. This also allows dropping the charsToString helper, which serves the same purpose and matches ByteSliceToString's implementation. Signed-off-by: Tobias Klauser Co-authored-by: Tyler Yahn * docs: fix typo (#2935) * add timeout to grpc connection in otel-collector example (#2939) * Closes: #2951 (#2952) This PR updates the example listed in the getting started doc so that it will compile without error. It also makes this example consistent with the code found in https://github.com/open-telemetry/opentelemetry-go/blob/main/example/fib/main.go Signed-off-by: Brad Topol * fix data-model link (#2955) * Bump go.opentelemetry.io/proto/otlp from v0.16.0 to v0.18.0 (#2960) * Move to using Instrumentation Scope (#2976) * Move to using Instrumentation Scope * Use type alias, not definition * Add a changelog entry * docs(website_docs): fix exporting_data.md and getting-started.md toc (#2930) * docs(website_docs): fix toc * docs(website_docs): fix toc * update exporting_data.md for rerun check-links * update exporting_data.md for rerun check-links Co-authored-by: Chester Cheung Co-authored-by: Tyler Yahn * Update getting-started.md (#2984) grammar edit for line 175 of readme * fix typo (#2986) * fix typo * spell fix * typo fix (#2991) * added traces.txt to gitignore for fib (#2993) * Deprecate Library and move all uses to Scope (#2977) * Deprecate Library and move all uses to Scope * Add PR number to changelog * Don't change signatures in stable modules * Revert some changes * Rename internal struct names * A bit more renaming * Update sdk/trace/span.go Co-authored-by: Tyler Yahn * Update based on feedback * Revert change Co-authored-by: Tyler Yahn Co-authored-by: Anthony Mirabella * Feat/bridge support text map (#2911) * feat: support TextMap * doc: add comment * test: support for ot.TextMap * Retrieve lost code due to merge * fix: retrieve lost code due to merge.
test: support for ot.HTTPHeaders * go mod tidy * Optimized code style, add changelog * doc: Restore comments * wip: add test cases * test: fix args error * delete empty line * Fix syntax and changelog errors * Fix formatting errors Co-authored-by: Chester Cheung Co-authored-by: Anthony Mirabella * Add a release template (#2863) * Add a release template * Update the about field Co-authored-by: Damien Mathieu <42@dmathieu.com> * Fix linting Issues * Add ignore for template link Co-authored-by: Damien Mathieu <42@dmathieu.com> Co-authored-by: Chester Cheung Co-authored-by: Anthony Mirabella * Add workflow to automate bundling dependabot PRs (#2997) Signed-off-by: Anthony J Mirabella * Release prep 1.8.0 (#3001) * Update CHANGELOG and versions.yaml for 1.8.0 release Signed-off-by: Anthony J Mirabella * Update go-build-tools Signed-off-by: Anthony J Mirabella * Prepare stable-v1 for version v1.8.0 * Prepare experimental-metrics for version v0.31.0 * Prepare bridge for version v0.31.0 * `make go-mod-tidy` should use `-compat=1.17` now Signed-off-by: Anthony J Mirabella * Update CHANGELOG.md Co-authored-by: Tyler Yahn Co-authored-by: Tyler Yahn * Add benchmark metric test for UpDownCounter (#2655) * add benchmark metric test for UpDownCounter * move counter annotation up * fix syncFloat64 to syncInt64 * fix syncFloat64 to syncInt64 * fix go-lint err * Add semconv/v1.11.0 (#3009) Co-authored-by: Aaron Clawson <3766680+MadVikingGod@users.noreply.github.com> * Add semconv/v1.12.0 (#3010) * Add semconv/v1.12.0 * Update all semconv use to v1.12.0 Co-authored-by: Aaron Clawson <3766680+MadVikingGod@users.noreply.github.com> * Add http.method attribute to http server metric (#3018) * Add http.method attribute to http server metric Signed-off-by: Ziqi Zhao * fix lint Signed-off-by: Ziqi Zhao * fix lint Signed-off-by: Ziqi Zhao * fix for reviews Signed-off-by: Ziqi Zhao * add changelog entry Signed-off-by: Ziqi Zhao * Add tests and fix opentracing bridge defer warning (#3029) * add tests and fix opentracing bridge defer warning * add changelog entry * Update CHANGELOG.md Co-authored-by: Tyler Yahn * Update bridge/opentracing/bridge_test.go Co-authored-by: Tyler Yahn Co-authored-by: Tyler Yahn * Introduce "split" metric schema transformation (#2999) This is a new transformation type that allows describing a change where a metric is converted to several other metrics by eliminating an attribute. An example of such a change that happened recently is this: https://github.com/open-telemetry/opentelemetry-specification/pull/2617 This PR implements specification change https://github.com/open-telemetry/opentelemetry-specification/pull/2653 This PR creates package v1.1 for the new functionality. The old package v1.0 remains unchanged. * Release v1.9.0 (#3052) * Bump versions in versions.yaml * Prepare stable-v1 for version v1.9.0 * Prepare experimental-schema for version v0.0.3 * Update changelog for release * Replace ioutil with io and os (#3058) * Make several vars into consts (#3068) * Add support for Go 1.19 (#3077) * Add support for Go 1.19 * Update CHANGELOG.md Co-authored-by: Sam Xie Co-authored-by: Sam Xie * Update compatibility documentation (#3079) Remove 3 month timeline for backwards support of old versions of Go.
Signed-off-by: Brad Topol Signed-off-by: Anthony J Mirabella Signed-off-by: Ziqi Zhao Co-authored-by: Tyler Yahn Co-authored-by: Chester Cheung Co-authored-by: Tobias Klauser Co-authored-by: petrie <244236866@qq.com> Co-authored-by: Damien Mathieu <42@dmathieu.com> Co-authored-by: Brad Topol Co-authored-by: Craig Pastro Co-authored-by: Kshitija Murudi Co-authored-by: Petrie Liu Co-authored-by: Guangya Liu Co-authored-by: Craig Pastro Co-authored-by: Anthony Mirabella Co-authored-by: ttoad Co-authored-by: Ziqi Zhao Co-authored-by: Tigran Najaryan <4194920+tigrannajaryan@users.noreply.github.com> Co-authored-by: Håvard Anda Estensen Co-authored-by: Mikhail Mazurskiy <126021+ash2k@users.noreply.github.com> Co-authored-by: Sam Xie * Adds the option to ignore timestamps in metric data tests. (#3076) * Adds the option to ignore timestamps in metric data tests * use config over bool Co-authored-by: Tyler Yahn * Adds a pipelineRegistry to manage creating aggregators. (#3044) * Adds a pipelineRegistry to manage creating aggregators. * Made pipeline generic * Add aggregation filter to the registry. Co-authored-by: Chester Cheung * Remove stale TODO (#3083) The aggregation transform function was added in #2958. * Add back the otlpmetric transforms (#3065) * Add otlpmetric transforms * Split aggregation transforms to own file * Rename Iterator to AttrIter * Update pkg docs These are internal docs; use developer-based language. * Document all exported funcs * Unify metricdata type transforms into one file * Rename metrics.go to metricdata.go * Copy back attribute tests * Copy back in Iterator test * Refactor attribute tests * Add tests for metricdata transforms * Add multiErr support for digestible transform errs * Test transform errors * go mod tidy * Use key field * goimported * gofmt-ed * Fix error documentation * go mod tidy * Changes instruments uniqueness in pipeline. (#3071) * Changes instruments uniqueness in pipeline. * Fix lint * Update sdk/metric/pipeline.go Co-authored-by: Tyler Yahn Co-authored-by: Tyler Yahn * Restore the exporters/otlp/otlpmetric/internal/otlpconfig package (#3090) * Restore otlpmetric/otlpconfig from main * Rename otlpconfig to oconf * Remove the empty envconfig_test.go * Update import of otlpconfig in oconf_test * go mod tidy * Run make * add internal OpenCensus metric translation library (#3099) * reintroduce opencensus trace bridge (#3098) Co-authored-by: Tyler Yahn * Document the sdk/metric/view package (#3086) * Add package documentation for sdk/metric/view * Refer to views not configs in WithReader docs * Fix vanity url for view_test.go * Add example tests for view options * Add package example * Fix view type docs * Remove build constraint for doc.go * Fix lint * Adds async instruments and providers. (#3084) * Adds instrument providers and instruments. * Don't return nil instrument, return with error * removed sync * Added a number of tests. Signed-off-by: GitHub * Address PR comments * fix error messages * fixes typo in test name Signed-off-by: GitHub Co-authored-by: Tyler Yahn * Merge branch 'main' into new_sdk/main (#3111) * Use already enabled revive linter and add depguard (#2883) * Refactor golangci-lint conf Order settings alphabetically.
* Add revive settings to golangci conf * Check blank imports * Check bool-literal-in-expr * Check constant-logical-expr * Check context-as-argument * Check context-key-type * Check deep-exit * Check defer * Check dot-imports * Check duplicated-imports * Check early-return * Check empty-block * Check empty-lines * Check error-naming * Check error-return * Check error-strings * Check errorf * Stop ignoring context first arg in tests * Check exported comments * Check flag-parameter * Check identical branches * Check if-return * Check increment-decrement * Check indent-error-flow * Check deny list of go imports * Check import shadowing * Check package comments * Check range * Check range val in closure * Check range val address * Check redefines builtin id * Check string-format * Check struct tag * Check superfluous else * Check time equal * Check var naming * Check var declaration * Check unconditional recursion * Check unexported return * Check unhandled errors * Check unnecessary stmt * Check unnecessary break * Check waitgroup by value * Exclude deep-exit check in example*_test.go files * Move the minimum version to go 1.17 (#2917) * Move the minimum version to go 1.17 * Update readme and changelog Co-authored-by: Chester Cheung * Use ByteSliceToString from golang.org/x/sys/unix (#2924) Use unix.ByteSliceToString to convert Utsname []byte fields to strings. This also allows dropping the charsToString helper, which serves the same purpose and matches ByteSliceToString's implementation. Signed-off-by: Tobias Klauser Co-authored-by: Tyler Yahn * docs: fix typo (#2935) * add timeout to grpc connection in otel-collector example (#2939) * Closes: #2951 (#2952) This PR updates the example listed in the getting started doc so that it will compile without error. It also makes this example consistent with the code found in https://github.com/open-telemetry/opentelemetry-go/blob/main/example/fib/main.go Signed-off-by: Brad Topol * fix data-model link (#2955) * Bump go.opentelemetry.io/proto/otlp from v0.16.0 to v0.18.0 (#2960) * Move to using Instrumentation Scope (#2976) * Move to using Instrumentation Scope * Use type alias, not definition * Add a changelog entry * docs(website_docs): fix exporting_data.md and getting-started.md toc (#2930) * docs(website_docs): fix toc * docs(website_docs): fix toc * update exporting_data.md for rerun check-links * update exporting_data.md for rerun check-links Co-authored-by: Chester Cheung Co-authored-by: Tyler Yahn * Update getting-started.md (#2984) grammar edit for line 175 of readme * fix typo (#2986) * fix typo * spell fix * typo fix (#2991) * added traces.txt to gitignore for fib (#2993) * Deprecate Library and move all uses to Scope (#2977) * Deprecate Library and move all uses to Scope * Add PR number to changelog * Don't change signatures in stable modules * Revert some changes * Rename internal struct names * A bit more renaming * Update sdk/trace/span.go Co-authored-by: Tyler Yahn * Update based on feedback * Revert change Co-authored-by: Tyler Yahn Co-authored-by: Anthony Mirabella * Feat/bridge support text map (#2911) * feat: support TextMap * doc: add comment * test: support for ot.TextMap * Retrieve lost code due to merge * fix: retrieve lost code due to merge.
test: support for ot.HTTPHeaders * go mod tidy * Optimized code style, add changelog * doc: Restore comments * wip: add test cases * test: fix args error * delete empty line * Fix syntax and changelog errors * Fix formatting errors Co-authored-by: Chester Cheung Co-authored-by: Anthony Mirabella * Add a release template (#2863) * Add a release template * Update the about field Co-authored-by: Damien Mathieu <42@dmathieu.com> * Fix linting Issues * Add ignore for template link Co-authored-by: Damien Mathieu <42@dmathieu.com> Co-authored-by: Chester Cheung Co-authored-by: Anthony Mirabella * Add workflow to automate bundling dependabot PRs (#2997) Signed-off-by: Anthony J Mirabella * Release prep 1.8.0 (#3001) * Update CHANGELOG and versions.yaml for 1.8.0 release Signed-off-by: Anthony J Mirabella * Update go-build-tools Signed-off-by: Anthony J Mirabella * Prepare stable-v1 for version v1.8.0 * Prepare experimental-metrics for version v0.31.0 * Prepare bridge for version v0.31.0 * `make go-mod-tidy` should use `-compat=1.17` now Signed-off-by: Anthony J Mirabella * Update CHANGELOG.md Co-authored-by: Tyler Yahn Co-authored-by: Tyler Yahn * Add benchmark metric test for UpDownCounter (#2655) * add benchmark metric test for UpDownCounter * move counter annotation up * fix syncFloat64 to syncInt64 * fix syncFloat64 to syncInt64 * fix go-lint err * Add semconv/v1.11.0 (#3009) Co-authored-by: Aaron Clawson <3766680+MadVikingGod@users.noreply.github.com> * Add semconv/v1.12.0 (#3010) * Add semconv/v1.12.0 * Update all semconv use to v1.12.0 Co-authored-by: Aaron Clawson <3766680+MadVikingGod@users.noreply.github.com> * Add http.method attribute to http server metric (#3018) * Add http.method attribute to http server metric Signed-off-by: Ziqi Zhao * fix lint Signed-off-by: Ziqi Zhao * fix lint Signed-off-by: Ziqi Zhao * fix for reviews Signed-off-by: Ziqi Zhao * add changelog entry Signed-off-by: Ziqi Zhao * Add tests and fix opentracing bridge defer warning (#3029) * add tests and fix opentracing bridge defer warning * add changelog entry * Update CHANGELOG.md Co-authored-by: Tyler Yahn * Update bridge/opentracing/bridge_test.go Co-authored-by: Tyler Yahn Co-authored-by: Tyler Yahn * Introduce "split" metric schema transformation (#2999) This is a new transformation type that allows describing a change where a metric is converted to several other metrics by eliminating an attribute. An example of such a change that happened recently is this: https://github.com/open-telemetry/opentelemetry-specification/pull/2617 This PR implements specification change https://github.com/open-telemetry/opentelemetry-specification/pull/2653 This PR creates package v1.1 for the new functionality. The old package v1.0 remains unchanged. * Release v1.9.0 (#3052) * Bump versions in versions.yaml * Prepare stable-v1 for version v1.9.0 * Prepare experimental-schema for version v0.0.3 * Update changelog for release * Replace ioutil with io and os (#3058) * Make several vars into consts (#3068) * Add support for Go 1.19 (#3077) * Add support for Go 1.19 * Update CHANGELOG.md Co-authored-by: Sam Xie Co-authored-by: Sam Xie * Update compatibility documentation (#3079) Remove 3 month timeline for backwards support of old versions of Go.
* Fix `opentracing.Bridge` where it misidentified the spanKind (#3096) * Fix opentracing.Bridge where it was not identifying the spanKind correctly * fix test * changelog * Keeping backward compatibility * Update CHANGELOG.md Co-authored-by: Anthony Mirabella * Update CHANGELOG.md Co-authored-by: Anthony Mirabella Co-authored-by: Chester Cheung * replace `required` by `requirementlevel` (#3103) * Change the inclusivity of exponential histogram bounds (#2982) * Use lower-inclusive boundaries * make exponent and logarithm more symmetric Co-authored-by: Anthony Mirabella Co-authored-by: Aaron Clawson <3766680+MadVikingGod@users.noreply.github.com> * Update golangci-lint to v1.48.0 (#3105) * Update golangci-lint to v1.48.0 Co-authored-by: Chester Cheung * Bump go.opentelemetry.io/proto/otlp from v0.18.0 to v0.19.0 (#3107) * Bump go.opentelemetry.io/proto/otlp from v0.18.0 to v0.19.0 Co-authored-by: Aaron Clawson <3766680+MadVikingGod@users.noreply.github.com> Signed-off-by: Brad Topol Signed-off-by: Anthony J Mirabella Signed-off-by: Ziqi Zhao Co-authored-by: Tyler Yahn Co-authored-by: Chester Cheung Co-authored-by: Tobias Klauser Co-authored-by: petrie <244236866@qq.com> Co-authored-by: Damien Mathieu <42@dmathieu.com> Co-authored-by: Brad Topol Co-authored-by: Craig Pastro Co-authored-by: Kshitija Murudi Co-authored-by: Petrie Liu Co-authored-by: Guangya Liu Co-authored-by: Craig Pastro Co-authored-by: Anthony Mirabella Co-authored-by: ttoad Co-authored-by: Ziqi Zhao Co-authored-by: Tigran Najaryan <4194920+tigrannajaryan@users.noreply.github.com> Co-authored-by: Håvard Anda Estensen Co-authored-by: Mikhail Mazurskiy <126021+ash2k@users.noreply.github.com> Co-authored-by: Sam Xie Co-authored-by: Alan Protasio Co-authored-by: Joshua MacDonald * Add otlpmetric exporter (#3089) * Add otlpmetric package doc * Add Client interface * Add the Exporter Have the Exporter ensure synchronous access to all client methods. * Add race detection test for Exporter * Expand New godocs * Fix lint * Merge transform and upload errors * Fix ineffectual increment * Make pipelineRegistry non-generic (#3115) * Make pipelineRegistry non-generic * Add Synchronous instruments (#3124) * Add Synchronous instruments * remove duplicate code in instrument * Fixes to Histogram comments * Add back the otlpmetricgrpc exporter (#3094) * Add otlpmetric package doc * Add Client interface * Add the Exporter Have the Exporter ensure synchronous access to all client methods. * Add race detection test for Exporter * Expand New godocs * Fix lint * Add the otlpmetricgrpc Go module * Restore otlpmetricgrpc from main * Remove integration testing from otlpmetricgrpc * Fix import of otlpconfig to oconf * Update client Add ForceFlush method to satisfy otlpmetric.Client, unexport Start, and restructure NewClient to return a started client. * Update otlpmetricgrpc New functions Remove NewUnstarted and only export New. * Remove unneeded client sync The exporter handles the synchronization of client method calls. * Update example_test.go * Update client_unit_test.go * Rename client_unit_test.go to client_test.go * Rename options.go to config.go * Add package doc * Unify exporter.go and doc.go into client.go * Unexport NewClient * Correct option documentation * Add env config documentation * go mod tidy * Restrict build to Go 1.18 * Update client.go Fix copied UploadMetrics documentation.
* Run make * Close client conn even if context deadline reached * Add sdk/metric Go pkg docs and example (#3139) * Add sdk/metric Go pkg docs * Add example_test.go * Add Go 1.18 build guard to example_test.go * Merge main into new_sdk/main (#3141) * Use already enabled revive linter and add depguard (#2883) * Refactor golangci-lint conf Order settings alphabetically. * Add revive settings to golangci conf * Check blank imports * Check bool-literal-in-expr * Check constant-logical-expr * Check context-as-argument * Check context-key-type * Check deep-exit * Check defer * Check dot-imports * Check duplicated-imports * Check early-return * Check empty-block * Check empty-lines * Check error-naming * Check error-return * Check error-strings * Check errorf * Stop ignoring context first arg in tests * Check exported comments * Check flag-parameter * Check identical branches * Check if-return * Check increment-decrement * Check indent-error-flow * Check deny list of go imports * Check import shadowing * Check package comments * Check range * Check range val in closure * Check range val address * Check redefines builtin id * Check string-format * Check struct tag * Check superfluous else * Check time equal * Check var naming * Check var declaration * Check unconditional recursion * Check unexported return * Check unhandled errors * Check unnecessary stmt * Check unnecessary break * Check waitgroup by value * Exclude deep-exit check in example*_test.go files * Move the minimum version to go 1.17 (#2917) * Move the minimum version to go 1.17 * Update readme and changelog Co-authored-by: Chester Cheung * Use ByteSliceToString from golang.org/x/sys/unix (#2924) Use unix.ByteSliceToString to convert Utsname []byte fields to strings. This also allows dropping the charsToString helper, which serves the same purpose and matches ByteSliceToString's implementation. Signed-off-by: Tobias Klauser Co-authored-by: Tyler Yahn * docs: fix typo (#2935) * add timeout to grpc connection in otel-collector example (#2939) * Closes: #2951 (#2952) This PR updates the example listed in the getting started doc so that it will compile without error.
It also makes this example consistent with the code found in https://github.com/open-telemetry/opentelemetry-go/blob/main/example/fib/main.go Signed-off-by: Brad Topol * fix data-model link (#2955) * Bump go.opentelemetry.io/proto/otlp from v0.16.0 to v0.18.0 (#2960) * Move to using Instrumentation Scope (#2976) * Move to using Instrumentation Scope * Use type alias, not definition * Add a changelog entry * docs(website_docs): fix exporting_data.md and getting-started.md toc (#2930) * docs(website_docs): fix toc * docs(website_docs): fix toc * update exporting_data.md for rerun check-links * update exporting_data.md for rerun check-links Co-authored-by: Chester Cheung Co-authored-by: Tyler Yahn * Update getting-started.md (#2984) grammar edit for line 175 of readme * fix typo (#2986) * fix typo * spell fix * typo fix (#2991) * added traces.txt to gitignore for fib (#2993) * Deprecate Library and move all uses to Scope (#2977) * Deprecate Library and move all uses to Scope * Add PR number to changelog * Don't change signatures in stable modules * Revert some changes * Rename internal struct names * A bit more renaming * Update sdk/trace/span.go Co-authored-by: Tyler Yahn * Update based on feedback * Revert change Co-authored-by: Tyler Yahn Co-authored-by: Anthony Mirabella * Feat/bridge support text map (#2911) * feat: support TextMap * doc: add comment * test: support for ot.TextMap * Retrieve lost code due to merge * fix: retrieve lost code due to merge. test: support for ot.HTTPHeaders * go mod tidy * Optimized code style, add changelog * doc: Restore comments * wip: add test cases * test: fix args error * delete empty line * Fix syntax and changelog errors * Fix formatting errors Co-authored-by: Chester Cheung Co-authored-by: Anthony Mirabella * Add a release template (#2863) * Add a release template * Update the about field Co-authored-by: Damien Mathieu <42@dmathieu.com> * Fix linting Issues * Add ignore for template link Co-authored-by: Damien Mathieu <42@dmathieu.com> Co-authored-by: Chester Cheung Co-authored-by: Anthony Mirabella * Add workflow to automate bundling dependabot PRs (#2997) Signed-off-by: Anthony J Mirabella * Release prep 1.8.0 (#3001) * Update CHANGELOG and versions.yaml for 1.8.0 release Signed-off-by: Anthony J Mirabella * Update go-build-tools Signed-off-by: Anthony J Mirabella * Prepare stable-v1 for version v1.8.0 * Prepare experimental-metrics for version v0.31.0 * Prepare bridge for version v0.31.0 * `make go-mod-tidy` should use `-compat=1.17` now Signed-off-by: Anthony J Mirabella * Update CHANGELOG.md Co-authored-by: Tyler Yahn Co-authored-by: Tyler Yahn * Add benchmark metric test for UpDownCounter (#2655) * add benchmark metric test for UpDownCounter * move counter annotation up * fix syncFloat64 to syncInt64 * fix syncFloat64 to syncInt64 * fix go-lint err * Add semconv/v1.11.0 (#3009) Co-authored-by: Aaron Clawson <3766680+MadVikingGod@users.noreply.github.com> * Add semconv/v1.12.0 (#3010) * Add semconv/v1.12.0 * Update all semconv use to v1.12.0 Co-authored-by: Aaron Clawson <3766680+MadVikingGod@users.noreply.github.com> * Add http.method attribute to http server metric (#3018) * Add http.method attribute to http server metric Signed-off-by: Ziqi Zhao * fix lint Signed-off-by: Ziqi Zhao * fix lint Signed-off-by: Ziqi Zhao * fix for reviews Signed-off-by: Ziqi Zhao * add changelog entry Signed-off-by: Ziqi Zhao * Add tests and fix opentracing bridge defer warning (#3029) * add tests and fix opentracing bridge defer warning * add changelog entry * 
Update CHANGELOG.md Co-authored-by: Tyler Yahn * Update bridge/opentracing/bridge_test.go Co-authored-by: Tyler Yahn Co-authored-by: Tyler Yahn * Introduce "split" metric schema transformation (#2999) This is a new transformation type that allows describing a change where a metric is converted to several other metrics by eliminating an attribute. An example of such a change that happened recently is this: https://github.com/open-telemetry/opentelemetry-specification/pull/2617 This PR implements specification change https://github.com/open-telemetry/opentelemetry-specification/pull/2653 This PR creates package v1.1 for the new functionality. The old package v1.0 remains unchanged. * Release v1.9.0 (#3052) * Bump versions in versions.yaml * Prepare stable-v1 for version v1.9.0 * Prepare experimental-schema for version v0.0.3 * Update changelog for release * Replace ioutil with io and os (#3058) * Make several vars into consts (#3068) * Add support for Go 1.19 (#3077) * Add support for Go 1.19 * Update CHANGELOG.md Co-authored-by: Sam Xie Co-authored-by: Sam Xie * Update compatibility documentation (#3079) Remove 3 month timeline for backwards support of old versions of Go. * Fix `opentracing.Bridge` where it misidentified the spanKind (#3096) * Fix opentracing.Bridge where it was not identifying the spanKind correctly * fix test * changelog * Keeping backward compatibility * Update CHANGELOG.md Co-authored-by: Anthony Mirabella * Update CHANGELOG.md Co-authored-by: Anthony Mirabella Co-authored-by: Chester Cheung * replace `required` by `requirementlevel` (#3103) * Change the inclusivity of exponential histogram bounds (#2982) * Use lower-inclusive boundaries * make exponent and logarithm more symmetric Co-authored-by: Anthony Mirabella Co-authored-by: Aaron Clawson <3766680+MadVikingGod@users.noreply.github.com> * Update golangci-lint to v1.48.0 (#3105) * Update golangci-lint to v1.48.0 Co-authored-by: Chester Cheung * Bump go.opentelemetry.io/proto/otlp from v0.18.0 to v0.19.0 (#3107) * Bump go.opentelemetry.io/proto/otlp from v0.18.0 to v0.19.0 Co-authored-by: Aaron Clawson <3766680+MadVikingGod@users.noreply.github.com> * Update tracer to guard for a nil ctx (#3110) * Update tracer to guard for a nil ctx Co-authored-by: Chester Cheung Co-authored-by: Aaron Clawson <3766680+MadVikingGod@users.noreply.github.com> Co-authored-by: Tyler Yahn * Fix sdk/instrumentation pkg docs (#3130) * Add instrumentation scope attributes (#3131) * Add WithScopeAttributes TracerOption to trace API * Add Attributes field to instrumentation Scope * Use scope attributes for new Tracer * Fix stdouttrace expected test output * Allow unexported Set fields in sdk/trace test * Export instrumentation scope attrs in OTLP * Add changes to the changelog * Fix imports with make lint * Add unit tests for WithScopeAttributes * Fix English in Scope documentation * Add WithScopeAttributes MeterOption to metric API package (#3132) * Add WithScopeAttributes MeterOption to metric pkg * Add MeterConfig unit tests * Add changes to changelog * Fix import linting * Update MeterProvider documentation Include information about how to use WithScopeAttributes. * Refactor TracerProvider documentation (#3133) * Refactor TracerProvider documentation * Fix English article * Grammar fixes * consistency-of: Changed signal names for website docs (#3137) * Shut down all processors even on error (#3091) * Fix stdoutmetric example test The merged instrumentation Scope includes SchemaURL and Attributes now; add them to the expected output.
Signed-off-by: Brad Topol Signed-off-by: Anthony J Mirabella Signed-off-by: Ziqi Zhao Co-authored-by: Aaron Clawson <3766680+MadVikingGod@users.noreply.github.com> Co-authored-by: Chester Cheung Co-authored-by: Tobias Klauser Co-authored-by: petrie <244236866@qq.com> Co-authored-by: Damien Mathieu <42@dmathieu.com> Co-authored-by: Brad Topol Co-authored-by: Craig Pastro Co-authored-by: Kshitija Murudi Co-authored-by: Petrie Liu Co-authored-by: Guangya Liu Co-authored-by: Craig Pastro Co-authored-by: Anthony Mirabella Co-authored-by: ttoad Co-authored-by: Ziqi Zhao Co-authored-by: Tigran Najaryan <4194920+tigrannajaryan@users.noreply.github.com> Co-authored-by: Håvard Anda Estensen Co-authored-by: Mikhail Mazurskiy <126021+ash2k@users.noreply.github.com> Co-authored-by: Sam Xie Co-authored-by: Alan Protasio Co-authored-by: Joshua MacDonald Co-authored-by: Mitch Usher Co-authored-by: Gaurang Patel * Remove empty metrictest pkg (#3148) * Add exporters/otlp/otlpmetric/internal/otest (#3125) * Add otlpmetric package doc * Add Client interface * Add the Exporter Have the Exporter ensure synchronous access to all client methods. * Add race detection test for Exporter * Expand New godocs * Fix lint * Restore otlpmetrictest from main * Rename otlpmetrictest to otest * Remove data.go The functions and types it contains are no longer relevant to the SDK. * Update client context error tests Remove multiple shutdown tests. The Client interface states this should never happen. * Remove collector.go and otlptest.go * Expand client tests with ctx and force-flush * Add UploadMetrics tests * Test the tests with a trivial client * Condense all to client.go * Example of how to run RunClientTests * Add client integration testing * Add GRPCCollector * Remove GRPCCollector to limit scope of PR * Add back the otlpmetrichttp exporter (#3097) * Add otlpmetric package doc * Add Client interface * Add the Exporter Have the Exporter ensure synchronous access to all client methods. * Add race detection test for Exporter * Expand New godocs * Fix lint * Add back the otlpmetrichttp pkg from main * Restrict to Go 1.18 and above * Remove integration testing * Rename client_unit_test.go to client_test.go * Rename options.go to config.go * Remove the NewUnstarted func * Remove Start method from client * Add no-op ForceFlush method to client * Update otlpconfig pkg name to oconf * Rename Stop method to Shutdown Match the otlpmetric.Client interface. * Update creation functions to compile * Remove name field from client * Remove sync of methods from client This is handled by the exporter. * Remove unused generalCfg field from client * Replace cfg client field with used conf vals * Use a http request instead of url/header fields * Remove NewClient and move New into client.go * Rename client.client field to client.httpClient * Update client tests Remove test of a retry config and add functional tests of the client methods honoring a context. * Remove deprecated WithMaxAttempts and WithBackoff * Update option docs Include info on envvars. * Fix lint * Fix lint errors * Revert New to accept a context * Add example test * Update pkg docs * go mod tidy * Use url.URL to form HTTP request URL * Remove stale TODO in sdk/view (#3149) Co-authored-by: Chester Cheung * Use unique metric testing data in reader_test (#3151) Address unresolved TODO.
Co-authored-by: Chester Cheung * Add new metric SDK changes to changelog (#3150) Co-authored-by: Aaron Clawson <3766680+MadVikingGod@users.noreply.github.com> * Add integration and config testing to otlpmetricgrpc (#3126) * Add the GRPCCollector to otest * Use otest to test otlpmetricgrpc Client * Add WithHeaders and WithTimeout tests * Add integration and config testing to otlpmetrichttp (#3155) * Add HTTPCollector to otest * Add integration testing for otlpmetrichttp * Fix NewHTTPCollector docs * Add config tests * Fix lint * Add WithURLPath test * Add WithTLSClientConfig test * Ignore depguard for crypto/x509/pkix This is a testing package that uses the package to generate a weak testing TLS certificate. * Add Prometheus exporter code (#3135) * Add Prometheus exporter example (#3168) * Add back prom exporter to README.md * Fix removal changes from #3154 in API * Update CHANGELOG with PR number Signed-off-by: Brad Topol Signed-off-by: Anthony J Mirabella Signed-off-by: Ziqi Zhao Signed-off-by: GitHub Co-authored-by: Aaron Clawson <3766680+MadVikingGod@users.noreply.github.com> Co-authored-by: Chester Cheung Co-authored-by: Anthony Mirabella Co-authored-by: Tobias Klauser Co-authored-by: petrie <244236866@qq.com> Co-authored-by: Damien Mathieu <42@dmathieu.com> Co-authored-by: Brad Topol Co-authored-by: Craig Pastro Co-authored-by: Kshitija Murudi Co-authored-by: Petrie Liu Co-authored-by: Guangya Liu Co-authored-by: Craig Pastro Co-authored-by: ttoad Co-authored-by: Ziqi Zhao Co-authored-by: Tigran Najaryan <4194920+tigrannajaryan@users.noreply.github.com> Co-authored-by: Håvard Anda Estensen Co-authored-by: Mikhail Mazurskiy <126021+ash2k@users.noreply.github.com> Co-authored-by: Sam Xie Co-authored-by: David Ashpole Co-authored-by: Alan Protasio Co-authored-by: Joshua MacDonald Co-authored-by: Mitch Usher Co-authored-by: Gaurang Patel Co-authored-by: Mike Dame --- .github/dependabot.yml | 12 +- CHANGELOG.md | 34 + bridge/opencensus/README.md | 47 - bridge/opencensus/aggregation.go | 157 ---- bridge/opencensus/aggregation_test.go | 322 ------- bridge/opencensus/exporter.go | 186 ---- bridge/opencensus/exporter_test.go | 475 ---------- bridge/opencensus/go.mod | 11 - bridge/opencensus/go.sum | 4 - .../opencensus/opencensusmetric}/doc.go | 8 +- bridge/opencensus/opencensusmetric/go.mod | 28 + bridge/opencensus/opencensusmetric/go.sum | 98 ++ .../opencensusmetric/internal/metric.go | 219 +++++ .../opencensusmetric/internal/metric_test.go | 667 ++++++++++++++ bridge/opencensus/test/go.mod | 6 - bridge/opencensus/test/go.sum | 1 - example/opencensus/go.mod | 38 - example/opencensus/go.sum | 61 -- example/opencensus/main.go | 154 ---- .../prometheus/doc.go | 3 +- example/prometheus/go.mod | 26 +- example/prometheus/go.sum | 27 +- example/prometheus/main.go | 148 ++- exporters/otlp/otlpmetric/client.go | 52 ++ exporters/otlp/otlpmetric/clients.go | 43 - exporters/otlp/otlpmetric/doc.go | 20 + exporters/otlp/otlpmetric/exporter.go | 153 ++-- exporters/otlp/otlpmetric/exporter_test.go | 861 ++---------------- exporters/otlp/otlpmetric/go.mod | 22 +- exporters/otlp/otlpmetric/go.sum | 12 +- .../internal/metrictransform/attribute.go | 158 ---- .../metrictransform/attribute_test.go | 258 ------ .../internal/metrictransform/metric.go | 437 --------- .../internal/metrictransform/metric_test.go | 314 ------- .../internal/metrictransform/resource_test.go | 48 - .../{otlpconfig => oconf}/envconfig.go | 2 +- .../internal/{otlpconfig => oconf}/options.go | 2 +- .../{otlpconfig => oconf}/options_test.go | 
132 +-- .../{otlpconfig => oconf}/optiontypes.go | 2 +- .../internal/{otlpconfig => oconf}/tls.go | 2 +- .../otlp/otlpmetric/internal/otest/client.go | 254 ++++++ .../otlpmetric/internal/otest/client_test.go | 54 ++ .../otlpmetric/internal/otest/collector.go | 428 +++++++++ .../internal/otlpmetrictest/client.go | 132 --- .../internal/otlpmetrictest/collector.go | 55 -- .../internal/otlpmetrictest/data.go | 71 -- .../internal/otlpmetrictest/otlptest.go | 174 ---- .../internal/transform/attribute.go | 155 ++++ .../internal/transform/attribute_test.go | 197 ++++ .../otlp/otlpmetric/internal/transform/doc.go | 14 +- .../otlpmetric/internal/transform/error.go | 114 +++ .../internal/transform/error_test.go | 91 ++ .../internal/transform/metricdata.go | 207 +++++ .../internal/transform/metricdata_test.go | 355 ++++++++ exporters/otlp/otlpmetric/options.go | 43 - .../otlp/otlpmetric/otlpmetricgrpc/client.go | 162 ++-- .../otlpmetric/otlpmetricgrpc/client_test.go | 411 +++------ .../otlpmetricgrpc/client_unit_test.go | 193 ---- .../otlp/otlpmetric/otlpmetricgrpc/config.go | 241 +++++ .../otlpmetricgrpc/{exporter.go => doc.go} | 18 +- .../otlpmetric/otlpmetricgrpc/example_test.go | 184 +--- .../otlp/otlpmetric/otlpmetricgrpc/go.mod | 5 +- .../otlp/otlpmetric/otlpmetricgrpc/go.sum | 2 +- .../otlpmetricgrpc/mock_collector_test.go | 169 ---- .../otlp/otlpmetric/otlpmetricgrpc/options.go | 189 ---- .../otlpmetrichttp/certificate_test.go | 92 -- .../otlp/otlpmetric/otlpmetrichttp/client.go | 190 ++-- .../otlpmetric/otlpmetrichttp/client_test.go | 354 +++---- .../otlpmetrichttp/client_unit_test.go | 68 -- .../otlp/otlpmetric/otlpmetrichttp/config.go | 184 ++++ .../otlp/otlpmetric/otlpmetrichttp/doc.go | 11 +- .../otlpmetric/otlpmetrichttp/example_test.go | 45 + .../otlpmetric/otlpmetrichttp/exporter.go | 31 - .../otlp/otlpmetric/otlpmetrichttp/go.mod | 9 +- .../otlp/otlpmetric/otlpmetrichttp/go.sum | 2 +- .../otlpmetrichttp/mock_collector_test.go | 239 ----- .../otlp/otlpmetric/otlpmetrichttp/options.go | 185 ---- exporters/prometheus/README.md | 9 - .../number => exporters/prometheus}/doc.go | 13 +- exporters/prometheus/exporter.go | 233 +++++ exporters/prometheus/exporter_test.go | 130 +++ exporters/prometheus/go.mod | 18 +- exporters/prometheus/go.sum | 27 +- exporters/prometheus/prometheus.go | 324 ------- exporters/prometheus/prometheus_test.go | 228 ----- exporters/prometheus/sanitize.go | 50 - exporters/prometheus/sanitize_test.go | 61 -- exporters/prometheus/testdata/counter.txt | 3 + exporters/prometheus/testdata/gauge.txt | 3 + exporters/prometheus/testdata/histogram.txt | 15 + .../prometheus/testdata/sanitized_labels.txt | 3 + exporters/stdout/stdoutmetric/config.go | 110 +-- exporters/stdout/stdoutmetric/doc.go | 12 +- exporters/stdout/stdoutmetric/encoder.go | 43 + exporters/stdout/stdoutmetric/example_test.go | 280 ++++-- exporters/stdout/stdoutmetric/exporter.go | 65 +- .../stdout/stdoutmetric/exporter_test.go | 102 +++ exporters/stdout/stdoutmetric/go.mod | 17 +- exporters/stdout/stdoutmetric/go.sum | 1 - exporters/stdout/stdoutmetric/metric.go | 144 --- exporters/stdout/stdoutmetric/metric_test.go | 260 ------ sdk/metric/aggregation/aggregation.go | 164 ++++ sdk/metric/aggregation/aggregation_test.go | 70 ++ sdk/metric/aggregator/aggregator.go | 114 --- sdk/metric/aggregator/aggregator_test.go | 104 --- sdk/metric/aggregator/aggregatortest/test.go | 307 ------- sdk/metric/aggregator/exponential/README.md | 27 - .../aggregator/exponential/benchmark_test.go | 67 -- 
.../exponential/mapping/exponent/exponent.go | 127 --- .../mapping/exponent/exponent_test.go | 341 ------- .../exponential/mapping/internal/float64.go | 72 -- .../mapping/internal/float64_test.go | 47 - .../mapping/logarithm/logarithm.go | 190 ---- .../mapping/logarithm/logarithm_test.go | 231 ----- .../aggregator/exponential/mapping/mapping.go | 48 - .../aggregator/histogram/benchmark_test.go | 130 --- sdk/metric/aggregator/histogram/histogram.go | 269 ------ .../aggregator/histogram/histogram_test.go | 295 ------ sdk/metric/aggregator/lastvalue/lastvalue.go | 133 --- .../aggregator/lastvalue/lastvalue_test.go | 146 --- sdk/metric/aggregator/sum/sum.go | 88 -- sdk/metric/aggregator/sum/sum_test.go | 154 ---- sdk/metric/alignment_test.go | 43 - sdk/metric/benchmark_test.go | 444 --------- sdk/metric/config.go | 139 +++ sdk/metric/config_test.go | 134 +++ sdk/metric/controller/basic/config.go | 131 --- sdk/metric/controller/basic/config_test.go | 37 - sdk/metric/controller/basic/controller.go | 382 -------- .../controller/basic/controller_test.go | 493 ---------- sdk/metric/controller/basic/pull_test.go | 121 --- sdk/metric/controller/basic/push_test.go | 230 ----- .../controllertest/controller_test.go | 66 -- sdk/metric/controller/controllertest/test.go | 85 -- sdk/metric/controller/time/time.go | 67 -- sdk/metric/correct_test.go | 572 ------------ sdk/metric/doc.go | 146 +-- sdk/metric/example_test.go | 63 ++ sdk/metric/export/aggregation/aggregation.go | 119 --- sdk/metric/export/aggregation/temporality.go | 117 --- .../export/aggregation/temporality_test.go | 73 -- sdk/metric/export/metric.go | 280 ------ sdk/metric/export/metric_test.go | 71 -- sdk/metric/exporter.go | 61 ++ sdk/metric/go.mod | 25 +- sdk/metric/go.sum | 2 - sdk/metric/histogram_stress_test.go | 67 -- sdk/metric/instrument.go | 76 ++ sdk/metric/instrument_provider.go | 275 ++++++ sdk/metric/internal/aggregator.go | 40 + .../internal/aggregator_example_test.go | 122 +++ sdk/metric/internal/aggregator_test.go | 155 ++++ sdk/metric/internal/doc.go | 18 + sdk/metric/internal/filter.go | 67 ++ sdk/metric/internal/filter_test.go | 202 ++++ sdk/metric/internal/histogram.go | 243 +++++ sdk/metric/internal/histogram_test.go | 203 +++++ sdk/metric/internal/lastvalue.go | 77 ++ sdk/metric/internal/lastvalue_test.go | 91 ++ sdk/metric/internal/sum.go | 156 ++++ sdk/metric/internal/sum_test.go | 135 +++ sdk/metric/manual_reader.go | 134 +++ sdk/metric/manual_reader_test.go | 77 ++ sdk/metric/meter.go | 135 +++ sdk/metric/meter_test.go | 517 +++++++++++ sdk/metric/metricdata/data.go | 133 +++ .../metricdata/metricdatatest/assertion.go | 134 +++ .../metricdatatest/assertion_fail_test.go | 59 ++ .../metricdatatest/assertion_test.go | 319 +++++++ .../metricdata/metricdatatest/comparisons.go | 363 ++++++++ sdk/metric/metricdata/temporality.go | 43 + .../temporality_string.go | 10 +- sdk/metric/metrictest/config.go | 57 -- sdk/metric/metrictest/exporter.go | 200 ---- sdk/metric/metrictest/exporter_test.go | 414 --------- sdk/metric/number/kind_string.go | 24 - sdk/metric/number/number.go | 539 ----------- sdk/metric/number/number_test.go | 212 ----- sdk/metric/periodic_reader.go | 245 +++++ sdk/metric/periodic_reader_test.go | 225 +++++ sdk/metric/pipeline.go | 349 +++++++ sdk/metric/pipeline_registry_test.go | 587 ++++++++++++ sdk/metric/pipeline_test.go | 216 +++++ sdk/metric/processor/basic/basic.go | 383 -------- sdk/metric/processor/basic/basic_test.go | 510 ----------- sdk/metric/processor/basic/config.go | 43 - 
sdk/metric/processor/processortest/test.go | 432 --------- .../processor/processortest/test_test.go | 90 -- sdk/metric/processor/reducer/doc.go | 60 -- sdk/metric/processor/reducer/reducer.go | 66 -- sdk/metric/processor/reducer/reducer_test.go | 117 --- sdk/metric/provider.go | 126 +++ sdk/metric/provider_test.go | 79 ++ sdk/metric/reader.go | 216 +++++ sdk/metric/reader_test.go | 241 +++++ sdk/metric/refcount_mapped.go | 60 -- sdk/metric/registry/doc.go | 24 - sdk/metric/registry/registry.go | 139 --- sdk/metric/registry/registry_test.go | 112 --- sdk/metric/sdk.go | 423 --------- sdk/metric/sdkapi/descriptor.go | 70 -- sdk/metric/sdkapi/descriptor_test.go | 33 - sdk/metric/sdkapi/instrumentkind.go | 80 -- sdk/metric/sdkapi/instrumentkind_string.go | 28 - sdk/metric/sdkapi/instrumentkind_test.go | 32 - sdk/metric/sdkapi/noop.go | 83 -- sdk/metric/sdkapi/sdkapi.go | 162 ---- sdk/metric/sdkapi/sdkapi_test.go | 41 - sdk/metric/sdkapi/wrap.go | 183 ---- sdk/metric/selector/simple/simple.go | 94 -- sdk/metric/selector/simple/simple_test.go | 66 -- sdk/metric/view/doc.go | 20 + sdk/metric/view/example_test.go | 199 ++++ .../metric/view/instrument.go | 23 +- sdk/metric/view/instrumentkind.go | 46 + sdk/metric/view/view.go | 235 +++++ sdk/metric/view/view_test.go | 447 +++++++++ 217 files changed, 12735 insertions(+), 18901 deletions(-) delete mode 100644 bridge/opencensus/aggregation.go delete mode 100644 bridge/opencensus/aggregation_test.go delete mode 100644 bridge/opencensus/exporter.go delete mode 100644 bridge/opencensus/exporter_test.go rename {sdk/metric/metrictest => bridge/opencensus/opencensusmetric}/doc.go (76%) create mode 100644 bridge/opencensus/opencensusmetric/go.mod create mode 100644 bridge/opencensus/opencensusmetric/go.sum create mode 100644 bridge/opencensus/opencensusmetric/internal/metric.go create mode 100644 bridge/opencensus/opencensusmetric/internal/metric_test.go delete mode 100644 example/opencensus/go.mod delete mode 100644 example/opencensus/go.sum delete mode 100644 example/opencensus/main.go rename exporters/otlp/otlpmetric/internal/otlpconfig/envconfig_test.go => example/prometheus/doc.go (88%) create mode 100644 exporters/otlp/otlpmetric/client.go delete mode 100644 exporters/otlp/otlpmetric/clients.go create mode 100644 exporters/otlp/otlpmetric/doc.go delete mode 100644 exporters/otlp/otlpmetric/internal/metrictransform/attribute.go delete mode 100644 exporters/otlp/otlpmetric/internal/metrictransform/attribute_test.go delete mode 100644 exporters/otlp/otlpmetric/internal/metrictransform/metric.go delete mode 100644 exporters/otlp/otlpmetric/internal/metrictransform/metric_test.go delete mode 100644 exporters/otlp/otlpmetric/internal/metrictransform/resource_test.go rename exporters/otlp/otlpmetric/internal/{otlpconfig => oconf}/envconfig.go (97%) rename exporters/otlp/otlpmetric/internal/{otlpconfig => oconf}/options.go (98%) rename exporters/otlp/otlpmetric/internal/{otlpconfig => oconf}/options_test.go (70%) rename exporters/otlp/otlpmetric/internal/{otlpconfig => oconf}/optiontypes.go (95%) rename exporters/otlp/otlpmetric/internal/{otlpconfig => oconf}/tls.go (92%) create mode 100644 exporters/otlp/otlpmetric/internal/otest/client.go create mode 100644 exporters/otlp/otlpmetric/internal/otest/client_test.go create mode 100644 exporters/otlp/otlpmetric/internal/otest/collector.go delete mode 100644 exporters/otlp/otlpmetric/internal/otlpmetrictest/client.go delete mode 100644 exporters/otlp/otlpmetric/internal/otlpmetrictest/collector.go delete mode 
100644 exporters/otlp/otlpmetric/internal/otlpmetrictest/data.go delete mode 100644 exporters/otlp/otlpmetric/internal/otlpmetrictest/otlptest.go create mode 100644 exporters/otlp/otlpmetric/internal/transform/attribute.go create mode 100644 exporters/otlp/otlpmetric/internal/transform/attribute_test.go rename sdk/metric/atomicfields.go => exporters/otlp/otlpmetric/internal/transform/doc.go (64%) create mode 100644 exporters/otlp/otlpmetric/internal/transform/error.go create mode 100644 exporters/otlp/otlpmetric/internal/transform/error_test.go create mode 100644 exporters/otlp/otlpmetric/internal/transform/metricdata.go create mode 100644 exporters/otlp/otlpmetric/internal/transform/metricdata_test.go delete mode 100644 exporters/otlp/otlpmetric/options.go delete mode 100644 exporters/otlp/otlpmetric/otlpmetricgrpc/client_unit_test.go create mode 100644 exporters/otlp/otlpmetric/otlpmetricgrpc/config.go rename exporters/otlp/otlpmetric/otlpmetricgrpc/{exporter.go => doc.go} (61%) delete mode 100644 exporters/otlp/otlpmetric/otlpmetricgrpc/mock_collector_test.go delete mode 100644 exporters/otlp/otlpmetric/otlpmetricgrpc/options.go delete mode 100644 exporters/otlp/otlpmetric/otlpmetrichttp/certificate_test.go delete mode 100644 exporters/otlp/otlpmetric/otlpmetrichttp/client_unit_test.go create mode 100644 exporters/otlp/otlpmetric/otlpmetrichttp/config.go create mode 100644 exporters/otlp/otlpmetric/otlpmetrichttp/example_test.go delete mode 100644 exporters/otlp/otlpmetric/otlpmetrichttp/exporter.go delete mode 100644 exporters/otlp/otlpmetric/otlpmetrichttp/mock_collector_test.go delete mode 100644 exporters/otlp/otlpmetric/otlpmetrichttp/options.go delete mode 100644 exporters/prometheus/README.md rename {sdk/metric/number => exporters/prometheus}/doc.go (59%) create mode 100644 exporters/prometheus/exporter.go create mode 100644 exporters/prometheus/exporter_test.go delete mode 100644 exporters/prometheus/prometheus.go delete mode 100644 exporters/prometheus/prometheus_test.go delete mode 100644 exporters/prometheus/sanitize.go delete mode 100644 exporters/prometheus/sanitize_test.go create mode 100755 exporters/prometheus/testdata/counter.txt create mode 100644 exporters/prometheus/testdata/gauge.txt create mode 100644 exporters/prometheus/testdata/histogram.txt create mode 100755 exporters/prometheus/testdata/sanitized_labels.txt create mode 100644 exporters/stdout/stdoutmetric/encoder.go create mode 100644 exporters/stdout/stdoutmetric/exporter_test.go delete mode 100644 exporters/stdout/stdoutmetric/metric.go delete mode 100644 exporters/stdout/stdoutmetric/metric_test.go create mode 100644 sdk/metric/aggregation/aggregation.go create mode 100644 sdk/metric/aggregation/aggregation_test.go delete mode 100644 sdk/metric/aggregator/aggregator.go delete mode 100644 sdk/metric/aggregator/aggregator_test.go delete mode 100644 sdk/metric/aggregator/aggregatortest/test.go delete mode 100644 sdk/metric/aggregator/exponential/README.md delete mode 100644 sdk/metric/aggregator/exponential/benchmark_test.go delete mode 100644 sdk/metric/aggregator/exponential/mapping/exponent/exponent.go delete mode 100644 sdk/metric/aggregator/exponential/mapping/exponent/exponent_test.go delete mode 100644 sdk/metric/aggregator/exponential/mapping/internal/float64.go delete mode 100644 sdk/metric/aggregator/exponential/mapping/internal/float64_test.go delete mode 100644 sdk/metric/aggregator/exponential/mapping/logarithm/logarithm.go delete mode 100644 
sdk/metric/aggregator/exponential/mapping/logarithm/logarithm_test.go delete mode 100644 sdk/metric/aggregator/exponential/mapping/mapping.go delete mode 100644 sdk/metric/aggregator/histogram/benchmark_test.go delete mode 100644 sdk/metric/aggregator/histogram/histogram.go delete mode 100644 sdk/metric/aggregator/histogram/histogram_test.go delete mode 100644 sdk/metric/aggregator/lastvalue/lastvalue.go delete mode 100644 sdk/metric/aggregator/lastvalue/lastvalue_test.go delete mode 100644 sdk/metric/aggregator/sum/sum.go delete mode 100644 sdk/metric/aggregator/sum/sum_test.go delete mode 100644 sdk/metric/alignment_test.go delete mode 100644 sdk/metric/benchmark_test.go create mode 100644 sdk/metric/config.go create mode 100644 sdk/metric/config_test.go delete mode 100644 sdk/metric/controller/basic/config.go delete mode 100644 sdk/metric/controller/basic/config_test.go delete mode 100644 sdk/metric/controller/basic/controller.go delete mode 100644 sdk/metric/controller/basic/controller_test.go delete mode 100644 sdk/metric/controller/basic/pull_test.go delete mode 100644 sdk/metric/controller/basic/push_test.go delete mode 100644 sdk/metric/controller/controllertest/controller_test.go delete mode 100644 sdk/metric/controller/controllertest/test.go delete mode 100644 sdk/metric/controller/time/time.go delete mode 100644 sdk/metric/correct_test.go create mode 100644 sdk/metric/example_test.go delete mode 100644 sdk/metric/export/aggregation/aggregation.go delete mode 100644 sdk/metric/export/aggregation/temporality.go delete mode 100644 sdk/metric/export/aggregation/temporality_test.go delete mode 100644 sdk/metric/export/metric.go delete mode 100644 sdk/metric/export/metric_test.go create mode 100644 sdk/metric/exporter.go delete mode 100644 sdk/metric/histogram_stress_test.go create mode 100644 sdk/metric/instrument.go create mode 100644 sdk/metric/instrument_provider.go create mode 100644 sdk/metric/internal/aggregator.go create mode 100644 sdk/metric/internal/aggregator_example_test.go create mode 100644 sdk/metric/internal/aggregator_test.go create mode 100644 sdk/metric/internal/doc.go create mode 100644 sdk/metric/internal/filter.go create mode 100644 sdk/metric/internal/filter_test.go create mode 100644 sdk/metric/internal/histogram.go create mode 100644 sdk/metric/internal/histogram_test.go create mode 100644 sdk/metric/internal/lastvalue.go create mode 100644 sdk/metric/internal/lastvalue_test.go create mode 100644 sdk/metric/internal/sum.go create mode 100644 sdk/metric/internal/sum_test.go create mode 100644 sdk/metric/manual_reader.go create mode 100644 sdk/metric/manual_reader_test.go create mode 100644 sdk/metric/meter.go create mode 100644 sdk/metric/meter_test.go create mode 100644 sdk/metric/metricdata/data.go create mode 100644 sdk/metric/metricdata/metricdatatest/assertion.go create mode 100644 sdk/metric/metricdata/metricdatatest/assertion_fail_test.go create mode 100644 sdk/metric/metricdata/metricdatatest/assertion_test.go create mode 100644 sdk/metric/metricdata/metricdatatest/comparisons.go create mode 100644 sdk/metric/metricdata/temporality.go rename sdk/metric/{export/aggregation => metricdata}/temporality_string.go (66%) delete mode 100644 sdk/metric/metrictest/config.go delete mode 100644 sdk/metric/metrictest/exporter.go delete mode 100644 sdk/metric/metrictest/exporter_test.go delete mode 100644 sdk/metric/number/kind_string.go delete mode 100644 sdk/metric/number/number.go delete mode 100644 sdk/metric/number/number_test.go create mode 100644 
sdk/metric/periodic_reader.go create mode 100644 sdk/metric/periodic_reader_test.go create mode 100644 sdk/metric/pipeline.go create mode 100644 sdk/metric/pipeline_registry_test.go create mode 100644 sdk/metric/pipeline_test.go delete mode 100644 sdk/metric/processor/basic/basic.go delete mode 100644 sdk/metric/processor/basic/basic_test.go delete mode 100644 sdk/metric/processor/basic/config.go delete mode 100644 sdk/metric/processor/processortest/test.go delete mode 100644 sdk/metric/processor/processortest/test_test.go delete mode 100644 sdk/metric/processor/reducer/doc.go delete mode 100644 sdk/metric/processor/reducer/reducer.go delete mode 100644 sdk/metric/processor/reducer/reducer_test.go create mode 100644 sdk/metric/provider.go create mode 100644 sdk/metric/provider_test.go create mode 100644 sdk/metric/reader.go create mode 100644 sdk/metric/reader_test.go delete mode 100644 sdk/metric/refcount_mapped.go delete mode 100644 sdk/metric/registry/doc.go delete mode 100644 sdk/metric/registry/registry.go delete mode 100644 sdk/metric/registry/registry_test.go delete mode 100644 sdk/metric/sdk.go delete mode 100644 sdk/metric/sdkapi/descriptor.go delete mode 100644 sdk/metric/sdkapi/descriptor_test.go delete mode 100644 sdk/metric/sdkapi/instrumentkind.go delete mode 100644 sdk/metric/sdkapi/instrumentkind_string.go delete mode 100644 sdk/metric/sdkapi/instrumentkind_test.go delete mode 100644 sdk/metric/sdkapi/noop.go delete mode 100644 sdk/metric/sdkapi/sdkapi.go delete mode 100644 sdk/metric/sdkapi/sdkapi_test.go delete mode 100644 sdk/metric/sdkapi/wrap.go delete mode 100644 sdk/metric/selector/simple/simple.go delete mode 100644 sdk/metric/selector/simple/simple_test.go create mode 100644 sdk/metric/view/doc.go create mode 100644 sdk/metric/view/example_test.go rename exporters/otlp/otlpmetric/internal/metrictransform/resource.go => sdk/metric/view/instrument.go (59%) create mode 100644 sdk/metric/view/instrumentkind.go create mode 100644 sdk/metric/view/view.go create mode 100644 sdk/metric/view/view_test.go diff --git a/.github/dependabot.yml b/.github/dependabot.yml index e1ae982eb5c..d62ae0b70ea 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -29,7 +29,7 @@ updates: interval: weekly day: sunday - package-ecosystem: gomod - directory: /bridge/opencensus/test + directory: /bridge/opencensus/opencensusmetric labels: - dependencies - go @@ -38,7 +38,7 @@ updates: interval: weekly day: sunday - package-ecosystem: gomod - directory: /bridge/opentracing + directory: /bridge/opencensus/test labels: - dependencies - go @@ -47,7 +47,7 @@ updates: interval: weekly day: sunday - package-ecosystem: gomod - directory: /example/fib + directory: /bridge/opentracing labels: - dependencies - go @@ -56,7 +56,7 @@ updates: interval: weekly day: sunday - package-ecosystem: gomod - directory: /example/jaeger + directory: /example/fib labels: - dependencies - go @@ -65,7 +65,7 @@ updates: interval: weekly day: sunday - package-ecosystem: gomod - directory: /example/namedtracer + directory: /example/jaeger labels: - dependencies - go @@ -74,7 +74,7 @@ updates: interval: weekly day: sunday - package-ecosystem: gomod - directory: /example/opencensus + directory: /example/namedtracer labels: - dependencies - go diff --git a/CHANGELOG.md b/CHANGELOG.md index 906e17ce94f..486916d00b4 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -8,6 +8,40 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm ## [Unreleased] +### Changed + +- The metric SDK in 
`go.opentelemetry.io/otel/sdk/metric` is completely refactored to comply with the OpenTelemetry specification. + Please see the package documentation for how the new SDK is initialized and configured. (#3175) + +### Removed + +- The metric portion of the OpenCensus bridge (`go.opentelemetry.io/otel/bridge/opencensus`) has been removed. + A new bridge compliant with the revised metric SDK will be added back in a future release. (#3175) +- The `go.opentelemetry.io/otel/sdk/metric/aggregator/aggregatortest` package is removed, see the new metric SDK. (#3175) +- The `go.opentelemetry.io/otel/sdk/metric/aggregator/histogram` package is removed, see the new metric SDK. (#3175) +- The `go.opentelemetry.io/otel/sdk/metric/aggregator/lastvalue` package is removed, see the new metric SDK. (#3175) +- The `go.opentelemetry.io/otel/sdk/metric/aggregator/sum` package is removed, see the new metric SDK. (#3175) +- The `go.opentelemetry.io/otel/sdk/metric/aggregator` package is removed, see the new metric SDK. (#3175) +- The `go.opentelemetry.io/otel/sdk/metric/controller/basic` package is removed, see the new metric SDK. (#3175) +- The `go.opentelemetry.io/otel/sdk/metric/controller/controllertest` package is removed, see the new metric SDK. (#3175) +- The `go.opentelemetry.io/otel/sdk/metric/controller/time` package is removed, see the new metric SDK. (#3175) +- The `go.opentelemetry.io/otel/sdk/metric/export/aggregation` package is removed, see the new metric SDK. (#3175) +- The `go.opentelemetry.io/otel/sdk/metric/export` package is removed, see the new metric SDK. (#3175) +- The `go.opentelemetry.io/otel/sdk/metric/metrictest` package is removed. + A replacement package that supports the new metric SDK will be added back in a future release. (#3175) +- The `go.opentelemetry.io/otel/sdk/metric/number` package is removed, see the new metric SDK. (#3175) +- The `go.opentelemetry.io/otel/sdk/metric/processor/basic` package is removed, see the new metric SDK. (#3175) +- The `go.opentelemetry.io/otel/sdk/metric/processor/processortest` package is removed, see the new metric SDK. (#3175) +- The `go.opentelemetry.io/otel/sdk/metric/processor/reducer` package is removed, see the new metric SDK. (#3175) +- The `go.opentelemetry.io/otel/sdk/metric/registry` package is removed, see the new metric SDK. (#3175) +- The `go.opentelemetry.io/otel/sdk/metric/sdkapi` package is removed, see the new metric SDK. (#3175) +- The `go.opentelemetry.io/otel/sdk/metric/selector/simple` package is removed, see the new metric SDK. (#3175) +- The `"go.opentelemetry.io/otel/sdk/metric".ErrUninitializedInstrument` variable was removed. (#3175) +- The `"go.opentelemetry.io/otel/sdk/metric".ErrBadInstrument` variable was removed. (#3175) +- The `"go.opentelemetry.io/otel/sdk/metric".Accumulator` type was removed, see the `MeterProvider` in the new metric SDK. (#3175) +- The `"go.opentelemetry.io/otel/sdk/metric".NewAccumulator` function was removed, see `NewMeterProvider` in the new metric SDK. (#3175) +- The deprecated `"go.opentelemetry.io/otel/sdk/metric".AtomicFieldOffsets` function was removed. (#3175) + ## [1.10.0] - 2022-09-09 ### Added diff --git a/bridge/opencensus/README.md index 07dc57380b3..3df9dc7eb07 100644 --- a/bridge/opencensus/README.md +++ b/bridge/opencensus/README.md @@ -79,50 +79,3 @@ OpenCensus and OpenTelemetry APIs are not entirely compatible. If the bridge fi * Custom OpenCensus Samplers specified during StartSpan are ignored. * Links cannot be added to OpenCensus spans.
* OpenTelemetry Debug or Deferred trace flags are dropped after an OpenCensus span is created. - -## Metrics - -### The problem: mixing libraries without mixing pipelines - -The problem for monitoring is simpler than the problem for tracing, since there -are no context propagation issues to deal with. However, it is still difficult -for users to migrate an entire application's monitoring at once. It -should be possible to send metrics generated by OpenCensus libraries to an -OpenTelemetry pipeline so that migrating a metric does not require maintaining -separate export pipelines for OpenCensus and OpenTelemetry. - -### The Exporter "wrapper" solution - -The solution we use here is to allow wrapping an OpenTelemetry exporter such -that it implements the OpenCensus exporter interfaces. This allows a single -exporter to be used for metrics from *both* OpenCensus and OpenTelemetry. - -### User Journey - -Starting from an application using entirely OpenCensus APIs: - -1. Instantiate OpenTelemetry SDK and Exporters. -2. Replace OpenCensus exporters with a wrapped OpenTelemetry exporter from step 1. -3. Migrate libraries individually from OpenCensus to OpenTelemetry. -4. Remove OpenCensus Exporters and configuration. - -For example, to swap out the OpenCensus logging exporter for the OpenTelemetry stdout exporter: - -```go -import ( - "go.opencensus.io/metric/metricexport" - "go.opentelemetry.io/otel/bridge/opencensus" - "go.opentelemetry.io/otel/exporters/stdout" - "go.opentelemetry.io/otel" -) -// With OpenCensus, you could have previously configured the logging exporter like this: -// import logexporter "go.opencensus.io/examples/exporter" -// exporter, _ := logexporter.NewLogExporter(logexporter.Options{}) -// Instead, we can create an equivalent using the OpenTelemetry stdout exporter: -openTelemetryExporter, _ := stdout.New(stdout.WithPrettyPrint()) -exporter := opencensus.NewMetricExporter(openTelemetryExporter) - -// Use the wrapped OpenTelemetry exporter like you normally would with OpenCensus -intervalReader, _ := metricexport.NewIntervalReader(&metricexport.Reader{}, exporter) -intervalReader.Start() -``` diff --git a/bridge/opencensus/aggregation.go b/bridge/opencensus/aggregation.go deleted file mode 100644 index 99d2b07afad..00000000000 --- a/bridge/opencensus/aggregation.go +++ /dev/null @@ -1,157 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License.
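The wrapper removed above has no direct replacement until the new OpenCensus metric bridge lands, but the OpenTelemetry half of that pipeline can already be wired against the refactored SDK. A minimal sketch, assuming the post-#3175 surfaces (`stdoutmetric.New` returning an SDK exporter, readers attached via `sdkmetric.WithReader`); the package documentation remains the authoritative setup guide:

```go
package main

import (
	"context"
	"log"

	"go.opentelemetry.io/otel/exporters/stdout/stdoutmetric"
	sdkmetric "go.opentelemetry.io/otel/sdk/metric"
)

func main() {
	// Assumption: the refactored stdoutmetric.New returns an exporter
	// usable by the new SDK's readers.
	exp, err := stdoutmetric.New()
	if err != nil {
		log.Fatal(err)
	}

	// A periodic reader owns collection timing, taking over the role the
	// OpenCensus IntervalReader played in the removed example above.
	provider := sdkmetric.NewMeterProvider(
		sdkmetric.WithReader(sdkmetric.NewPeriodicReader(exp)),
	)
	defer func() {
		if err := provider.Shutdown(context.Background()); err != nil {
			log.Print(err)
		}
	}()

	// Meters and instruments are created from the provider as usual.
	_ = provider.Meter("example")
}
```

Note the inversion of control: collection scheduling now lives in the SDK's reader rather than in an exporter wrapper, so there is no OpenCensus-side registration left to perform.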
- -package opencensus // import "go.opentelemetry.io/otel/bridge/opencensus" - -import ( - "errors" - "fmt" - "time" - - "go.opencensus.io/metric/metricdata" - - "go.opentelemetry.io/otel/sdk/metric/export/aggregation" - "go.opentelemetry.io/otel/sdk/metric/number" -) - -var ( - errIncompatibleType = errors.New("incompatible type for aggregation") - errEmpty = errors.New("points may not be empty") - errBadPoint = errors.New("point cannot be converted") -) - -type recordFunc func(agg aggregation.Aggregation, end time.Time) error - -// recordAggregationsFromPoints records one OpenTelemetry aggregation for -// each OpenCensus point. Points may not be empty and must be either -// all (int|float)64 or all *metricdata.Distribution. -func recordAggregationsFromPoints(points []metricdata.Point, recorder recordFunc) error { - if len(points) == 0 { - return errEmpty - } - switch t := points[0].Value.(type) { - case int64: - return recordGaugePoints(points, recorder) - case float64: - return recordGaugePoints(points, recorder) - case *metricdata.Distribution: - return recordDistributionPoint(points, recorder) - default: - // TODO add *metricdata.Summary support - return fmt.Errorf("%w: %v", errIncompatibleType, t) - } -} - -var _ aggregation.Aggregation = &ocRawAggregator{} -var _ aggregation.LastValue = &ocRawAggregator{} - -// recordGaugePoints creates an OpenTelemetry aggregation from OpenCensus points. -// Points may not be empty, and must only contain integers or floats. -func recordGaugePoints(pts []metricdata.Point, recorder recordFunc) error { - for _, pt := range pts { - switch t := pt.Value.(type) { - case int64: - if err := recorder(&ocRawAggregator{ - value: number.NewInt64Number(pt.Value.(int64)), - time: pt.Time, - }, pt.Time); err != nil { - return err - } - case float64: - if err := recorder(&ocRawAggregator{ - value: number.NewFloat64Number(pt.Value.(float64)), - time: pt.Time, - }, pt.Time); err != nil { - return err - } - default: - return fmt.Errorf("%w: %v", errIncompatibleType, t) - } - } - return nil -} - -type ocRawAggregator struct { - value number.Number - time time.Time -} - -// Kind returns the kind of aggregation this is. -func (o *ocRawAggregator) Kind() aggregation.Kind { - return aggregation.LastValueKind -} - -// LastValue returns the last point. -func (o *ocRawAggregator) LastValue() (number.Number, time.Time, error) { - return o.value, o.time, nil -} - -var _ aggregation.Aggregation = &ocDistAggregator{} -var _ aggregation.Histogram = &ocDistAggregator{} - -// recordDistributionPoint creates an OpenTelemetry aggregation from -// OpenCensus points. Points may not be empty, and must only contain -// Distributions. The most recent distribution will be used in the aggregation. -func recordDistributionPoint(pts []metricdata.Point, recorder recordFunc) error { - // only use the most recent datapoint for now.
- pt := pts[len(pts)-1] - val, ok := pt.Value.(*metricdata.Distribution) - if !ok { - return fmt.Errorf("%w: %v", errBadPoint, pt.Value) - } - bucketCounts := make([]uint64, len(val.Buckets)) - for i, bucket := range val.Buckets { - if bucket.Count < 0 { - return fmt.Errorf("%w: bucket count may not be negative", errBadPoint) - } - bucketCounts[i] = uint64(bucket.Count) - } - if val.Count < 0 { - return fmt.Errorf("%w: count may not be negative", errBadPoint) - } - return recorder(&ocDistAggregator{ - sum: number.NewFloat64Number(val.Sum), - count: uint64(val.Count), - buckets: aggregation.Buckets{ - Boundaries: val.BucketOptions.Bounds, - Counts: bucketCounts, - }, - }, pts[len(pts)-1].Time) -} - -type ocDistAggregator struct { - sum number.Number - count uint64 - buckets aggregation.Buckets -} - -// Kind returns the kind of aggregation this is. -func (o *ocDistAggregator) Kind() aggregation.Kind { - return aggregation.HistogramKind -} - -// Sum returns the sum of values. -func (o *ocDistAggregator) Sum() (number.Number, error) { - return o.sum, nil -} - -// Count returns the number of values. -func (o *ocDistAggregator) Count() (uint64, error) { - return o.count, nil -} - -// Histogram returns the count of events in pre-determined buckets. -func (o *ocDistAggregator) Histogram() (aggregation.Buckets, error) { - return o.buckets, nil -} diff --git a/bridge/opencensus/aggregation_test.go b/bridge/opencensus/aggregation_test.go deleted file mode 100644 index bbaff5c6440..00000000000 --- a/bridge/opencensus/aggregation_test.go +++ /dev/null @@ -1,322 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package opencensus - -import ( - "errors" - "testing" - "time" - - "go.opencensus.io/metric/metricdata" - - "go.opentelemetry.io/otel/sdk/metric/export/aggregation" -) - -func TestNewAggregationFromPoints(t *testing.T) { - now := time.Now() - for _, tc := range []struct { - desc string - input []metricdata.Point - expectedKind aggregation.Kind - expectedErr error - }{ - { - desc: "no points", - expectedErr: errEmpty, - }, - { - desc: "int point", - input: []metricdata.Point{ - { - Time: now, - Value: int64(23), - }, - }, - expectedKind: aggregation.LastValueKind, - }, - { - desc: "float point", - input: []metricdata.Point{ - { - Time: now, - Value: float64(23), - }, - }, - expectedKind: aggregation.LastValueKind, - }, - { - desc: "distribution point", - input: []metricdata.Point{ - { - Time: now, - Value: &metricdata.Distribution{ - Count: 2, - Sum: 55, - BucketOptions: &metricdata.BucketOptions{ - Bounds: []float64{20, 30}, - }, - Buckets: []metricdata.Bucket{ - {Count: 1}, - {Count: 1}, - }, - }, - }, - }, - expectedKind: aggregation.HistogramKind, - }, - { - desc: "bad distribution bucket count", - input: []metricdata.Point{ - { - Time: now, - Value: &metricdata.Distribution{ - Count: 2, - Sum: 55, - BucketOptions: &metricdata.BucketOptions{ - Bounds: []float64{20, 30}, - }, - Buckets: []metricdata.Bucket{ - // negative bucket - {Count: -1}, - {Count: 1}, - }, - }, - }, - }, - expectedErr: errBadPoint, - }, - { - desc: "bad distribution count", - input: []metricdata.Point{ - { - Time: now, - Value: &metricdata.Distribution{ - // negative count - Count: -2, - Sum: 55, - BucketOptions: &metricdata.BucketOptions{ - Bounds: []float64{20, 30}, - }, - Buckets: []metricdata.Bucket{ - {Count: 1}, - {Count: 1}, - }, - }, - }, - }, - expectedErr: errBadPoint, - }, - { - desc: "incompatible point type bool", - input: []metricdata.Point{ - { - Time: now, - Value: true, - }, - }, - expectedErr: errIncompatibleType, - }, - { - desc: "dist is incompatible with raw points", - input: []metricdata.Point{ - { - Time: now, - Value: int64(23), - }, - { - Time: now, - Value: &metricdata.Distribution{ - Count: 2, - Sum: 55, - BucketOptions: &metricdata.BucketOptions{ - Bounds: []float64{20, 30}, - }, - Buckets: []metricdata.Bucket{ - {Count: 1}, - {Count: 1}, - }, - }, - }, - }, - expectedErr: errIncompatibleType, - }, - { - desc: "int point is incompatible with dist", - input: []metricdata.Point{ - { - Time: now, - Value: &metricdata.Distribution{ - Count: 2, - Sum: 55, - BucketOptions: &metricdata.BucketOptions{ - Bounds: []float64{20, 30}, - }, - Buckets: []metricdata.Bucket{ - {Count: 1}, - {Count: 1}, - }, - }, - }, - { - Time: now, - Value: int64(23), - }, - }, - expectedErr: errBadPoint, - }, - } { - t.Run(tc.desc, func(t *testing.T) { - var output []aggregation.Aggregation - err := recordAggregationsFromPoints(tc.input, func(agg aggregation.Aggregation, ts time.Time) error { - last := tc.input[len(tc.input)-1] - if ts != last.Time { - t.Errorf("incorrect timestamp %v != %v", ts, last.Time) - } - output = append(output, agg) - return nil - }) - if !errors.Is(err, tc.expectedErr) { - t.Errorf("newAggregationFromPoints(%v) = err(%v), want err(%v)", tc.input, err, tc.expectedErr) - } - for _, out := range output { - if tc.expectedErr == nil && out.Kind() != tc.expectedKind { - t.Errorf("newAggregationFromPoints(%v) = %v, want %v", tc.input, out.Kind(), tc.expectedKind) - } - } - }) - } -} - -func TestLastValueAggregation(t *testing.T) { - now := time.Now() - input := []metricdata.Point{ - {Value: 
int64(15), Time: now.Add(-time.Minute)}, - {Value: int64(-23), Time: now}, - } - idx := 0 - err := recordAggregationsFromPoints(input, func(agg aggregation.Aggregation, end time.Time) error { - if agg.Kind() != aggregation.LastValueKind { - t.Errorf("recordAggregationsFromPoints(%v) = %v, want %v", input, agg.Kind(), aggregation.LastValueKind) - } - if end != input[idx].Time { - t.Errorf("recordAggregationsFromPoints(%v).end() = %v, want %v", input, end, input[idx].Time) - } - pointsLV, ok := agg.(aggregation.LastValue) - if !ok { - t.Errorf("recordAggregationsFromPoints(%v) = %v does not implement the aggregation.LastValue interface", input, agg) - } - lv, ts, _ := pointsLV.LastValue() - if lv.AsInt64() != input[idx].Value { - t.Errorf("recordAggregationsFromPoints(%v) = %v, want %v", input, lv.AsInt64(), input[idx].Value) - } - if ts != input[idx].Time { - t.Errorf("recordAggregationsFromPoints(%v) = %v, want %v", input, ts, input[idx].Time) - } - idx++ - return nil - }) - if err != nil { - t.Errorf("recordAggregationsFromPoints(%v) = unexpected error %v", input, err) - } -} - -func TestHistogramAggregation(t *testing.T) { - now := time.Now() - input := []metricdata.Point{ - { - Value: &metricdata.Distribution{ - Count: 0, - Sum: 0, - BucketOptions: &metricdata.BucketOptions{ - Bounds: []float64{20, 30}, - }, - Buckets: []metricdata.Bucket{ - {Count: 0}, - {Count: 0}, - }, - }, - }, - { - Time: now, - Value: &metricdata.Distribution{ - Count: 2, - Sum: 55, - BucketOptions: &metricdata.BucketOptions{ - Bounds: []float64{20, 30}, - }, - Buckets: []metricdata.Bucket{ - {Count: 1}, - {Count: 1}, - }, - }, - }, - } - var output aggregation.Aggregation - var end time.Time - err := recordAggregationsFromPoints(input, func(argAgg aggregation.Aggregation, argEnd time.Time) error { - output = argAgg - end = argEnd - return nil - }) - if err != nil { - t.Fatalf("recordAggregationsFromPoints(%v) = err(%v), want <nil>", input, err) - } - if output.Kind() != aggregation.HistogramKind { - t.Errorf("recordAggregationsFromPoints(%v) = %v, want %v", input, output.Kind(), aggregation.HistogramKind) - } - if !end.Equal(now) { - t.Errorf("recordAggregationsFromPoints(%v).end() = %v, want %v", input, end, now) - } - distAgg, ok := output.(aggregation.Histogram) - if !ok { - t.Errorf("recordAggregationsFromPoints(%v) = %v does not implement the aggregation.Histogram interface", input, output) - } - sum, err := distAgg.Sum() - if err != nil { - t.Fatalf("Unexpected err: %v", err) - } - if sum.AsFloat64() != float64(55) { - t.Errorf("recordAggregationsFromPoints(%v).Sum() = %v, want %v", input, sum.AsFloat64(), float64(55)) - } - count, err := distAgg.Count() - if err != nil { - t.Fatalf("Unexpected err: %v", err) - } - if count != 2 { - t.Errorf("recordAggregationsFromPoints(%v).Count() = %v, want %v", input, count, 2) - } - hist, err := distAgg.Histogram() - if err != nil { - t.Fatalf("Unexpected err: %v", err) - } - inputBucketBoundaries := []float64{20, 30} - if len(hist.Boundaries) != len(inputBucketBoundaries) { - t.Fatalf("recordAggregationsFromPoints(%v).Histogram() produced %d boundaries, want %d boundaries", input, len(hist.Boundaries), len(inputBucketBoundaries)) - } - for i, b := range hist.Boundaries { - if b != inputBucketBoundaries[i] { - t.Errorf("recordAggregationsFromPoints(%v).Histogram().Boundaries[%d] = %v, want %v", input, i, b, inputBucketBoundaries[i]) - } - } - inputBucketCounts := []uint64{1, 1} - if len(hist.Counts) != len(inputBucketCounts) { -
t.Fatalf("recordAggregationsFromPoints(%v).Histogram() produced %d buckets, want %d buckets", input, len(hist.Counts), len(inputBucketCounts)) - } - for i, c := range hist.Counts { - if c != inputBucketCounts[i] { - t.Errorf("recordAggregationsFromPoints(%v).Histogram().Counts[%d] = %d, want %d", input, i, c, inputBucketCounts[i]) - } - } -} diff --git a/bridge/opencensus/exporter.go b/bridge/opencensus/exporter.go deleted file mode 100644 index 7e7e7960007..00000000000 --- a/bridge/opencensus/exporter.go +++ /dev/null @@ -1,186 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package opencensus // import "go.opentelemetry.io/otel/bridge/opencensus" - -import ( - "context" - "errors" - "fmt" - "sync" - "time" - - "go.opencensus.io/metric/metricdata" - "go.opencensus.io/metric/metricexport" - ocresource "go.opencensus.io/resource" - - "go.opentelemetry.io/otel" - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/metric/instrument" - "go.opentelemetry.io/otel/metric/unit" - "go.opentelemetry.io/otel/sdk/instrumentation" - "go.opentelemetry.io/otel/sdk/metric/export" - "go.opentelemetry.io/otel/sdk/metric/export/aggregation" - "go.opentelemetry.io/otel/sdk/metric/number" - "go.opentelemetry.io/otel/sdk/metric/sdkapi" - "go.opentelemetry.io/otel/sdk/resource" -) - -var errConversion = errors.New("unable to convert from OpenCensus to OpenTelemetry") - -// NewMetricExporter returns an OpenCensus exporter that exports to an -// OpenTelemetry exporter. -func NewMetricExporter(base export.Exporter) metricexport.Exporter { - return &exporter{base: base} -} - -// exporter implements the OpenCensus metric Exporter interface using an -// OpenTelemetry base exporter. -type exporter struct { - base export.Exporter -} - -// ExportMetrics implements the OpenCensus metric Exporter interface. -func (e *exporter) ExportMetrics(ctx context.Context, metrics []*metricdata.Metric) error { - res := resource.Empty() - if len(metrics) != 0 { - res = convertResource(metrics[0].Resource) - } - return e.base.Export(ctx, res, &censusLibraryReader{metrics: metrics}) -} - -type censusLibraryReader struct { - metrics []*metricdata.Metric -} - -func (r censusLibraryReader) ForEach(readerFunc func(instrumentation.Library, export.Reader) error) error { - return readerFunc(instrumentation.Library{ - Name: "OpenCensus Bridge", - }, &metricReader{metrics: r.metrics}) -} - -type metricReader struct { - // RWMutex implements locking for the `Reader` interface. - sync.RWMutex - metrics []*metricdata.Metric -} - -var _ export.Reader = &metricReader{} - -// ForEach iterates through the metrics data, synthesizing an -// export.Record with the appropriate aggregation for the exporter. 
-func (d *metricReader) ForEach(_ aggregation.TemporalitySelector, f func(export.Record) error) error { - for _, m := range d.metrics { - descriptor, err := convertDescriptor(m.Descriptor) - if err != nil { - otel.Handle(err) - continue - } - for _, ts := range m.TimeSeries { - if len(ts.Points) == 0 { - continue - } - attrs, err := convertAttrs(m.Descriptor.LabelKeys, ts.LabelValues) - if err != nil { - otel.Handle(err) - continue - } - err = recordAggregationsFromPoints( - ts.Points, - func(agg aggregation.Aggregation, end time.Time) error { - return f(export.NewRecord( - &descriptor, - &attrs, - agg, - ts.StartTime, - end, - )) - }) - if err != nil && !errors.Is(err, aggregation.ErrNoData) { - return err - } - } - } - return nil -} - -// convertAttrs converts from OpenCensus attribute keys and values to an -// OpenTelemetry attribute Set. -func convertAttrs(keys []metricdata.LabelKey, values []metricdata.LabelValue) (attribute.Set, error) { - if len(keys) != len(values) { - return attribute.NewSet(), fmt.Errorf("%w different number of label keys (%d) and values (%d)", errConversion, len(keys), len(values)) - } - attrs := []attribute.KeyValue{} - for i, lv := range values { - if !lv.Present { - continue - } - attrs = append(attrs, attribute.KeyValue{ - Key: attribute.Key(keys[i].Key), - Value: attribute.StringValue(lv.Value), - }) - } - return attribute.NewSet(attrs...), nil -} - -// convertResource converts an OpenCensus Resource to an OpenTelemetry Resource -// Note: the ocresource.Resource Type field is not used. -func convertResource(res *ocresource.Resource) *resource.Resource { - attrs := []attribute.KeyValue{} - if res == nil { - return nil - } - for k, v := range res.Labels { - attrs = append(attrs, attribute.KeyValue{Key: attribute.Key(k), Value: attribute.StringValue(v)}) - } - return resource.NewSchemaless(attrs...) -} - -// convertDescriptor converts an OpenCensus Descriptor to an OpenTelemetry Descriptor. -func convertDescriptor(ocDescriptor metricdata.Descriptor) (sdkapi.Descriptor, error) { - var ( - nkind number.Kind - ikind sdkapi.InstrumentKind - ) - switch ocDescriptor.Type { - case metricdata.TypeGaugeInt64: - nkind = number.Int64Kind - ikind = sdkapi.GaugeObserverInstrumentKind - case metricdata.TypeGaugeFloat64: - nkind = number.Float64Kind - ikind = sdkapi.GaugeObserverInstrumentKind - case metricdata.TypeCumulativeInt64: - nkind = number.Int64Kind - ikind = sdkapi.CounterObserverInstrumentKind - case metricdata.TypeCumulativeFloat64: - nkind = number.Float64Kind - ikind = sdkapi.CounterObserverInstrumentKind - default: - // Includes TypeGaugeDistribution, TypeCumulativeDistribution, TypeSummary - return sdkapi.Descriptor{}, fmt.Errorf("%w; descriptor type: %v", errConversion, ocDescriptor.Type) - } - opts := []instrument.Option{ - instrument.WithDescription(ocDescriptor.Description), - } - switch ocDescriptor.Unit { - case metricdata.UnitDimensionless: - opts = append(opts, instrument.WithUnit(unit.Dimensionless)) - case metricdata.UnitBytes: - opts = append(opts, instrument.WithUnit(unit.Bytes)) - case metricdata.UnitMilliseconds: - opts = append(opts, instrument.WithUnit(unit.Milliseconds)) - } - cfg := instrument.NewConfig(opts...) 
- return sdkapi.NewDescriptor(ocDescriptor.Name, ikind, nkind, cfg.Description(), cfg.Unit()), nil -} diff --git a/bridge/opencensus/exporter_test.go b/bridge/opencensus/exporter_test.go deleted file mode 100644 index 2634f5334d5..00000000000 --- a/bridge/opencensus/exporter_test.go +++ /dev/null @@ -1,475 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package opencensus - -import ( - "context" - "errors" - "fmt" - "testing" - "time" - - "go.opencensus.io/metric/metricdata" - ocresource "go.opencensus.io/resource" - - "go.opentelemetry.io/otel" - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/metric/instrument" - "go.opentelemetry.io/otel/metric/unit" - "go.opentelemetry.io/otel/sdk/instrumentation" - "go.opentelemetry.io/otel/sdk/metric/controller/controllertest" - "go.opentelemetry.io/otel/sdk/metric/export" - "go.opentelemetry.io/otel/sdk/metric/export/aggregation" - "go.opentelemetry.io/otel/sdk/metric/metrictest" - "go.opentelemetry.io/otel/sdk/metric/number" - "go.opentelemetry.io/otel/sdk/metric/sdkapi" - "go.opentelemetry.io/otel/sdk/resource" -) - -type fakeExporter struct { - export.Exporter - records []export.Record - resource *resource.Resource - err error -} - -func (f *fakeExporter) Export(ctx context.Context, res *resource.Resource, ilr export.InstrumentationLibraryReader) error { - return controllertest.ReadAll(ilr, aggregation.StatelessTemporalitySelector(), - func(_ instrumentation.Library, record export.Record) error { - f.resource = res - f.records = append(f.records, record) - return f.err - }) -} - -type fakeErrorHandler struct { - err error -} - -func (f *fakeErrorHandler) Handle(err error) { - f.err = err -} - -func (f *fakeErrorHandler) matches(err error) error { - // make sure err is cleared for the next test - defer func() { f.err = nil }() - if !errors.Is(f.err, err) { - return fmt.Errorf("err(%v), want err(%v)", f.err, err) - } - return nil -} - -func TestExportMetrics(t *testing.T) { - now := time.Now() - basicDesc := metrictest.NewDescriptor( - "", - sdkapi.GaugeObserverInstrumentKind, - number.Int64Kind, - ) - fakeErrorHandler := &fakeErrorHandler{} - otel.SetErrorHandler(fakeErrorHandler) - for _, tc := range []struct { - desc string - input []*metricdata.Metric - exportErr error - expected []export.Record - expectedResource *resource.Resource - expectedHandledError error - }{ - { - desc: "no metrics", - }, - { - desc: "metric without points is dropped", - input: []*metricdata.Metric{ - { - TimeSeries: []*metricdata.TimeSeries{ - {}, - }, - }, - }, - }, - { - desc: "descriptor conversion error", - input: []*metricdata.Metric{ - // TypeGaugeDistribution isn't supported - {Descriptor: metricdata.Descriptor{Type: metricdata.TypeGaugeDistribution}}, - }, - expectedHandledError: errConversion, - }, - { - desc: "attrs conversion error", - input: []*metricdata.Metric{ - { - // No descriptor with attribute keys. 
- TimeSeries: []*metricdata.TimeSeries{ - // 1 attribute value, which doesn't exist in keys. - { - LabelValues: []metricdata.LabelValue{{Value: "foo", Present: true}}, - Points: []metricdata.Point{ - {}, - }, - }, - }, - }, - }, - expectedHandledError: errConversion, - }, - { - desc: "unsupported summary point type", - input: []*metricdata.Metric{ - { - TimeSeries: []*metricdata.TimeSeries{ - { - Points: []metricdata.Point{ - {Value: &metricdata.Summary{}}, - }, - }, - }, - }, - }, - exportErr: errIncompatibleType, - }, - { - desc: "success", - input: []*metricdata.Metric{ - { - Resource: &ocresource.Resource{ - Labels: map[string]string{ - "R1": "V1", - "R2": "V2", - }, - }, - TimeSeries: []*metricdata.TimeSeries{ - { - StartTime: now, - Points: []metricdata.Point{ - {Value: int64(123), Time: now}, - }, - }, - }, - }, - }, - expectedResource: resource.NewSchemaless( - attribute.String("R1", "V1"), - attribute.String("R2", "V2"), - ), - expected: []export.Record{ - export.NewRecord( - &basicDesc, - attribute.EmptySet(), - &ocRawAggregator{ - value: number.NewInt64Number(123), - time: now, - }, - now, - now, - ), - }, - }, - { - desc: "export error after success", - input: []*metricdata.Metric{ - { - TimeSeries: []*metricdata.TimeSeries{ - { - StartTime: now, - Points: []metricdata.Point{ - {Value: int64(123), Time: now}, - }, - }, - }, - }, - }, - expected: []export.Record{ - export.NewRecord( - &basicDesc, - attribute.EmptySet(), - &ocRawAggregator{ - value: number.NewInt64Number(123), - time: now, - }, - now, - now, - ), - }, - exportErr: errors.New("failed to export"), - }, - { - desc: "partial success sends correct metrics and drops incorrect metrics with handled err", - input: []*metricdata.Metric{ - { - TimeSeries: []*metricdata.TimeSeries{ - { - StartTime: now, - Points: []metricdata.Point{ - {Value: int64(123), Time: now}, - }, - }, - }, - }, - // TypeGaugeDistribution isn't supported - {Descriptor: metricdata.Descriptor{Type: metricdata.TypeGaugeDistribution}}, - }, - expected: []export.Record{ - export.NewRecord( - &basicDesc, - attribute.EmptySet(), - &ocRawAggregator{ - value: number.NewInt64Number(123), - time: now, - }, - now, - now, - ), - }, - expectedHandledError: errConversion, - }, - } { - t.Run(tc.desc, func(t *testing.T) { - fakeExporter := &fakeExporter{err: tc.exportErr} - err := NewMetricExporter(fakeExporter).ExportMetrics(context.Background(), tc.input) - if !errors.Is(err, tc.exportErr) { - t.Errorf("NewMetricExporter(%+v) = err(%v), want err(%v)", tc.input, err, tc.exportErr) - } - // Check the global error handler, since we don't return errors - // which occur during conversion.
- err = fakeErrorHandler.matches(tc.expectedHandledError) - if err != nil { - t.Fatalf("ExportMetrics(%+v) = %v", tc.input, err) - } - output := fakeExporter.records - if len(tc.expected) != len(output) { - t.Fatalf("ExportMetrics(%+v) = %d records, want %d records", tc.input, len(output), len(tc.expected)) - } - if fakeExporter.resource.String() != tc.expectedResource.String() { - t.Errorf("ExportMetrics(%+v)[i].Resource() = %+v, want %+v", tc.input, fakeExporter.resource.String(), tc.expectedResource.String()) - } - for i, expected := range tc.expected { - if output[i].StartTime() != expected.StartTime() { - t.Errorf("ExportMetrics(%+v)[i].StartTime() = %+v, want %+v", tc.input, output[i].StartTime(), expected.StartTime()) - } - if output[i].EndTime() != expected.EndTime() { - t.Errorf("ExportMetrics(%+v)[i].EndTime() = %+v, want %+v", tc.input, output[i].EndTime(), expected.EndTime()) - } - if output[i].Descriptor().Name() != expected.Descriptor().Name() { - t.Errorf("ExportMetrics(%+v)[i].Descriptor() = %+v, want %+v", tc.input, output[i].Descriptor().Name(), expected.Descriptor().Name()) - } - // Don't bother with a complete check of the descriptor. - // That is checked as part of descriptor conversion tests below. - if !output[i].Attributes().Equals(expected.Attributes()) { - t.Errorf("ExportMetrics(%+v)[i].Attributes() = %+v, want %+v", tc.input, output[i].Attributes(), expected.Attributes()) - } - if output[i].Aggregation().Kind() != expected.Aggregation().Kind() { - t.Errorf("ExportMetrics(%+v)[i].Aggregation() = %+v, want %+v", tc.input, output[i].Aggregation().Kind(), expected.Aggregation().Kind()) - } - // Don't bother checking the contents of the points aggregation. - // Those tests are done with the aggregations themselves - } - }) - } -} - -func TestConvertAttributes(t *testing.T) { - setWithMultipleKeys := attribute.NewSet( - attribute.KeyValue{Key: attribute.Key("first"), Value: attribute.StringValue("1")}, - attribute.KeyValue{Key: attribute.Key("second"), Value: attribute.StringValue("2")}, - ) - for _, tc := range []struct { - desc string - inputKeys []metricdata.LabelKey - inputValues []metricdata.LabelValue - expected *attribute.Set - expectedErr error - }{ - { - desc: "no attributes", - expected: attribute.EmptySet(), - }, - { - desc: "different numbers of keys and values", - inputKeys: []metricdata.LabelKey{{Key: "foo"}}, - expected: attribute.EmptySet(), - expectedErr: errConversion, - }, - { - desc: "multiple keys and values", - inputKeys: []metricdata.LabelKey{{Key: "first"}, {Key: "second"}}, - inputValues: []metricdata.LabelValue{ - {Value: "1", Present: true}, - {Value: "2", Present: true}, - }, - expected: &setWithMultipleKeys, - }, - { - desc: "multiple keys and values with some not present", - inputKeys: []metricdata.LabelKey{{Key: "first"}, {Key: "second"}, {Key: "third"}}, - inputValues: []metricdata.LabelValue{ - {Value: "1", Present: true}, - {Value: "2", Present: true}, - {Present: false}, - }, - expected: &setWithMultipleKeys, - }, - } { - t.Run(tc.desc, func(t *testing.T) { - output, err := convertAttrs(tc.inputKeys, tc.inputValues) - if !errors.Is(err, tc.expectedErr) { - t.Errorf("convertAttrs(keys: %v, values: %v) = err(%v), want err(%v)", tc.inputKeys, tc.inputValues, err, tc.expectedErr) - } - if !output.Equals(tc.expected) { - t.Errorf("convertAttrs(keys: %v, values: %v) = %+v, want %+v", tc.inputKeys, tc.inputValues, output.ToSlice(), tc.expected.ToSlice()) - } - }) - } -} -func TestConvertResource(t *testing.T) { - for _, tc := range []struct 
{ - desc string - input *ocresource.Resource - expected *resource.Resource - }{ - { - desc: "nil resource", - }, - { - desc: "empty resource", - input: &ocresource.Resource{ - Labels: map[string]string{}, - }, - expected: resource.NewSchemaless(), - }, - { - desc: "resource with attributes", - input: &ocresource.Resource{ - Labels: map[string]string{ - "foo": "bar", - "tick": "tock", - }, - }, - expected: resource.NewSchemaless( - attribute.KeyValue{Key: attribute.Key("foo"), Value: attribute.StringValue("bar")}, - attribute.KeyValue{Key: attribute.Key("tick"), Value: attribute.StringValue("tock")}, - ), - }, - } { - t.Run(tc.desc, func(t *testing.T) { - output := convertResource(tc.input) - if !output.Equal(tc.expected) { - t.Errorf("convertResource(%v) = %+v, want %+v", tc.input, output, tc.expected) - } - }) - } -} -func TestConvertDescriptor(t *testing.T) { - for _, tc := range []struct { - desc string - input metricdata.Descriptor - expected sdkapi.Descriptor - expectedErr error - }{ - { - desc: "empty descriptor", - expected: metrictest.NewDescriptor( - "", - sdkapi.GaugeObserverInstrumentKind, - number.Int64Kind, - ), - }, - { - desc: "gauge int64 bytes", - input: metricdata.Descriptor{ - Name: "foo", - Description: "bar", - Unit: metricdata.UnitBytes, - Type: metricdata.TypeGaugeInt64, - }, - expected: metrictest.NewDescriptor( - "foo", - sdkapi.GaugeObserverInstrumentKind, - number.Int64Kind, - instrument.WithDescription("bar"), - instrument.WithUnit(unit.Bytes), - ), - }, - { - desc: "gauge float64 ms", - input: metricdata.Descriptor{ - Name: "foo", - Description: "bar", - Unit: metricdata.UnitMilliseconds, - Type: metricdata.TypeGaugeFloat64, - }, - expected: metrictest.NewDescriptor( - "foo", - sdkapi.GaugeObserverInstrumentKind, - number.Float64Kind, - instrument.WithDescription("bar"), - instrument.WithUnit(unit.Milliseconds), - ), - }, - { - desc: "cumulative int64 dimensionless", - input: metricdata.Descriptor{ - Name: "foo", - Description: "bar", - Unit: metricdata.UnitDimensionless, - Type: metricdata.TypeCumulativeInt64, - }, - expected: metrictest.NewDescriptor( - "foo", - sdkapi.CounterObserverInstrumentKind, - number.Int64Kind, - instrument.WithDescription("bar"), - instrument.WithUnit(unit.Dimensionless), - ), - }, - { - desc: "cumulative float64 dimensionless", - input: metricdata.Descriptor{ - Name: "foo", - Description: "bar", - Unit: metricdata.UnitDimensionless, - Type: metricdata.TypeCumulativeFloat64, - }, - expected: metrictest.NewDescriptor( - "foo", - sdkapi.CounterObserverInstrumentKind, - number.Float64Kind, - instrument.WithDescription("bar"), - instrument.WithUnit(unit.Dimensionless), - ), - }, - { - desc: "incompatible TypeCumulativeDistribution", - input: metricdata.Descriptor{ - Name: "foo", - Description: "bar", - Type: metricdata.TypeCumulativeDistribution, - }, - expectedErr: errConversion, - }, - } { - t.Run(tc.desc, func(t *testing.T) { - output, err := convertDescriptor(tc.input) - if !errors.Is(err, tc.expectedErr) { - t.Errorf("convertDescriptor(%v) = err(%v), want err(%v)", tc.input, err, tc.expectedErr) - } - if output != tc.expected { - t.Errorf("convertDescriptor(%v) = %+v, want %+v", tc.input, output, tc.expected) - } - }) - } -} diff --git a/bridge/opencensus/go.mod b/bridge/opencensus/go.mod index 2fe1c84e302..4faa85d3607 100644 --- a/bridge/opencensus/go.mod +++ b/bridge/opencensus/go.mod @@ -5,26 +5,15 @@ go 1.17 require ( go.opencensus.io v0.22.6-0.20201102222123-380f4078db9f go.opentelemetry.io/otel v1.10.0 - 
go.opentelemetry.io/otel/metric v0.31.0 - go.opentelemetry.io/otel/sdk v1.10.0 - go.opentelemetry.io/otel/sdk/metric v0.31.0 go.opentelemetry.io/otel/trace v1.10.0 ) require ( - github.com/benbjohnson/clock v1.3.0 // indirect github.com/go-logr/logr v1.2.3 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6 // indirect - golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7 // indirect ) replace go.opentelemetry.io/otel => ../.. -replace go.opentelemetry.io/otel/sdk => ../../sdk - -replace go.opentelemetry.io/otel/metric => ../../metric - -replace go.opentelemetry.io/otel/sdk/metric => ../../sdk/metric - replace go.opentelemetry.io/otel/trace => ../../trace diff --git a/bridge/opencensus/go.sum b/bridge/opencensus/go.sum index e42b184d238..2e90b382eb9 100644 --- a/bridge/opencensus/go.sum +++ b/bridge/opencensus/go.sum @@ -1,7 +1,5 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= -github.com/benbjohnson/clock v1.3.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -42,8 +40,6 @@ golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7 h1:iGu644GcxtEcrInvDsQRCwJjtCIOlT2V7IRt6ah2Whw= -golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= diff --git a/sdk/metric/metrictest/doc.go b/bridge/opencensus/opencensusmetric/doc.go similarity index 76% rename from sdk/metric/metrictest/doc.go rename to bridge/opencensus/opencensusmetric/doc.go index 504384dd3ab..a15e8b2aeb4 100644 --- a/sdk/metric/metrictest/doc.go +++ b/bridge/opencensus/opencensusmetric/doc.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -// The metrictest package is a collection of tools used to make testing parts of -// the SDK easier. - -package metrictest // import "go.opentelemetry.io/otel/sdk/metric/metrictest" +/* +Package opencensusmetric provides a metric bridge from OpenCensus to OpenTelemetry. 
+*/ +package opencensusmetric // import "go.opentelemetry.io/otel/bridge/opencensus/opencensusmetric" diff --git a/bridge/opencensus/opencensusmetric/go.mod b/bridge/opencensus/opencensusmetric/go.mod new file mode 100644 index 00000000000..2c98a6e1371 --- /dev/null +++ b/bridge/opencensus/opencensusmetric/go.mod @@ -0,0 +1,28 @@ +module go.opentelemetry.io/otel/bridge/opencensus/opencensusmetric + +go 1.18 + +require ( + go.opencensus.io v0.23.0 + go.opentelemetry.io/otel v1.10.0 + go.opentelemetry.io/otel/metric v0.0.0-00010101000000-000000000000 + go.opentelemetry.io/otel/sdk/metric v0.0.0-00010101000000-000000000000 +) + +require ( + github.com/go-logr/logr v1.2.3 // indirect + github.com/go-logr/stdr v1.2.2 // indirect + go.opentelemetry.io/otel/sdk v0.0.0-00010101000000-000000000000 // indirect + go.opentelemetry.io/otel/trace v1.10.0 // indirect + golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7 // indirect +) + +replace go.opentelemetry.io/otel => ../../.. + +replace go.opentelemetry.io/otel/sdk => ../../../sdk + +replace go.opentelemetry.io/otel/metric => ../../../metric + +replace go.opentelemetry.io/otel/sdk/metric => ../../../sdk/metric + +replace go.opentelemetry.io/otel/trace => ../../../trace diff --git a/bridge/opencensus/opencensusmetric/go.sum b/bridge/opencensus/opencensusmetric/go.sum new file mode 100644 index 00000000000..099866b7e41 --- /dev/null +++ b/bridge/opencensus/opencensusmetric/go.sum @@ -0,0 +1,98 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= +github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.4.0-rc.1/go.mod 
h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1 h1:5TQK59W5E3v0r2duFAb7P95B6hEeOyEnHRa8MjYSMTY= +go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= +go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/sync 
v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7 h1:iGu644GcxtEcrInvDsQRCwJjtCIOlT2V7IRt6ah2Whw= +golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf 
v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/bridge/opencensus/opencensusmetric/internal/metric.go b/bridge/opencensus/opencensusmetric/internal/metric.go new file mode 100644 index 00000000000..8d2e9a53db0 --- /dev/null +++ b/bridge/opencensus/opencensusmetric/internal/metric.go @@ -0,0 +1,219 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build go1.18 +// +build go1.18 + +package internal // import "go.opentelemetry.io/otel/bridge/opencensus/opencensusmetric/internal" + +import ( + "errors" + "fmt" + + ocmetricdata "go.opencensus.io/metric/metricdata" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/metric/unit" + "go.opentelemetry.io/otel/sdk/metric/metricdata" +) + +var ( + errConversion = errors.New("converting from OpenCensus to OpenTelemetry") + errAggregationType = errors.New("unsupported OpenCensus aggregation type") + errMismatchedValueTypes = errors.New("wrong value type for data point") + errNumberDataPoint = errors.New("converting a number data point") + errHistogramDataPoint = errors.New("converting a histogram data point") + errNegativeDistributionCount = errors.New("distribution count is negative") + errNegativeBucketCount = errors.New("distribution bucket count is negative") + errMismatchedAttributeKeyValues = errors.New("mismatched number of attribute keys and values") +) + +// ConvertMetrics converts metric data from OpenCensus to OpenTelemetry. +func ConvertMetrics(ocmetrics []*ocmetricdata.Metric) ([]metricdata.Metrics, error) { + otelMetrics := make([]metricdata.Metrics, 0, len(ocmetrics)) + var errInfo []string + for _, ocm := range ocmetrics { + if ocm == nil { + continue + } + agg, err := convertAggregation(ocm) + if err != nil { + errInfo = append(errInfo, err.Error()) + continue + } + otelMetrics = append(otelMetrics, metricdata.Metrics{ + Name: ocm.Descriptor.Name, + Description: ocm.Descriptor.Description, + Unit: convertUnit(ocm.Descriptor.Unit), + Data: agg, + }) + } + var aggregatedError error + if len(errInfo) > 0 { + aggregatedError = fmt.Errorf("%w: %q", errConversion, errInfo) + } + return otelMetrics, aggregatedError +} + +// convertAggregation produces an aggregation based on the OpenCensus Metric. 
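+// It fans out on the OpenCensus descriptor type. For example (an
+// illustrative sketch, errors elided), a TypeGaugeInt64 metric with no
+// time series converts to an empty int64 gauge:
+//
+//	agg, _ := convertAggregation(&ocmetricdata.Metric{
+//		Descriptor: ocmetricdata.Descriptor{Type: ocmetricdata.TypeGaugeInt64},
+//	})
+//	// agg is a metricdata.Gauge[int64] with no data points.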
+func convertAggregation(metric *ocmetricdata.Metric) (metricdata.Aggregation, error) {
+	labelKeys := metric.Descriptor.LabelKeys
+	switch metric.Descriptor.Type {
+	case ocmetricdata.TypeGaugeInt64:
+		return convertGauge[int64](labelKeys, metric.TimeSeries)
+	case ocmetricdata.TypeGaugeFloat64:
+		return convertGauge[float64](labelKeys, metric.TimeSeries)
+	case ocmetricdata.TypeCumulativeInt64:
+		return convertSum[int64](labelKeys, metric.TimeSeries)
+	case ocmetricdata.TypeCumulativeFloat64:
+		return convertSum[float64](labelKeys, metric.TimeSeries)
+	case ocmetricdata.TypeCumulativeDistribution:
+		return convertHistogram(labelKeys, metric.TimeSeries)
+		// TODO: Support summaries, once they are in the OTel data types.
+	}
+	return nil, fmt.Errorf("%w: %q", errAggregationType, metric.Descriptor.Type)
+}
+
+// convertGauge converts an OpenCensus gauge to an OpenTelemetry gauge aggregation.
+func convertGauge[N int64 | float64](labelKeys []ocmetricdata.LabelKey, ts []*ocmetricdata.TimeSeries) (metricdata.Gauge[N], error) {
+	points, err := convertNumberDataPoints[N](labelKeys, ts)
+	return metricdata.Gauge[N]{DataPoints: points}, err
+}
+
+// convertSum converts an OpenCensus cumulative to an OpenTelemetry sum aggregation.
+func convertSum[N int64 | float64](labelKeys []ocmetricdata.LabelKey, ts []*ocmetricdata.TimeSeries) (metricdata.Sum[N], error) {
+	points, err := convertNumberDataPoints[N](labelKeys, ts)
+	// OpenCensus sums are always cumulative.
+	return metricdata.Sum[N]{DataPoints: points, Temporality: metricdata.CumulativeTemporality}, err
+}
+
+// convertNumberDataPoints converts OpenCensus TimeSeries to OpenTelemetry DataPoints.
+func convertNumberDataPoints[N int64 | float64](labelKeys []ocmetricdata.LabelKey, ts []*ocmetricdata.TimeSeries) ([]metricdata.DataPoint[N], error) {
+	var points []metricdata.DataPoint[N]
+	var errInfo []string
+	for _, t := range ts {
+		attrs, err := convertAttrs(labelKeys, t.LabelValues)
+		if err != nil {
+			errInfo = append(errInfo, err.Error())
+			continue
+		}
+		for _, p := range t.Points {
+			v, ok := p.Value.(N)
+			if !ok {
+				// %v, not %q: p.Value may hold a non-string type (e.g. float64).
+				errInfo = append(errInfo, fmt.Sprintf("%v: %v", errMismatchedValueTypes, p.Value))
+				continue
+			}
+			points = append(points, metricdata.DataPoint[N]{
+				Attributes: attrs,
+				StartTime:  t.StartTime,
+				Time:       p.Time,
+				Value:      v,
+			})
+		}
+	}
+	var aggregatedError error
+	if len(errInfo) > 0 {
+		aggregatedError = fmt.Errorf("%w: %v", errNumberDataPoint, errInfo)
+	}
+	return points, aggregatedError
+}
+
+// convertHistogram converts OpenCensus Distribution timeseries to an
+// OpenTelemetry Histogram aggregation.
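+// Each OpenCensus Distribution point becomes one HistogramDataPoint:
+// bucket boundaries are taken from BucketOptions.Bounds, and bucket
+// counts are converted to uint64 after rejecting negative values. A
+// sketch of one converted point (fields abridged, values from the
+// tests below):
+//
+//	// Distribution{Count: 8, Sum: 100, Bounds: [1 2 3], Buckets: [1 2 5]}
+//	//   -> HistogramDataPoint{Count: 8, Sum: 100, Bounds: [1 2 3], BucketCounts: [1 2 5]}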
+func convertHistogram(labelKeys []ocmetricdata.LabelKey, ts []*ocmetricdata.TimeSeries) (metricdata.Histogram, error) {
+	points := make([]metricdata.HistogramDataPoint, 0, len(ts))
+	var errInfo []string
+	for _, t := range ts {
+		attrs, err := convertAttrs(labelKeys, t.LabelValues)
+		if err != nil {
+			errInfo = append(errInfo, err.Error())
+			continue
+		}
+		for _, p := range t.Points {
+			dist, ok := p.Value.(*ocmetricdata.Distribution)
+			if !ok {
+				errInfo = append(errInfo, fmt.Sprintf("%v: %v", errMismatchedValueTypes, p.Value))
+				continue
+			}
+			bucketCounts, err := convertBucketCounts(dist.Buckets)
+			if err != nil {
+				errInfo = append(errInfo, err.Error())
+				continue
+			}
+			if dist.Count < 0 {
+				errInfo = append(errInfo, fmt.Sprintf("%v: %d", errNegativeDistributionCount, dist.Count))
+				continue
+			}
+			// TODO: handle exemplars
+			points = append(points, metricdata.HistogramDataPoint{
+				Attributes:   attrs,
+				StartTime:    t.StartTime,
+				Time:         p.Time,
+				Count:        uint64(dist.Count),
+				Sum:          dist.Sum,
+				Bounds:       dist.BucketOptions.Bounds,
+				BucketCounts: bucketCounts,
+			})
+		}
+	}
+	var aggregatedError error
+	if len(errInfo) > 0 {
+		aggregatedError = fmt.Errorf("%w: %v", errHistogramDataPoint, errInfo)
+	}
+	return metricdata.Histogram{DataPoints: points, Temporality: metricdata.CumulativeTemporality}, aggregatedError
+}
+
+// convertBucketCounts converts from OpenCensus bucket counts to slice of uint64.
+func convertBucketCounts(buckets []ocmetricdata.Bucket) ([]uint64, error) {
+	bucketCounts := make([]uint64, len(buckets))
+	for i, bucket := range buckets {
+		if bucket.Count < 0 {
+			return nil, fmt.Errorf("%w: %d", errNegativeBucketCount, bucket.Count)
+		}
+		bucketCounts[i] = uint64(bucket.Count)
+	}
+	return bucketCounts, nil
+}
+
+// convertAttrs converts from OpenCensus attribute keys and values to an
+// OpenTelemetry attribute Set.
+func convertAttrs(keys []ocmetricdata.LabelKey, values []ocmetricdata.LabelValue) (attribute.Set, error) {
+	if len(keys) != len(values) {
+		return attribute.NewSet(), fmt.Errorf("%w: keys(%d) values(%d)", errMismatchedAttributeKeyValues, len(keys), len(values))
+	}
+	attrs := []attribute.KeyValue{}
+	for i, lv := range values {
+		if !lv.Present {
+			continue
+		}
+		attrs = append(attrs, attribute.KeyValue{
+			Key:   attribute.Key(keys[i].Key),
+			Value: attribute.StringValue(lv.Value),
+		})
+	}
+	return attribute.NewSet(attrs...), nil
+}
+
+// convertUnit converts from the OpenCensus unit to OpenTelemetry unit.
+func convertUnit(u ocmetricdata.Unit) unit.Unit {
+	switch u {
+	case ocmetricdata.UnitDimensionless:
+		return unit.Dimensionless
+	case ocmetricdata.UnitBytes:
+		return unit.Bytes
+	case ocmetricdata.UnitMilliseconds:
+		return unit.Milliseconds
+	}
+	return unit.Unit(string(u))
+}
diff --git a/bridge/opencensus/opencensusmetric/internal/metric_test.go b/bridge/opencensus/opencensusmetric/internal/metric_test.go
new file mode 100644
index 00000000000..4088972cc3f
--- /dev/null
+++ b/bridge/opencensus/opencensusmetric/internal/metric_test.go
@@ -0,0 +1,667 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build go1.18 +// +build go1.18 + +package internal // import "go.opentelemetry.io/otel/bridge/opencensus/opencensusmetric/internal" + +import ( + "errors" + "testing" + "time" + + ocmetricdata "go.opencensus.io/metric/metricdata" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/metric/unit" + "go.opentelemetry.io/otel/sdk/metric/metricdata" + "go.opentelemetry.io/otel/sdk/metric/metricdata/metricdatatest" +) + +func TestConvertMetrics(t *testing.T) { + endTime1 := time.Now() + endTime2 := endTime1.Add(-time.Millisecond) + startTime := endTime2.Add(-time.Minute) + for _, tc := range []struct { + desc string + input []*ocmetricdata.Metric + expected []metricdata.Metrics + expectedErr error + }{ + { + desc: "empty", + expected: []metricdata.Metrics{}, + }, + { + desc: "normal Histogram, gauges, and sums", + input: []*ocmetricdata.Metric{ + { + Descriptor: ocmetricdata.Descriptor{ + Name: "foo.com/histogram-a", + Description: "a testing histogram", + Unit: ocmetricdata.UnitDimensionless, + Type: ocmetricdata.TypeCumulativeDistribution, + LabelKeys: []ocmetricdata.LabelKey{ + {Key: "a"}, + {Key: "b"}, + }, + }, + TimeSeries: []*ocmetricdata.TimeSeries{ + { + + LabelValues: []ocmetricdata.LabelValue{ + { + Value: "hello", + Present: true, + }, { + Value: "world", + Present: true, + }, + }, + Points: []ocmetricdata.Point{ + ocmetricdata.NewDistributionPoint(endTime1, &ocmetricdata.Distribution{ + Count: 8, + Sum: 100.0, + BucketOptions: &ocmetricdata.BucketOptions{ + Bounds: []float64{1.0, 2.0, 3.0}, + }, + Buckets: []ocmetricdata.Bucket{ + {Count: 1}, + {Count: 2}, + {Count: 5}, + }, + }), + ocmetricdata.NewDistributionPoint(endTime2, &ocmetricdata.Distribution{ + Count: 10, + Sum: 110.0, + BucketOptions: &ocmetricdata.BucketOptions{ + Bounds: []float64{1.0, 2.0, 3.0}, + }, + Buckets: []ocmetricdata.Bucket{ + {Count: 1}, + {Count: 4}, + {Count: 5}, + }, + }), + }, + StartTime: startTime, + }, + }, + }, { + Descriptor: ocmetricdata.Descriptor{ + Name: "foo.com/gauge-a", + Description: "an int testing gauge", + Unit: ocmetricdata.UnitBytes, + Type: ocmetricdata.TypeGaugeInt64, + LabelKeys: []ocmetricdata.LabelKey{ + {Key: "c"}, + {Key: "d"}, + }, + }, + TimeSeries: []*ocmetricdata.TimeSeries{ + { + LabelValues: []ocmetricdata.LabelValue{ + { + Value: "foo", + Present: true, + }, { + Value: "bar", + Present: true, + }, + }, + Points: []ocmetricdata.Point{ + ocmetricdata.NewInt64Point(endTime1, 123), + ocmetricdata.NewInt64Point(endTime2, 1236), + }, + }, + }, + }, { + Descriptor: ocmetricdata.Descriptor{ + Name: "foo.com/gauge-b", + Description: "a float testing gauge", + Unit: ocmetricdata.UnitBytes, + Type: ocmetricdata.TypeGaugeFloat64, + LabelKeys: []ocmetricdata.LabelKey{ + {Key: "cf"}, + {Key: "df"}, + }, + }, + TimeSeries: []*ocmetricdata.TimeSeries{ + { + LabelValues: []ocmetricdata.LabelValue{ + { + Value: "foof", + Present: true, + }, { + Value: "barf", + Present: true, + }, + }, + Points: []ocmetricdata.Point{ + ocmetricdata.NewFloat64Point(endTime1, 123.4), + ocmetricdata.NewFloat64Point(endTime2, 1236.7), + }, + }, + }, + }, { + Descriptor: ocmetricdata.Descriptor{ + Name: "foo.com/sum-a", + Description: "an int testing sum", + Unit: ocmetricdata.UnitMilliseconds, + Type: ocmetricdata.TypeCumulativeInt64, + LabelKeys: []ocmetricdata.LabelKey{ + {Key: "e"}, + {Key: "f"}, + }, + }, + TimeSeries: []*ocmetricdata.TimeSeries{ + { + LabelValues: 
[]ocmetricdata.LabelValue{ + { + Value: "zig", + Present: true, + }, { + Value: "zag", + Present: true, + }, + }, + Points: []ocmetricdata.Point{ + ocmetricdata.NewInt64Point(endTime1, 13), + ocmetricdata.NewInt64Point(endTime2, 14), + }, + }, + }, + }, { + Descriptor: ocmetricdata.Descriptor{ + Name: "foo.com/sum-b", + Description: "a float testing sum", + Unit: ocmetricdata.UnitMilliseconds, + Type: ocmetricdata.TypeCumulativeFloat64, + LabelKeys: []ocmetricdata.LabelKey{ + {Key: "e"}, + {Key: "f"}, + }, + }, + TimeSeries: []*ocmetricdata.TimeSeries{ + { + LabelValues: []ocmetricdata.LabelValue{ + { + Value: "zig", + Present: true, + }, { + Value: "zag", + Present: true, + }, + }, + Points: []ocmetricdata.Point{ + ocmetricdata.NewFloat64Point(endTime1, 12.3), + ocmetricdata.NewFloat64Point(endTime2, 123.4), + }, + }, + }, + }, + }, + expected: []metricdata.Metrics{ + { + Name: "foo.com/histogram-a", + Description: "a testing histogram", + Unit: unit.Dimensionless, + Data: metricdata.Histogram{ + DataPoints: []metricdata.HistogramDataPoint{ + { + Attributes: attribute.NewSet(attribute.KeyValue{ + Key: attribute.Key("a"), + Value: attribute.StringValue("hello"), + }, attribute.KeyValue{ + Key: attribute.Key("b"), + Value: attribute.StringValue("world"), + }), + StartTime: startTime, + Time: endTime1, + Count: 8, + Sum: 100.0, + Bounds: []float64{1.0, 2.0, 3.0}, + BucketCounts: []uint64{1, 2, 5}, + }, { + Attributes: attribute.NewSet(attribute.KeyValue{ + Key: attribute.Key("a"), + Value: attribute.StringValue("hello"), + }, attribute.KeyValue{ + Key: attribute.Key("b"), + Value: attribute.StringValue("world"), + }), + StartTime: startTime, + Time: endTime2, + Count: 10, + Sum: 110.0, + Bounds: []float64{1.0, 2.0, 3.0}, + BucketCounts: []uint64{1, 4, 5}, + }, + }, + Temporality: metricdata.CumulativeTemporality, + }, + }, { + Name: "foo.com/gauge-a", + Description: "an int testing gauge", + Unit: unit.Bytes, + Data: metricdata.Gauge[int64]{ + DataPoints: []metricdata.DataPoint[int64]{ + { + Attributes: attribute.NewSet(attribute.KeyValue{ + Key: attribute.Key("c"), + Value: attribute.StringValue("foo"), + }, attribute.KeyValue{ + Key: attribute.Key("d"), + Value: attribute.StringValue("bar"), + }), + Time: endTime1, + Value: 123, + }, { + Attributes: attribute.NewSet(attribute.KeyValue{ + Key: attribute.Key("c"), + Value: attribute.StringValue("foo"), + }, attribute.KeyValue{ + Key: attribute.Key("d"), + Value: attribute.StringValue("bar"), + }), + Time: endTime2, + Value: 1236, + }, + }, + }, + }, { + Name: "foo.com/gauge-b", + Description: "a float testing gauge", + Unit: unit.Bytes, + Data: metricdata.Gauge[float64]{ + DataPoints: []metricdata.DataPoint[float64]{ + { + Attributes: attribute.NewSet(attribute.KeyValue{ + Key: attribute.Key("cf"), + Value: attribute.StringValue("foof"), + }, attribute.KeyValue{ + Key: attribute.Key("df"), + Value: attribute.StringValue("barf"), + }), + Time: endTime1, + Value: 123.4, + }, { + Attributes: attribute.NewSet(attribute.KeyValue{ + Key: attribute.Key("cf"), + Value: attribute.StringValue("foof"), + }, attribute.KeyValue{ + Key: attribute.Key("df"), + Value: attribute.StringValue("barf"), + }), + Time: endTime2, + Value: 1236.7, + }, + }, + }, + }, { + Name: "foo.com/sum-a", + Description: "an int testing sum", + Unit: unit.Milliseconds, + Data: metricdata.Sum[int64]{ + Temporality: metricdata.CumulativeTemporality, + DataPoints: []metricdata.DataPoint[int64]{ + { + Attributes: attribute.NewSet(attribute.KeyValue{ + Key: attribute.Key("e"), + 
Value: attribute.StringValue("zig"), + }, attribute.KeyValue{ + Key: attribute.Key("f"), + Value: attribute.StringValue("zag"), + }), + Time: endTime1, + Value: 13, + }, { + Attributes: attribute.NewSet(attribute.KeyValue{ + Key: attribute.Key("e"), + Value: attribute.StringValue("zig"), + }, attribute.KeyValue{ + Key: attribute.Key("f"), + Value: attribute.StringValue("zag"), + }), + Time: endTime2, + Value: 14, + }, + }, + }, + }, { + Name: "foo.com/sum-b", + Description: "a float testing sum", + Unit: unit.Milliseconds, + Data: metricdata.Sum[float64]{ + Temporality: metricdata.CumulativeTemporality, + DataPoints: []metricdata.DataPoint[float64]{ + { + Attributes: attribute.NewSet(attribute.KeyValue{ + Key: attribute.Key("e"), + Value: attribute.StringValue("zig"), + }, attribute.KeyValue{ + Key: attribute.Key("f"), + Value: attribute.StringValue("zag"), + }), + Time: endTime1, + Value: 12.3, + }, { + Attributes: attribute.NewSet(attribute.KeyValue{ + Key: attribute.Key("e"), + Value: attribute.StringValue("zig"), + }, attribute.KeyValue{ + Key: attribute.Key("f"), + Value: attribute.StringValue("zag"), + }), + Time: endTime2, + Value: 123.4, + }, + }, + }, + }, + }, + }, { + desc: "histogram without data points", + input: []*ocmetricdata.Metric{ + { + Descriptor: ocmetricdata.Descriptor{ + Name: "foo.com/histogram-a", + Description: "a testing histogram", + Unit: ocmetricdata.UnitDimensionless, + Type: ocmetricdata.TypeCumulativeDistribution, + }, + }, + }, + expected: []metricdata.Metrics{ + { + Name: "foo.com/histogram-a", + Description: "a testing histogram", + Unit: unit.Dimensionless, + Data: metricdata.Histogram{ + Temporality: metricdata.CumulativeTemporality, + DataPoints: []metricdata.HistogramDataPoint{}, + }, + }, + }, + }, { + desc: "sum without data points", + input: []*ocmetricdata.Metric{ + { + Descriptor: ocmetricdata.Descriptor{ + Name: "foo.com/sum-a", + Description: "a testing sum", + Unit: ocmetricdata.UnitDimensionless, + Type: ocmetricdata.TypeCumulativeFloat64, + }, + }, + }, + expected: []metricdata.Metrics{ + { + Name: "foo.com/sum-a", + Description: "a testing sum", + Unit: unit.Dimensionless, + Data: metricdata.Sum[float64]{ + Temporality: metricdata.CumulativeTemporality, + DataPoints: []metricdata.DataPoint[float64]{}, + }, + }, + }, + }, { + desc: "gauge without data points", + input: []*ocmetricdata.Metric{ + { + Descriptor: ocmetricdata.Descriptor{ + Name: "foo.com/gauge-a", + Description: "a testing gauge", + Unit: ocmetricdata.UnitDimensionless, + Type: ocmetricdata.TypeGaugeInt64, + }, + }, + }, + expected: []metricdata.Metrics{ + { + Name: "foo.com/gauge-a", + Description: "a testing gauge", + Unit: unit.Dimensionless, + Data: metricdata.Gauge[int64]{ + DataPoints: []metricdata.DataPoint[int64]{}, + }, + }, + }, + }, { + desc: "histogram with negative count", + input: []*ocmetricdata.Metric{ + { + Descriptor: ocmetricdata.Descriptor{ + Name: "foo.com/histogram-a", + Description: "a testing histogram", + Unit: ocmetricdata.UnitDimensionless, + Type: ocmetricdata.TypeCumulativeDistribution, + }, + TimeSeries: []*ocmetricdata.TimeSeries{ + { + Points: []ocmetricdata.Point{ + ocmetricdata.NewDistributionPoint(endTime1, &ocmetricdata.Distribution{ + Count: -8, + }), + }, + StartTime: startTime, + }, + }, + }, + }, + expectedErr: errConversion, + }, { + desc: "histogram with negative bucket count", + input: []*ocmetricdata.Metric{ + { + Descriptor: ocmetricdata.Descriptor{ + Name: "foo.com/histogram-a", + Description: "a testing histogram", + Unit: 
ocmetricdata.UnitDimensionless, + Type: ocmetricdata.TypeCumulativeDistribution, + }, + TimeSeries: []*ocmetricdata.TimeSeries{ + { + Points: []ocmetricdata.Point{ + ocmetricdata.NewDistributionPoint(endTime1, &ocmetricdata.Distribution{ + Buckets: []ocmetricdata.Bucket{ + {Count: -1}, + {Count: 2}, + {Count: 5}, + }, + }), + }, + StartTime: startTime, + }, + }, + }, + }, + expectedErr: errConversion, + }, { + desc: "histogram with non-histogram datapoint type", + input: []*ocmetricdata.Metric{ + { + Descriptor: ocmetricdata.Descriptor{ + Name: "foo.com/bad-point", + Description: "a bad type", + Unit: ocmetricdata.UnitDimensionless, + Type: ocmetricdata.TypeCumulativeDistribution, + }, + TimeSeries: []*ocmetricdata.TimeSeries{ + { + Points: []ocmetricdata.Point{ + ocmetricdata.NewFloat64Point(endTime1, 1.0), + }, + StartTime: startTime, + }, + }, + }, + }, + expectedErr: errConversion, + }, { + desc: "sum with non-sum datapoint type", + input: []*ocmetricdata.Metric{ + { + Descriptor: ocmetricdata.Descriptor{ + Name: "foo.com/bad-point", + Description: "a bad type", + Unit: ocmetricdata.UnitDimensionless, + Type: ocmetricdata.TypeCumulativeFloat64, + }, + TimeSeries: []*ocmetricdata.TimeSeries{ + { + Points: []ocmetricdata.Point{ + ocmetricdata.NewDistributionPoint(endTime1, &ocmetricdata.Distribution{}), + }, + StartTime: startTime, + }, + }, + }, + }, + expectedErr: errConversion, + }, { + desc: "gauge with non-gauge datapoint type", + input: []*ocmetricdata.Metric{ + { + Descriptor: ocmetricdata.Descriptor{ + Name: "foo.com/bad-point", + Description: "a bad type", + Unit: ocmetricdata.UnitDimensionless, + Type: ocmetricdata.TypeGaugeFloat64, + }, + TimeSeries: []*ocmetricdata.TimeSeries{ + { + Points: []ocmetricdata.Point{ + ocmetricdata.NewDistributionPoint(endTime1, &ocmetricdata.Distribution{}), + }, + StartTime: startTime, + }, + }, + }, + }, + expectedErr: errConversion, + }, { + desc: "unsupported Gauge Distribution type", + input: []*ocmetricdata.Metric{ + { + Descriptor: ocmetricdata.Descriptor{ + Name: "foo.com/bad-point", + Description: "a bad type", + Unit: ocmetricdata.UnitDimensionless, + Type: ocmetricdata.TypeGaugeDistribution, + }, + }, + }, + expectedErr: errConversion, + }, + } { + t.Run(tc.desc, func(t *testing.T) { + output, err := ConvertMetrics(tc.input) + if !errors.Is(err, tc.expectedErr) { + t.Errorf("convertAggregation(%+v) = err(%v), want err(%v)", tc.input, err, tc.expectedErr) + } + metricdatatest.AssertEqual[metricdata.ScopeMetrics](t, + metricdata.ScopeMetrics{Metrics: tc.expected}, + metricdata.ScopeMetrics{Metrics: output}) + }) + } +} + +func TestConvertUnits(t *testing.T) { + var noUnit unit.Unit + for _, tc := range []struct { + desc string + input ocmetricdata.Unit + expected unit.Unit + }{{ + desc: "unspecified unit", + expected: noUnit, + }, { + desc: "dimensionless", + input: ocmetricdata.UnitDimensionless, + expected: unit.Dimensionless, + }, { + desc: "milliseconds", + input: ocmetricdata.UnitMilliseconds, + expected: unit.Milliseconds, + }, { + desc: "bytes", + input: ocmetricdata.UnitBytes, + expected: unit.Bytes, + }, + } { + t.Run(tc.desc, func(t *testing.T) { + output := convertUnit(tc.input) + if output != tc.expected { + t.Errorf("convertUnit(%v) = %q, want %q", tc.input, output, tc.expected) + } + }) + } +} + +func TestConvertAttributes(t *testing.T) { + setWithMultipleKeys := attribute.NewSet( + attribute.KeyValue{Key: attribute.Key("first"), Value: attribute.StringValue("1")}, + attribute.KeyValue{Key: attribute.Key("second"), Value: 
attribute.StringValue("2")}, + ) + for _, tc := range []struct { + desc string + inputKeys []ocmetricdata.LabelKey + inputValues []ocmetricdata.LabelValue + expected *attribute.Set + expectedErr error + }{ + { + desc: "no attributes", + expected: attribute.EmptySet(), + }, + { + desc: "different numbers of keys and values", + inputKeys: []ocmetricdata.LabelKey{{Key: "foo"}}, + expected: attribute.EmptySet(), + expectedErr: errMismatchedAttributeKeyValues, + }, + { + desc: "multiple keys and values", + inputKeys: []ocmetricdata.LabelKey{{Key: "first"}, {Key: "second"}}, + inputValues: []ocmetricdata.LabelValue{ + {Value: "1", Present: true}, + {Value: "2", Present: true}, + }, + expected: &setWithMultipleKeys, + }, + { + desc: "multiple keys and values with some not present", + inputKeys: []ocmetricdata.LabelKey{{Key: "first"}, {Key: "second"}, {Key: "third"}}, + inputValues: []ocmetricdata.LabelValue{ + {Value: "1", Present: true}, + {Value: "2", Present: true}, + {Present: false}, + }, + expected: &setWithMultipleKeys, + }, + } { + t.Run(tc.desc, func(t *testing.T) { + output, err := convertAttrs(tc.inputKeys, tc.inputValues) + if !errors.Is(err, tc.expectedErr) { + t.Errorf("convertAttrs(keys: %v, values: %v) = err(%v), want err(%v)", tc.inputKeys, tc.inputValues, err, tc.expectedErr) + } + if !output.Equals(tc.expected) { + t.Errorf("convertAttrs(keys: %v, values: %v) = %+v, want %+v", tc.inputKeys, tc.inputValues, output.ToSlice(), tc.expected.ToSlice()) + } + }) + } +} diff --git a/bridge/opencensus/test/go.mod b/bridge/opencensus/test/go.mod index 270928f38b5..66226d4b6c2 100644 --- a/bridge/opencensus/test/go.mod +++ b/bridge/opencensus/test/go.mod @@ -14,8 +14,6 @@ require ( github.com/go-logr/logr v1.2.3 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e // indirect - go.opentelemetry.io/otel/metric v0.31.0 // indirect - go.opentelemetry.io/otel/sdk/metric v0.31.0 // indirect golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7 // indirect ) @@ -23,10 +21,6 @@ replace go.opentelemetry.io/otel => ../../.. replace go.opentelemetry.io/otel/bridge/opencensus => ../ -replace go.opentelemetry.io/otel/metric => ../../../metric - replace go.opentelemetry.io/otel/sdk => ../../../sdk -replace go.opentelemetry.io/otel/sdk/metric => ../../../sdk/metric - replace go.opentelemetry.io/otel/trace => ../../../trace diff --git a/bridge/opencensus/test/go.sum b/bridge/opencensus/test/go.sum index cc5af06ea90..c7ddc1e1307 100644 --- a/bridge/opencensus/test/go.sum +++ b/bridge/opencensus/test/go.sum @@ -1,6 +1,5 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= diff --git a/example/opencensus/go.mod b/example/opencensus/go.mod deleted file mode 100644 index ed539b85d35..00000000000 --- a/example/opencensus/go.mod +++ /dev/null @@ -1,38 +0,0 @@ -module go.opentelemetry.io/otel/example/opencensus - -go 1.17 - -replace ( - go.opentelemetry.io/otel => ../.. 
- go.opentelemetry.io/otel/bridge/opencensus => ../../bridge/opencensus - go.opentelemetry.io/otel/sdk => ../../sdk -) - -require ( - go.opencensus.io v0.22.6-0.20201102222123-380f4078db9f - go.opentelemetry.io/otel v1.10.0 - go.opentelemetry.io/otel/bridge/opencensus v0.31.0 - go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v0.31.0 - go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.10.0 - go.opentelemetry.io/otel/sdk v1.10.0 - go.opentelemetry.io/otel/sdk/metric v0.31.0 -) - -require ( - github.com/go-logr/logr v1.2.3 // indirect - github.com/go-logr/stdr v1.2.2 // indirect - github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6 // indirect - go.opentelemetry.io/otel/metric v0.31.0 // indirect - go.opentelemetry.io/otel/trace v1.10.0 // indirect - golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7 // indirect -) - -replace go.opentelemetry.io/otel/metric => ../../metric - -replace go.opentelemetry.io/otel/sdk/metric => ../../sdk/metric - -replace go.opentelemetry.io/otel/trace => ../../trace - -replace go.opentelemetry.io/otel/exporters/stdout/stdoutmetric => ../../exporters/stdout/stdoutmetric - -replace go.opentelemetry.io/otel/exporters/stdout/stdouttrace => ../../exporters/stdout/stdouttrace diff --git a/example/opencensus/go.sum b/example/opencensus/go.sum deleted file mode 100644 index 98f74c56098..00000000000 --- a/example/opencensus/go.sum +++ /dev/null @@ -1,61 +0,0 @@ -cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= -github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= -github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= -github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6 h1:ZgQEtGgCBiWRM39fZuwSd1LwSqqSW0hOdXCYYDX0R3I= -github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -github.com/stretchr/testify v1.7.1 
h1:5TQK59W5E3v0r2duFAb7P95B6hEeOyEnHRa8MjYSMTY= -go.opencensus.io v0.22.6-0.20201102222123-380f4078db9f h1:IUmbcoP9XyEXW+R9AbrZgDvaYVfTbISN92Y5RIV+Mx4= -go.opencensus.io v0.22.6-0.20201102222123-380f4078db9f/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= -golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7 h1:iGu644GcxtEcrInvDsQRCwJjtCIOlT2V7IRt6ah2Whw= -golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= -golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= -google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= -gopkg.in/check.v1 
v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= -honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/example/opencensus/main.go b/example/opencensus/main.go deleted file mode 100644 index 26c648d5948..00000000000 --- a/example/opencensus/main.go +++ /dev/null @@ -1,154 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package main - -import ( - "context" - "fmt" - "log" - "time" - - "go.opencensus.io/metric" - "go.opencensus.io/metric/metricdata" - "go.opencensus.io/metric/metricexport" - "go.opencensus.io/metric/metricproducer" - "go.opencensus.io/stats" - "go.opencensus.io/stats/view" - "go.opencensus.io/tag" - octrace "go.opencensus.io/trace" - - "go.opentelemetry.io/otel" - "go.opentelemetry.io/otel/bridge/opencensus" - "go.opentelemetry.io/otel/exporters/stdout/stdoutmetric" - "go.opentelemetry.io/otel/exporters/stdout/stdouttrace" - "go.opentelemetry.io/otel/sdk/metric/export" - sdktrace "go.opentelemetry.io/otel/sdk/trace" -) - -var ( - // instrumenttype differentiates between our gauge and view metrics. - keyType = tag.MustNewKey("instrumenttype") - // Counts the number of lines read in from standard input. - countMeasure = stats.Int64("test_count", "A count of something", stats.UnitDimensionless) - countView = &view.View{ - Name: "test_count", - Measure: countMeasure, - Description: "A count of something", - Aggregation: view.Count(), - TagKeys: []tag.Key{keyType}, - } -) - -func main() { - log.Println("Using OpenTelemetry stdout exporters.") - traceExporter, err := stdouttrace.New(stdouttrace.WithPrettyPrint()) - if err != nil { - log.Fatal(fmt.Errorf("error creating trace exporter: %w", err)) - } - metricsExporter, err := stdoutmetric.New(stdoutmetric.WithPrettyPrint()) - if err != nil { - log.Fatal(fmt.Errorf("error creating metric exporter: %w", err)) - } - tracing(traceExporter) - if err := monitoring(metricsExporter); err != nil { - log.Fatal(err) - } -} - -// tracing demonstrates overriding the OpenCensus DefaultTracer to send spans -// to the OpenTelemetry exporter by calling OpenCensus APIs. -func tracing(otExporter sdktrace.SpanExporter) { - ctx := context.Background() - - log.Println("Configuring OpenCensus. 
Not Registering any OpenCensus exporters.") - octrace.ApplyConfig(octrace.Config{DefaultSampler: octrace.AlwaysSample()}) - - tp := sdktrace.NewTracerProvider(sdktrace.WithBatcher(otExporter)) - otel.SetTracerProvider(tp) - - log.Println("Installing the OpenCensus bridge to make OpenCensus libraries write spans using OpenTelemetry.") - tracer := tp.Tracer("simple") - octrace.DefaultTracer = opencensus.NewTracer(tracer) - tp.ForceFlush(ctx) - - log.Println("Creating OpenCensus span, which should be printed out using the OpenTelemetry stdouttrace exporter.\n-- It should have no parent, since it is the first span.") - ctx, outerOCSpan := octrace.StartSpan(ctx, "OpenCensusOuterSpan") - outerOCSpan.End() - tp.ForceFlush(ctx) - - log.Println("Creating OpenTelemetry span\n-- It should have the OpenCensus span as a parent, since the OpenCensus span was written with using OpenTelemetry APIs.") - ctx, otspan := tracer.Start(ctx, "OpenTelemetrySpan") - otspan.End() - tp.ForceFlush(ctx) - - log.Println("Creating OpenCensus span, which should be printed out using the OpenTelemetry stdouttrace exporter.\n-- It should have the OpenTelemetry span as a parent, since it was written using OpenTelemetry APIs") - _, innerOCSpan := octrace.StartSpan(ctx, "OpenCensusInnerSpan") - innerOCSpan.End() - tp.ForceFlush(ctx) -} - -// monitoring demonstrates creating an IntervalReader using the OpenTelemetry -// exporter to send metrics to the exporter by using either an OpenCensus -// registry or an OpenCensus view. -func monitoring(otExporter export.Exporter) error { - log.Println("Using the OpenTelemetry stdoutmetric exporter to export OpenCensus metrics. This allows routing telemetry from both OpenTelemetry and OpenCensus to a single exporter.") - ocExporter := opencensus.NewMetricExporter(otExporter) - intervalReader, err := metricexport.NewIntervalReader(&metricexport.Reader{}, ocExporter) - if err != nil { - return fmt.Errorf("failed to create interval reader: %w", err) - } - intervalReader.ReportingInterval = 10 * time.Second - log.Println("Emitting metrics using OpenCensus APIs. 
These should be printed out using the OpenTelemetry stdoutmetric exporter.") - err = intervalReader.Start() - if err != nil { - return fmt.Errorf("failed to start interval reader: %w", err) - } - defer intervalReader.Stop() - - log.Println("Registering a gauge metric using an OpenCensus registry.") - r := metric.NewRegistry() - metricproducer.GlobalManager().AddProducer(r) - gauge, err := r.AddInt64Gauge( - "test_gauge", - metric.WithDescription("A gauge for testing"), - metric.WithConstLabel(map[metricdata.LabelKey]metricdata.LabelValue{ - {Key: keyType.Name()}: metricdata.NewLabelValue("gauge"), - }), - ) - if err != nil { - return fmt.Errorf("failed to add gauge: %w", err) - } - entry, err := gauge.GetEntry() - if err != nil { - return fmt.Errorf("failed to get gauge entry: %w", err) - } - - log.Println("Registering a cumulative metric using an OpenCensus view.") - if err := view.Register(countView); err != nil { - return fmt.Errorf("failed to register views: %w", err) - } - ctx, err := tag.New(context.Background(), tag.Insert(keyType, "view")) - if err != nil { - return fmt.Errorf("failed to set tag: %w", err) - } - for i := int64(1); true; i++ { - // update stats for our gauge - entry.Set(i) - // update stats for our view - stats.Record(ctx, countMeasure.M(1)) - time.Sleep(time.Second) - } - return nil -} diff --git a/exporters/otlp/otlpmetric/internal/otlpconfig/envconfig_test.go b/example/prometheus/doc.go similarity index 88% rename from exporters/otlp/otlpmetric/internal/otlpconfig/envconfig_test.go rename to example/prometheus/doc.go index 25021f7328c..b272a494874 100644 --- a/exporters/otlp/otlpmetric/internal/otlpconfig/envconfig_test.go +++ b/example/prometheus/doc.go @@ -12,4 +12,5 @@ // See the License for the specific language governing permissions and // limitations under the License. -package otlpconfig +// Package main provides a code sample of the Prometheus exporter. +package main diff --git a/example/prometheus/go.mod b/example/prometheus/go.mod index d51de7fec82..441f03e5112 100644 --- a/example/prometheus/go.mod +++ b/example/prometheus/go.mod @@ -1,14 +1,9 @@ module go.opentelemetry.io/otel/example/prometheus -go 1.17 - -replace ( - go.opentelemetry.io/otel => ../.. - go.opentelemetry.io/otel/exporters/prometheus => ../../exporters/prometheus - go.opentelemetry.io/otel/sdk => ../../sdk -) +go 1.18 require ( + github.com/prometheus/client_golang v1.13.0 go.opentelemetry.io/otel v1.10.0 go.opentelemetry.io/otel/exporters/prometheus v0.31.0 go.opentelemetry.io/otel/metric v0.31.0 @@ -22,18 +17,23 @@ require ( github.com/go-logr/stdr v1.2.2 // indirect github.com/golang/protobuf v1.5.2 // indirect github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect - github.com/prometheus/client_golang v1.12.2 // indirect github.com/prometheus/client_model v0.2.0 // indirect - github.com/prometheus/common v0.32.1 // indirect - github.com/prometheus/procfs v0.7.3 // indirect + github.com/prometheus/common v0.37.0 // indirect + github.com/prometheus/procfs v0.8.0 // indirect go.opentelemetry.io/otel/sdk v1.10.0 // indirect go.opentelemetry.io/otel/trace v1.10.0 // indirect - golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 // indirect - google.golang.org/protobuf v1.26.0 // indirect + golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a // indirect + google.golang.org/protobuf v1.28.1 // indirect ) -replace go.opentelemetry.io/otel/metric => ../../metric +replace go.opentelemetry.io/otel => ../.. 
+ +replace go.opentelemetry.io/otel/exporters/prometheus => ../../exporters/prometheus + +replace go.opentelemetry.io/otel/sdk => ../../sdk replace go.opentelemetry.io/otel/sdk/metric => ../../sdk/metric +replace go.opentelemetry.io/otel/metric => ../../metric + replace go.opentelemetry.io/otel/trace => ../../trace diff --git a/example/prometheus/go.sum b/example/prometheus/go.sum index 5563b0bf878..f652c33031b 100644 --- a/example/prometheus/go.sum +++ b/example/prometheus/go.sum @@ -38,7 +38,6 @@ github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuy github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= -github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= @@ -65,9 +64,11 @@ github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2 github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= +github.com/go-kit/log v0.2.0/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= +github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= @@ -165,8 +166,9 @@ github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXP github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= -github.com/prometheus/client_golang v1.12.2 h1:51L9cDoUHVrXx4zWYlcLQIZ+d+VXHgqnYKkIuq4g/34= -github.com/prometheus/client_golang v1.12.2/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= +github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= +github.com/prometheus/client_golang v1.13.0 h1:b71QUfeo5M8gq2+evJdTPfZhYMAU0uKPkyPJ7TPsloU= +github.com/prometheus/client_golang v1.13.0/go.mod h1:vTeo+zgvILHsnnj/39Ou/1fPN5nJFOEMgftOUOmlvYQ= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model 
v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= @@ -175,14 +177,16 @@ github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6T github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= -github.com/prometheus/common v0.32.1 h1:hWIdL3N2HoUx3B8j3YN9mWor0qhY/NlEKZEaXxuIRh4= github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= +github.com/prometheus/common v0.37.0 h1:ccBbHCgIiT9uSoFY0vX8H3zsNR5eLt17/RQLUvn8pXE= +github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= -github.com/prometheus/procfs v0.7.3 h1:4jVXhlkAyzOScmCkXBTOLRLTz8EeU+eyjrwB/EPq0VU= github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= +github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5mo= +github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= @@ -266,12 +270,15 @@ golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81R golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -316,15 +323,20 @@ golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 h1:XfKQ4OlFl8okEOr5UvAqFRVj8pY/4yfcXrddB8qAbU0= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a h1:dGzPydgVsqGcTRVwiLJ1jVbufYwmzD3LfVPLKsKg+0k= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -446,8 +458,9 @@ google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpAD google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.26.0 h1:bxAC2xTBsZGibn2RTntX0oH50xLsqy1OxA9tTL3p/lk= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w= +google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/example/prometheus/main.go b/example/prometheus/main.go index 1a64371b764..fada742d32b 100644 --- a/example/prometheus/main.go +++ b/example/prometheus/main.go @@ -12,6 +12,9 @@ // See the License for the specific language governing 
permissions and // limitations under the License. +//go:build go1.18 +// +build go1.18 + package main import ( @@ -21,118 +24,75 @@ import ( "net/http" "os" "os/signal" - "sync" - "time" + + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promhttp" "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/exporters/prometheus" - "go.opentelemetry.io/otel/metric/global" + otelprom "go.opentelemetry.io/otel/exporters/prometheus" "go.opentelemetry.io/otel/metric/instrument" - "go.opentelemetry.io/otel/sdk/metric/aggregator/histogram" - controller "go.opentelemetry.io/otel/sdk/metric/controller/basic" - "go.opentelemetry.io/otel/sdk/metric/export/aggregation" - processor "go.opentelemetry.io/otel/sdk/metric/processor/basic" - selector "go.opentelemetry.io/otel/sdk/metric/selector/simple" -) - -var ( - lemonsKey = attribute.Key("ex.com/lemons") + "go.opentelemetry.io/otel/sdk/metric" ) -func initMeter() error { - config := prometheus.Config{ - DefaultHistogramBoundaries: []float64{1, 2, 5, 10, 20, 50}, - } - c := controller.New( - processor.NewFactory( - selector.NewWithHistogramDistribution( - histogram.WithExplicitBoundaries(config.DefaultHistogramBoundaries), - ), - aggregation.CumulativeTemporalitySelector(), - processor.WithMemory(true), - ), - ) - exporter, err := prometheus.New(config, c) - if err != nil { - return fmt.Errorf("failed to initialize prometheus exporter: %w", err) - } - - global.SetMeterProvider(exporter.MeterProvider()) +func main() { + ctx := context.Background() - http.HandleFunc("/", exporter.ServeHTTP) - go func() { - _ = http.ListenAndServe(":2222", nil) - }() + // The exporter embeds a default OpenTelemetry Reader and + // implements prometheus.Collector, allowing it to be used as + // both a Reader and Collector. + exporter := otelprom.New() + provider := metric.NewMeterProvider(metric.WithReader(exporter)) + meter := provider.Meter("github.com/open-telemetry/opentelemetry-go/example/prometheus") - fmt.Println("Prometheus server running on :2222") - return nil -} + // Start the prometheus HTTP server and pass the exporter Collector to it + go serveMetrics(exporter.Collector) -func main() { - if err := initMeter(); err != nil { - log.Fatal(err) + attrs := []attribute.KeyValue{ + attribute.Key("A").String("B"), + attribute.Key("C").String("D"), } - meter := global.Meter("ex.com/basic") - - observerLock := new(sync.RWMutex) - observerValueToReport := new(float64) - observerAttrsToReport := new([]attribute.KeyValue) - - gaugeObserver, err := meter.AsyncFloat64().Gauge("ex.com.one") + // This is the equivalent of prometheus.NewCounterVec + counter, err := meter.SyncFloat64().Counter("foo", instrument.WithDescription("a simple counter")) if err != nil { - log.Panicf("failed to initialize instrument: %v", err) + log.Fatal(err) } - _ = meter.RegisterCallback([]instrument.Asynchronous{gaugeObserver}, func(ctx context.Context) { - (*observerLock).RLock() - value := *observerValueToReport - attrs := *observerAttrsToReport - (*observerLock).RUnlock() - gaugeObserver.Observe(ctx, value, attrs...) - }) + counter.Add(ctx, 5, attrs...) - hist, err := meter.SyncFloat64().Histogram("ex.com.two") + gauge, err := meter.SyncFloat64().UpDownCounter("bar", instrument.WithDescription("a fun little gauge")) if err != nil { - log.Panicf("failed to initialize instrument: %v", err) + log.Fatal(err) } - counter, err := meter.SyncFloat64().Counter("ex.com.three") + gauge.Add(ctx, 100, attrs...) + gauge.Add(ctx, -25, attrs...) 
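+	// Note: an UpDownCounter is a non-monotonic sum, which the Prometheus
+	// exporter exposes as a gauge, so after the two adds above a scrape of
+	// /metrics should report "bar" with a value of 75.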
+ +	// This is the equivalent of prometheus.NewHistogramVec +	histogram, err := meter.SyncFloat64().Histogram("baz", instrument.WithDescription("a very nice histogram")) if err != nil { -		log.Panicf("failed to initialize instrument: %v", err) +		log.Fatal(err) } +	histogram.Record(ctx, 23, attrs...) +	histogram.Record(ctx, 7, attrs...) +	histogram.Record(ctx, 101, attrs...) +	histogram.Record(ctx, 105, attrs...) - commonAttrs := []attribute.KeyValue{lemonsKey.Int(10), attribute.String("A", "1"), attribute.String("B", "2"), attribute.String("C", "3")} - notSoCommonAttrs := []attribute.KeyValue{lemonsKey.Int(13)} - - ctx, stop := signal.NotifyContext(context.Background(), os.Interrupt) - defer stop() - - (*observerLock).Lock() - *observerValueToReport = 1.0 - *observerAttrsToReport = commonAttrs - (*observerLock).Unlock() - - hist.Record(ctx, 2.0, commonAttrs...) - counter.Add(ctx, 12.0, commonAttrs...) - - time.Sleep(5 * time.Second) - - (*observerLock).Lock() - *observerValueToReport = 1.0 - *observerAttrsToReport = notSoCommonAttrs - (*observerLock).Unlock() - hist.Record(ctx, 2.0, notSoCommonAttrs...) - counter.Add(ctx, 22.0, notSoCommonAttrs...) - - time.Sleep(5 * time.Second) - - (*observerLock).Lock() - *observerValueToReport = 13.0 - *observerAttrsToReport = commonAttrs - (*observerLock).Unlock() - hist.Record(ctx, 12.0, commonAttrs...) - counter.Add(ctx, 13.0, commonAttrs...) + ctx, _ = signal.NotifyContext(ctx, os.Interrupt) + <-ctx.Done() +} - fmt.Println("Example finished updating, please visit :2222") +func serveMetrics(collector prometheus.Collector) { + registry := prometheus.NewRegistry() + err := registry.Register(collector) + if err != nil { + fmt.Printf("error registering collector: %v", err) + return + } - <-ctx.Done() + log.Printf("serving metrics at localhost:2222/metrics") + http.Handle("/metrics", promhttp.HandlerFor(registry, promhttp.HandlerOpts{})) + err = http.ListenAndServe(":2222", nil) + if err != nil { + fmt.Printf("error serving http: %v", err) + return + } } diff --git a/exporters/otlp/otlpmetric/client.go b/exporters/otlp/otlpmetric/client.go new file mode 100644 index 00000000000..48b0fe805e2 --- /dev/null +++ b/exporters/otlp/otlpmetric/client.go @@ -0,0 +1,52 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build go1.18 +// +build go1.18 + +package otlpmetric // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric" + +import ( + "context" + + mpb "go.opentelemetry.io/proto/otlp/metrics/v1" +) + +// Client handles the transmission of OTLP data to an OTLP receiving endpoint. +type Client interface { + // UploadMetrics transmits metric data to an OTLP receiver. + // + // All retry logic must be handled by UploadMetrics alone; the Exporter + // does not implement any retry logic. All returned errors are considered + // unrecoverable. + UploadMetrics(context.Context, *mpb.ResourceMetrics) error + + // ForceFlush flushes any metric data held by a Client.
+ // + // The deadline or cancellation of the passed context must be honored. An + // appropriate error should be returned in these situations. + ForceFlush(context.Context) error + + // Shutdown flushes all metric data held by a Client and closes any + // connections it holds open. + // + // The deadline or cancellation of the passed context must be honored. An + // appropriate error should be returned in these situations. + // + // Shutdown will only be called once by the Exporter. Once a return value + // is received by the Exporter from Shutdown the Client will not be used + // anymore. Therefore all computational resources need to be released + // after this is called so the Client can be garbage collected. + Shutdown(context.Context) error +} diff --git a/exporters/otlp/otlpmetric/clients.go b/exporters/otlp/otlpmetric/clients.go deleted file mode 100644 index 6808d464761..00000000000 --- a/exporters/otlp/otlpmetric/clients.go +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package otlpmetric // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric" - -import ( - "context" - - metricpb "go.opentelemetry.io/proto/otlp/metrics/v1" -) - -// Client manages connections to the collector, handles the -// transformation of data into wire format, and the transmission of that -// data to the collector. -type Client interface { - // Start should establish connection(s) to endpoint(s). It is - // called just once by the exporter, so the implementation - // does not need to worry about idempotence and locking. - Start(ctx context.Context) error - // Stop should close the connections. The function is called - // only once by the exporter, so the implementation does not - // need to worry about idempotence, but it may be called - // concurrently with UploadMetrics, so proper - // locking is required. The function serves as a - // synchronization point - after the function returns, the - // process of closing connections is assumed to be finished. - Stop(ctx context.Context) error - // UploadMetrics should transform the passed metrics to the - // wire format and send it to the collector. May be called - // concurrently. - UploadMetrics(ctx context.Context, protoMetrics *metricpb.ResourceMetrics) error -} diff --git a/exporters/otlp/otlpmetric/doc.go b/exporters/otlp/otlpmetric/doc.go new file mode 100644 index 00000000000..31831c415fe --- /dev/null +++ b/exporters/otlp/otlpmetric/doc.go @@ -0,0 +1,20 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +// Package otlpmetric provides an OpenTelemetry metric Exporter that can be +// used with PeriodicReader. It transforms metricdata into OTLP and transmits +// the transformed data to OTLP receivers. The Exporter is configurable to use +// different Clients, each using a distinct transport protocol to communicate +// to an OTLP receiving endpoint. +package otlpmetric // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric" diff --git a/exporters/otlp/otlpmetric/exporter.go b/exporters/otlp/otlpmetric/exporter.go index e46c6bea790..0bd546e5255 100644 --- a/exporters/otlp/otlpmetric/exporter.go +++ b/exporters/otlp/otlpmetric/exporter.go @@ -12,121 +12,96 @@ // See the License for the specific language governing permissions and // limitations under the License. +//go:build go1.18 +// +build go1.18 + package otlpmetric // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric" import ( "context" - "errors" + "fmt" "sync" - "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/metrictransform" - "go.opentelemetry.io/otel/sdk/metric/export" - "go.opentelemetry.io/otel/sdk/metric/export/aggregation" - "go.opentelemetry.io/otel/sdk/metric/sdkapi" - "go.opentelemetry.io/otel/sdk/resource" -) - -var ( - errAlreadyStarted = errors.New("already started") + "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/transform" + "go.opentelemetry.io/otel/sdk/metric" + "go.opentelemetry.io/otel/sdk/metric/metricdata" + mpb "go.opentelemetry.io/proto/otlp/metrics/v1" ) -// Exporter exports metrics data in the OTLP wire format. -type Exporter struct { - client Client - temporalitySelector aggregation.TemporalitySelector +// exporter exports metrics data as OTLP. +type exporter struct { + // Ensure synchronous access to the client across all functionality. + clientMu sync.Mutex + client Client - mu sync.RWMutex - started bool - - startOnce sync.Once - stopOnce sync.Once + shutdownOnce sync.Once } -// Export exports a batch of metrics. -func (e *Exporter) Export(ctx context.Context, res *resource.Resource, ilr export.InstrumentationLibraryReader) error { - rm, err := metrictransform.InstrumentationLibraryReader(ctx, e, res, ilr, 1) - if err != nil { - return err - } - if rm == nil { - return nil +// Export transforms and transmits metric data to an OTLP receiver. +func (e *exporter) Export(ctx context.Context, rm metricdata.ResourceMetrics) error { + otlpRm, err := transform.ResourceMetrics(rm) + // Best effort upload of transformable metrics. + e.clientMu.Lock() + upErr := e.client.UploadMetrics(ctx, otlpRm) + e.clientMu.Unlock() + if upErr != nil { + if err == nil { + return upErr + } + // Merge the two errors. + return fmt.Errorf("failed to upload incomplete metrics (%s): %w", err, upErr) } - - // TODO: There is never more than one resource emitted by this - // call, as per the specification. We can change the - // signature of UploadMetrics correspondingly. Here create a - // singleton list to reduce the size of the current PR: - return e.client.UploadMetrics(ctx, rm) -} - -// Start establishes a connection to the receiving endpoint. -func (e *Exporter) Start(ctx context.Context) error { - var err = errAlreadyStarted - e.startOnce.Do(func() { - e.mu.Lock() - e.started = true - e.mu.Unlock() - err = e.client.Start(ctx) - }) - return err } -// Shutdown flushes all exports and closes all connections to the receiving endpoint. 
-func (e *Exporter) Shutdown(ctx context.Context) error { - e.mu.RLock() - started := e.started - e.mu.RUnlock() - - if !started { - return nil - } - - var err error +// ForceFlush flushes any metric data held by an exporter. +func (e *exporter) ForceFlush(ctx context.Context) error { + // The Exporter does not hold data; forward the command to the client. + e.clientMu.Lock() + defer e.clientMu.Unlock() + return e.client.ForceFlush(ctx) +} - e.stopOnce.Do(func() { - err = e.client.Stop(ctx) - e.mu.Lock() - e.started = false - e.mu.Unlock() +var errShutdown = fmt.Errorf("exporter is shutdown") + +// Shutdown flushes all metric data held by an exporter and releases any held +// computational resources. +func (e *exporter) Shutdown(ctx context.Context) error { + err := errShutdown + e.shutdownOnce.Do(func() { + e.clientMu.Lock() + client := e.client + e.client = shutdownClient{} + e.clientMu.Unlock() + err = client.Shutdown(ctx) }) - return err } -// TemporalityFor returns the accepted temporality for a metric measurment. -func (e *Exporter) TemporalityFor(descriptor *sdkapi.Descriptor, kind aggregation.Kind) aggregation.Temporality { - return e.temporalitySelector.TemporalityFor(descriptor, kind) +// New returns an Exporter that uses client to transmit the OTLP data it +// produces. The client is assumed to be fully started and able to communicate +// with its OTLP receiving endpoint. +func New(client Client) metric.Exporter { + return &exporter{client: client} } -var _ export.Exporter = (*Exporter)(nil) +type shutdownClient struct{} -// New constructs a new Exporter and starts it. -func New(ctx context.Context, client Client, opts ...Option) (*Exporter, error) { - exp := NewUnstarted(client, opts...) - if err := exp.Start(ctx); err != nil { - return nil, err +func (c shutdownClient) err(ctx context.Context) error { + if err := ctx.Err(); err != nil { + return err } - return exp, nil + return errShutdown } -// NewUnstarted constructs a new Exporter and does not start it. -func NewUnstarted(client Client, opts ...Option) *Exporter { - cfg := config{ - // Note: the default TemporalitySelector is specified - // as Cumulative: - // https://github.com/open-telemetry/opentelemetry-specification/issues/731 - temporalitySelector: aggregation.CumulativeTemporalitySelector(), - } - - for _, opt := range opts { - cfg = opt.apply(cfg) - } +func (c shutdownClient) UploadMetrics(ctx context.Context, _ *mpb.ResourceMetrics) error { + return c.err(ctx) +} - e := &Exporter{ - client: client, - temporalitySelector: cfg.temporalitySelector, - } +func (c shutdownClient) ForceFlush(ctx context.Context) error { + return c.err(ctx) +} - return e +func (c shutdownClient) Shutdown(ctx context.Context) error { + return c.err(ctx) } diff --git a/exporters/otlp/otlpmetric/exporter_test.go b/exporters/otlp/otlpmetric/exporter_test.go index d5208fc26d6..0b673f50031 100644 --- a/exporters/otlp/otlpmetric/exporter_test.go +++ b/exporters/otlp/otlpmetric/exporter_test.go @@ -12,837 +12,82 @@ // See the License for the specific language governing permissions and // limitations under the License.
-package otlpmetric_test +//go:build go1.18 +// +build go1.18 + +package otlpmetric // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric" import ( "context" - "fmt" + "sync" "testing" - "time" - "github.com/google/go-cmp/cmp" "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "google.golang.org/protobuf/testing/protocmp" - - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/exporters/otlp/otlpmetric" - "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/metrictransform" - "go.opentelemetry.io/otel/metric" - "go.opentelemetry.io/otel/sdk/instrumentation" - "go.opentelemetry.io/otel/sdk/metric/aggregator" - "go.opentelemetry.io/otel/sdk/metric/aggregator/histogram" - "go.opentelemetry.io/otel/sdk/metric/aggregator/sum" - "go.opentelemetry.io/otel/sdk/metric/export" - "go.opentelemetry.io/otel/sdk/metric/export/aggregation" - "go.opentelemetry.io/otel/sdk/metric/metrictest" - "go.opentelemetry.io/otel/sdk/metric/number" - "go.opentelemetry.io/otel/sdk/metric/processor/processortest" - "go.opentelemetry.io/otel/sdk/metric/sdkapi" - "go.opentelemetry.io/otel/sdk/resource" - commonpb "go.opentelemetry.io/proto/otlp/common/v1" - metricpb "go.opentelemetry.io/proto/otlp/metrics/v1" -) - -var ( - // Timestamps used in this test: - intervalStart = time.Now() - intervalEnd = intervalStart.Add(time.Hour) + "go.opentelemetry.io/otel/sdk/metric/metricdata" + mpb "go.opentelemetry.io/proto/otlp/metrics/v1" ) -type stubClient struct { - rm []*metricpb.ResourceMetrics +type client struct { + // n is incremented by all Client methods. If these methods are called + // concurrently this should fail tests run with the race detector. + n int } -func (m *stubClient) Start(ctx context.Context) error { +func (c *client) UploadMetrics(context.Context, *mpb.ResourceMetrics) error { + c.n++ return nil } -func (m *stubClient) Stop(ctx context.Context) error { +func (c *client) ForceFlush(context.Context) error { + c.n++ return nil } -func (m *stubClient) UploadMetrics(ctx context.Context, protoMetrics *metricpb.ResourceMetrics) error { - m.rm = append(m.rm, protoMetrics) +func (c *client) Shutdown(context.Context) error { + c.n++ return nil } -var _ otlpmetric.Client = (*stubClient)(nil) - -func (m *stubClient) Reset() { - m.rm = nil -} - -func newExporter(t *testing.T, opts ...otlpmetric.Option) (*otlpmetric.Exporter, *stubClient) { - client := &stubClient{} - exp, _ := otlpmetric.New(context.Background(), client, opts...) 
- return exp, client -} - -func startTime() uint64 { - return uint64(intervalStart.UnixNano()) -} - -func pointTime() uint64 { - return uint64(intervalEnd.UnixNano()) -} - -type testRecord struct { - name string - iKind sdkapi.InstrumentKind - nKind number.Kind - attrs []attribute.KeyValue - - meterName string - meterOpts []metric.MeterOption -} - -func record( - name string, - iKind sdkapi.InstrumentKind, - nKind number.Kind, - attrs []attribute.KeyValue, - meterName string, - meterOpts ...metric.MeterOption) testRecord { - return testRecord{ - name: name, - iKind: iKind, - nKind: nKind, - attrs: attrs, - meterName: meterName, - meterOpts: meterOpts, - } -} - -var ( - baseKeyValues = []attribute.KeyValue{attribute.String("host", "test.com")} - cpuKey = attribute.Key("CPU") - - testHistogramBoundaries = []float64{2.0, 4.0, 8.0} - - cpu1Attrs = []*commonpb.KeyValue{ - { - Key: "CPU", - Value: &commonpb.AnyValue{ - Value: &commonpb.AnyValue_IntValue{ - IntValue: 1, - }, - }, - }, - { - Key: "host", - Value: &commonpb.AnyValue{ - Value: &commonpb.AnyValue_StringValue{ - StringValue: "test.com", - }, - }, - }, - } - cpu2Attrs = []*commonpb.KeyValue{ - { - Key: "CPU", - Value: &commonpb.AnyValue{ - Value: &commonpb.AnyValue_IntValue{ - IntValue: 2, - }, - }, - }, - { - Key: "host", - Value: &commonpb.AnyValue{ - Value: &commonpb.AnyValue_StringValue{ - StringValue: "test.com", - }, - }, - }, - } - - testerAResource = resource.NewSchemaless(attribute.String("instance", "tester-a")) - testerAResourcePb = metrictransform.Resource(testerAResource) -) - -const ( - // Most of this test uses an empty instrumentation library name. - testLibName = "" -) - -func TestNoGroupingExport(t *testing.T) { - runMetricExportTests( - t, - nil, - resource.Empty(), - []testRecord{ - record( - "int64-count", - sdkapi.CounterInstrumentKind, - number.Int64Kind, - append(baseKeyValues, cpuKey.Int(1)), - testLibName, - ), - record( - "int64-count", - sdkapi.CounterInstrumentKind, - number.Int64Kind, - append(baseKeyValues, cpuKey.Int(2)), - testLibName, - ), - }, - []*metricpb.ResourceMetrics{ - { - Resource: nil, - ScopeMetrics: []*metricpb.ScopeMetrics{ - { - Metrics: []*metricpb.Metric{ - { - Name: "int64-count", - Data: &metricpb.Metric_Sum{ - Sum: &metricpb.Sum{ - IsMonotonic: true, - AggregationTemporality: metricpb.AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE, - DataPoints: []*metricpb.NumberDataPoint{ - { - Value: &metricpb.NumberDataPoint_AsInt{AsInt: 11}, - Attributes: cpu1Attrs, - StartTimeUnixNano: startTime(), - TimeUnixNano: pointTime(), - }, - { - Value: &metricpb.NumberDataPoint_AsInt{AsInt: 11}, - Attributes: cpu2Attrs, - StartTimeUnixNano: startTime(), - TimeUnixNano: pointTime(), - }, - }, - }, - }, - }, - }, - }, - }, - }, - }, - ) -} - -func TestHistogramInt64MetricGroupingExport(t *testing.T) { - r := record( - "int64-histogram", - sdkapi.HistogramInstrumentKind, - number.Int64Kind, - append(baseKeyValues, cpuKey.Int(1)), - testLibName, - ) - sumVal := 11.0 - expected := []*metricpb.ResourceMetrics{ - { - Resource: nil, - ScopeMetrics: []*metricpb.ScopeMetrics{ - { - Metrics: []*metricpb.Metric{ - { - Name: "int64-histogram", - Data: &metricpb.Metric_Histogram{ - Histogram: &metricpb.Histogram{ - AggregationTemporality: metricpb.AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE, - DataPoints: []*metricpb.HistogramDataPoint{ - { - Attributes: cpu1Attrs, - StartTimeUnixNano: startTime(), - TimeUnixNano: pointTime(), - Count: 2, - Sum: &sumVal, - ExplicitBounds: 
testHistogramBoundaries, - BucketCounts: []uint64{1, 0, 0, 1}, - }, - { - Attributes: cpu1Attrs, - Count: 2, - Sum: &sumVal, - ExplicitBounds: testHistogramBoundaries, - BucketCounts: []uint64{1, 0, 0, 1}, - StartTimeUnixNano: startTime(), - TimeUnixNano: pointTime(), - }, - }, - }, - }, - }, - }, - }, - }, - }, - } - runMetricExportTests(t, nil, resource.Empty(), []testRecord{r, r}, expected) -} - -func TestHistogramFloat64MetricGroupingExport(t *testing.T) { - r := record( - "float64-histogram", - sdkapi.HistogramInstrumentKind, - number.Float64Kind, - append(baseKeyValues, cpuKey.Int(1)), - testLibName, - ) - sumVal := 11.0 - expected := []*metricpb.ResourceMetrics{ - { - Resource: nil, - ScopeMetrics: []*metricpb.ScopeMetrics{ - { - Metrics: []*metricpb.Metric{ - { - Name: "float64-histogram", - Data: &metricpb.Metric_Histogram{ - Histogram: &metricpb.Histogram{ - AggregationTemporality: metricpb.AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE, - DataPoints: []*metricpb.HistogramDataPoint{ - { - Attributes: cpu1Attrs, - StartTimeUnixNano: startTime(), - TimeUnixNano: pointTime(), - Count: 2, - Sum: &sumVal, - ExplicitBounds: testHistogramBoundaries, - BucketCounts: []uint64{1, 0, 0, 1}, - }, - { - Attributes: cpu1Attrs, - Count: 2, - Sum: &sumVal, - ExplicitBounds: testHistogramBoundaries, - BucketCounts: []uint64{1, 0, 0, 1}, - StartTimeUnixNano: startTime(), - TimeUnixNano: pointTime(), - }, - }, - }, - }, - }, - }, - }, - }, - }, - } - runMetricExportTests(t, nil, resource.Empty(), []testRecord{r, r}, expected) -} - -func TestCountInt64MetricGroupingExport(t *testing.T) { - r := record( - "int64-count", - sdkapi.CounterInstrumentKind, - number.Int64Kind, - append(baseKeyValues, cpuKey.Int(1)), - testLibName, - ) - runMetricExportTests( - t, - nil, - resource.Empty(), - []testRecord{r, r}, - []*metricpb.ResourceMetrics{ - { - Resource: nil, - ScopeMetrics: []*metricpb.ScopeMetrics{ - { - Metrics: []*metricpb.Metric{ - { - Name: "int64-count", - Data: &metricpb.Metric_Sum{ - Sum: &metricpb.Sum{ - IsMonotonic: true, - AggregationTemporality: metricpb.AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE, - DataPoints: []*metricpb.NumberDataPoint{ - { - Value: &metricpb.NumberDataPoint_AsInt{AsInt: 11}, - Attributes: cpu1Attrs, - StartTimeUnixNano: startTime(), - TimeUnixNano: pointTime(), - }, - { - Value: &metricpb.NumberDataPoint_AsInt{AsInt: 11}, - Attributes: cpu1Attrs, - StartTimeUnixNano: startTime(), - TimeUnixNano: pointTime(), - }, - }, - }, - }, - }, - }, - }, - }, - }, - }, - ) -} - -func TestCountFloat64MetricGroupingExport(t *testing.T) { - r := record( - "float64-count", - sdkapi.CounterInstrumentKind, - number.Float64Kind, - append(baseKeyValues, cpuKey.Int(1)), - testLibName, - ) - runMetricExportTests( - t, - nil, - resource.Empty(), - []testRecord{r, r}, - []*metricpb.ResourceMetrics{ - { - Resource: nil, - ScopeMetrics: []*metricpb.ScopeMetrics{ - { - Metrics: []*metricpb.Metric{ - { - Name: "float64-count", - Data: &metricpb.Metric_Sum{ - Sum: &metricpb.Sum{ - IsMonotonic: true, - AggregationTemporality: metricpb.AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE, - DataPoints: []*metricpb.NumberDataPoint{ - { - Value: &metricpb.NumberDataPoint_AsDouble{AsDouble: 11.0}, - Attributes: cpu1Attrs, - StartTimeUnixNano: startTime(), - TimeUnixNano: pointTime(), - }, - { - Value: &metricpb.NumberDataPoint_AsDouble{AsDouble: 11.0}, - Attributes: cpu1Attrs, - StartTimeUnixNano: startTime(), - TimeUnixNano: pointTime(), - }, - }, - }, - }, - }, - }, - }, 
- }, - }, - }, - ) -} - -func TestResourceMetricGroupingExport(t *testing.T) { - runMetricExportTests( - t, - nil, - testerAResource, - []testRecord{ - record( - "int64-count", - sdkapi.CounterInstrumentKind, - number.Int64Kind, - append(baseKeyValues, cpuKey.Int(1)), - testLibName, - ), - record( - "int64-count", - sdkapi.CounterInstrumentKind, - number.Int64Kind, - append(baseKeyValues, cpuKey.Int(1)), - testLibName, - ), - record( - "int64-count", - sdkapi.CounterInstrumentKind, - number.Int64Kind, - append(baseKeyValues, cpuKey.Int(2)), - testLibName, - ), - record( - "int64-count", - sdkapi.CounterInstrumentKind, - number.Int64Kind, - append(baseKeyValues, cpuKey.Int(1)), - testLibName, - ), - }, - []*metricpb.ResourceMetrics{ - { - Resource: testerAResourcePb, - ScopeMetrics: []*metricpb.ScopeMetrics{ - { - Metrics: []*metricpb.Metric{ - { - Name: "int64-count", - Data: &metricpb.Metric_Sum{ - Sum: &metricpb.Sum{ - IsMonotonic: true, - AggregationTemporality: metricpb.AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE, - DataPoints: []*metricpb.NumberDataPoint{ - { - Value: &metricpb.NumberDataPoint_AsInt{AsInt: 11}, - Attributes: cpu1Attrs, - StartTimeUnixNano: startTime(), - TimeUnixNano: pointTime(), - }, - { - Value: &metricpb.NumberDataPoint_AsInt{AsInt: 11}, - Attributes: cpu1Attrs, - StartTimeUnixNano: startTime(), - TimeUnixNano: pointTime(), - }, - { - Value: &metricpb.NumberDataPoint_AsInt{AsInt: 11}, - Attributes: cpu2Attrs, - StartTimeUnixNano: startTime(), - TimeUnixNano: pointTime(), - }, - { - Value: &metricpb.NumberDataPoint_AsInt{AsInt: 11}, - Attributes: cpu1Attrs, - StartTimeUnixNano: startTime(), - TimeUnixNano: pointTime(), - }, - }, - }, - }, - }, - }, - }, - }, - }, - }, - ) -} - -func TestResourceInstLibMetricGroupingExport(t *testing.T) { - version1 := metric.WithInstrumentationVersion("v1") - version2 := metric.WithInstrumentationVersion("v2") - specialSchema := metric.WithSchemaURL("schurl") - summingLib := "summing-lib" - countingLib := "counting-lib" - runMetricExportTests( - t, - nil, - testerAResource, - []testRecord{ - record( - "int64-count", - sdkapi.CounterInstrumentKind, - number.Int64Kind, - append(baseKeyValues, cpuKey.Int(1)), - countingLib, - version1, - ), - record( - "int64-count", - sdkapi.CounterInstrumentKind, - number.Int64Kind, - append(baseKeyValues, cpuKey.Int(1)), - countingLib, - version2, - ), - record( - "int64-count", - sdkapi.CounterInstrumentKind, - number.Int64Kind, - append(baseKeyValues, cpuKey.Int(1)), - countingLib, - version1, - ), - record( - "int64-count", - sdkapi.CounterInstrumentKind, - number.Int64Kind, - append(baseKeyValues, cpuKey.Int(2)), - countingLib, - version1, - ), - record( - "int64-count", - sdkapi.CounterInstrumentKind, - number.Int64Kind, - append(baseKeyValues, cpuKey.Int(1)), - summingLib, - specialSchema, - ), - }, - []*metricpb.ResourceMetrics{ - { - Resource: testerAResourcePb, - ScopeMetrics: []*metricpb.ScopeMetrics{ - { - Scope: &commonpb.InstrumentationScope{ - Name: "counting-lib", - Version: "v1", - }, - Metrics: []*metricpb.Metric{ - { - Name: "int64-count", - Data: &metricpb.Metric_Sum{ - Sum: &metricpb.Sum{ - IsMonotonic: true, - AggregationTemporality: metricpb.AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE, - DataPoints: []*metricpb.NumberDataPoint{ - { - Value: &metricpb.NumberDataPoint_AsInt{AsInt: 11}, - Attributes: cpu1Attrs, - StartTimeUnixNano: startTime(), - TimeUnixNano: pointTime(), - }, - { - Value: &metricpb.NumberDataPoint_AsInt{AsInt: 11}, - Attributes: 
cpu1Attrs, - StartTimeUnixNano: startTime(), - TimeUnixNano: pointTime(), - }, - { - Value: &metricpb.NumberDataPoint_AsInt{AsInt: 11}, - Attributes: cpu2Attrs, - StartTimeUnixNano: startTime(), - TimeUnixNano: pointTime(), - }, - }, - }, - }, - }, - }, - }, - { - Scope: &commonpb.InstrumentationScope{ - Name: "counting-lib", - Version: "v2", - }, - Metrics: []*metricpb.Metric{ - { - Name: "int64-count", - Data: &metricpb.Metric_Sum{ - Sum: &metricpb.Sum{ - IsMonotonic: true, - AggregationTemporality: metricpb.AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE, - DataPoints: []*metricpb.NumberDataPoint{ - { - Value: &metricpb.NumberDataPoint_AsInt{AsInt: 11}, - Attributes: cpu1Attrs, - StartTimeUnixNano: startTime(), - TimeUnixNano: pointTime(), - }, - }, - }, - }, - }, - }, - }, - { - Scope: &commonpb.InstrumentationScope{ - Name: "summing-lib", - }, - SchemaUrl: "schurl", - Metrics: []*metricpb.Metric{ - { - Name: "int64-count", - Data: &metricpb.Metric_Sum{ - Sum: &metricpb.Sum{ - IsMonotonic: true, - AggregationTemporality: metricpb.AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE, - DataPoints: []*metricpb.NumberDataPoint{ - { - Value: &metricpb.NumberDataPoint_AsInt{AsInt: 11}, - Attributes: cpu1Attrs, - StartTimeUnixNano: startTime(), - TimeUnixNano: pointTime(), - }, - }, - }, - }, - }, - }, - }, - }, - }, - }, - ) -} - -func TestStatelessAggregationTemporality(t *testing.T) { - type testcase struct { - name string - instrumentKind sdkapi.InstrumentKind - aggTemporality metricpb.AggregationTemporality - monotonic bool - } - - for _, k := range []testcase{ - {"counter", sdkapi.CounterInstrumentKind, metricpb.AggregationTemporality_AGGREGATION_TEMPORALITY_DELTA, true}, - {"updowncounter", sdkapi.UpDownCounterInstrumentKind, metricpb.AggregationTemporality_AGGREGATION_TEMPORALITY_DELTA, false}, - {"counterobserver", sdkapi.CounterObserverInstrumentKind, metricpb.AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE, true}, - {"updowncounterobserver", sdkapi.UpDownCounterObserverInstrumentKind, metricpb.AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE, false}, - } { - t.Run(k.name, func(t *testing.T) { - runMetricExportTests( - t, - []otlpmetric.Option{ - otlpmetric.WithMetricAggregationTemporalitySelector( - aggregation.StatelessTemporalitySelector(), - ), - }, - testerAResource, - []testRecord{ - record( - "instrument", - k.instrumentKind, - number.Int64Kind, - append(baseKeyValues, cpuKey.Int(1)), - testLibName, - ), - }, - []*metricpb.ResourceMetrics{ - { - Resource: testerAResourcePb, - ScopeMetrics: []*metricpb.ScopeMetrics{ - { - Metrics: []*metricpb.Metric{ - { - Name: "instrument", - Data: &metricpb.Metric_Sum{ - Sum: &metricpb.Sum{ - IsMonotonic: k.monotonic, - AggregationTemporality: k.aggTemporality, - DataPoints: []*metricpb.NumberDataPoint{ - { - Value: &metricpb.NumberDataPoint_AsInt{AsInt: 11}, - Attributes: cpu1Attrs, - StartTimeUnixNano: startTime(), - TimeUnixNano: pointTime(), - }, - }, - }, - }, - }, - }, - }, - }, - }, - }, - ) - }) - } -} - -func runMetricExportTests(t *testing.T, opts []otlpmetric.Option, res *resource.Resource, records []testRecord, expected []*metricpb.ResourceMetrics) { - exp, driver := newExporter(t, opts...) - - libraryRecs := map[instrumentation.Library][]export.Record{} - for _, r := range records { - lcopy := make([]attribute.KeyValue, len(r.attrs)) - copy(lcopy, r.attrs) - desc := metrictest.NewDescriptor(r.name, r.iKind, r.nKind) - labs := attribute.NewSet(lcopy...) 
- - var agg, ckpt aggregator.Aggregator - if r.iKind.Adding() { - sums := sum.New(2) - agg, ckpt = &sums[0], &sums[1] - } else { - histos := histogram.New(2, &desc, histogram.WithExplicitBoundaries(testHistogramBoundaries)) - agg, ckpt = &histos[0], &histos[1] - } - - ctx := context.Background() - if r.iKind.Synchronous() { - // For synchronous instruments, perform two updates: 1 and 10 - switch r.nKind { - case number.Int64Kind: - require.NoError(t, agg.Update(ctx, number.NewInt64Number(1), &desc)) - require.NoError(t, agg.Update(ctx, number.NewInt64Number(10), &desc)) - case number.Float64Kind: - require.NoError(t, agg.Update(ctx, number.NewFloat64Number(1), &desc)) - require.NoError(t, agg.Update(ctx, number.NewFloat64Number(10), &desc)) - default: - t.Fatalf("invalid number kind: %v", r.nKind) +func TestExporterClientConcurrency(t *testing.T) { + const goroutines = 5 + + exp := New(&client{}) + rm := metricdata.ResourceMetrics{} + ctx := context.Background() + + done := make(chan struct{}) + first := make(chan struct{}, goroutines) + var wg sync.WaitGroup + for i := 0; i < goroutines; i++ { + wg.Add(1) + go func() { + defer wg.Done() + assert.NoError(t, exp.Export(ctx, rm)) + assert.NoError(t, exp.ForceFlush(ctx)) + // Ensure some work is done before shutting down. + first <- struct{}{} + + for { + _ = exp.Export(ctx, rm) + _ = exp.ForceFlush(ctx) + + select { + case <-done: + return + default: + } } - } else { - // For asynchronous instruments, perform a single update: 11 - switch r.nKind { - case number.Int64Kind: - require.NoError(t, agg.Update(ctx, number.NewInt64Number(11), &desc)) - case number.Float64Kind: - require.NoError(t, agg.Update(ctx, number.NewFloat64Number(11), &desc)) - default: - t.Fatalf("invalid number kind: %v", r.nKind) - } - } - require.NoError(t, agg.SynchronizedMove(ckpt, &desc)) - - meterCfg := metric.NewMeterConfig(r.meterOpts...) - lib := instrumentation.Library{ - Name: r.meterName, - Version: meterCfg.InstrumentationVersion(), - SchemaURL: meterCfg.SchemaURL(), - } - libraryRecs[lib] = append(libraryRecs[lib], export.NewRecord(&desc, &labs, ckpt.Aggregation(), intervalStart, intervalEnd)) - } - assert.NoError(t, exp.Export(context.Background(), res, processortest.MultiInstrumentationLibraryReader(libraryRecs))) - - // assert.ElementsMatch does not equate nested slices of different order, - // therefore this requires the top level slice to be broken down. - // Build a map of Resource/Scope pairs to Metrics, from that validate the - // metric elements match for all expected pairs. Finally, make we saw all - // expected pairs. - keyFor := func(sm *metricpb.ScopeMetrics) string { - return fmt.Sprintf("%s/%s/%s", sm.GetScope().GetName(), sm.GetScope().GetVersion(), sm.GetSchemaUrl()) - } - got := map[string][]*metricpb.Metric{} - for _, rm := range driver.rm { - for _, sm := range rm.ScopeMetrics { - k := keyFor(sm) - got[k] = append(got[k], sm.GetMetrics()...) 
- } + }() } - seen := map[string]struct{}{} - for _, rm := range expected { - for _, sm := range rm.ScopeMetrics { - k := keyFor(sm) - seen[k] = struct{}{} - g, ok := got[k] - if !ok { - t.Errorf("missing metrics for:\n\tInstrumentationScope: %q\n", k) - continue - } - if !assert.Len(t, g, len(sm.GetMetrics())) { - continue - } - for i, expected := range sm.GetMetrics() { - assert.Equal(t, "", cmp.Diff(expected, g[i], protocmp.Transform())) - } - } + for i := 0; i < goroutines; i++ { + <-first } - for k := range got { - if _, ok := seen[k]; !ok { - t.Errorf("did not expect metrics for:\n\tInstrumentationScope: %s\n", k) - } - } -} + close(first) + assert.NoError(t, exp.Shutdown(ctx)) + assert.ErrorIs(t, exp.Shutdown(ctx), errShutdown) -func TestEmptyMetricExport(t *testing.T) { - exp, driver := newExporter(t) - - for _, test := range []struct { - records []export.Record - want []*metricpb.ResourceMetrics - }{ - { - []export.Record(nil), - []*metricpb.ResourceMetrics(nil), - }, - { - []export.Record{}, - []*metricpb.ResourceMetrics(nil), - }, - } { - driver.Reset() - require.NoError(t, exp.Export(context.Background(), resource.Empty(), processortest.MultiInstrumentationLibraryReader(map[instrumentation.Library][]export.Record{ - { - Name: testLibName, - }: test.records, - }))) - assert.Equal(t, test.want, driver.rm) - } + close(done) + wg.Wait() } diff --git a/exporters/otlp/otlpmetric/go.mod b/exporters/otlp/otlpmetric/go.mod index 2541614fc71..b99be852d34 100644 --- a/exporters/otlp/otlpmetric/go.mod +++ b/exporters/otlp/otlpmetric/go.mod @@ -1,18 +1,18 @@ module go.opentelemetry.io/otel/exporters/otlp/otlpmetric -go 1.17 +go 1.18 require ( github.com/google/go-cmp v0.5.8 github.com/stretchr/testify v1.7.1 go.opentelemetry.io/otel v1.10.0 go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.10.0 - go.opentelemetry.io/otel/metric v0.31.0 - go.opentelemetry.io/otel/sdk v1.10.0 - go.opentelemetry.io/otel/sdk/metric v0.31.0 + go.opentelemetry.io/otel/metric v0.0.0-00010101000000-000000000000 + go.opentelemetry.io/otel/sdk v0.0.0-00010101000000-000000000000 + go.opentelemetry.io/otel/sdk/metric v0.0.0-00010101000000-000000000000 go.opentelemetry.io/proto/otlp v0.19.0 - google.golang.org/grpc v1.46.2 - google.golang.org/protobuf v1.28.0 + google.golang.org/grpc v1.42.0 + google.golang.org/protobuf v1.27.1 ) require ( @@ -31,14 +31,14 @@ require ( gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c // indirect ) +replace go.opentelemetry.io/otel/metric => ../../../metric + +replace go.opentelemetry.io/otel/sdk/metric => ../../../sdk/metric + replace go.opentelemetry.io/otel => ../../.. 
replace go.opentelemetry.io/otel/sdk => ../../../sdk -replace go.opentelemetry.io/otel/metric => ../../../metric +replace go.opentelemetry.io/otel/exporters/otlp/internal/retry => ../internal/retry replace go.opentelemetry.io/otel/trace => ../../../trace - -replace go.opentelemetry.io/otel/sdk/metric => ../../../sdk/metric - -replace go.opentelemetry.io/otel/exporters/otlp/internal/retry => ../internal/retry diff --git a/exporters/otlp/otlpmetric/go.sum b/exporters/otlp/otlpmetric/go.sum index fda59974476..82bf95645f4 100644 --- a/exporters/otlp/otlpmetric/go.sum +++ b/exporters/otlp/otlpmetric/go.sum @@ -35,7 +35,6 @@ github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03 github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= -github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4= github.com/cenkalti/backoff/v4 v4.1.3/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= @@ -51,7 +50,6 @@ github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XP github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -61,7 +59,6 @@ github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1m github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= -github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= @@ -226,7 +223,6 @@ golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/ golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod 
h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4 h1:4nGaVu0QrbjT/AK2PRLuQfQuh6DJve+pELhqTdAj3x0= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -268,9 +264,7 @@ golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007 h1:gG67DSER+11cZvqIMb8S8bt0vZtiN6xWYARwirrOSfE= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -398,9 +392,8 @@ google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= +google.golang.org/grpc v1.42.0 h1:XT2/MFpuPFsEX2fWh3YQtHkZ+WYZFQRfaUgLZYj/p6A= google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= -google.golang.org/grpc v1.46.2 h1:u+MLGgVf7vRdjEYZ8wDFhAVNmhkbJ5hmrA1LMWK1CAQ= -google.golang.org/grpc v1.46.2/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -413,9 +406,8 @@ google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGj google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.27.1 h1:SnqbnDw1V7RiZcXPx5MEeqPv2s79L9i7BJUlG/+RurQ= google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.28.0 h1:w43yiav+6bVFTBQFZX0r7ipe9JQ1QsbMgHwbBziscLw= -google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= gopkg.in/check.v1 
v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/exporters/otlp/otlpmetric/internal/metrictransform/attribute.go b/exporters/otlp/otlpmetric/internal/metrictransform/attribute.go deleted file mode 100644 index 5432906cf95..00000000000 --- a/exporters/otlp/otlpmetric/internal/metrictransform/attribute.go +++ /dev/null @@ -1,158 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package metrictransform // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/metrictransform" - -import ( - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/sdk/resource" - commonpb "go.opentelemetry.io/proto/otlp/common/v1" -) - -// KeyValues transforms a slice of attribute KeyValues into OTLP key-values. -func KeyValues(attrs []attribute.KeyValue) []*commonpb.KeyValue { - if len(attrs) == 0 { - return nil - } - - out := make([]*commonpb.KeyValue, 0, len(attrs)) - for _, kv := range attrs { - out = append(out, KeyValue(kv)) - } - return out -} - -// Iterator transforms an attribute iterator into OTLP key-values. -func Iterator(iter attribute.Iterator) []*commonpb.KeyValue { - l := iter.Len() - if l == 0 { - return nil - } - - out := make([]*commonpb.KeyValue, 0, l) - for iter.Next() { - out = append(out, KeyValue(iter.Attribute())) - } - return out -} - -// ResourceAttributes transforms a Resource OTLP key-values. -func ResourceAttributes(res *resource.Resource) []*commonpb.KeyValue { - return Iterator(res.Iter()) -} - -// KeyValue transforms an attribute KeyValue into an OTLP key-value. -func KeyValue(kv attribute.KeyValue) *commonpb.KeyValue { - return &commonpb.KeyValue{Key: string(kv.Key), Value: Value(kv.Value)} -} - -// Value transforms an attribute Value into an OTLP AnyValue. 
-func Value(v attribute.Value) *commonpb.AnyValue { - av := new(commonpb.AnyValue) - switch v.Type() { - case attribute.BOOL: - av.Value = &commonpb.AnyValue_BoolValue{ - BoolValue: v.AsBool(), - } - case attribute.BOOLSLICE: - av.Value = &commonpb.AnyValue_ArrayValue{ - ArrayValue: &commonpb.ArrayValue{ - Values: boolSliceValues(v.AsBoolSlice()), - }, - } - case attribute.INT64: - av.Value = &commonpb.AnyValue_IntValue{ - IntValue: v.AsInt64(), - } - case attribute.INT64SLICE: - av.Value = &commonpb.AnyValue_ArrayValue{ - ArrayValue: &commonpb.ArrayValue{ - Values: int64SliceValues(v.AsInt64Slice()), - }, - } - case attribute.FLOAT64: - av.Value = &commonpb.AnyValue_DoubleValue{ - DoubleValue: v.AsFloat64(), - } - case attribute.FLOAT64SLICE: - av.Value = &commonpb.AnyValue_ArrayValue{ - ArrayValue: &commonpb.ArrayValue{ - Values: float64SliceValues(v.AsFloat64Slice()), - }, - } - case attribute.STRING: - av.Value = &commonpb.AnyValue_StringValue{ - StringValue: v.AsString(), - } - case attribute.STRINGSLICE: - av.Value = &commonpb.AnyValue_ArrayValue{ - ArrayValue: &commonpb.ArrayValue{ - Values: stringSliceValues(v.AsStringSlice()), - }, - } - default: - av.Value = &commonpb.AnyValue_StringValue{ - StringValue: "INVALID", - } - } - return av -} - -func boolSliceValues(vals []bool) []*commonpb.AnyValue { - converted := make([]*commonpb.AnyValue, len(vals)) - for i, v := range vals { - converted[i] = &commonpb.AnyValue{ - Value: &commonpb.AnyValue_BoolValue{ - BoolValue: v, - }, - } - } - return converted -} - -func int64SliceValues(vals []int64) []*commonpb.AnyValue { - converted := make([]*commonpb.AnyValue, len(vals)) - for i, v := range vals { - converted[i] = &commonpb.AnyValue{ - Value: &commonpb.AnyValue_IntValue{ - IntValue: v, - }, - } - } - return converted -} - -func float64SliceValues(vals []float64) []*commonpb.AnyValue { - converted := make([]*commonpb.AnyValue, len(vals)) - for i, v := range vals { - converted[i] = &commonpb.AnyValue{ - Value: &commonpb.AnyValue_DoubleValue{ - DoubleValue: v, - }, - } - } - return converted -} - -func stringSliceValues(vals []string) []*commonpb.AnyValue { - converted := make([]*commonpb.AnyValue, len(vals)) - for i, v := range vals { - converted[i] = &commonpb.AnyValue{ - Value: &commonpb.AnyValue_StringValue{ - StringValue: v, - }, - } - } - return converted -} diff --git a/exporters/otlp/otlpmetric/internal/metrictransform/attribute_test.go b/exporters/otlp/otlpmetric/internal/metrictransform/attribute_test.go deleted file mode 100644 index e728e24b721..00000000000 --- a/exporters/otlp/otlpmetric/internal/metrictransform/attribute_test.go +++ /dev/null @@ -1,258 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package metrictransform - -import ( - "testing" - - "github.com/stretchr/testify/assert" - - "go.opentelemetry.io/otel/attribute" - commonpb "go.opentelemetry.io/proto/otlp/common/v1" -) - -type attributeTest struct { - attrs []attribute.KeyValue - expected []*commonpb.KeyValue -} - -func TestAttributes(t *testing.T) { - for _, test := range []attributeTest{ - {nil, nil}, - { - []attribute.KeyValue{ - attribute.Int("int to int", 123), - attribute.Int64("int64 to int64", 1234567), - attribute.Float64("float64 to double", 1.61), - attribute.String("string to string", "string"), - attribute.Bool("bool to bool", true), - }, - []*commonpb.KeyValue{ - { - Key: "int to int", - Value: &commonpb.AnyValue{ - Value: &commonpb.AnyValue_IntValue{ - IntValue: 123, - }, - }, - }, - { - Key: "int64 to int64", - Value: &commonpb.AnyValue{ - Value: &commonpb.AnyValue_IntValue{ - IntValue: 1234567, - }, - }, - }, - { - Key: "float64 to double", - Value: &commonpb.AnyValue{ - Value: &commonpb.AnyValue_DoubleValue{ - DoubleValue: 1.61, - }, - }, - }, - { - Key: "string to string", - Value: &commonpb.AnyValue{ - Value: &commonpb.AnyValue_StringValue{ - StringValue: "string", - }, - }, - }, - { - Key: "bool to bool", - Value: &commonpb.AnyValue{ - Value: &commonpb.AnyValue_BoolValue{ - BoolValue: true, - }, - }, - }, - }, - }, - } { - got := KeyValues(test.attrs) - if !assert.Len(t, got, len(test.expected)) { - continue - } - for i, actual := range got { - if a, ok := actual.Value.Value.(*commonpb.AnyValue_DoubleValue); ok { - e, ok := test.expected[i].Value.Value.(*commonpb.AnyValue_DoubleValue) - if !ok { - t.Errorf("expected AnyValue_DoubleValue, got %T", test.expected[i].Value.Value) - continue - } - if !assert.InDelta(t, e.DoubleValue, a.DoubleValue, 0.01) { - continue - } - e.DoubleValue = a.DoubleValue - } - assert.Equal(t, test.expected[i], actual) - } - } -} - -func TestArrayAttributes(t *testing.T) { - // Array KeyValue supports only arrays of primitive types: - // "bool", "int", "int64", - // "float64", "string", - for _, test := range []attributeTest{ - {nil, nil}, - { - []attribute.KeyValue{ - { - Key: attribute.Key("invalid"), - Value: attribute.Value{}, - }, - }, - []*commonpb.KeyValue{ - { - Key: "invalid", - Value: &commonpb.AnyValue{ - Value: &commonpb.AnyValue_StringValue{ - StringValue: "INVALID", - }, - }, - }, - }, - }, - { - []attribute.KeyValue{ - attribute.BoolSlice("bool slice to bool array", []bool{true, false}), - attribute.IntSlice("int slice to int64 array", []int{1, 2, 3}), - attribute.Int64Slice("int64 slice to int64 array", []int64{1, 2, 3}), - attribute.Float64Slice("float64 slice to double array", []float64{1.11, 2.22, 3.33}), - attribute.StringSlice("string slice to string array", []string{"foo", "bar", "baz"}), - }, - []*commonpb.KeyValue{ - newOTelBoolArray("bool slice to bool array", []bool{true, false}), - newOTelIntArray("int slice to int64 array", []int64{1, 2, 3}), - newOTelIntArray("int64 slice to int64 array", []int64{1, 2, 3}), - newOTelDoubleArray("float64 slice to double array", []float64{1.11, 2.22, 3.33}), - newOTelStringArray("string slice to string array", []string{"foo", "bar", "baz"}), - }, - }, - } { - actualArrayAttributes := KeyValues(test.attrs) - expectedArrayAttributes := test.expected - if !assert.Len(t, actualArrayAttributes, len(expectedArrayAttributes)) { - continue - } - - for i, actualArrayAttr := range actualArrayAttributes { - expectedArrayAttr := expectedArrayAttributes[i] - expectedKey, actualKey := expectedArrayAttr.Key, 
actualArrayAttr.Key - if !assert.Equal(t, expectedKey, actualKey) { - continue - } - - expected := expectedArrayAttr.Value.GetArrayValue() - actual := actualArrayAttr.Value.GetArrayValue() - if expected == nil { - assert.Nil(t, actual) - continue - } - if assert.NotNil(t, actual, "expected not nil for %s", actualKey) { - assertExpectedArrayValues(t, expected.Values, actual.Values) - } - } - } -} - -func assertExpectedArrayValues(t *testing.T, expectedValues, actualValues []*commonpb.AnyValue) { - for i, actual := range actualValues { - expected := expectedValues[i] - if a, ok := actual.Value.(*commonpb.AnyValue_DoubleValue); ok { - e, ok := expected.Value.(*commonpb.AnyValue_DoubleValue) - if !ok { - t.Errorf("expected AnyValue_DoubleValue, got %T", expected.Value) - continue - } - if !assert.InDelta(t, e.DoubleValue, a.DoubleValue, 0.01) { - continue - } - e.DoubleValue = a.DoubleValue - } - assert.Equal(t, expected, actual) - } -} - -func newOTelBoolArray(key string, values []bool) *commonpb.KeyValue { - arrayValues := []*commonpb.AnyValue{} - for _, b := range values { - arrayValues = append(arrayValues, &commonpb.AnyValue{ - Value: &commonpb.AnyValue_BoolValue{ - BoolValue: b, - }, - }) - } - - return newOTelArray(key, arrayValues) -} - -func newOTelIntArray(key string, values []int64) *commonpb.KeyValue { - arrayValues := []*commonpb.AnyValue{} - - for _, i := range values { - arrayValues = append(arrayValues, &commonpb.AnyValue{ - Value: &commonpb.AnyValue_IntValue{ - IntValue: i, - }, - }) - } - - return newOTelArray(key, arrayValues) -} - -func newOTelDoubleArray(key string, values []float64) *commonpb.KeyValue { - arrayValues := []*commonpb.AnyValue{} - - for _, d := range values { - arrayValues = append(arrayValues, &commonpb.AnyValue{ - Value: &commonpb.AnyValue_DoubleValue{ - DoubleValue: d, - }, - }) - } - - return newOTelArray(key, arrayValues) -} - -func newOTelStringArray(key string, values []string) *commonpb.KeyValue { - arrayValues := []*commonpb.AnyValue{} - - for _, s := range values { - arrayValues = append(arrayValues, &commonpb.AnyValue{ - Value: &commonpb.AnyValue_StringValue{ - StringValue: s, - }, - }) - } - - return newOTelArray(key, arrayValues) -} - -func newOTelArray(key string, arrayValues []*commonpb.AnyValue) *commonpb.KeyValue { - return &commonpb.KeyValue{ - Key: key, - Value: &commonpb.AnyValue{ - Value: &commonpb.AnyValue_ArrayValue{ - ArrayValue: &commonpb.ArrayValue{ - Values: arrayValues, - }, - }, - }, - } -} diff --git a/exporters/otlp/otlpmetric/internal/metrictransform/metric.go b/exporters/otlp/otlpmetric/internal/metrictransform/metric.go deleted file mode 100644 index 2d7c9049905..00000000000 --- a/exporters/otlp/otlpmetric/internal/metrictransform/metric.go +++ /dev/null @@ -1,437 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package metrictransform provides translations for opentelemetry-go concepts and -// structures to otlp structures. 
-package metrictransform // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/metrictransform" - -import ( - "context" - "errors" - "fmt" - "strings" - "sync" - "time" - - "go.opentelemetry.io/otel/sdk/instrumentation" - "go.opentelemetry.io/otel/sdk/metric/export" - "go.opentelemetry.io/otel/sdk/metric/export/aggregation" - "go.opentelemetry.io/otel/sdk/metric/number" - "go.opentelemetry.io/otel/sdk/resource" - commonpb "go.opentelemetry.io/proto/otlp/common/v1" - metricpb "go.opentelemetry.io/proto/otlp/metrics/v1" -) - -var ( - // ErrUnimplementedAgg is returned when a transformation of an unimplemented - // aggregator is attempted. - ErrUnimplementedAgg = errors.New("unimplemented aggregator") - - // ErrIncompatibleAgg is returned when - // aggregation.Kind implies an interface conversion that has - // failed. - ErrIncompatibleAgg = errors.New("incompatible aggregation type") - - // ErrUnknownValueType is returned when a transformation of an unknown value - // is attempted. - ErrUnknownValueType = errors.New("invalid value type") - - // ErrContextCanceled is returned when a context cancellation halts a - // transformation. - ErrContextCanceled = errors.New("context canceled") - - // ErrTransforming is returned when an unexected error is encountered transforming. - ErrTransforming = errors.New("transforming failed") -) - -// result is the product of transforming Records into OTLP Metrics. -type result struct { - Metric *metricpb.Metric - Err error -} - -// toNanos returns the number of nanoseconds since the UNIX epoch. -func toNanos(t time.Time) uint64 { - if t.IsZero() { - return 0 - } - return uint64(t.UnixNano()) -} - -// InstrumentationLibraryReader transforms all records contained in a checkpoint into -// batched OTLP ResourceMetrics. -func InstrumentationLibraryReader(ctx context.Context, temporalitySelector aggregation.TemporalitySelector, res *resource.Resource, ilmr export.InstrumentationLibraryReader, numWorkers uint) (*metricpb.ResourceMetrics, error) { - var sms []*metricpb.ScopeMetrics - - err := ilmr.ForEach(func(lib instrumentation.Library, mr export.Reader) error { - records, errc := source(ctx, temporalitySelector, mr) - - // Start a fixed number of goroutines to transform records. - transformed := make(chan result) - var wg sync.WaitGroup - wg.Add(int(numWorkers)) - for i := uint(0); i < numWorkers; i++ { - go func() { - defer wg.Done() - transformer(ctx, temporalitySelector, records, transformed) - }() - } - go func() { - wg.Wait() - close(transformed) - }() - - // Synchronously collect the transformed records and transmit. - ms, err := sink(ctx, transformed) - if err != nil { - return nil - } - - // source is complete, check for any errors. - if err := <-errc; err != nil { - return err - } - if len(ms) == 0 { - return nil - } - - sms = append(sms, &metricpb.ScopeMetrics{ - Metrics: ms, - SchemaUrl: lib.SchemaURL, - Scope: &commonpb.InstrumentationScope{ - Name: lib.Name, - Version: lib.Version, - }, - }) - return nil - }) - if len(sms) == 0 { - return nil, err - } - - rms := &metricpb.ResourceMetrics{ - Resource: Resource(res), - SchemaUrl: res.SchemaURL(), - ScopeMetrics: sms, - } - - return rms, err -} - -// source starts a goroutine that sends each one of the Records yielded by -// the Reader on the returned chan. Any error encountered will be sent -// on the returned error chan after seeding is complete. 
-func source(ctx context.Context, temporalitySelector aggregation.TemporalitySelector, mr export.Reader) (<-chan export.Record, <-chan error) { - errc := make(chan error, 1) - out := make(chan export.Record) - // Seed records into process. - go func() { - defer close(out) - // No select is needed since errc is buffered. - errc <- mr.ForEach(temporalitySelector, func(r export.Record) error { - select { - case <-ctx.Done(): - return ErrContextCanceled - case out <- r: - } - return nil - }) - }() - return out, errc -} - -// transformer transforms records read from the passed in chan into -// OTLP Metrics which are sent on the out chan. -func transformer(ctx context.Context, temporalitySelector aggregation.TemporalitySelector, in <-chan export.Record, out chan<- result) { - for r := range in { - m, err := Record(temporalitySelector, r) - // Propagate errors, but do not send empty results. - if err == nil && m == nil { - continue - } - res := result{ - Metric: m, - Err: err, - } - select { - case <-ctx.Done(): - return - case out <- res: - } - } -} - -// sink collects transformed Records and batches them. -// -// Any errors encountered transforming input will be reported with an -// ErrTransforming as well as the completed ResourceMetrics. It is up to the -// caller to handle any incorrect data in these ResourceMetric. -func sink(ctx context.Context, in <-chan result) ([]*metricpb.Metric, error) { - var errStrings []string - - // Group by the MetricDescriptor. - grouped := map[string]*metricpb.Metric{} - for res := range in { - if res.Err != nil { - errStrings = append(errStrings, res.Err.Error()) - continue - } - - mID := res.Metric.GetName() - m, ok := grouped[mID] - if !ok { - grouped[mID] = res.Metric - continue - } - // Note: There is extra work happening in this code that can be - // improved when the work described in #2119 is completed. The SDK has - // a guarantee that no more than one point per period per attribute - // set is produced, so this fallthrough should never happen. The final - // step of #2119 is to remove all the grouping logic here. - switch res.Metric.Data.(type) { - case *metricpb.Metric_Gauge: - m.GetGauge().DataPoints = append(m.GetGauge().DataPoints, res.Metric.GetGauge().DataPoints...) - case *metricpb.Metric_Sum: - m.GetSum().DataPoints = append(m.GetSum().DataPoints, res.Metric.GetSum().DataPoints...) - case *metricpb.Metric_Histogram: - m.GetHistogram().DataPoints = append(m.GetHistogram().DataPoints, res.Metric.GetHistogram().DataPoints...) - case *metricpb.Metric_Summary: - m.GetSummary().DataPoints = append(m.GetSummary().DataPoints, res.Metric.GetSummary().DataPoints...) - default: - err := fmt.Sprintf("unsupported metric type: %T", res.Metric.Data) - errStrings = append(errStrings, err) - } - } - - if len(grouped) == 0 { - return nil, nil - } - - ms := make([]*metricpb.Metric, 0, len(grouped)) - for _, m := range grouped { - ms = append(ms, m) - } - - // Report any transform errors. - if len(errStrings) > 0 { - return ms, fmt.Errorf("%w:\n -%s", ErrTransforming, strings.Join(errStrings, "\n -")) - } - return ms, nil -} - -// Record transforms a Record into an OTLP Metric. An ErrIncompatibleAgg -// error is returned if the Record Aggregator is not supported. 
-func Record(temporalitySelector aggregation.TemporalitySelector, r export.Record) (*metricpb.Metric, error) { - agg := r.Aggregation() - switch agg.Kind() { - case aggregation.HistogramKind: - h, ok := agg.(aggregation.Histogram) - if !ok { - return nil, fmt.Errorf("%w: %T", ErrIncompatibleAgg, agg) - } - return histogramPoint(r, temporalitySelector.TemporalityFor(r.Descriptor(), aggregation.HistogramKind), h) - - case aggregation.SumKind: - s, ok := agg.(aggregation.Sum) - if !ok { - return nil, fmt.Errorf("%w: %T", ErrIncompatibleAgg, agg) - } - sum, err := s.Sum() - if err != nil { - return nil, err - } - return sumPoint(r, sum, r.StartTime(), r.EndTime(), temporalitySelector.TemporalityFor(r.Descriptor(), aggregation.SumKind), r.Descriptor().InstrumentKind().Monotonic()) - - case aggregation.LastValueKind: - lv, ok := agg.(aggregation.LastValue) - if !ok { - return nil, fmt.Errorf("%w: %T", ErrIncompatibleAgg, agg) - } - value, tm, err := lv.LastValue() - if err != nil { - return nil, err - } - return gaugePoint(r, value, time.Time{}, tm) - - default: - return nil, fmt.Errorf("%w: %T", ErrUnimplementedAgg, agg) - } -} - -func gaugePoint(record export.Record, num number.Number, start, end time.Time) (*metricpb.Metric, error) { - desc := record.Descriptor() - attrs := record.Attributes() - - m := &metricpb.Metric{ - Name: desc.Name(), - Description: desc.Description(), - Unit: string(desc.Unit()), - } - - switch n := desc.NumberKind(); n { - case number.Int64Kind: - m.Data = &metricpb.Metric_Gauge{ - Gauge: &metricpb.Gauge{ - DataPoints: []*metricpb.NumberDataPoint{ - { - Value: &metricpb.NumberDataPoint_AsInt{ - AsInt: num.CoerceToInt64(n), - }, - Attributes: Iterator(attrs.Iter()), - StartTimeUnixNano: toNanos(start), - TimeUnixNano: toNanos(end), - }, - }, - }, - } - case number.Float64Kind: - m.Data = &metricpb.Metric_Gauge{ - Gauge: &metricpb.Gauge{ - DataPoints: []*metricpb.NumberDataPoint{ - { - Value: &metricpb.NumberDataPoint_AsDouble{ - AsDouble: num.CoerceToFloat64(n), - }, - Attributes: Iterator(attrs.Iter()), - StartTimeUnixNano: toNanos(start), - TimeUnixNano: toNanos(end), - }, - }, - }, - } - default: - return nil, fmt.Errorf("%w: %v", ErrUnknownValueType, n) - } - - return m, nil -} - -func sdkTemporalityToTemporality(temporality aggregation.Temporality) metricpb.AggregationTemporality { - switch temporality { - case aggregation.DeltaTemporality: - return metricpb.AggregationTemporality_AGGREGATION_TEMPORALITY_DELTA - case aggregation.CumulativeTemporality: - return metricpb.AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE - } - return metricpb.AggregationTemporality_AGGREGATION_TEMPORALITY_UNSPECIFIED -} - -func sumPoint(record export.Record, num number.Number, start, end time.Time, temporality aggregation.Temporality, monotonic bool) (*metricpb.Metric, error) { - desc := record.Descriptor() - attrs := record.Attributes() - - m := &metricpb.Metric{ - Name: desc.Name(), - Description: desc.Description(), - Unit: string(desc.Unit()), - } - - switch n := desc.NumberKind(); n { - case number.Int64Kind: - m.Data = &metricpb.Metric_Sum{ - Sum: &metricpb.Sum{ - IsMonotonic: monotonic, - AggregationTemporality: sdkTemporalityToTemporality(temporality), - DataPoints: []*metricpb.NumberDataPoint{ - { - Value: &metricpb.NumberDataPoint_AsInt{ - AsInt: num.CoerceToInt64(n), - }, - Attributes: Iterator(attrs.Iter()), - StartTimeUnixNano: toNanos(start), - TimeUnixNano: toNanos(end), - }, - }, - }, - } - case number.Float64Kind: - m.Data = &metricpb.Metric_Sum{ - Sum: 
&metricpb.Sum{ - IsMonotonic: monotonic, - AggregationTemporality: sdkTemporalityToTemporality(temporality), - DataPoints: []*metricpb.NumberDataPoint{ - { - Value: &metricpb.NumberDataPoint_AsDouble{ - AsDouble: num.CoerceToFloat64(n), - }, - Attributes: Iterator(attrs.Iter()), - StartTimeUnixNano: toNanos(start), - TimeUnixNano: toNanos(end), - }, - }, - }, - } - default: - return nil, fmt.Errorf("%w: %v", ErrUnknownValueType, n) - } - - return m, nil -} - -func histogramValues(a aggregation.Histogram) (boundaries []float64, counts []uint64, err error) { - var buckets aggregation.Buckets - if buckets, err = a.Histogram(); err != nil { - return - } - boundaries, counts = buckets.Boundaries, buckets.Counts - if len(counts) != len(boundaries)+1 { - err = ErrTransforming - return - } - return -} - -// histogram transforms a Histogram Aggregator into an OTLP Metric. -func histogramPoint(record export.Record, temporality aggregation.Temporality, a aggregation.Histogram) (*metricpb.Metric, error) { - desc := record.Descriptor() - attrs := record.Attributes() - boundaries, counts, err := histogramValues(a) - if err != nil { - return nil, err - } - - count, err := a.Count() - if err != nil { - return nil, err - } - - sum, err := a.Sum() - if err != nil { - return nil, err - } - - sumFloat64 := sum.CoerceToFloat64(desc.NumberKind()) - m := &metricpb.Metric{ - Name: desc.Name(), - Description: desc.Description(), - Unit: string(desc.Unit()), - Data: &metricpb.Metric_Histogram{ - Histogram: &metricpb.Histogram{ - AggregationTemporality: sdkTemporalityToTemporality(temporality), - DataPoints: []*metricpb.HistogramDataPoint{ - { - Sum: &sumFloat64, - Attributes: Iterator(attrs.Iter()), - StartTimeUnixNano: toNanos(record.StartTime()), - TimeUnixNano: toNanos(record.EndTime()), - Count: uint64(count), - BucketCounts: counts, - ExplicitBounds: boundaries, - }, - }, - }, - }, - } - return m, nil -} diff --git a/exporters/otlp/otlpmetric/internal/metrictransform/metric_test.go b/exporters/otlp/otlpmetric/internal/metrictransform/metric_test.go deleted file mode 100644 index 3acf98e0cda..00000000000 --- a/exporters/otlp/otlpmetric/internal/metrictransform/metric_test.go +++ /dev/null @@ -1,314 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
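The InstrumentationLibraryReader removed above pushes records through a three-stage pipeline: a source goroutine seeds records, a fixed pool of transformer workers converts them concurrently, and a synchronous sink batches the results. A self-contained sketch of that fan-out/fan-in shape, with ints standing in for export.Records and strings for OTLP metrics (names illustrative; context and error handling elided):

package main

import (
	"fmt"
	"strconv"
	"sync"
)

func main() {
	records := make(chan int)
	go func() { // source: seed records into the pipeline.
		defer close(records)
		for i := 0; i < 10; i++ {
			records <- i
		}
	}()

	const numWorkers = 3
	transformed := make(chan string)
	var wg sync.WaitGroup
	wg.Add(numWorkers)
	for i := 0; i < numWorkers; i++ {
		go func() { // transformer: convert records concurrently.
			defer wg.Done()
			for r := range records {
				transformed <- "metric-" + strconv.Itoa(r)
			}
		}()
	}
	go func() { // close the fan-in channel once all workers are done.
		wg.Wait()
		close(transformed)
	}()

	var batch []string // sink: collect synchronously.
	for m := range transformed {
		batch = append(batch, m)
	}
	fmt.Println(len(batch)) // 10
}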
- -package metrictransform - -import ( - "context" - "errors" - "fmt" - "testing" - "time" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/sdk/metric/aggregator" - "go.opentelemetry.io/otel/sdk/metric/aggregator/lastvalue" - "go.opentelemetry.io/otel/sdk/metric/aggregator/sum" - "go.opentelemetry.io/otel/sdk/metric/export" - "go.opentelemetry.io/otel/sdk/metric/export/aggregation" - "go.opentelemetry.io/otel/sdk/metric/metrictest" - "go.opentelemetry.io/otel/sdk/metric/number" - "go.opentelemetry.io/otel/sdk/metric/sdkapi" - commonpb "go.opentelemetry.io/proto/otlp/common/v1" - metricpb "go.opentelemetry.io/proto/otlp/metrics/v1" -) - -var ( - // Timestamps used in this test: - - intervalStart = time.Now() - intervalEnd = intervalStart.Add(time.Hour) -) - -const ( - otelCumulative = metricpb.AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE - otelDelta = metricpb.AggregationTemporality_AGGREGATION_TEMPORALITY_DELTA -) - -func TestStringKeyValues(t *testing.T) { - tests := []struct { - kvs []attribute.KeyValue - expected []*commonpb.KeyValue - }{ - { - nil, - nil, - }, - { - []attribute.KeyValue{}, - nil, - }, - { - []attribute.KeyValue{ - attribute.Bool("true", true), - attribute.Int64("one", 1), - attribute.Int64("two", 2), - attribute.Float64("three", 3), - attribute.Int("four", 4), - attribute.Int("five", 5), - attribute.Float64("six", 6), - attribute.Int("seven", 7), - attribute.Int("eight", 8), - attribute.String("the", "final word"), - }, - []*commonpb.KeyValue{ - {Key: "eight", Value: &commonpb.AnyValue{Value: &commonpb.AnyValue_IntValue{IntValue: 8}}}, - {Key: "five", Value: &commonpb.AnyValue{Value: &commonpb.AnyValue_IntValue{IntValue: 5}}}, - {Key: "four", Value: &commonpb.AnyValue{Value: &commonpb.AnyValue_IntValue{IntValue: 4}}}, - {Key: "one", Value: &commonpb.AnyValue{Value: &commonpb.AnyValue_IntValue{IntValue: 1}}}, - {Key: "seven", Value: &commonpb.AnyValue{Value: &commonpb.AnyValue_IntValue{IntValue: 7}}}, - {Key: "six", Value: &commonpb.AnyValue{Value: &commonpb.AnyValue_DoubleValue{DoubleValue: 6.0}}}, - {Key: "the", Value: &commonpb.AnyValue{Value: &commonpb.AnyValue_StringValue{StringValue: "final word"}}}, - {Key: "three", Value: &commonpb.AnyValue{Value: &commonpb.AnyValue_DoubleValue{DoubleValue: 3.0}}}, - {Key: "true", Value: &commonpb.AnyValue{Value: &commonpb.AnyValue_BoolValue{BoolValue: true}}}, - {Key: "two", Value: &commonpb.AnyValue{Value: &commonpb.AnyValue_IntValue{IntValue: 2}}}, - }, - }, - } - - for _, test := range tests { - attrs := attribute.NewSet(test.kvs...) 
- assert.Equal(t, test.expected, Iterator(attrs.Iter())) - } -} - -func TestSumIntDataPoints(t *testing.T) { - desc := metrictest.NewDescriptor("", sdkapi.HistogramInstrumentKind, number.Int64Kind) - attrs := attribute.NewSet(attribute.String("one", "1")) - sums := sum.New(2) - s, ckpt := &sums[0], &sums[1] - - assert.NoError(t, s.Update(context.Background(), number.Number(1), &desc)) - require.NoError(t, s.SynchronizedMove(ckpt, &desc)) - record := export.NewRecord(&desc, &attrs, ckpt.Aggregation(), intervalStart, intervalEnd) - - value, err := ckpt.Sum() - require.NoError(t, err) - - if m, err := sumPoint(record, value, record.StartTime(), record.EndTime(), aggregation.CumulativeTemporality, true); assert.NoError(t, err) { - assert.Nil(t, m.GetGauge()) - assert.Equal(t, &metricpb.Sum{ - AggregationTemporality: otelCumulative, - IsMonotonic: true, - DataPoints: []*metricpb.NumberDataPoint{{ - StartTimeUnixNano: uint64(intervalStart.UnixNano()), - TimeUnixNano: uint64(intervalEnd.UnixNano()), - Attributes: []*commonpb.KeyValue{ - { - Key: "one", - Value: &commonpb.AnyValue{Value: &commonpb.AnyValue_StringValue{StringValue: "1"}}, - }, - }, - Value: &metricpb.NumberDataPoint_AsInt{ - AsInt: 1, - }, - }}, - }, m.GetSum()) - assert.Nil(t, m.GetHistogram()) - assert.Nil(t, m.GetSummary()) - } -} - -func TestSumFloatDataPoints(t *testing.T) { - desc := metrictest.NewDescriptor("", sdkapi.HistogramInstrumentKind, number.Float64Kind) - attrs := attribute.NewSet(attribute.String("one", "1")) - sums := sum.New(2) - s, ckpt := &sums[0], &sums[1] - - assert.NoError(t, s.Update(context.Background(), number.NewFloat64Number(1), &desc)) - require.NoError(t, s.SynchronizedMove(ckpt, &desc)) - record := export.NewRecord(&desc, &attrs, ckpt.Aggregation(), intervalStart, intervalEnd) - value, err := ckpt.Sum() - require.NoError(t, err) - - if m, err := sumPoint(record, value, record.StartTime(), record.EndTime(), aggregation.DeltaTemporality, false); assert.NoError(t, err) { - assert.Nil(t, m.GetGauge()) - assert.Equal(t, &metricpb.Sum{ - IsMonotonic: false, - AggregationTemporality: otelDelta, - DataPoints: []*metricpb.NumberDataPoint{{ - Value: &metricpb.NumberDataPoint_AsDouble{ - AsDouble: 1.0, - }, - StartTimeUnixNano: uint64(intervalStart.UnixNano()), - TimeUnixNano: uint64(intervalEnd.UnixNano()), - Attributes: []*commonpb.KeyValue{ - { - Key: "one", - Value: &commonpb.AnyValue{Value: &commonpb.AnyValue_StringValue{StringValue: "1"}}, - }, - }, - }}}, m.GetSum()) - assert.Nil(t, m.GetHistogram()) - assert.Nil(t, m.GetSummary()) - } -} - -func TestLastValueIntDataPoints(t *testing.T) { - desc := metrictest.NewDescriptor("", sdkapi.HistogramInstrumentKind, number.Int64Kind) - attrs := attribute.NewSet(attribute.String("one", "1")) - lvs := lastvalue.New(2) - lv, ckpt := &lvs[0], &lvs[1] - - assert.NoError(t, lv.Update(context.Background(), number.Number(100), &desc)) - require.NoError(t, lv.SynchronizedMove(ckpt, &desc)) - record := export.NewRecord(&desc, &attrs, ckpt.Aggregation(), intervalStart, intervalEnd) - value, timestamp, err := ckpt.LastValue() - require.NoError(t, err) - - if m, err := gaugePoint(record, value, time.Time{}, timestamp); assert.NoError(t, err) { - assert.Equal(t, []*metricpb.NumberDataPoint{{ - StartTimeUnixNano: 0, - TimeUnixNano: uint64(timestamp.UnixNano()), - Attributes: []*commonpb.KeyValue{ - { - Key: "one", - Value: &commonpb.AnyValue{Value: &commonpb.AnyValue_StringValue{StringValue: "1"}}, - }, - }, - Value: &metricpb.NumberDataPoint_AsInt{ - AsInt: 100, - }, - }}, 
m.GetGauge().DataPoints) - assert.Nil(t, m.GetSum()) - assert.Nil(t, m.GetHistogram()) - assert.Nil(t, m.GetSummary()) - } -} - -func TestSumErrUnknownValueType(t *testing.T) { - desc := metrictest.NewDescriptor("", sdkapi.HistogramInstrumentKind, number.Kind(-1)) - attrs := attribute.NewSet() - s := &sum.New(1)[0] - record := export.NewRecord(&desc, &attrs, s, intervalStart, intervalEnd) - value, err := s.Sum() - require.NoError(t, err) - - _, err = sumPoint(record, value, record.StartTime(), record.EndTime(), aggregation.CumulativeTemporality, true) - assert.Error(t, err) - if !errors.Is(err, ErrUnknownValueType) { - t.Errorf("expected ErrUnknownValueType, got %v", err) - } -} - -type testAgg struct { - kind aggregation.Kind - agg aggregation.Aggregation -} - -func (t *testAgg) Kind() aggregation.Kind { - return t.kind -} - -func (t *testAgg) Aggregation() aggregation.Aggregation { - return t.agg -} - -// None of these three are used: - -func (t *testAgg) Update(context.Context, number.Number, *sdkapi.Descriptor) error { - return nil -} -func (t *testAgg) SynchronizedMove(aggregator.Aggregator, *sdkapi.Descriptor) error { - return nil -} -func (t *testAgg) Merge(aggregator.Aggregator, *sdkapi.Descriptor) error { - return nil -} - -type testErrSum struct { - err error -} - -type testErrLastValue struct { - err error -} - -func (te *testErrLastValue) LastValue() (number.Number, time.Time, error) { - return 0, time.Time{}, te.err -} -func (te *testErrLastValue) Kind() aggregation.Kind { - return aggregation.LastValueKind -} - -func (te *testErrSum) Sum() (number.Number, error) { - return 0, te.err -} -func (te *testErrSum) Kind() aggregation.Kind { - return aggregation.SumKind -} - -var _ aggregator.Aggregator = &testAgg{} -var _ aggregation.Aggregation = &testAgg{} -var _ aggregation.Sum = &testErrSum{} -var _ aggregation.LastValue = &testErrLastValue{} - -func TestRecordAggregatorIncompatibleErrors(t *testing.T) { - makeMpb := func(kind aggregation.Kind, agg aggregation.Aggregation) (*metricpb.Metric, error) { - desc := metrictest.NewDescriptor("things", sdkapi.CounterInstrumentKind, number.Int64Kind) - attrs := attribute.NewSet() - test := &testAgg{ - kind: kind, - agg: agg, - } - return Record(aggregation.CumulativeTemporalitySelector(), export.NewRecord(&desc, &attrs, test, intervalStart, intervalEnd)) - } - - mpb, err := makeMpb(aggregation.SumKind, &lastvalue.New(1)[0]) - - require.Error(t, err) - require.Nil(t, mpb) - require.True(t, errors.Is(err, ErrIncompatibleAgg)) - - mpb, err = makeMpb(aggregation.LastValueKind, &sum.New(1)[0]) - - require.Error(t, err) - require.Nil(t, mpb) - require.True(t, errors.Is(err, ErrIncompatibleAgg)) -} - -func TestRecordAggregatorUnexpectedErrors(t *testing.T) { - makeMpb := func(kind aggregation.Kind, agg aggregation.Aggregation) (*metricpb.Metric, error) { - desc := metrictest.NewDescriptor("things", sdkapi.CounterInstrumentKind, number.Int64Kind) - attrs := attribute.NewSet() - return Record(aggregation.CumulativeTemporalitySelector(), export.NewRecord(&desc, &attrs, agg, intervalStart, intervalEnd)) - } - - errEx := fmt.Errorf("timeout") - - mpb, err := makeMpb(aggregation.SumKind, &testErrSum{errEx}) - - require.Error(t, err) - require.Nil(t, mpb) - require.True(t, errors.Is(err, errEx)) - - mpb, err = makeMpb(aggregation.LastValueKind, &testErrLastValue{errEx}) - - require.Error(t, err) - require.Nil(t, mpb) - require.True(t, errors.Is(err, errEx)) -} diff --git a/exporters/otlp/otlpmetric/internal/metrictransform/resource_test.go 
b/exporters/otlp/otlpmetric/internal/metrictransform/resource_test.go deleted file mode 100644 index 016d2f1e019..00000000000 --- a/exporters/otlp/otlpmetric/internal/metrictransform/resource_test.go +++ /dev/null @@ -1,48 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package metrictransform - -import ( - "testing" - - "github.com/stretchr/testify/assert" - - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/sdk/resource" -) - -func TestNilResource(t *testing.T) { - assert.Empty(t, Resource(nil)) -} - -func TestEmptyResource(t *testing.T) { - assert.Empty(t, Resource(&resource.Resource{})) -} - -/* -* This does not include any testing on the ordering of Resource Attributes. -* They are stored as a map internally to the Resource and their order is not -* guaranteed. - */ - -func TestResourceAttributes(t *testing.T) { - attrs := []attribute.KeyValue{attribute.Int("one", 1), attribute.Int("two", 2)} - - got := Resource(resource.NewSchemaless(attrs...)).GetAttributes() - if !assert.Len(t, attrs, 2) { - return - } - assert.ElementsMatch(t, KeyValues(attrs), got) -} diff --git a/exporters/otlp/otlpmetric/internal/otlpconfig/envconfig.go b/exporters/otlp/otlpmetric/internal/oconf/envconfig.go similarity index 97% rename from exporters/otlp/otlpmetric/internal/otlpconfig/envconfig.go rename to exporters/otlp/otlpmetric/internal/oconf/envconfig.go index 2576a2c75a5..96c6fc1753a 100644 --- a/exporters/otlp/otlpmetric/internal/otlpconfig/envconfig.go +++ b/exporters/otlp/otlpmetric/internal/oconf/envconfig.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -package otlpconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/otlpconfig" +package oconf // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/oconf" import ( "crypto/tls" diff --git a/exporters/otlp/otlpmetric/internal/otlpconfig/options.go b/exporters/otlp/otlpmetric/internal/oconf/options.go similarity index 98% rename from exporters/otlp/otlpmetric/internal/otlpconfig/options.go rename to exporters/otlp/otlpmetric/internal/oconf/options.go index f2c8ee5d21a..f5a82d6db17 100644 --- a/exporters/otlp/otlpmetric/internal/otlpconfig/options.go +++ b/exporters/otlp/otlpmetric/internal/oconf/options.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-package otlpconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/otlpconfig" +package oconf // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/oconf" import ( "crypto/tls" diff --git a/exporters/otlp/otlpmetric/internal/otlpconfig/options_test.go b/exporters/otlp/otlpmetric/internal/oconf/options_test.go similarity index 70% rename from exporters/otlp/otlpmetric/internal/otlpconfig/options_test.go rename to exporters/otlp/otlpmetric/internal/oconf/options_test.go index 7687e3fdbb3..e436eb5b07e 100644 --- a/exporters/otlp/otlpmetric/internal/otlpconfig/options_test.go +++ b/exporters/otlp/otlpmetric/internal/oconf/options_test.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -package otlpconfig_test +package oconf_test import ( "errors" @@ -22,7 +22,7 @@ import ( "github.com/stretchr/testify/assert" "go.opentelemetry.io/otel/exporters/otlp/internal/envconfig" - "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/otlpconfig" + "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/oconf" ) const ( @@ -64,25 +64,25 @@ func (f *fileReader) readFile(filename string) ([]byte, error) { } func TestConfigs(t *testing.T) { - tlsCert, err := otlpconfig.CreateTLSConfig([]byte(WeakCertificate)) + tlsCert, err := oconf.CreateTLSConfig([]byte(WeakCertificate)) assert.NoError(t, err) tests := []struct { name string - opts []otlpconfig.GenericOption + opts []oconf.GenericOption env env fileReader fileReader - asserts func(t *testing.T, c *otlpconfig.Config, grpcOption bool) + asserts func(t *testing.T, c *oconf.Config, grpcOption bool) }{ { name: "Test default configs", - asserts: func(t *testing.T, c *otlpconfig.Config, grpcOption bool) { + asserts: func(t *testing.T, c *oconf.Config, grpcOption bool) { if grpcOption { assert.Equal(t, "localhost:4317", c.Metrics.Endpoint) } else { assert.Equal(t, "localhost:4318", c.Metrics.Endpoint) } - assert.Equal(t, otlpconfig.NoCompression, c.Metrics.Compression) + assert.Equal(t, oconf.NoCompression, c.Metrics.Compression) assert.Equal(t, map[string]string(nil), c.Metrics.Headers) assert.Equal(t, 10*time.Second, c.Metrics.Timeout) }, @@ -91,10 +91,10 @@ func TestConfigs(t *testing.T) { // Endpoint Tests { name: "Test With Endpoint", - opts: []otlpconfig.GenericOption{ - otlpconfig.WithEndpoint("someendpoint"), + opts: []oconf.GenericOption{ + oconf.WithEndpoint("someendpoint"), }, - asserts: func(t *testing.T, c *otlpconfig.Config, grpcOption bool) { + asserts: func(t *testing.T, c *oconf.Config, grpcOption bool) { assert.Equal(t, "someendpoint", c.Metrics.Endpoint) }, }, @@ -103,7 +103,7 @@ func TestConfigs(t *testing.T) { env: map[string]string{ "OTEL_EXPORTER_OTLP_ENDPOINT": "https://env.endpoint/prefix", }, - asserts: func(t *testing.T, c *otlpconfig.Config, grpcOption bool) { + asserts: func(t *testing.T, c *oconf.Config, grpcOption bool) { assert.False(t, c.Metrics.Insecure) if grpcOption { assert.Equal(t, "env.endpoint/prefix", c.Metrics.Endpoint) @@ -119,7 +119,7 @@ func TestConfigs(t *testing.T) { "OTEL_EXPORTER_OTLP_ENDPOINT": "https://overrode.by.signal.specific/env/var", "OTEL_EXPORTER_OTLP_METRICS_ENDPOINT": "http://env.metrics.endpoint", }, - asserts: func(t *testing.T, c *otlpconfig.Config, grpcOption bool) { + asserts: func(t *testing.T, c *oconf.Config, grpcOption bool) { assert.True(t, c.Metrics.Insecure) assert.Equal(t, "env.metrics.endpoint", c.Metrics.Endpoint) if !grpcOption { @@ -129,13 +129,13 @@ func 
TestConfigs(t *testing.T) { }, { name: "Test Mixed Environment and With Endpoint", - opts: []otlpconfig.GenericOption{ - otlpconfig.WithEndpoint("metrics_endpoint"), + opts: []oconf.GenericOption{ + oconf.WithEndpoint("metrics_endpoint"), }, env: map[string]string{ "OTEL_EXPORTER_OTLP_ENDPOINT": "env_endpoint", }, - asserts: func(t *testing.T, c *otlpconfig.Config, grpcOption bool) { + asserts: func(t *testing.T, c *oconf.Config, grpcOption bool) { assert.Equal(t, "metrics_endpoint", c.Metrics.Endpoint) }, }, @@ -144,7 +144,7 @@ func TestConfigs(t *testing.T) { env: map[string]string{ "OTEL_EXPORTER_OTLP_ENDPOINT": "http://env_endpoint", }, - asserts: func(t *testing.T, c *otlpconfig.Config, grpcOption bool) { + asserts: func(t *testing.T, c *oconf.Config, grpcOption bool) { assert.Equal(t, "env_endpoint", c.Metrics.Endpoint) assert.Equal(t, true, c.Metrics.Insecure) }, @@ -154,7 +154,7 @@ func TestConfigs(t *testing.T) { env: map[string]string{ "OTEL_EXPORTER_OTLP_ENDPOINT": " http://env_endpoint ", }, - asserts: func(t *testing.T, c *otlpconfig.Config, grpcOption bool) { + asserts: func(t *testing.T, c *oconf.Config, grpcOption bool) { assert.Equal(t, "env_endpoint", c.Metrics.Endpoint) assert.Equal(t, true, c.Metrics.Insecure) }, @@ -164,7 +164,7 @@ func TestConfigs(t *testing.T) { env: map[string]string{ "OTEL_EXPORTER_OTLP_ENDPOINT": "https://env_endpoint", }, - asserts: func(t *testing.T, c *otlpconfig.Config, grpcOption bool) { + asserts: func(t *testing.T, c *oconf.Config, grpcOption bool) { assert.Equal(t, "env_endpoint", c.Metrics.Endpoint) assert.Equal(t, false, c.Metrics.Insecure) }, @@ -175,7 +175,7 @@ func TestConfigs(t *testing.T) { "OTEL_EXPORTER_OTLP_ENDPOINT": "HTTPS://overrode_by_signal_specific", "OTEL_EXPORTER_OTLP_METRICS_ENDPOINT": "HtTp://env_metrics_endpoint", }, - asserts: func(t *testing.T, c *otlpconfig.Config, grpcOption bool) { + asserts: func(t *testing.T, c *oconf.Config, grpcOption bool) { assert.Equal(t, "env_metrics_endpoint", c.Metrics.Endpoint) assert.Equal(t, true, c.Metrics.Insecure) }, @@ -184,7 +184,7 @@ func TestConfigs(t *testing.T) { // Certificate tests { name: "Test Default Certificate", - asserts: func(t *testing.T, c *otlpconfig.Config, grpcOption bool) { + asserts: func(t *testing.T, c *oconf.Config, grpcOption bool) { if grpcOption { assert.NotNil(t, c.Metrics.GRPCCredentials) } else { @@ -194,10 +194,10 @@ func TestConfigs(t *testing.T) { }, { name: "Test With Certificate", - opts: []otlpconfig.GenericOption{ - otlpconfig.WithTLSClientConfig(tlsCert), + opts: []oconf.GenericOption{ + oconf.WithTLSClientConfig(tlsCert), }, - asserts: func(t *testing.T, c *otlpconfig.Config, grpcOption bool) { + asserts: func(t *testing.T, c *oconf.Config, grpcOption bool) { if grpcOption { //TODO: make sure gRPC's credentials actually works assert.NotNil(t, c.Metrics.GRPCCredentials) @@ -215,7 +215,7 @@ func TestConfigs(t *testing.T) { fileReader: fileReader{ "cert_path": []byte(WeakCertificate), }, - asserts: func(t *testing.T, c *otlpconfig.Config, grpcOption bool) { + asserts: func(t *testing.T, c *oconf.Config, grpcOption bool) { if grpcOption { assert.NotNil(t, c.Metrics.GRPCCredentials) } else { @@ -234,7 +234,7 @@ func TestConfigs(t *testing.T) { "cert_path": []byte(WeakCertificate), "invalid_cert": []byte("invalid certificate file."), }, - asserts: func(t *testing.T, c *otlpconfig.Config, grpcOption bool) { + asserts: func(t *testing.T, c *oconf.Config, grpcOption bool) { if grpcOption { assert.NotNil(t, c.Metrics.GRPCCredentials) } else { @@ 
-245,14 +245,14 @@ func TestConfigs(t *testing.T) { }, { name: "Test Mixed Environment and With Certificate", - opts: []otlpconfig.GenericOption{}, + opts: []oconf.GenericOption{}, env: map[string]string{ "OTEL_EXPORTER_OTLP_CERTIFICATE": "cert_path", }, fileReader: fileReader{ "cert_path": []byte(WeakCertificate), }, - asserts: func(t *testing.T, c *otlpconfig.Config, grpcOption bool) { + asserts: func(t *testing.T, c *oconf.Config, grpcOption bool) { if grpcOption { assert.NotNil(t, c.Metrics.GRPCCredentials) } else { @@ -265,17 +265,17 @@ func TestConfigs(t *testing.T) { // Headers tests { name: "Test With Headers", - opts: []otlpconfig.GenericOption{ - otlpconfig.WithHeaders(map[string]string{"h1": "v1"}), + opts: []oconf.GenericOption{ + oconf.WithHeaders(map[string]string{"h1": "v1"}), }, - asserts: func(t *testing.T, c *otlpconfig.Config, grpcOption bool) { + asserts: func(t *testing.T, c *oconf.Config, grpcOption bool) { assert.Equal(t, map[string]string{"h1": "v1"}, c.Metrics.Headers) }, }, { name: "Test Environment Headers", env: map[string]string{"OTEL_EXPORTER_OTLP_HEADERS": "h1=v1,h2=v2"}, - asserts: func(t *testing.T, c *otlpconfig.Config, grpcOption bool) { + asserts: func(t *testing.T, c *oconf.Config, grpcOption bool) { assert.Equal(t, map[string]string{"h1": "v1", "h2": "v2"}, c.Metrics.Headers) }, }, @@ -285,17 +285,17 @@ func TestConfigs(t *testing.T) { "OTEL_EXPORTER_OTLP_HEADERS": "overrode_by_signal_specific", "OTEL_EXPORTER_OTLP_METRICS_HEADERS": "h1=v1,h2=v2", }, - asserts: func(t *testing.T, c *otlpconfig.Config, grpcOption bool) { + asserts: func(t *testing.T, c *oconf.Config, grpcOption bool) { assert.Equal(t, map[string]string{"h1": "v1", "h2": "v2"}, c.Metrics.Headers) }, }, { name: "Test Mixed Environment and With Headers", env: map[string]string{"OTEL_EXPORTER_OTLP_HEADERS": "h1=v1,h2=v2"}, - opts: []otlpconfig.GenericOption{ - otlpconfig.WithHeaders(map[string]string{"m1": "mv1"}), + opts: []oconf.GenericOption{ + oconf.WithHeaders(map[string]string{"m1": "mv1"}), }, - asserts: func(t *testing.T, c *otlpconfig.Config, grpcOption bool) { + asserts: func(t *testing.T, c *oconf.Config, grpcOption bool) { assert.Equal(t, map[string]string{"m1": "mv1"}, c.Metrics.Headers) }, }, @@ -303,11 +303,11 @@ func TestConfigs(t *testing.T) { // Compression Tests { name: "Test With Compression", - opts: []otlpconfig.GenericOption{ - otlpconfig.WithCompression(otlpconfig.GzipCompression), + opts: []oconf.GenericOption{ + oconf.WithCompression(oconf.GzipCompression), }, - asserts: func(t *testing.T, c *otlpconfig.Config, grpcOption bool) { - assert.Equal(t, otlpconfig.GzipCompression, c.Metrics.Compression) + asserts: func(t *testing.T, c *oconf.Config, grpcOption bool) { + assert.Equal(t, oconf.GzipCompression, c.Metrics.Compression) }, }, { @@ -315,8 +315,8 @@ func TestConfigs(t *testing.T) { env: map[string]string{ "OTEL_EXPORTER_OTLP_COMPRESSION": "gzip", }, - asserts: func(t *testing.T, c *otlpconfig.Config, grpcOption bool) { - assert.Equal(t, otlpconfig.GzipCompression, c.Metrics.Compression) + asserts: func(t *testing.T, c *oconf.Config, grpcOption bool) { + assert.Equal(t, oconf.GzipCompression, c.Metrics.Compression) }, }, { @@ -324,30 +324,30 @@ func TestConfigs(t *testing.T) { env: map[string]string{ "OTEL_EXPORTER_OTLP_METRICS_COMPRESSION": "gzip", }, - asserts: func(t *testing.T, c *otlpconfig.Config, grpcOption bool) { - assert.Equal(t, otlpconfig.GzipCompression, c.Metrics.Compression) + asserts: func(t *testing.T, c *oconf.Config, grpcOption bool) { + 
assert.Equal(t, oconf.GzipCompression, c.Metrics.Compression) }, }, { name: "Test Mixed Environment and With Compression", - opts: []otlpconfig.GenericOption{ - otlpconfig.WithCompression(otlpconfig.NoCompression), + opts: []oconf.GenericOption{ + oconf.WithCompression(oconf.NoCompression), }, env: map[string]string{ "OTEL_EXPORTER_OTLP_METRICS_COMPRESSION": "gzip", }, - asserts: func(t *testing.T, c *otlpconfig.Config, grpcOption bool) { - assert.Equal(t, otlpconfig.NoCompression, c.Metrics.Compression) + asserts: func(t *testing.T, c *oconf.Config, grpcOption bool) { + assert.Equal(t, oconf.NoCompression, c.Metrics.Compression) }, }, // Timeout Tests { name: "Test With Timeout", - opts: []otlpconfig.GenericOption{ - otlpconfig.WithTimeout(time.Duration(5 * time.Second)), + opts: []oconf.GenericOption{ + oconf.WithTimeout(time.Duration(5 * time.Second)), }, - asserts: func(t *testing.T, c *otlpconfig.Config, grpcOption bool) { + asserts: func(t *testing.T, c *oconf.Config, grpcOption bool) { assert.Equal(t, 5*time.Second, c.Metrics.Timeout) }, }, @@ -356,7 +356,7 @@ func TestConfigs(t *testing.T) { env: map[string]string{ "OTEL_EXPORTER_OTLP_TIMEOUT": "15000", }, - asserts: func(t *testing.T, c *otlpconfig.Config, grpcOption bool) { + asserts: func(t *testing.T, c *oconf.Config, grpcOption bool) { assert.Equal(t, c.Metrics.Timeout, 15*time.Second) }, }, @@ -366,7 +366,7 @@ func TestConfigs(t *testing.T) { "OTEL_EXPORTER_OTLP_TIMEOUT": "15000", "OTEL_EXPORTER_OTLP_METRICS_TIMEOUT": "28000", }, - asserts: func(t *testing.T, c *otlpconfig.Config, grpcOption bool) { + asserts: func(t *testing.T, c *oconf.Config, grpcOption bool) { assert.Equal(t, c.Metrics.Timeout, 28*time.Second) }, }, @@ -376,10 +376,10 @@ func TestConfigs(t *testing.T) { "OTEL_EXPORTER_OTLP_TIMEOUT": "15000", "OTEL_EXPORTER_OTLP_METRICS_TIMEOUT": "28000", }, - opts: []otlpconfig.GenericOption{ - otlpconfig.WithTimeout(5 * time.Second), + opts: []oconf.GenericOption{ + oconf.WithTimeout(5 * time.Second), }, - asserts: func(t *testing.T, c *otlpconfig.Config, grpcOption bool) { + asserts: func(t *testing.T, c *oconf.Config, grpcOption bool) { assert.Equal(t, c.Metrics.Timeout, 5*time.Second) }, }, @@ -387,37 +387,37 @@ func TestConfigs(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - origEOR := otlpconfig.DefaultEnvOptionsReader - otlpconfig.DefaultEnvOptionsReader = envconfig.EnvOptionsReader{ + origEOR := oconf.DefaultEnvOptionsReader + oconf.DefaultEnvOptionsReader = envconfig.EnvOptionsReader{ GetEnv: tt.env.getEnv, ReadFile: tt.fileReader.readFile, Namespace: "OTEL_EXPORTER_OTLP", } - t.Cleanup(func() { otlpconfig.DefaultEnvOptionsReader = origEOR }) + t.Cleanup(func() { oconf.DefaultEnvOptionsReader = origEOR }) // Tests Generic options as HTTP Options - cfg := otlpconfig.NewHTTPConfig(asHTTPOptions(tt.opts)...) + cfg := oconf.NewHTTPConfig(asHTTPOptions(tt.opts)...) tt.asserts(t, &cfg, false) // Tests Generic options as gRPC Options - cfg = otlpconfig.NewGRPCConfig(asGRPCOptions(tt.opts)...) + cfg = oconf.NewGRPCConfig(asGRPCOptions(tt.opts)...) 
tt.asserts(t, &cfg, true) }) } } -func asHTTPOptions(opts []otlpconfig.GenericOption) []otlpconfig.HTTPOption { - converted := make([]otlpconfig.HTTPOption, len(opts)) +func asHTTPOptions(opts []oconf.GenericOption) []oconf.HTTPOption { + converted := make([]oconf.HTTPOption, len(opts)) for i, o := range opts { - converted[i] = otlpconfig.NewHTTPOption(o.ApplyHTTPOption) + converted[i] = oconf.NewHTTPOption(o.ApplyHTTPOption) } return converted } -func asGRPCOptions(opts []otlpconfig.GenericOption) []otlpconfig.GRPCOption { - converted := make([]otlpconfig.GRPCOption, len(opts)) +func asGRPCOptions(opts []oconf.GenericOption) []oconf.GRPCOption { + converted := make([]oconf.GRPCOption, len(opts)) for i, o := range opts { - converted[i] = otlpconfig.NewGRPCOption(o.ApplyGRPCOption) + converted[i] = oconf.NewGRPCOption(o.ApplyGRPCOption) } return converted } diff --git a/exporters/otlp/otlpmetric/internal/otlpconfig/optiontypes.go b/exporters/otlp/otlpmetric/internal/oconf/optiontypes.go similarity index 95% rename from exporters/otlp/otlpmetric/internal/otlpconfig/optiontypes.go rename to exporters/otlp/otlpmetric/internal/oconf/optiontypes.go index 1d6f83f366d..e878ee74104 100644 --- a/exporters/otlp/otlpmetric/internal/otlpconfig/optiontypes.go +++ b/exporters/otlp/otlpmetric/internal/oconf/optiontypes.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -package otlpconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/otlpconfig" +package oconf // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/oconf" import "time" diff --git a/exporters/otlp/otlpmetric/internal/otlpconfig/tls.go b/exporters/otlp/otlpmetric/internal/oconf/tls.go similarity index 92% rename from exporters/otlp/otlpmetric/internal/otlpconfig/tls.go rename to exporters/otlp/otlpmetric/internal/oconf/tls.go index efbe0f6f428..44bbe326860 100644 --- a/exporters/otlp/otlpmetric/internal/otlpconfig/tls.go +++ b/exporters/otlp/otlpmetric/internal/oconf/tls.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -package otlpconfig // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/otlpconfig" +package oconf // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/oconf" import ( "crypto/tls" diff --git a/exporters/otlp/otlpmetric/internal/otest/client.go b/exporters/otlp/otlpmetric/internal/otest/client.go new file mode 100644 index 00000000000..2ade400522d --- /dev/null +++ b/exporters/otlp/otlpmetric/internal/otest/client.go @@ -0,0 +1,254 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
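The renamed option tests above encode a precedence order: explicit With* options win over signal-specific OTEL_EXPORTER_OTLP_METRICS_* variables, which in turn win over the generic OTEL_EXPORTER_OTLP_* ones, with hard-coded defaults last. A minimal sketch of that resolution order for the endpoint (resolveEndpoint is a hypothetical helper, not the oconf implementation):

package main

import (
	"fmt"
	"os"
)

// resolveEndpoint picks an endpoint with option > signal-specific env >
// generic env > default precedence.
func resolveEndpoint(opt string) string {
	if opt != "" {
		return opt // Explicit option always wins.
	}
	if v := os.Getenv("OTEL_EXPORTER_OTLP_METRICS_ENDPOINT"); v != "" {
		return v // Signal-specific env var overrides the generic one.
	}
	if v := os.Getenv("OTEL_EXPORTER_OTLP_ENDPOINT"); v != "" {
		return v
	}
	return "localhost:4318" // Default HTTP endpoint asserted in the tests.
}

func main() {
	_ = os.Setenv("OTEL_EXPORTER_OTLP_ENDPOINT", "env_endpoint")
	fmt.Println(resolveEndpoint(""))                 // env_endpoint
	fmt.Println(resolveEndpoint("metrics_endpoint")) // metrics_endpoint
}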
+ +//go:build go1.18 +// +build go1.18 + +package otest // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/otest" + +import ( + "context" + "testing" + "time" + + "github.com/google/go-cmp/cmp" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "google.golang.org/protobuf/proto" + + "go.opentelemetry.io/otel/exporters/otlp/otlpmetric" + "go.opentelemetry.io/otel/metric/unit" + semconv "go.opentelemetry.io/otel/semconv/v1.10.0" + cpb "go.opentelemetry.io/proto/otlp/common/v1" + mpb "go.opentelemetry.io/proto/otlp/metrics/v1" + rpb "go.opentelemetry.io/proto/otlp/resource/v1" +) + +var ( + // Sat Jan 01 2000 00:00:00 GMT+0000. + start = time.Date(2000, time.January, 01, 0, 0, 0, 0, time.FixedZone("GMT", 0)) + end = start.Add(30 * time.Second) + + kvAlice = &cpb.KeyValue{Key: "user", Value: &cpb.AnyValue{ + Value: &cpb.AnyValue_StringValue{StringValue: "alice"}, + }} + kvBob = &cpb.KeyValue{Key: "user", Value: &cpb.AnyValue{ + Value: &cpb.AnyValue_StringValue{StringValue: "bob"}, + }} + kvSrvName = &cpb.KeyValue{Key: "service.name", Value: &cpb.AnyValue{ + Value: &cpb.AnyValue_StringValue{StringValue: "test server"}, + }} + kvSrvVer = &cpb.KeyValue{Key: "service.version", Value: &cpb.AnyValue{ + Value: &cpb.AnyValue_StringValue{StringValue: "v0.1.0"}, + }} + + min, max, sum = 2.0, 4.0, 90.0 + hdp = []*mpb.HistogramDataPoint{{ + Attributes: []*cpb.KeyValue{kvAlice}, + StartTimeUnixNano: uint64(start.UnixNano()), + TimeUnixNano: uint64(end.UnixNano()), + Count: 30, + Sum: &sum, + ExplicitBounds: []float64{1, 5}, + BucketCounts: []uint64{0, 30, 0}, + Min: &min, + Max: &max, + }} + + hist = &mpb.Histogram{ + AggregationTemporality: mpb.AggregationTemporality_AGGREGATION_TEMPORALITY_DELTA, + DataPoints: hdp, + } + + dPtsInt64 = []*mpb.NumberDataPoint{ + { + Attributes: []*cpb.KeyValue{kvAlice}, + StartTimeUnixNano: uint64(start.UnixNano()), + TimeUnixNano: uint64(end.UnixNano()), + Value: &mpb.NumberDataPoint_AsInt{AsInt: 1}, + }, + { + Attributes: []*cpb.KeyValue{kvBob}, + StartTimeUnixNano: uint64(start.UnixNano()), + TimeUnixNano: uint64(end.UnixNano()), + Value: &mpb.NumberDataPoint_AsInt{AsInt: 2}, + }, + } + dPtsFloat64 = []*mpb.NumberDataPoint{ + { + Attributes: []*cpb.KeyValue{kvAlice}, + StartTimeUnixNano: uint64(start.UnixNano()), + TimeUnixNano: uint64(end.UnixNano()), + Value: &mpb.NumberDataPoint_AsDouble{AsDouble: 1.0}, + }, + { + Attributes: []*cpb.KeyValue{kvBob}, + StartTimeUnixNano: uint64(start.UnixNano()), + TimeUnixNano: uint64(end.UnixNano()), + Value: &mpb.NumberDataPoint_AsDouble{AsDouble: 2.0}, + }, + } + + sumInt64 = &mpb.Sum{ + AggregationTemporality: mpb.AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE, + IsMonotonic: true, + DataPoints: dPtsInt64, + } + sumFloat64 = &mpb.Sum{ + AggregationTemporality: mpb.AggregationTemporality_AGGREGATION_TEMPORALITY_DELTA, + IsMonotonic: false, + DataPoints: dPtsFloat64, + } + + gaugeInt64 = &mpb.Gauge{DataPoints: dPtsInt64} + gaugeFloat64 = &mpb.Gauge{DataPoints: dPtsFloat64} + + metrics = []*mpb.Metric{ + { + Name: "int64-gauge", + Description: "Gauge with int64 values", + Unit: string(unit.Dimensionless), + Data: &mpb.Metric_Gauge{Gauge: gaugeInt64}, + }, + { + Name: "float64-gauge", + Description: "Gauge with float64 values", + Unit: string(unit.Dimensionless), + Data: &mpb.Metric_Gauge{Gauge: gaugeFloat64}, + }, + { + Name: "int64-sum", + Description: "Sum with int64 values", + Unit: string(unit.Dimensionless), + Data: &mpb.Metric_Sum{Sum: sumInt64}, + }, + { + Name: 
"float64-sum", + Description: "Sum with float64 values", + Unit: string(unit.Dimensionless), + Data: &mpb.Metric_Sum{Sum: sumFloat64}, + }, + { + Name: "histogram", + Description: "Histogram", + Unit: string(unit.Dimensionless), + Data: &mpb.Metric_Histogram{Histogram: hist}, + }, + } + + scope = &cpb.InstrumentationScope{ + Name: "test/code/path", + Version: "v0.1.0", + } + scopeMetrics = []*mpb.ScopeMetrics{{ + Scope: scope, + Metrics: metrics, + SchemaUrl: semconv.SchemaURL, + }} + + res = &rpb.Resource{ + Attributes: []*cpb.KeyValue{kvSrvName, kvSrvVer}, + } + resourceMetrics = &mpb.ResourceMetrics{ + Resource: res, + ScopeMetrics: scopeMetrics, + SchemaUrl: semconv.SchemaURL, + } +) + +// ClientFactory is a function that when called returns a +// otlpmetric.Client implementation that is connected to also returned +// Collector implementation. The Client is ready to upload metric data to the +// Collector which is ready to store that data. +type ClientFactory func() (otlpmetric.Client, Collector) + +// RunClientTests runs a suite of Client integration tests. For example: +// +// t.Run("Integration", RunClientTests(factory)) +func RunClientTests(f ClientFactory) func(*testing.T) { + return func(t *testing.T) { + t.Run("ClientHonorsContextErrors", func(t *testing.T) { + t.Run("Shutdown", testCtxErrs(func() func(context.Context) error { + c, _ := f() + return c.Shutdown + })) + + t.Run("ForceFlush", testCtxErrs(func() func(context.Context) error { + c, _ := f() + return c.ForceFlush + })) + + t.Run("UploadMetrics", testCtxErrs(func() func(context.Context) error { + c, _ := f() + return func(ctx context.Context) error { + return c.UploadMetrics(ctx, nil) + } + })) + }) + + t.Run("ForceFlushFlushes", func(t *testing.T) { + ctx := context.Background() + client, collector := f() + require.NoError(t, client.UploadMetrics(ctx, resourceMetrics)) + + require.NoError(t, client.ForceFlush(ctx)) + rm := collector.Collect().Dump() + // Data correctness is not important, just it was received. 
+ require.Greater(t, len(rm), 0, "no data uploaded") + + require.NoError(t, client.Shutdown(ctx)) + rm = collector.Collect().Dump() + assert.Len(t, rm, 0, "client did not flush all data") + }) + + t.Run("UploadMetrics", func(t *testing.T) { + ctx := context.Background() + client, coll := f() + + require.NoError(t, client.UploadMetrics(ctx, resourceMetrics)) + require.NoError(t, client.Shutdown(ctx)) + got := coll.Collect().Dump() + require.Len(t, got, 1, "upload of one ResourceMetrics") + diff := cmp.Diff(got[0], resourceMetrics, cmp.Comparer(proto.Equal)) + if diff != "" { + t.Fatalf("unexpected ResourceMetrics:\n%s", diff) + } + }) + } +} + +func testCtxErrs(factory func() func(context.Context) error) func(t *testing.T) { + return func(t *testing.T) { + t.Helper() + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + t.Run("DeadlineExceeded", func(t *testing.T) { + innerCtx, innerCancel := context.WithTimeout(ctx, time.Nanosecond) + t.Cleanup(innerCancel) + <-innerCtx.Done() + + f := factory() + assert.ErrorIs(t, f(innerCtx), context.DeadlineExceeded) + }) + + t.Run("Canceled", func(t *testing.T) { + innerCtx, innerCancel := context.WithCancel(ctx) + innerCancel() + + f := factory() + assert.ErrorIs(t, f(innerCtx), context.Canceled) + }) + } +} diff --git a/exporters/otlp/otlpmetric/internal/otest/client_test.go b/exporters/otlp/otlpmetric/internal/otest/client_test.go new file mode 100644 index 00000000000..1f4aac5610d --- /dev/null +++ b/exporters/otlp/otlpmetric/internal/otest/client_test.go @@ -0,0 +1,54 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
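The in-memory client below shows the harness exercising itself; an exporter module would instead hand RunClientTests a factory wired to a network collector like the GRPCCollector defined in collector.go further down. A hedged sketch of that wiring (newGRPCClient is a hypothetical constructor; the real one lives with the otlpmetricgrpc client code):

func TestGRPCClientIntegration(t *testing.T) {
	factory := func() (otlpmetric.Client, otest.Collector) {
		coll, err := otest.NewGRPCCollector("", nil) // Listen on an OS-chosen localhost port.
		if err != nil {
			t.Fatal(err)
		}
		t.Cleanup(coll.Shutdown)
		// newGRPCClient is assumed to return an otlpmetric.Client that
		// uploads to the given address.
		return newGRPCClient(t, coll.Addr().String()), coll
	}
	t.Run("Integration", otest.RunClientTests(factory))
}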
+
+//go:build go1.18
+// +build go1.18
+
+package otest // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/otest"
+
+import (
+	"context"
+	"testing"
+
+	"go.opentelemetry.io/otel/exporters/otlp/otlpmetric"
+	cpb "go.opentelemetry.io/proto/otlp/collector/metrics/v1"
+	mpb "go.opentelemetry.io/proto/otlp/metrics/v1"
+)
+
+type client struct {
+	storage *Storage
+}
+
+func (c *client) Collect() *Storage {
+	return c.storage
+}
+
+func (c *client) UploadMetrics(ctx context.Context, rm *mpb.ResourceMetrics) error {
+	c.storage.Add(&cpb.ExportMetricsServiceRequest{
+		ResourceMetrics: []*mpb.ResourceMetrics{rm},
+	})
+	return ctx.Err()
+}
+
+func (c *client) ForceFlush(ctx context.Context) error { return ctx.Err() }
+func (c *client) Shutdown(ctx context.Context) error   { return ctx.Err() }
+
+func TestClientTests(t *testing.T) {
+	factory := func() (otlpmetric.Client, Collector) {
+		c := &client{storage: NewStorage()}
+		return c, c
+	}
+
+	t.Run("Integration", RunClientTests(factory))
+}
diff --git a/exporters/otlp/otlpmetric/internal/otest/collector.go b/exporters/otlp/otlpmetric/internal/otest/collector.go
new file mode 100644
index 00000000000..3302c8cfc2a
--- /dev/null
+++ b/exporters/otlp/otlpmetric/internal/otest/collector.go
@@ -0,0 +1,428 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build go1.18
+// +build go1.18
+
+package otest // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/otest"
+
+import (
+	"bytes"
+	"compress/gzip"
+	"context"
+	"crypto/ecdsa"
+	"crypto/elliptic"
+	cryptorand "crypto/rand"
+	"crypto/tls"
+	"crypto/x509"
+	"crypto/x509/pkix" // nolint:depguard // This is for testing.
+	"encoding/pem"
+	"errors"
+	"fmt"
+	"io"
+	"math/big"
+	mathrand "math/rand"
+	"net"
+	"net/http"
+	"net/url"
+	"sync"
+	"time"
+
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/metadata"
+	"google.golang.org/protobuf/proto"
+
+	"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/oconf"
+	collpb "go.opentelemetry.io/proto/otlp/collector/metrics/v1"
+	mpb "go.opentelemetry.io/proto/otlp/metrics/v1"
+)
+
+// Collector is the collection target a Client sends metric uploads to.
+type Collector interface {
+	Collect() *Storage
+}
+
+// Storage stores uploaded OTLP metric data in their proto form.
+type Storage struct {
+	dataMu sync.Mutex
+	data   []*mpb.ResourceMetrics
+}
+
+// NewStorage returns a configured storage ready to store received requests.
+func NewStorage() *Storage {
+	return &Storage{}
+}
+
+// Add adds the request to the Storage.
+func (s *Storage) Add(request *collpb.ExportMetricsServiceRequest) {
+	s.dataMu.Lock()
+	defer s.dataMu.Unlock()
+	s.data = append(s.data, request.ResourceMetrics...)
+}
+
+// Dump returns all added ResourceMetrics and clears the storage.
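+// Dump is safe to call concurrently with Add; both synchronize on the same
+// internal mutex.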
+func (s *Storage) Dump() []*mpb.ResourceMetrics {
+	s.dataMu.Lock()
+	defer s.dataMu.Unlock()
+
+	var data []*mpb.ResourceMetrics
+	data, s.data = s.data, []*mpb.ResourceMetrics{}
+	return data
+}
+
+// GRPCCollector is an OTLP gRPC server that collects all requests it receives.
+type GRPCCollector struct {
+	collpb.UnimplementedMetricsServiceServer
+
+	headersMu sync.Mutex
+	headers   metadata.MD
+	storage   *Storage
+
+	errCh    <-chan error
+	listener net.Listener
+	srv      *grpc.Server
+}
+
+// NewGRPCCollector returns a *GRPCCollector that is listening at the provided
+// endpoint.
+//
+// If endpoint is an empty string, the returned collector will be listening on
+// the localhost interface at an OS-chosen port.
+//
+// If errCh is not nil, the collector will respond to Export calls with errors
+// sent on that channel. This means that if errCh is not nil, Export calls
+// will block until an error is received.
+func NewGRPCCollector(endpoint string, errCh <-chan error) (*GRPCCollector, error) {
+	if endpoint == "" {
+		endpoint = "localhost:0"
+	}
+
+	c := &GRPCCollector{
+		storage: NewStorage(),
+		errCh:   errCh,
+	}
+
+	var err error
+	c.listener, err = net.Listen("tcp", endpoint)
+	if err != nil {
+		return nil, err
+	}
+
+	c.srv = grpc.NewServer()
+	collpb.RegisterMetricsServiceServer(c.srv, c)
+	go func() { _ = c.srv.Serve(c.listener) }()
+
+	return c, nil
+}
+
+// Shutdown shuts down the gRPC server closing all open connections and
+// listeners immediately.
+func (c *GRPCCollector) Shutdown() { c.srv.Stop() }
+
+// Addr returns the net.Addr c is listening at.
+func (c *GRPCCollector) Addr() net.Addr {
+	return c.listener.Addr()
+}
+
+// Collect returns the Storage holding all collected requests.
+func (c *GRPCCollector) Collect() *Storage {
+	return c.storage
+}
+
+// Headers returns the headers received for all requests.
+func (c *GRPCCollector) Headers() map[string][]string {
+	// Makes a copy.
+	c.headersMu.Lock()
+	defer c.headersMu.Unlock()
+	return metadata.Join(c.headers)
+}
+
+// Export handles the export request.
+func (c *GRPCCollector) Export(ctx context.Context, req *collpb.ExportMetricsServiceRequest) (*collpb.ExportMetricsServiceResponse, error) {
+	c.storage.Add(req)
+
+	if h, ok := metadata.FromIncomingContext(ctx); ok {
+		c.headersMu.Lock()
+		c.headers = metadata.Join(c.headers, h)
+		c.headersMu.Unlock()
+	}
+
+	var err error
+	if c.errCh != nil {
+		err = <-c.errCh
+	}
+	return &collpb.ExportMetricsServiceResponse{}, err
+}
+
+var emptyExportMetricsServiceResponse = func() []byte {
+	body := collpb.ExportMetricsServiceResponse{}
+	r, err := proto.Marshal(&body)
+	if err != nil {
+		panic(err)
+	}
+	return r
+}()
+
+type HTTPResponseError struct {
+	Err    error
+	Status int
+	Header http.Header
+}
+
+func (e *HTTPResponseError) Error() string {
+	return fmt.Sprintf("%d: %s", e.Status, e.Err)
+}
+
+func (e *HTTPResponseError) Unwrap() error { return e.Err }
+
+// HTTPCollector is an OTLP HTTP server that collects all requests it receives.
+type HTTPCollector struct {
+	headersMu sync.Mutex
+	headers   http.Header
+	storage   *Storage
+
+	errCh    <-chan error
+	listener net.Listener
+	srv      *http.Server
+}
+
+// NewHTTPCollector returns a *HTTPCollector that is listening at the provided
+// endpoint.
+//
+// If endpoint is an empty string, the returned collector will be listening on
+// the localhost interface at an OS-chosen port, not use TLS, and listen at the
+// default OTLP metric endpoint path ("/v1/metrics").
If the endpoint contains
+// a prefix of "https" the server will generate weak self-signed TLS
+// certificates and use them to serve data. If the endpoint contains a path,
+// that path will be used instead of the default OTLP metric endpoint path.
+//
+// If errCh is not nil, the collector will respond to HTTP requests with
+// errors sent on that channel. This means that if errCh is not nil, Export
+// calls will block until an error is received.
+func NewHTTPCollector(endpoint string, errCh <-chan error) (*HTTPCollector, error) {
+	u, err := url.Parse(endpoint)
+	if err != nil {
+		return nil, err
+	}
+	if u.Host == "" {
+		u.Host = "localhost:0"
+	}
+	if u.Path == "" {
+		u.Path = oconf.DefaultMetricsPath
+	}
+
+	c := &HTTPCollector{
+		headers: http.Header{},
+		storage: NewStorage(),
+		errCh:   errCh,
+	}
+
+	c.listener, err = net.Listen("tcp", u.Host)
+	if err != nil {
+		return nil, err
+	}
+
+	mux := http.NewServeMux()
+	mux.Handle(u.Path, http.HandlerFunc(c.handler))
+	c.srv = &http.Server{Handler: mux}
+	if u.Scheme == "https" {
+		cert, err := weakCertificate()
+		if err != nil {
+			return nil, err
+		}
+		c.srv.TLSConfig = &tls.Config{
+			Certificates: []tls.Certificate{cert},
+		}
+		go func() { _ = c.srv.ServeTLS(c.listener, "", "") }()
+	} else {
+		go func() { _ = c.srv.Serve(c.listener) }()
+	}
+	return c, nil
+}
+
+// Shutdown shuts down the HTTP server closing all open connections and
+// listeners.
+func (c *HTTPCollector) Shutdown(ctx context.Context) error {
+	return c.srv.Shutdown(ctx)
+}
+
+// Addr returns the net.Addr c is listening at.
+func (c *HTTPCollector) Addr() net.Addr {
+	return c.listener.Addr()
+}
+
+// Collect returns the Storage holding all collected requests.
+func (c *HTTPCollector) Collect() *Storage {
+	return c.storage
+}
+
+// Headers returns the headers received for all requests.
+func (c *HTTPCollector) Headers() map[string][]string {
+	// Makes a copy.
+	c.headersMu.Lock()
+	defer c.headersMu.Unlock()
+	return c.headers.Clone()
+}
+
+func (c *HTTPCollector) handler(w http.ResponseWriter, r *http.Request) {
+	c.respond(w, c.record(r))
+}
+
+func (c *HTTPCollector) record(r *http.Request) error {
+	// Currently only supports protobuf.
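+	// Any other Content-Type is returned as a plain error below, which
+	// respond reports to the sender as an HTTP 400.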
+	if v := r.Header.Get("Content-Type"); v != "application/x-protobuf" {
+		return fmt.Errorf("content-type not supported: %s", v)
+	}
+
+	body, err := c.readBody(r)
+	if err != nil {
+		return err
+	}
+	pbRequest := &collpb.ExportMetricsServiceRequest{}
+	err = proto.Unmarshal(body, pbRequest)
+	if err != nil {
+		return &HTTPResponseError{
+			Err:    err,
+			Status: http.StatusInternalServerError,
+		}
+	}
+	c.storage.Add(pbRequest)
+
+	c.headersMu.Lock()
+	for k, vals := range r.Header {
+		for _, v := range vals {
+			c.headers.Add(k, v)
+		}
+	}
+	c.headersMu.Unlock()
+
+	if c.errCh != nil {
+		err = <-c.errCh
+	}
+	return err
+}
+
+func (c *HTTPCollector) readBody(r *http.Request) (body []byte, err error) {
+	var reader io.ReadCloser
+	switch r.Header.Get("Content-Encoding") {
+	case "gzip":
+		reader, err = gzip.NewReader(r.Body)
+		if err != nil {
+			// gzip.NewReader returns a nil reader on error; there is
+			// nothing to close here.
+			return nil, &HTTPResponseError{
+				Err:    err,
+				Status: http.StatusInternalServerError,
+			}
+		}
+	default:
+		reader = r.Body
+	}
+
+	defer func() {
+		cErr := reader.Close()
+		if err == nil && cErr != nil {
+			err = &HTTPResponseError{
+				Err:    cErr,
+				Status: http.StatusInternalServerError,
+			}
+		}
+	}()
+	body, err = io.ReadAll(reader)
+	if err != nil {
+		err = &HTTPResponseError{
+			Err:    err,
+			Status: http.StatusInternalServerError,
+		}
+	}
+	return body, err
+}
+
+func (c *HTTPCollector) respond(w http.ResponseWriter, err error) {
+	if err != nil {
+		w.Header().Set("Content-Type", "text/plain; charset=utf-8")
+		w.Header().Set("X-Content-Type-Options", "nosniff")
+		var e *HTTPResponseError
+		if errors.As(err, &e) {
+			for k, vals := range e.Header {
+				for _, v := range vals {
+					w.Header().Add(k, v)
+				}
+			}
+			w.WriteHeader(e.Status)
+			fmt.Fprintln(w, e.Error())
+		} else {
+			w.WriteHeader(http.StatusBadRequest)
+			fmt.Fprintln(w, err.Error())
+		}
+		return
+	}
+
+	w.Header().Set("Content-Type", "application/x-protobuf")
+	w.WriteHeader(http.StatusOK)
+	_, _ = w.Write(emptyExportMetricsServiceResponse)
+}
+
+type mathRandReader struct{}
+
+func (mathRandReader) Read(p []byte) (n int, err error) {
+	return mathrand.Read(p)
+}
+
+var randReader mathRandReader
+
+// Based on https://golang.org/src/crypto/tls/generate_cert.go,
+// simplified and weakened.
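+// The certificate is self-signed, only valid for "localhost" and the
+// loopback addresses, and expires one hour after creation.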
+func weakCertificate() (tls.Certificate, error) { + priv, err := ecdsa.GenerateKey(elliptic.P256(), randReader) + if err != nil { + return tls.Certificate{}, err + } + notBefore := time.Now() + notAfter := notBefore.Add(time.Hour) + max := new(big.Int).Lsh(big.NewInt(1), 128) + sn, err := cryptorand.Int(randReader, max) + if err != nil { + return tls.Certificate{}, err + } + tmpl := x509.Certificate{ + SerialNumber: sn, + Subject: pkix.Name{Organization: []string{"otel-go"}}, + NotBefore: notBefore, + NotAfter: notAfter, + KeyUsage: x509.KeyUsageDigitalSignature, + ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}, + BasicConstraintsValid: true, + DNSNames: []string{"localhost"}, + IPAddresses: []net.IP{net.IPv6loopback, net.IPv4(127, 0, 0, 1)}, + } + derBytes, err := x509.CreateCertificate(randReader, &tmpl, &tmpl, &priv.PublicKey, priv) + if err != nil { + return tls.Certificate{}, err + } + var certBuf bytes.Buffer + err = pem.Encode(&certBuf, &pem.Block{Type: "CERTIFICATE", Bytes: derBytes}) + if err != nil { + return tls.Certificate{}, err + } + privBytes, err := x509.MarshalPKCS8PrivateKey(priv) + if err != nil { + return tls.Certificate{}, err + } + var privBuf bytes.Buffer + err = pem.Encode(&privBuf, &pem.Block{Type: "PRIVATE KEY", Bytes: privBytes}) + if err != nil { + return tls.Certificate{}, err + } + return tls.X509KeyPair(certBuf.Bytes(), privBuf.Bytes()) +} diff --git a/exporters/otlp/otlpmetric/internal/otlpmetrictest/client.go b/exporters/otlp/otlpmetric/internal/otlpmetrictest/client.go deleted file mode 100644 index c248521ee14..00000000000 --- a/exporters/otlp/otlpmetric/internal/otlpmetrictest/client.go +++ /dev/null @@ -1,132 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package otlpmetrictest // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/otlpmetrictest" - -import ( - "context" - "errors" - "sync" - "testing" - "time" - - "go.opentelemetry.io/otel/exporters/otlp/otlpmetric" -) - -func RunExporterShutdownTest(t *testing.T, factory func() otlpmetric.Client) { - t.Run("testClientStopHonorsTimeout", func(t *testing.T) { - testClientStopHonorsTimeout(t, factory()) - }) - - t.Run("testClientStopHonorsCancel", func(t *testing.T) { - testClientStopHonorsCancel(t, factory()) - }) - - t.Run("testClientStopNoError", func(t *testing.T) { - testClientStopNoError(t, factory()) - }) - - t.Run("testClientStopManyTimes", func(t *testing.T) { - testClientStopManyTimes(t, factory()) - }) -} - -func initializeExporter(t *testing.T, client otlpmetric.Client) *otlpmetric.Exporter { - ctx, cancel := context.WithTimeout(context.Background(), 1*time.Minute) - defer cancel() - - e, err := otlpmetric.New(ctx, client) - if err != nil { - t.Fatalf("failed to create exporter") - } - - return e -} - -func testClientStopHonorsTimeout(t *testing.T, client otlpmetric.Client) { - t.Cleanup(func() { - // The test is looking for a failed shut down. 
Call Stop a second time - // with an un-expired context to give the client a second chance at - // cleaning up. There is not guarantee from the Client interface this - // will succeed, therefore, no need to check the error (just give it a - // best try). - _ = client.Stop(context.Background()) - }) - e := initializeExporter(t, client) - - innerCtx, innerCancel := context.WithTimeout(context.Background(), time.Microsecond) - <-innerCtx.Done() - if err := e.Shutdown(innerCtx); err == nil { - t.Error("expected context DeadlineExceeded error, got nil") - } else if !errors.Is(err, context.DeadlineExceeded) { - t.Errorf("expected context DeadlineExceeded error, got %v", err) - } - innerCancel() -} - -func testClientStopHonorsCancel(t *testing.T, client otlpmetric.Client) { - t.Cleanup(func() { - // The test is looking for a failed shut down. Call Stop a second time - // with an un-expired context to give the client a second chance at - // cleaning up. There is not guarantee from the Client interface this - // will succeed, therefore, no need to check the error (just give it a - // best try). - _ = client.Stop(context.Background()) - }) - e := initializeExporter(t, client) - - ctx, innerCancel := context.WithCancel(context.Background()) - innerCancel() - if err := e.Shutdown(ctx); err == nil { - t.Error("expected context canceled error, got nil") - } else if !errors.Is(err, context.Canceled) { - t.Errorf("expected context canceled error, got %v", err) - } -} - -func testClientStopNoError(t *testing.T, client otlpmetric.Client) { - ctx, cancel := context.WithTimeout(context.Background(), 1*time.Minute) - defer cancel() - - e := initializeExporter(t, client) - if err := e.Shutdown(ctx); err != nil { - t.Errorf("shutdown errored: expected nil, got %v", err) - } -} - -func testClientStopManyTimes(t *testing.T, client otlpmetric.Client) { - ctx, cancel := context.WithTimeout(context.Background(), 1*time.Minute) - defer cancel() - e := initializeExporter(t, client) - - ch := make(chan struct{}) - wg := sync.WaitGroup{} - const num int = 20 - wg.Add(num) - errs := make([]error, num) - for i := 0; i < num; i++ { - go func(idx int) { - defer wg.Done() - <-ch - errs[idx] = e.Shutdown(ctx) - }(i) - } - close(ch) - wg.Wait() - for _, err := range errs { - if err != nil { - t.Fatalf("failed to shutdown exporter: %v", err) - } - } -} diff --git a/exporters/otlp/otlpmetric/internal/otlpmetrictest/collector.go b/exporters/otlp/otlpmetric/internal/otlpmetrictest/collector.go deleted file mode 100644 index 20915724a53..00000000000 --- a/exporters/otlp/otlpmetric/internal/otlpmetrictest/collector.go +++ /dev/null @@ -1,55 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package otlpmetrictest // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/otlpmetrictest" - -import ( - collectormetricpb "go.opentelemetry.io/proto/otlp/collector/metrics/v1" - metricpb "go.opentelemetry.io/proto/otlp/metrics/v1" -) - -// Collector is an interface that mock collectors should implements, -// so they can be used for the end-to-end testing. -type Collector interface { - Stop() error - GetMetrics() []*metricpb.Metric -} - -// MetricsStorage stores the metrics. Mock collectors could use it to -// store metrics they have received. -type MetricsStorage struct { - metrics []*metricpb.Metric -} - -// NewMetricsStorage creates a new metrics storage. -func NewMetricsStorage() MetricsStorage { - return MetricsStorage{} -} - -// AddMetrics adds metrics to the metrics storage. -func (s *MetricsStorage) AddMetrics(request *collectormetricpb.ExportMetricsServiceRequest) { - for _, rm := range request.GetResourceMetrics() { - // TODO (rghetia) handle multiple resource and library info. - if len(rm.ScopeMetrics) > 0 { - s.metrics = append(s.metrics, rm.ScopeMetrics[0].Metrics...) - } - } -} - -// GetMetrics returns the stored metrics. -func (s *MetricsStorage) GetMetrics() []*metricpb.Metric { - // copy in order to not change. - m := make([]*metricpb.Metric, 0, len(s.metrics)) - return append(m, s.metrics...) -} diff --git a/exporters/otlp/otlpmetric/internal/otlpmetrictest/data.go b/exporters/otlp/otlpmetric/internal/otlpmetrictest/data.go deleted file mode 100644 index d8d9c03a31e..00000000000 --- a/exporters/otlp/otlpmetric/internal/otlpmetrictest/data.go +++ /dev/null @@ -1,71 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package otlpmetrictest // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/otlpmetrictest" - -import ( - "context" - "fmt" - "time" - - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/sdk/instrumentation" - "go.opentelemetry.io/otel/sdk/metric/aggregator/sum" - "go.opentelemetry.io/otel/sdk/metric/export" - "go.opentelemetry.io/otel/sdk/metric/metrictest" - "go.opentelemetry.io/otel/sdk/metric/number" - "go.opentelemetry.io/otel/sdk/metric/processor/processortest" - "go.opentelemetry.io/otel/sdk/metric/sdkapi" -) - -// OneRecordReader is a Reader that returns just one -// filled record. It may be useful for testing driver's metrics -// export. 
-func OneRecordReader() export.InstrumentationLibraryReader { - desc := metrictest.NewDescriptor( - "foo", - sdkapi.CounterInstrumentKind, - number.Int64Kind, - ) - agg := sum.New(1) - if err := agg[0].Update(context.Background(), number.NewInt64Number(42), &desc); err != nil { - panic(err) - } - start := time.Date(2020, time.December, 8, 19, 15, 0, 0, time.UTC) - end := time.Date(2020, time.December, 8, 19, 16, 0, 0, time.UTC) - attrs := attribute.NewSet(attribute.String("abc", "def"), attribute.Int64("one", 1)) - rec := export.NewRecord(&desc, &attrs, agg[0].Aggregation(), start, end) - - return processortest.MultiInstrumentationLibraryReader( - map[instrumentation.Library][]export.Record{ - { - Name: "onelib", - }: {rec}, - }) -} - -func EmptyReader() export.InstrumentationLibraryReader { - return processortest.MultiInstrumentationLibraryReader(nil) -} - -// FailReader is a checkpointer that returns an error during -// ForEach. -type FailReader struct{} - -var _ export.InstrumentationLibraryReader = FailReader{} - -// ForEach implements export.Reader. It always fails. -func (FailReader) ForEach(readerFunc func(instrumentation.Library, export.Reader) error) error { - return fmt.Errorf("fail") -} diff --git a/exporters/otlp/otlpmetric/internal/otlpmetrictest/otlptest.go b/exporters/otlp/otlpmetric/internal/otlpmetrictest/otlptest.go deleted file mode 100644 index c12cd5b1b6a..00000000000 --- a/exporters/otlp/otlpmetric/internal/otlpmetrictest/otlptest.go +++ /dev/null @@ -1,174 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package otlpmetrictest // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/otlpmetrictest" - -import ( - "context" - "fmt" - "testing" - "time" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/exporters/otlp/otlpmetric" - "go.opentelemetry.io/otel/metric/instrument" - controller "go.opentelemetry.io/otel/sdk/metric/controller/basic" - "go.opentelemetry.io/otel/sdk/metric/export/aggregation" - "go.opentelemetry.io/otel/sdk/metric/number" - processor "go.opentelemetry.io/otel/sdk/metric/processor/basic" - "go.opentelemetry.io/otel/sdk/metric/sdkapi" - "go.opentelemetry.io/otel/sdk/metric/selector/simple" - metricpb "go.opentelemetry.io/proto/otlp/metrics/v1" -) - -// RunEndToEndTest can be used by protocol driver tests to validate -// themselves. 
-func RunEndToEndTest(ctx context.Context, t *testing.T, exp *otlpmetric.Exporter, mcMetrics Collector) { - selector := simple.NewWithHistogramDistribution() - proc := processor.NewFactory(selector, aggregation.StatelessTemporalitySelector()) - cont := controller.New(proc, controller.WithExporter(exp)) - require.NoError(t, cont.Start(ctx)) - - meter := cont.Meter("test-meter") - attrs := []attribute.KeyValue{attribute.Bool("test", true)} - - type data struct { - iKind sdkapi.InstrumentKind - nKind number.Kind - val int64 - } - instruments := map[string]data{ - "test-int64-counter": {sdkapi.CounterInstrumentKind, number.Int64Kind, 1}, - "test-float64-counter": {sdkapi.CounterInstrumentKind, number.Float64Kind, 1}, - "test-int64-histogram": {sdkapi.HistogramInstrumentKind, number.Int64Kind, 2}, - "test-float64-histogram": {sdkapi.HistogramInstrumentKind, number.Float64Kind, 2}, - "test-int64-gaugeobserver": {sdkapi.GaugeObserverInstrumentKind, number.Int64Kind, 3}, - "test-float64-gaugeobserver": {sdkapi.GaugeObserverInstrumentKind, number.Float64Kind, 3}, - } - for name, data := range instruments { - data := data - switch data.iKind { - case sdkapi.CounterInstrumentKind: - switch data.nKind { - case number.Int64Kind: - c, _ := meter.SyncInt64().Counter(name) - c.Add(ctx, data.val, attrs...) - case number.Float64Kind: - c, _ := meter.SyncFloat64().Counter(name) - c.Add(ctx, float64(data.val), attrs...) - default: - assert.Failf(t, "unsupported number testing kind", data.nKind.String()) - } - case sdkapi.HistogramInstrumentKind: - switch data.nKind { - case number.Int64Kind: - c, _ := meter.SyncInt64().Histogram(name) - c.Record(ctx, data.val, attrs...) - case number.Float64Kind: - c, _ := meter.SyncFloat64().Histogram(name) - c.Record(ctx, float64(data.val), attrs...) - default: - assert.Failf(t, "unsupported number testing kind", data.nKind.String()) - } - case sdkapi.GaugeObserverInstrumentKind: - switch data.nKind { - case number.Int64Kind: - g, _ := meter.AsyncInt64().Gauge(name) - _ = meter.RegisterCallback([]instrument.Asynchronous{g}, func(ctx context.Context) { - g.Observe(ctx, data.val, attrs...) - }) - case number.Float64Kind: - g, _ := meter.AsyncFloat64().Gauge(name) - _ = meter.RegisterCallback([]instrument.Asynchronous{g}, func(ctx context.Context) { - g.Observe(ctx, float64(data.val), attrs...) - }) - default: - assert.Failf(t, "unsupported number testing kind", data.nKind.String()) - } - default: - assert.Failf(t, "unsupported metrics testing kind", data.iKind.String()) - } - } - - // Flush and close. - require.NoError(t, cont.Stop(ctx)) - - // Wait >2 cycles. - <-time.After(40 * time.Millisecond) - - // Now shutdown the exporter - ctx, cancel := context.WithTimeout(ctx, 10*time.Second) - defer cancel() - if err := exp.Shutdown(ctx); err != nil { - t.Fatalf("failed to stop the exporter: %v", err) - } - - // Shutdown the collector too so that we can begin - // verification checks of expected data back. 
- _ = mcMetrics.Stop() - - metrics := mcMetrics.GetMetrics() - assert.Len(t, metrics, len(instruments), "not enough metrics exported") - seen := make(map[string]struct{}, len(instruments)) - for _, m := range metrics { - data, ok := instruments[m.Name] - if !ok { - assert.Failf(t, "unknown metrics", m.Name) - continue - } - seen[m.Name] = struct{}{} - - switch data.iKind { - case sdkapi.CounterInstrumentKind, sdkapi.GaugeObserverInstrumentKind: - var dp []*metricpb.NumberDataPoint - switch data.iKind { - case sdkapi.CounterInstrumentKind: - require.NotNil(t, m.GetSum()) - dp = m.GetSum().GetDataPoints() - case sdkapi.GaugeObserverInstrumentKind: - require.NotNil(t, m.GetGauge()) - dp = m.GetGauge().GetDataPoints() - } - if assert.Len(t, dp, 1) { - switch data.nKind { - case number.Int64Kind: - v := &metricpb.NumberDataPoint_AsInt{AsInt: data.val} - assert.Equal(t, v, dp[0].Value, "invalid value for %q", m.Name) - case number.Float64Kind: - v := &metricpb.NumberDataPoint_AsDouble{AsDouble: float64(data.val)} - assert.Equal(t, v, dp[0].Value, "invalid value for %q", m.Name) - } - } - case sdkapi.HistogramInstrumentKind: - require.NotNil(t, m.GetHistogram()) - if dp := m.GetHistogram().DataPoints; assert.Len(t, dp, 1) { - count := dp[0].Count - assert.Equal(t, uint64(1), count, "invalid count for %q", m.Name) - require.NotNil(t, dp[0].Sum) - assert.Equal(t, float64(data.val*int64(count)), *dp[0].Sum, "invalid sum for %q (value %d)", m.Name, data.val) - } - default: - assert.Failf(t, "invalid metrics kind", data.iKind.String()) - } - } - - for i := range instruments { - if _, ok := seen[i]; !ok { - assert.Fail(t, fmt.Sprintf("no metric(s) exported for %q", i)) - } - } -} diff --git a/exporters/otlp/otlpmetric/internal/transform/attribute.go b/exporters/otlp/otlpmetric/internal/transform/attribute.go new file mode 100644 index 00000000000..504256ee7b2 --- /dev/null +++ b/exporters/otlp/otlpmetric/internal/transform/attribute.go @@ -0,0 +1,155 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build go1.18 +// +build go1.18 + +package transform // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/transform" + +import ( + "go.opentelemetry.io/otel/attribute" + cpb "go.opentelemetry.io/proto/otlp/common/v1" +) + +// AttrIter transforms an attribute iterator into OTLP key-values. +func AttrIter(iter attribute.Iterator) []*cpb.KeyValue { + l := iter.Len() + if l == 0 { + return nil + } + + out := make([]*cpb.KeyValue, 0, l) + for iter.Next() { + out = append(out, KeyValue(iter.Attribute())) + } + return out +} + +// KeyValues transforms a slice of attribute KeyValues into OTLP key-values. +func KeyValues(attrs []attribute.KeyValue) []*cpb.KeyValue { + if len(attrs) == 0 { + return nil + } + + out := make([]*cpb.KeyValue, 0, len(attrs)) + for _, kv := range attrs { + out = append(out, KeyValue(kv)) + } + return out +} + +// KeyValue transforms an attribute KeyValue into an OTLP key-value. 
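+// Conversion of the value itself is delegated to Value.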
+func KeyValue(kv attribute.KeyValue) *cpb.KeyValue { + return &cpb.KeyValue{Key: string(kv.Key), Value: Value(kv.Value)} +} + +// Value transforms an attribute Value into an OTLP AnyValue. +func Value(v attribute.Value) *cpb.AnyValue { + av := new(cpb.AnyValue) + switch v.Type() { + case attribute.BOOL: + av.Value = &cpb.AnyValue_BoolValue{ + BoolValue: v.AsBool(), + } + case attribute.BOOLSLICE: + av.Value = &cpb.AnyValue_ArrayValue{ + ArrayValue: &cpb.ArrayValue{ + Values: boolSliceValues(v.AsBoolSlice()), + }, + } + case attribute.INT64: + av.Value = &cpb.AnyValue_IntValue{ + IntValue: v.AsInt64(), + } + case attribute.INT64SLICE: + av.Value = &cpb.AnyValue_ArrayValue{ + ArrayValue: &cpb.ArrayValue{ + Values: int64SliceValues(v.AsInt64Slice()), + }, + } + case attribute.FLOAT64: + av.Value = &cpb.AnyValue_DoubleValue{ + DoubleValue: v.AsFloat64(), + } + case attribute.FLOAT64SLICE: + av.Value = &cpb.AnyValue_ArrayValue{ + ArrayValue: &cpb.ArrayValue{ + Values: float64SliceValues(v.AsFloat64Slice()), + }, + } + case attribute.STRING: + av.Value = &cpb.AnyValue_StringValue{ + StringValue: v.AsString(), + } + case attribute.STRINGSLICE: + av.Value = &cpb.AnyValue_ArrayValue{ + ArrayValue: &cpb.ArrayValue{ + Values: stringSliceValues(v.AsStringSlice()), + }, + } + default: + av.Value = &cpb.AnyValue_StringValue{ + StringValue: "INVALID", + } + } + return av +} + +func boolSliceValues(vals []bool) []*cpb.AnyValue { + converted := make([]*cpb.AnyValue, len(vals)) + for i, v := range vals { + converted[i] = &cpb.AnyValue{ + Value: &cpb.AnyValue_BoolValue{ + BoolValue: v, + }, + } + } + return converted +} + +func int64SliceValues(vals []int64) []*cpb.AnyValue { + converted := make([]*cpb.AnyValue, len(vals)) + for i, v := range vals { + converted[i] = &cpb.AnyValue{ + Value: &cpb.AnyValue_IntValue{ + IntValue: v, + }, + } + } + return converted +} + +func float64SliceValues(vals []float64) []*cpb.AnyValue { + converted := make([]*cpb.AnyValue, len(vals)) + for i, v := range vals { + converted[i] = &cpb.AnyValue{ + Value: &cpb.AnyValue_DoubleValue{ + DoubleValue: v, + }, + } + } + return converted +} + +func stringSliceValues(vals []string) []*cpb.AnyValue { + converted := make([]*cpb.AnyValue, len(vals)) + for i, v := range vals { + converted[i] = &cpb.AnyValue{ + Value: &cpb.AnyValue_StringValue{ + StringValue: v, + }, + } + } + return converted +} diff --git a/exporters/otlp/otlpmetric/internal/transform/attribute_test.go b/exporters/otlp/otlpmetric/internal/transform/attribute_test.go new file mode 100644 index 00000000000..6ca20fdbafd --- /dev/null +++ b/exporters/otlp/otlpmetric/internal/transform/attribute_test.go @@ -0,0 +1,197 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
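(Editor's aside, not part of the patch: a minimal sketch of how these helpers are used. The transform package is internal, so the import below only resolves for packages inside otlpmetric; the example function name is illustrative.)

	package transform_test

	import (
		"fmt"

		"go.opentelemetry.io/otel/attribute"
		"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/transform"
	)

	func Example_keyValues() {
		kvs := transform.KeyValues([]attribute.KeyValue{
			attribute.String("user", "alice"),
			attribute.Int("retries", 3),
		})
		// The generated proto getters unwrap the AnyValue oneof.
		fmt.Println(kvs[0].GetValue().GetStringValue())
		fmt.Println(kvs[1].GetValue().GetIntValue())
		// Output:
		// alice
		// 3
	}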
+ +//go:build go1.18 +// +build go1.18 + +package transform // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/transform" + +import ( + "testing" + + "github.com/stretchr/testify/assert" + + "go.opentelemetry.io/otel/attribute" + cpb "go.opentelemetry.io/proto/otlp/common/v1" +) + +var ( + attrBool = attribute.Bool("bool", true) + attrBoolSlice = attribute.BoolSlice("bool slice", []bool{true, false}) + attrInt = attribute.Int("int", 1) + attrIntSlice = attribute.IntSlice("int slice", []int{-1, 1}) + attrInt64 = attribute.Int64("int64", 1) + attrInt64Slice = attribute.Int64Slice("int64 slice", []int64{-1, 1}) + attrFloat64 = attribute.Float64("float64", 1) + attrFloat64Slice = attribute.Float64Slice("float64 slice", []float64{-1, 1}) + attrString = attribute.String("string", "o") + attrStringSlice = attribute.StringSlice("string slice", []string{"o", "n"}) + attrInvalid = attribute.KeyValue{ + Key: attribute.Key("invalid"), + Value: attribute.Value{}, + } + + valBoolTrue = &cpb.AnyValue{Value: &cpb.AnyValue_BoolValue{BoolValue: true}} + valBoolFalse = &cpb.AnyValue{Value: &cpb.AnyValue_BoolValue{BoolValue: false}} + valBoolSlice = &cpb.AnyValue{Value: &cpb.AnyValue_ArrayValue{ + ArrayValue: &cpb.ArrayValue{ + Values: []*cpb.AnyValue{valBoolTrue, valBoolFalse}, + }, + }} + valIntOne = &cpb.AnyValue{Value: &cpb.AnyValue_IntValue{IntValue: 1}} + valIntNOne = &cpb.AnyValue{Value: &cpb.AnyValue_IntValue{IntValue: -1}} + valIntSlice = &cpb.AnyValue{Value: &cpb.AnyValue_ArrayValue{ + ArrayValue: &cpb.ArrayValue{ + Values: []*cpb.AnyValue{valIntNOne, valIntOne}, + }, + }} + valDblOne = &cpb.AnyValue{Value: &cpb.AnyValue_DoubleValue{DoubleValue: 1}} + valDblNOne = &cpb.AnyValue{Value: &cpb.AnyValue_DoubleValue{DoubleValue: -1}} + valDblSlice = &cpb.AnyValue{Value: &cpb.AnyValue_ArrayValue{ + ArrayValue: &cpb.ArrayValue{ + Values: []*cpb.AnyValue{valDblNOne, valDblOne}, + }, + }} + valStrO = &cpb.AnyValue{Value: &cpb.AnyValue_StringValue{StringValue: "o"}} + valStrN = &cpb.AnyValue{Value: &cpb.AnyValue_StringValue{StringValue: "n"}} + valStrSlice = &cpb.AnyValue{Value: &cpb.AnyValue_ArrayValue{ + ArrayValue: &cpb.ArrayValue{ + Values: []*cpb.AnyValue{valStrO, valStrN}, + }, + }} + + kvBool = &cpb.KeyValue{Key: "bool", Value: valBoolTrue} + kvBoolSlice = &cpb.KeyValue{Key: "bool slice", Value: valBoolSlice} + kvInt = &cpb.KeyValue{Key: "int", Value: valIntOne} + kvIntSlice = &cpb.KeyValue{Key: "int slice", Value: valIntSlice} + kvInt64 = &cpb.KeyValue{Key: "int64", Value: valIntOne} + kvInt64Slice = &cpb.KeyValue{Key: "int64 slice", Value: valIntSlice} + kvFloat64 = &cpb.KeyValue{Key: "float64", Value: valDblOne} + kvFloat64Slice = &cpb.KeyValue{Key: "float64 slice", Value: valDblSlice} + kvString = &cpb.KeyValue{Key: "string", Value: valStrO} + kvStringSlice = &cpb.KeyValue{Key: "string slice", Value: valStrSlice} + kvInvalid = &cpb.KeyValue{ + Key: "invalid", + Value: &cpb.AnyValue{ + Value: &cpb.AnyValue_StringValue{StringValue: "INVALID"}, + }, + } +) + +type attributeTest struct { + name string + in []attribute.KeyValue + want []*cpb.KeyValue +} + +func TestAttributeTransforms(t *testing.T) { + for _, test := range []attributeTest{ + {"nil", nil, nil}, + {"empty", []attribute.KeyValue{}, nil}, + { + "invalid", + []attribute.KeyValue{attrInvalid}, + []*cpb.KeyValue{kvInvalid}, + }, + { + "bool", + []attribute.KeyValue{attrBool}, + []*cpb.KeyValue{kvBool}, + }, + { + "bool slice", + []attribute.KeyValue{attrBoolSlice}, + []*cpb.KeyValue{kvBoolSlice}, + }, + { + "int", + 
[]attribute.KeyValue{attrInt}, + []*cpb.KeyValue{kvInt}, + }, + { + "int slice", + []attribute.KeyValue{attrIntSlice}, + []*cpb.KeyValue{kvIntSlice}, + }, + { + "int64", + []attribute.KeyValue{attrInt64}, + []*cpb.KeyValue{kvInt64}, + }, + { + "int64 slice", + []attribute.KeyValue{attrInt64Slice}, + []*cpb.KeyValue{kvInt64Slice}, + }, + { + "float64", + []attribute.KeyValue{attrFloat64}, + []*cpb.KeyValue{kvFloat64}, + }, + { + "float64 slice", + []attribute.KeyValue{attrFloat64Slice}, + []*cpb.KeyValue{kvFloat64Slice}, + }, + { + "string", + []attribute.KeyValue{attrString}, + []*cpb.KeyValue{kvString}, + }, + { + "string slice", + []attribute.KeyValue{attrStringSlice}, + []*cpb.KeyValue{kvStringSlice}, + }, + { + "all", + []attribute.KeyValue{ + attrBool, + attrBoolSlice, + attrInt, + attrIntSlice, + attrInt64, + attrInt64Slice, + attrFloat64, + attrFloat64Slice, + attrString, + attrStringSlice, + attrInvalid, + }, + []*cpb.KeyValue{ + kvBool, + kvBoolSlice, + kvInt, + kvIntSlice, + kvInt64, + kvInt64Slice, + kvFloat64, + kvFloat64Slice, + kvString, + kvStringSlice, + kvInvalid, + }, + }, + } { + t.Run(test.name, func(t *testing.T) { + t.Run("KeyValues", func(t *testing.T) { + assert.ElementsMatch(t, test.want, KeyValues(test.in)) + }) + t.Run("AttrIter", func(t *testing.T) { + s := attribute.NewSet(test.in...) + assert.ElementsMatch(t, test.want, AttrIter(s.Iter())) + }) + }) + } +} diff --git a/sdk/metric/atomicfields.go b/exporters/otlp/otlpmetric/internal/transform/doc.go similarity index 64% rename from sdk/metric/atomicfields.go rename to exporters/otlp/otlpmetric/internal/transform/doc.go index 7cea2e54374..7a79f794dd1 100644 --- a/sdk/metric/atomicfields.go +++ b/exporters/otlp/otlpmetric/internal/transform/doc.go @@ -12,14 +12,6 @@ // See the License for the specific language governing permissions and // limitations under the License. -package metric // import "go.opentelemetry.io/otel/sdk/metric" - -import "unsafe" - -// Deprecated: will be removed soon. -func AtomicFieldOffsets() map[string]uintptr { - return map[string]uintptr{ - "record.refMapped.value": unsafe.Offsetof(record{}.refMapped.value), - "record.updateCount": unsafe.Offsetof(record{}.updateCount), - } -} +// Package transform provides transformation functionality from the +// sdk/metric/metricdata data-types into OTLP data-types. +package transform // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/transform" diff --git a/exporters/otlp/otlpmetric/internal/transform/error.go b/exporters/otlp/otlpmetric/internal/transform/error.go new file mode 100644 index 00000000000..8a8b49a63a3 --- /dev/null +++ b/exporters/otlp/otlpmetric/internal/transform/error.go @@ -0,0 +1,114 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +//go:build go1.18 +// +build go1.18 + +package transform // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/transform" + +import ( + "errors" + "fmt" + "strings" + + mpb "go.opentelemetry.io/proto/otlp/metrics/v1" +) + +var ( + errUnknownAggregation = errors.New("unknown aggregation") + errUnknownTemporality = errors.New("unknown temporality") +) + +type errMetric struct { + m *mpb.Metric + err error +} + +func (e errMetric) Unwrap() error { + return e.err +} + +func (e errMetric) Error() string { + format := "invalid metric (name: %q, description: %q, unit: %q): %s" + return fmt.Sprintf(format, e.m.Name, e.m.Description, e.m.Unit, e.err) +} + +func (e errMetric) Is(target error) bool { + return errors.Is(e.err, target) +} + +// multiErr is used by the data-type transform functions to wrap multiple +// errors into a single return value. The error message will show all errors +// as a list and scope them by the datatype name that is returning them. +type multiErr struct { + datatype string + errs []error +} + +// errOrNil returns nil if e contains no errors, otherwise it returns e. +func (e *multiErr) errOrNil() error { + if len(e.errs) == 0 { + return nil + } + return e +} + +// append adds err to e. If err is a multiErr, its errs are flattened into e. +func (e *multiErr) append(err error) { + // Do not use errors.As here, this should only be flattened one layer. If + // there is a *multiErr several steps down the chain, all the errors above + // it will be discarded if errors.As is used instead. + switch other := err.(type) { + case *multiErr: + // Flatten err errors into e. + e.errs = append(e.errs, other.errs...) + default: + e.errs = append(e.errs, err) + } +} + +func (e *multiErr) Error() string { + es := make([]string, len(e.errs)) + for i, err := range e.errs { + es[i] = fmt.Sprintf("* %s", err) + } + + format := "%d errors occurred transforming %s:\n\t%s" + return fmt.Sprintf(format, len(es), e.datatype, strings.Join(es, "\n\t")) +} + +func (e *multiErr) Unwrap() error { + switch len(e.errs) { + case 0: + return nil + case 1: + return e.errs[0] + } + + // Return a multiErr without the leading error. + cp := &multiErr{ + datatype: e.datatype, + errs: make([]error, len(e.errs)-1), + } + copy(cp.errs, e.errs[1:]) + return cp +} + +func (e *multiErr) Is(target error) bool { + if len(e.errs) == 0 { + return false + } + // Check if the first error is target. + return errors.Is(e.errs[0], target) +} diff --git a/exporters/otlp/otlpmetric/internal/transform/error_test.go b/exporters/otlp/otlpmetric/internal/transform/error_test.go new file mode 100644 index 00000000000..f366fc48724 --- /dev/null +++ b/exporters/otlp/otlpmetric/internal/transform/error_test.go @@ -0,0 +1,91 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
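(Editor's aside, not part of the patch: a sketch of the error contract multiErr provides within this package. partialMetrics is a hypothetical helper, not an API introduced by this change.)

	package transform

	import (
		"errors"

		"go.opentelemetry.io/otel/sdk/metric/metricdata"
		mpb "go.opentelemetry.io/proto/otlp/metrics/v1"
	)

	// partialMetrics keeps the partial result Metrics returns alongside a
	// multiErr; errors.Is matches each wrapped transform error.
	func partialMetrics(ms []metricdata.Metrics) []*mpb.Metric {
		out, err := Metrics(ms)
		if err == nil || errors.Is(err, errUnknownTemporality) || errors.Is(err, errUnknownAggregation) {
			return out // keep whatever converted cleanly
		}
		return nil // unexpected failure, drop everything
	}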
+ +//go:build go1.18 +// +build go1.18 + +package transform // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/transform" + +import ( + "strings" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +var ( + e0 = errMetric{m: pbMetrics[0], err: errUnknownAggregation} + e1 = errMetric{m: pbMetrics[1], err: errUnknownTemporality} +) + +type testingErr struct{} + +func (testingErr) Error() string { return "testing error" } + +// errFunc is a non-comparable error type. +type errFunc func() string + +func (e errFunc) Error() string { + return e() +} + +func TestMultiErr(t *testing.T) { + const name = "TestMultiErr" + me := &multiErr{datatype: name} + + t.Run("ErrOrNil", func(t *testing.T) { + require.Nil(t, me.errOrNil()) + me.errs = []error{e0} + assert.Error(t, me.errOrNil()) + }) + + var testErr testingErr + t.Run("AppendError", func(t *testing.T) { + me.append(testErr) + assert.Equal(t, testErr, me.errs[len(me.errs)-1]) + }) + + t.Run("AppendFlattens", func(t *testing.T) { + other := &multiErr{datatype: "OtherTestMultiErr", errs: []error{e1}} + me.append(other) + assert.Equal(t, e1, me.errs[len(me.errs)-1]) + }) + + t.Run("ErrorMessage", func(t *testing.T) { + // Test the overall structure of the message, but not the exact + // language so this doesn't become a change-indicator. + msg := me.Error() + lines := strings.Split(msg, "\n") + assert.Equalf(t, 4, len(lines), "expected a 4 line error message, got:\n\n%s", msg) + assert.Contains(t, msg, name) + assert.Contains(t, msg, e0.Error()) + assert.Contains(t, msg, testErr.Error()) + assert.Contains(t, msg, e1.Error()) + }) + + t.Run("ErrorIs", func(t *testing.T) { + assert.ErrorIs(t, me, errUnknownAggregation) + assert.ErrorIs(t, me, e0) + assert.ErrorIs(t, me, testErr) + assert.ErrorIs(t, me, errUnknownTemporality) + assert.ErrorIs(t, me, e1) + + errUnknown := errFunc(func() string { return "unknown error" }) + assert.NotErrorIs(t, me, errUnknown) + + var empty multiErr + assert.NotErrorIs(t, &empty, errUnknownTemporality) + }) +} diff --git a/exporters/otlp/otlpmetric/internal/transform/metricdata.go b/exporters/otlp/otlpmetric/internal/transform/metricdata.go new file mode 100644 index 00000000000..38ad617fd6a --- /dev/null +++ b/exporters/otlp/otlpmetric/internal/transform/metricdata.go @@ -0,0 +1,207 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build go1.18 +// +build go1.18 + +package transform // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/transform" + +import ( + "fmt" + + "go.opentelemetry.io/otel/sdk/metric/metricdata" + cpb "go.opentelemetry.io/proto/otlp/common/v1" + mpb "go.opentelemetry.io/proto/otlp/metrics/v1" + rpb "go.opentelemetry.io/proto/otlp/resource/v1" +) + +// ResourceMetrics returns an OTLP ResourceMetrics generated from rm. If rm +// contains invalid ScopeMetrics, an error will be returned along with an OTLP +// ResourceMetrics that contains partial OTLP ScopeMetrics. 
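+// The returned error aggregates every nested transform failure and can be
+// inspected with errors.Is; the returned ResourceMetrics remains exportable.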
+func ResourceMetrics(rm metricdata.ResourceMetrics) (*mpb.ResourceMetrics, error) {
+	sms, err := ScopeMetrics(rm.ScopeMetrics)
+	return &mpb.ResourceMetrics{
+		Resource: &rpb.Resource{
+			Attributes: AttrIter(rm.Resource.Iter()),
+		},
+		ScopeMetrics: sms,
+		SchemaUrl:    rm.Resource.SchemaURL(),
+	}, err
+}
+
+// ScopeMetrics returns a slice of OTLP ScopeMetrics generated from sms. If
+// sms contains invalid metric values, an error will be returned along with a
+// slice that contains partial OTLP ScopeMetrics.
+func ScopeMetrics(sms []metricdata.ScopeMetrics) ([]*mpb.ScopeMetrics, error) {
+	errs := &multiErr{datatype: "ScopeMetrics"}
+	out := make([]*mpb.ScopeMetrics, 0, len(sms))
+	for _, sm := range sms {
+		ms, err := Metrics(sm.Metrics)
+		if err != nil {
+			errs.append(err)
+		}
+
+		out = append(out, &mpb.ScopeMetrics{
+			Scope: &cpb.InstrumentationScope{
+				Name:    sm.Scope.Name,
+				Version: sm.Scope.Version,
+			},
+			Metrics:   ms,
+			SchemaUrl: sm.Scope.SchemaURL,
+		})
+	}
+	return out, errs.errOrNil()
+}
+
+// Metrics returns a slice of OTLP Metric generated from ms. If ms contains
+// invalid metric values, an error will be returned along with a slice that
+// contains partial OTLP Metrics.
+func Metrics(ms []metricdata.Metrics) ([]*mpb.Metric, error) {
+	errs := &multiErr{datatype: "Metrics"}
+	out := make([]*mpb.Metric, 0, len(ms))
+	for _, m := range ms {
+		o, err := metric(m)
+		if err != nil {
+			// Do not include invalid data. Drop the metric, report the error.
+			errs.append(errMetric{m: o, err: err})
+			continue
+		}
+		out = append(out, o)
+	}
+	return out, errs.errOrNil()
+}
+
+func metric(m metricdata.Metrics) (*mpb.Metric, error) {
+	var err error
+	out := &mpb.Metric{
+		Name:        m.Name,
+		Description: m.Description,
+		Unit:        string(m.Unit),
+	}
+	switch a := m.Data.(type) {
+	case metricdata.Gauge[int64]:
+		out.Data = Gauge[int64](a)
+	case metricdata.Gauge[float64]:
+		out.Data = Gauge[float64](a)
+	case metricdata.Sum[int64]:
+		out.Data, err = Sum[int64](a)
+	case metricdata.Sum[float64]:
+		out.Data, err = Sum[float64](a)
+	case metricdata.Histogram:
+		out.Data, err = Histogram(a)
+	default:
+		return out, fmt.Errorf("%w: %T", errUnknownAggregation, a)
+	}
+	return out, err
+}
+
+// Gauge returns an OTLP Metric_Gauge generated from g.
+func Gauge[N int64 | float64](g metricdata.Gauge[N]) *mpb.Metric_Gauge {
+	return &mpb.Metric_Gauge{
+		Gauge: &mpb.Gauge{
+			DataPoints: DataPoints(g.DataPoints),
+		},
+	}
+}
+
+// Sum returns an OTLP Metric_Sum generated from s. If the temporality of s
+// is unknown, an error is returned along with a nil Metric_Sum.
+func Sum[N int64 | float64](s metricdata.Sum[N]) (*mpb.Metric_Sum, error) {
+	t, err := Temporality(s.Temporality)
+	if err != nil {
+		return nil, err
+	}
+	return &mpb.Metric_Sum{
+		Sum: &mpb.Sum{
+			AggregationTemporality: t,
+			IsMonotonic:            s.IsMonotonic,
+			DataPoints:             DataPoints(s.DataPoints),
+		},
+	}, nil
+}
+
+// DataPoints returns a slice of OTLP NumberDataPoint generated from dPts.
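+// int64 values are encoded as NumberDataPoint_AsInt and float64 values as
+// NumberDataPoint_AsDouble.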
+func DataPoints[N int64 | float64](dPts []metricdata.DataPoint[N]) []*mpb.NumberDataPoint {
+	out := make([]*mpb.NumberDataPoint, 0, len(dPts))
+	for _, dPt := range dPts {
+		ndp := &mpb.NumberDataPoint{
+			Attributes:        AttrIter(dPt.Attributes.Iter()),
+			StartTimeUnixNano: uint64(dPt.StartTime.UnixNano()),
+			TimeUnixNano:      uint64(dPt.Time.UnixNano()),
+		}
+		switch v := any(dPt.Value).(type) {
+		case int64:
+			ndp.Value = &mpb.NumberDataPoint_AsInt{
+				AsInt: v,
+			}
+		case float64:
+			ndp.Value = &mpb.NumberDataPoint_AsDouble{
+				AsDouble: v,
+			}
+		}
+		out = append(out, ndp)
+	}
+	return out
+}
+
+// Histogram returns an OTLP Metric_Histogram generated from h. If the
+// temporality of h is unknown, an error is returned along with a nil
+// Metric_Histogram.
+func Histogram(h metricdata.Histogram) (*mpb.Metric_Histogram, error) {
+	t, err := Temporality(h.Temporality)
+	if err != nil {
+		return nil, err
+	}
+	return &mpb.Metric_Histogram{
+		Histogram: &mpb.Histogram{
+			AggregationTemporality: t,
+			DataPoints:             HistogramDataPoints(h.DataPoints),
+		},
+	}, nil
+}
+
+// HistogramDataPoints returns a slice of OTLP HistogramDataPoint generated
+// from dPts.
+func HistogramDataPoints(dPts []metricdata.HistogramDataPoint) []*mpb.HistogramDataPoint {
+	out := make([]*mpb.HistogramDataPoint, 0, len(dPts))
+	for _, dPt := range dPts {
+		// Copy Sum so the pointer stored below is unique per data point
+		// rather than aliasing the shared loop variable.
+		sum := dPt.Sum
+		out = append(out, &mpb.HistogramDataPoint{
+			Attributes:        AttrIter(dPt.Attributes.Iter()),
+			StartTimeUnixNano: uint64(dPt.StartTime.UnixNano()),
+			TimeUnixNano:      uint64(dPt.Time.UnixNano()),
+			Count:             dPt.Count,
+			Sum:               &sum,
+			BucketCounts:      dPt.BucketCounts,
+			ExplicitBounds:    dPt.Bounds,
+			Min:               dPt.Min,
+			Max:               dPt.Max,
+		})
+	}
+	return out
+}
+
+// Temporality returns an OTLP AggregationTemporality generated from t. If t
+// is unknown, an error is returned along with the invalid
+// AggregationTemporality_AGGREGATION_TEMPORALITY_UNSPECIFIED.
+func Temporality(t metricdata.Temporality) (mpb.AggregationTemporality, error) {
+	switch t {
+	case metricdata.DeltaTemporality:
+		return mpb.AggregationTemporality_AGGREGATION_TEMPORALITY_DELTA, nil
+	case metricdata.CumulativeTemporality:
+		return mpb.AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE, nil
+	default:
+		err := fmt.Errorf("%w: %s", errUnknownTemporality, t)
+		return mpb.AggregationTemporality_AGGREGATION_TEMPORALITY_UNSPECIFIED, err
+	}
+}
diff --git a/exporters/otlp/otlpmetric/internal/transform/metricdata_test.go b/exporters/otlp/otlpmetric/internal/transform/metricdata_test.go
new file mode 100644
index 00000000000..e9745da7b7f
--- /dev/null
+++ b/exporters/otlp/otlpmetric/internal/transform/metricdata_test.go
@@ -0,0 +1,355 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
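(Editor's aside, not part of the patch: a tiny sketch of the Temporality mapping from within the package; temporalityDemo is a hypothetical name.)

	package transform

	import (
		"fmt"

		"go.opentelemetry.io/otel/sdk/metric/metricdata"
	)

	// temporalityDemo shows that known temporalities convert cleanly while
	// the zero value yields errUnknownTemporality.
	func temporalityDemo() {
		t, err := Temporality(metricdata.DeltaTemporality)
		fmt.Println(t, err) // AGGREGATION_TEMPORALITY_DELTA <nil>

		if _, err := Temporality(metricdata.Temporality(0)); err != nil {
			fmt.Println("unknown temporality rejected:", err)
		}
	}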
+ +//go:build go1.18 +// +build go1.18 + +package transform // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/transform" + +import ( + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/metric/unit" + "go.opentelemetry.io/otel/sdk/instrumentation" + "go.opentelemetry.io/otel/sdk/metric/metricdata" + "go.opentelemetry.io/otel/sdk/resource" + semconv "go.opentelemetry.io/otel/semconv/v1.10.0" + cpb "go.opentelemetry.io/proto/otlp/common/v1" + mpb "go.opentelemetry.io/proto/otlp/metrics/v1" + rpb "go.opentelemetry.io/proto/otlp/resource/v1" +) + +type unknownAggT struct { + metricdata.Aggregation +} + +var ( + // Sat Jan 01 2000 00:00:00 GMT+0000. + start = time.Date(2000, time.January, 01, 0, 0, 0, 0, time.FixedZone("GMT", 0)) + end = start.Add(30 * time.Second) + + alice = attribute.NewSet(attribute.String("user", "alice")) + bob = attribute.NewSet(attribute.String("user", "bob")) + + pbAlice = &cpb.KeyValue{Key: "user", Value: &cpb.AnyValue{ + Value: &cpb.AnyValue_StringValue{StringValue: "alice"}, + }} + pbBob = &cpb.KeyValue{Key: "user", Value: &cpb.AnyValue{ + Value: &cpb.AnyValue_StringValue{StringValue: "bob"}, + }} + + min, max, sum = 2.0, 4.0, 90.0 + otelHDP = []metricdata.HistogramDataPoint{{ + Attributes: alice, + StartTime: start, + Time: end, + Count: 30, + Bounds: []float64{1, 5}, + BucketCounts: []uint64{0, 30, 0}, + Min: &min, + Max: &max, + Sum: sum, + }} + + pbHDP = []*mpb.HistogramDataPoint{{ + Attributes: []*cpb.KeyValue{pbAlice}, + StartTimeUnixNano: uint64(start.UnixNano()), + TimeUnixNano: uint64(end.UnixNano()), + Count: 30, + Sum: &sum, + ExplicitBounds: []float64{1, 5}, + BucketCounts: []uint64{0, 30, 0}, + Min: &min, + Max: &max, + }} + + otelHist = metricdata.Histogram{ + Temporality: metricdata.DeltaTemporality, + DataPoints: otelHDP, + } + invalidTemporality metricdata.Temporality + otelHistInvalid = metricdata.Histogram{ + Temporality: invalidTemporality, + DataPoints: otelHDP, + } + + pbHist = &mpb.Histogram{ + AggregationTemporality: mpb.AggregationTemporality_AGGREGATION_TEMPORALITY_DELTA, + DataPoints: pbHDP, + } + + otelDPtsInt64 = []metricdata.DataPoint[int64]{ + {Attributes: alice, StartTime: start, Time: end, Value: 1}, + {Attributes: bob, StartTime: start, Time: end, Value: 2}, + } + otelDPtsFloat64 = []metricdata.DataPoint[float64]{ + {Attributes: alice, StartTime: start, Time: end, Value: 1.0}, + {Attributes: bob, StartTime: start, Time: end, Value: 2.0}, + } + + pbDPtsInt64 = []*mpb.NumberDataPoint{ + { + Attributes: []*cpb.KeyValue{pbAlice}, + StartTimeUnixNano: uint64(start.UnixNano()), + TimeUnixNano: uint64(end.UnixNano()), + Value: &mpb.NumberDataPoint_AsInt{AsInt: 1}, + }, + { + Attributes: []*cpb.KeyValue{pbBob}, + StartTimeUnixNano: uint64(start.UnixNano()), + TimeUnixNano: uint64(end.UnixNano()), + Value: &mpb.NumberDataPoint_AsInt{AsInt: 2}, + }, + } + pbDPtsFloat64 = []*mpb.NumberDataPoint{ + { + Attributes: []*cpb.KeyValue{pbAlice}, + StartTimeUnixNano: uint64(start.UnixNano()), + TimeUnixNano: uint64(end.UnixNano()), + Value: &mpb.NumberDataPoint_AsDouble{AsDouble: 1.0}, + }, + { + Attributes: []*cpb.KeyValue{pbBob}, + StartTimeUnixNano: uint64(start.UnixNano()), + TimeUnixNano: uint64(end.UnixNano()), + Value: &mpb.NumberDataPoint_AsDouble{AsDouble: 2.0}, + }, + } + + otelSumInt64 = metricdata.Sum[int64]{ + Temporality: metricdata.CumulativeTemporality, + IsMonotonic: true, + DataPoints: 
otelDPtsInt64, + } + otelSumFloat64 = metricdata.Sum[float64]{ + Temporality: metricdata.DeltaTemporality, + IsMonotonic: false, + DataPoints: otelDPtsFloat64, + } + otelSumInvalid = metricdata.Sum[float64]{ + Temporality: invalidTemporality, + IsMonotonic: false, + DataPoints: otelDPtsFloat64, + } + + pbSumInt64 = &mpb.Sum{ + AggregationTemporality: mpb.AggregationTemporality_AGGREGATION_TEMPORALITY_CUMULATIVE, + IsMonotonic: true, + DataPoints: pbDPtsInt64, + } + pbSumFloat64 = &mpb.Sum{ + AggregationTemporality: mpb.AggregationTemporality_AGGREGATION_TEMPORALITY_DELTA, + IsMonotonic: false, + DataPoints: pbDPtsFloat64, + } + + otelGaugeInt64 = metricdata.Gauge[int64]{DataPoints: otelDPtsInt64} + otelGaugeFloat64 = metricdata.Gauge[float64]{DataPoints: otelDPtsFloat64} + + pbGaugeInt64 = &mpb.Gauge{DataPoints: pbDPtsInt64} + pbGaugeFloat64 = &mpb.Gauge{DataPoints: pbDPtsFloat64} + + unknownAgg unknownAggT + otelMetrics = []metricdata.Metrics{ + { + Name: "int64-gauge", + Description: "Gauge with int64 values", + Unit: unit.Dimensionless, + Data: otelGaugeInt64, + }, + { + Name: "float64-gauge", + Description: "Gauge with float64 values", + Unit: unit.Dimensionless, + Data: otelGaugeFloat64, + }, + { + Name: "int64-sum", + Description: "Sum with int64 values", + Unit: unit.Dimensionless, + Data: otelSumInt64, + }, + { + Name: "float64-sum", + Description: "Sum with float64 values", + Unit: unit.Dimensionless, + Data: otelSumFloat64, + }, + { + Name: "invalid-sum", + Description: "Sum with invalid temporality", + Unit: unit.Dimensionless, + Data: otelSumInvalid, + }, + { + Name: "histogram", + Description: "Histogram", + Unit: unit.Dimensionless, + Data: otelHist, + }, + { + Name: "invalid-histogram", + Description: "Invalid histogram", + Unit: unit.Dimensionless, + Data: otelHistInvalid, + }, + { + Name: "unknown", + Description: "Unknown aggregation", + Unit: unit.Dimensionless, + Data: unknownAgg, + }, + } + + pbMetrics = []*mpb.Metric{ + { + Name: "int64-gauge", + Description: "Gauge with int64 values", + Unit: string(unit.Dimensionless), + Data: &mpb.Metric_Gauge{Gauge: pbGaugeInt64}, + }, + { + Name: "float64-gauge", + Description: "Gauge with float64 values", + Unit: string(unit.Dimensionless), + Data: &mpb.Metric_Gauge{Gauge: pbGaugeFloat64}, + }, + { + Name: "int64-sum", + Description: "Sum with int64 values", + Unit: string(unit.Dimensionless), + Data: &mpb.Metric_Sum{Sum: pbSumInt64}, + }, + { + Name: "float64-sum", + Description: "Sum with float64 values", + Unit: string(unit.Dimensionless), + Data: &mpb.Metric_Sum{Sum: pbSumFloat64}, + }, + { + Name: "histogram", + Description: "Histogram", + Unit: string(unit.Dimensionless), + Data: &mpb.Metric_Histogram{Histogram: pbHist}, + }, + } + + otelScopeMetrics = []metricdata.ScopeMetrics{{ + Scope: instrumentation.Scope{ + Name: "test/code/path", + Version: "v0.1.0", + SchemaURL: semconv.SchemaURL, + }, + Metrics: otelMetrics, + }} + + pbScopeMetrics = []*mpb.ScopeMetrics{{ + Scope: &cpb.InstrumentationScope{ + Name: "test/code/path", + Version: "v0.1.0", + }, + Metrics: pbMetrics, + SchemaUrl: semconv.SchemaURL, + }} + + otelRes = resource.NewWithAttributes( + semconv.SchemaURL, + semconv.ServiceNameKey.String("test server"), + semconv.ServiceVersionKey.String("v0.1.0"), + ) + + pbRes = &rpb.Resource{ + Attributes: []*cpb.KeyValue{ + { + Key: "service.name", + Value: &cpb.AnyValue{ + Value: &cpb.AnyValue_StringValue{StringValue: "test server"}, + }, + }, + { + Key: "service.version", + Value: &cpb.AnyValue{ + Value: 
&cpb.AnyValue_StringValue{StringValue: "v0.1.0"},
+				},
+			},
+		},
+	}
+
+	otelResourceMetrics = metricdata.ResourceMetrics{
+		Resource: otelRes,
+		ScopeMetrics: otelScopeMetrics,
+	}
+
+	pbResourceMetrics = &mpb.ResourceMetrics{
+		Resource: pbRes,
+		ScopeMetrics: pbScopeMetrics,
+		SchemaUrl: semconv.SchemaURL,
+	}
+)
+
+func TestTransformations(t *testing.T) {
+	// Run tests from the "bottom-up" of the metricdata data-types and halt
+	// when a failure occurs to ensure the clearest failure message (as
+	// opposed to testing from the top-down, which would obscure errors deep
+	// inside the structs).
+
+	// DataPoint types.
+	assert.Equal(t, pbHDP, HistogramDataPoints(otelHDP))
+	assert.Equal(t, pbDPtsInt64, DataPoints[int64](otelDPtsInt64))
+	require.Equal(t, pbDPtsFloat64, DataPoints[float64](otelDPtsFloat64))
+
+	// Aggregations.
+	h, err := Histogram(otelHist)
+	assert.NoError(t, err)
+	assert.Equal(t, &mpb.Metric_Histogram{Histogram: pbHist}, h)
+	h, err = Histogram(otelHistInvalid)
+	assert.ErrorIs(t, err, errUnknownTemporality)
+	assert.Nil(t, h)
+
+	s, err := Sum[int64](otelSumInt64)
+	assert.NoError(t, err)
+	assert.Equal(t, &mpb.Metric_Sum{Sum: pbSumInt64}, s)
+	s, err = Sum[float64](otelSumFloat64)
+	assert.NoError(t, err)
+	assert.Equal(t, &mpb.Metric_Sum{Sum: pbSumFloat64}, s)
+	s, err = Sum[float64](otelSumInvalid)
+	assert.ErrorIs(t, err, errUnknownTemporality)
+	assert.Nil(t, s)
+
+	assert.Equal(t, &mpb.Metric_Gauge{Gauge: pbGaugeInt64}, Gauge[int64](otelGaugeInt64))
+	require.Equal(t, &mpb.Metric_Gauge{Gauge: pbGaugeFloat64}, Gauge[float64](otelGaugeFloat64))
+
+	// Metrics.
+	m, err := Metrics(otelMetrics)
+	assert.ErrorIs(t, err, errUnknownTemporality)
+	assert.ErrorIs(t, err, errUnknownAggregation)
+	require.Equal(t, pbMetrics, m)
+
+	// Scope Metrics.
+	sm, err := ScopeMetrics(otelScopeMetrics)
+	assert.ErrorIs(t, err, errUnknownTemporality)
+	assert.ErrorIs(t, err, errUnknownAggregation)
+	require.Equal(t, pbScopeMetrics, sm)
+
+	// Resource Metrics.
+	rm, err := ResourceMetrics(otelResourceMetrics)
+	assert.ErrorIs(t, err, errUnknownTemporality)
+	assert.ErrorIs(t, err, errUnknownAggregation)
+	require.Equal(t, pbResourceMetrics, rm)
+}
diff --git a/exporters/otlp/otlpmetric/options.go b/exporters/otlp/otlpmetric/options.go
deleted file mode 100644
index bd8706a74d3..00000000000
--- a/exporters/otlp/otlpmetric/options.go
+++ /dev/null
@@ -1,43 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package otlpmetric // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric"
-
-import "go.opentelemetry.io/otel/sdk/metric/export/aggregation"
-
-// Option are setting options passed to an Exporter on creation.
-type Option interface {
-	apply(config) config
-}
-
-type exporterOptionFunc func(config) config
-
-func (fn exporterOptionFunc) apply(cfg config) config {
-	return fn(cfg)
-}
-
-type config struct {
-	temporalitySelector aggregation.TemporalitySelector
-}
-
-// WithMetricAggregationTemporalitySelector defines the aggregation.TemporalitySelector used
-// for selecting aggregation.Temporality (i.e., Cumulative vs. Delta
-// aggregation). If not specified otherwise, exporter will use a
-// cumulative temporality selector.
-func WithMetricAggregationTemporalitySelector(selector aggregation.TemporalitySelector) Option {
-	return exporterOptionFunc(func(cfg config) config {
-		cfg.temporalitySelector = selector
-		return cfg
-	})
-}
diff --git a/exporters/otlp/otlpmetric/otlpmetricgrpc/client.go b/exporters/otlp/otlpmetric/otlpmetricgrpc/client.go
index 16cc5322da8..44ad5e2fc0e 100644
--- a/exporters/otlp/otlpmetric/otlpmetricgrpc/client.go
+++ b/exporters/otlp/otlpmetric/otlpmetricgrpc/client.go
@@ -12,12 +12,13 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
+//go:build go1.18
+// +build go1.18
+
 package otlpmetricgrpc // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc"
 
 import (
 	"context"
-	"errors"
-	"sync"
 	"time"
 
 	"google.golang.org/genproto/googleapis/rpc/errdetails"
@@ -28,54 +29,49 @@ import (
 
 	"go.opentelemetry.io/otel/exporters/otlp/internal/retry"
 	"go.opentelemetry.io/otel/exporters/otlp/otlpmetric"
-	"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/otlpconfig"
+	"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/oconf"
+	"go.opentelemetry.io/otel/sdk/metric"
 	colmetricpb "go.opentelemetry.io/proto/otlp/collector/metrics/v1"
 	metricpb "go.opentelemetry.io/proto/otlp/metrics/v1"
 )
 
+// New returns an OpenTelemetry metric Exporter. The Exporter can be used with
+// a PeriodicReader to export OpenTelemetry metric data to an OTLP receiving
+// endpoint using gRPC.
+//
+// If an already established gRPC ClientConn is not passed in options using
+// WithGRPCConn, a connection to the OTLP endpoint will be established based
+// on options. If a connection cannot be established in the lifetime of ctx,
+// an error will be returned.
+func New(ctx context.Context, options ...Option) (metric.Exporter, error) {
+	c, err := newClient(ctx, options...)
+	if err != nil {
+		return nil, err
+	}
+	return otlpmetric.New(c), nil
+}
+
 type client struct {
-	endpoint string
-	dialOpts []grpc.DialOption
 	metadata metadata.MD
 	exportTimeout time.Duration
 	requestFunc retry.RequestFunc
 
-	// stopCtx is used as a parent context for all exports. Therefore, when it
-	// is canceled with the stopFunc all exports are canceled.
-	stopCtx context.Context
-	// stopFunc cancels stopCtx, stopping any active exports.
-	stopFunc context.CancelFunc
-
-	// ourConn keeps track of where conn was created: true if created here on
-	// Start, or false if passed with an option. This is important on Shutdown
-	// as the conn should only be closed if created here on start. Otherwise,
+	// ourConn keeps track of where conn was created: true if created here in
+	// newClient, or false if passed with an option. This is important on
+	// Shutdown as the conn should only be closed if we created it. Otherwise,
 	// it is up to the processes that passed the conn to close it.
 	ourConn bool
 	conn *grpc.ClientConn
-	mscMu sync.RWMutex
 	msc colmetricpb.MetricsServiceClient
 }
 
-// Compile time check *client implements otlpmetric.Client.
-var _ otlpmetric.Client = (*client)(nil)
-
-// NewClient creates a new gRPC metric client.
-func NewClient(opts ...Option) otlpmetric.Client {
-	return newClient(opts...)
-}
-
-func newClient(opts ...Option) *client {
-	cfg := otlpconfig.NewGRPCConfig(asGRPCOptions(opts)...)
-
-	ctx, cancel := context.WithCancel(context.Background())
+// newClient creates a new gRPC metric client.
+func newClient(ctx context.Context, options ...Option) (otlpmetric.Client, error) {
+	cfg := oconf.NewGRPCConfig(asGRPCOptions(options)...)
 
 	c := &client{
-		endpoint: cfg.Metrics.Endpoint,
 		exportTimeout: cfg.Metrics.Timeout,
 		requestFunc: cfg.RetryConfig.RequestFunc(retryable),
-		dialOpts: cfg.DialOptions,
-		stopCtx: ctx,
-		stopFunc: cancel,
 		conn: cfg.GRPCConn,
 	}
 
@@ -83,17 +79,12 @@ func newClient(opts ...Option) *client {
 		c.metadata = metadata.New(cfg.Metrics.Headers)
 	}
 
-	return c
-}
-
-// Start establishes a gRPC connection to the collector.
-func (c *client) Start(ctx context.Context) error {
 	if c.conn == nil {
 		// If the caller did not provide a ClientConn when the client was
 		// created, create one using the configuration they did provide.
-		conn, err := grpc.DialContext(ctx, c.endpoint, c.dialOpts...)
+		conn, err := grpc.DialContext(ctx, cfg.Metrics.Endpoint, cfg.DialOptions...)
 		if err != nil {
-			return err
+			return nil, err
 		}
 		// Keep track that we own the lifecycle of this conn and need to close
 		// it on Shutdown.
@@ -101,69 +92,30 @@ func (c *client) Start(ctx context.Context) error {
 		c.conn = conn
 	}
 
-	// The otlpmetric.Client interface states this method is called just once,
-	// so no need to check if already started.
-	c.mscMu.Lock()
 	c.msc = colmetricpb.NewMetricsServiceClient(c.conn)
-	c.mscMu.Unlock()
 
-	return nil
+	return c, nil
 }
 
-var errAlreadyStopped = errors.New("the client is already stopped")
+// ForceFlush does nothing; the client holds no state.
+func (c *client) ForceFlush(ctx context.Context) error { return ctx.Err() }
 
-// Stop shuts down the client.
+// Shutdown shuts down the client, freeing all resources.
 //
 // Any active connections to a remote endpoint are closed if they were created
 // by the client. Any gRPC connection passed during creation using
 // WithGRPCConn will not be closed. It is the caller's responsibility to
 // handle cleanup of that resource.
-//
-// This method synchronizes with the UploadMetrics method of the client. It
-// will wait for any active calls to that method to complete unimpeded, or it
-// will cancel any active calls if ctx expires. If ctx expires, the context
-// error will be forwarded as the returned error. All client held resources
-// will still be released in this situation.
-//
-// If the client has already stopped, an error will be returned describing
-// this.
-func (c *client) Stop(ctx context.Context) error {
-	// Acquire the c.mscMu lock within the ctx lifetime.
-	acquired := make(chan struct{})
-	go func() {
-		c.mscMu.Lock()
-		close(acquired)
-	}()
-	var err error
-	select {
-	case <-ctx.Done():
-		// The Stop timeout is reached. Kill any remaining exports to force
-		// the clear of the lock and save the timeout error to return and
-		// signal the shutdown timed out before cleanly stopping.
-		c.stopFunc()
-		err = ctx.Err()
-
-		// To ensure the client is not left in a dirty state c.msc needs to be
-		// set to nil. To avoid the race condition when doing this, ensure
-		// that all the exports are killed (initiated by c.stopFunc).
-		<-acquired
-	case <-acquired:
-	}
-	// Hold the mscMu lock for the rest of the function to ensure no new
-	// exports are started.
-	defer c.mscMu.Unlock()
-
-	// The otlpmetric.Client interface states this method is called only
-	// once, but there is no guarantee it is called after Start. Ensure the
-	// client is started before doing anything and let the called know if they
-	// made a mistake.
-	if c.msc == nil {
-		return errAlreadyStopped
-	}
+func (c *client) Shutdown(ctx context.Context) error {
+	// The otlpmetric.Exporter synchronizes access to client methods and
+	// ensures this is called only once. The only thing that needs to be done
+	// here is to release any computational resources the client holds.
 
-	// Clear c.msc to signal the client is stopped.
+	c.metadata = nil
+	c.requestFunc = nil
 	c.msc = nil
+
+	err := ctx.Err()
 	if c.ourConn {
 		closeErr := c.conn.Close()
 		// A context timeout error takes precedence over this error.
@@ -171,25 +123,24 @@ func (c *client) Stop(ctx context.Context) error {
 			err = closeErr
 		}
 	}
+	c.conn = nil
 	return err
 }
 
-var errShutdown = errors.New("the client is shutdown")
-
-// UploadMetrics sends a batch of spans.
+// UploadMetrics sends protoMetrics to the connected endpoint.
 //
 // Retryable errors from the server will be handled according to any
 // RetryConfig the client was created with.
 func (c *client) UploadMetrics(ctx context.Context, protoMetrics *metricpb.ResourceMetrics) error {
-	// Hold a read lock to ensure a shut down initiated after this starts does
-	// not abandon the export. This read lock acquire has less priority than a
-	// write lock acquire (i.e. Stop), meaning if the client is shutting down
-	// this will come after the shut down.
-	c.mscMu.RLock()
-	defer c.mscMu.RUnlock()
+	// The otlpmetric.Exporter synchronizes access to client methods, and
+	// ensures this is not called after the Exporter is shut down. The only
+	// thing to do here is send data.
 
-	if c.msc == nil {
-		return errShutdown
+	select {
+	case <-ctx.Done():
+		// Do not upload if the context is already expired.
+		return ctx.Err()
+	default:
 	}
 
 	ctx, cancel := c.exportContext(ctx)
@@ -209,7 +160,7 @@ func (c *client) UploadMetrics(ctx context.Context, protoMetrics *metricpb.Resou
 }
 
 // exportContext returns a copy of parent with an appropriate deadline and
-// cancellation function.
+// cancellation function based on the client's configured export timeout.
 //
 // It is the callers responsibility to cancel the returned context once its
 // use is complete, via the parent or directly with the returned CancelFunc, to
@@ -230,23 +181,12 @@ func (c *client) exportContext(parent context.Context) (context.Context, context
 		ctx = metadata.NewOutgoingContext(ctx, c.metadata)
 	}
 
-	// Unify the client stopCtx with the parent.
-	go func() {
-		select {
-		case <-ctx.Done():
-		case <-c.stopCtx.Done():
-			// Cancel the export as the shutdown has timed out.
-			cancel()
-		}
-	}()
-
 	return ctx, cancel
 }
 
 // retryable returns if err identifies a request that can be retried and a
 // duration to wait for if an explicit throttle time is included in err.
func retryable(err error) (bool, time.Duration) { - //func retryable(err error) (bool, time.Duration) { s := status.Convert(err) switch s.Code() { case codes.Canceled, diff --git a/exporters/otlp/otlpmetric/otlpmetricgrpc/client_test.go b/exporters/otlp/otlpmetric/otlpmetricgrpc/client_test.go index 1f54bf5c610..e78e91f5396 100644 --- a/exporters/otlp/otlpmetric/otlpmetricgrpc/client_test.go +++ b/exporters/otlp/otlpmetric/otlpmetricgrpc/client_test.go @@ -12,320 +12,183 @@ // See the License for the specific language governing permissions and // limitations under the License. -package otlpmetricgrpc_test +//go:build go1.18 +// +build go1.18 + +package otlpmetricgrpc import ( "context" - "fmt" - "net" - "strings" "testing" "time" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "google.golang.org/grpc" + "google.golang.org/genproto/googleapis/rpc/errdetails" "google.golang.org/grpc/codes" - "google.golang.org/grpc/encoding/gzip" "google.golang.org/grpc/status" + "google.golang.org/protobuf/types/known/durationpb" "go.opentelemetry.io/otel/exporters/otlp/otlpmetric" - "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/otlpmetrictest" - "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc" - "go.opentelemetry.io/otel/sdk/resource" + "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/otest" + "go.opentelemetry.io/otel/sdk/metric" + "go.opentelemetry.io/otel/sdk/metric/metricdata" ) -var ( - oneRecord = otlpmetrictest.OneRecordReader() - - testResource = resource.Empty() -) - -func TestNewExporterEndToEnd(t *testing.T) { - tests := []struct { - name string - additionalOpts []otlpmetricgrpc.Option +func TestThrottleDuration(t *testing.T) { + c := codes.ResourceExhausted + testcases := []struct { + status *status.Status + expected time.Duration }{ { - name: "StandardExporter", + status: status.New(c, "NoRetryInfo"), + expected: 0, }, { - name: "WithCompressor", - additionalOpts: []otlpmetricgrpc.Option{ - otlpmetricgrpc.WithCompressor(gzip.Name), - }, + status: func() *status.Status { + s, err := status.New(c, "SingleRetryInfo").WithDetails( + &errdetails.RetryInfo{ + RetryDelay: durationpb.New(15 * time.Millisecond), + }, + ) + require.NoError(t, err) + return s + }(), + expected: 15 * time.Millisecond, }, { - name: "WithServiceConfig", - additionalOpts: []otlpmetricgrpc.Option{ - otlpmetricgrpc.WithServiceConfig("{}"), - }, + status: func() *status.Status { + s, err := status.New(c, "ErrorInfo").WithDetails( + &errdetails.ErrorInfo{Reason: "no throttle detail"}, + ) + require.NoError(t, err) + return s + }(), + expected: 0, + }, + { + status: func() *status.Status { + s, err := status.New(c, "ErrorAndRetryInfo").WithDetails( + &errdetails.ErrorInfo{Reason: "with throttle detail"}, + &errdetails.RetryInfo{ + RetryDelay: durationpb.New(13 * time.Minute), + }, + ) + require.NoError(t, err) + return s + }(), + expected: 13 * time.Minute, }, { - name: "WithDialOptions", - additionalOpts: []otlpmetricgrpc.Option{ - otlpmetricgrpc.WithDialOption(grpc.WithBlock()), - }, + status: func() *status.Status { + s, err := status.New(c, "DoubleRetryInfo").WithDetails( + &errdetails.RetryInfo{ + RetryDelay: durationpb.New(13 * time.Minute), + }, + &errdetails.RetryInfo{ + RetryDelay: durationpb.New(15 * time.Minute), + }, + ) + require.NoError(t, err) + return s + }(), + expected: 13 * time.Minute, }, } - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - newExporterEndToEndTest(t, test.additionalOpts) + for _, tc := range 
testcases { + t.Run(tc.status.Message(), func(t *testing.T) { + require.Equal(t, tc.expected, throttleDelay(tc.status)) }) } } -func newGRPCExporter(t *testing.T, ctx context.Context, endpoint string, additionalOpts ...otlpmetricgrpc.Option) *otlpmetric.Exporter { - opts := []otlpmetricgrpc.Option{ - otlpmetricgrpc.WithInsecure(), - otlpmetricgrpc.WithEndpoint(endpoint), - otlpmetricgrpc.WithReconnectionPeriod(50 * time.Millisecond), +func TestRetryable(t *testing.T) { + retryableCodes := map[codes.Code]bool{ + codes.OK: false, + codes.Canceled: true, + codes.Unknown: false, + codes.InvalidArgument: false, + codes.DeadlineExceeded: true, + codes.NotFound: false, + codes.AlreadyExists: false, + codes.PermissionDenied: false, + codes.ResourceExhausted: true, + codes.FailedPrecondition: false, + codes.Aborted: true, + codes.OutOfRange: true, + codes.Unimplemented: false, + codes.Internal: false, + codes.Unavailable: true, + codes.DataLoss: true, + codes.Unauthenticated: false, } - opts = append(opts, additionalOpts...) - client := otlpmetricgrpc.NewClient(opts...) - exp, err := otlpmetric.New(ctx, client) - if err != nil { - t.Fatalf("failed to create a new collector exporter: %v", err) + for c, want := range retryableCodes { + got, _ := retryable(status.Error(c, "")) + assert.Equalf(t, want, got, "evaluate(%s)", c) } - return exp } -func newExporterEndToEndTest(t *testing.T, additionalOpts []otlpmetricgrpc.Option) { - mc := runMockCollector(t) - - defer func() { - _ = mc.stop() - }() - - <-time.After(5 * time.Millisecond) +func TestClient(t *testing.T) { + factory := func() (otlpmetric.Client, otest.Collector) { + coll, err := otest.NewGRPCCollector("", nil) + require.NoError(t, err) - ctx := context.Background() - exp := newGRPCExporter(t, ctx, mc.endpoint, additionalOpts...) - defer func() { - ctx, cancel := context.WithTimeout(ctx, 10*time.Second) - defer cancel() - if err := exp.Shutdown(ctx); err != nil { - panic(err) - } - }() - - otlpmetrictest.RunEndToEndTest(ctx, t, exp, mc) -} - -func TestExporterShutdown(t *testing.T) { - mc := runMockCollector(t) - defer func() { - _ = mc.Stop() - }() - - <-time.After(5 * time.Millisecond) - - otlpmetrictest.RunExporterShutdownTest(t, func() otlpmetric.Client { - return otlpmetricgrpc.NewClient( - otlpmetricgrpc.WithInsecure(), - otlpmetricgrpc.WithEndpoint(mc.endpoint), - otlpmetricgrpc.WithReconnectionPeriod(50*time.Millisecond), - ) - }) -} - -func TestNewExporterInvokeStartThenStopManyTimes(t *testing.T) { - mc := runMockCollector(t) - defer func() { - _ = mc.stop() - }() - - ctx := context.Background() - exp := newGRPCExporter(t, ctx, mc.endpoint) - defer func() { - if err := exp.Shutdown(ctx); err != nil { - panic(err) - } - }() - - // Invoke Start numerous times, should return errAlreadyStarted - for i := 0; i < 10; i++ { - if err := exp.Start(ctx); err == nil || !strings.Contains(err.Error(), "already started") { - t.Fatalf("#%d unexpected Start error: %v", i, err) - } + ctx := context.Background() + addr := coll.Addr().String() + client, err := newClient(ctx, WithEndpoint(addr), WithInsecure()) + require.NoError(t, err) + return client, coll } - if err := exp.Shutdown(ctx); err != nil { - t.Fatalf("failed to Shutdown the exporter: %v", err) - } - // Invoke Shutdown numerous times - for i := 0; i < 10; i++ { - if err := exp.Shutdown(ctx); err != nil { - t.Fatalf(`#%d got error (%v) expected none`, i, err) - } - } -} - -// This test takes a long time to run: to skip it, run tests using: -short. 
-func TestNewExporterCollectorOnBadConnection(t *testing.T) { - if testing.Short() { - t.Skipf("Skipping this long running test") - } - - ln, err := net.Listen("tcp", "localhost:0") - if err != nil { - t.Fatalf("Failed to grab an available port: %v", err) - } - // Firstly close the "collector's" channel: optimistically this endpoint won't get reused ASAP - // However, our goal of closing it is to simulate an unavailable connection - _ = ln.Close() - - _, collectorPortStr, _ := net.SplitHostPort(ln.Addr().String()) - - endpoint := fmt.Sprintf("localhost:%s", collectorPortStr) - ctx := context.Background() - exp := newGRPCExporter(t, ctx, endpoint) - _ = exp.Shutdown(ctx) -} - -func TestNewExporterWithEndpoint(t *testing.T) { - mc := runMockCollector(t) - defer func() { - _ = mc.stop() - }() - - ctx := context.Background() - exp := newGRPCExporter(t, ctx, mc.endpoint) - _ = exp.Shutdown(ctx) + t.Run("Integration", otest.RunClientTests(factory)) } -func TestNewExporterWithHeaders(t *testing.T) { - mc := runMockCollector(t) - defer func() { - _ = mc.stop() - }() - - ctx := context.Background() - exp := newGRPCExporter(t, ctx, mc.endpoint, - otlpmetricgrpc.WithHeaders(map[string]string{"header1": "value1"})) - require.NoError(t, exp.Export(ctx, testResource, oneRecord)) - - defer func() { - _ = exp.Shutdown(ctx) - }() - - headers := mc.getHeaders() - require.Len(t, headers.Get("header1"), 1) - assert.Equal(t, "value1", headers.Get("header1")[0]) -} - -func TestNewExporterWithTimeout(t *testing.T) { - tts := []struct { - name string - fn func(exp *otlpmetric.Exporter) error - timeout time.Duration - metrics int - spans int - code codes.Code - delay bool - }{ - { - name: "Timeout Metrics", - fn: func(exp *otlpmetric.Exporter) error { - return exp.Export(context.Background(), testResource, oneRecord) - }, - timeout: time.Millisecond * 100, - code: codes.DeadlineExceeded, - delay: true, - }, - - { - name: "No Timeout Metrics", - fn: func(exp *otlpmetric.Exporter) error { - return exp.Export(context.Background(), testResource, oneRecord) - }, - timeout: time.Minute, - metrics: 1, - code: codes.OK, - }, +func TestConfig(t *testing.T) { + factoryFunc := func(errCh <-chan error, o ...Option) (metric.Exporter, *otest.GRPCCollector) { + coll, err := otest.NewGRPCCollector("", errCh) + require.NoError(t, err) + + ctx := context.Background() + opts := append([]Option{ + WithEndpoint(coll.Addr().String()), + WithInsecure(), + }, o...) + exp, err := New(ctx, opts...) + require.NoError(t, err) + return exp, coll } - for _, tt := range tts { - t.Run(tt.name, func(t *testing.T) { - mc := runMockCollector(t) - if tt.delay { - mc.metricSvc.delay = time.Second * 10 - } - defer func() { - _ = mc.stop() - }() - - ctx := context.Background() - exp := newGRPCExporter(t, ctx, mc.endpoint, otlpmetricgrpc.WithTimeout(tt.timeout), otlpmetricgrpc.WithRetry(otlpmetricgrpc.RetryConfig{Enabled: false})) - defer func() { - _ = exp.Shutdown(ctx) - }() - - err := tt.fn(exp) - - if tt.code == codes.OK { - require.NoError(t, err) - } else { - require.Error(t, err) - } - - s := status.Convert(err) - require.Equal(t, tt.code, s.Code()) - - require.Len(t, mc.getMetrics(), tt.metrics) - }) - } -} - -func TestStartErrorInvalidAddress(t *testing.T) { - client := otlpmetricgrpc.NewClient( - otlpmetricgrpc.WithInsecure(), - // Validate the connection in Start (which should return the error). 
- otlpmetricgrpc.WithDialOption( - grpc.WithBlock(), - grpc.FailOnNonTempDialError(true), - ), - otlpmetricgrpc.WithEndpoint("invalid"), - otlpmetricgrpc.WithReconnectionPeriod(time.Hour), - ) - err := client.Start(context.Background()) - assert.EqualError(t, err, `connection error: desc = "transport: error while dialing: dial tcp: address invalid: missing port in address"`) -} - -func TestEmptyData(t *testing.T) { - mc := runMockCollector(t) - - defer func() { - _ = mc.stop() - }() - - <-time.After(5 * time.Millisecond) - - ctx := context.Background() - exp := newGRPCExporter(t, ctx, mc.endpoint) - defer func() { - assert.NoError(t, exp.Shutdown(ctx)) - }() - - assert.NoError(t, exp.Export(ctx, testResource, otlpmetrictest.EmptyReader())) -} - -func TestFailedMetricTransform(t *testing.T) { - mc := runMockCollector(t) - - defer func() { - _ = mc.stop() - }() - - <-time.After(5 * time.Millisecond) - - ctx := context.Background() - exp := newGRPCExporter(t, ctx, mc.endpoint) - defer func() { - assert.NoError(t, exp.Shutdown(ctx)) - }() + t.Run("WithHeaders", func(t *testing.T) { + key := "my-custom-header" + headers := map[string]string{key: "custom-value"} + exp, coll := factoryFunc(nil, WithHeaders(headers)) + t.Cleanup(coll.Shutdown) + ctx := context.Background() + require.NoError(t, exp.Export(ctx, metricdata.ResourceMetrics{})) + // Ensure everything is flushed. + require.NoError(t, exp.Shutdown(ctx)) + + got := coll.Headers() + require.Contains(t, got, key) + assert.Equal(t, got[key], []string{headers[key]}) + }) - assert.Error(t, exp.Export(ctx, testResource, otlpmetrictest.FailReader{})) + t.Run("WithTimeout", func(t *testing.T) { + // Do not send on errCh so the Collector never responds to the client. + errCh := make(chan error) + t.Cleanup(func() { close(errCh) }) + exp, coll := factoryFunc( + errCh, + WithTimeout(time.Millisecond), + WithRetry(RetryConfig{Enabled: false}), + ) + t.Cleanup(coll.Shutdown) + ctx := context.Background() + t.Cleanup(func() { require.NoError(t, exp.Shutdown(ctx)) }) + err := exp.Export(ctx, metricdata.ResourceMetrics{}) + assert.ErrorContains(t, err, context.DeadlineExceeded.Error()) + }) } diff --git a/exporters/otlp/otlpmetric/otlpmetricgrpc/client_unit_test.go b/exporters/otlp/otlpmetric/otlpmetricgrpc/client_unit_test.go deleted file mode 100644 index ccd4ade1389..00000000000 --- a/exporters/otlp/otlpmetric/otlpmetricgrpc/client_unit_test.go +++ /dev/null @@ -1,193 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package otlpmetricgrpc - -import ( - "context" - "testing" - "time" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "google.golang.org/genproto/googleapis/rpc/errdetails" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" - "google.golang.org/protobuf/types/known/durationpb" -) - -func TestThrottleDuration(t *testing.T) { - c := codes.ResourceExhausted - testcases := []struct { - status *status.Status - expected time.Duration - }{ - { - status: status.New(c, "no retry info"), - expected: 0, - }, - { - status: func() *status.Status { - s, err := status.New(c, "single retry info").WithDetails( - &errdetails.RetryInfo{ - RetryDelay: durationpb.New(15 * time.Millisecond), - }, - ) - require.NoError(t, err) - return s - }(), - expected: 15 * time.Millisecond, - }, - { - status: func() *status.Status { - s, err := status.New(c, "error info").WithDetails( - &errdetails.ErrorInfo{Reason: "no throttle detail"}, - ) - require.NoError(t, err) - return s - }(), - expected: 0, - }, - { - status: func() *status.Status { - s, err := status.New(c, "error and retry info").WithDetails( - &errdetails.ErrorInfo{Reason: "with throttle detail"}, - &errdetails.RetryInfo{ - RetryDelay: durationpb.New(13 * time.Minute), - }, - ) - require.NoError(t, err) - return s - }(), - expected: 13 * time.Minute, - }, - { - status: func() *status.Status { - s, err := status.New(c, "double retry info").WithDetails( - &errdetails.RetryInfo{ - RetryDelay: durationpb.New(13 * time.Minute), - }, - &errdetails.RetryInfo{ - RetryDelay: durationpb.New(15 * time.Minute), - }, - ) - require.NoError(t, err) - return s - }(), - expected: 13 * time.Minute, - }, - } - - for _, tc := range testcases { - t.Run(tc.status.Message(), func(t *testing.T) { - require.Equal(t, tc.expected, throttleDelay(tc.status)) - }) - } -} - -func TestRetryable(t *testing.T) { - retryableCodes := map[codes.Code]bool{ - codes.OK: false, - codes.Canceled: true, - codes.Unknown: false, - codes.InvalidArgument: false, - codes.DeadlineExceeded: true, - codes.NotFound: false, - codes.AlreadyExists: false, - codes.PermissionDenied: false, - codes.ResourceExhausted: true, - codes.FailedPrecondition: false, - codes.Aborted: true, - codes.OutOfRange: true, - codes.Unimplemented: false, - codes.Internal: false, - codes.Unavailable: true, - codes.DataLoss: true, - codes.Unauthenticated: false, - } - - for c, want := range retryableCodes { - got, _ := retryable(status.Error(c, "")) - assert.Equalf(t, want, got, "evaluate(%s)", c) - } -} - -func TestUnstartedStop(t *testing.T) { - client := NewClient() - assert.ErrorIs(t, client.Stop(context.Background()), errAlreadyStopped) -} - -func TestUnstartedUploadMetric(t *testing.T) { - client := NewClient() - assert.ErrorIs(t, client.UploadMetrics(context.Background(), nil), errShutdown) -} - -func TestExportContextHonorsParentDeadline(t *testing.T) { - now := time.Now() - ctx, cancel := context.WithDeadline(context.Background(), now) - t.Cleanup(cancel) - - // Without a client timeout, the parent deadline should be used. - client := newClient(WithTimeout(0)) - eCtx, eCancel := client.exportContext(ctx) - t.Cleanup(eCancel) - - deadline, ok := eCtx.Deadline() - assert.True(t, ok, "deadline not propagated to child context") - assert.Equal(t, now, deadline) -} - -func TestExportContextHonorsClientTimeout(t *testing.T) { - // Setting a timeout should ensure a deadline is set on the context. 
- client := newClient(WithTimeout(1 * time.Second)) - ctx, cancel := client.exportContext(context.Background()) - t.Cleanup(cancel) - - _, ok := ctx.Deadline() - assert.True(t, ok, "timeout not set as deadline for child context") -} - -func TestExportContextLinksStopSignal(t *testing.T) { - rootCtx := context.Background() - - client := newClient(WithInsecure()) - t.Cleanup(func() { require.NoError(t, client.Stop(rootCtx)) }) - require.NoError(t, client.Start(rootCtx)) - - ctx, cancel := client.exportContext(rootCtx) - t.Cleanup(cancel) - - require.False(t, func() bool { - select { - case <-ctx.Done(): - return true - default: - } - return false - }(), "context should not be done prior to canceling it") - - // The client.stopFunc cancels the client.stopCtx. This should have been - // setup as a parent of ctx. Therefore, it should cancel ctx as well. - client.stopFunc() - - // Assert this with Eventually to account for goroutine scheduler timing. - assert.Eventually(t, func() bool { - select { - case <-ctx.Done(): - return true - default: - } - return false - }, 10*time.Second, time.Microsecond) -} diff --git a/exporters/otlp/otlpmetric/otlpmetricgrpc/config.go b/exporters/otlp/otlpmetric/otlpmetricgrpc/config.go new file mode 100644 index 00000000000..d838420d03f --- /dev/null +++ b/exporters/otlp/otlpmetric/otlpmetricgrpc/config.go @@ -0,0 +1,241 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build go1.18 +// +build go1.18 + +package otlpmetricgrpc // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc" + +import ( + "fmt" + "time" + + "google.golang.org/grpc" + "google.golang.org/grpc/credentials" + + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/exporters/otlp/internal/retry" + "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/oconf" +) + +// Option applies a configuration option to the Exporter. +type Option interface { + applyGRPCOption(oconf.Config) oconf.Config +} + +func asGRPCOptions(opts []Option) []oconf.GRPCOption { + converted := make([]oconf.GRPCOption, len(opts)) + for i, o := range opts { + converted[i] = oconf.NewGRPCOption(o.applyGRPCOption) + } + return converted +} + +// RetryConfig defines configuration for retrying the export of metric data +// that failed. +// +// This configuration does not define any network retry strategy. That is +// entirely handled by the gRPC ClientConn. +type RetryConfig retry.Config + +type wrappedOption struct { + oconf.GRPCOption +} + +func (w wrappedOption) applyGRPCOption(cfg oconf.Config) oconf.Config { + return w.ApplyGRPCOption(cfg) +} + +// WithInsecure disables client transport security for the Exporter's gRPC +// connection, just like grpc.WithInsecure() +// (https://pkg.go.dev/google.golang.org/grpc#WithInsecure) does. +// +// If the OTEL_EXPORTER_OTLP_ENDPOINT or OTEL_EXPORTER_OTLP_METRICS_ENDPOINT +// environment variable is set, and this option is not passed, that variable +// value will be used to determine client security. 
If the endpoint has a
+// scheme of "http" or "unix", client security will be disabled. If both are
+// set, OTEL_EXPORTER_OTLP_METRICS_ENDPOINT will take precedence.
+//
+// By default, if an environment variable is not set, and this option is not
+// passed, client security will be used.
+//
+// This option has no effect if WithGRPCConn is used.
+func WithInsecure() Option {
+	return wrappedOption{oconf.WithInsecure()}
+}
+
+// WithEndpoint sets the target endpoint the Exporter will connect to.
+//
+// If the OTEL_EXPORTER_OTLP_ENDPOINT or OTEL_EXPORTER_OTLP_METRICS_ENDPOINT
+// environment variable is set, and this option is not passed, that variable
+// value will be used. If both are set, OTEL_EXPORTER_OTLP_METRICS_ENDPOINT
+// will take precedence.
+//
+// By default, if an environment variable is not set, and this option is not
+// passed, "localhost:4317" will be used.
+//
+// This option has no effect if WithGRPCConn is used.
+func WithEndpoint(endpoint string) Option {
+	return wrappedOption{oconf.WithEndpoint(endpoint)}
+}
+
+// WithReconnectionPeriod sets the minimum amount of time between connection
+// attempts to the target endpoint.
+//
+// This option has no effect if WithGRPCConn is used.
+func WithReconnectionPeriod(rp time.Duration) Option {
+	return wrappedOption{oconf.NewGRPCOption(func(cfg oconf.Config) oconf.Config {
+		cfg.ReconnectionPeriod = rp
+		return cfg
+	})}
+}
+
+func compressorToCompression(compressor string) oconf.Compression {
+	if compressor == "gzip" {
+		return oconf.GzipCompression
+	}
+
+	otel.Handle(fmt.Errorf("invalid compression type: '%s', using no compression as default", compressor))
+	return oconf.NoCompression
+}
+
+// WithCompressor sets the compressor the gRPC client uses.
+//
+// It is the responsibility of the caller to ensure that the compressor set
+// has been registered with google.golang.org/grpc/encoding (see
+// encoding.RegisterCompressor for more information). For example, to register
+// the gzip compressor import the package:
+//
+//	import _ "google.golang.org/grpc/encoding/gzip"
+//
+// If the OTEL_EXPORTER_OTLP_COMPRESSION or
+// OTEL_EXPORTER_OTLP_METRICS_COMPRESSION environment variable is set, and
+// this option is not passed, that variable value will be used. That value can
+// be either "none" or "gzip". If both are set,
+// OTEL_EXPORTER_OTLP_METRICS_COMPRESSION will take precedence.
+//
+// By default, if an environment variable is not set, and this option is not
+// passed, no compressor will be used.
+//
+// This option has no effect if WithGRPCConn is used.
+func WithCompressor(compressor string) Option {
+	return wrappedOption{oconf.WithCompression(compressorToCompression(compressor))}
+}
+
+// WithHeaders will send the provided headers with each gRPC request.
+//
+// If the OTEL_EXPORTER_OTLP_HEADERS or OTEL_EXPORTER_OTLP_METRICS_HEADERS
+// environment variable is set, and this option is not passed, that variable
+// value will be used. The value will be parsed as a list of key value pairs.
+// These pairs are expected to be in the W3C Correlation-Context format
+// without additional semi-colon delimited metadata (i.e. "k1=v1,k2=v2"). If
+// both are set, OTEL_EXPORTER_OTLP_METRICS_HEADERS will take precedence.
+//
+// By default, if an environment variable is not set, and this option is not
+// passed, no user headers will be set.
+func WithHeaders(headers map[string]string) Option {
+	return wrappedOption{oconf.WithHeaders(headers)}
+}
+
+// WithTLSCredentials sets the gRPC connection to use creds.
+//
+// If the OTEL_EXPORTER_OTLP_CERTIFICATE or
+// OTEL_EXPORTER_OTLP_METRICS_CERTIFICATE environment variable is set, and
+// this option is not passed, that variable value will be used. The value will
+// be parsed as the filepath of the TLS certificate chain to use. If both are
+// set, OTEL_EXPORTER_OTLP_METRICS_CERTIFICATE will take precedence.
+//
+// By default, if an environment variable is not set, and this option is not
+// passed, no TLS credentials will be used.
+//
+// This option has no effect if WithGRPCConn is used.
+func WithTLSCredentials(creds credentials.TransportCredentials) Option {
+	return wrappedOption{oconf.NewGRPCOption(func(cfg oconf.Config) oconf.Config {
+		cfg.Metrics.GRPCCredentials = creds
+		return cfg
+	})}
+}
+
+// WithServiceConfig defines the default gRPC service config used.
+//
+// This option has no effect if WithGRPCConn is used.
+func WithServiceConfig(serviceConfig string) Option {
+	return wrappedOption{oconf.NewGRPCOption(func(cfg oconf.Config) oconf.Config {
+		cfg.ServiceConfig = serviceConfig
+		return cfg
+	})}
+}
+
+// WithDialOption sets explicit grpc.DialOptions to use when establishing a
+// gRPC connection. The options here are appended to the internal grpc.DialOptions
+// used so they will take precedence over any other internal grpc.DialOptions
+// they might conflict with.
+//
+// This option has no effect if WithGRPCConn is used.
+func WithDialOption(opts ...grpc.DialOption) Option {
+	return wrappedOption{oconf.NewGRPCOption(func(cfg oconf.Config) oconf.Config {
+		cfg.DialOptions = opts
+		return cfg
+	})}
+}
+
+// WithGRPCConn sets conn as the gRPC ClientConn used for all communication.
+//
+// This option takes precedence over any other option that relates to
+// establishing or persisting a gRPC connection to a target endpoint. Any
+// other option of those types passed will be ignored.
+//
+// It is the caller's responsibility to close the passed conn. The Exporter
+// Shutdown method will not close this connection.
+func WithGRPCConn(conn *grpc.ClientConn) Option {
+	return wrappedOption{oconf.NewGRPCOption(func(cfg oconf.Config) oconf.Config {
+		cfg.GRPCConn = conn
+		return cfg
+	})}
+}
+
+// WithTimeout sets the max amount of time an Exporter will attempt an export.
+//
+// This takes precedence over any retry settings defined by WithRetry. Once
+// this time limit has been reached the export is abandoned and the metric
+// data is dropped.
+//
+// If the OTEL_EXPORTER_OTLP_TIMEOUT or OTEL_EXPORTER_OTLP_METRICS_TIMEOUT
+// environment variable is set, and this option is not passed, that variable
+// value will be used. The value will be parsed as an integer representing the
+// timeout in milliseconds. If both are set,
+// OTEL_EXPORTER_OTLP_METRICS_TIMEOUT will take precedence.
+//
+// By default, if an environment variable is not set, and this option is not
+// passed, a timeout of 10 seconds will be used.
+func WithTimeout(duration time.Duration) Option {
+	return wrappedOption{oconf.WithTimeout(duration)}
+}
+
+// WithRetry sets the retry policy for transient retryable errors that are
+// returned by the target endpoint.
+//
+// If the target endpoint responds with not only a retryable error, but
+// explicitly returns a backoff time in the response, that time will take
+// precedence over these settings.
+//
+// These settings do not define any network retry strategy. That is entirely
+// handled by the gRPC ClientConn.
+//
+// If unset, the default retry policy will be used.
It will retry the export +// 5 seconds after receiving a retryable error and increase exponentially +// after each error for no more than a total time of 1 minute. +func WithRetry(settings RetryConfig) Option { + return wrappedOption{oconf.WithRetry(retry.Config(settings))} +} diff --git a/exporters/otlp/otlpmetric/otlpmetricgrpc/exporter.go b/exporters/otlp/otlpmetric/otlpmetricgrpc/doc.go similarity index 61% rename from exporters/otlp/otlpmetric/otlpmetricgrpc/exporter.go rename to exporters/otlp/otlpmetric/otlpmetricgrpc/doc.go index 197059a6a5b..7820619bf60 100644 --- a/exporters/otlp/otlpmetric/otlpmetricgrpc/exporter.go +++ b/exporters/otlp/otlpmetric/otlpmetricgrpc/doc.go @@ -12,20 +12,6 @@ // See the License for the specific language governing permissions and // limitations under the License. +// Package otlpmetricgrpc provides an otlpmetric.Exporter that communicates +// with an OTLP receiving endpoint using gRPC. package otlpmetricgrpc // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc" - -import ( - "context" - - "go.opentelemetry.io/otel/exporters/otlp/otlpmetric" -) - -// New constructs a new Exporter and starts it. -func New(ctx context.Context, opts ...Option) (*otlpmetric.Exporter, error) { - return otlpmetric.New(ctx, NewClient(opts...)) -} - -// NewUnstarted constructs a new Exporter and does not start it. -func NewUnstarted(opts ...Option) *otlpmetric.Exporter { - return otlpmetric.NewUnstarted(NewClient(opts...)) -} diff --git a/exporters/otlp/otlpmetric/otlpmetricgrpc/example_test.go b/exporters/otlp/otlpmetric/otlpmetricgrpc/example_test.go index 0b0fbd6967a..8ac22bb2c72 100644 --- a/exporters/otlp/otlpmetric/otlpmetricgrpc/example_test.go +++ b/exporters/otlp/otlpmetric/otlpmetricgrpc/example_test.go @@ -12,192 +12,34 @@ // See the License for the specific language governing permissions and // limitations under the License. 
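Taken together, the options defined above are passed to New when constructing the exporter. A minimal sketch, assuming a reachable collector (the endpoint value is illustrative, error handling is elided, and disabling retries mirrors the TestConfig usage above):

exp, err := otlpmetricgrpc.New(ctx,
	otlpmetricgrpc.WithEndpoint("collector.example.com:4317"), // illustrative endpoint
	otlpmetricgrpc.WithInsecure(),
	otlpmetricgrpc.WithTimeout(5*time.Second),
	otlpmetricgrpc.WithRetry(otlpmetricgrpc.RetryConfig{Enabled: false}),
)

If WithGRPCConn were supplied instead, the endpoint and security options would be ignored, per the documentation above.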
+//go:build go1.18 +// +build go1.18 + package otlpmetricgrpc_test import ( "context" - "log" - "time" - - "google.golang.org/grpc/credentials" - "go.opentelemetry.io/otel" - "go.opentelemetry.io/otel/exporters/otlp/otlpmetric" "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc" "go.opentelemetry.io/otel/metric/global" - "go.opentelemetry.io/otel/metric/instrument" - controller "go.opentelemetry.io/otel/sdk/metric/controller/basic" - processor "go.opentelemetry.io/otel/sdk/metric/processor/basic" - "go.opentelemetry.io/otel/sdk/metric/selector/simple" + "go.opentelemetry.io/otel/sdk/metric" ) -func Example_insecure() { - ctx := context.Background() - client := otlpmetricgrpc.NewClient(otlpmetricgrpc.WithInsecure()) - exp, err := otlpmetric.New(ctx, client) - if err != nil { - log.Fatalf("Failed to create the collector exporter: %v", err) - } - defer func() { - ctx, cancel := context.WithTimeout(ctx, time.Second) - defer cancel() - if err := exp.Shutdown(ctx); err != nil { - otel.Handle(err) - } - }() - - pusher := controller.New( - processor.NewFactory( - simple.NewWithHistogramDistribution(), - exp, - ), - controller.WithExporter(exp), - controller.WithCollectPeriod(2*time.Second), - ) - - global.SetMeterProvider(pusher) - - if err := pusher.Start(ctx); err != nil { - log.Fatalf("could not start metric controller: %v", err) - } - defer func() { - ctx, cancel := context.WithTimeout(ctx, time.Second) - defer cancel() - // pushes any last exports to the receiver - if err := pusher.Stop(ctx); err != nil { - otel.Handle(err) - } - }() - - meter := global.Meter("go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc_test") - - // Recorder metric example - - counter, err := meter.SyncFloat64().Counter("an_important_metric", instrument.WithDescription("Measures the cumulative epicness of the app")) - if err != nil { - log.Fatalf("Failed to create the instrument: %v", err) - } - - for i := 0; i < 10; i++ { - log.Printf("Doing really hard work (%d / 10)\n", i+1) - counter.Add(ctx, 1.0) - } -} - -func Example_withTLS() { - // Please take at look at https://pkg.go.dev/google.golang.org/grpc/credentials#TransportCredentials - // for ways on how to initialize gRPC TransportCredentials. 
- creds, err := credentials.NewClientTLSFromFile("my-cert.pem", "") - if err != nil { - log.Fatalf("failed to create gRPC client TLS credentials: %v", err) - } - - ctx := context.Background() - client := otlpmetricgrpc.NewClient(otlpmetricgrpc.WithTLSCredentials(creds)) - exp, err := otlpmetric.New(ctx, client) - if err != nil { - log.Fatalf("failed to create the collector exporter: %v", err) - } - defer func() { - ctx, cancel := context.WithTimeout(ctx, time.Second) - defer cancel() - if err := exp.Shutdown(ctx); err != nil { - otel.Handle(err) - } - }() - - pusher := controller.New( - processor.NewFactory( - simple.NewWithHistogramDistribution(), - exp, - ), - controller.WithExporter(exp), - controller.WithCollectPeriod(2*time.Second), - ) - - global.SetMeterProvider(pusher) - - if err := pusher.Start(ctx); err != nil { - log.Fatalf("could not start metric controller: %v", err) - } - - defer func() { - ctx, cancel := context.WithTimeout(ctx, time.Second) - defer cancel() - // pushes any last exports to the receiver - if err := pusher.Stop(ctx); err != nil { - otel.Handle(err) - } - }() - - meter := global.Meter("go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc_test") - - // Recorder metric example - counter, err := meter.SyncFloat64().Counter("an_important_metric", instrument.WithDescription("Measures the cumulative epicness of the app")) - if err != nil { - log.Fatalf("Failed to create the instrument: %v", err) - } - - for i := 0; i < 10; i++ { - log.Printf("Doing really hard work (%d / 10)\n", i+1) - counter.Add(ctx, 1.0) - } -} - -func Example_withDifferentSignalCollectors() { - client := otlpmetricgrpc.NewClient( - otlpmetricgrpc.WithInsecure(), - otlpmetricgrpc.WithEndpoint("localhost:30080"), - ) +func Example() { ctx := context.Background() - exp, err := otlpmetric.New(ctx, client) + exp, err := otlpmetricgrpc.New(ctx) if err != nil { - log.Fatalf("failed to create the collector exporter: %v", err) + panic(err) } + meterProvider := metric.NewMeterProvider(metric.WithReader(metric.NewPeriodicReader(exp))) defer func() { - ctx, cancel := context.WithTimeout(ctx, time.Second) - defer cancel() - if err := exp.Shutdown(ctx); err != nil { - otel.Handle(err) + if err := meterProvider.Shutdown(ctx); err != nil { + panic(err) } }() + global.SetMeterProvider(meterProvider) - pusher := controller.New( - processor.NewFactory( - simple.NewWithHistogramDistribution(), - exp, - ), - controller.WithExporter(exp), - controller.WithCollectPeriod(2*time.Second), - ) - - global.SetMeterProvider(pusher) - - if err := pusher.Start(ctx); err != nil { - log.Fatalf("could not start metric controller: %v", err) - } - defer func() { - ctx, cancel := context.WithTimeout(ctx, time.Second) - defer cancel() - // pushes any last exports to the receiver - if err := pusher.Stop(ctx); err != nil { - otel.Handle(err) - } - }() - - meter := global.Meter("go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc_test") - - // Recorder metric example - counter, err := meter.SyncFloat64().Counter("an_important_metric", instrument.WithDescription("Measures the cumulative epicness of the app")) - if err != nil { - log.Fatalf("Failed to create the instrument: %v", err) - } - - for i := 0; i < 10; i++ { - log.Printf("Doing really hard work (%d / 10)\n", i+1) - counter.Add(ctx, 1.0) - } - - log.Printf("Done!") + // From here, the meterProvider can be used by instrumentation to collect + // telemetry. 
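+	//
+	// A sketch of such instrumentation, using the metric API as in the
+	// removed examples (the instrument name here is illustrative):
+	//
+	//	meter := global.Meter("example")
+	//	counter, _ := meter.SyncFloat64().Counter("an_important_metric")
+	//	counter.Add(ctx, 1.0)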
} diff --git a/exporters/otlp/otlpmetric/otlpmetricgrpc/go.mod b/exporters/otlp/otlpmetric/otlpmetricgrpc/go.mod index d0ad331fdea..f4051953143 100644 --- a/exporters/otlp/otlpmetric/otlpmetricgrpc/go.mod +++ b/exporters/otlp/otlpmetric/otlpmetricgrpc/go.mod @@ -1,6 +1,6 @@ module go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc -go 1.17 +go 1.18 require ( github.com/stretchr/testify v1.7.1 @@ -8,7 +8,6 @@ require ( go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.10.0 go.opentelemetry.io/otel/exporters/otlp/otlpmetric v0.31.0 go.opentelemetry.io/otel/metric v0.31.0 - go.opentelemetry.io/otel/sdk v1.10.0 go.opentelemetry.io/otel/sdk/metric v0.31.0 go.opentelemetry.io/proto/otlp v0.19.0 google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1 @@ -22,8 +21,10 @@ require ( github.com/go-logr/logr v1.2.3 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/golang/protobuf v1.5.2 // indirect + github.com/google/go-cmp v0.5.8 // indirect github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect + go.opentelemetry.io/otel/sdk v1.10.0 // indirect go.opentelemetry.io/otel/trace v1.10.0 // indirect golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4 // indirect golang.org/x/sys v0.0.0-20210510120138-977fb7262007 // indirect diff --git a/exporters/otlp/otlpmetric/otlpmetricgrpc/go.sum b/exporters/otlp/otlpmetric/otlpmetricgrpc/go.sum index 77a03262881..c066ceeecc1 100644 --- a/exporters/otlp/otlpmetric/otlpmetricgrpc/go.sum +++ b/exporters/otlp/otlpmetric/otlpmetricgrpc/go.sum @@ -35,7 +35,6 @@ github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03 github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= -github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4= github.com/cenkalti/backoff/v4 v4.1.3/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= @@ -114,6 +113,7 @@ github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg= +github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= diff --git a/exporters/otlp/otlpmetric/otlpmetricgrpc/mock_collector_test.go b/exporters/otlp/otlpmetric/otlpmetricgrpc/mock_collector_test.go deleted file mode 100644 index 96e5303320a..00000000000 --- a/exporters/otlp/otlpmetric/otlpmetricgrpc/mock_collector_test.go +++ /dev/null @@ -1,169 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file 
except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package otlpmetricgrpc_test - -import ( - "context" - "fmt" - "net" - "sync" - "testing" - "time" - - "google.golang.org/grpc" - "google.golang.org/grpc/metadata" - - "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/otlpmetrictest" - collectormetricpb "go.opentelemetry.io/proto/otlp/collector/metrics/v1" - metricpb "go.opentelemetry.io/proto/otlp/metrics/v1" -) - -func makeMockCollector(t *testing.T, mockConfig *mockConfig) *mockCollector { - return &mockCollector{ - t: t, - metricSvc: &mockMetricService{ - storage: otlpmetrictest.NewMetricsStorage(), - errors: mockConfig.errors, - }, - } -} - -type mockMetricService struct { - collectormetricpb.UnimplementedMetricsServiceServer - - requests int - errors []error - - headers metadata.MD - mu sync.RWMutex - storage otlpmetrictest.MetricsStorage - delay time.Duration -} - -func (mms *mockMetricService) getHeaders() metadata.MD { - mms.mu.RLock() - defer mms.mu.RUnlock() - return mms.headers -} - -func (mms *mockMetricService) getMetrics() []*metricpb.Metric { - mms.mu.RLock() - defer mms.mu.RUnlock() - return mms.storage.GetMetrics() -} - -func (mms *mockMetricService) Export(ctx context.Context, exp *collectormetricpb.ExportMetricsServiceRequest) (*collectormetricpb.ExportMetricsServiceResponse, error) { - if mms.delay > 0 { - time.Sleep(mms.delay) - } - - mms.mu.Lock() - defer func() { - mms.requests++ - mms.mu.Unlock() - }() - - reply := &collectormetricpb.ExportMetricsServiceResponse{} - if mms.requests < len(mms.errors) { - idx := mms.requests - return reply, mms.errors[idx] - } - - mms.headers, _ = metadata.FromIncomingContext(ctx) - mms.storage.AddMetrics(exp) - return reply, nil -} - -type mockCollector struct { - t *testing.T - - metricSvc *mockMetricService - - endpoint string - stopFunc func() - stopOnce sync.Once -} - -type mockConfig struct { - errors []error - endpoint string -} - -var _ collectormetricpb.MetricsServiceServer = (*mockMetricService)(nil) - -var errAlreadyStopped = fmt.Errorf("already stopped") - -func (mc *mockCollector) stop() error { - var err = errAlreadyStopped - mc.stopOnce.Do(func() { - err = nil - if mc.stopFunc != nil { - mc.stopFunc() - } - }) - // Give it sometime to shutdown. - <-time.After(160 * time.Millisecond) - - // Wait for services to finish reading/writing. - // Getting the lock ensures the metricSvc is done flushing. - mc.metricSvc.mu.Lock() - defer mc.metricSvc.mu.Unlock() - return err -} - -func (mc *mockCollector) Stop() error { - return mc.stop() -} - -func (mc *mockCollector) getHeaders() metadata.MD { - return mc.metricSvc.getHeaders() -} - -func (mc *mockCollector) getMetrics() []*metricpb.Metric { - return mc.metricSvc.getMetrics() -} - -func (mc *mockCollector) GetMetrics() []*metricpb.Metric { - return mc.getMetrics() -} - -// runMockCollector is a helper function to create a mock Collector. 
-func runMockCollector(t *testing.T) *mockCollector { - return runMockCollectorAtEndpoint(t, "localhost:0") -} - -func runMockCollectorAtEndpoint(t *testing.T, endpoint string) *mockCollector { - return runMockCollectorWithConfig(t, &mockConfig{endpoint: endpoint}) -} - -func runMockCollectorWithConfig(t *testing.T, mockConfig *mockConfig) *mockCollector { - ln, err := net.Listen("tcp", mockConfig.endpoint) - if err != nil { - t.Fatalf("Failed to get an endpoint: %v", err) - } - - srv := grpc.NewServer() - mc := makeMockCollector(t, mockConfig) - collectormetricpb.RegisterMetricsServiceServer(srv, mc.metricSvc) - go func() { - _ = srv.Serve(ln) - }() - - mc.endpoint = ln.Addr().String() - // srv.Stop calls Close on mc.ln. - mc.stopFunc = srv.Stop - - return mc -} diff --git a/exporters/otlp/otlpmetric/otlpmetricgrpc/options.go b/exporters/otlp/otlpmetric/otlpmetricgrpc/options.go deleted file mode 100644 index e733677f00d..00000000000 --- a/exporters/otlp/otlpmetric/otlpmetricgrpc/options.go +++ /dev/null @@ -1,189 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package otlpmetricgrpc // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc" - -import ( - "fmt" - "time" - - "google.golang.org/grpc" - "google.golang.org/grpc/credentials" - - "go.opentelemetry.io/otel" - "go.opentelemetry.io/otel/exporters/otlp/internal/retry" - "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/otlpconfig" -) - -// Option applies an option to the gRPC driver. -type Option interface { - applyGRPCOption(otlpconfig.Config) otlpconfig.Config -} - -func asGRPCOptions(opts []Option) []otlpconfig.GRPCOption { - converted := make([]otlpconfig.GRPCOption, len(opts)) - for i, o := range opts { - converted[i] = otlpconfig.NewGRPCOption(o.applyGRPCOption) - } - return converted -} - -// RetryConfig defines configuration for retrying export of span batches that -// failed to be received by the target endpoint. -// -// This configuration does not define any network retry strategy. That is -// entirely handled by the gRPC ClientConn. -type RetryConfig retry.Config - -type wrappedOption struct { - otlpconfig.GRPCOption -} - -func (w wrappedOption) applyGRPCOption(cfg otlpconfig.Config) otlpconfig.Config { - return w.ApplyGRPCOption(cfg) -} - -// WithInsecure disables client transport security for the exporter's gRPC -// connection just like grpc.WithInsecure() -// (https://pkg.go.dev/google.golang.org/grpc#WithInsecure) does. Note, by -// default, client security is required unless WithInsecure is used. -// -// This option has no effect if WithGRPCConn is used. -func WithInsecure() Option { - return wrappedOption{otlpconfig.WithInsecure()} -} - -// WithEndpoint sets the target endpoint the exporter will connect to. If -// unset, localhost:4317 will be used as a default. -// -// This option has no effect if WithGRPCConn is used. 
-func WithEndpoint(endpoint string) Option { - return wrappedOption{otlpconfig.WithEndpoint(endpoint)} -} - -// WithReconnectionPeriod set the minimum amount of time between connection -// attempts to the target endpoint. -// -// This option has no effect if WithGRPCConn is used. -func WithReconnectionPeriod(rp time.Duration) Option { - return wrappedOption{otlpconfig.NewGRPCOption(func(cfg otlpconfig.Config) otlpconfig.Config { - cfg.ReconnectionPeriod = rp - return cfg - })} -} - -func compressorToCompression(compressor string) otlpconfig.Compression { - if compressor == "gzip" { - return otlpconfig.GzipCompression - } - - otel.Handle(fmt.Errorf("invalid compression type: '%s', using no compression as default", compressor)) - return otlpconfig.NoCompression -} - -// WithCompressor sets the compressor for the gRPC client to use when sending -// requests. It is the responsibility of the caller to ensure that the -// compressor set has been registered with google.golang.org/grpc/encoding. -// This can be done by encoding.RegisterCompressor. Some compressors -// auto-register on import, such as gzip, which can be registered by calling -// `import _ "google.golang.org/grpc/encoding/gzip"`. -// -// This option has no effect if WithGRPCConn is used. -func WithCompressor(compressor string) Option { - return wrappedOption{otlpconfig.WithCompression(compressorToCompression(compressor))} -} - -// WithHeaders will send the provided headers with each gRPC requests. -func WithHeaders(headers map[string]string) Option { - return wrappedOption{otlpconfig.WithHeaders(headers)} -} - -// WithTLSCredentials allows the connection to use TLS credentials when -// talking to the server. It takes in grpc.TransportCredentials instead of say -// a Certificate file or a tls.Certificate, because the retrieving of these -// credentials can be done in many ways e.g. plain file, in code tls.Config or -// by certificate rotation, so it is up to the caller to decide what to use. -// -// This option has no effect if WithGRPCConn is used. -func WithTLSCredentials(creds credentials.TransportCredentials) Option { - return wrappedOption{otlpconfig.NewGRPCOption(func(cfg otlpconfig.Config) otlpconfig.Config { - cfg.Metrics.GRPCCredentials = creds - return cfg - })} -} - -// WithServiceConfig defines the default gRPC service config used. -// -// This option has no effect if WithGRPCConn is used. -func WithServiceConfig(serviceConfig string) Option { - return wrappedOption{otlpconfig.NewGRPCOption(func(cfg otlpconfig.Config) otlpconfig.Config { - cfg.ServiceConfig = serviceConfig - return cfg - })} -} - -// WithDialOption sets explicit grpc.DialOptions to use when making a -// connection. The options here are appended to the internal grpc.DialOptions -// used so they will take precedence over any other internal grpc.DialOptions -// they might conflict with. -// -// This option has no effect if WithGRPCConn is used. -func WithDialOption(opts ...grpc.DialOption) Option { - return wrappedOption{otlpconfig.NewGRPCOption(func(cfg otlpconfig.Config) otlpconfig.Config { - cfg.DialOptions = opts - return cfg - })} -} - -// WithGRPCConn sets conn as the gRPC ClientConn used for all communication. -// -// This option takes precedence over any other option that relates to -// establishing or persisting a gRPC connection to a target endpoint. Any -// other option of those types passed will be ignored. -// -// It is the callers responsibility to close the passed conn. The client -// Shutdown method will not close this connection. 
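The WithGRPCConn ownership contract described in the comment above (the caller, not the exporter's Shutdown, closes the connection) is easy to get wrong. A minimal sketch of the intended usage; it assumes the package-level New constructor the reworked otlpmetricgrpc exposes, and the endpoint and credentials here are illustrative only:

```
package main

import (
	"context"
	"log"

	"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	ctx := context.Background()

	// The caller dials the connection and therefore owns it.
	conn, err := grpc.Dial("localhost:4317",
		grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatal(err)
	}
	// Shutdown will not close conn; closing it stays the caller's job.
	defer conn.Close()

	exp, err := otlpmetricgrpc.New(ctx, otlpmetricgrpc.WithGRPCConn(conn))
	if err != nil {
		log.Fatal(err)
	}
	defer func() { _ = exp.Shutdown(ctx) }()
}
```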
-func WithGRPCConn(conn *grpc.ClientConn) Option { - return wrappedOption{otlpconfig.NewGRPCOption(func(cfg otlpconfig.Config) otlpconfig.Config { - cfg.GRPCConn = conn - return cfg - })} -} - -// WithTimeout sets the max amount of time a client will attempt to export a -// batch of spans. This takes precedence over any retry settings defined with -// WithRetry, once this time limit has been reached the export is abandoned -// and the batch of spans is dropped. -// -// If unset, the default timeout will be set to 10 seconds. -func WithTimeout(duration time.Duration) Option { - return wrappedOption{otlpconfig.WithTimeout(duration)} -} - -// WithRetry sets the retry policy for transient retryable errors that may be -// returned by the target endpoint when exporting a batch of spans. -// -// If the target endpoint responds with not only a retryable error, but -// explicitly returns a backoff time in the response. That time will take -// precedence over these settings. -// -// These settings do not define any network retry strategy. That is entirely -// handled by the gRPC ClientConn. -// -// If unset, the default retry policy will be used. It will retry the export -// 5 seconds after receiving a retryable error and increase exponentially -// after each error for no more than a total time of 1 minute. -func WithRetry(settings RetryConfig) Option { - return wrappedOption{otlpconfig.WithRetry(retry.Config(settings))} -} diff --git a/exporters/otlp/otlpmetric/otlpmetrichttp/certificate_test.go b/exporters/otlp/otlpmetric/otlpmetrichttp/certificate_test.go deleted file mode 100644 index d75547f6e4c..00000000000 --- a/exporters/otlp/otlpmetric/otlpmetrichttp/certificate_test.go +++ /dev/null @@ -1,92 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package otlpmetrichttp_test - -import ( - "bytes" - "crypto/ecdsa" - "crypto/elliptic" - cryptorand "crypto/rand" - "crypto/x509" - "crypto/x509/pkix" - "encoding/pem" - "math/big" - mathrand "math/rand" - "net" - "time" -) - -type mathRandReader struct{} - -func (mathRandReader) Read(p []byte) (n int, err error) { - return mathrand.Read(p) -} - -var randReader mathRandReader - -type pemCertificate struct { - Certificate []byte - PrivateKey []byte -} - -// Based on https://golang.org/src/crypto/tls/generate_cert.go, -// simplified and weakened. 
-func generateWeakCertificate() (*pemCertificate, error) { - priv, err := ecdsa.GenerateKey(elliptic.P256(), randReader) - if err != nil { - return nil, err - } - keyUsage := x509.KeyUsageDigitalSignature - notBefore := time.Now() - notAfter := notBefore.Add(time.Hour) - serialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128) - serialNumber, err := cryptorand.Int(randReader, serialNumberLimit) - if err != nil { - return nil, err - } - template := x509.Certificate{ - SerialNumber: serialNumber, - Subject: pkix.Name{ - Organization: []string{"otel-go"}, - }, - NotBefore: notBefore, - NotAfter: notAfter, - KeyUsage: keyUsage, - ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}, - BasicConstraintsValid: true, - DNSNames: []string{"localhost"}, - IPAddresses: []net.IP{net.IPv6loopback, net.IPv4(127, 0, 0, 1)}, - } - derBytes, err := x509.CreateCertificate(randReader, &template, &template, &priv.PublicKey, priv) - if err != nil { - return nil, err - } - certificateBuffer := new(bytes.Buffer) - if err := pem.Encode(certificateBuffer, &pem.Block{Type: "CERTIFICATE", Bytes: derBytes}); err != nil { - return nil, err - } - privDERBytes, err := x509.MarshalPKCS8PrivateKey(priv) - if err != nil { - return nil, err - } - privBuffer := new(bytes.Buffer) - if err := pem.Encode(privBuffer, &pem.Block{Type: "PRIVATE KEY", Bytes: privDERBytes}); err != nil { - return nil, err - } - return &pemCertificate{ - Certificate: certificateBuffer.Bytes(), - PrivateKey: privBuffer.Bytes(), - }, nil -} diff --git a/exporters/otlp/otlpmetric/otlpmetrichttp/client.go b/exporters/otlp/otlpmetric/otlpmetrichttp/client.go index 766bcf48744..1bfebc6fbd6 100644 --- a/exporters/otlp/otlpmetric/otlpmetrichttp/client.go +++ b/exporters/otlp/otlpmetric/otlpmetrichttp/client.go @@ -12,6 +12,9 @@ // See the License for the specific language governing permissions and // limitations under the License. +//go:build go1.18 +// +build go1.18 + package otlpmetrichttp // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp" import ( @@ -31,24 +34,35 @@ import ( "go.opentelemetry.io/otel/exporters/otlp/internal/retry" "go.opentelemetry.io/otel/exporters/otlp/otlpmetric" - "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/otlpconfig" + "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/oconf" + "go.opentelemetry.io/otel/sdk/metric" colmetricpb "go.opentelemetry.io/proto/otlp/collector/metrics/v1" metricpb "go.opentelemetry.io/proto/otlp/metrics/v1" ) -const contentTypeProto = "application/x-protobuf" +// New returns an OpenTelemetry metric Exporter. The Exporter can be used with +// a PeriodicReader to export OpenTelemetry metric data to an OTLP receiving +// endpoint using protobufs over HTTP. +func New(_ context.Context, opts ...Option) (metric.Exporter, error) { + c, err := newClient(opts...) + if err != nil { + return nil, err + } + return otlpmetric.New(c), nil +} -var gzPool = sync.Pool{ - New: func() interface{} { - w := gzip.NewWriter(io.Discard) - return w - }, +type client struct { + // req is cloned for every upload the client makes. + req *http.Request + compression Compression + requestFunc retry.RequestFunc + httpClient *http.Client } // Keep it in sync with golang's DefaultTransport from net/http! We // have our own copy to avoid handling a situation where the // DefaultTransport is overwritten with some different implementation -// of http.RoundTripper or it's modified by other package. +// of http.RoundTripper or it's modified by another package. 
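The comment above motivates keeping a private copy of the transport: any package can mutate the shared http.DefaultTransport, and every client that falls back to it silently inherits the change. A small self-contained sketch of that failure mode:

```
package main

import (
	"fmt"
	"net/http"
)

func main() {
	// Another package mutates the process-wide default transport...
	http.DefaultTransport.(*http.Transport).MaxIdleConns = 1

	// ...and http.DefaultClient, which falls back to DefaultTransport
	// when its Transport field is nil, inherits the change.
	t := http.DefaultClient.Transport
	if t == nil {
		t = http.DefaultTransport
	}
	fmt.Println(t.(*http.Transport).MaxIdleConns) // 1, not the stock 100

	// A private copy, like the ourTransport variable that follows,
	// is insulated from this kind of interference.
}
```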
var ourTransport = &http.Transport{ Proxy: http.ProxyFromEnvironment, DialContext: (&net.Dialer{ @@ -62,19 +76,9 @@ var ourTransport = &http.Transport{ ExpectContinueTimeout: 1 * time.Second, } -type client struct { - name string - cfg otlpconfig.SignalConfig - generalCfg otlpconfig.Config - requestFunc retry.RequestFunc - client *http.Client - stopCh chan struct{} - stopOnce sync.Once -} - -// NewClient creates a new HTTP metric client. -func NewClient(opts ...Option) otlpmetric.Client { - cfg := otlpconfig.NewHTTPConfig(asHTTPOptions(opts)...) +// newClient creates a new HTTP metric client. +func newClient(opts ...Option) (otlpmetric.Client, error) { + cfg := oconf.NewHTTPConfig(asHTTPOptions(opts)...) httpClient := &http.Client{ Transport: ourTransport, @@ -86,68 +90,79 @@ func NewClient(opts ...Option) otlpmetric.Client { httpClient.Transport = transport } - stopCh := make(chan struct{}) - return &client{ - name: "metrics", - cfg: cfg.Metrics, - generalCfg: cfg, - requestFunc: cfg.RetryConfig.RequestFunc(evaluate), - stopCh: stopCh, - client: httpClient, + u := &url.URL{ + Scheme: "https", + Host: cfg.Metrics.Endpoint, + Path: cfg.Metrics.URLPath, + } + if cfg.Metrics.Insecure { + u.Scheme = "http" + } + // Body is set when this is cloned during upload. + req, err := http.NewRequest(http.MethodPost, u.String(), http.NoBody) + if err != nil { + return nil, err } -} -// Start does nothing in a HTTP client. -func (d *client) Start(ctx context.Context) error { - // nothing to do - select { - case <-ctx.Done(): - return ctx.Err() - default: + if n := len(cfg.Metrics.Headers); n > 0 { + for k, v := range cfg.Metrics.Headers { + req.Header.Set(k, v) + } } - return nil + req.Header.Set("Content-Type", "application/x-protobuf") + + return &client{ + compression: Compression(cfg.Metrics.Compression), + req: req, + requestFunc: cfg.RetryConfig.RequestFunc(evaluate), + httpClient: httpClient, + }, nil } -// Stop shuts down the client and interrupt any in-flight request. -func (d *client) Stop(ctx context.Context) error { - d.stopOnce.Do(func() { - close(d.stopCh) - }) - select { - case <-ctx.Done(): - return ctx.Err() - default: - } - return nil +// ForceFlush does nothing, the client holds no state. +func (c *client) ForceFlush(ctx context.Context) error { return ctx.Err() } + +// Shutdown shuts down the client, freeing all resources. +func (c *client) Shutdown(ctx context.Context) error { + // The otlpmetric.Exporter synchronizes access to client methods and + // ensures this is called only once. The only thing that needs to be done + // here is to release any computational resources the client holds. + + c.requestFunc = nil + c.httpClient = nil + return ctx.Err() } -// UploadMetrics sends a batch of metrics to the collector. -func (d *client) UploadMetrics(ctx context.Context, protoMetrics *metricpb.ResourceMetrics) error { +// UploadMetrics sends protoMetrics to the connected endpoint. +// +// Retryable errors from the server will be handled according to any +// RetryConfig the client was created with. +func (c *client) UploadMetrics(ctx context.Context, protoMetrics *metricpb.ResourceMetrics) error { + // The otlpmetric.Exporter synchronizes access to client methods, and + // ensures this is not called after the Exporter is shutdown. Only thing + // to do here is send data. 
+ pbRequest := &colmetricpb.ExportMetricsServiceRequest{ ResourceMetrics: []*metricpb.ResourceMetrics{protoMetrics}, } - rawRequest, err := proto.Marshal(pbRequest) + body, err := proto.Marshal(pbRequest) if err != nil { return err } - - ctx, cancel := d.contextWithStop(ctx) - defer cancel() - - request, err := d.newRequest(rawRequest) + request, err := c.newRequest(ctx, body) if err != nil { return err } - return d.requestFunc(ctx, func(ctx context.Context) error { + return c.requestFunc(ctx, func(iCtx context.Context) error { select { - case <-ctx.Done(): - return ctx.Err() + case <-iCtx.Done(): + return iCtx.Err() default: } - request.reset(ctx) - resp, err := d.client.Do(request.Request) + request.reset(iCtx) + resp, err := c.httpClient.Do(request.Request) if err != nil { return err } @@ -167,7 +182,7 @@ func (d *client) UploadMetrics(ctx context.Context, protoMetrics *metricpb.Resou return err } default: - rErr = fmt.Errorf("failed to send %s to %s: %s", d.name, request.URL, resp.Status) + rErr = fmt.Errorf("failed to send metrics to %s: %s", request.URL, resp.Status) } if err := resp.Body.Close(); err != nil { @@ -177,20 +192,18 @@ func (d *client) UploadMetrics(ctx context.Context, protoMetrics *metricpb.Resou }) } -func (d *client) newRequest(body []byte) (request, error) { - u := url.URL{Scheme: d.getScheme(), Host: d.cfg.Endpoint, Path: d.cfg.URLPath} - r, err := http.NewRequest(http.MethodPost, u.String(), nil) - if err != nil { - return request{Request: r}, err - } - - for k, v := range d.cfg.Headers { - r.Header.Set(k, v) - } - r.Header.Set("Content-Type", contentTypeProto) +var gzPool = sync.Pool{ + New: func() interface{} { + w := gzip.NewWriter(io.Discard) + return w + }, +} +func (c *client) newRequest(ctx context.Context, body []byte) (request, error) { + r := c.req.Clone(ctx) req := request{Request: r} - switch Compression(d.cfg.Compression) { + + switch c.compression { case NoCompression: r.ContentLength = (int64)(len(body)) req.bodyReader = bodyReader(body) @@ -249,8 +262,8 @@ type retryableError struct { // throttle delay contained in headers. func newResponseError(header http.Header) error { var rErr retryableError - if s, ok := header["Retry-After"]; ok { - if t, err := strconv.ParseInt(s[0], 10, 64); err == nil { + if v := header.Get("Retry-After"); v != "" { + if t, err := strconv.ParseInt(v, 10, 64); err == nil { rErr.throttle = t } } @@ -275,26 +288,3 @@ func evaluate(err error) (bool, time.Duration) { return true, time.Duration(rErr.throttle) } - -func (d *client) getScheme() string { - if d.cfg.Insecure { - return "http" - } - return "https" -} - -func (d *client) contextWithStop(ctx context.Context) (context.Context, context.CancelFunc) { - // Unify the parent context Done signal with the client's stop - // channel. - ctx, cancel := context.WithCancel(ctx) - go func(ctx context.Context, cancel context.CancelFunc) { - select { - case <-ctx.Done(): - // Nothing to do, either cancelled or deadline - // happened. - case <-d.stopCh: - cancel() - } - }(ctx, cancel) - return ctx, cancel -} diff --git a/exporters/otlp/otlpmetric/otlpmetrichttp/client_test.go b/exporters/otlp/otlpmetric/otlpmetrichttp/client_test.go index 5e614da2640..7d6aa912737 100644 --- a/exporters/otlp/otlpmetric/otlpmetrichttp/client_test.go +++ b/exporters/otlp/otlpmetric/otlpmetrichttp/client_test.go @@ -12,12 +12,18 @@ // See the License for the specific language governing permissions and // limitations under the License. 
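For context on the Retry-After handling introduced above: when the server replies with a retryable status and a Retry-After header, newResponseError records the value and evaluate hands it to the retry policy, where it takes precedence over any configured backoff (the behavior the WithRetry test below exercises). A hypothetical receiver that triggers this path might look like:

```
package main

import "net/http"

func main() {
	// 429 and 503 are the retryable statuses used by the new client
	// tests; Retry-After carries the throttle hint (in seconds, per
	// HTTP semantics).
	http.HandleFunc("/v1/metrics", func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Retry-After", "10")
		w.WriteHeader(http.StatusTooManyRequests)
	})
	_ = http.ListenAndServe("localhost:4318", nil)
}
```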
-package otlpmetrichttp_test +//go:build go1.18 +// +build go1.18 + +package otlpmetrichttp import ( "context" + "crypto/tls" + "errors" + "fmt" "net/http" - "os" + "strings" "testing" "time" @@ -25,247 +31,137 @@ import ( "github.com/stretchr/testify/require" "go.opentelemetry.io/otel/exporters/otlp/otlpmetric" - "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/otlpmetrictest" - "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp" - "go.opentelemetry.io/otel/sdk/resource" -) - -const ( - relOtherMetricsPath = "post/metrics/here" - otherMetricsPath = "/post/metrics/here" + "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/otest" + "go.opentelemetry.io/otel/sdk/metric" + "go.opentelemetry.io/otel/sdk/metric/metricdata" ) -var ( - oneRecord = otlpmetrictest.OneRecordReader() - - testResource = resource.Empty() -) - -var ( - testHeaders = map[string]string{ - "Otel-Go-Key-1": "somevalue", - "Otel-Go-Key-2": "someothervalue", - } -) +func TestClient(t *testing.T) { + factory := func() (otlpmetric.Client, otest.Collector) { + coll, err := otest.NewHTTPCollector("", nil) + require.NoError(t, err) -func TestEndToEnd(t *testing.T) { - tests := []struct { - name string - opts []otlpmetrichttp.Option - mcCfg mockCollectorConfig - tls bool - }{ - { - name: "no extra options", - opts: nil, - }, - { - name: "with gzip compression", - opts: []otlpmetrichttp.Option{ - otlpmetrichttp.WithCompression(otlpmetrichttp.GzipCompression), - }, - }, - { - name: "with empty paths (forced to defaults)", - opts: []otlpmetrichttp.Option{ - otlpmetrichttp.WithURLPath(""), - }, - }, - { - name: "with relative paths", - opts: []otlpmetrichttp.Option{ - otlpmetrichttp.WithURLPath(relOtherMetricsPath), - }, - mcCfg: mockCollectorConfig{ - MetricsURLPath: otherMetricsPath, - }, - }, - { - name: "with TLS", - opts: nil, - mcCfg: mockCollectorConfig{ - WithTLS: true, - }, - tls: true, - }, - { - name: "with extra headers", - opts: []otlpmetrichttp.Option{ - otlpmetrichttp.WithHeaders(testHeaders), - }, - mcCfg: mockCollectorConfig{ - ExpectedHeaders: testHeaders, - }, - }, + addr := coll.Addr().String() + client, err := newClient(WithEndpoint(addr), WithInsecure()) + require.NoError(t, err) + return client, coll } - for _, tc := range tests { - t.Run(tc.name, func(t *testing.T) { - mc := runMockCollector(t, tc.mcCfg) - defer mc.MustStop(t) - allOpts := []otlpmetrichttp.Option{ - otlpmetrichttp.WithEndpoint(mc.Endpoint()), - } - if tc.tls { - tlsConfig := mc.ClientTLSConfig() - require.NotNil(t, tlsConfig) - allOpts = append(allOpts, otlpmetrichttp.WithTLSClientConfig(tlsConfig)) - } else { - allOpts = append(allOpts, otlpmetrichttp.WithInsecure()) - } - allOpts = append(allOpts, tc.opts...) - client := otlpmetrichttp.NewClient(allOpts...) 
- ctx := context.Background() - exporter, err := otlpmetric.New(ctx, client) - if assert.NoError(t, err) { - defer func() { - assert.NoError(t, exporter.Shutdown(ctx)) - }() - otlpmetrictest.RunEndToEndTest(ctx, t, exporter, mc) - } - }) - } + t.Run("Integration", otest.RunClientTests(factory)) } -func TestExporterShutdown(t *testing.T) { - mc := runMockCollector(t, mockCollectorConfig{}) - defer func() { - _ = mc.Stop() - }() +func TestConfig(t *testing.T) { + factoryFunc := func(ePt string, errCh <-chan error, o ...Option) (metric.Exporter, *otest.HTTPCollector) { + coll, err := otest.NewHTTPCollector(ePt, errCh) + require.NoError(t, err) + + opts := []Option{WithEndpoint(coll.Addr().String())} + if !strings.HasPrefix(strings.ToLower(ePt), "https") { + opts = append(opts, WithInsecure()) + } + opts = append(opts, o...) + + ctx := context.Background() + exp, err := New(ctx, opts...) + require.NoError(t, err) + return exp, coll + } - <-time.After(5 * time.Millisecond) + t.Run("WithHeaders", func(t *testing.T) { + key := http.CanonicalHeaderKey("my-custom-header") + headers := map[string]string{key: "custom-value"} + exp, coll := factoryFunc("", nil, WithHeaders(headers)) + ctx := context.Background() + t.Cleanup(func() { require.NoError(t, coll.Shutdown(ctx)) }) + require.NoError(t, exp.Export(ctx, metricdata.ResourceMetrics{})) + // Ensure everything is flushed. + require.NoError(t, exp.Shutdown(ctx)) + + got := coll.Headers() + require.Contains(t, got, key) + assert.Equal(t, got[key], []string{headers[key]}) + }) - otlpmetrictest.RunExporterShutdownTest(t, func() otlpmetric.Client { - return otlpmetrichttp.NewClient( - otlpmetrichttp.WithInsecure(), - otlpmetrichttp.WithEndpoint(mc.endpoint), + t.Run("WithTimeout", func(t *testing.T) { + // Do not send on errCh so the Collector never responds to the client. + errCh := make(chan error) + exp, coll := factoryFunc( + "", + errCh, + WithTimeout(time.Millisecond), + WithRetry(RetryConfig{Enabled: false}), ) + ctx := context.Background() + t.Cleanup(func() { require.NoError(t, coll.Shutdown(ctx)) }) + // Push this after Shutdown so the HTTP server doesn't hang. 
+ t.Cleanup(func() { close(errCh) }) + t.Cleanup(func() { require.NoError(t, exp.Shutdown(ctx)) }) + err := exp.Export(ctx, metricdata.ResourceMetrics{}) + assert.ErrorContains(t, err, context.DeadlineExceeded.Error()) }) -} -func TestTimeout(t *testing.T) { - delay := make(chan struct{}) - mcCfg := mockCollectorConfig{Delay: delay} - mc := runMockCollector(t, mcCfg) - defer mc.MustStop(t) - defer func() { close(delay) }() - client := otlpmetrichttp.NewClient( - otlpmetrichttp.WithEndpoint(mc.Endpoint()), - otlpmetrichttp.WithInsecure(), - otlpmetrichttp.WithTimeout(time.Nanosecond), - ) - ctx := context.Background() - exporter, err := otlpmetric.New(ctx, client) - require.NoError(t, err) - defer func() { - assert.NoError(t, exporter.Shutdown(ctx)) - }() - err = exporter.Export(ctx, testResource, oneRecord) - assert.Equalf(t, true, os.IsTimeout(err), "expected timeout error, got: %v", err) -} + t.Run("WithCompressionGZip", func(t *testing.T) { + exp, coll := factoryFunc("", nil, WithCompression(GzipCompression)) + ctx := context.Background() + t.Cleanup(func() { require.NoError(t, coll.Shutdown(ctx)) }) + t.Cleanup(func() { require.NoError(t, exp.Shutdown(ctx)) }) + assert.NoError(t, exp.Export(ctx, metricdata.ResourceMetrics{})) + assert.Len(t, coll.Collect().Dump(), 1) + }) -func TestEmptyData(t *testing.T) { - mcCfg := mockCollectorConfig{} - mc := runMockCollector(t, mcCfg) - defer mc.MustStop(t) - driver := otlpmetrichttp.NewClient( - otlpmetrichttp.WithEndpoint(mc.Endpoint()), - otlpmetrichttp.WithInsecure(), - ) - ctx := context.Background() - exporter, err := otlpmetric.New(ctx, driver) - require.NoError(t, err) - defer func() { - assert.NoError(t, exporter.Shutdown(ctx)) - }() - assert.NoError(t, err) - err = exporter.Export(ctx, testResource, oneRecord) - assert.NoError(t, err) - assert.NotEmpty(t, mc.GetMetrics()) -} + t.Run("WithRetry", func(t *testing.T) { + emptyErr := errors.New("") + errCh := make(chan error, 3) + header := http.Header{http.CanonicalHeaderKey("Retry-After"): {"10"}} + // Both retryable errors. + errCh <- &otest.HTTPResponseError{Status: http.StatusServiceUnavailable, Err: emptyErr, Header: header} + errCh <- &otest.HTTPResponseError{Status: http.StatusTooManyRequests, Err: emptyErr} + errCh <- nil + exp, coll := factoryFunc("", errCh, WithRetry(RetryConfig{ + Enabled: true, + InitialInterval: time.Nanosecond, + MaxInterval: time.Millisecond, + MaxElapsedTime: time.Minute, + })) + ctx := context.Background() + t.Cleanup(func() { require.NoError(t, coll.Shutdown(ctx)) }) + // Push this after Shutdown so the HTTP server doesn't hang. 
+ t.Cleanup(func() { close(errCh) }) + t.Cleanup(func() { require.NoError(t, exp.Shutdown(ctx)) }) + assert.NoError(t, exp.Export(ctx, metricdata.ResourceMetrics{}), "failed retry") + assert.Len(t, errCh, 0, "failed HTTP responses did not occur") + }) -func TestCancelledContext(t *testing.T) { - statuses := []int{ - http.StatusBadRequest, - } - mcCfg := mockCollectorConfig{ - InjectHTTPStatus: statuses, - } - mc := runMockCollector(t, mcCfg) - defer mc.MustStop(t) - driver := otlpmetrichttp.NewClient( - otlpmetrichttp.WithEndpoint(mc.Endpoint()), - otlpmetrichttp.WithInsecure(), - ) - ctx, cancel := context.WithCancel(context.Background()) - exporter, err := otlpmetric.New(ctx, driver) - require.NoError(t, err) - defer func() { - assert.NoError(t, exporter.Shutdown(context.Background())) - }() - cancel() - _ = exporter.Export(ctx, testResource, oneRecord) - assert.Empty(t, mc.GetMetrics()) -} + t.Run("WithURLPath", func(t *testing.T) { + path := "/prefix/v2/metrics" + ePt := fmt.Sprintf("http://localhost:0%s", path) + exp, coll := factoryFunc(ePt, nil, WithURLPath(path)) + ctx := context.Background() + t.Cleanup(func() { require.NoError(t, coll.Shutdown(ctx)) }) + t.Cleanup(func() { require.NoError(t, exp.Shutdown(ctx)) }) + assert.NoError(t, exp.Export(ctx, metricdata.ResourceMetrics{})) + assert.Len(t, coll.Collect().Dump(), 1) + }) -func TestDeadlineContext(t *testing.T) { - statuses := make([]int, 0, 5) - for i := 0; i < cap(statuses); i++ { - statuses = append(statuses, http.StatusTooManyRequests) - } - mcCfg := mockCollectorConfig{ - InjectHTTPStatus: statuses, - } - mc := runMockCollector(t, mcCfg) - defer mc.MustStop(t) - driver := otlpmetrichttp.NewClient( - otlpmetrichttp.WithEndpoint(mc.Endpoint()), - otlpmetrichttp.WithInsecure(), - otlpmetrichttp.WithBackoff(time.Minute), - ) - ctx := context.Background() - exporter, err := otlpmetric.New(ctx, driver) - require.NoError(t, err) - defer func() { - assert.NoError(t, exporter.Shutdown(context.Background())) - }() - ctx, cancel := context.WithTimeout(ctx, time.Second) - defer cancel() - err = exporter.Export(ctx, testResource, oneRecord) - assert.Error(t, err) - assert.Empty(t, mc.GetMetrics()) -} -func TestStopWhileExporting(t *testing.T) { - statuses := make([]int, 0, 5) - for i := 0; i < cap(statuses); i++ { - statuses = append(statuses, http.StatusTooManyRequests) - } - mcCfg := mockCollectorConfig{ - InjectHTTPStatus: statuses, - } - mc := runMockCollector(t, mcCfg) - defer mc.MustStop(t) - driver := otlpmetrichttp.NewClient( - otlpmetrichttp.WithEndpoint(mc.Endpoint()), - otlpmetrichttp.WithInsecure(), - otlpmetrichttp.WithBackoff(time.Minute), - ) - ctx := context.Background() - exporter, err := otlpmetric.New(ctx, driver) - require.NoError(t, err) - defer func() { - assert.NoError(t, exporter.Shutdown(ctx)) - }() - doneCh := make(chan struct{}) - go func() { - err := exporter.Export(ctx, testResource, oneRecord) - assert.Error(t, err) - assert.Empty(t, mc.GetMetrics()) - close(doneCh) - }() - <-time.After(time.Second) - err = exporter.Shutdown(ctx) - assert.NoError(t, err) -
<-doneCh + t.Run("WithTLSClientConfig", func(t *testing.T) { + ePt := "https://localhost:0" + tlsCfg := &tls.Config{InsecureSkipVerify: true} + exp, coll := factoryFunc(ePt, nil, WithTLSClientConfig(tlsCfg)) + ctx := context.Background() + t.Cleanup(func() { require.NoError(t, coll.Shutdown(ctx)) }) + t.Cleanup(func() { require.NoError(t, exp.Shutdown(ctx)) }) + assert.NoError(t, exp.Export(ctx, metricdata.ResourceMetrics{})) + assert.Len(t, coll.Collect().Dump(), 1) + }) } diff --git a/exporters/otlp/otlpmetric/otlpmetrichttp/client_unit_test.go b/exporters/otlp/otlpmetric/otlpmetrichttp/client_unit_test.go deleted file mode 100644 index 4ba01c85e5e..00000000000 --- a/exporters/otlp/otlpmetric/otlpmetrichttp/client_unit_test.go +++ /dev/null @@ -1,68 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package otlpmetrichttp - -import ( - "testing" - "time" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestUnreasonableBackoff(t *testing.T) { - cIface := NewClient( - WithEndpoint("http://localhost"), - WithInsecure(), - WithBackoff(-time.Microsecond), - ) - require.IsType(t, &client{}, cIface) - c := cIface.(*client) - assert.True(t, c.generalCfg.RetryConfig.Enabled) - assert.Equal(t, 5*time.Second, c.generalCfg.RetryConfig.InitialInterval) - assert.Equal(t, 300*time.Millisecond, c.generalCfg.RetryConfig.MaxInterval) - assert.Equal(t, time.Minute, c.generalCfg.RetryConfig.MaxElapsedTime) -} - -func TestUnreasonableMaxAttempts(t *testing.T) { - type testcase struct { - name string - maxAttempts int - } - for _, tc := range []testcase{ - { - name: "negative max attempts", - maxAttempts: -3, - }, - { - name: "too large max attempts", - maxAttempts: 10, - }, - } { - t.Run(tc.name, func(t *testing.T) { - cIface := NewClient( - WithEndpoint("http://localhost"), - WithInsecure(), - WithMaxAttempts(tc.maxAttempts), - ) - require.IsType(t, &client{}, cIface) - c := cIface.(*client) - assert.True(t, c.generalCfg.RetryConfig.Enabled) - assert.Equal(t, 5*time.Second, c.generalCfg.RetryConfig.InitialInterval) - assert.Equal(t, 30*time.Second, c.generalCfg.RetryConfig.MaxInterval) - assert.Equal(t, 145*time.Second, c.generalCfg.RetryConfig.MaxElapsedTime) - }) - } -} diff --git a/exporters/otlp/otlpmetric/otlpmetrichttp/config.go b/exporters/otlp/otlpmetric/otlpmetrichttp/config.go new file mode 100644 index 00000000000..6228b1f7fa2 --- /dev/null +++ b/exporters/otlp/otlpmetric/otlpmetrichttp/config.go @@ -0,0 +1,184 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build go1.18 +// +build go1.18 + +package otlpmetrichttp // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp" + +import ( + "crypto/tls" + "time" + + "go.opentelemetry.io/otel/exporters/otlp/internal/retry" + "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/oconf" +) + +// Compression describes the compression used for payloads sent to the +// collector. +type Compression oconf.Compression + +const ( + // NoCompression tells the driver to send payloads without + // compression. + NoCompression = Compression(oconf.NoCompression) + // GzipCompression tells the driver to send payloads after + // compressing them with gzip. + GzipCompression = Compression(oconf.GzipCompression) +) + +// Option applies an option to the Exporter. +type Option interface { + applyHTTPOption(oconf.Config) oconf.Config +} + +func asHTTPOptions(opts []Option) []oconf.HTTPOption { + converted := make([]oconf.HTTPOption, len(opts)) + for i, o := range opts { + converted[i] = oconf.NewHTTPOption(o.applyHTTPOption) + } + return converted +} + +// RetryConfig defines configuration for retrying the export of metric data +// that failed. +type RetryConfig retry.Config + +type wrappedOption struct { + oconf.HTTPOption +} + +func (w wrappedOption) applyHTTPOption(cfg oconf.Config) oconf.Config { + return w.ApplyHTTPOption(cfg) +} + +// WithEndpoint sets the target endpoint the Exporter will connect to. This +// endpoint is specified as a host and optional port, no path or scheme should +// be included (see WithInsecure and WithURLPath). +// +// If the OTEL_EXPORTER_OTLP_ENDPOINT or OTEL_EXPORTER_OTLP_METRICS_ENDPOINT +// environment variable is set, and this option is not passed, that variable +// value will be used. If both are set, OTEL_EXPORTER_OTLP_METRICS_ENDPOINT +// will take precedence. +// +// By default, if an environment variable is not set, and this option is not +// passed, "localhost:4318" will be used. +func WithEndpoint(endpoint string) Option { + return wrappedOption{oconf.WithEndpoint(endpoint)} +} + +// WithCompression sets the compression strategy the Exporter will use to +// compress the HTTP body. +// +// If the OTEL_EXPORTER_OTLP_COMPRESSION or +// OTEL_EXPORTER_OTLP_METRICS_COMPRESSION environment variable is set, and +// this option is not passed, that variable value will be used. That value can +// be either "none" or "gzip". If both are set, +// OTEL_EXPORTER_OTLP_METRICS_COMPRESSION will take precedence. +// +// By default, if an environment variable is not set, and this option is not +// passed, no compression strategy will be used. +func WithCompression(compression Compression) Option { + return wrappedOption{oconf.WithCompression(oconf.Compression(compression))} +} + +// WithURLPath sets the URL path the Exporter will send requests to. +// +// If the OTEL_EXPORTER_OTLP_ENDPOINT or OTEL_EXPORTER_OTLP_METRICS_ENDPOINT +// environment variable is set, and this option is not passed, the path +// contained in that variable value will be used. If both are set, +// OTEL_EXPORTER_OTLP_METRICS_ENDPOINT will take precedence. 
+// +// By default, if an environment variable is not set, and this option is not +// passed, "/v1/metrics" will be used. +func WithURLPath(urlPath string) Option { + return wrappedOption{oconf.WithURLPath(urlPath)} +} + +// WithTLSClientConfig sets the TLS configuration the Exporter will use for +// HTTP requests. +// +// If the OTEL_EXPORTER_OTLP_CERTIFICATE or +// OTEL_EXPORTER_OTLP_METRICS_CERTIFICATE environment variable is set, and +// this option is not passed, that variable value will be used. The value will +// be parsed as the filepath of the TLS certificate chain to use. If both are +// set, OTEL_EXPORTER_OTLP_METRICS_CERTIFICATE will take precedence. +// +// By default, if an environment variable is not set, and this option is not +// passed, the system default configuration is used. +func WithTLSClientConfig(tlsCfg *tls.Config) Option { + return wrappedOption{oconf.WithTLSClientConfig(tlsCfg)} +} + +// WithInsecure disables client transport security for the Exporter's HTTP +// connection. +// +// If the OTEL_EXPORTER_OTLP_ENDPOINT or OTEL_EXPORTER_OTLP_METRICS_ENDPOINT +// environment variable is set, and this option is not passed, that variable +// value will be used to determine client security. If the endpoint has a +// scheme of "http" or "unix", client security will be disabled. If both are +// set, OTEL_EXPORTER_OTLP_METRICS_ENDPOINT will take precedence. +// +// By default, if an environment variable is not set, and this option is not +// passed, client security will be used. +func WithInsecure() Option { + return wrappedOption{oconf.WithInsecure()} +} + +// WithHeaders will send the provided headers with each HTTP request. +// +// If the OTEL_EXPORTER_OTLP_HEADERS or OTEL_EXPORTER_OTLP_METRICS_HEADERS +// environment variable is set, and this option is not passed, that variable +// value will be used. The value will be parsed as a list of key value pairs. +// These pairs are expected to be in the W3C Correlation-Context format +// without additional semi-colon delimited metadata (i.e. "k1=v1,k2=v2"). If +// both are set, OTEL_EXPORTER_OTLP_METRICS_HEADERS will take precedence. +// +// By default, if an environment variable is not set, and this option is not +// passed, no user headers will be set. +func WithHeaders(headers map[string]string) Option { + return wrappedOption{oconf.WithHeaders(headers)} +} + +// WithTimeout sets the max amount of time an Exporter will attempt an export. +// +// This takes precedence over any retry settings defined by WithRetry. Once +// this time limit has been reached the export is abandoned and the metric +// data is dropped. +// +// If the OTEL_EXPORTER_OTLP_TIMEOUT or OTEL_EXPORTER_OTLP_METRICS_TIMEOUT +// environment variable is set, and this option is not passed, that variable +// value will be used. The value will be parsed as an integer representing the +// timeout in milliseconds. If both are set, +// OTEL_EXPORTER_OTLP_METRICS_TIMEOUT will take precedence. +// +// By default, if an environment variable is not set, and this option is not +// passed, a timeout of 10 seconds will be used. +func WithTimeout(duration time.Duration) Option { + return wrappedOption{oconf.WithTimeout(duration)} +} + +// WithRetry sets the retry policy for transient retryable errors that are +// returned by the target endpoint. +// +// If the target endpoint responds with not only a retryable error, but +// explicitly returns a backoff time in the response, that time will take +// precedence over these settings.
+// +// If unset, the default retry policy will be used. It will retry the export +// 5 seconds after receiving a retryable error and increase exponentially +// after each error for no more than a total time of 1 minute. +func WithRetry(rc RetryConfig) Option { + return wrappedOption{oconf.WithRetry(retry.Config(rc))} +} diff --git a/exporters/otlp/otlpmetric/otlpmetrichttp/doc.go b/exporters/otlp/otlpmetric/otlpmetrichttp/doc.go index d096388320d..a49e2465171 100644 --- a/exporters/otlp/otlpmetric/otlpmetrichttp/doc.go +++ b/exporters/otlp/otlpmetric/otlpmetrichttp/doc.go @@ -12,12 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -/* -Package otlpmetrichttp provides a client that sends metrics to the collector -using HTTP with binary protobuf payloads. - -This package is currently in a pre-GA phase. Backwards incompatible changes -may be introduced in subsequent minor version releases as we work to track the -evolving OpenTelemetry specification and user feedback. -*/ +// Package otlpmetrichttp provides an otlpmetric.Exporter that communicates +// with an OTLP receiving endpoint using protobuf encoded metric data over +// HTTP. package otlpmetrichttp // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp" diff --git a/exporters/otlp/otlpmetric/otlpmetrichttp/example_test.go b/exporters/otlp/otlpmetric/otlpmetrichttp/example_test.go new file mode 100644 index 00000000000..8cae38d0ef7 --- /dev/null +++ b/exporters/otlp/otlpmetric/otlpmetrichttp/example_test.go @@ -0,0 +1,45 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build go1.18 +// +build go1.18 + +package otlpmetrichttp_test + +import ( + "context" + + "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp" + "go.opentelemetry.io/otel/metric/global" + "go.opentelemetry.io/otel/sdk/metric" +) + +func Example() { + ctx := context.Background() + exp, err := otlpmetrichttp.New(ctx) + if err != nil { + panic(err) + } + + meterProvider := metric.NewMeterProvider(metric.WithReader(metric.NewPeriodicReader(exp))) + defer func() { + if err := meterProvider.Shutdown(ctx); err != nil { + panic(err) + } + }() + global.SetMeterProvider(meterProvider) + + // From here, the meterProvider can be used by instrumentation to collect + // telemetry. +} diff --git a/exporters/otlp/otlpmetric/otlpmetrichttp/exporter.go b/exporters/otlp/otlpmetric/otlpmetrichttp/exporter.go deleted file mode 100644 index de09e7cdcaa..00000000000 --- a/exporters/otlp/otlpmetric/otlpmetrichttp/exporter.go +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package otlpmetrichttp // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp" - -import ( - "context" - - "go.opentelemetry.io/otel/exporters/otlp/otlpmetric" -) - -// New constructs a new Exporter and starts it. -func New(ctx context.Context, opts ...Option) (*otlpmetric.Exporter, error) { - return otlpmetric.New(ctx, NewClient(opts...)) -} - -// NewUnstarted constructs a new Exporter and does not start it. -func NewUnstarted(opts ...Option) *otlpmetric.Exporter { - return otlpmetric.NewUnstarted(NewClient(opts...)) -} diff --git a/exporters/otlp/otlpmetric/otlpmetrichttp/go.mod b/exporters/otlp/otlpmetric/otlpmetrichttp/go.mod index fb8da36dc02..463b978feb6 100644 --- a/exporters/otlp/otlpmetric/otlpmetrichttp/go.mod +++ b/exporters/otlp/otlpmetric/otlpmetrichttp/go.mod @@ -1,12 +1,13 @@ module go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp -go 1.17 +go 1.18 require ( github.com/stretchr/testify v1.7.1 go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.10.0 go.opentelemetry.io/otel/exporters/otlp/otlpmetric v0.31.0 - go.opentelemetry.io/otel/sdk v1.10.0 + go.opentelemetry.io/otel/metric v0.31.0 + go.opentelemetry.io/otel/sdk/metric v0.31.0 go.opentelemetry.io/proto/otlp v0.19.0 google.golang.org/protobuf v1.28.0 ) @@ -17,11 +18,11 @@ require ( github.com/go-logr/logr v1.2.3 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/golang/protobuf v1.5.2 // indirect + github.com/google/go-cmp v0.5.8 // indirect github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect go.opentelemetry.io/otel v1.10.0 // indirect - go.opentelemetry.io/otel/metric v0.31.0 // indirect - go.opentelemetry.io/otel/sdk/metric v0.31.0 // indirect + go.opentelemetry.io/otel/sdk v1.10.0 // indirect go.opentelemetry.io/otel/trace v1.10.0 // indirect golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4 // indirect golang.org/x/sys v0.0.0-20210510120138-977fb7262007 // indirect diff --git a/exporters/otlp/otlpmetric/otlpmetrichttp/go.sum b/exporters/otlp/otlpmetric/otlpmetrichttp/go.sum index 77a03262881..c066ceeecc1 100644 --- a/exporters/otlp/otlpmetric/otlpmetrichttp/go.sum +++ b/exporters/otlp/otlpmetric/otlpmetrichttp/go.sum @@ -35,7 +35,6 @@ github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03 github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= -github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4= github.com/cenkalti/backoff/v4 v4.1.3/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= @@ -114,6 +113,7 @@ github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ 
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg= +github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= diff --git a/exporters/otlp/otlpmetric/otlpmetrichttp/mock_collector_test.go b/exporters/otlp/otlpmetric/otlpmetrichttp/mock_collector_test.go deleted file mode 100644 index 5776c67a016..00000000000 --- a/exporters/otlp/otlpmetric/otlpmetrichttp/mock_collector_test.go +++ /dev/null @@ -1,239 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package otlpmetrichttp_test - -import ( - "bytes" - "compress/gzip" - "context" - "crypto/tls" - "fmt" - "io" - "net" - "net/http" - "sync" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "google.golang.org/protobuf/proto" - - "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/otlpconfig" - "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/otlpmetrictest" - collectormetricpb "go.opentelemetry.io/proto/otlp/collector/metrics/v1" - metricpb "go.opentelemetry.io/proto/otlp/metrics/v1" -) - -type mockCollector struct { - endpoint string - server *http.Server - - spanLock sync.Mutex - metricsStorage otlpmetrictest.MetricsStorage - - injectHTTPStatus []int - injectContentType string - delay <-chan struct{} - - clientTLSConfig *tls.Config - expectedHeaders map[string]string -} - -func (c *mockCollector) Stop() error { - return c.server.Shutdown(context.Background()) -} - -func (c *mockCollector) MustStop(t *testing.T) { - assert.NoError(t, c.server.Shutdown(context.Background())) -} - -func (c *mockCollector) GetMetrics() []*metricpb.Metric { - c.spanLock.Lock() - defer c.spanLock.Unlock() - return c.metricsStorage.GetMetrics() -} - -func (c *mockCollector) Endpoint() string { - return c.endpoint -} - -func (c *mockCollector) ClientTLSConfig() *tls.Config { - return c.clientTLSConfig -} - -func (c *mockCollector) serveMetrics(w http.ResponseWriter, r *http.Request) { - if c.delay != nil { - select { - case <-c.delay: - case <-r.Context().Done(): - return - } - } - - if !c.checkHeaders(r) { - w.WriteHeader(http.StatusBadRequest) - return - } - response := collectormetricpb.ExportMetricsServiceResponse{} - rawResponse, err := proto.Marshal(&response) - if err != nil { - w.WriteHeader(http.StatusInternalServerError) - return - } - if injectedStatus := c.getInjectHTTPStatus(); injectedStatus != 0 { - writeReply(w, rawResponse, injectedStatus, c.injectContentType) - return - } - rawRequest, err := 
readRequest(r) - if err != nil { - w.WriteHeader(http.StatusInternalServerError) - return - } - - request, err := unmarshalMetricsRequest(rawRequest, r.Header.Get("content-type")) - if err != nil { - w.WriteHeader(http.StatusBadRequest) - return - } - writeReply(w, rawResponse, 0, c.injectContentType) - c.spanLock.Lock() - defer c.spanLock.Unlock() - c.metricsStorage.AddMetrics(request) -} - -func unmarshalMetricsRequest(rawRequest []byte, contentType string) (*collectormetricpb.ExportMetricsServiceRequest, error) { - request := &collectormetricpb.ExportMetricsServiceRequest{} - if contentType != "application/x-protobuf" { - return request, fmt.Errorf("invalid content-type: %s, only application/x-protobuf is supported", contentType) - } - err := proto.Unmarshal(rawRequest, request) - return request, err -} - -func (c *mockCollector) checkHeaders(r *http.Request) bool { - for k, v := range c.expectedHeaders { - got := r.Header.Get(k) - if got != v { - return false - } - } - return true -} - -func (c *mockCollector) getInjectHTTPStatus() int { - if len(c.injectHTTPStatus) == 0 { - return 0 - } - status := c.injectHTTPStatus[0] - c.injectHTTPStatus = c.injectHTTPStatus[1:] - if len(c.injectHTTPStatus) == 0 { - c.injectHTTPStatus = nil - } - return status -} - -func readRequest(r *http.Request) ([]byte, error) { - if r.Header.Get("Content-Encoding") == "gzip" { - return readGzipBody(r.Body) - } - return io.ReadAll(r.Body) -} - -func readGzipBody(body io.Reader) ([]byte, error) { - rawRequest := bytes.Buffer{} - gunzipper, err := gzip.NewReader(body) - if err != nil { - return nil, err - } - defer gunzipper.Close() - _, err = io.Copy(&rawRequest, gunzipper) - if err != nil { - return nil, err - } - return rawRequest.Bytes(), nil -} - -func writeReply(w http.ResponseWriter, rawResponse []byte, injectHTTPStatus int, injectContentType string) { - status := http.StatusOK - if injectHTTPStatus != 0 { - status = injectHTTPStatus - } - contentType := "application/x-protobuf" - if injectContentType != "" { - contentType = injectContentType - } - w.Header().Set("Content-Type", contentType) - w.WriteHeader(status) - _, _ = w.Write(rawResponse) -} - -type mockCollectorConfig struct { - MetricsURLPath string - Port int - InjectHTTPStatus []int - InjectContentType string - Delay <-chan struct{} - WithTLS bool - ExpectedHeaders map[string]string -} - -func (c *mockCollectorConfig) fillInDefaults() { - if c.MetricsURLPath == "" { - c.MetricsURLPath = otlpconfig.DefaultMetricsPath - } -} - -func runMockCollector(t *testing.T, cfg mockCollectorConfig) *mockCollector { - cfg.fillInDefaults() - ln, err := net.Listen("tcp", fmt.Sprintf("localhost:%d", cfg.Port)) - require.NoError(t, err) - _, portStr, err := net.SplitHostPort(ln.Addr().String()) - require.NoError(t, err) - m := &mockCollector{ - endpoint: fmt.Sprintf("localhost:%s", portStr), - metricsStorage: otlpmetrictest.NewMetricsStorage(), - injectHTTPStatus: cfg.InjectHTTPStatus, - injectContentType: cfg.InjectContentType, - delay: cfg.Delay, - expectedHeaders: cfg.ExpectedHeaders, - } - mux := http.NewServeMux() - mux.Handle(cfg.MetricsURLPath, http.HandlerFunc(m.serveMetrics)) - server := &http.Server{ - Handler: mux, - } - if cfg.WithTLS { - pem, err := generateWeakCertificate() - require.NoError(t, err) - tlsCertificate, err := tls.X509KeyPair(pem.Certificate, pem.PrivateKey) - require.NoError(t, err) - server.TLSConfig = &tls.Config{ - Certificates: []tls.Certificate{tlsCertificate}, - } - - m.clientTLSConfig = &tls.Config{ - InsecureSkipVerify: true, 
- } - } - go func() { - if cfg.WithTLS { - _ = server.ServeTLS(ln, "", "") - } else { - _ = server.Serve(ln) - } - }() - m.server = server - return m -} diff --git a/exporters/otlp/otlpmetric/otlpmetrichttp/options.go b/exporters/otlp/otlpmetric/otlpmetrichttp/options.go deleted file mode 100644 index 8d12c791954..00000000000 --- a/exporters/otlp/otlpmetric/otlpmetrichttp/options.go +++ /dev/null @@ -1,185 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package otlpmetrichttp // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp" - -import ( - "crypto/tls" - "time" - - "go.opentelemetry.io/otel/exporters/otlp/internal/retry" - "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/otlpconfig" -) - -// Compression describes the compression used for payloads sent to the -// collector. -type Compression otlpconfig.Compression - -const ( - // NoCompression tells the driver to send payloads without - // compression. - NoCompression = Compression(otlpconfig.NoCompression) - // GzipCompression tells the driver to send payloads after - // compressing them with gzip. - GzipCompression = Compression(otlpconfig.GzipCompression) -) - -// Option applies an option to the HTTP client. -type Option interface { - applyHTTPOption(otlpconfig.Config) otlpconfig.Config -} - -func asHTTPOptions(opts []Option) []otlpconfig.HTTPOption { - converted := make([]otlpconfig.HTTPOption, len(opts)) - for i, o := range opts { - converted[i] = otlpconfig.NewHTTPOption(o.applyHTTPOption) - } - return converted -} - -// RetryConfig defines configuration for retrying batches in case of export -// failure using an exponential backoff. -type RetryConfig retry.Config - -type wrappedOption struct { - otlpconfig.HTTPOption -} - -func (w wrappedOption) applyHTTPOption(cfg otlpconfig.Config) otlpconfig.Config { - return w.ApplyHTTPOption(cfg) -} - -// WithEndpoint allows one to set the address of the collector endpoint that -// the driver will use to send metrics. If unset, it will instead try to use -// the default endpoint (localhost:4318). Note that the endpoint must not -// contain any URL path. -func WithEndpoint(endpoint string) Option { - return wrappedOption{otlpconfig.WithEndpoint(endpoint)} -} - -// WithCompression tells the driver to compress the sent data. -func WithCompression(compression Compression) Option { - return wrappedOption{otlpconfig.WithCompression(otlpconfig.Compression(compression))} -} - -// WithURLPath allows one to override the default URL path used -// for sending metrics. If unset, default ("/v1/metrics") will be used. -func WithURLPath(urlPath string) Option { - return wrappedOption{otlpconfig.WithURLPath(urlPath)} -} - -// WithMaxAttempts allows one to override how many times the driver -// will try to send the payload in case of retryable errors. -// The max attempts is limited to at most 5 retries. If unset, -// default (5) will be used. -// -// Deprecated: Use WithRetry instead. 
-func WithMaxAttempts(maxAttempts int) Option { - if maxAttempts > 5 || maxAttempts < 0 { - maxAttempts = 5 - } - return wrappedOption{ - otlpconfig.NewHTTPOption(func(cfg otlpconfig.Config) otlpconfig.Config { - cfg.RetryConfig.Enabled = true - - var ( - init = cfg.RetryConfig.InitialInterval - maxI = cfg.RetryConfig.MaxInterval - maxE = cfg.RetryConfig.MaxElapsedTime - ) - - if init == 0 { - init = retry.DefaultConfig.InitialInterval - } - if maxI == 0 { - maxI = retry.DefaultConfig.MaxInterval - } - if maxE == 0 { - maxE = retry.DefaultConfig.MaxElapsedTime - } - attempts := int64(maxE+init) / int64(maxI) - - if int64(maxAttempts) == attempts { - return cfg - } - - maxE = time.Duration(int64(maxAttempts)*int64(maxI)) - init - - cfg.RetryConfig.InitialInterval = init - cfg.RetryConfig.MaxInterval = maxI - cfg.RetryConfig.MaxElapsedTime = maxE - - return cfg - }), - } -} - -// WithBackoff tells the driver to use the duration as a base of the -// exponential backoff strategy. If unset, default (300ms) will be -// used. -// -// Deprecated: Use WithRetry instead. -func WithBackoff(duration time.Duration) Option { - if duration < 0 { - duration = 300 * time.Millisecond - } - return wrappedOption{ - otlpconfig.NewHTTPOption(func(cfg otlpconfig.Config) otlpconfig.Config { - cfg.RetryConfig.Enabled = true - cfg.RetryConfig.MaxInterval = duration - if cfg.RetryConfig.InitialInterval == 0 { - cfg.RetryConfig.InitialInterval = retry.DefaultConfig.InitialInterval - } - if cfg.RetryConfig.MaxElapsedTime == 0 { - cfg.RetryConfig.MaxElapsedTime = retry.DefaultConfig.MaxElapsedTime - } - return cfg - }), - } -} - -// WithTLSClientConfig can be used to set up a custom TLS -// configuration for the client used to send payloads to the -// collector. Use it if you want to use a custom certificate. -func WithTLSClientConfig(tlsCfg *tls.Config) Option { - return wrappedOption{otlpconfig.WithTLSClientConfig(tlsCfg)} -} - -// WithInsecure tells the driver to connect to the collector using the -// HTTP scheme, instead of HTTPS. -func WithInsecure() Option { - return wrappedOption{otlpconfig.WithInsecure()} -} - -// WithHeaders allows one to tell the driver to send additional HTTP -// headers with the payloads. Specifying headers like Content-Length, -// Content-Encoding and Content-Type may result in a broken driver. -func WithHeaders(headers map[string]string) Option { - return wrappedOption{otlpconfig.WithHeaders(headers)} -} - -// WithTimeout tells the driver the max waiting time for the backend to process -// each metrics batch. If unset, the default will be 10 seconds. -func WithTimeout(duration time.Duration) Option { - return wrappedOption{otlpconfig.WithTimeout(duration)} -} - -// WithRetry configures the retry policy for transient errors that may occurs -// when exporting traces. An exponential back-off algorithm is used to ensure -// endpoints are not overwhelmed with retries. If unset, the default retry -// policy will retry after 5 seconds and increase exponentially after each -// error for a total of 1 minute. 
-func WithRetry(rc RetryConfig) Option { - return wrappedOption{otlpconfig.WithRetry(retry.Config(rc))} -} diff --git a/exporters/prometheus/README.md b/exporters/prometheus/README.md deleted file mode 100644 index ccded2f2829..00000000000 --- a/exporters/prometheus/README.md +++ /dev/null @@ -1,9 +0,0 @@ -# OpenTelemetry-Go Prometheus Exporter - -OpenTelemetry Prometheus exporter - -## Installation - -``` -go get -u go.opentelemetry.io/otel/exporters/prometheus -``` diff --git a/sdk/metric/number/doc.go b/exporters/prometheus/doc.go similarity index 59% rename from sdk/metric/number/doc.go rename to exporters/prometheus/doc.go index 6f947400a4b..f212c6146cf 100644 --- a/sdk/metric/number/doc.go +++ b/exporters/prometheus/doc.go @@ -12,12 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -/* -Package number provides a number abstraction for instruments that -either support int64 or float64 input values. - -This package is currently in a pre-GA phase. Backwards incompatible changes -may be introduced in subsequent minor version releases as we work to track the -evolving OpenTelemetry specification and user feedback. -*/ -package number // import "go.opentelemetry.io/otel/sdk/metric/number" +// Package prometheus provides a Prometheus Exporter that converts +// OTLP metrics into the Prometheus exposition format and implements +// prometheus.Collector to provide a handler for these metrics. +package prometheus // import "go.opentelemetry.io/otel/exporters/prometheus" diff --git a/exporters/prometheus/exporter.go b/exporters/prometheus/exporter.go new file mode 100644 index 00000000000..8527b1b5025 --- /dev/null +++ b/exporters/prometheus/exporter.go @@ -0,0 +1,233 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build go1.18 +// +build go1.18 + +package prometheus // import "go.opentelemetry.io/otel/exporters/prometheus" + +import ( + "context" + "sort" + "strings" + "unicode" + + "github.com/prometheus/client_golang/prometheus" + + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/sdk/metric" + "go.opentelemetry.io/otel/sdk/metric/metricdata" +) + +// Exporter is a Prometheus Exporter that embeds the OTel metric.Reader +// interface for easy instantiation with a MeterProvider. +type Exporter struct { + metric.Reader + Collector prometheus.Collector +} + +// collector is used to implement prometheus.Collector. +type collector struct { + metric.Reader +} + +// config is added here to allow for options expansion in the future. +type config struct{} + +// Option may be used in the future to apply options to a Prometheus Exporter config. +type Option interface { + apply(config) config +} + +// New returns a Prometheus Exporter. +func New(_ ...Option) Exporter { + // this assumes that the default temporality selector will always return cumulative. + // we only support cumulative temporality, so building our own reader enforces this. 
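+	// A manual reader collects on demand, which suits the Prometheus pull + // model: data is gathered when the registered Collector is scraped.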
+ reader := metric.NewManualReader() + e := Exporter{ + Reader: reader, + Collector: &collector{ + Reader: reader, + }, + } + return e +} + +// Describe implements prometheus.Collector. +func (c *collector) Describe(ch chan<- *prometheus.Desc) { + metrics, err := c.Reader.Collect(context.TODO()) + if err != nil { + otel.Handle(err) + } + for _, metricData := range getMetricData(metrics) { + ch <- metricData.description + } +} + +// Collect implements prometheus.Collector. +func (c *collector) Collect(ch chan<- prometheus.Metric) { + metrics, err := c.Reader.Collect(context.TODO()) + if err != nil { + otel.Handle(err) + } + + // TODO(#3166): convert otel resource to target_info + // see https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/data-model.md#resource-attributes-1 + for _, metricData := range getMetricData(metrics) { + if metricData.valueType == prometheus.UntypedValue { + m, err := prometheus.NewConstHistogram(metricData.description, metricData.histogramCount, metricData.histogramSum, metricData.histogramBuckets, metricData.attributeValues...) + if err != nil { + otel.Handle(err) + } + ch <- m + } else { + m, err := prometheus.NewConstMetric(metricData.description, metricData.valueType, metricData.value, metricData.attributeValues...) + if err != nil { + otel.Handle(err) + } + ch <- m + } + } +} + +// metricData holds the metadata as well as values for individual data points. +type metricData struct { + // name should include the unit as a suffix (before _total on counters) + // see https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/data-model.md#metric-metadata-1 + name string + description *prometheus.Desc + attributeValues []string + valueType prometheus.ValueType + value float64 + histogramCount uint64 + histogramSum float64 + histogramBuckets map[float64]uint64 +} + +func getMetricData(metrics metricdata.ResourceMetrics) []*metricData { + allMetrics := make([]*metricData, 0) + for _, scopeMetrics := range metrics.ScopeMetrics { + for _, m := range scopeMetrics.Metrics { + switch v := m.Data.(type) { + case metricdata.Histogram: + allMetrics = append(allMetrics, getHistogramMetricData(v, m)...) + case metricdata.Sum[int64]: + allMetrics = append(allMetrics, getSumMetricData(v, m)...) + case metricdata.Sum[float64]: + allMetrics = append(allMetrics, getSumMetricData(v, m)...) + case metricdata.Gauge[int64]: + allMetrics = append(allMetrics, getGaugeMetricData(v, m)...) + case metricdata.Gauge[float64]: + allMetrics = append(allMetrics, getGaugeMetricData(v, m)...) 
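+			// Metric data types without a case above are silently dropped.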
+ } + } + } + + return allMetrics +} + +func getHistogramMetricData(histogram metricdata.Histogram, m metricdata.Metrics) []*metricData { + // TODO(https://github.com/open-telemetry/opentelemetry-go/issues/3163): support exemplars + dataPoints := make([]*metricData, 0, len(histogram.DataPoints)) + for _, dp := range histogram.DataPoints { + keys, values := getAttrs(dp.Attributes) + desc := prometheus.NewDesc(m.Name, m.Description, keys, nil) + buckets := make(map[float64]uint64, len(dp.Bounds)) + // Prometheus expects cumulative bucket counts, so accumulate as we go. + cumulativeCount := uint64(0) + for i, bound := range dp.Bounds { + cumulativeCount += dp.BucketCounts[i] + buckets[bound] = cumulativeCount + } + md := &metricData{ + name: m.Name, + description: desc, + attributeValues: values, + valueType: prometheus.UntypedValue, + histogramCount: dp.Count, + histogramSum: dp.Sum, + histogramBuckets: buckets, + } + dataPoints = append(dataPoints, md) + } + return dataPoints +} + +func getSumMetricData[N int64 | float64](sum metricdata.Sum[N], m metricdata.Metrics) []*metricData { + // Monotonic sums are exposed as counters; non-monotonic sums (e.g. + // up-down counters) are exposed as gauges. + valueType := prometheus.CounterValue + if !sum.IsMonotonic { + valueType = prometheus.GaugeValue + } + dataPoints := make([]*metricData, 0, len(sum.DataPoints)) + for _, dp := range sum.DataPoints { + keys, values := getAttrs(dp.Attributes) + desc := prometheus.NewDesc(m.Name, m.Description, keys, nil) + md := &metricData{ + name: m.Name, + description: desc, + attributeValues: values, + valueType: valueType, + value: float64(dp.Value), + } + dataPoints = append(dataPoints, md) + } + return dataPoints +} + +func getGaugeMetricData[N int64 | float64](gauge metricdata.Gauge[N], m metricdata.Metrics) []*metricData { + dataPoints := make([]*metricData, 0, len(gauge.DataPoints)) + for _, dp := range gauge.DataPoints { + keys, values := getAttrs(dp.Attributes) + desc := prometheus.NewDesc(m.Name, m.Description, keys, nil) + md := &metricData{ + name: m.Name, + description: desc, + attributeValues: values, + valueType: prometheus.GaugeValue, + value: float64(dp.Value), + } + dataPoints = append(dataPoints, md) + } + return dataPoints +} + +// getAttrs parses the attribute.Set to two lists of matching Prometheus-style +// keys and values. It sanitizes invalid characters and handles duplicate keys +// (due to sanitization) by sorting and concatenating the values following the spec. +func getAttrs(attrs attribute.Set) ([]string, []string) { + keysMap := make(map[string][]string) + itr := attrs.Iter() + for itr.Next() { + kv := itr.Attribute() + key := strings.Map(sanitizeRune, string(kv.Key)) + if _, ok := keysMap[key]; !ok { + keysMap[key] = []string{kv.Value.AsString()} + } else { + // if the sanitized key is a duplicate, append the value to that key's list of values + keysMap[key] = append(keysMap[key], kv.Value.AsString()) + } + } + + keys := make([]string, 0, attrs.Len()) + values := make([]string, 0, attrs.Len()) + for key, vals := range keysMap { + keys = append(keys, key) + sort.Slice(vals, func(i, j int) bool { + return vals[i] < vals[j] + }) + values = append(values, strings.Join(vals, ";")) + } + return keys, values +} + +func sanitizeRune(r rune) rune { + if unicode.IsLetter(r) || unicode.IsDigit(r) || r == ':' || r == '_' { + return r + } + return '_' +} diff --git a/exporters/prometheus/exporter_test.go b/exporters/prometheus/exporter_test.go new file mode 100644 index 00000000000..aaf029e2021 --- /dev/null +++ b/exporters/prometheus/exporter_test.go @@ -0,0 +1,130 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build go1.18 +// +build go1.18 + +package prometheus + +import ( + "context" + "os" + "testing" + + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/testutil" + "github.com/stretchr/testify/require" + + "go.opentelemetry.io/otel/attribute" + otelmetric "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/metric/instrument" + "go.opentelemetry.io/otel/sdk/metric" +) + +func TestPrometheusExporter(t *testing.T) { + testCases := []struct { + name string + recordMetrics func(ctx context.Context, meter otelmetric.Meter) + expectedFile string + }{ + { + name: "counter", + expectedFile: "testdata/counter.txt", + recordMetrics: func(ctx context.Context, meter otelmetric.Meter) { + attrs := []attribute.KeyValue{ + attribute.Key("A").String("B"), + attribute.Key("C").String("D"), + } + counter, err := meter.SyncFloat64().Counter("foo", instrument.WithDescription("a simple counter")) + require.NoError(t, err) + counter.Add(ctx, 5, attrs...) + counter.Add(ctx, 10.3, attrs...) + counter.Add(ctx, 9, attrs...) + }, + }, + { + name: "gauge", + expectedFile: "testdata/gauge.txt", + recordMetrics: func(ctx context.Context, meter otelmetric.Meter) { + attrs := []attribute.KeyValue{ + attribute.Key("A").String("B"), + attribute.Key("C").String("D"), + } + gauge, err := meter.SyncFloat64().UpDownCounter("bar", instrument.WithDescription("a fun little gauge")) + require.NoError(t, err) + gauge.Add(ctx, 100, attrs...) + gauge.Add(ctx, -25, attrs...) + }, + }, + { + name: "histogram", + expectedFile: "testdata/histogram.txt", + recordMetrics: func(ctx context.Context, meter otelmetric.Meter) { + attrs := []attribute.KeyValue{ + attribute.Key("A").String("B"), + attribute.Key("C").String("D"), + } + histogram, err := meter.SyncFloat64().Histogram("baz", instrument.WithDescription("a very nice histogram")) + require.NoError(t, err) + histogram.Record(ctx, 23, attrs...) + histogram.Record(ctx, 7, attrs...) + histogram.Record(ctx, 101, attrs...) + histogram.Record(ctx, 105, attrs...) + }, + }, + { + name: "sanitized attributes to labels", + expectedFile: "testdata/sanitized_labels.txt", + recordMetrics: func(ctx context.Context, meter otelmetric.Meter) { + attrs := []attribute.KeyValue{ + // exact match, value should be overwritten + attribute.Key("A.B").String("X"), + attribute.Key("A.B").String("Q"), + + // unintended match due to sanitization, values should be concatenated + attribute.Key("C.D").String("Y"), + attribute.Key("C/D").String("Z"), + } + counter, err := meter.SyncFloat64().Counter("foo", instrument.WithDescription("a sanitary counter")) + require.NoError(t, err) + counter.Add(ctx, 5, attrs...) + counter.Add(ctx, 10.3, attrs...) + counter.Add(ctx, 9, attrs...) 
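+				// The adds above total 24.3. C.D and C/D both sanitize to C_D, so the + // expected exposition is foo{A_B="Q",C_D="Y;Z"} 24.3 (see testdata/sanitized_labels.txt).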
+ }, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + ctx := context.Background() + + exporter := New() + provider := metric.NewMeterProvider(metric.WithReader(exporter)) + meter := provider.Meter("testmeter") + + registry := prometheus.NewRegistry() + err := registry.Register(exporter.Collector) + require.NoError(t, err) + + tc.recordMetrics(ctx, meter) + + file, err := os.Open(tc.expectedFile) + require.NoError(t, err) + t.Cleanup(func() { require.NoError(t, file.Close()) }) + + err = testutil.GatherAndCompare(registry, file) + require.NoError(t, err) + }) + } +} diff --git a/exporters/prometheus/go.mod b/exporters/prometheus/go.mod index 6575457676a..c06f9ff10a5 100644 --- a/exporters/prometheus/go.mod +++ b/exporters/prometheus/go.mod @@ -1,13 +1,12 @@ module go.opentelemetry.io/otel/exporters/prometheus -go 1.17 +go 1.18 require ( - github.com/prometheus/client_golang v1.12.2 + github.com/prometheus/client_golang v1.13.0 github.com/stretchr/testify v1.7.1 go.opentelemetry.io/otel v1.10.0 go.opentelemetry.io/otel/metric v0.31.0 - go.opentelemetry.io/otel/sdk v1.10.0 go.opentelemetry.io/otel/sdk/metric v0.31.0 ) @@ -21,20 +20,21 @@ require ( github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/prometheus/client_model v0.2.0 // indirect - github.com/prometheus/common v0.32.1 // indirect - github.com/prometheus/procfs v0.7.3 // indirect + github.com/prometheus/common v0.37.0 // indirect + github.com/prometheus/procfs v0.8.0 // indirect + go.opentelemetry.io/otel/sdk v1.10.0 // indirect go.opentelemetry.io/otel/trace v1.10.0 // indirect - golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 // indirect - google.golang.org/protobuf v1.26.0 // indirect + golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a // indirect + google.golang.org/protobuf v1.28.1 // indirect gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c // indirect ) replace go.opentelemetry.io/otel => ../.. 
-replace go.opentelemetry.io/otel/metric => ../../metric - replace go.opentelemetry.io/otel/sdk => ../../sdk replace go.opentelemetry.io/otel/sdk/metric => ../../sdk/metric replace go.opentelemetry.io/otel/trace => ../../trace + +replace go.opentelemetry.io/otel/metric => ../../metric diff --git a/exporters/prometheus/go.sum b/exporters/prometheus/go.sum index b2cbcef3b03..51544d68f4f 100644 --- a/exporters/prometheus/go.sum +++ b/exporters/prometheus/go.sum @@ -38,7 +38,6 @@ github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuy github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= -github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= @@ -65,9 +64,11 @@ github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2 github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= +github.com/go-kit/log v0.2.0/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= +github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= @@ -167,8 +168,9 @@ github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXP github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= -github.com/prometheus/client_golang v1.12.2 h1:51L9cDoUHVrXx4zWYlcLQIZ+d+VXHgqnYKkIuq4g/34= -github.com/prometheus/client_golang v1.12.2/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= +github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= +github.com/prometheus/client_golang v1.13.0 h1:b71QUfeo5M8gq2+evJdTPfZhYMAU0uKPkyPJ7TPsloU= +github.com/prometheus/client_golang v1.13.0/go.mod h1:vTeo+zgvILHsnnj/39Ou/1fPN5nJFOEMgftOUOmlvYQ= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod 
h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= @@ -177,14 +179,16 @@ github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6T github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= -github.com/prometheus/common v0.32.1 h1:hWIdL3N2HoUx3B8j3YN9mWor0qhY/NlEKZEaXxuIRh4= github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= +github.com/prometheus/common v0.37.0 h1:ccBbHCgIiT9uSoFY0vX8H3zsNR5eLt17/RQLUvn8pXE= +github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= -github.com/prometheus/procfs v0.7.3 h1:4jVXhlkAyzOScmCkXBTOLRLTz8EeU+eyjrwB/EPq0VU= github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= +github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5mo= +github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= @@ -269,12 +273,15 @@ golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81R golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -319,15 +326,20 
@@ golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 h1:XfKQ4OlFl8okEOr5UvAqFRVj8pY/4yfcXrddB8qAbU0= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a h1:dGzPydgVsqGcTRVwiLJ1jVbufYwmzD3LfVPLKsKg+0k= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -449,8 +461,9 @@ google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpAD google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.26.0 h1:bxAC2xTBsZGibn2RTntX0oH50xLsqy1OxA9tTL3p/lk= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w= +google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/exporters/prometheus/prometheus.go b/exporters/prometheus/prometheus.go deleted file mode 100644 index fb544d004fb..00000000000 --- a/exporters/prometheus/prometheus.go +++ /dev/null @@ -1,324 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the 
"License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package prometheus // import "go.opentelemetry.io/otel/exporters/prometheus" - -// Note that this package does not support a way to export Prometheus -// Summary data points, removed in PR#1412. - -import ( - "context" - "fmt" - "net/http" - "sync" - - "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/client_golang/prometheus/promhttp" - - "go.opentelemetry.io/otel" - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/metric" - "go.opentelemetry.io/otel/sdk/instrumentation" - controller "go.opentelemetry.io/otel/sdk/metric/controller/basic" - "go.opentelemetry.io/otel/sdk/metric/export" - "go.opentelemetry.io/otel/sdk/metric/export/aggregation" - "go.opentelemetry.io/otel/sdk/metric/number" - "go.opentelemetry.io/otel/sdk/metric/sdkapi" - "go.opentelemetry.io/otel/sdk/resource" -) - -// Exporter supports Prometheus pulls. It does not implement the -// sdk/export/metric.Exporter interface--instead it creates a pull -// controller and reads the latest checkpointed data on-scrape. -type Exporter struct { - handler http.Handler - - registerer prometheus.Registerer - gatherer prometheus.Gatherer - - // lock protects access to the controller. The controller - // exposes its own lock, but using a dedicated lock in this - // struct allows the exporter to potentially support multiple - // controllers (e.g., with different resources). - lock sync.RWMutex - controller *controller.Controller -} - -// ErrUnsupportedAggregator is returned for unrepresentable aggregator -// types. -var ErrUnsupportedAggregator = fmt.Errorf("unsupported aggregator type") - -var _ http.Handler = &Exporter{} - -// Config is a set of configs for the tally reporter. -type Config struct { - // Registry is the prometheus registry that will be used as the default Registerer and - // Gatherer if these are not specified. - // - // If not set a new empty Registry is created. - Registry *prometheus.Registry - - // Registerer is the prometheus registerer to register - // metrics with. - // - // If not specified the Registry will be used as default. - Registerer prometheus.Registerer - - // Gatherer is the prometheus gatherer to gather - // metrics with. - // - // If not specified the Registry will be used as default. - Gatherer prometheus.Gatherer - - // DefaultHistogramBoundaries defines the default histogram bucket - // boundaries. - DefaultHistogramBoundaries []float64 -} - -// New returns a new Prometheus exporter using the configured metric -// controller. See controller.New(). 
-func New(config Config, ctrl *controller.Controller) (*Exporter, error) { - if config.Registry == nil { - config.Registry = prometheus.NewRegistry() - } - - if config.Registerer == nil { - config.Registerer = config.Registry - } - - if config.Gatherer == nil { - config.Gatherer = config.Registry - } - - e := &Exporter{ - handler: promhttp.HandlerFor(config.Gatherer, promhttp.HandlerOpts{}), - registerer: config.Registerer, - gatherer: config.Gatherer, - controller: ctrl, - } - - c := &collector{ - exp: e, - } - if err := config.Registerer.Register(c); err != nil { - return nil, fmt.Errorf("cannot register the collector: %w", err) - } - return e, nil -} - -// MeterProvider returns the MeterProvider of this exporter. -func (e *Exporter) MeterProvider() metric.MeterProvider { - return e.controller -} - -// Controller returns the controller object that coordinates collection for the SDK. -func (e *Exporter) Controller() *controller.Controller { - e.lock.RLock() - defer e.lock.RUnlock() - return e.controller -} - -// TemporalityFor implements TemporalitySelector. -func (e *Exporter) TemporalityFor(desc *sdkapi.Descriptor, kind aggregation.Kind) aggregation.Temporality { - return aggregation.CumulativeTemporalitySelector().TemporalityFor(desc, kind) -} - -// ServeHTTP implements http.Handler. -func (e *Exporter) ServeHTTP(w http.ResponseWriter, r *http.Request) { - e.handler.ServeHTTP(w, r) -} - -// collector implements prometheus.Collector interface. -type collector struct { - exp *Exporter -} - -var _ prometheus.Collector = (*collector)(nil) - -// Describe implements prometheus.Collector. -func (c *collector) Describe(ch chan<- *prometheus.Desc) { - c.exp.lock.RLock() - defer c.exp.lock.RUnlock() - - _ = c.exp.Controller().ForEach(func(_ instrumentation.Library, reader export.Reader) error { - return reader.ForEach(c.exp, func(record export.Record) error { - var attrKeys []string - mergeAttrs(record, c.exp.controller.Resource(), &attrKeys, nil) - ch <- c.toDesc(record, attrKeys) - return nil - }) - }) -} - -// Collect exports the last calculated Reader state. -// -// Collect is invoked whenever prometheus.Gatherer is also invoked. -// For example, when the HTTP endpoint is invoked by Prometheus. 
-func (c *collector) Collect(ch chan<- prometheus.Metric) { - c.exp.lock.RLock() - defer c.exp.lock.RUnlock() - - ctrl := c.exp.Controller() - if err := ctrl.Collect(context.Background()); err != nil { - otel.Handle(err) - } - - err := ctrl.ForEach(func(_ instrumentation.Library, reader export.Reader) error { - return reader.ForEach(c.exp, func(record export.Record) error { - agg := record.Aggregation() - numberKind := record.Descriptor().NumberKind() - instrumentKind := record.Descriptor().InstrumentKind() - - var attrKeys, attrs []string - mergeAttrs(record, c.exp.controller.Resource(), &attrKeys, &attrs) - - desc := c.toDesc(record, attrKeys) - - switch v := agg.(type) { - case aggregation.Histogram: - if err := c.exportHistogram(ch, v, numberKind, desc, attrs); err != nil { - return fmt.Errorf("exporting histogram: %w", err) - } - case aggregation.Sum: - if instrumentKind.Monotonic() { - if err := c.exportMonotonicCounter(ch, v, numberKind, desc, attrs); err != nil { - return fmt.Errorf("exporting monotonic counter: %w", err) - } - } else { - if err := c.exportNonMonotonicCounter(ch, v, numberKind, desc, attrs); err != nil { - return fmt.Errorf("exporting non monotonic counter: %w", err) - } - } - case aggregation.LastValue: - if err := c.exportLastValue(ch, v, numberKind, desc, attrs); err != nil { - return fmt.Errorf("exporting last value: %w", err) - } - default: - return fmt.Errorf("%w: %s", ErrUnsupportedAggregator, agg.Kind()) - } - return nil - }) - }) - if err != nil { - otel.Handle(err) - } -} - -func (c *collector) exportLastValue(ch chan<- prometheus.Metric, lvagg aggregation.LastValue, kind number.Kind, desc *prometheus.Desc, attrs []string) error { - lv, _, err := lvagg.LastValue() - if err != nil { - return fmt.Errorf("error retrieving last value: %w", err) - } - - m, err := prometheus.NewConstMetric(desc, prometheus.GaugeValue, lv.CoerceToFloat64(kind), attrs...) - if err != nil { - return fmt.Errorf("error creating constant metric: %w", err) - } - - ch <- m - return nil -} - -func (c *collector) exportNonMonotonicCounter(ch chan<- prometheus.Metric, sum aggregation.Sum, kind number.Kind, desc *prometheus.Desc, attrs []string) error { - v, err := sum.Sum() - if err != nil { - return fmt.Errorf("error retrieving counter: %w", err) - } - - m, err := prometheus.NewConstMetric(desc, prometheus.GaugeValue, v.CoerceToFloat64(kind), attrs...) - if err != nil { - return fmt.Errorf("error creating constant metric: %w", err) - } - - ch <- m - return nil -} - -func (c *collector) exportMonotonicCounter(ch chan<- prometheus.Metric, sum aggregation.Sum, kind number.Kind, desc *prometheus.Desc, attrs []string) error { - v, err := sum.Sum() - if err != nil { - return fmt.Errorf("error retrieving counter: %w", err) - } - - m, err := prometheus.NewConstMetric(desc, prometheus.CounterValue, v.CoerceToFloat64(kind), attrs...) - if err != nil { - return fmt.Errorf("error creating constant metric: %w", err) - } - - ch <- m - return nil -} - -func (c *collector) exportHistogram(ch chan<- prometheus.Metric, hist aggregation.Histogram, kind number.Kind, desc *prometheus.Desc, attrs []string) error { - buckets, err := hist.Histogram() - if err != nil { - return fmt.Errorf("error retrieving histogram: %w", err) - } - sum, err := hist.Sum() - if err != nil { - return fmt.Errorf("error retrieving sum: %w", err) - } - - var totalCount uint64 - // counts maps from the bucket upper-bound to the cumulative count. - // The bucket with upper-bound +inf is not included. 
- counts := make(map[float64]uint64, len(buckets.Boundaries)) - for i := range buckets.Boundaries { - boundary := buckets.Boundaries[i] - totalCount += uint64(buckets.Counts[i]) - counts[boundary] = totalCount - } - // Include the +inf bucket in the total count. - totalCount += uint64(buckets.Counts[len(buckets.Counts)-1]) - - m, err := prometheus.NewConstHistogram(desc, totalCount, sum.CoerceToFloat64(kind), counts, attrs...) - if err != nil { - return fmt.Errorf("error creating constant histogram: %w", err) - } - - ch <- m - return nil -} - -func (c *collector) toDesc(record export.Record, attrKeys []string) *prometheus.Desc { - desc := record.Descriptor() - return prometheus.NewDesc(sanitize(desc.Name()), desc.Description(), attrKeys, nil) -} - -// mergeAttrs merges the export.Record's attributes and resources into a -// single set, giving precedence to the record's attributes in case of -// duplicate keys. This outputs one or both of the keys and the values as a -// slice, and either argument may be nil to avoid allocating an unnecessary -// slice. -func mergeAttrs(record export.Record, res *resource.Resource, keys, values *[]string) { - if keys != nil { - *keys = make([]string, 0, record.Attributes().Len()+res.Len()) - } - if values != nil { - *values = make([]string, 0, record.Attributes().Len()+res.Len()) - } - - // Duplicate keys are resolved by taking the record attribute value over - // the resource value. - mi := attribute.NewMergeIterator(record.Attributes(), res.Set()) - for mi.Next() { - attr := mi.Attribute() - if keys != nil { - *keys = append(*keys, sanitize(string(attr.Key))) - } - if values != nil { - *values = append(*values, attr.Value.Emit()) - } - } -} diff --git a/exporters/prometheus/prometheus_test.go b/exporters/prometheus/prometheus_test.go deleted file mode 100644 index 749965ba575..00000000000 --- a/exporters/prometheus/prometheus_test.go +++ /dev/null @@ -1,228 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package prometheus_test - -import ( - "context" - "fmt" - "net/http/httptest" - "sort" - "strings" - "testing" - - "github.com/stretchr/testify/require" - - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/exporters/prometheus" - "go.opentelemetry.io/otel/metric/instrument" - "go.opentelemetry.io/otel/sdk/metric/aggregator/histogram" - controller "go.opentelemetry.io/otel/sdk/metric/controller/basic" - "go.opentelemetry.io/otel/sdk/metric/export/aggregation" - processor "go.opentelemetry.io/otel/sdk/metric/processor/basic" - selector "go.opentelemetry.io/otel/sdk/metric/selector/simple" - "go.opentelemetry.io/otel/sdk/resource" -) - -type expectedMetric struct { - kind string - name string - help string - values []string -} - -func (e *expectedMetric) lines() []string { - ret := []string{ - fmt.Sprintf("# HELP %s %s", e.name, e.help), - fmt.Sprintf("# TYPE %s %s", e.name, e.kind), - } - - ret = append(ret, e.values...) 
- - return ret -} - -func expectCounterWithHelp(name, help, value string) expectedMetric { - return expectedMetric{ - kind: "counter", - name: name, - help: help, - values: []string{value}, - } -} - -func expectCounter(name, value string) expectedMetric { - return expectCounterWithHelp(name, "", value) -} - -func expectGauge(name, value string) expectedMetric { - return expectedMetric{ - kind: "gauge", - name: name, - values: []string{value}, - } -} - -func expectHistogram(name string, values ...string) expectedMetric { - return expectedMetric{ - kind: "histogram", - name: name, - values: values, - } -} - -func newPipeline(config prometheus.Config, options ...controller.Option) (*prometheus.Exporter, error) { - c := controller.New( - processor.NewFactory( - selector.NewWithHistogramDistribution( - histogram.WithExplicitBoundaries(config.DefaultHistogramBoundaries), - ), - aggregation.CumulativeTemporalitySelector(), - processor.WithMemory(true), - ), - options..., - ) - return prometheus.New(config, c) -} - -func TestPrometheusExporter(t *testing.T) { - exporter, err := newPipeline( - prometheus.Config{ - DefaultHistogramBoundaries: []float64{-0.5, 1}, - }, - controller.WithCollectPeriod(0), - controller.WithResource(resource.NewSchemaless(attribute.String("R", "V"))), - ) - require.NoError(t, err) - - meter := exporter.MeterProvider().Meter("test") - upDownCounter, err := meter.SyncFloat64().UpDownCounter("updowncounter") - require.NoError(t, err) - counter, err := meter.SyncFloat64().Counter("counter") - require.NoError(t, err) - hist, err := meter.SyncFloat64().Histogram("histogram") - require.NoError(t, err) - - attrs := []attribute.KeyValue{ - attribute.Key("A").String("B"), - attribute.Key("C").String("D"), - } - ctx := context.Background() - - var expected []expectedMetric - - counter.Add(ctx, 10, attrs...) - counter.Add(ctx, 5.3, attrs...) - - expected = append(expected, expectCounter("counter", `counter{A="B",C="D",R="V"} 15.3`)) - - gaugeObserver, err := meter.AsyncInt64().Gauge("intgaugeobserver") - require.NoError(t, err) - - err = meter.RegisterCallback([]instrument.Asynchronous{gaugeObserver}, func(ctx context.Context) { - gaugeObserver.Observe(ctx, 1, attrs...) - }) - require.NoError(t, err) - - expected = append(expected, expectGauge("intgaugeobserver", `intgaugeobserver{A="B",C="D",R="V"} 1`)) - - hist.Record(ctx, -0.6, attrs...) - hist.Record(ctx, -0.4, attrs...) - hist.Record(ctx, 0.6, attrs...) - hist.Record(ctx, 20, attrs...) - - expected = append(expected, expectHistogram("histogram", - `histogram_bucket{A="B",C="D",R="V",le="-0.5"} 1`, - `histogram_bucket{A="B",C="D",R="V",le="1"} 3`, - `histogram_bucket{A="B",C="D",R="V",le="+Inf"} 4`, - `histogram_sum{A="B",C="D",R="V"} 19.6`, - `histogram_count{A="B",C="D",R="V"} 4`, - )) - - upDownCounter.Add(ctx, 10, attrs...) - upDownCounter.Add(ctx, -3.2, attrs...) - - expected = append(expected, expectGauge("updowncounter", `updowncounter{A="B",C="D",R="V"} 6.8`)) - - counterObserver, err := meter.AsyncFloat64().Counter("floatcounterobserver") - require.NoError(t, err) - - err = meter.RegisterCallback([]instrument.Asynchronous{counterObserver}, func(ctx context.Context) { - counterObserver.Observe(ctx, 7.7, attrs...) 
- }) - require.NoError(t, err) - - expected = append(expected, expectCounter("floatcounterobserver", `floatcounterobserver{A="B",C="D",R="V"} 7.7`)) - - upDownCounterObserver, err := meter.AsyncFloat64().UpDownCounter("floatupdowncounterobserver") - require.NoError(t, err) - - err = meter.RegisterCallback([]instrument.Asynchronous{upDownCounterObserver}, func(ctx context.Context) { - upDownCounterObserver.Observe(ctx, -7.7, attrs...) - }) - require.NoError(t, err) - - expected = append(expected, expectGauge("floatupdowncounterobserver", `floatupdowncounterobserver{A="B",C="D",R="V"} -7.7`)) - - compareExport(t, exporter, expected) - compareExport(t, exporter, expected) -} - -func compareExport(t *testing.T, exporter *prometheus.Exporter, expected []expectedMetric) { - rec := httptest.NewRecorder() - req := httptest.NewRequest("GET", "/metrics", nil) - exporter.ServeHTTP(rec, req) - - output := rec.Body.String() - lines := strings.Split(output, "\n") - - expectedLines := []string{""} - for _, v := range expected { - expectedLines = append(expectedLines, v.lines()...) - } - - sort.Strings(lines) - sort.Strings(expectedLines) - - require.Equal(t, expectedLines, lines) -} - -func TestPrometheusStatefulness(t *testing.T) { - // Create a meter - exporter, err := newPipeline( - prometheus.Config{}, - controller.WithCollectPeriod(0), - controller.WithResource(resource.Empty()), - ) - require.NoError(t, err) - - meter := exporter.MeterProvider().Meter("test") - - ctx := context.Background() - - counter, err := meter.SyncInt64().Counter("a.counter", instrument.WithDescription("Counts things")) - require.NoError(t, err) - - counter.Add(ctx, 100, attribute.String("key", "value")) - - compareExport(t, exporter, []expectedMetric{ - expectCounterWithHelp("a_counter", "Counts things", `a_counter{key="value"} 100`), - }) - - counter.Add(ctx, 100, attribute.String("key", "value")) - - compareExport(t, exporter, []expectedMetric{ - expectCounterWithHelp("a_counter", "Counts things", `a_counter{key="value"} 200`), - }) -} diff --git a/exporters/prometheus/sanitize.go b/exporters/prometheus/sanitize.go deleted file mode 100644 index b5588435359..00000000000 --- a/exporters/prometheus/sanitize.go +++ /dev/null @@ -1,50 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package prometheus // import "go.opentelemetry.io/otel/exporters/prometheus" - -import ( - "strings" - "unicode" -) - -// TODO(paivagustavo): we should provide a more uniform and controlled way of sanitizing. -// Letting users define wether we should try or not to sanitize metric names. -// This is a copy of sdk/internal/sanitize.go - -// sanitize returns a string that is truncated to 100 characters if it's too -// long, and replaces non-alphanumeric characters to underscores. -func sanitize(s string) string { - if len(s) == 0 { - return s - } - // TODO(paivagustavo): change this to use a bytes buffer to avoid a large number of string allocations. 
- s = strings.Map(sanitizeRune, s) - if unicode.IsDigit(rune(s[0])) { - s = "key_" + s - } - if s[0] == '_' { - s = "key" + s - } - return s -} - -// converts anything that is not a letter or digit to an underscore. -func sanitizeRune(r rune) rune { - if unicode.IsLetter(r) || unicode.IsDigit(r) { - return r - } - // Everything else turns into an underscore - return '_' -} diff --git a/exporters/prometheus/sanitize_test.go b/exporters/prometheus/sanitize_test.go deleted file mode 100644 index 7a6b9fd55ee..00000000000 --- a/exporters/prometheus/sanitize_test.go +++ /dev/null @@ -1,61 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package prometheus - -import ( - "testing" -) - -func TestSanitize(t *testing.T) { - tests := []struct { - name string - input string - want string - }{ - { - name: "replace character", - input: "test/key-1", - want: "test_key_1", - }, - { - name: "add prefix if starting with digit", - input: "0123456789", - want: "key_0123456789", - }, - { - name: "add prefix if starting with _", - input: "_0123456789", - want: "key_0123456789", - }, - { - name: "starts with _ after sanitization", - input: "/0123456789", - want: "key_0123456789", - }, - { - name: "valid input", - input: "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz_0123456789", - want: "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz_0123456789", - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - if got, want := sanitize(tt.input), tt.want; got != want { - t.Errorf("sanitize() = %q; want %q", got, want) - } - }) - } -} diff --git a/exporters/prometheus/testdata/counter.txt b/exporters/prometheus/testdata/counter.txt new file mode 100755 index 00000000000..2a61dee1d01 --- /dev/null +++ b/exporters/prometheus/testdata/counter.txt @@ -0,0 +1,3 @@ +# HELP foo a simple counter +# TYPE foo counter +foo{A="B",C="D"} 24.3 diff --git a/exporters/prometheus/testdata/gauge.txt b/exporters/prometheus/testdata/gauge.txt new file mode 100644 index 00000000000..889295d74e1 --- /dev/null +++ b/exporters/prometheus/testdata/gauge.txt @@ -0,0 +1,3 @@ +# HELP bar a fun little gauge +# TYPE bar gauge +bar{A="B",C="D"} 75 diff --git a/exporters/prometheus/testdata/histogram.txt b/exporters/prometheus/testdata/histogram.txt new file mode 100644 index 00000000000..547599cf6d5 --- /dev/null +++ b/exporters/prometheus/testdata/histogram.txt @@ -0,0 +1,15 @@ +# HELP baz a very nice histogram +# TYPE baz histogram +baz_bucket{A="B",C="D",le="0"} 0 +baz_bucket{A="B",C="D",le="5"} 0 +baz_bucket{A="B",C="D",le="10"} 1 +baz_bucket{A="B",C="D",le="25"} 2 +baz_bucket{A="B",C="D",le="50"} 2 +baz_bucket{A="B",C="D",le="75"} 2 +baz_bucket{A="B",C="D",le="100"} 2 +baz_bucket{A="B",C="D",le="250"} 4 +baz_bucket{A="B",C="D",le="500"} 4 +baz_bucket{A="B",C="D",le="1000"} 4 +baz_bucket{A="B",C="D",le="+Inf"} 4 +baz_sum{A="B",C="D"} 236 +baz_count{A="B",C="D"} 4 diff --git a/exporters/prometheus/testdata/sanitized_labels.txt
b/exporters/prometheus/testdata/sanitized_labels.txt new file mode 100755 index 00000000000..cd686cff97e --- /dev/null +++ b/exporters/prometheus/testdata/sanitized_labels.txt @@ -0,0 +1,3 @@ +# HELP foo a sanitary counter +# TYPE foo counter +foo{A_B="Q",C_D="Y;Z"} 24.3 diff --git a/exporters/stdout/stdoutmetric/config.go b/exporters/stdout/stdoutmetric/config.go index 95374aaef1d..63ba9fc592e 100644 --- a/exporters/stdout/stdoutmetric/config.go +++ b/exporters/stdout/stdoutmetric/config.go @@ -1,5 +1,4 @@ // Copyright The OpenTelemetry Authors -// // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at @@ -12,106 +11,55 @@ // See the License for the specific language governing permissions and // limitations under the License. +//go:build go1.18 +// +build go1.18 + package stdoutmetric // import "go.opentelemetry.io/otel/exporters/stdout/stdoutmetric" import ( - "io" + "encoding/json" "os" - - "go.opentelemetry.io/otel/attribute" -) - -var ( - defaultWriter = os.Stdout - defaultPrettyPrint = false - defaultTimestamps = true - defaultAttrEncoder = attribute.DefaultEncoder() ) -// config contains options for the STDOUT exporter. +// config contains options for the exporter. type config struct { - // Writer is the destination. If not set, os.Stdout is used. - Writer io.Writer - - // PrettyPrint will encode the output into readable JSON. Default is - // false. - PrettyPrint bool - - // Timestamps specifies if timestamps should be printed. Default is - // true. - Timestamps bool - - // Encoder encodes the attributes. - Encoder attribute.Encoder + encoder *encoderHolder } -// newConfig creates a validated Config configured with options. -func newConfig(options ...Option) (config, error) { - cfg := config{ - Writer: defaultWriter, - PrettyPrint: defaultPrettyPrint, - Timestamps: defaultTimestamps, - Encoder: defaultAttrEncoder, - } +// newConfig creates a validated config configured with options. +func newConfig(options ...Option) config { + cfg := config{} for _, opt := range options { cfg = opt.apply(cfg) } - return cfg, nil -} - -// Option sets the value of an option for a Config. -type Option interface { - apply(config) config -} - -// WithWriter sets the export stream destination. -func WithWriter(w io.Writer) Option { - return writerOption{w} -} -type writerOption struct { - W io.Writer -} - -func (o writerOption) apply(cfg config) config { - cfg.Writer = o.W - return cfg -} - -// WithPrettyPrint sets the export stream format to use JSON. -func WithPrettyPrint() Option { - return prettyPrintOption(true) -} - -type prettyPrintOption bool + if cfg.encoder == nil { + enc := json.NewEncoder(os.Stdout) + enc.SetIndent("", "\t") + cfg.encoder = &encoderHolder{encoder: enc} + } -func (o prettyPrintOption) apply(cfg config) config { - cfg.PrettyPrint = bool(o) return cfg } -// WithoutTimestamps sets the export stream to not include timestamps. -func WithoutTimestamps() Option { - return timestampsOption(false) -} - -type timestampsOption bool - -func (o timestampsOption) apply(cfg config) config { - cfg.Timestamps = bool(o) - return cfg +// Option sets exporter option values. +type Option interface { + apply(config) config } -// WithAttributeEncoder sets the attribute encoder used in export. 
-func WithAttributeEncoder(enc attribute.Encoder) Option { - return attrEncoderOption{enc} -} +type optionFunc func(config) config -type attrEncoderOption struct { - encoder attribute.Encoder +func (o optionFunc) apply(c config) config { + return o(c) } -func (o attrEncoderOption) apply(cfg config) config { - cfg.Encoder = o.encoder - return cfg +// WithEncoder sets the exporter to use encoder to encode all the metric +// data-types to an output. +func WithEncoder(encoder Encoder) Option { + return optionFunc(func(c config) config { + if encoder != nil { + c.encoder = &encoderHolder{encoder: encoder} + } + return c + }) } diff --git a/exporters/stdout/stdoutmetric/doc.go b/exporters/stdout/stdoutmetric/doc.go index 0bffd34b9ff..fc766ad0bea 100644 --- a/exporters/stdout/stdoutmetric/doc.go +++ b/exporters/stdout/stdoutmetric/doc.go @@ -12,10 +12,12 @@ // See the License for the specific language governing permissions and // limitations under the License. -// Package stdoutmetric contains an OpenTelemetry exporter for metric -// telemetry to be written to an output destination as JSON. +// Package stdoutmetric provides an exporter for OpenTelemetry metric +// telemetry. // -// This package is currently in a pre-GA phase. Backwards incompatible changes -// may be introduced in subsequent minor version releases as we work to track -// the evolving OpenTelemetry specification and user feedback. +// The exporter is intended to be used for testing and debugging; it is not +// meant for production use. Additionally, it does not provide an interchange +// format for OpenTelemetry that is supported with any stability or +// compatibility guarantees. If these are needed features, please use the OTLP +// exporter instead. package stdoutmetric // import "go.opentelemetry.io/otel/exporters/stdout/stdoutmetric" diff --git a/exporters/stdout/stdoutmetric/encoder.go b/exporters/stdout/stdoutmetric/encoder.go new file mode 100644 index 00000000000..ab5510afcbe --- /dev/null +++ b/exporters/stdout/stdoutmetric/encoder.go @@ -0,0 +1,43 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build go1.18 +// +build go1.18 + +package stdoutmetric // import "go.opentelemetry.io/otel/exporters/stdout/stdoutmetric" + +import "errors" + +// Encoder encodes and outputs OpenTelemetry metric data-types as +// human-readable text. +type Encoder interface { + // Encode handles the encoding and writing of OpenTelemetry metric data. + Encode(v any) error +} + +// encoderHolder is the concrete type used to wrap an Encoder so it can be +// used as an atomic.Value type. +type encoderHolder struct { + encoder Encoder +} + +func (e encoderHolder) Encode(v any) error { return e.encoder.Encode(v) } + +// shutdownEncoder is used when the exporter is shut down. It always returns +// errShutdown when Encode is called.
+type shutdownEncoder struct{} + +var errShutdown = errors.New("exporter shutdown") + +func (shutdownEncoder) Encode(any) error { return errShutdown } diff --git a/exporters/stdout/stdoutmetric/example_test.go b/exporters/stdout/stdoutmetric/example_test.go index 82723ffa7ce..e8ea9310b46 100644 --- a/exporters/stdout/stdoutmetric/example_test.go +++ b/exporters/stdout/stdoutmetric/example_test.go @@ -12,100 +12,226 @@ // See the License for the specific language governing permissions and // limitations under the License. +//go:build go1.18 +// +build go1.18 + package stdoutmetric_test import ( "context" - "fmt" - "log" + "encoding/json" + "os" + "time" "go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/exporters/stdout/stdoutmetric" - "go.opentelemetry.io/otel/metric" - "go.opentelemetry.io/otel/metric/global" - "go.opentelemetry.io/otel/metric/instrument/syncint64" - controller "go.opentelemetry.io/otel/sdk/metric/controller/basic" - processor "go.opentelemetry.io/otel/sdk/metric/processor/basic" - "go.opentelemetry.io/otel/sdk/metric/selector/simple" -) - -const ( - instrumentationName = "github.com/instrumentron" - instrumentationVersion = "v0.1.0" + "go.opentelemetry.io/otel/metric/unit" + "go.opentelemetry.io/otel/sdk/instrumentation" + "go.opentelemetry.io/otel/sdk/metric" + "go.opentelemetry.io/otel/sdk/metric/metricdata" + "go.opentelemetry.io/otel/sdk/resource" + semconv "go.opentelemetry.io/otel/semconv/v1.10.0" ) var ( - loopCounter syncint64.Counter - paramValue syncint64.Histogram - - nameKey = attribute.Key("function.name") -) - -func add(ctx context.Context, x, y int64) int64 { - nameKV := nameKey.String("add") + // Sat Jan 01 2000 00:00:00 GMT+0000. + now = time.Date(2000, time.January, 01, 0, 0, 0, 0, time.FixedZone("GMT", 0)) - loopCounter.Add(ctx, 1, nameKV) - paramValue.Record(ctx, x, nameKV) - paramValue.Record(ctx, y, nameKV) - - return x + y -} - -func multiply(ctx context.Context, x, y int64) int64 { - nameKV := nameKey.String("multiply") - - loopCounter.Add(ctx, 1, nameKV) - paramValue.Record(ctx, x, nameKV) - paramValue.Record(ctx, y, nameKV) - - return x * y -} - -func InstallExportPipeline(ctx context.Context) (func(context.Context) error, error) { - exporter, err := stdoutmetric.New(stdoutmetric.WithPrettyPrint()) - if err != nil { - return nil, fmt.Errorf("creating stdoutmetric exporter: %w", err) - } - - pusher := controller.New( - processor.NewFactory( - simple.NewWithInexpensiveDistribution(), - exporter, - ), - controller.WithExporter(exporter), + res = resource.NewSchemaless( + semconv.ServiceNameKey.String("stdoutmetric-example"), ) - if err = pusher.Start(ctx); err != nil { - log.Fatalf("starting push controller: %v", err) - } - global.SetMeterProvider(pusher) - meter := global.Meter(instrumentationName, metric.WithInstrumentationVersion(instrumentationVersion)) - - loopCounter, err = meter.SyncInt64().Counter("function.loops") - if err != nil { - log.Fatalf("creating instrument: %v", err) + mockData = metricdata.ResourceMetrics{ + Resource: res, + ScopeMetrics: []metricdata.ScopeMetrics{ + { + Scope: instrumentation.Scope{Name: "example", Version: "v0.0.1"}, + Metrics: []metricdata.Metrics{ + { + Name: "requests", + Description: "Number of requests received", + Unit: unit.Dimensionless, + Data: metricdata.Sum[int64]{ + IsMonotonic: true, + Temporality: metricdata.DeltaTemporality, + DataPoints: []metricdata.DataPoint[int64]{ + { + Attributes: attribute.NewSet(attribute.String("server", "central")), + StartTime: now, + Time: now.Add(1 * 
time.Second),
+ Value: 5,
+ },
+ },
+ },
+ },
+ {
+ Name: "latency",
+ Description: "Time spent processing received requests",
+ Unit: unit.Milliseconds,
+ Data: metricdata.Histogram{
+ Temporality: metricdata.DeltaTemporality,
+ DataPoints: []metricdata.HistogramDataPoint{
+ {
+ Attributes: attribute.NewSet(attribute.String("server", "central")),
+ StartTime: now,
+ Time: now.Add(1 * time.Second),
+ Count: 10,
+ Bounds: []float64{1, 5, 10},
+ BucketCounts: []uint64{1, 3, 6, 0},
+ Sum: 57,
+ },
+ },
+ },
+ },
+ {
+ Name: "temperature",
+ Description: "CPU global temperature",
+ Unit: unit.Unit("cel(1 K)"),
+ Data: metricdata.Gauge[float64]{
+ DataPoints: []metricdata.DataPoint[float64]{
+ {
+ Attributes: attribute.NewSet(attribute.String("server", "central")),
+ Time: now.Add(1 * time.Second),
+ Value: 32.4,
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ }
- paramValue, err = meter.SyncInt64().Histogram("function.param")
- if err != nil {
- log.Fatalf("creating instrument: %v", err)
- }
-
- return pusher.Stop, nil
-}
+)
func Example() {
- ctx := context.Background()
-
- // TODO: Registers a meter Provider globally.
- shutdown, err := InstallExportPipeline(ctx)
+ // Print with a JSON encoder that indents with two spaces.
+ enc := json.NewEncoder(os.Stdout)
+ enc.SetIndent("", "  ")
+ exp, err := stdoutmetric.New(stdoutmetric.WithEncoder(enc))
if err != nil {
- log.Fatal(err)
+ panic(err)
}
- defer func() {
- if err := shutdown(ctx); err != nil {
- log.Fatal(err)
- }
- }()
- log.Println("the answer is", add(ctx, multiply(ctx, multiply(ctx, 2, 2), 10), 2))
+ // Register the exporter with an SDK via a periodic reader.
+ sdk := metric.NewMeterProvider(
+ metric.WithResource(res),
+ metric.WithReader(metric.NewPeriodicReader(exp)),
+ )
+
+ ctx := context.Background()
+ // This is where the sdk would be used to create a Meter and, from that,
+ // instruments that would make measurements of your code. To simulate that
+ // behavior, call export directly with mocked data.
+ _ = exp.Export(ctx, mockData)
+
+ // Ensure the periodic reader is cleaned up by shutting down the sdk.
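+ // The error from Shutdown is discarded here only to keep the example's
+ // output stable; production code should check and handle it.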
+ _ = sdk.Shutdown(ctx)
+
+ // Output:
+ // {
+ // "Resource": [
+ // {
+ // "Key": "service.name",
+ // "Value": {
+ // "Type": "STRING",
+ // "Value": "stdoutmetric-example"
+ // }
+ // }
+ // ],
+ // "ScopeMetrics": [
+ // {
+ // "Scope": {
+ // "Name": "example",
+ // "Version": "v0.0.1",
+ // "SchemaURL": ""
+ // },
+ // "Metrics": [
+ // {
+ // "Name": "requests",
+ // "Description": "Number of requests received",
+ // "Unit": "1",
+ // "Data": {
+ // "DataPoints": [
+ // {
+ // "Attributes": [
+ // {
+ // "Key": "server",
+ // "Value": {
+ // "Type": "STRING",
+ // "Value": "central"
+ // }
+ // }
+ // ],
+ // "StartTime": "2000-01-01T00:00:00Z",
+ // "Time": "2000-01-01T00:00:01Z",
+ // "Value": 5
+ // }
+ // ],
+ // "Temporality": "DeltaTemporality",
+ // "IsMonotonic": true
+ // }
+ // },
+ // {
+ // "Name": "latency",
+ // "Description": "Time spent processing received requests",
+ // "Unit": "ms",
+ // "Data": {
+ // "DataPoints": [
+ // {
+ // "Attributes": [
+ // {
+ // "Key": "server",
+ // "Value": {
+ // "Type": "STRING",
+ // "Value": "central"
+ // }
+ // }
+ // ],
+ // "StartTime": "2000-01-01T00:00:00Z",
+ // "Time": "2000-01-01T00:00:01Z",
+ // "Count": 10,
+ // "Bounds": [
+ // 1,
+ // 5,
+ // 10
+ // ],
+ // "BucketCounts": [
+ // 1,
+ // 3,
+ // 6,
+ // 0
+ // ],
+ // "Sum": 57
+ // }
+ // ],
+ // "Temporality": "DeltaTemporality"
+ // }
+ // },
+ // {
+ // "Name": "temperature",
+ // "Description": "CPU global temperature",
+ // "Unit": "cel(1 K)",
+ // "Data": {
+ // "DataPoints": [
+ // {
+ // "Attributes": [
+ // {
+ // "Key": "server",
+ // "Value": {
+ // "Type": "STRING",
+ // "Value": "central"
+ // }
+ // }
+ // ],
+ // "StartTime": "0001-01-01T00:00:00Z",
+ // "Time": "2000-01-01T00:00:01Z",
+ // "Value": 32.4
+ // }
+ // ]
+ // }
+ // }
+ // ]
+ // }
+ // ]
+ // }
}
diff --git a/exporters/stdout/stdoutmetric/exporter.go b/exporters/stdout/stdoutmetric/exporter.go
index 5f8ff4f7baf..f7976d5993f 100644
--- a/exporters/stdout/stdoutmetric/exporter.go
+++ b/exporters/stdout/stdoutmetric/exporter.go
@@ -12,27 +12,60 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+//go:build go1.18
+// +build go1.18
+
package stdoutmetric // import "go.opentelemetry.io/otel/exporters/stdout/stdoutmetric"
-import "go.opentelemetry.io/otel/sdk/metric/export"
+import (
+ "context"
+ "sync"
+ "sync/atomic"
+
+ "go.opentelemetry.io/otel/sdk/metric"
+ "go.opentelemetry.io/otel/sdk/metric/metricdata"
+)
+
+// exporter is an OpenTelemetry metric exporter.
+type exporter struct {
+ encVal atomic.Value // encoderHolder
-// Exporter is an OpenTelemetry metric exporter that transmits telemetry to
-// the local STDOUT.
-type Exporter struct {
- metricExporter
+ shutdownOnce sync.Once
}
-var (
- _ export.Exporter = &Exporter{}
-)
+// New returns a configured metric exporter.
+//
+// If no options are passed, the default exporter returned will use a JSON
+// encoder with tab indentation that outputs to STDOUT.
+func New(options ...Option) (metric.Exporter, error) {
+ cfg := newConfig(options...)
+ exp := &exporter{}
+ exp.encVal.Store(*cfg.encoder)
+ return exp, nil
+}
-// New creates an Exporter with the passed options.
-func New(options ...Option) (*Exporter, error) {
- cfg, err := newConfig(options...)
- if err != nil {
- return nil, err
+func (e *exporter) Export(ctx context.Context, data metricdata.ResourceMetrics) error {
+ select {
+ case <-ctx.Done():
+ // Don't do anything if the context has already been canceled or timed out.
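+ // Returning ctx.Err() reports the specific cause, context.Canceled or
+ // context.DeadlineExceeded, to the caller.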
+ return ctx.Err() + default: + // Context is still valid, continue. } - return &Exporter{ - metricExporter: metricExporter{cfg}, - }, nil + + return e.encVal.Load().(encoderHolder).Encode(data) +} + +func (e *exporter) ForceFlush(ctx context.Context) error { + // exporter holds no state, nothing to flush. + return ctx.Err() +} + +func (e *exporter) Shutdown(ctx context.Context) error { + e.shutdownOnce.Do(func() { + e.encVal.Store(encoderHolder{ + encoder: shutdownEncoder{}, + }) + }) + return ctx.Err() } diff --git a/exporters/stdout/stdoutmetric/exporter_test.go b/exporters/stdout/stdoutmetric/exporter_test.go new file mode 100644 index 00000000000..9e8faa3e8ec --- /dev/null +++ b/exporters/stdout/stdoutmetric/exporter_test.go @@ -0,0 +1,102 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build go1.18 +// +build go1.18 + +package stdoutmetric_test // import "go.opentelemetry.io/otel/exporters/stdout/stdoutmetric" + +import ( + "context" + "encoding/json" + "io" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "go.opentelemetry.io/otel/exporters/stdout/stdoutmetric" + "go.opentelemetry.io/otel/sdk/metric/metricdata" +) + +func testEncoderOption() stdoutmetric.Option { + // Discard export output for testing. 
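+ // json.Encoder's Encode(v any) error method satisfies this package's
+ // Encoder interface, so it can be passed to WithEncoder directly.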
+ enc := json.NewEncoder(io.Discard) + return stdoutmetric.WithEncoder(enc) +} + +func testCtxErrHonored(factory func(*testing.T) func(context.Context) error) func(t *testing.T) { + return func(t *testing.T) { + t.Helper() + ctx, cancel := context.WithCancel(context.Background()) + t.Cleanup(cancel) + + t.Run("DeadlineExceeded", func(t *testing.T) { + innerCtx, innerCancel := context.WithTimeout(ctx, time.Nanosecond) + t.Cleanup(innerCancel) + <-innerCtx.Done() + + f := factory(t) + assert.ErrorIs(t, f(innerCtx), context.DeadlineExceeded) + }) + + t.Run("Canceled", func(t *testing.T) { + innerCtx, innerCancel := context.WithCancel(ctx) + innerCancel() + + f := factory(t) + assert.ErrorIs(t, f(innerCtx), context.Canceled) + }) + + t.Run("NoError", func(t *testing.T) { + f := factory(t) + assert.NoError(t, f(ctx)) + }) + } +} + +func TestExporterHonorsContextErrors(t *testing.T) { + t.Run("Shutdown", testCtxErrHonored(func(t *testing.T) func(context.Context) error { + exp, err := stdoutmetric.New(testEncoderOption()) + require.NoError(t, err) + return exp.Shutdown + })) + + t.Run("ForceFlush", testCtxErrHonored(func(t *testing.T) func(context.Context) error { + exp, err := stdoutmetric.New(testEncoderOption()) + require.NoError(t, err) + return exp.ForceFlush + })) + + t.Run("Export", testCtxErrHonored(func(t *testing.T) func(context.Context) error { + exp, err := stdoutmetric.New(testEncoderOption()) + require.NoError(t, err) + return func(ctx context.Context) error { + var data metricdata.ResourceMetrics + return exp.Export(ctx, data) + } + })) +} + +func TestShutdownExporterReturnsShutdownErrorOnExport(t *testing.T) { + var ( + data metricdata.ResourceMetrics + ctx = context.Background() + exp, err = stdoutmetric.New(testEncoderOption()) + ) + require.NoError(t, err) + require.NoError(t, exp.Shutdown(ctx)) + assert.EqualError(t, exp.Export(ctx, data), "exporter shutdown") +} diff --git a/exporters/stdout/stdoutmetric/go.mod b/exporters/stdout/stdoutmetric/go.mod index 8e5752f088b..67edf5fc2ed 100644 --- a/exporters/stdout/stdoutmetric/go.mod +++ b/exporters/stdout/stdoutmetric/go.mod @@ -1,18 +1,13 @@ module go.opentelemetry.io/otel/exporters/stdout/stdoutmetric -go 1.17 - -replace ( - go.opentelemetry.io/otel => ../../.. - go.opentelemetry.io/otel/sdk => ../../../sdk -) +go 1.18 require ( github.com/stretchr/testify v1.7.1 go.opentelemetry.io/otel v1.10.0 - go.opentelemetry.io/otel/metric v0.31.0 - go.opentelemetry.io/otel/sdk v1.10.0 - go.opentelemetry.io/otel/sdk/metric v0.31.0 + go.opentelemetry.io/otel/metric v0.0.0-00010101000000-000000000000 + go.opentelemetry.io/otel/sdk v0.0.0-00010101000000-000000000000 + go.opentelemetry.io/otel/sdk/metric v0.0.0-00010101000000-000000000000 ) require ( @@ -27,6 +22,10 @@ require ( replace go.opentelemetry.io/otel/metric => ../../../metric +replace go.opentelemetry.io/otel => ../../.. 
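+// The zero pseudo-versions in the require block above refer to unreleased
+// modules in this repository; these replace directives resolve them to
+// their local paths.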
+ replace go.opentelemetry.io/otel/sdk/metric => ../../../sdk/metric replace go.opentelemetry.io/otel/trace => ../../../trace + +replace go.opentelemetry.io/otel/sdk => ../../../sdk diff --git a/exporters/stdout/stdoutmetric/go.sum b/exporters/stdout/stdoutmetric/go.sum index bb01dfbad5b..2e2aed63d24 100644 --- a/exporters/stdout/stdoutmetric/go.sum +++ b/exporters/stdout/stdoutmetric/go.sum @@ -1,4 +1,3 @@ -github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= diff --git a/exporters/stdout/stdoutmetric/metric.go b/exporters/stdout/stdoutmetric/metric.go deleted file mode 100644 index 38289d281a7..00000000000 --- a/exporters/stdout/stdoutmetric/metric.go +++ /dev/null @@ -1,144 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package stdoutmetric // import "go.opentelemetry.io/otel/exporters/stdout/stdoutmetric" - -import ( - "context" - "encoding/json" - "fmt" - "strings" - "time" - - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/sdk/instrumentation" - "go.opentelemetry.io/otel/sdk/metric/export" - "go.opentelemetry.io/otel/sdk/metric/export/aggregation" - "go.opentelemetry.io/otel/sdk/metric/sdkapi" - "go.opentelemetry.io/otel/sdk/resource" -) - -type metricExporter struct { - config config -} - -var _ export.Exporter = &metricExporter{} - -type line struct { - Name string `json:"Name"` - Sum interface{} `json:"Sum,omitempty"` - Count interface{} `json:"Count,omitempty"` - LastValue interface{} `json:"Last,omitempty"` - - // Note: this is a pointer because omitempty doesn't work when time.IsZero() - Timestamp *time.Time `json:"Timestamp,omitempty"` -} - -func (e *metricExporter) TemporalityFor(desc *sdkapi.Descriptor, kind aggregation.Kind) aggregation.Temporality { - return aggregation.StatelessTemporalitySelector().TemporalityFor(desc, kind) -} - -func (e *metricExporter) Export(_ context.Context, res *resource.Resource, reader export.InstrumentationLibraryReader) error { - var aggError error - var batch []line - aggError = reader.ForEach(func(lib instrumentation.Library, mr export.Reader) error { - var instAttrs []attribute.KeyValue - if name := lib.Name; name != "" { - instAttrs = append(instAttrs, attribute.String("instrumentation.name", name)) - if version := lib.Version; version != "" { - instAttrs = append(instAttrs, attribute.String("instrumentation.version", version)) - } - if schema := lib.SchemaURL; schema != "" { - instAttrs = append(instAttrs, attribute.String("instrumentation.schema_url", schema)) - } - } - instSet := attribute.NewSet(instAttrs...) 
- encodedInstAttrs := instSet.Encoded(e.config.Encoder) - - return mr.ForEach(e, func(record export.Record) error { - desc := record.Descriptor() - agg := record.Aggregation() - kind := desc.NumberKind() - encodedResource := res.Encoded(e.config.Encoder) - - var expose line - - if sum, ok := agg.(aggregation.Sum); ok { - value, err := sum.Sum() - if err != nil { - return err - } - expose.Sum = value.AsInterface(kind) - } else if lv, ok := agg.(aggregation.LastValue); ok { - value, timestamp, err := lv.LastValue() - if err != nil { - return err - } - expose.LastValue = value.AsInterface(kind) - - if e.config.Timestamps { - expose.Timestamp = ×tamp - } - } - - var encodedAttrs string - iter := record.Attributes().Iter() - if iter.Len() > 0 { - encodedAttrs = record.Attributes().Encoded(e.config.Encoder) - } - - var sb strings.Builder - - _, _ = sb.WriteString(desc.Name()) - - if len(encodedAttrs) > 0 || len(encodedResource) > 0 || len(encodedInstAttrs) > 0 { - _, _ = sb.WriteRune('{') - _, _ = sb.WriteString(encodedResource) - if len(encodedInstAttrs) > 0 && len(encodedResource) > 0 { - _, _ = sb.WriteRune(',') - } - _, _ = sb.WriteString(encodedInstAttrs) - if len(encodedAttrs) > 0 && (len(encodedInstAttrs) > 0 || len(encodedResource) > 0) { - _, _ = sb.WriteRune(',') - } - _, _ = sb.WriteString(encodedAttrs) - _, _ = sb.WriteRune('}') - } - - expose.Name = sb.String() - - batch = append(batch, expose) - return nil - }) - }) - if len(batch) == 0 { - return aggError - } - - data, err := e.marshal(batch) - if err != nil { - return err - } - fmt.Fprintln(e.config.Writer, string(data)) - - return aggError -} - -// marshal v with appropriate indentation. -func (e *metricExporter) marshal(v interface{}) ([]byte, error) { - if e.config.PrettyPrint { - return json.MarshalIndent(v, "", "\t") - } - return json.Marshal(v) -} diff --git a/exporters/stdout/stdoutmetric/metric_test.go b/exporters/stdout/stdoutmetric/metric_test.go deleted file mode 100644 index 40cb6fbb8e5..00000000000 --- a/exporters/stdout/stdoutmetric/metric_test.go +++ /dev/null @@ -1,260 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package stdoutmetric_test - -import ( - "bytes" - "context" - "encoding/json" - "fmt" - "strings" - "testing" - "time" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/exporters/stdout/stdoutmetric" - "go.opentelemetry.io/otel/metric" - controller "go.opentelemetry.io/otel/sdk/metric/controller/basic" - "go.opentelemetry.io/otel/sdk/metric/export/aggregation" - processor "go.opentelemetry.io/otel/sdk/metric/processor/basic" - "go.opentelemetry.io/otel/sdk/metric/processor/processortest" - "go.opentelemetry.io/otel/sdk/resource" -) - -type testFixture struct { - t *testing.T - ctx context.Context - cont *controller.Controller - meter metric.Meter - exporter *stdoutmetric.Exporter - output *bytes.Buffer -} - -var testResource = resource.NewSchemaless(attribute.String("R", "V")) - -func newFixture(t *testing.T, opts ...stdoutmetric.Option) testFixture { - return newFixtureWithResource(t, testResource, opts...) -} - -func newFixtureWithResource(t *testing.T, res *resource.Resource, opts ...stdoutmetric.Option) testFixture { - buf := &bytes.Buffer{} - opts = append(opts, stdoutmetric.WithWriter(buf)) - opts = append(opts, stdoutmetric.WithoutTimestamps()) - exp, err := stdoutmetric.New(opts...) - if err != nil { - t.Fatal("Error building fixture: ", err) - } - aggSel := processortest.AggregatorSelector() - proc := processor.NewFactory(aggSel, aggregation.StatelessTemporalitySelector()) - cont := controller.New(proc, - controller.WithExporter(exp), - controller.WithResource(res), - ) - ctx := context.Background() - require.NoError(t, cont.Start(ctx)) - meter := cont.Meter("test") - - return testFixture{ - t: t, - ctx: ctx, - exporter: exp, - cont: cont, - meter: meter, - output: buf, - } -} - -func (fix testFixture) Output() string { - return strings.TrimSpace(fix.output.String()) -} - -func TestStdoutTimestamp(t *testing.T) { - var buf bytes.Buffer - aggSel := processortest.AggregatorSelector() - proc := processor.NewFactory(aggSel, aggregation.CumulativeTemporalitySelector()) - exporter, err := stdoutmetric.New( - stdoutmetric.WithWriter(&buf), - ) - if err != nil { - t.Fatal("Invalid config: ", err) - } - cont := controller.New(proc, - controller.WithExporter(exporter), - controller.WithResource(testResource), - ) - ctx := context.Background() - - require.NoError(t, cont.Start(ctx)) - meter := cont.Meter("test") - counter, err := meter.SyncInt64().Counter("name.lastvalue") - require.NoError(t, err) - - before := time.Now() - // Ensure the timestamp is after before. - time.Sleep(time.Nanosecond) - - counter.Add(ctx, 1) - - require.NoError(t, cont.Stop(ctx)) - - // Ensure the timestamp is before after. 
- time.Sleep(time.Nanosecond) - after := time.Now() - - var printed []interface{} - if err := json.Unmarshal(buf.Bytes(), &printed); err != nil { - t.Fatal("JSON parse error: ", err) - } - - require.Len(t, printed, 1) - lastValue, ok := printed[0].(map[string]interface{}) - require.True(t, ok, "last value format") - require.Contains(t, lastValue, "Timestamp") - lastValueTS := lastValue["Timestamp"].(string) - lastValueTimestamp, err := time.Parse(time.RFC3339Nano, lastValueTS) - if err != nil { - t.Fatal("JSON parse error: ", lastValueTS, ": ", err) - } - - assert.True(t, lastValueTimestamp.After(before)) - assert.True(t, lastValueTimestamp.Before(after)) -} - -func TestStdoutCounterFormat(t *testing.T) { - fix := newFixture(t) - - counter, err := fix.meter.SyncInt64().Counter("name.sum") - require.NoError(t, err) - counter.Add(fix.ctx, 123, attribute.String("A", "B"), attribute.String("C", "D")) - - require.NoError(t, fix.cont.Stop(fix.ctx)) - - require.Equal(t, `[{"Name":"name.sum{R=V,instrumentation.name=test,A=B,C=D}","Sum":123}]`, fix.Output()) -} - -func TestStdoutLastValueFormat(t *testing.T) { - fix := newFixture(t) - - counter, err := fix.meter.SyncFloat64().Counter("name.lastvalue") - require.NoError(t, err) - counter.Add(fix.ctx, 123.456, attribute.String("A", "B"), attribute.String("C", "D")) - - require.NoError(t, fix.cont.Stop(fix.ctx)) - - require.Equal(t, `[{"Name":"name.lastvalue{R=V,instrumentation.name=test,A=B,C=D}","Last":123.456}]`, fix.Output()) -} - -func TestStdoutHistogramFormat(t *testing.T) { - fix := newFixture(t, stdoutmetric.WithPrettyPrint()) - - inst, err := fix.meter.SyncFloat64().Histogram("name.histogram") - require.NoError(t, err) - - for i := 0; i < 1000; i++ { - inst.Record(fix.ctx, float64(i)+0.5, attribute.String("A", "B"), attribute.String("C", "D")) - } - require.NoError(t, fix.cont.Stop(fix.ctx)) - - // TODO: Stdout does not export `Count` for histogram, nor the buckets. 
- require.Equal(t, `[
- {
- "Name": "name.histogram{R=V,instrumentation.name=test,A=B,C=D}",
- "Sum": 500000
- }
-]`, fix.Output())
-}
-
-func TestStdoutNoData(t *testing.T) {
- runTwoAggs := func(aggName string) {
- t.Run(aggName, func(t *testing.T) {
- t.Parallel()
-
- fix := newFixture(t)
- _, err := fix.meter.SyncFloat64().Counter(fmt.Sprint("name.", aggName))
- require.NoError(t, err)
- require.NoError(t, fix.cont.Stop(fix.ctx))
-
- require.Equal(t, "", fix.Output())
- })
- }
-
- runTwoAggs("lastvalue")
-}
-
-func TestStdoutResource(t *testing.T) {
- type testCase struct {
- name string
- expect string
- res *resource.Resource
- attrs []attribute.KeyValue
- }
- newCase := func(name, expect string, res *resource.Resource, attrs ...attribute.KeyValue) testCase {
- return testCase{
- name: name,
- expect: expect,
- res: res,
- attrs: attrs,
- }
- }
- testCases := []testCase{
- newCase("resource and attribute",
- "R1=V1,R2=V2,instrumentation.name=test,A=B,C=D",
- resource.NewSchemaless(attribute.String("R1", "V1"), attribute.String("R2", "V2")),
- attribute.String("A", "B"),
- attribute.String("C", "D")),
- newCase("only resource",
- "R1=V1,R2=V2,instrumentation.name=test",
- resource.NewSchemaless(attribute.String("R1", "V1"), attribute.String("R2", "V2")),
- ),
- newCase("empty resource",
- "instrumentation.name=test,A=B,C=D",
- resource.Empty(),
- attribute.String("A", "B"),
- attribute.String("C", "D"),
- ),
- newCase("default resource",
- fmt.Sprint(resource.Default().Encoded(attribute.DefaultEncoder()),
- ",instrumentation.name=test,A=B,C=D"),
- resource.Default(),
- attribute.String("A", "B"),
- attribute.String("C", "D"),
- ),
- // We explicitly do not de-duplicate between resources
- // and metric attributes in this exporter.
- newCase("resource deduplication",
- "R1=V1,R2=V2,instrumentation.name=test,R1=V3,R2=V4",
- resource.NewSchemaless(attribute.String("R1", "V1"), attribute.String("R2", "V2")),
- attribute.String("R1", "V3"),
- attribute.String("R2", "V4")),
- }
-
- for _, tc := range testCases {
- t.Run(tc.name, func(t *testing.T) {
- ctx := context.Background()
- fix := newFixtureWithResource(t, tc.res)
-
- counter, err := fix.meter.SyncFloat64().Counter("name.lastvalue")
- require.NoError(t, err)
- counter.Add(ctx, 123.456, tc.attrs...)
-
- require.NoError(t, fix.cont.Stop(fix.ctx))
-
- require.Equal(t, `[{"Name":"name.lastvalue{`+tc.expect+`}","Last":123.456}]`, fix.Output())
- })
- }
-}
diff --git a/sdk/metric/aggregation/aggregation.go b/sdk/metric/aggregation/aggregation.go
new file mode 100644
index 00000000000..1eb2c348699
--- /dev/null
+++ b/sdk/metric/aggregation/aggregation.go
@@ -0,0 +1,164 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build go1.17
+// +build go1.17
+
+// Package aggregation contains configuration types that define the
+// aggregation operation used to summarize recorded measurements.
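+//
+// The types here are plain data; construct one and validate it with Err
+// before handing it to the SDK. A minimal sketch, using only this package
+// (how an Aggregation is attached to a view or reader is defined elsewhere
+// in the SDK):
+//
+//	agg := aggregation.ExplicitBucketHistogram{
+//		Boundaries: []float64{0, 10, 100, 1000},
+//	}
+//	if err := agg.Err(); err != nil {
+//		// Non-monotonic boundaries and other misconfigurations are
+//		// reported here.
+//	}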
+package aggregation // import "go.opentelemetry.io/otel/sdk/metric/aggregation"
+
+import (
+ "errors"
+ "fmt"
+)
+
+// errAgg is wrapped by misconfigured aggregations.
+var errAgg = errors.New("aggregation")
+
+// Aggregation is the aggregation used to summarize recorded measurements.
+type Aggregation interface {
+ // private attempts to ensure no user-defined Aggregations are allowed.
+ // The OTel specification does not currently allow user-defined
+ // Aggregations.
+ private()
+
+ // Copy returns a deep copy of the Aggregation.
+ Copy() Aggregation
+
+ // Err returns an error for any misconfigured Aggregation.
+ Err() error
+}
+
+// Drop is an aggregation that drops all recorded data.
+type Drop struct{} // Drop has no parameters.
+
+var _ Aggregation = Drop{}
+
+func (Drop) private() {}
+
+// Copy returns a deep copy of d.
+func (d Drop) Copy() Aggregation { return d }
+
+// Err returns an error for any misconfiguration. A Drop aggregation has no
+// parameters and cannot be misconfigured, therefore this always returns nil.
+func (Drop) Err() error { return nil }
+
+// Default is an aggregation that uses the default instrument kind selection
+// mapping to select another aggregation. A metric reader can be configured to
+// make an aggregation selection based on instrument kind that differs from
+// the default. This aggregation ensures the default is used.
+//
+// See the "go.opentelemetry.io/otel/sdk/metric".DefaultAggregationSelector
+// for information about the default instrument kind selection mapping.
+type Default struct{} // Default has no parameters.
+
+var _ Aggregation = Default{}
+
+func (Default) private() {}
+
+// Copy returns a deep copy of d.
+func (d Default) Copy() Aggregation { return d }
+
+// Err returns an error for any misconfiguration. A Default aggregation has no
+// parameters and cannot be misconfigured, therefore this always returns nil.
+func (Default) Err() error { return nil }
+
+// Sum is an aggregation that summarizes a set of measurements as their
+// arithmetic sum.
+type Sum struct{} // Sum has no parameters.
+
+var _ Aggregation = Sum{}
+
+func (Sum) private() {}
+
+// Copy returns a deep copy of s.
+func (s Sum) Copy() Aggregation { return s }
+
+// Err returns an error for any misconfiguration. A Sum aggregation has no
+// parameters and cannot be misconfigured, therefore this always returns nil.
+func (Sum) Err() error { return nil }
+
+// LastValue is an aggregation that summarizes a set of measurements as the
+// last one made.
+type LastValue struct{} // LastValue has no parameters.
+
+var _ Aggregation = LastValue{}
+
+func (LastValue) private() {}
+
+// Copy returns a deep copy of l.
+func (l LastValue) Copy() Aggregation { return l }
+
+// Err returns an error for any misconfiguration. A LastValue aggregation has
+// no parameters and cannot be misconfigured, therefore this always returns
+// nil.
+func (LastValue) Err() error { return nil }
+
+// ExplicitBucketHistogram is an aggregation that summarizes a set of
+// measurements as a histogram with explicitly defined buckets.
+type ExplicitBucketHistogram struct {
+ // Boundaries are the increasing bucket boundary values. Boundary values
+ // define bucket upper bounds. Buckets are exclusive of their lower
+ // boundary and inclusive of their upper bound (except at positive
+ // infinity). A measurement is defined to fall into the lowest-numbered
+ // bucket with a boundary that is greater than or equal to the
+ // measurement.
As an example, boundaries defined as: + // + // []float64{0, 5, 10, 25, 50, 75, 100, 250, 500, 1000} + // + // Will define these buckets: + // + // (-∞, 0], (0, 5.0], (5.0, 10.0], (10.0, 25.0], (25.0, 50.0], + // (50.0, 75.0], (75.0, 100.0], (100.0, 250.0], (250.0, 500.0], + // (500.0, 1000.0], (1000.0, +∞) + Boundaries []float64 + // NoMinMax indicates whether to not record the min and max of the + // distribution. By default, these extremes are recorded. + NoMinMax bool +} + +var _ Aggregation = ExplicitBucketHistogram{} + +func (ExplicitBucketHistogram) private() {} + +// errHist is returned by misconfigured ExplicitBucketHistograms. +var errHist = fmt.Errorf("%w: explicit bucket histogram", errAgg) + +// Err returns an error for any misconfiguration. +func (h ExplicitBucketHistogram) Err() error { + if len(h.Boundaries) <= 1 { + return nil + } + + // Check boundaries are monotonic. + i := h.Boundaries[0] + for _, j := range h.Boundaries[1:] { + if i >= j { + return fmt.Errorf("%w: non-monotonic boundaries: %v", errHist, h.Boundaries) + } + i = j + } + + return nil +} + +// Copy returns a deep copy of h. +func (h ExplicitBucketHistogram) Copy() Aggregation { + b := make([]float64, len(h.Boundaries)) + copy(b, h.Boundaries) + return ExplicitBucketHistogram{ + Boundaries: b, + NoMinMax: h.NoMinMax, + } +} diff --git a/sdk/metric/aggregation/aggregation_test.go b/sdk/metric/aggregation/aggregation_test.go new file mode 100644 index 00000000000..772d7ea8fe1 --- /dev/null +++ b/sdk/metric/aggregation/aggregation_test.go @@ -0,0 +1,70 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +//go:build go1.17 +// +build go1.17 + +package aggregation + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestAggregationErr(t *testing.T) { + t.Run("DropOperation", func(t *testing.T) { + assert.NoError(t, Drop{}.Err()) + }) + + t.Run("SumOperation", func(t *testing.T) { + assert.NoError(t, Sum{}.Err()) + }) + + t.Run("LastValueOperation", func(t *testing.T) { + assert.NoError(t, LastValue{}.Err()) + }) + + t.Run("ExplicitBucketHistogramOperation", func(t *testing.T) { + assert.NoError(t, ExplicitBucketHistogram{}.Err()) + + assert.NoError(t, ExplicitBucketHistogram{ + Boundaries: []float64{0}, + NoMinMax: true, + }.Err()) + + assert.NoError(t, ExplicitBucketHistogram{ + Boundaries: []float64{0, 5, 10, 25, 50, 75, 100, 250, 500, 1000}, + }.Err()) + }) + + t.Run("NonmonotonicHistogramBoundaries", func(t *testing.T) { + assert.ErrorIs(t, ExplicitBucketHistogram{ + Boundaries: []float64{2, 1}, + }.Err(), errAgg) + + assert.ErrorIs(t, ExplicitBucketHistogram{ + Boundaries: []float64{0, 1, 2, 1, 3, 4}, + }.Err(), errAgg) + }) +} + +func TestExplicitBucketHistogramDeepCopy(t *testing.T) { + const orig = 0.0 + b := []float64{orig} + h := ExplicitBucketHistogram{Boundaries: b} + cpH := h.Copy().(ExplicitBucketHistogram) + b[0] = orig + 1 + assert.Equal(t, orig, cpH.Boundaries[0], "changing the underlying slice data should not affect the copy") +} diff --git a/sdk/metric/aggregator/aggregator.go b/sdk/metric/aggregator/aggregator.go deleted file mode 100644 index 59a2b4ffa68..00000000000 --- a/sdk/metric/aggregator/aggregator.go +++ /dev/null @@ -1,114 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package aggregator // import "go.opentelemetry.io/otel/sdk/metric/aggregator" - -import ( - "context" - "fmt" - "math" - - "go.opentelemetry.io/otel/sdk/metric/export/aggregation" - "go.opentelemetry.io/otel/sdk/metric/number" - "go.opentelemetry.io/otel/sdk/metric/sdkapi" -) - -// Aggregator implements a specific aggregation behavior, e.g., a -// behavior to track a sequence of updates to an instrument. Counter -// instruments commonly use a simple Sum aggregator, but for the -// distribution instruments (Histogram, GaugeObserver) there are a -// number of possible aggregators with different cost and accuracy -// tradeoffs. -// -// Note that any Aggregator may be attached to any instrument--this is -// the result of the OpenTelemetry API/SDK separation. It is possible -// to attach a Sum aggregator to a Histogram instrument. -type Aggregator interface { - // Aggregation returns an Aggregation interface to access the - // current state of this Aggregator. The caller is - // responsible for synchronization and must not call any the - // other methods in this interface concurrently while using - // the Aggregation. - Aggregation() aggregation.Aggregation - - // Update receives a new measured value and incorporates it - // into the aggregation. Update() calls may be called - // concurrently. 
- // - // Descriptor.NumberKind() should be consulted to determine - // whether the provided number is an int64 or float64. - // - // The Context argument comes from user-level code and could be - // inspected for a `correlation.Map` or `trace.SpanContext`. - Update(ctx context.Context, n number.Number, descriptor *sdkapi.Descriptor) error - - // SynchronizedMove is called during collection to finish one - // period of aggregation by atomically saving the - // currently-updating state into the argument Aggregator AND - // resetting the current value to the zero state. - // - // SynchronizedMove() is called concurrently with Update(). These - // two methods must be synchronized with respect to each - // other, for correctness. - // - // After saving a synchronized copy, the Aggregator can be converted - // into one or more of the interfaces in the `aggregation` sub-package, - // according to kind of Aggregator that was selected. - // - // This method will return an InconsistentAggregatorError if - // this Aggregator cannot be copied into the destination due - // to an incompatible type. - // - // This call has no Context argument because it is expected to - // perform only computation. - // - // When called with a nil `destination`, this Aggregator is reset - // and the current value is discarded. - SynchronizedMove(destination Aggregator, descriptor *sdkapi.Descriptor) error - - // Merge combines the checkpointed state from the argument - // Aggregator into this Aggregator. Merge is not synchronized - // with respect to Update or SynchronizedMove. - // - // The owner of an Aggregator being merged is responsible for - // synchronization of both Aggregator states. - Merge(aggregator Aggregator, descriptor *sdkapi.Descriptor) error -} - -// NewInconsistentAggregatorError formats an error describing an attempt to -// Checkpoint or Merge different-type aggregators. The result can be unwrapped as -// an ErrInconsistentType. -func NewInconsistentAggregatorError(a1, a2 Aggregator) error { - return fmt.Errorf("%w: %T and %T", aggregation.ErrInconsistentType, a1, a2) -} - -// RangeTest is a common routine for testing for valid input values. -// This rejects NaN values. This rejects negative values when the -// metric instrument does not support negative values, including -// monotonic counter metrics and absolute Histogram metrics. -func RangeTest(num number.Number, descriptor *sdkapi.Descriptor) error { - numberKind := descriptor.NumberKind() - - if numberKind == number.Float64Kind && math.IsNaN(num.AsFloat64()) { - return aggregation.ErrNaNInput - } - - switch descriptor.InstrumentKind() { - case sdkapi.CounterInstrumentKind, sdkapi.CounterObserverInstrumentKind: - if num.IsNegative(numberKind) { - return aggregation.ErrNegativeInput - } - } - return nil -} diff --git a/sdk/metric/aggregator/aggregator_test.go b/sdk/metric/aggregator/aggregator_test.go deleted file mode 100644 index aab8393c932..00000000000 --- a/sdk/metric/aggregator/aggregator_test.go +++ /dev/null @@ -1,104 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package aggregator_test // import "go.opentelemetry.io/otel/sdk/metric/aggregator" - -import ( - "errors" - "math" - "testing" - - "github.com/stretchr/testify/require" - - "go.opentelemetry.io/otel/sdk/metric/aggregator" - "go.opentelemetry.io/otel/sdk/metric/aggregator/lastvalue" - "go.opentelemetry.io/otel/sdk/metric/aggregator/sum" - "go.opentelemetry.io/otel/sdk/metric/export/aggregation" - "go.opentelemetry.io/otel/sdk/metric/metrictest" - "go.opentelemetry.io/otel/sdk/metric/number" - "go.opentelemetry.io/otel/sdk/metric/sdkapi" -) - -func TestInconsistentAggregatorErr(t *testing.T) { - err := aggregator.NewInconsistentAggregatorError(&sum.New(1)[0], &lastvalue.New(1)[0]) - require.Equal( - t, - "inconsistent aggregator types: *sum.Aggregator and *lastvalue.Aggregator", - err.Error(), - ) - require.True(t, errors.Is(err, aggregation.ErrInconsistentType)) -} - -func testRangeNaN(t *testing.T, desc *sdkapi.Descriptor) { - // If the descriptor uses int64 numbers, this won't register as NaN - nan := number.NewFloat64Number(math.NaN()) - err := aggregator.RangeTest(nan, desc) - - if desc.NumberKind() == number.Float64Kind { - require.Equal(t, aggregation.ErrNaNInput, err) - } else { - require.Nil(t, err) - } -} - -func testRangeNegative(t *testing.T, desc *sdkapi.Descriptor) { - var neg, pos number.Number - - if desc.NumberKind() == number.Float64Kind { - pos = number.NewFloat64Number(+1) - neg = number.NewFloat64Number(-1) - } else { - pos = number.NewInt64Number(+1) - neg = number.NewInt64Number(-1) - } - - posErr := aggregator.RangeTest(pos, desc) - negErr := aggregator.RangeTest(neg, desc) - - require.Nil(t, posErr) - require.Equal(t, negErr, aggregation.ErrNegativeInput) -} - -func TestRangeTest(t *testing.T) { - // Only Counters implement a range test. - for _, nkind := range []number.Kind{number.Float64Kind, number.Int64Kind} { - t.Run(nkind.String(), func(t *testing.T) { - desc := metrictest.NewDescriptor( - "name", - sdkapi.CounterInstrumentKind, - nkind, - ) - testRangeNegative(t, &desc) - }) - } -} - -func TestNaNTest(t *testing.T) { - for _, nkind := range []number.Kind{number.Float64Kind, number.Int64Kind} { - t.Run(nkind.String(), func(t *testing.T) { - for _, mkind := range []sdkapi.InstrumentKind{ - sdkapi.CounterInstrumentKind, - sdkapi.HistogramInstrumentKind, - sdkapi.GaugeObserverInstrumentKind, - } { - desc := metrictest.NewDescriptor( - "name", - mkind, - nkind, - ) - testRangeNaN(t, &desc) - } - }) - } -} diff --git a/sdk/metric/aggregator/aggregatortest/test.go b/sdk/metric/aggregator/aggregatortest/test.go deleted file mode 100644 index f4778528b82..00000000000 --- a/sdk/metric/aggregator/aggregatortest/test.go +++ /dev/null @@ -1,307 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package aggregatortest // import "go.opentelemetry.io/otel/sdk/metric/aggregator/aggregatortest" - -import ( - "context" - "errors" - "math/rand" - "os" - "sort" - "testing" - "unsafe" - - "github.com/stretchr/testify/require" - - ottest "go.opentelemetry.io/otel/internal/internaltest" - "go.opentelemetry.io/otel/sdk/metric/aggregator" - "go.opentelemetry.io/otel/sdk/metric/export/aggregation" - "go.opentelemetry.io/otel/sdk/metric/number" - "go.opentelemetry.io/otel/sdk/metric/sdkapi" -) - -// Magnitude is the upper-bound of random numbers used in profile tests. -const Magnitude = 1000 - -// Profile is an aggregator test profile. -type Profile struct { - NumberKind number.Kind - Random func(sign int) number.Number -} - -// NoopAggregator is an aggregator that performs no operations. -type NoopAggregator struct{} - -// NoopAggregation is an aggregation that performs no operations. -type NoopAggregation struct{} - -var _ aggregator.Aggregator = NoopAggregator{} -var _ aggregation.Aggregation = NoopAggregation{} - -func newProfiles() []Profile { - rnd := rand.New(rand.NewSource(rand.Int63())) - return []Profile{ - { - NumberKind: number.Int64Kind, - Random: func(sign int) number.Number { - return number.NewInt64Number(int64(sign) * int64(rnd.Intn(Magnitude+1))) - }, - }, - { - NumberKind: number.Float64Kind, - Random: func(sign int) number.Number { - return number.NewFloat64Number(float64(sign) * rnd.Float64() * Magnitude) - }, - }, - } -} - -// NewAggregatorTest returns a descriptor for mkind and nkind. -func NewAggregatorTest(mkind sdkapi.InstrumentKind, nkind number.Kind) *sdkapi.Descriptor { - desc := sdkapi.NewDescriptor("test.name", mkind, nkind, "", "") - return &desc -} - -// RunProfiles runs all test profile against the factory function f. -func RunProfiles(t *testing.T, f func(*testing.T, Profile)) { - for _, profile := range newProfiles() { - t.Run(profile.NumberKind.String(), func(t *testing.T) { - f(t, profile) - }) - } -} - -// TestMain ensures local struct alignment prior to running tests. -func TestMain(m *testing.M) { - fields := []ottest.FieldOffset{ - { - Name: "Numbers.numbers", - Offset: unsafe.Offsetof(Numbers{}.numbers), - }, - } - if !ottest.Aligned8Byte(fields, os.Stderr) { - // nolint:revive // this is a main func, allow Exit. - os.Exit(1) - } - - // nolint:revive // this is a main func, allow Exit. - os.Exit(m.Run()) -} - -// Numbers are a collection of measured data point values. -type Numbers struct { - // numbers has to be aligned for 64-bit atomic operations. - numbers []number.Number - kind number.Kind -} - -// NewNumbers returns a new Numbers for the passed kind. -func NewNumbers(kind number.Kind) Numbers { - return Numbers{ - kind: kind, - } -} - -// Append appends v to the numbers n. -func (n *Numbers) Append(v number.Number) { - n.numbers = append(n.numbers, v) -} - -// Sort sorts all the numbers contained in n. -func (n *Numbers) Sort() { - sort.Sort(n) -} - -// Less returns if the number at index i is less than the number at index j. -func (n *Numbers) Less(i, j int) bool { - return n.numbers[i].CompareNumber(n.kind, n.numbers[j]) < 0 -} - -// Len returns number of data points Numbers contains. -func (n *Numbers) Len() int { - return len(n.numbers) -} - -// Swap swaps the location of the numbers at index i and j. -func (n *Numbers) Swap(i, j int) { - n.numbers[i], n.numbers[j] = n.numbers[j], n.numbers[i] -} - -// Sum returns the sum of all data points. 
-func (n *Numbers) Sum() number.Number { - var sum number.Number - for _, num := range n.numbers { - sum.AddNumber(n.kind, num) - } - return sum -} - -// Count returns the number of data points Numbers contains. -func (n *Numbers) Count() uint64 { - return uint64(len(n.numbers)) -} - -// Min returns the min number. -func (n *Numbers) Min() number.Number { - return n.numbers[0] -} - -// Max returns the max number. -func (n *Numbers) Max() number.Number { - return n.numbers[len(n.numbers)-1] -} - -// Points returns the slice of number for all data points. -func (n *Numbers) Points() []number.Number { - return n.numbers -} - -// CheckedUpdate performs the same range test the SDK does on behalf of the aggregator. -func CheckedUpdate(t *testing.T, agg aggregator.Aggregator, n number.Number, descriptor *sdkapi.Descriptor) { - ctx := context.Background() - - // Note: Aggregator tests are written assuming that the SDK - // has performed the RangeTest. Therefore we skip errors that - // would have been detected by the RangeTest. - err := aggregator.RangeTest(n, descriptor) - if err != nil { - return - } - - if err := agg.Update(ctx, n, descriptor); err != nil { - t.Error("Unexpected Update failure", err) - } -} - -// CheckedMerge verifies aggFrom merges into aggInto with the scope of -// descriptor. -func CheckedMerge(t *testing.T, aggInto, aggFrom aggregator.Aggregator, descriptor *sdkapi.Descriptor) { - if err := aggInto.Merge(aggFrom, descriptor); err != nil { - t.Error("Unexpected Merge failure", err) - } -} - -// Kind returns a Noop aggregation Kind. -func (NoopAggregation) Kind() aggregation.Kind { - return aggregation.Kind("Noop") -} - -// Aggregation returns a NoopAggregation. -func (NoopAggregator) Aggregation() aggregation.Aggregation { - return NoopAggregation{} -} - -// Update performs no operation. -func (NoopAggregator) Update(context.Context, number.Number, *sdkapi.Descriptor) error { - return nil -} - -// SynchronizedMove performs no operation. -func (NoopAggregator) SynchronizedMove(aggregator.Aggregator, *sdkapi.Descriptor) error { - return nil -} - -// Merge performs no operation. -func (NoopAggregator) Merge(aggregator.Aggregator, *sdkapi.Descriptor) error { - return nil -} - -// SynchronizedMoveResetTest tests SynchronizedMove behavior for an aggregator -// during resets. -func SynchronizedMoveResetTest(t *testing.T, mkind sdkapi.InstrumentKind, nf func(*sdkapi.Descriptor) aggregator.Aggregator) { - t.Run("reset on nil", func(t *testing.T) { - // Ensures that SynchronizedMove(nil, descriptor) discards and - // resets the aggregator. - RunProfiles(t, func(t *testing.T, profile Profile) { - descriptor := NewAggregatorTest( - mkind, - profile.NumberKind, - ) - agg := nf(descriptor) - - for i := 0; i < 10; i++ { - x1 := profile.Random(+1) - CheckedUpdate(t, agg, x1, descriptor) - } - - require.NoError(t, agg.SynchronizedMove(nil, descriptor)) - - if count, ok := agg.(aggregation.Count); ok { - c, err := count.Count() - require.Equal(t, uint64(0), c) - require.NoError(t, err) - } - - if sum, ok := agg.(aggregation.Sum); ok { - s, err := sum.Sum() - require.Equal(t, number.Number(0), s) - require.NoError(t, err) - } - - if lv, ok := agg.(aggregation.LastValue); ok { - v, _, err := lv.LastValue() - require.Equal(t, number.Number(0), v) - require.Error(t, err) - require.True(t, errors.Is(err, aggregation.ErrNoData)) - } - }) - }) - - t.Run("no reset on incorrect type", func(t *testing.T) { - // Ensures that SynchronizedMove(wrong_type, descriptor) does not - // reset the aggregator. 
- RunProfiles(t, func(t *testing.T, profile Profile) { - descriptor := NewAggregatorTest( - mkind, - profile.NumberKind, - ) - agg := nf(descriptor) - - var input number.Number - const inval = 100 - if profile.NumberKind == number.Int64Kind { - input = number.NewInt64Number(inval) - } else { - input = number.NewFloat64Number(inval) - } - - CheckedUpdate(t, agg, input, descriptor) - - err := agg.SynchronizedMove(NoopAggregator{}, descriptor) - require.Error(t, err) - require.True(t, errors.Is(err, aggregation.ErrInconsistentType)) - - // Test that the aggregator was not reset - - if count, ok := agg.(aggregation.Count); ok { - c, err := count.Count() - require.Equal(t, uint64(1), c) - require.NoError(t, err) - } - - if sum, ok := agg.(aggregation.Sum); ok { - s, err := sum.Sum() - require.Equal(t, input, s) - require.NoError(t, err) - } - - if lv, ok := agg.(aggregation.LastValue); ok { - v, _, err := lv.LastValue() - require.Equal(t, input, v) - require.NoError(t, err) - } - }) - }) -} diff --git a/sdk/metric/aggregator/exponential/README.md b/sdk/metric/aggregator/exponential/README.md deleted file mode 100644 index 490e1147557..00000000000 --- a/sdk/metric/aggregator/exponential/README.md +++ /dev/null @@ -1,27 +0,0 @@ -# Base-2 Exponential Histogram - -## Design - -This document is a placeholder for future Aggregator, once seen in [PR -2393](https://github.com/open-telemetry/opentelemetry-go/pull/2393). - -Only the mapping functions have been made available at this time. The -equations tested here are specified in the [data model for Exponential -Histogram data points](https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/data-model.md#exponentialhistogram). - -### Mapping function - -There are two mapping functions used, depending on the sign of the -scale. Negative and zero scales use the `mapping/exponent` mapping -function, which computes the bucket index directly from the bits of -the `float64` exponent. This mapping function is used with scale `-10 -<= scale <= 0`. Scales smaller than -10 map the entire normal -`float64` number range into a single bucket, thus are not considered -useful. - -The `mapping/logarithm` mapping function uses `math.Log(value)` times -the scaling factor `math.Ldexp(math.Log2E, scale)`. This mapping -function is used with `0 < scale <= 20`. The maximum scale is -selected because at scale 21, simply, it becomes difficult to test -correctness--at this point `math.MaxFloat64` maps to index -`math.MaxInt32` and the `math/big` logic used in testing breaks down. diff --git a/sdk/metric/aggregator/exponential/benchmark_test.go b/sdk/metric/aggregator/exponential/benchmark_test.go deleted file mode 100644 index fd0ce7e41aa..00000000000 --- a/sdk/metric/aggregator/exponential/benchmark_test.go +++ /dev/null @@ -1,67 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package exponential - -import ( - "fmt" - "math/rand" - "testing" - - "go.opentelemetry.io/otel/sdk/metric/aggregator/exponential/mapping" - "go.opentelemetry.io/otel/sdk/metric/aggregator/exponential/mapping/exponent" - "go.opentelemetry.io/otel/sdk/metric/aggregator/exponential/mapping/logarithm" -) - -func benchmarkMapping(b *testing.B, name string, mapper mapping.Mapping) { - b.Run(fmt.Sprintf("mapping_%s", name), func(b *testing.B) { - src := rand.New(rand.NewSource(54979)) - for i := 0; i < b.N; i++ { - _ = mapper.MapToIndex(1 + src.Float64()) - } - }) -} - -func benchmarkBoundary(b *testing.B, name string, mapper mapping.Mapping) { - b.Run(fmt.Sprintf("boundary_%s", name), func(b *testing.B) { - src := rand.New(rand.NewSource(54979)) - for i := 0; i < b.N; i++ { - _, _ = mapper.LowerBoundary(int32(src.Int63())) - } - }) -} - -// An earlier draft of this benchmark included a lookup-table based -// implementation: -// https://github.com/open-telemetry/opentelemetry-go-contrib/pull/1353 -// That mapping function uses O(2^scale) extra space and falls -// somewhere between the exponent and logarithm methods compared here. -// In the test, lookuptable was 40% faster than logarithm, which did -// not justify the significant extra complexity. - -// Benchmarks the MapToIndex function. -func BenchmarkMapping(b *testing.B) { - em, _ := exponent.NewMapping(-1) - lm, _ := logarithm.NewMapping(1) - benchmarkMapping(b, "exponent", em) - benchmarkMapping(b, "logarithm", lm) -} - -// Benchmarks the LowerBoundary function. -func BenchmarkReverseMapping(b *testing.B) { - em, _ := exponent.NewMapping(-1) - lm, _ := logarithm.NewMapping(1) - benchmarkBoundary(b, "exponent", em) - benchmarkBoundary(b, "logarithm", lm) -} diff --git a/sdk/metric/aggregator/exponential/mapping/exponent/exponent.go b/sdk/metric/aggregator/exponential/mapping/exponent/exponent.go deleted file mode 100644 index 3daec4b5a18..00000000000 --- a/sdk/metric/aggregator/exponential/mapping/exponent/exponent.go +++ /dev/null @@ -1,127 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package exponent // import "go.opentelemetry.io/otel/sdk/metric/aggregator/exponential/mapping/exponent" - -import ( - "fmt" - "math" - - "go.opentelemetry.io/otel/sdk/metric/aggregator/exponential/mapping" - "go.opentelemetry.io/otel/sdk/metric/aggregator/exponential/mapping/internal" -) - -const ( - // MinScale defines the point at which the exponential mapping - // function becomes useless for float64. With scale -10, ignoring - // subnormal values, bucket indices range from -1 to 1. - MinScale int32 = -10 - - // MaxScale is the largest scale supported in this code. Use - // ../logarithm for larger scales. - MaxScale int32 = 0 -) - -type exponentMapping struct { - shift uint8 // equals negative scale -} - -// exponentMapping is used for negative scales, effectively a -// mapping of the base-2 logarithm of the exponent. 
-var prebuiltMappings = [-MinScale + 1]exponentMapping{ - {10}, - {9}, - {8}, - {7}, - {6}, - {5}, - {4}, - {3}, - {2}, - {1}, - {0}, -} - -// NewMapping constructs an exponential mapping function, used for scales <= 0. -func NewMapping(scale int32) (mapping.Mapping, error) { - if scale > MaxScale { - return nil, fmt.Errorf("exponent mapping requires scale <= 0") - } - if scale < MinScale { - return nil, fmt.Errorf("scale too low") - } - return &prebuiltMappings[scale-MinScale], nil -} - -// minNormalLowerBoundaryIndex is the largest index such that -// base**index is <= MinValue. A histogram bucket with this index -// covers the range (base**index, base**(index+1)], including -// MinValue. -func (e *exponentMapping) minNormalLowerBoundaryIndex() int32 { - idx := int32(internal.MinNormalExponent) >> e.shift - if e.shift < 2 { - // For scales -1 and 0 the minimum value 2**-1022 - // is a power-of-two multiple, meaning it belongs - // to the index one less. - idx-- - } - return idx -} - -// maxNormalLowerBoundaryIndex is the index such that base**index -// equals the largest representable boundary. A histogram bucket with this -// index covers the range (0x1p+1024/base, 0x1p+1024], which includes -// MaxValue; note that this bucket is incomplete, since the upper -// boundary cannot be represented. One greater than this index -// corresponds with the bucket containing values > 0x1p1024. -func (e *exponentMapping) maxNormalLowerBoundaryIndex() int32 { - return int32(internal.MaxNormalExponent) >> e.shift -} - -// MapToIndex implements mapping.Mapping. -func (e *exponentMapping) MapToIndex(value float64) int32 { - // Note: we can assume not a 0, Inf, or NaN; positive sign bit. - if value < internal.MinValue { - return e.minNormalLowerBoundaryIndex() - } - - // Extract the raw exponent. - rawExp := internal.GetNormalBase2(value) - - // In case the value is an exact power of two, compute a - // correction of -1: - correction := int32((internal.GetSignificand(value) - 1) >> internal.SignificandWidth) - - // Note: bit-shifting does the right thing for negative - // exponents, e.g., -1 >> 1 == -1. - return (rawExp + correction) >> e.shift -} - -// LowerBoundary implements mapping.Mapping. -func (e *exponentMapping) LowerBoundary(index int32) (float64, error) { - if min := e.minNormalLowerBoundaryIndex(); index < min { - return 0, mapping.ErrUnderflow - } - - if max := e.maxNormalLowerBoundaryIndex(); index > max { - return 0, mapping.ErrOverflow - } - - return math.Ldexp(1, int(index<<e.shift)), nil -} - -// Scale implements mapping.Mapping. -func (e *exponentMapping) Scale() int32 { - return -int32(e.shift) -} diff --git a/sdk/metric/aggregator/exponential/mapping/exponent/exponent_test.go b/sdk/metric/aggregator/exponential/mapping/exponent/exponent_test.go deleted file mode 100644 --- a/sdk/metric/aggregator/exponential/mapping/exponent/exponent_test.go +++ /dev/null -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package exponent - -import ( - "math" - "math/big" - "testing" - - "github.com/stretchr/testify/require" - - "go.opentelemetry.io/otel/sdk/metric/aggregator/exponential/mapping" - "go.opentelemetry.io/otel/sdk/metric/aggregator/exponential/mapping/internal" -) - -const ( - MaxNormalExponent = internal.MaxNormalExponent - MinNormalExponent = internal.MinNormalExponent - MaxValue = internal.MaxValue - MinValue = internal.MinValue -) - -// roundedBoundary computes the exact lower boundary of an index at -// the given scale using math/big, squaring once per step of negative -// scale. -func roundedBoundary(scale, index int32) float64 { - one := big.NewFloat(1) - f := (&big.Float{}).SetMantExp(one, int(index)) - for i := scale; i < 0; i++ { - f = (&big.Float{}).Mul(f, f) - } - - result, _ := f.Float64() - return result -} - -// TestExponentIndexMax ensures that for every valid scale, MaxFloat -// maps into the correct maximum index. Also tests that the reverse -// lookup does not produce infinity and the following index produces -// an overflow error. -func TestExponentIndexMax(t *testing.T) { - for scale := MinScale; scale <= MaxScale; scale++ { - m, err := NewMapping(scale) - require.NoError(t, err) - - index := m.MapToIndex(MaxValue) - - // Correct max index is one less than the first index - // that overflows math.MaxFloat64, i.e., one less than - // the index of +Inf. - maxIndex := (int32(MaxNormalExponent+1) >> -scale) - 1 - require.Equal(t, index, int32(maxIndex)) - - // The index maps to a finite boundary. - bound, err := m.LowerBoundary(index) - require.NoError(t, err) - - require.Equal(t, bound, roundedBoundary(scale, maxIndex)) - - // One larger index will overflow. - _, err = m.LowerBoundary(index + 1) - require.Equal(t, err, mapping.ErrOverflow) - } -} - -// TestExponentIndexMin ensures that for every valid scale, the -// smallest normal number and all smaller numbers map to the correct -// index, which is that of the smallest normal number. -// -// Tests that the lower boundary of the smallest bucket is correct, -// even when that number is subnormal. -func TestExponentIndexMin(t *testing.T) { - for scale := MinScale; scale <= MaxScale; scale++ { - m, err := NewMapping(scale) - require.NoError(t, err) - - // Test the smallest normal value.
- minIndex := m.MapToIndex(MinValue) - - boundary, err := m.LowerBoundary(minIndex) - require.NoError(t, err) - - // The correct index for MinValue depends on whether - // 2**(-scale) evenly divides -1022. This is true for - // scales -1 and 0. - correctMinIndex := int64(MinNormalExponent) >> -scale - if MinNormalExponent%(int32(1)<<-scale) == 0 { - correctMinIndex-- - } - - require.Greater(t, correctMinIndex, int64(math.MinInt32)) - require.Equal(t, int32(correctMinIndex), minIndex) - - correctBoundary := roundedBoundary(scale, int32(correctMinIndex)) - - require.Equal(t, correctBoundary, boundary) - require.Greater(t, roundedBoundary(scale, int32(correctMinIndex+1)), boundary) - - // Subnormal values map to the min index: - require.Equal(t, int32(correctMinIndex), m.MapToIndex(MinValue/2)) - require.Equal(t, int32(correctMinIndex), m.MapToIndex(MinValue/3)) - require.Equal(t, int32(correctMinIndex), m.MapToIndex(MinValue/100)) - require.Equal(t, int32(correctMinIndex), m.MapToIndex(0x1p-1050)) - require.Equal(t, int32(correctMinIndex), m.MapToIndex(0x1p-1073)) - require.Equal(t, int32(correctMinIndex), m.MapToIndex(0x1.1p-1073)) - require.Equal(t, int32(correctMinIndex), m.MapToIndex(0x1p-1074)) - - // One smaller index will underflow. - _, err = m.LowerBoundary(minIndex - 1) - require.Equal(t, err, mapping.ErrUnderflow) - - // Next value above MinValue (not a power of two). - minPlus1Index := m.MapToIndex(math.Nextafter(MinValue, math.Inf(+1))) - - // The following boundary equation always works for - // non-powers of two (same as correctMinIndex before its - // power-of-two correction, above). - correctMinPlus1Index := int64(MinNormalExponent) >> -scale - require.Equal(t, int32(correctMinPlus1Index), minPlus1Index) - } -} diff --git a/sdk/metric/aggregator/exponential/mapping/internal/float64.go b/sdk/metric/aggregator/exponential/mapping/internal/float64.go deleted file mode 100644 index 6bac47fa698..00000000000 --- a/sdk/metric/aggregator/exponential/mapping/internal/float64.go +++ /dev/null @@ -1,72 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package internal // import "go.opentelemetry.io/otel/sdk/metric/aggregator/exponential/mapping/internal" - -import "math" - -const ( - // SignificandWidth is the size of an IEEE 754 double-precision - // floating-point significand. - SignificandWidth = 52 - // ExponentWidth is the size of an IEEE 754 double-precision - // floating-point exponent. - ExponentWidth = 11 - - // SignificandMask is the mask for the significand of an IEEE 754 - // double-precision floating-point value: 0xFFFFFFFFFFFFF. - SignificandMask = 1<<SignificandWidth - 1 - - // ExponentBias is the exponent bias specified for encoding - // the IEEE 754 double-precision floating point exponent: 1023. - ExponentBias = 1<<(ExponentWidth-1) - 1 - - // ExponentMask are set to 1 for the bits of an IEEE 754 - // floating point exponent: 0x7FF0000000000000. - ExponentMask = ((1 << ExponentWidth) - 1) << SignificandWidth - - // SignMask selects the sign bit of an IEEE 754 floating point - // number. - SignMask = (1 << (SignificandWidth + ExponentWidth)) - - // MinNormalExponent is the minimum exponent of a normalized - // floating point: -1022. - MinNormalExponent int32 = -ExponentBias + 1 - - // MaxNormalExponent is the maximum exponent of a normalized - // floating point: 1023. - MaxNormalExponent int32 = ExponentBias - - // MinValue is the smallest normal number. - MinValue = 0x1p-1022 - - // MaxValue is the largest normal number. - MaxValue = math.MaxFloat64 -) - -// GetNormalBase2 extracts the normalized base-2 fractional exponent. -// Unlike Frexp(), this returns k for the equation f x 2**k where f is -// in the range [1, 2). Note that this function is not called for -// subnormal numbers. -func GetNormalBase2(value float64) int32 { - rawBits := math.Float64bits(value) - rawExponent := (int64(rawBits) & ExponentMask) >> SignificandWidth - return int32(rawExponent - ExponentBias) -} - -// GetSignificand returns the 52 bit (unsigned) significand as a -// signed value.
-func GetSignificand(value float64) int64 { - return int64(math.Float64bits(value)) & SignificandMask -} diff --git a/sdk/metric/aggregator/exponential/mapping/internal/float64_test.go b/sdk/metric/aggregator/exponential/mapping/internal/float64_test.go deleted file mode 100644 index 7c86391744c..00000000000 --- a/sdk/metric/aggregator/exponential/mapping/internal/float64_test.go +++ /dev/null @@ -1,47 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package internal - -import ( - "testing" - - "github.com/stretchr/testify/require" -) - -// Tests that GetNormalBase2 returns the base-2 exponent as documented, unlike -// math.Frexp. -func TestGetNormalBase2(t *testing.T) { - require.Equal(t, int32(-1022), MinNormalExponent) - require.Equal(t, int32(+1023), MaxNormalExponent) - - require.Equal(t, MaxNormalExponent, GetNormalBase2(0x1p+1023)) - require.Equal(t, int32(1022), GetNormalBase2(0x1p+1022)) - - require.Equal(t, int32(0), GetNormalBase2(1)) - - require.Equal(t, int32(-1021), GetNormalBase2(0x1p-1021)) - require.Equal(t, int32(-1022), GetNormalBase2(0x1p-1022)) - - // Subnormals below this point - require.Equal(t, int32(-1023), GetNormalBase2(0x1p-1023)) - require.Equal(t, int32(-1023), GetNormalBase2(0x1p-1024)) - require.Equal(t, int32(-1023), GetNormalBase2(0x1p-1025)) - require.Equal(t, int32(-1023), GetNormalBase2(0x1p-1074)) -} - -func TestGetSignificand(t *testing.T) { - // The number 1.5 has a single most-significant bit set, i.e., 1<<51. - require.Equal(t, int64(1)<<(SignificandWidth-1), GetSignificand(1.5)) -} diff --git a/sdk/metric/aggregator/exponential/mapping/logarithm/logarithm.go b/sdk/metric/aggregator/exponential/mapping/logarithm/logarithm.go deleted file mode 100644 index 28ab8436e2b..00000000000 --- a/sdk/metric/aggregator/exponential/mapping/logarithm/logarithm.go +++ /dev/null @@ -1,190 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package logarithm // import "go.opentelemetry.io/otel/sdk/metric/aggregator/exponential/mapping/logarithm" - -import ( - "fmt" - "math" - "sync" - - "go.opentelemetry.io/otel/sdk/metric/aggregator/exponential/mapping" - "go.opentelemetry.io/otel/sdk/metric/aggregator/exponential/mapping/internal" -) - -const ( - // MinScale ensures that the ../exponent mapper is used for - // zero and negative scale values. Do not use the logarithm - // mapper for scales <= 0. 
- MinScale int32 = 1 - - // MaxScale is selected as the largest scale that is possible - // in current code, considering there are 10 bits of base-2 - // exponent combined with scale-bits of range. At this scale, - // the growth factor is 0.0000661%. - // - // Scales larger than 20 complicate the logic in cmd/prebuild, - // because math/big overflows when exponent is math.MaxInt32 - // (== the index of math.MaxFloat64 at scale=21), - // - // At scale=20, index values are in the interval [-0x3fe00000, - // 0x3fffffff], having 31 bits of information. This is - // sensible given that the OTLP exponential histogram data - // point uses a signed 32 bit integer for indices. - MaxScale int32 = 20 - - // MinValue is the smallest normal number. - MinValue = internal.MinValue - - // MaxValue is the largest normal number. - MaxValue = internal.MaxValue -) - -// logarithmMapping contains the constants used to implement the -// exponential mapping function for a particular scale > 0. -type logarithmMapping struct { - // scale is between MinScale and MaxScale. The exponential - // base is defined as 2**(2**(-scale)). - scale int32 - - // scaleFactor is used and computed as follows: - // index = log(value) / log(base) - // = log(value) / log(2^(2^-scale)) - // = log(value) / (2^-scale * log(2)) - // = log(value) * (1/log(2) * 2^scale) - // = log(value) * scaleFactor - // where: - // scaleFactor = (1/log(2) * 2^scale) - // = math.Log2E * math.Exp2(scale) - // = math.Ldexp(math.Log2E, scale) - // Because multiplication is faster than division, we define scaleFactor as a multiplier. - // This implementation was copied from a Java prototype. See: - // https://github.com/newrelic-experimental/newrelic-sketch-java/blob/1ce245713603d61ba3a4510f6df930a5479cd3f6/src/main/java/com/newrelic/nrsketch/indexer/LogIndexer.java - // for the equations used here. - scaleFactor float64 - - // log(boundary) = index * log(base) - // log(boundary) = index * log(2^(2^-scale)) - // log(boundary) = index * 2^-scale * log(2) - // boundary = exp(index * inverseFactor) - // where: - // inverseFactor = 2^-scale * log(2) - // = math.Ldexp(math.Ln2, -scale) - inverseFactor float64 -} - -var ( - _ mapping.Mapping = &logarithmMapping{} - - prebuiltMappingsLock sync.Mutex - prebuiltMappings = map[int32]*logarithmMapping{} -) - -// NewMapping constructs a logarithm mapping function, used for scales > 0. -func NewMapping(scale int32) (mapping.Mapping, error) { - // An assumption used in this code is that scale is > 0. If - // scale is <= 0 it's better to use the exponent mapping. - if scale < MinScale || scale > MaxScale { - // scale 20 can represent the entire float64 range - // with a 30 bit index, and we don't handle larger - // scales to simplify range tests in this package. - return nil, fmt.Errorf("scale out of bounds") - } - prebuiltMappingsLock.Lock() - defer prebuiltMappingsLock.Unlock() - - if p := prebuiltMappings[scale]; p != nil { - return p, nil - } - l := &logarithmMapping{ - scale: scale, - scaleFactor: math.Ldexp(math.Log2E, int(scale)), - inverseFactor: math.Ldexp(math.Ln2, int(-scale)), - } - prebuiltMappings[scale] = l - return l, nil -} - -// minNormalLowerBoundaryIndex is the index such that base**index equals -// MinValue. A histogram bucket with this index covers the range -// (MinValue, MinValue*base]. One less than this index corresponds -// with the bucket containing values <= MinValue. 
-func (l *logarithmMapping) minNormalLowerBoundaryIndex() int32 { - return int32(internal.MinNormalExponent << l.scale) -} - -// maxNormalLowerBoundaryIndex is the index such that base**index equals the -// greatest representable lower boundary. A histogram bucket with this -// index covers the range (0x1p+1024/base, 0x1p+1024], which includes -// MaxValue; note that this bucket is incomplete, since the upper -// boundary cannot be represented. One greater than this index -// corresponds with the bucket containing values > 0x1p1024. -func (l *logarithmMapping) maxNormalLowerBoundaryIndex() int32 { - return (int32(internal.MaxNormalExponent+1) << l.scale) - 1 -} - -// MapToIndex implements mapping.Mapping. -func (l *logarithmMapping) MapToIndex(value float64) int32 { - // Note: we can assume not a 0, Inf, or NaN; positive sign bit. - if value <= MinValue { - return l.minNormalLowerBoundaryIndex() - 1 - } - - // Exact power-of-two correctness: an optional special case. - if internal.GetSignificand(value) == 0 { - exp := internal.GetNormalBase2(value) - return (exp << l.scale) - 1 - } - - // Non-power of two cases. Use Floor(x) to round the scaled - // logarithm. We could use Ceil(x)-1 to achieve the same - // result, though Ceil() is typically defined as -Floor(-x) - // and typically not performed in hardware, so this is likely - // less code. - index := int32(math.Floor(math.Log(value) * l.scaleFactor)) - - if max := l.maxNormalLowerBoundaryIndex(); index >= max { - return max - } - return index -} - -// LowerBoundary implements mapping.Mapping. -func (l *logarithmMapping) LowerBoundary(index int32) (float64, error) { - if max := l.maxNormalLowerBoundaryIndex(); index >= max { - if index == max { - // Note that the equation on the last line of this - // function returns +Inf. Use the alternate equation. - return 2 * math.Exp(float64(index-(int32(1)<<l.scale))*l.inverseFactor), nil - } - return 0, mapping.ErrOverflow - } - if min := l.minNormalLowerBoundaryIndex(); index <= min { - if index == min { - return MinValue, nil - } - return 0, mapping.ErrUnderflow - } - return math.Exp(float64(index) * l.inverseFactor), nil -} - -// Scale implements mapping.Mapping. -func (l *logarithmMapping) Scale() int32 { - return l.scale -} diff --git a/sdk/metric/aggregator/exponential/mapping/logarithm/logarithm_test.go b/sdk/metric/aggregator/exponential/mapping/logarithm/logarithm_test.go deleted file mode 100644 --- a/sdk/metric/aggregator/exponential/mapping/logarithm/logarithm_test.go +++ /dev/null -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package logarithm - -import ( - "math" - "math/big" - "testing" - - "github.com/stretchr/testify/require" - - "go.opentelemetry.io/otel/sdk/metric/aggregator/exponential/mapping" - "go.opentelemetry.io/otel/sdk/metric/aggregator/exponential/mapping/internal" -) - -const ( - MaxNormalExponent = internal.MaxNormalExponent - MinNormalExponent = internal.MinNormalExponent -) - -// roundedBoundary computes the correct boundary for an index at the -// given scale using math/big, taking one square root per step of -// positive scale. -func roundedBoundary(scale, index int32) float64 { - one := big.NewFloat(1) - f := (&big.Float{}).SetMantExp(one, int(index)) - for i := scale; i > 0; i-- { - f = (&big.Float{}).Sqrt(f) - } - - result, _ := f.Float64() - return result -} - -// TestLogarithmIndexMax ensures that for every valid scale, MaxFloat -// maps into the correct maximum index. Also tests that the reverse -// lookup does not produce infinity and the following index produces -// an overflow error. -func TestLogarithmIndexMax(t *testing.T) { - for scale := MinScale; scale <= MaxScale; scale++ { - m, err := NewMapping(scale) - require.NoError(t, err) - - index := m.MapToIndex(MaxValue) - - // Correct max index is one less than the first index - // that overflows math.MaxFloat64, i.e., one less than - // the index of +Inf. - maxIndex64 := (int64(MaxNormalExponent+1) << scale) - 1 - require.Less(t, maxIndex64, int64(math.MaxInt32)) - require.Equal(t, index, int32(maxIndex64)) - - // The index maps to a finite boundary near MaxFloat. - bound, err := m.LowerBoundary(index) - require.NoError(t, err) - - base, _ := m.LowerBoundary(1) - - require.Less(t, bound, MaxValue) - - // The expected ratio equals the base factor. - require.InEpsilon(t, (MaxValue-bound)/bound, base-1, 1e-6) - - // One larger index will overflow. - _, err = m.LowerBoundary(index + 1) - require.Equal(t, err, mapping.ErrOverflow) - - // Two larger will overflow. - _, err = m.LowerBoundary(index + 2) - require.Equal(t, err, mapping.ErrOverflow) - } -} - -// TestLogarithmIndexMin ensures that for every valid scale, the -// smallest normal number and all smaller numbers map to the correct -// index.
-func TestLogarithmIndexMin(t *testing.T) { - for scale := MinScale; scale <= MaxScale; scale++ { - m, err := NewMapping(scale) - require.NoError(t, err) - - minIndex := m.MapToIndex(MinValue) - - correctMinIndex := (int64(MinNormalExponent) << scale) - 1 - require.Greater(t, correctMinIndex, int64(math.MinInt32)) - require.Equal(t, minIndex, int32(correctMinIndex)) - - correctMapped := roundedBoundary(scale, int32(correctMinIndex)) - require.Less(t, correctMapped, MinValue) - - correctMappedUpper := roundedBoundary(scale, int32(correctMinIndex+1)) - require.Equal(t, correctMappedUpper, MinValue) - - mapped, err := m.LowerBoundary(minIndex + 1) - require.NoError(t, err) - require.InEpsilon(t, mapped, MinValue, 1e-6) - - // Subnormal values map to the min index: - require.Equal(t, m.MapToIndex(MinValue/2), int32(correctMinIndex)) - require.Equal(t, m.MapToIndex(MinValue/3), int32(correctMinIndex)) - require.Equal(t, m.MapToIndex(MinValue/100), int32(correctMinIndex)) - require.Equal(t, m.MapToIndex(0x1p-1050), int32(correctMinIndex)) - require.Equal(t, m.MapToIndex(0x1p-1073), int32(correctMinIndex)) - require.Equal(t, m.MapToIndex(0x1.1p-1073), int32(correctMinIndex)) - require.Equal(t, m.MapToIndex(0x1p-1074), int32(correctMinIndex)) - - // All subnormal values and MinValue map to the min index: - mappedLower, err := m.LowerBoundary(minIndex) - require.NoError(t, err) - require.InEpsilon(t, correctMapped, mappedLower, 1e-6) - - // One smaller index will underflow. - _, err = m.LowerBoundary(minIndex - 1) - require.Equal(t, err, mapping.ErrUnderflow) - } -} - -// TestExponentIndexMax ensures that for every valid scale, MaxFloat -// maps into the correct maximum index. Also tests that the reverse -// lookup does not produce infinity and the following index produces -// an overflow error. -func TestExponentIndexMax(t *testing.T) { - for scale := MinScale; scale <= MaxScale; scale++ { - m, err := NewMapping(scale) - require.NoError(t, err) - - index := m.MapToIndex(MaxValue) - - // Correct max index is one less than the first index - // that overflows math.MaxFloat64, i.e., one less than - // the index of +Inf. - maxIndex64 := (int64(MaxNormalExponent+1) << scale) - 1 - require.Less(t, maxIndex64, int64(math.MaxInt32)) - require.Equal(t, index, int32(maxIndex64)) - - // The index maps to a finite boundary near MaxFloat. - bound, err := m.LowerBoundary(index) - require.NoError(t, err) - - base, _ := m.LowerBoundary(1) - - require.Less(t, bound, MaxValue) - - // The expected ratio equals the base factor. - require.InEpsilon(t, (MaxValue-bound)/bound, base-1, 1e-6) - - // One larger index will overflow. - _, err = m.LowerBoundary(index + 1) - require.Equal(t, err, mapping.ErrOverflow) - } -} diff --git a/sdk/metric/aggregator/exponential/mapping/mapping.go b/sdk/metric/aggregator/exponential/mapping/mapping.go deleted file mode 100644 index 19bf9df72d1..00000000000 --- a/sdk/metric/aggregator/exponential/mapping/mapping.go +++ /dev/null @@ -1,48 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and -// limitations under the License. - -package mapping // import "go.opentelemetry.io/otel/sdk/metric/aggregator/exponential/mapping" - -import "fmt" - -// Mapping is the interface of an exponential histogram mapper. -type Mapping interface { - // MapToIndex maps positive floating point values to indexes - // corresponding to Scale(). Implementations are not expected - // to handle zeros, +Inf, NaN, or negative values. - MapToIndex(value float64) int32 - - // LowerBoundary returns the lower boundary of a given bucket - // index. The index is expected to map onto a range that is - // at least partially inside the range of normalized floating - // point values. If the corresponding bucket's upper boundary - // is less than or equal to 0x1p-1022, ErrUnderflow will be - // returned. If the corresponding bucket's lower boundary is - // greater than math.MaxFloat64, ErrOverflow will be returned. - LowerBoundary(index int32) (float64, error) - - // Scale returns the parameter that controls the resolution of - // this mapping. For details see: - // https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/datamodel.md#exponential-scale - Scale() int32 -} - -var ( - // ErrUnderflow is returned when computing the lower boundary - // of an index that maps into a denormalized floating point value. - ErrUnderflow = fmt.Errorf("underflow") - // ErrOverflow is returned when computing the lower boundary - // of an index that maps into +Inf. - ErrOverflow = fmt.Errorf("overflow") -) diff --git a/sdk/metric/aggregator/histogram/benchmark_test.go b/sdk/metric/aggregator/histogram/benchmark_test.go deleted file mode 100644 index 597af3eb714..00000000000 --- a/sdk/metric/aggregator/histogram/benchmark_test.go +++ /dev/null @@ -1,130 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
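To make the `Mapping` contract above concrete, here is a minimal usage sketch (not part of the patch). It assumes a checkout from before this merge, where the `mapping/logarithm` package still exists; the calls and signatures match the deleted files shown above. A bucket is the half-open interval between two adjacent lower boundaries:

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/sdk/metric/aggregator/exponential/mapping/logarithm"
)

func main() {
	// At scale 3 the base is 2**(2**-3), roughly 1.0905.
	m, err := logarithm.NewMapping(3)
	if err != nil {
		panic(err)
	}

	value := 2.5
	idx := m.MapToIndex(value)

	// MapToIndex and LowerBoundary round-trip: value lies in (lower, upper].
	lower, _ := m.LowerBoundary(idx)
	upper, _ := m.LowerBoundary(idx + 1)

	fmt.Printf("scale=%d index=%d bucket=(%g, %g]\n", m.Scale(), idx, lower, upper)
}
```

For 2.5 at scale 3 this yields index 10 with bucket boundaries 2**(10/8) and 2**(11/8), consistent with the `scaleFactor` derivation in logarithm.go.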
- -package histogram_test - -import ( - "context" - "math/rand" - "testing" - - "go.opentelemetry.io/otel/sdk/metric/aggregator/aggregatortest" - "go.opentelemetry.io/otel/sdk/metric/aggregator/histogram" - "go.opentelemetry.io/otel/sdk/metric/number" - "go.opentelemetry.io/otel/sdk/metric/sdkapi" -) - -const inputRange = 1e6 - -func benchmarkHistogramSearchFloat64(b *testing.B, size int) { - boundaries := make([]float64, size) - - for i := range boundaries { - boundaries[i] = rand.Float64() * inputRange - } - - values := make([]float64, b.N) - for i := range values { - values[i] = rand.Float64() * inputRange - } - desc := aggregatortest.NewAggregatorTest(sdkapi.HistogramInstrumentKind, number.Float64Kind) - agg := &histogram.New(1, desc, histogram.WithExplicitBoundaries(boundaries))[0] - ctx := context.Background() - - b.ReportAllocs() - b.ResetTimer() - - for i := 0; i < b.N; i++ { - _ = agg.Update(ctx, number.NewFloat64Number(values[i]), desc) - } -} - -func BenchmarkHistogramSearchFloat64_1(b *testing.B) { - benchmarkHistogramSearchFloat64(b, 1) -} -func BenchmarkHistogramSearchFloat64_8(b *testing.B) { - benchmarkHistogramSearchFloat64(b, 8) -} -func BenchmarkHistogramSearchFloat64_16(b *testing.B) { - benchmarkHistogramSearchFloat64(b, 16) -} -func BenchmarkHistogramSearchFloat64_32(b *testing.B) { - benchmarkHistogramSearchFloat64(b, 32) -} -func BenchmarkHistogramSearchFloat64_64(b *testing.B) { - benchmarkHistogramSearchFloat64(b, 64) -} -func BenchmarkHistogramSearchFloat64_128(b *testing.B) { - benchmarkHistogramSearchFloat64(b, 128) -} -func BenchmarkHistogramSearchFloat64_256(b *testing.B) { - benchmarkHistogramSearchFloat64(b, 256) -} -func BenchmarkHistogramSearchFloat64_512(b *testing.B) { - benchmarkHistogramSearchFloat64(b, 512) -} -func BenchmarkHistogramSearchFloat64_1024(b *testing.B) { - benchmarkHistogramSearchFloat64(b, 1024) -} - -func benchmarkHistogramSearchInt64(b *testing.B, size int) { - boundaries := make([]float64, size) - - for i := range boundaries { - boundaries[i] = rand.Float64() * inputRange - } - - values := make([]int64, b.N) - for i := range values { - values[i] = int64(rand.Float64() * inputRange) - } - desc := aggregatortest.NewAggregatorTest(sdkapi.HistogramInstrumentKind, number.Int64Kind) - agg := &histogram.New(1, desc, histogram.WithExplicitBoundaries(boundaries))[0] - ctx := context.Background() - - b.ReportAllocs() - b.ResetTimer() - - for i := 0; i < b.N; i++ { - _ = agg.Update(ctx, number.NewInt64Number(values[i]), desc) - } -} - -func BenchmarkHistogramSearchInt64_1(b *testing.B) { - benchmarkHistogramSearchInt64(b, 1) -} -func BenchmarkHistogramSearchInt64_8(b *testing.B) { - benchmarkHistogramSearchInt64(b, 8) -} -func BenchmarkHistogramSearchInt64_16(b *testing.B) { - benchmarkHistogramSearchInt64(b, 16) -} -func BenchmarkHistogramSearchInt64_32(b *testing.B) { - benchmarkHistogramSearchInt64(b, 32) -} -func BenchmarkHistogramSearchInt64_64(b *testing.B) { - benchmarkHistogramSearchInt64(b, 64) -} -func BenchmarkHistogramSearchInt64_128(b *testing.B) { - benchmarkHistogramSearchInt64(b, 128) -} -func BenchmarkHistogramSearchInt64_256(b *testing.B) { - benchmarkHistogramSearchInt64(b, 256) -} -func BenchmarkHistogramSearchInt64_512(b *testing.B) { - benchmarkHistogramSearchInt64(b, 512) -} -func BenchmarkHistogramSearchInt64_1024(b *testing.B) { - benchmarkHistogramSearchInt64(b, 1024) -} diff --git a/sdk/metric/aggregator/histogram/histogram.go b/sdk/metric/aggregator/histogram/histogram.go deleted file mode 100644 index 
69722ace113..00000000000 --- a/sdk/metric/aggregator/histogram/histogram.go +++ /dev/null @@ -1,269 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package histogram // import "go.opentelemetry.io/otel/sdk/metric/aggregator/histogram" - -import ( - "context" - "sort" - "sync" - - "go.opentelemetry.io/otel/sdk/metric/aggregator" - "go.opentelemetry.io/otel/sdk/metric/export/aggregation" - "go.opentelemetry.io/otel/sdk/metric/number" - "go.opentelemetry.io/otel/sdk/metric/sdkapi" -) - -// Note: This code uses a Mutex to govern access to the exclusive -// aggregator state. This is in contrast to a lock-free approach -// (as in the Go prometheus client) that was reverted here: -// https://github.com/open-telemetry/opentelemetry-go/pull/669 - -type ( - // Aggregator observes events and counts them in pre-determined buckets. - // It also calculates the sum and count of all events. - Aggregator struct { - lock sync.Mutex - boundaries []float64 - kind number.Kind - state *state - } - - // config describes how the histogram is aggregated. - config struct { - // explicitBoundaries support arbitrary bucketing schemes. This - // is the general case. - explicitBoundaries []float64 - } - - // Option configures a histogram config. - Option interface { - // apply sets one or more config fields. - apply(*config) - } - - // state represents the state of a histogram, consisting of - // the sum and count of all observed values and - // the less-than-or-equal bucket counts for the pre-determined boundaries. - state struct { - bucketCounts []uint64 - sum number.Number - count uint64 - } -) - -// WithExplicitBoundaries sets the ExplicitBoundaries configuration option of a config. -func WithExplicitBoundaries(explicitBoundaries []float64) Option { - return explicitBoundariesOption{explicitBoundaries} -} - -type explicitBoundariesOption struct { - boundaries []float64 -} - -func (o explicitBoundariesOption) apply(config *config) { - config.explicitBoundaries = o.boundaries -} - -// defaultExplicitBoundaries have been copied from prometheus.DefBuckets. -// -// Note we anticipate the use of a high-precision histogram sketch as -// the standard histogram aggregator for OTLP export. -// (https://github.com/open-telemetry/opentelemetry-specification/issues/982). -var defaultFloat64ExplicitBoundaries = []float64{.005, .01, .025, .05, .1, .25, .5, 1, 2.5, 5, 10} - -// defaultInt64ExplicitBoundaryMultiplier determines the default -// integer histogram boundaries. -const defaultInt64ExplicitBoundaryMultiplier = 1e6 - -// defaultInt64ExplicitBoundaries applies a multiplier to the default -// float64 boundaries: [ 5K, 10K, 25K, ..., 2.5M, 5M, 10M ].
-var defaultInt64ExplicitBoundaries = func(bounds []float64) (asint []float64) { - for _, f := range bounds { - asint = append(asint, defaultInt64ExplicitBoundaryMultiplier*f) - } - return -}(defaultFloat64ExplicitBoundaries) - -var _ aggregator.Aggregator = &Aggregator{} -var _ aggregation.Sum = &Aggregator{} -var _ aggregation.Count = &Aggregator{} -var _ aggregation.Histogram = &Aggregator{} - -// New returns a new aggregator for computing Histograms. -// -// A Histogram observes events and counts them in pre-defined buckets, -// and also provides the total sum and count of all observations. -// -// Note that the Sum, Count, and Histogram accessors read the current -// state without taking the lock, which introduces the possibility that -// checkpoints are inconsistent. -func New(cnt int, desc *sdkapi.Descriptor, opts ...Option) []Aggregator { - var cfg config - - if desc.NumberKind() == number.Int64Kind { - cfg.explicitBoundaries = defaultInt64ExplicitBoundaries - } else { - cfg.explicitBoundaries = defaultFloat64ExplicitBoundaries - } - - for _, opt := range opts { - opt.apply(&cfg) - } - - aggs := make([]Aggregator, cnt) - - // Boundaries MUST be ordered otherwise the histogram could not - // be properly computed. - sortedBoundaries := make([]float64, len(cfg.explicitBoundaries)) - - copy(sortedBoundaries, cfg.explicitBoundaries) - sort.Float64s(sortedBoundaries) - - for i := range aggs { - aggs[i] = Aggregator{ - kind: desc.NumberKind(), - boundaries: sortedBoundaries, - } - aggs[i].state = aggs[i].newState() - } - return aggs -} - -// Aggregation returns an interface for reading the state of this aggregator. -func (c *Aggregator) Aggregation() aggregation.Aggregation { - return c -} - -// Kind returns aggregation.HistogramKind. -func (c *Aggregator) Kind() aggregation.Kind { - return aggregation.HistogramKind -} - -// Sum returns the sum of all values in the checkpoint. -func (c *Aggregator) Sum() (number.Number, error) { - return c.state.sum, nil -} - -// Count returns the number of values in the checkpoint. -func (c *Aggregator) Count() (uint64, error) { - return c.state.count, nil -} - -// Histogram returns the count of events in pre-determined buckets. -func (c *Aggregator) Histogram() (aggregation.Buckets, error) { - return aggregation.Buckets{ - Boundaries: c.boundaries, - Counts: c.state.bucketCounts, - }, nil -} - -// SynchronizedMove saves the current state into oa and resets the current state to -// the empty set. The swap itself occurs under the lock, but the Sum, Count and -// Bucket Count accessors do not take it, so there is a chance that those -// independent values are not consistent with each other. -func (c *Aggregator) SynchronizedMove(oa aggregator.Aggregator, desc *sdkapi.Descriptor) error { - o, _ := oa.(*Aggregator) - - if oa != nil && o == nil { - return aggregator.NewInconsistentAggregatorError(c, oa) - } - - if o != nil { - // Swap case: This is the ordinary case for a - // synchronous instrument, where the SDK allocates two - // Aggregators and lock contention is anticipated. - // Reset the target state before swapping it under the - // lock below. - o.clearState() - } - - c.lock.Lock() - if o != nil { - c.state, o.state = o.state, c.state - } else { - // No swap case: This is the ordinary case for an - // asynchronous instrument, where the SDK allocates a - // single Aggregator and there is no anticipated lock - // contention.
- c.clearState() - } - c.lock.Unlock() - - return nil -} - -func (c *Aggregator) newState() *state { - return &state{ - bucketCounts: make([]uint64, len(c.boundaries)+1), - } -} - -func (c *Aggregator) clearState() { - for i := range c.state.bucketCounts { - c.state.bucketCounts[i] = 0 - } - c.state.sum = 0 - c.state.count = 0 -} - -// Update adds the recorded measurement to the current data set. -func (c *Aggregator) Update(_ context.Context, n number.Number, desc *sdkapi.Descriptor) error { - kind := desc.NumberKind() - asFloat := n.CoerceToFloat64(kind) - - bucketID := len(c.boundaries) - for i, boundary := range c.boundaries { - if asFloat < boundary { - bucketID = i - break - } - } - // Note: Binary-search was compared using the benchmarks. The following - // code is equivalent to the linear search above: - // - // bucketID := sort.Search(len(c.boundaries), func(i int) bool { - // return asFloat < c.boundaries[i] - // }) - // - // The binary search wins for very large boundary sets, but - // the linear search performs better up through arrays between - // 256 and 512 elements, which is a relatively large histogram, so we - // continue to prefer linear search. - - c.lock.Lock() - defer c.lock.Unlock() - - c.state.count++ - c.state.sum.AddNumber(kind, n) - c.state.bucketCounts[bucketID]++ - - return nil -} - -// Merge combines two histograms that have the same buckets into a single one. -func (c *Aggregator) Merge(oa aggregator.Aggregator, desc *sdkapi.Descriptor) error { - o, _ := oa.(*Aggregator) - if o == nil { - return aggregator.NewInconsistentAggregatorError(c, oa) - } - - c.state.sum.AddNumber(desc.NumberKind(), o.state.sum) - c.state.count += o.state.count - - for i := 0; i < len(c.state.bucketCounts); i++ { - c.state.bucketCounts[i] += o.state.bucketCounts[i] - } - return nil -} diff --git a/sdk/metric/aggregator/histogram/histogram_test.go b/sdk/metric/aggregator/histogram/histogram_test.go deleted file mode 100644 index d19940ac54f..00000000000 --- a/sdk/metric/aggregator/histogram/histogram_test.go +++ /dev/null @@ -1,295 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
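The `Update` comment above asserts that the linear scan and a `sort.Search` are equivalent, with linear search winning for typical boundary counts. A standalone restatement of that equivalence (illustrative names, not part of the patch):

```go
package main

import (
	"fmt"
	"sort"
)

// bucketLinear is the linear scan used in Update: the value lands in the
// first bucket whose boundary exceeds it, else in the overflow bucket.
func bucketLinear(boundaries []float64, v float64) int {
	for i, b := range boundaries {
		if v < b {
			return i
		}
	}
	return len(boundaries)
}

// bucketBinary is the equivalent sort.Search form from the comment.
func bucketBinary(boundaries []float64, v float64) int {
	return sort.Search(len(boundaries), func(i int) bool {
		return v < boundaries[i]
	})
}

func main() {
	bounds := []float64{5, 10, 25} // must be sorted ascending
	for _, v := range []float64{1, 5, 7, 100} {
		// Both functions agree; values equal to a boundary go to the
		// next-higher bucket because the comparison is strict.
		fmt.Println(v, bucketLinear(bounds, v), bucketBinary(bounds, v))
	}
}
```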
- -package histogram_test - -import ( - "context" - "math" - "math/rand" - "sort" - "testing" - - "github.com/stretchr/testify/require" - - "go.opentelemetry.io/otel/sdk/metric/aggregator" - "go.opentelemetry.io/otel/sdk/metric/aggregator/aggregatortest" - "go.opentelemetry.io/otel/sdk/metric/aggregator/histogram" - "go.opentelemetry.io/otel/sdk/metric/number" - "go.opentelemetry.io/otel/sdk/metric/sdkapi" -) - -const count = 100 - -type policy struct { - name string - absolute bool - sign func() int -} - -var ( - positiveOnly = policy{ - name: "absolute", - absolute: true, - sign: func() int { return +1 }, - } - negativeOnly = policy{ - name: "negative", - absolute: false, - sign: func() int { return -1 }, - } - positiveAndNegative = policy{ - name: "positiveAndNegative", - absolute: false, - sign: func() int { - if rand.Uint32() > math.MaxUint32/2 { - return -1 - } - return 1 - }, - } - - testBoundaries = []float64{500, 250, 750} -) - -func new2(desc *sdkapi.Descriptor, options ...histogram.Option) (_, _ *histogram.Aggregator) { - alloc := histogram.New(2, desc, options...) - return &alloc[0], &alloc[1] -} - -func new4(desc *sdkapi.Descriptor, options ...histogram.Option) (_, _, _, _ *histogram.Aggregator) { - alloc := histogram.New(4, desc, options...) - return &alloc[0], &alloc[1], &alloc[2], &alloc[3] -} - -func checkZero(t *testing.T, agg *histogram.Aggregator, desc *sdkapi.Descriptor) { - asum, err := agg.Sum() - require.Equal(t, number.Number(0), asum, "Empty checkpoint sum = 0") - require.NoError(t, err) - - count, err := agg.Count() - require.Equal(t, uint64(0), count, "Empty checkpoint count = 0") - require.NoError(t, err) - - buckets, err := agg.Histogram() - require.NoError(t, err) - - require.Equal(t, len(buckets.Counts), len(testBoundaries)+1, "There should be b + 1 counts, where b is the number of boundaries") - for i, bCount := range buckets.Counts { - require.Equal(t, uint64(0), uint64(bCount), "Bucket #%d must have 0 observed values", i) - } -} - -func TestHistogramAbsolute(t *testing.T) { - aggregatortest.RunProfiles(t, func(t *testing.T, profile aggregatortest.Profile) { - testHistogram(t, profile, positiveOnly) - }) -} - -func TestHistogramNegativeOnly(t *testing.T) { - aggregatortest.RunProfiles(t, func(t *testing.T, profile aggregatortest.Profile) { - testHistogram(t, profile, negativeOnly) - }) -} - -func TestHistogramPositiveAndNegative(t *testing.T) { - aggregatortest.RunProfiles(t, func(t *testing.T, profile aggregatortest.Profile) { - testHistogram(t, profile, positiveAndNegative) - }) -} - -// Validates count, sum and buckets for a given profile and policy. -func testHistogram(t *testing.T, profile aggregatortest.Profile, policy policy) { - descriptor := aggregatortest.NewAggregatorTest(sdkapi.HistogramInstrumentKind, profile.NumberKind) - - agg, ckpt := new2(descriptor, histogram.WithExplicitBoundaries(testBoundaries)) - - // This needs to repeat at least 3 times to uncover a failure to reset - // for the overall sum and count fields, since the third time through - // is the first time a `histogram.state` object is reused. 
- for repeat := 0; repeat < 3; repeat++ { - all := aggregatortest.NewNumbers(profile.NumberKind) - - for i := 0; i < count; i++ { - x := profile.Random(policy.sign()) - all.Append(x) - aggregatortest.CheckedUpdate(t, agg, x, descriptor) - } - - require.NoError(t, agg.SynchronizedMove(ckpt, descriptor)) - - checkZero(t, agg, descriptor) - - checkHistogram(t, all, profile, ckpt) - } -} - -func TestHistogramInitial(t *testing.T) { - aggregatortest.RunProfiles(t, func(t *testing.T, profile aggregatortest.Profile) { - descriptor := aggregatortest.NewAggregatorTest(sdkapi.HistogramInstrumentKind, profile.NumberKind) - - agg := &histogram.New(1, descriptor, histogram.WithExplicitBoundaries(testBoundaries))[0] - buckets, err := agg.Histogram() - - require.NoError(t, err) - require.Equal(t, len(buckets.Counts), len(testBoundaries)+1) - require.Equal(t, len(buckets.Boundaries), len(testBoundaries)) - }) -} - -func TestHistogramMerge(t *testing.T) { - aggregatortest.RunProfiles(t, func(t *testing.T, profile aggregatortest.Profile) { - descriptor := aggregatortest.NewAggregatorTest(sdkapi.HistogramInstrumentKind, profile.NumberKind) - - agg1, agg2, ckpt1, ckpt2 := new4(descriptor, histogram.WithExplicitBoundaries(testBoundaries)) - - all := aggregatortest.NewNumbers(profile.NumberKind) - - for i := 0; i < count; i++ { - x := profile.Random(+1) - all.Append(x) - aggregatortest.CheckedUpdate(t, agg1, x, descriptor) - } - for i := 0; i < count; i++ { - x := profile.Random(+1) - all.Append(x) - aggregatortest.CheckedUpdate(t, agg2, x, descriptor) - } - - require.NoError(t, agg1.SynchronizedMove(ckpt1, descriptor)) - require.NoError(t, agg2.SynchronizedMove(ckpt2, descriptor)) - - aggregatortest.CheckedMerge(t, ckpt1, ckpt2, descriptor) - - checkHistogram(t, all, profile, ckpt1) - }) -} - -func TestHistogramNotSet(t *testing.T) { - aggregatortest.RunProfiles(t, func(t *testing.T, profile aggregatortest.Profile) { - descriptor := aggregatortest.NewAggregatorTest(sdkapi.HistogramInstrumentKind, profile.NumberKind) - - agg, ckpt := new2(descriptor, histogram.WithExplicitBoundaries(testBoundaries)) - - err := agg.SynchronizedMove(ckpt, descriptor) - require.NoError(t, err) - - checkZero(t, agg, descriptor) - checkZero(t, ckpt, descriptor) - }) -} - -// checkHistogram ensures the correct aggregated state between `all` -// (test aggregator) and `agg` (code under test). 
-func checkHistogram(t *testing.T, all aggregatortest.Numbers, profile aggregatortest.Profile, agg *histogram.Aggregator) { - all.Sort() - - asum, err := agg.Sum() - require.NoError(t, err) - - sum := all.Sum() - require.InEpsilon(t, - sum.CoerceToFloat64(profile.NumberKind), - asum.CoerceToFloat64(profile.NumberKind), - 0.000000001) - - count, err := agg.Count() - require.NoError(t, err) - require.Equal(t, all.Count(), count) - - buckets, err := agg.Histogram() - require.NoError(t, err) - - require.Equal(t, len(buckets.Counts), len(testBoundaries)+1, - "There should be b + 1 counts, where b is the number of boundaries") - - sortedBoundaries := make([]float64, len(testBoundaries)) - copy(sortedBoundaries, testBoundaries) - - sort.Float64s(sortedBoundaries) - - require.EqualValues(t, sortedBoundaries, buckets.Boundaries) - - counts := make([]uint64, len(sortedBoundaries)+1) - idx := 0 - for _, p := range all.Points() { - for idx < len(sortedBoundaries) && p.CoerceToFloat64(profile.NumberKind) >= sortedBoundaries[idx] { - idx++ - } - counts[idx]++ - } - for i, v := range counts { - bCount := uint64(buckets.Counts[i]) - require.Equal(t, v, bCount, "Wrong bucket #%d count: %v != %v", i, counts, buckets.Counts) - } -} - -func TestSynchronizedMoveReset(t *testing.T) { - aggregatortest.SynchronizedMoveResetTest( - t, - sdkapi.HistogramInstrumentKind, - func(desc *sdkapi.Descriptor) aggregator.Aggregator { - return &histogram.New(1, desc, histogram.WithExplicitBoundaries(testBoundaries))[0] - }, - ) -} - -func TestHistogramDefaultBoundaries(t *testing.T) { - aggregatortest.RunProfiles(t, func(t *testing.T, profile aggregatortest.Profile) { - ctx := context.Background() - descriptor := aggregatortest.NewAggregatorTest(sdkapi.HistogramInstrumentKind, profile.NumberKind) - - agg, ckpt := new2(descriptor) - - bounds := []float64{.005, .01, .025, .05, .1, .25, .5, 1, 2.5, 5, 10} // len 11 - values := append(bounds, 100) // len 12 - expect := []uint64{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1} // len 12 - - for _, value := range values { - var num number.Number - - value -= .001 // Avoid exact boundaries - - if descriptor.NumberKind() == number.Int64Kind { - value *= 1e6 - num = number.NewInt64Number(int64(value)) - } else { - num = number.NewFloat64Number(value) - } - - require.NoError(t, agg.Update(ctx, num, descriptor)) - } - - bucks, err := agg.Histogram() - require.NoError(t, err) - - // Check for proper lengths, 1 count in each bucket. - require.Equal(t, len(values), len(bucks.Counts)) - require.Equal(t, len(bounds), len(bucks.Boundaries)) - require.EqualValues(t, expect, bucks.Counts) - - require.Equal(t, expect, bucks.Counts) - - // Move and repeat the test on `ckpt`. - err = agg.SynchronizedMove(ckpt, descriptor) - require.NoError(t, err) - - bucks, err = ckpt.Histogram() - require.NoError(t, err) - - require.Equal(t, len(values), len(bucks.Counts)) - require.Equal(t, len(bounds), len(bucks.Boundaries)) - require.EqualValues(t, expect, bucks.Counts) - }) -} diff --git a/sdk/metric/aggregator/lastvalue/lastvalue.go b/sdk/metric/aggregator/lastvalue/lastvalue.go deleted file mode 100644 index 17e51faefc1..00000000000 --- a/sdk/metric/aggregator/lastvalue/lastvalue.go +++ /dev/null @@ -1,133 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package lastvalue // import "go.opentelemetry.io/otel/sdk/metric/aggregator/lastvalue" - -import ( - "context" - "sync/atomic" - "time" - "unsafe" - - "go.opentelemetry.io/otel/sdk/metric/aggregator" - "go.opentelemetry.io/otel/sdk/metric/export/aggregation" - "go.opentelemetry.io/otel/sdk/metric/number" - "go.opentelemetry.io/otel/sdk/metric/sdkapi" -) - -type ( - - // Aggregator aggregates lastValue events. - Aggregator struct { - // value is an atomic pointer to *lastValueData. It is never nil. - value unsafe.Pointer - } - - // lastValueData stores the current value of a lastValue along with - // a sequence number to determine the winner of a race. - lastValueData struct { - // value is the int64- or float64-encoded Set() data - // - // value needs to be aligned for 64-bit atomic operations. - value number.Number - - // timestamp indicates when this record was submitted. This can be - // used to pick a winner when multiple records contain lastValue data - // for the same attributes due to races. - timestamp time.Time - } -) - -var _ aggregator.Aggregator = &Aggregator{} -var _ aggregation.LastValue = &Aggregator{} - -// An unset lastValue has zero timestamp and zero value. -var unsetLastValue = &lastValueData{} - -// New returns a new lastValue aggregator. This aggregator retains the -// last value and timestamp that were recorded. -func New(cnt int) []Aggregator { - aggs := make([]Aggregator, cnt) - for i := range aggs { - aggs[i] = Aggregator{ - value: unsafe.Pointer(unsetLastValue), - } - } - return aggs -} - -// Aggregation returns an interface for reading the state of this aggregator. -func (g *Aggregator) Aggregation() aggregation.Aggregation { - return g -} - -// Kind returns aggregation.LastValueKind. -func (g *Aggregator) Kind() aggregation.Kind { - return aggregation.LastValueKind -} - -// LastValue returns the last-recorded lastValue value and the -// corresponding timestamp. The error value aggregation.ErrNoData -// will be returned if (due to a race condition) the checkpoint was -// computed before the first value was set. -func (g *Aggregator) LastValue() (number.Number, time.Time, error) { - gd := (*lastValueData)(g.value) - if gd == unsetLastValue { - return 0, time.Time{}, aggregation.ErrNoData - } - return gd.value.AsNumber(), gd.timestamp, nil -} - -// SynchronizedMove atomically saves the current value. -func (g *Aggregator) SynchronizedMove(oa aggregator.Aggregator, _ *sdkapi.Descriptor) error { - if oa == nil { - atomic.StorePointer(&g.value, unsafe.Pointer(unsetLastValue)) - return nil - } - o, _ := oa.(*Aggregator) - if o == nil { - return aggregator.NewInconsistentAggregatorError(g, oa) - } - o.value = atomic.SwapPointer(&g.value, unsafe.Pointer(unsetLastValue)) - return nil -} - -// Update atomically sets the current "last" value. -func (g *Aggregator) Update(_ context.Context, n number.Number, desc *sdkapi.Descriptor) error { - ngd := &lastValueData{ - value: n, - timestamp: time.Now(), - } - atomic.StorePointer(&g.value, unsafe.Pointer(ngd)) - return nil -} - -// Merge combines state from two aggregators. 
The most-recently set -// value is chosen. -func (g *Aggregator) Merge(oa aggregator.Aggregator, desc *sdkapi.Descriptor) error { - o, _ := oa.(*Aggregator) - if o == nil { - return aggregator.NewInconsistentAggregatorError(g, oa) - } - - ggd := (*lastValueData)(atomic.LoadPointer(&g.value)) - ogd := (*lastValueData)(atomic.LoadPointer(&o.value)) - - if ggd.timestamp.After(ogd.timestamp) { - return nil - } - - g.value = unsafe.Pointer(ogd) - return nil -} diff --git a/sdk/metric/aggregator/lastvalue/lastvalue_test.go b/sdk/metric/aggregator/lastvalue/lastvalue_test.go deleted file mode 100644 index fd47ad8c4a3..00000000000 --- a/sdk/metric/aggregator/lastvalue/lastvalue_test.go +++ /dev/null @@ -1,146 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package lastvalue - -import ( - "errors" - "math/rand" - "os" - "testing" - "time" - "unsafe" - - "github.com/stretchr/testify/require" - - ottest "go.opentelemetry.io/otel/internal/internaltest" - "go.opentelemetry.io/otel/sdk/metric/aggregator" - "go.opentelemetry.io/otel/sdk/metric/aggregator/aggregatortest" - "go.opentelemetry.io/otel/sdk/metric/export/aggregation" - "go.opentelemetry.io/otel/sdk/metric/number" - "go.opentelemetry.io/otel/sdk/metric/sdkapi" -) - -const count = 100 - -var _ aggregator.Aggregator = &Aggregator{} - -// Ensure struct alignment prior to running tests. 
-func TestMain(m *testing.M) { - fields := []ottest.FieldOffset{ - { - Name: "lastValueData.value", - Offset: unsafe.Offsetof(lastValueData{}.value), - }, - } - if !ottest.Aligned8Byte(fields, os.Stderr) { - os.Exit(1) - } - - os.Exit(m.Run()) -} - -func new2() (_, _ *Aggregator) { - alloc := New(2) - return &alloc[0], &alloc[1] -} - -func new4() (_, _, _, _ *Aggregator) { - alloc := New(4) - return &alloc[0], &alloc[1], &alloc[2], &alloc[3] -} - -func checkZero(t *testing.T, agg *Aggregator) { - lv, ts, err := agg.LastValue() - require.True(t, errors.Is(err, aggregation.ErrNoData)) - require.Equal(t, time.Time{}, ts) - require.Equal(t, number.Number(0), lv) -} - -func TestLastValueUpdate(t *testing.T) { - aggregatortest.RunProfiles(t, func(t *testing.T, profile aggregatortest.Profile) { - agg, ckpt := new2() - - record := aggregatortest.NewAggregatorTest(sdkapi.GaugeObserverInstrumentKind, profile.NumberKind) - - var last number.Number - for i := 0; i < count; i++ { - x := profile.Random(rand.Intn(2)*2 - 1) - last = x - aggregatortest.CheckedUpdate(t, agg, x, record) - } - - err := agg.SynchronizedMove(ckpt, record) - require.NoError(t, err) - - lv, _, err := ckpt.LastValue() - require.Equal(t, last, lv, "Same last value - non-monotonic") - require.Nil(t, err) - }) -} - -func TestLastValueMerge(t *testing.T) { - aggregatortest.RunProfiles(t, func(t *testing.T, profile aggregatortest.Profile) { - agg1, agg2, ckpt1, ckpt2 := new4() - - descriptor := aggregatortest.NewAggregatorTest(sdkapi.GaugeObserverInstrumentKind, profile.NumberKind) - - first1 := profile.Random(+1) - first2 := profile.Random(+1) - first1.AddNumber(profile.NumberKind, first2) - - aggregatortest.CheckedUpdate(t, agg1, first1, descriptor) - // Ensure these should not have the same timestamp. - time.Sleep(time.Nanosecond) - aggregatortest.CheckedUpdate(t, agg2, first2, descriptor) - - require.NoError(t, agg1.SynchronizedMove(ckpt1, descriptor)) - require.NoError(t, agg2.SynchronizedMove(ckpt2, descriptor)) - - checkZero(t, agg1) - checkZero(t, agg2) - - _, t1, err := ckpt1.LastValue() - require.Nil(t, err) - _, t2, err := ckpt2.LastValue() - require.Nil(t, err) - require.True(t, t1.Before(t2)) - - aggregatortest.CheckedMerge(t, ckpt1, ckpt2, descriptor) - - lv, ts, err := ckpt1.LastValue() - require.Nil(t, err) - require.Equal(t, t2, ts, "Merged timestamp - non-monotonic") - require.Equal(t, first2, lv, "Merged value - non-monotonic") - }) -} - -func TestLastValueNotSet(t *testing.T) { - descriptor := aggregatortest.NewAggregatorTest(sdkapi.GaugeObserverInstrumentKind, number.Int64Kind) - - g, ckpt := new2() - require.NoError(t, g.SynchronizedMove(ckpt, descriptor)) - - checkZero(t, g) -} - -func TestSynchronizedMoveReset(t *testing.T) { - aggregatortest.SynchronizedMoveResetTest( - t, - sdkapi.GaugeObserverInstrumentKind, - func(desc *sdkapi.Descriptor) aggregator.Aggregator { - return &New(1)[0] - }, - ) -} diff --git a/sdk/metric/aggregator/sum/sum.go b/sdk/metric/aggregator/sum/sum.go deleted file mode 100644 index d5c70e59bdf..00000000000 --- a/sdk/metric/aggregator/sum/sum.go +++ /dev/null @@ -1,88 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package sum // import "go.opentelemetry.io/otel/sdk/metric/aggregator/sum" - -import ( - "context" - - "go.opentelemetry.io/otel/sdk/metric/aggregator" - "go.opentelemetry.io/otel/sdk/metric/export/aggregation" - "go.opentelemetry.io/otel/sdk/metric/number" - "go.opentelemetry.io/otel/sdk/metric/sdkapi" -) - -// Aggregator aggregates counter events. -type Aggregator struct { - // current holds current increments to this counter record - // current needs to be aligned for 64-bit atomic operations. - value number.Number -} - -var _ aggregator.Aggregator = &Aggregator{} -var _ aggregation.Sum = &Aggregator{} - -// New returns a new counter aggregator implemented by atomic -// operations. This aggregator implements the aggregation.Sum -// export interface. -func New(cnt int) []Aggregator { - return make([]Aggregator, cnt) -} - -// Aggregation returns an interface for reading the state of this aggregator. -func (c *Aggregator) Aggregation() aggregation.Aggregation { - return c -} - -// Kind returns aggregation.SumKind. -func (c *Aggregator) Kind() aggregation.Kind { - return aggregation.SumKind -} - -// Sum returns the last-checkpointed sum. This will never return an -// error. -func (c *Aggregator) Sum() (number.Number, error) { - return c.value, nil -} - -// SynchronizedMove atomically saves the current value into oa and resets the -// current sum to zero. -func (c *Aggregator) SynchronizedMove(oa aggregator.Aggregator, _ *sdkapi.Descriptor) error { - if oa == nil { - c.value.SetRawAtomic(0) - return nil - } - o, _ := oa.(*Aggregator) - if o == nil { - return aggregator.NewInconsistentAggregatorError(c, oa) - } - o.value = c.value.SwapNumberAtomic(number.Number(0)) - return nil -} - -// Update atomically adds to the current value. -func (c *Aggregator) Update(_ context.Context, num number.Number, desc *sdkapi.Descriptor) error { - c.value.AddNumberAtomic(desc.NumberKind(), num) - return nil -} - -// Merge combines two counters by adding their sums. -func (c *Aggregator) Merge(oa aggregator.Aggregator, desc *sdkapi.Descriptor) error { - o, _ := oa.(*Aggregator) - if o == nil { - return aggregator.NewInconsistentAggregatorError(c, oa) - } - c.value.AddNumber(desc.NumberKind(), o.value) - return nil -} diff --git a/sdk/metric/aggregator/sum/sum_test.go b/sdk/metric/aggregator/sum/sum_test.go deleted file mode 100644 index c92594a460c..00000000000 --- a/sdk/metric/aggregator/sum/sum_test.go +++ /dev/null @@ -1,154 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
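The sum aggregator that follows checkpoints by atomically swapping the running total out while resetting it to zero, so no concurrent `Update` is lost. A minimal standalone sketch of that swap-and-reset pattern using plain `sync/atomic` on an `int64` (illustrative; the SDK uses its own `number.Number` atomics):

```go
package main

import (
	"fmt"
	"sync/atomic"
)

// atomicSum mimics the SynchronizedMove contract for a plain int64:
// updates are atomic adds, and a checkpoint atomically takes the running
// total while resetting the live value to zero.
type atomicSum struct{ value int64 }

func (a *atomicSum) update(n int64) { atomic.AddInt64(&a.value, n) }

func (a *atomicSum) synchronizedMove(ckpt *atomicSum) {
	ckpt.value = atomic.SwapInt64(&a.value, 0)
}

func main() {
	var live, ckpt atomicSum
	live.update(5)
	live.update(7)
	live.synchronizedMove(&ckpt)
	fmt.Println(ckpt.value, live.value) // prints: 12 0
}
```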
- -package sum - -import ( - "os" - "testing" - "unsafe" - - "github.com/stretchr/testify/require" - - ottest "go.opentelemetry.io/otel/internal/internaltest" - "go.opentelemetry.io/otel/sdk/metric/aggregator" - "go.opentelemetry.io/otel/sdk/metric/aggregator/aggregatortest" - "go.opentelemetry.io/otel/sdk/metric/number" - "go.opentelemetry.io/otel/sdk/metric/sdkapi" -) - -const count = 100 - -// Ensure struct alignment prior to running tests. -func TestMain(m *testing.M) { - fields := []ottest.FieldOffset{ - { - Name: "Aggregator.value", - Offset: unsafe.Offsetof(Aggregator{}.value), - }, - } - if !ottest.Aligned8Byte(fields, os.Stderr) { - os.Exit(1) - } - - os.Exit(m.Run()) -} - -func new2() (_, _ *Aggregator) { - alloc := New(2) - return &alloc[0], &alloc[1] -} - -func new4() (_, _, _, _ *Aggregator) { - alloc := New(4) - return &alloc[0], &alloc[1], &alloc[2], &alloc[3] -} - -func checkZero(t *testing.T, agg *Aggregator, desc *sdkapi.Descriptor) { - kind := desc.NumberKind() - - sum, err := agg.Sum() - require.NoError(t, err) - require.Equal(t, kind.Zero(), sum) -} - -func TestCounterSum(t *testing.T) { - aggregatortest.RunProfiles(t, func(t *testing.T, profile aggregatortest.Profile) { - agg, ckpt := new2() - - descriptor := aggregatortest.NewAggregatorTest(sdkapi.CounterInstrumentKind, profile.NumberKind) - - sum := number.Number(0) - for i := 0; i < count; i++ { - x := profile.Random(+1) - sum.AddNumber(profile.NumberKind, x) - aggregatortest.CheckedUpdate(t, agg, x, descriptor) - } - - err := agg.SynchronizedMove(ckpt, descriptor) - require.NoError(t, err) - - checkZero(t, agg, descriptor) - - asum, err := ckpt.Sum() - require.Equal(t, sum, asum, "Same sum - monotonic") - require.Nil(t, err) - }) -} - -func TestHistogramSum(t *testing.T) { - aggregatortest.RunProfiles(t, func(t *testing.T, profile aggregatortest.Profile) { - agg, ckpt := new2() - - descriptor := aggregatortest.NewAggregatorTest(sdkapi.HistogramInstrumentKind, profile.NumberKind) - - sum := number.Number(0) - - for i := 0; i < count; i++ { - r1 := profile.Random(+1) - r2 := profile.Random(-1) - aggregatortest.CheckedUpdate(t, agg, r1, descriptor) - aggregatortest.CheckedUpdate(t, agg, r2, descriptor) - sum.AddNumber(profile.NumberKind, r1) - sum.AddNumber(profile.NumberKind, r2) - } - - require.NoError(t, agg.SynchronizedMove(ckpt, descriptor)) - checkZero(t, agg, descriptor) - - asum, err := ckpt.Sum() - require.Equal(t, sum, asum, "Same sum - monotonic") - require.Nil(t, err) - }) -} - -func TestCounterMerge(t *testing.T) { - aggregatortest.RunProfiles(t, func(t *testing.T, profile aggregatortest.Profile) { - agg1, agg2, ckpt1, ckpt2 := new4() - - descriptor := aggregatortest.NewAggregatorTest(sdkapi.CounterInstrumentKind, profile.NumberKind) - - sum := number.Number(0) - for i := 0; i < count; i++ { - x := profile.Random(+1) - sum.AddNumber(profile.NumberKind, x) - aggregatortest.CheckedUpdate(t, agg1, x, descriptor) - aggregatortest.CheckedUpdate(t, agg2, x, descriptor) - } - - require.NoError(t, agg1.SynchronizedMove(ckpt1, descriptor)) - require.NoError(t, agg2.SynchronizedMove(ckpt2, descriptor)) - - checkZero(t, agg1, descriptor) - checkZero(t, agg2, descriptor) - - aggregatortest.CheckedMerge(t, ckpt1, ckpt2, descriptor) - - sum.AddNumber(descriptor.NumberKind(), sum) - - asum, err := ckpt1.Sum() - require.Equal(t, sum, asum, "Same sum - monotonic") - require.Nil(t, err) - }) -} - -func TestSynchronizedMoveReset(t *testing.T) { - aggregatortest.SynchronizedMoveResetTest( - t, - 
sdkapi.CounterObserverInstrumentKind, - func(desc *sdkapi.Descriptor) aggregator.Aggregator { - return &New(1)[0] - }, - ) -} diff --git a/sdk/metric/alignment_test.go b/sdk/metric/alignment_test.go deleted file mode 100644 index e0839aa95ee..00000000000 --- a/sdk/metric/alignment_test.go +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package metric - -import ( - "os" - "testing" - "unsafe" - - ottest "go.opentelemetry.io/otel/internal/internaltest" -) - -// Ensure struct alignment prior to running tests. -func TestMain(m *testing.M) { - offsets := map[string]uintptr{ - "record.refMapped.value": unsafe.Offsetof(record{}.refMapped.value), - "record.updateCount": unsafe.Offsetof(record{}.updateCount), - } - var r []ottest.FieldOffset - for name, offset := range offsets { - r = append(r, ottest.FieldOffset{ - Name: name, - Offset: offset, - }) - } - if !ottest.Aligned8Byte(r, os.Stderr) { - os.Exit(1) - } - - os.Exit(m.Run()) -} diff --git a/sdk/metric/benchmark_test.go b/sdk/metric/benchmark_test.go deleted file mode 100644 index ff05792b1b1..00000000000 --- a/sdk/metric/benchmark_test.go +++ /dev/null @@ -1,444 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
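
(Why the TestMain offset checks above exist: on 32-bit platforms, sync/atomic's 64-bit operations require 8-byte-aligned operands, and a misaligned field panics at runtime rather than failing at compile time. A stripped-down sketch of the same guard using only the standard library — the struct and names are illustrative, not the ottest API.)

	package main

	import (
		"fmt"
		"unsafe"
	)

	type record struct {
		flag        uint32 // 4 bytes; on 32-bit this alone would misalign the next field
		_           uint32 // explicit padding restores 8-byte alignment
		updateCount int64  // accessed via sync/atomic; offset must be a multiple of 8
	}

	func main() {
		if off := unsafe.Offsetof(record{}.updateCount); off%8 != 0 {
			panic(fmt.Sprintf("updateCount misaligned at offset %d", off))
		}
		fmt.Println("updateCount is 8-byte aligned")
	}
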
- -package metric_test - -import ( - "context" - "fmt" - "math/rand" - "testing" - - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/metric" - "go.opentelemetry.io/otel/metric/global" - "go.opentelemetry.io/otel/metric/instrument" - "go.opentelemetry.io/otel/metric/instrument/syncfloat64" - "go.opentelemetry.io/otel/metric/instrument/syncint64" - sdk "go.opentelemetry.io/otel/sdk/metric" - "go.opentelemetry.io/otel/sdk/metric/export" - "go.opentelemetry.io/otel/sdk/metric/processor/processortest" - "go.opentelemetry.io/otel/sdk/metric/sdkapi" -) - -type benchFixture struct { - meter metric.Meter - accumulator *sdk.Accumulator - B *testing.B - export.AggregatorSelector -} - -func newFixture(b *testing.B) *benchFixture { - b.ReportAllocs() - bf := &benchFixture{ - B: b, - AggregatorSelector: processortest.AggregatorSelector(), - } - - bf.accumulator = sdk.NewAccumulator(bf) - bf.meter = sdkapi.WrapMeterImpl(bf.accumulator) - return bf -} - -func (f *benchFixture) Process(export.Accumulation) error { - return nil -} - -func (f *benchFixture) Meter(_ string, _ ...metric.MeterOption) metric.Meter { - return f.meter -} - -func (f *benchFixture) iCounter(name string) syncint64.Counter { - ctr, err := f.meter.SyncInt64().Counter(name) - if err != nil { - f.B.Error(err) - } - return ctr -} - -func (f *benchFixture) fCounter(name string) syncfloat64.Counter { - ctr, err := f.meter.SyncFloat64().Counter(name) - if err != nil { - f.B.Error(err) - } - return ctr -} - -func (f *benchFixture) iUpDownCounter(name string) syncint64.UpDownCounter { - ctr, err := f.meter.SyncInt64().UpDownCounter(name) - if err != nil { - f.B.Error(err) - } - return ctr -} - -func (f *benchFixture) fUpDownCounter(name string) syncfloat64.UpDownCounter { - ctr, err := f.meter.SyncFloat64().UpDownCounter(name) - if err != nil { - f.B.Error(err) - } - return ctr -} - -func (f *benchFixture) iHistogram(name string) syncint64.Histogram { - ctr, err := f.meter.SyncInt64().Histogram(name) - if err != nil { - f.B.Error(err) - } - return ctr -} - -func (f *benchFixture) fHistogram(name string) syncfloat64.Histogram { - ctr, err := f.meter.SyncFloat64().Histogram(name) - if err != nil { - f.B.Error(err) - } - return ctr -} - -func makeAttrs(n int) []attribute.KeyValue { - used := map[string]bool{} - l := make([]attribute.KeyValue, n) - for i := 0; i < n; i++ { - var k string - for { - k = fmt.Sprint("k", rand.Intn(1000000000)) - if !used[k] { - used[k] = true - break - } - } - l[i] = attribute.String(k, fmt.Sprint("v", rand.Intn(1000000000))) - } - return l -} - -func benchmarkAttrs(b *testing.B, n int) { - ctx := context.Background() - fix := newFixture(b) - labs := makeAttrs(n) - cnt := fix.iCounter("int64.sum") - - b.ResetTimer() - - for i := 0; i < b.N; i++ { - cnt.Add(ctx, 1, labs...) - } -} - -func BenchmarkInt64CounterAddWithAttrs_1(b *testing.B) { - benchmarkAttrs(b, 1) -} - -func BenchmarkInt64CounterAddWithAttrs_2(b *testing.B) { - benchmarkAttrs(b, 2) -} - -func BenchmarkInt64CounterAddWithAttrs_4(b *testing.B) { - benchmarkAttrs(b, 4) -} - -func BenchmarkInt64CounterAddWithAttrs_8(b *testing.B) { - benchmarkAttrs(b, 8) -} - -func BenchmarkInt64CounterAddWithAttrs_16(b *testing.B) { - benchmarkAttrs(b, 16) -} - -// Note: performance does not depend on attribute set size for the benchmarks -// below--all are benchmarked for a single attribute. - -// Iterators - -var benchmarkIteratorVar attribute.KeyValue - -func benchmarkIterator(b *testing.B, n int) { - attrs := attribute.NewSet(makeAttrs(n)...) 
- b.ResetTimer() - for i := 0; i < b.N; i++ { - iter := attrs.Iter() - for iter.Next() { - benchmarkIteratorVar = iter.Attribute() - } - } -} - -func BenchmarkIterator_0(b *testing.B) { - benchmarkIterator(b, 0) -} - -func BenchmarkIterator_1(b *testing.B) { - benchmarkIterator(b, 1) -} - -func BenchmarkIterator_2(b *testing.B) { - benchmarkIterator(b, 2) -} - -func BenchmarkIterator_4(b *testing.B) { - benchmarkIterator(b, 4) -} - -func BenchmarkIterator_8(b *testing.B) { - benchmarkIterator(b, 8) -} - -func BenchmarkIterator_16(b *testing.B) { - benchmarkIterator(b, 16) -} - -// Counters - -func BenchmarkGlobalInt64CounterAddWithSDK(b *testing.B) { - // Compare with BenchmarkInt64CounterAdd() to see overhead of global - // package. This is in the SDK to avoid the API from depending on the - // SDK. - ctx := context.Background() - fix := newFixture(b) - - global.SetMeterProvider(fix) - - labs := []attribute.KeyValue{attribute.String("A", "B")} - - cnt := fix.iCounter("int64.sum") - - b.ResetTimer() - - for i := 0; i < b.N; i++ { - cnt.Add(ctx, 1, labs...) - } -} - -func BenchmarkInt64CounterAdd(b *testing.B) { - ctx := context.Background() - fix := newFixture(b) - labs := makeAttrs(1) - cnt := fix.iCounter("int64.sum") - - b.ResetTimer() - - for i := 0; i < b.N; i++ { - cnt.Add(ctx, 1, labs...) - } -} - -func BenchmarkFloat64CounterAdd(b *testing.B) { - ctx := context.Background() - fix := newFixture(b) - labs := makeAttrs(1) - cnt := fix.fCounter("float64.sum") - - b.ResetTimer() - - for i := 0; i < b.N; i++ { - cnt.Add(ctx, 1.1, labs...) - } -} - -// UpDownCounter - -func BenchmarkInt64UpDownCounterAdd(b *testing.B) { - ctx := context.Background() - fix := newFixture(b) - labs := makeAttrs(1) - cnt := fix.iUpDownCounter("int64.sum") - - b.ResetTimer() - - for i := 0; i < b.N; i++ { - cnt.Add(ctx, 1, labs...) - } -} - -func BenchmarkFloat64UpDownCounterAdd(b *testing.B) { - ctx := context.Background() - fix := newFixture(b) - labs := makeAttrs(1) - cnt := fix.fUpDownCounter("float64.sum") - - b.ResetTimer() - - for i := 0; i < b.N; i++ { - cnt.Add(ctx, 1.1, labs...) - } -} - -// LastValue - -func BenchmarkInt64LastValueAdd(b *testing.B) { - ctx := context.Background() - fix := newFixture(b) - labs := makeAttrs(1) - mea := fix.iHistogram("int64.lastvalue") - - b.ResetTimer() - - for i := 0; i < b.N; i++ { - mea.Record(ctx, int64(i), labs...) - } -} - -func BenchmarkFloat64LastValueAdd(b *testing.B) { - ctx := context.Background() - fix := newFixture(b) - labs := makeAttrs(1) - mea := fix.fHistogram("float64.lastvalue") - - b.ResetTimer() - - for i := 0; i < b.N; i++ { - mea.Record(ctx, float64(i), labs...) - } -} - -// Histograms - -func BenchmarkInt64HistogramAdd(b *testing.B) { - ctx := context.Background() - fix := newFixture(b) - labs := makeAttrs(1) - mea := fix.iHistogram("int64.histogram") - - b.ResetTimer() - - for i := 0; i < b.N; i++ { - mea.Record(ctx, int64(i), labs...) - } -} - -func BenchmarkFloat64HistogramAdd(b *testing.B) { - ctx := context.Background() - fix := newFixture(b) - labs := makeAttrs(1) - mea := fix.fHistogram("float64.histogram") - - b.ResetTimer() - - for i := 0; i < b.N; i++ { - mea.Record(ctx, float64(i), labs...) 
- } -} - -// Observers - -func BenchmarkObserverRegistration(b *testing.B) { - fix := newFixture(b) - names := make([]string, 0, b.N) - for i := 0; i < b.N; i++ { - names = append(names, fmt.Sprintf("test.%d.lastvalue", i)) - } - - b.ResetTimer() - - for i := 0; i < b.N; i++ { - ctr, _ := fix.meter.AsyncInt64().Counter(names[i]) - _ = fix.meter.RegisterCallback([]instrument.Asynchronous{ctr}, func(context.Context) {}) - } -} - -func BenchmarkGaugeObserverObservationInt64(b *testing.B) { - ctx := context.Background() - fix := newFixture(b) - labs := makeAttrs(1) - ctr, _ := fix.meter.AsyncInt64().Counter("test.lastvalue") - err := fix.meter.RegisterCallback([]instrument.Asynchronous{ctr}, func(ctx context.Context) { - for i := 0; i < b.N; i++ { - ctr.Observe(ctx, (int64)(i), labs...) - } - }) - if err != nil { - b.Errorf("could not register callback: %v", err) - b.FailNow() - } - - b.ResetTimer() - - fix.accumulator.Collect(ctx) -} - -func BenchmarkGaugeObserverObservationFloat64(b *testing.B) { - ctx := context.Background() - fix := newFixture(b) - labs := makeAttrs(1) - ctr, _ := fix.meter.AsyncFloat64().Counter("test.lastvalue") - err := fix.meter.RegisterCallback([]instrument.Asynchronous{ctr}, func(ctx context.Context) { - for i := 0; i < b.N; i++ { - ctr.Observe(ctx, (float64)(i), labs...) - } - }) - if err != nil { - b.Errorf("could not register callback: %v", err) - b.FailNow() - } - - b.ResetTimer() - - fix.accumulator.Collect(ctx) -} - -// BatchRecord - -func benchmarkBatchRecord8Attrs(b *testing.B, numInst int) { - const numAttrs = 8 - ctx := context.Background() - fix := newFixture(b) - labs := makeAttrs(numAttrs) - var meas []syncint64.Counter - - for i := 0; i < numInst; i++ { - meas = append(meas, fix.iCounter(fmt.Sprintf("int64.%d.sum", i))) - } - - b.ResetTimer() - - for i := 0; i < b.N; i++ { - for _, ctr := range meas { - ctr.Add(ctx, 1, labs...) - } - } -} - -func BenchmarkBatchRecord8Attrs_1Instrument(b *testing.B) { - benchmarkBatchRecord8Attrs(b, 1) -} - -func BenchmarkBatchRecord_8Attrs_2Instruments(b *testing.B) { - benchmarkBatchRecord8Attrs(b, 2) -} - -func BenchmarkBatchRecord_8Attrs_4Instruments(b *testing.B) { - benchmarkBatchRecord8Attrs(b, 4) -} - -func BenchmarkBatchRecord_8Attrs_8Instruments(b *testing.B) { - benchmarkBatchRecord8Attrs(b, 8) -} - -// Record creation - -func BenchmarkRepeatedDirectCalls(b *testing.B) { - ctx := context.Background() - fix := newFixture(b) - - c := fix.iCounter("int64.sum") - k := attribute.String("bench", "true") - - b.ResetTimer() - - for i := 0; i < b.N; i++ { - c.Add(ctx, 1, k) - fix.accumulator.Collect(ctx) - } -} diff --git a/sdk/metric/config.go b/sdk/metric/config.go new file mode 100644 index 00000000000..83ae6565410 --- /dev/null +++ b/sdk/metric/config.go @@ -0,0 +1,139 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
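
(All of the removed benchmarks above share one testing.B shape worth preserving when they are ported to the new SDK: report allocations, do setup, reset the timer, then loop b.N times. A generic, self-contained sketch of that pattern:)

	package bench

	import (
		"fmt"
		"testing"
	)

	func BenchmarkPattern(b *testing.B) {
		b.ReportAllocs() // include allocs/op in the results

		data := make([]string, 8) // setup, excluded from timing below
		for i := range data {
			data[i] = fmt.Sprint("k", i)
		}

		b.ResetTimer() // measurement starts here

		var n int
		for i := 0; i < b.N; i++ {
			n += len(data[i%len(data)]) // stand-in for the measured operation
		}
		_ = n // keep the result live so the loop is not optimized away
	}
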
+
+//go:build go1.18
+// +build go1.18
+
+package metric // import "go.opentelemetry.io/otel/sdk/metric"
+
+import (
+	"context"
+	"fmt"
+	"sync"
+
+	"go.opentelemetry.io/otel/sdk/metric/view"
+	"go.opentelemetry.io/otel/sdk/resource"
+)
+
+// config contains configuration options for a MeterProvider.
+type config struct {
+	res     *resource.Resource
+	readers map[Reader][]view.View
+}
+
+// readerSignals returns a force-flush and shutdown function for a
+// MeterProvider to call as its own force-flush and shutdown methods. The
+// force-flush and shutdown methods of all Readers c contains are unified
+// into the single returned functions.
+func (c config) readerSignals() (forceFlush, shutdown func(context.Context) error) {
+	var fFuncs, sFuncs []func(context.Context) error
+	for r := range c.readers {
+		sFuncs = append(sFuncs, r.Shutdown)
+		fFuncs = append(fFuncs, r.ForceFlush)
+	}
+
+	return unify(fFuncs), unifyShutdown(sFuncs)
+}
+
+// unify unifies calling all of funcs into a single function call. All errors
+// returned from calls to funcs will be unified into a single error return
+// value.
+func unify(funcs []func(context.Context) error) func(context.Context) error {
+	return func(ctx context.Context) error {
+		var errs []error
+		for _, f := range funcs {
+			if err := f(ctx); err != nil {
+				errs = append(errs, err)
+			}
+		}
+		switch len(errs) {
+		case 0:
+			return nil
+		case 1:
+			return errs[0]
+		default:
+			return fmt.Errorf("%v", errs)
+		}
+	}
+}
+
+// unifyShutdown unifies calling all of funcs once for a shutdown. If called
+// more than once, an ErrReaderShutdown error is returned.
+func unifyShutdown(funcs []func(context.Context) error) func(context.Context) error {
+	f := unify(funcs)
+	var once sync.Once
+	return func(ctx context.Context) error {
+		err := ErrReaderShutdown
+		once.Do(func() { err = f(ctx) })
+		return err
+	}
+}
+
+// newConfig returns a config configured with options.
+func newConfig(options []Option) config {
+	conf := config{res: resource.Default()}
+	for _, o := range options {
+		conf = o.apply(conf)
+	}
+	return conf
+}
+
+// Option applies a configuration option value to a MeterProvider.
+type Option interface {
+	apply(config) config
+}
+
+// optionFunc applies a set of options to a config.
+type optionFunc func(config) config
+
+// apply returns a config with option(s) applied.
+func (o optionFunc) apply(conf config) config {
+	return o(conf)
+}
+
+// WithResource associates a Resource with a MeterProvider. This Resource
+// represents the entity producing telemetry and is associated with all Meters
+// the MeterProvider will create.
+//
+// By default, if this Option is not used, the default Resource from the
+// go.opentelemetry.io/otel/sdk/resource package will be used.
+func WithResource(res *resource.Resource) Option {
+	return optionFunc(func(conf config) config {
+		conf.res = res
+		return conf
+	})
+}
+
+// WithReader associates a Reader with a MeterProvider. Any passed view config
+// will be used to associate a view with the Reader. If no views are passed,
+// the default view will be used for the Reader.
+//
+// Passing this option multiple times for the same Reader will overwrite the
+// previously associated views; the last option passed will be the one used
+// for that Reader.
+//
+// By default, if this option is not used, the MeterProvider will perform no
+// operations; no data will be exported without a Reader.
+func WithReader(r Reader, views ...view.View) Option { + return optionFunc(func(cfg config) config { + if cfg.readers == nil { + cfg.readers = make(map[Reader][]view.View) + } + if len(views) == 0 { + views = []view.View{{}} + } + + cfg.readers[r] = views + return cfg + }) +} diff --git a/sdk/metric/config_test.go b/sdk/metric/config_test.go new file mode 100644 index 00000000000..be3aa55fb16 --- /dev/null +++ b/sdk/metric/config_test.go @@ -0,0 +1,134 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build go1.18 +// +build go1.18 + +package metric + +import ( + "context" + "fmt" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "go.opentelemetry.io/otel/sdk/metric/aggregation" + "go.opentelemetry.io/otel/sdk/metric/metricdata" + "go.opentelemetry.io/otel/sdk/metric/view" + "go.opentelemetry.io/otel/sdk/resource" +) + +type reader struct { + producer producer + temporalityFunc TemporalitySelector + aggregationFunc AggregationSelector + collectFunc func(context.Context) (metricdata.ResourceMetrics, error) + forceFlushFunc func(context.Context) error + shutdownFunc func(context.Context) error +} + +var _ Reader = (*reader)(nil) + +func (r *reader) aggregation(kind view.InstrumentKind) aggregation.Aggregation { // nolint:revive // import-shadow for method scoped by type. 
+ return r.aggregationFunc(kind) +} + +func (r *reader) register(p producer) { r.producer = p } +func (r *reader) temporality(kind view.InstrumentKind) metricdata.Temporality { + return r.temporalityFunc(kind) +} +func (r *reader) Collect(ctx context.Context) (metricdata.ResourceMetrics, error) { + return r.collectFunc(ctx) +} +func (r *reader) ForceFlush(ctx context.Context) error { return r.forceFlushFunc(ctx) } +func (r *reader) Shutdown(ctx context.Context) error { return r.shutdownFunc(ctx) } + +func TestConfigReaderSignalsEmpty(t *testing.T) { + f, s := config{}.readerSignals() + + require.NotNil(t, f) + require.NotNil(t, s) + + ctx := context.Background() + assert.Nil(t, f(ctx)) + assert.Nil(t, s(ctx)) + assert.ErrorIs(t, s(ctx), ErrReaderShutdown) +} + +func TestConfigReaderSignalsForwarded(t *testing.T) { + var flush, sdown int + r := &reader{ + forceFlushFunc: func(ctx context.Context) error { + flush++ + return nil + }, + shutdownFunc: func(ctx context.Context) error { + sdown++ + return nil + }, + } + c := newConfig([]Option{WithReader(r)}) + f, s := c.readerSignals() + + require.NotNil(t, f) + require.NotNil(t, s) + + ctx := context.Background() + assert.NoError(t, f(ctx)) + assert.NoError(t, f(ctx)) + assert.NoError(t, s(ctx)) + assert.ErrorIs(t, s(ctx), ErrReaderShutdown) + + assert.Equal(t, 2, flush, "flush not called 2 times") + assert.Equal(t, 1, sdown, "shutdown not called 1 time") +} + +func TestConfigReaderSignalsForwardedErrors(t *testing.T) { + r := &reader{ + forceFlushFunc: func(ctx context.Context) error { return assert.AnError }, + shutdownFunc: func(ctx context.Context) error { return assert.AnError }, + } + c := newConfig([]Option{WithReader(r)}) + f, s := c.readerSignals() + + require.NotNil(t, f) + require.NotNil(t, s) + + ctx := context.Background() + assert.ErrorIs(t, f(ctx), assert.AnError) + assert.ErrorIs(t, s(ctx), assert.AnError) + assert.ErrorIs(t, s(ctx), ErrReaderShutdown) +} + +func TestUnifyMultiError(t *testing.T) { + f := func(context.Context) error { return assert.AnError } + funcs := []func(context.Context) error{f, f, f} + errs := []error{assert.AnError, assert.AnError, assert.AnError} + target := fmt.Errorf("%v", errs) + assert.Equal(t, unify(funcs)(context.Background()), target) +} + +func TestWithResource(t *testing.T) { + res := resource.NewSchemaless() + c := newConfig([]Option{WithResource(res)}) + assert.Same(t, res, c.res) +} + +func TestWithReader(t *testing.T) { + r := &reader{} + c := newConfig([]Option{WithReader(r)}) + assert.Contains(t, c.readers, r) +} diff --git a/sdk/metric/controller/basic/config.go b/sdk/metric/controller/basic/config.go deleted file mode 100644 index f3a9830c6af..00000000000 --- a/sdk/metric/controller/basic/config.go +++ /dev/null @@ -1,131 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
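
(Putting the new options together: a sketch of configuring a MeterProvider with the Reader/view plumbing added above. NewMeterProvider and NewManualReader are assumed from elsewhere in this patch series; treat the exact constructor names as illustrative.)

	package main

	import (
		"context"

		sdk "go.opentelemetry.io/otel/sdk/metric"
		"go.opentelemetry.io/otel/sdk/resource"
	)

	func main() {
		ctx := context.Background()

		rdr := sdk.NewManualReader() // assumed constructor: collects on demand

		mp := sdk.NewMeterProvider(
			sdk.WithResource(resource.Default()), // the default when omitted
			sdk.WithReader(rdr),                  // no views => the default view
		)

		// Shutdown fans out to every registered Reader once; a second
		// call reports ErrReaderShutdown via the unifyShutdown wrapper.
		_ = mp.Shutdown(ctx)
		_ = mp.Shutdown(ctx) // returns sdk.ErrReaderShutdown
	}
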
-
-package basic // import "go.opentelemetry.io/otel/sdk/metric/controller/basic"
-
-import (
-	"time"
-
-	"go.opentelemetry.io/otel"
-	"go.opentelemetry.io/otel/sdk/metric/export"
-	"go.opentelemetry.io/otel/sdk/resource"
-)
-
-// config contains configuration for a basic Controller.
-type config struct {
-	// Resource is the OpenTelemetry resource associated with all Meters
-	// created by the Controller.
-	Resource *resource.Resource
-
-	// CollectPeriod is the interval between calls to Collect a
-	// checkpoint.
-	//
-	// When pulling metrics and not exporting, this is the minimum
-	// time between calls to Collect. In a pull-only
-	// configuration, collection is performed on demand; set
-	// CollectPeriod to 0 to always recompute the export record set.
-	//
-	// When exporting metrics, this must be > 0.
-	//
-	// Default value is 10s.
-	CollectPeriod time.Duration
-
-	// CollectTimeout is the timeout of the Context passed to
-	// Collect() and subsequently to Observer instrument callbacks.
-	//
-	// Default value is 10s. If zero, no Collect timeout is applied.
-	CollectTimeout time.Duration
-
-	// Exporter is used for exporting metric data.
-	//
-	// Note: Exporters such as Prometheus that pull data do not implement
-	// export.Exporter. These will directly call Collect() and ForEach().
-	Exporter export.Exporter
-
-	// PushTimeout is the timeout of the Context when an exporter is configured.
-	//
-	// Default value is 10s. If zero, no Export timeout is applied.
-	PushTimeout time.Duration
-}
-
-// Option is the interface that applies the value to a configuration option.
-type Option interface {
-	// apply sets the Option value of a Config.
-	apply(config) config
-}
-
-// WithResource sets the Resource configuration option of a Config by merging it
-// with the Resource configuration in the environment.
-func WithResource(r *resource.Resource) Option {
-	return resourceOption{r}
-}
-
-type resourceOption struct{ *resource.Resource }
-
-func (o resourceOption) apply(cfg config) config {
-	res, err := resource.Merge(cfg.Resource, o.Resource)
-	if err != nil {
-		otel.Handle(err)
-	}
-	cfg.Resource = res
-	return cfg
-}
-
-// WithCollectPeriod sets the CollectPeriod configuration option of a Config.
-func WithCollectPeriod(period time.Duration) Option {
-	return collectPeriodOption(period)
-}
-
-type collectPeriodOption time.Duration
-
-func (o collectPeriodOption) apply(cfg config) config {
-	cfg.CollectPeriod = time.Duration(o)
-	return cfg
-}
-
-// WithCollectTimeout sets the CollectTimeout configuration option of a Config.
-func WithCollectTimeout(timeout time.Duration) Option {
-	return collectTimeoutOption(timeout)
-}
-
-type collectTimeoutOption time.Duration
-
-func (o collectTimeoutOption) apply(cfg config) config {
-	cfg.CollectTimeout = time.Duration(o)
-	return cfg
-}
-
-// WithExporter sets the exporter configuration option of a Config.
-func WithExporter(exporter export.Exporter) Option {
-	return exporterOption{exporter}
-}
-
-type exporterOption struct{ exporter export.Exporter }
-
-func (o exporterOption) apply(cfg config) config {
-	cfg.Exporter = o.exporter
-	return cfg
-}
-
-// WithPushTimeout sets the PushTimeout configuration option of a Config.
-func WithPushTimeout(timeout time.Duration) Option { - return pushTimeoutOption(timeout) -} - -type pushTimeoutOption time.Duration - -func (o pushTimeoutOption) apply(cfg config) config { - cfg.PushTimeout = time.Duration(o) - return cfg -} diff --git a/sdk/metric/controller/basic/config_test.go b/sdk/metric/controller/basic/config_test.go deleted file mode 100644 index 32757b8a966..00000000000 --- a/sdk/metric/controller/basic/config_test.go +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package basic - -import ( - "testing" - - "github.com/stretchr/testify/assert" - - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/sdk/resource" -) - -func TestWithResource(t *testing.T) { - r := resource.NewSchemaless(attribute.String("A", "a")) - - c := config{} - c = WithResource(r).apply(c) - assert.Equal(t, r.Equivalent(), c.Resource.Equivalent()) - - // Ensure overwriting works. - c = config{Resource: &resource.Resource{}} - c = WithResource(r).apply(c) - assert.Equal(t, r.Equivalent(), c.Resource.Equivalent()) -} diff --git a/sdk/metric/controller/basic/controller.go b/sdk/metric/controller/basic/controller.go deleted file mode 100644 index 31ddb0f1509..00000000000 --- a/sdk/metric/controller/basic/controller.go +++ /dev/null @@ -1,382 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package basic // import "go.opentelemetry.io/otel/sdk/metric/controller/basic" - -import ( - "context" - "fmt" - "sync" - "time" - - "go.opentelemetry.io/otel" - "go.opentelemetry.io/otel/metric" - "go.opentelemetry.io/otel/sdk/instrumentation" - sdk "go.opentelemetry.io/otel/sdk/metric" - controllerTime "go.opentelemetry.io/otel/sdk/metric/controller/time" - "go.opentelemetry.io/otel/sdk/metric/export" - "go.opentelemetry.io/otel/sdk/metric/registry" - "go.opentelemetry.io/otel/sdk/metric/sdkapi" - "go.opentelemetry.io/otel/sdk/resource" -) - -// DefaultPeriod is used for: -// -// - the minimum time between calls to Collect() -// - the timeout for Export() -// - the timeout for Collect(). -const DefaultPeriod = 10 * time.Second - -// ErrControllerStarted indicates that a controller was started more -// than once. -var ErrControllerStarted = fmt.Errorf("controller already started") - -// Controller organizes and synchronizes collection of metric data in -// both "pull" and "push" configurations. 
This supports two distinct -// modes: -// -// - Push and Pull: Start() must be called to begin calling the exporter; -// Collect() is called periodically by a background thread after starting -// the controller. -// - Pull-Only: Start() is optional in this case, to call Collect periodically. -// If Start() is not called, Collect() can be called manually to initiate -// collection -// -// The controller supports mixing push and pull access to metric data -// using the export.Reader RWLock interface. Collection will -// be blocked by a pull request in the basic controller. -type Controller struct { - // lock synchronizes Start() and Stop(). - lock sync.Mutex - scopes sync.Map - checkpointerFactory export.CheckpointerFactory - - resource *resource.Resource - exporter export.Exporter - wg sync.WaitGroup - stopCh chan struct{} - clock controllerTime.Clock - ticker controllerTime.Ticker - - collectPeriod time.Duration - collectTimeout time.Duration - pushTimeout time.Duration - - // collectedTime is used only in configurations with no - // exporter, when ticker != nil. - collectedTime time.Time -} - -var _ export.InstrumentationLibraryReader = &Controller{} -var _ metric.MeterProvider = &Controller{} - -// Meter returns a new Meter defined by instrumentationName and configured -// with opts. -func (c *Controller) Meter(instrumentationName string, opts ...metric.MeterOption) metric.Meter { - cfg := metric.NewMeterConfig(opts...) - scope := instrumentation.Scope{ - Name: instrumentationName, - Version: cfg.InstrumentationVersion(), - SchemaURL: cfg.SchemaURL(), - } - - m, ok := c.scopes.Load(scope) - if !ok { - checkpointer := c.checkpointerFactory.NewCheckpointer() - m, _ = c.scopes.LoadOrStore( - scope, - registry.NewUniqueInstrumentMeterImpl(&accumulatorCheckpointer{ - Accumulator: sdk.NewAccumulator(checkpointer), - checkpointer: checkpointer, - scope: scope, - })) - } - return sdkapi.WrapMeterImpl(m.(*registry.UniqueInstrumentMeterImpl)) -} - -type accumulatorCheckpointer struct { - *sdk.Accumulator - checkpointer export.Checkpointer - scope instrumentation.Scope -} - -var _ sdkapi.MeterImpl = &accumulatorCheckpointer{} - -// New constructs a Controller using the provided checkpointer factory -// and options (including optional exporter) to configure a metric -// export pipeline. -func New(checkpointerFactory export.CheckpointerFactory, opts ...Option) *Controller { - c := config{ - CollectPeriod: DefaultPeriod, - CollectTimeout: DefaultPeriod, - PushTimeout: DefaultPeriod, - } - for _, opt := range opts { - c = opt.apply(c) - } - if c.Resource == nil { - c.Resource = resource.Default() - } else { - var err error - c.Resource, err = resource.Merge(resource.Environment(), c.Resource) - if err != nil { - otel.Handle(err) - } - } - return &Controller{ - checkpointerFactory: checkpointerFactory, - exporter: c.Exporter, - resource: c.Resource, - stopCh: nil, - clock: controllerTime.RealClock{}, - - collectPeriod: c.CollectPeriod, - collectTimeout: c.CollectTimeout, - pushTimeout: c.PushTimeout, - } -} - -// SetClock supports setting a mock clock for testing. This must be -// called before Start(). -func (c *Controller) SetClock(clock controllerTime.Clock) { - c.lock.Lock() - defer c.lock.Unlock() - c.clock = clock -} - -// Resource returns the *resource.Resource associated with this -// controller. -func (c *Controller) Resource() *resource.Resource { - return c.resource -} - -// Start begins a ticker that periodically collects and exports -// metrics with the configured interval. 
This is required for calling
-// a configured Exporter (see WithExporter) and is otherwise optional
-// when only pulling metric data.
-//
-// The passed context is passed to Collect() and subsequently to
-// asynchronous instrument callbacks. Returns an error when the
-// controller was already started.
-//
-// Note that it is not necessary to Start a controller when only
-// pulling data; use the Collect() and ForEach() methods directly in
-// this case.
-func (c *Controller) Start(ctx context.Context) error {
-	c.lock.Lock()
-	defer c.lock.Unlock()
-
-	if c.stopCh != nil {
-		return ErrControllerStarted
-	}
-
-	c.wg.Add(1)
-	c.stopCh = make(chan struct{})
-	c.ticker = c.clock.Ticker(c.collectPeriod)
-	go c.runTicker(ctx, c.stopCh)
-	return nil
-}
-
-// Stop waits for the background goroutine to return and then collects
-// and exports metrics one last time before returning. The passed
-// context is passed to the final Collect() and subsequently to the
-// final asynchronous instruments.
-//
-// Note that Stop() will not cancel an ongoing collection or export.
-func (c *Controller) Stop(ctx context.Context) error {
-	if lastCollection := func() bool {
-		c.lock.Lock()
-		defer c.lock.Unlock()
-
-		if c.stopCh == nil {
-			return false
-		}
-
-		close(c.stopCh)
-		c.stopCh = nil
-		c.wg.Wait()
-		c.ticker.Stop()
-		c.ticker = nil
-		return true
-	}(); !lastCollection {
-		return nil
-	}
-	return c.collect(ctx)
-}
-
-// runTicker runs collection on ticker events until the stop channel is closed.
-func (c *Controller) runTicker(ctx context.Context, stopCh chan struct{}) {
-	defer c.wg.Done()
-	for {
-		select {
-		case <-stopCh:
-			return
-		case <-c.ticker.C():
-			if err := c.collect(ctx); err != nil {
-				otel.Handle(err)
-			}
-		}
-	}
-}
-
-// collect computes a checkpoint and optionally exports it.
-func (c *Controller) collect(ctx context.Context) error {
-	if err := c.checkpoint(ctx); err != nil {
-		return err
-	}
-	if c.exporter == nil {
-		return nil
-	}
-
-	// Note: this is not subject to collectTimeout. This blocks the next
-	// collection despite collectTimeout because it holds a lock.
-	return c.export(ctx)
-}
-
-// accumulatorList returns a snapshot of current accumulators
-// registered to this controller. This briefly locks the controller.
-func (c *Controller) accumulatorList() []*accumulatorCheckpointer {
-	var r []*accumulatorCheckpointer
-	c.scopes.Range(func(key, value interface{}) bool {
-		acc, ok := value.(*registry.UniqueInstrumentMeterImpl).MeterImpl().(*accumulatorCheckpointer)
-		if ok {
-			r = append(r, acc)
-		}
-		return true
-	})
-	return r
-}
-
-// checkpoint calls the Accumulator and Checkpointer interfaces to
-// compute the Reader. This applies the configured collection
-// timeout. Note that this does not try to cancel a Collect or Export
-// when Stop() is called.
-func (c *Controller) checkpoint(ctx context.Context) error {
-	for _, impl := range c.accumulatorList() {
-		if err := c.checkpointSingleAccumulator(ctx, impl); err != nil {
-			return err
-		}
-	}
-	return nil
-}
-
-// checkpointSingleAccumulator checkpoints a single instrumentation
-// scope's accumulator, which involves calling
-// checkpointer.StartCollection, accumulator.Collect, and
-// checkpointer.FinishCollection in sequence.
-func (c *Controller) checkpointSingleAccumulator(ctx context.Context, ac *accumulatorCheckpointer) error { - ckpt := ac.checkpointer.Reader() - ckpt.Lock() - defer ckpt.Unlock() - - ac.checkpointer.StartCollection() - - if c.collectTimeout > 0 { - var cancel context.CancelFunc - ctx, cancel = context.WithTimeout(ctx, c.collectTimeout) - defer cancel() - } - - _ = ac.Accumulator.Collect(ctx) - - var err error - select { - case <-ctx.Done(): - err = ctx.Err() - default: - // The context wasn't done, ok. - } - - // Finish the checkpoint whether the accumulator timed out or not. - if cerr := ac.checkpointer.FinishCollection(); cerr != nil { - if err == nil { - err = cerr - } else { - err = fmt.Errorf("%s: %w", cerr.Error(), err) - } - } - - return err -} - -// export calls the exporter with a read lock on the Reader, -// applying the configured export timeout. -func (c *Controller) export(ctx context.Context) error { // nolint:revive // method name shadows import. - if c.pushTimeout > 0 { - var cancel context.CancelFunc - ctx, cancel = context.WithTimeout(ctx, c.pushTimeout) - defer cancel() - } - - return c.exporter.Export(ctx, c.resource, c) -} - -// ForEach implements export.InstrumentationLibraryReader. -func (c *Controller) ForEach(readerFunc func(l instrumentation.Library, r export.Reader) error) error { - for _, acPair := range c.accumulatorList() { - reader := acPair.checkpointer.Reader() - // TODO: We should not fail fast; instead accumulate errors. - if err := func() error { - reader.RLock() - defer reader.RUnlock() - return readerFunc(acPair.scope, reader) - }(); err != nil { - return err - } - } - return nil -} - -// IsRunning returns true if the controller was started via Start(), -// indicating that the current export.Reader is being kept -// up-to-date. -func (c *Controller) IsRunning() bool { - c.lock.Lock() - defer c.lock.Unlock() - return c.ticker != nil -} - -// Collect requests a collection. The collection will be skipped if -// the last collection is aged less than the configured collection -// period. -func (c *Controller) Collect(ctx context.Context) error { - if c.IsRunning() { - // When there's a non-nil ticker, there's a goroutine - // computing checkpoints with the collection period. - return ErrControllerStarted - } - if !c.shouldCollect() { - return nil - } - - return c.checkpoint(ctx) -} - -// shouldCollect returns true if the collector should collect now, -// based on the timestamp, the last collection time, and the -// configured period. -func (c *Controller) shouldCollect() bool { - c.lock.Lock() - defer c.lock.Unlock() - - if c.collectPeriod == 0 { - return true - } - now := c.clock.Now() - if now.Sub(c.collectedTime) < c.collectPeriod { - return false - } - c.collectedTime = now - return true -} diff --git a/sdk/metric/controller/basic/controller_test.go b/sdk/metric/controller/basic/controller_test.go deleted file mode 100644 index 74904d68c49..00000000000 --- a/sdk/metric/controller/basic/controller_test.go +++ /dev/null @@ -1,493 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package basic_test - -import ( - "context" - "errors" - "fmt" - "testing" - "time" - - "github.com/stretchr/testify/require" - - "go.opentelemetry.io/otel/attribute" - ottest "go.opentelemetry.io/otel/internal/internaltest" - "go.opentelemetry.io/otel/metric/instrument" - "go.opentelemetry.io/otel/sdk/instrumentation" - controller "go.opentelemetry.io/otel/sdk/metric/controller/basic" - "go.opentelemetry.io/otel/sdk/metric/controller/controllertest" - "go.opentelemetry.io/otel/sdk/metric/export" - "go.opentelemetry.io/otel/sdk/metric/export/aggregation" - processor "go.opentelemetry.io/otel/sdk/metric/processor/basic" - "go.opentelemetry.io/otel/sdk/metric/processor/processortest" - "go.opentelemetry.io/otel/sdk/metric/sdkapi" - "go.opentelemetry.io/otel/sdk/resource" -) - -const envVar = "OTEL_RESOURCE_ATTRIBUTES" - -func getMap(t *testing.T, cont *controller.Controller) map[string]float64 { - out := processortest.NewOutput(attribute.DefaultEncoder()) - - require.NoError(t, cont.ForEach( - func(_ instrumentation.Scope, reader export.Reader) error { - return reader.ForEach( - aggregation.CumulativeTemporalitySelector(), - func(record export.Record) error { - return out.AddRecord(record) - }, - ) - })) - return out.Map() -} - -type testContextKey string - -func testContext() context.Context { - ctx := context.Background() - return context.WithValue(ctx, testContextKey("A"), "B") -} - -func checkTestContext(t *testing.T, ctx context.Context) { - require.Equal(t, "B", ctx.Value(testContextKey("A"))) -} - -func TestControllerUsesResource(t *testing.T) { - const envVal = "T=U,key=value" - store, err := ottest.SetEnvVariables(map[string]string{ - envVar: envVal, - }) - - require.NoError(t, err) - defer func() { require.NoError(t, store.Restore()) }() - - cases := []struct { - name string - options []controller.Option - wanted string - }{ - { - name: "explicitly empty resource", - options: []controller.Option{controller.WithResource(resource.Empty())}, - wanted: envVal, - }, - { - name: "uses default if no resource option", - options: nil, - wanted: resource.Default().Encoded(attribute.DefaultEncoder()), - }, - { - name: "explicit resource", - options: []controller.Option{controller.WithResource(resource.NewSchemaless(attribute.String("R", "S")))}, - wanted: "R=S," + envVal, - }, - { - name: "multi resource", - options: []controller.Option{ - controller.WithResource(resource.NewSchemaless(attribute.String("R", "WRONG"))), - controller.WithResource(resource.NewSchemaless(attribute.String("R", "S"))), - controller.WithResource(resource.NewSchemaless(attribute.String("W", "X"))), - controller.WithResource(resource.NewSchemaless(attribute.String("T", "V"))), - }, - wanted: "R=S,T=V,W=X,key=value", - }, - { - name: "user override environment", - options: []controller.Option{ - controller.WithResource(resource.NewSchemaless(attribute.String("T", "V"))), - controller.WithResource(resource.NewSchemaless(attribute.String("key", "I win"))), - }, - wanted: "T=V,key=I win", - }, - } - for _, c := range cases { - t.Run(fmt.Sprintf("case-%s", c.name), func(t *testing.T) { - sel := aggregation.CumulativeTemporalitySelector() - exp := processortest.New(sel, attribute.DefaultEncoder()) - cont := controller.New( - processor.NewFactory( - processortest.AggregatorSelector(), - exp, - ), - append(c.options, controller.WithExporter(exp))..., - ) - ctx := context.Background() - require.NoError(t, 
cont.Start(ctx)) - - ctr, _ := cont.Meter("named").SyncFloat64().Counter("calls.sum") - ctr.Add(context.Background(), 1.) - - // Collect once - require.NoError(t, cont.Stop(ctx)) - - expect := map[string]float64{ - "calls.sum//" + c.wanted: 1., - } - require.EqualValues(t, expect, exp.Values()) - }) - } -} - -func TestStartNoExporter(t *testing.T) { - cont := controller.New( - processor.NewFactory( - processortest.AggregatorSelector(), - aggregation.CumulativeTemporalitySelector(), - ), - controller.WithCollectPeriod(time.Second), - controller.WithResource(resource.Empty()), - ) - mock := controllertest.NewMockClock() - cont.SetClock(mock) - meter := cont.Meter("go.opentelemetry.io/otel/sdk/metric/controller/basic_test#StartNoExporter") - - calls := int64(0) - - counterObserver, err := meter.AsyncInt64().Counter("calls.lastvalue") - require.NoError(t, err) - - err = meter.RegisterCallback([]instrument.Asynchronous{counterObserver}, func(ctx context.Context) { - calls++ - checkTestContext(t, ctx) - counterObserver.Observe(ctx, calls, attribute.String("A", "B")) - }) - require.NoError(t, err) - - // Collect() has not been called. The controller is unstarted. - expect := map[string]float64{} - - // The time advances, but doesn't change the result (not collected). - require.EqualValues(t, expect, getMap(t, cont)) - mock.Add(time.Second) - require.EqualValues(t, expect, getMap(t, cont)) - mock.Add(time.Second) - - expect = map[string]float64{ - "calls.lastvalue/A=B/": 1, - } - - // Collect once - ctx := testContext() - - require.NoError(t, cont.Collect(ctx)) - - require.EqualValues(t, expect, getMap(t, cont)) - mock.Add(time.Second) - require.EqualValues(t, expect, getMap(t, cont)) - mock.Add(time.Second) - - // Again - expect = map[string]float64{ - "calls.lastvalue/A=B/": 2, - } - - require.NoError(t, cont.Collect(ctx)) - - require.EqualValues(t, expect, getMap(t, cont)) - mock.Add(time.Second) - require.EqualValues(t, expect, getMap(t, cont)) - - // Start the controller - require.NoError(t, cont.Start(ctx)) - - for i := 1; i <= 3; i++ { - expect = map[string]float64{ - "calls.lastvalue/A=B/": 2 + float64(i), - } - - mock.Add(time.Second) - require.EqualValues(t, expect, getMap(t, cont)) - } -} - -func TestObserverCanceled(t *testing.T) { - cont := controller.New( - processor.NewFactory( - processortest.AggregatorSelector(), - aggregation.CumulativeTemporalitySelector(), - ), - controller.WithCollectPeriod(0), - controller.WithCollectTimeout(time.Millisecond), - controller.WithResource(resource.Empty()), - ) - meter := cont.Meter("go.opentelemetry.io/otel/sdk/metric/controller/basic_test#ObserverCanceled") - - calls := int64(0) - - counterObserver, err := meter.AsyncInt64().Counter("done.lastvalue") - require.NoError(t, err) - - err = meter.RegisterCallback([]instrument.Asynchronous{counterObserver}, func(ctx context.Context) { - <-ctx.Done() - calls++ - counterObserver.Observe(ctx, calls) - }) - require.NoError(t, err) - - // This relies on the context timing out - err = cont.Collect(context.Background()) - require.Error(t, err) - require.True(t, errors.Is(err, context.DeadlineExceeded)) - - expect := map[string]float64{ - "done.lastvalue//": 1, - } - - require.EqualValues(t, expect, getMap(t, cont)) -} - -func TestObserverContext(t *testing.T) { - cont := controller.New( - processor.NewFactory( - processortest.AggregatorSelector(), - aggregation.CumulativeTemporalitySelector(), - ), - controller.WithCollectTimeout(0), - controller.WithResource(resource.Empty()), - ) - meter := 
cont.Meter("go.opentelemetry.io/otel/sdk/metric/controller/basic_test#ObserverContext") - - counterObserver, err := meter.AsyncInt64().Counter("done.lastvalue") - require.NoError(t, err) - - err = meter.RegisterCallback([]instrument.Asynchronous{counterObserver}, func(ctx context.Context) { - time.Sleep(10 * time.Millisecond) - checkTestContext(t, ctx) - counterObserver.Observe(ctx, 1) - }) - require.NoError(t, err) - - ctx := testContext() - - require.NoError(t, cont.Collect(ctx)) - - expect := map[string]float64{ - "done.lastvalue//": 1, - } - - require.EqualValues(t, expect, getMap(t, cont)) -} - -type blockingExporter struct { - calls int - exporter *processortest.Exporter -} - -func newBlockingExporter() *blockingExporter { - return &blockingExporter{ - exporter: processortest.New( - aggregation.CumulativeTemporalitySelector(), - attribute.DefaultEncoder(), - ), - } -} - -func (b *blockingExporter) Export(ctx context.Context, res *resource.Resource, output export.InstrumentationLibraryReader) error { - var err error - _ = b.exporter.Export(ctx, res, output) - if b.calls == 0 { - // timeout once - <-ctx.Done() - err = ctx.Err() - } - b.calls++ - return err -} - -func (*blockingExporter) TemporalityFor(*sdkapi.Descriptor, aggregation.Kind) aggregation.Temporality { - return aggregation.CumulativeTemporality -} - -func TestExportTimeout(t *testing.T) { - exporter := newBlockingExporter() - cont := controller.New( - processor.NewFactory( - processortest.AggregatorSelector(), - aggregation.CumulativeTemporalitySelector(), - ), - controller.WithCollectPeriod(time.Second), - controller.WithPushTimeout(time.Millisecond), - controller.WithExporter(exporter), - controller.WithResource(resource.Empty()), - ) - mock := controllertest.NewMockClock() - cont.SetClock(mock) - meter := cont.Meter("go.opentelemetry.io/otel/sdk/metric/controller/basic_test#ExportTimeout") - - calls := int64(0) - counterObserver, err := meter.AsyncInt64().Counter("one.lastvalue") - require.NoError(t, err) - - err = meter.RegisterCallback([]instrument.Asynchronous{counterObserver}, func(ctx context.Context) { - calls++ - counterObserver.Observe(ctx, calls) - }) - require.NoError(t, err) - - require.NoError(t, cont.Start(context.Background())) - - // Initial empty state - expect := map[string]float64{} - require.EqualValues(t, expect, exporter.exporter.Values()) - - // Collect after 1s, timeout - mock.Add(time.Second) - - err = testHandler.Flush() - require.Error(t, err) - require.True(t, errors.Is(err, context.DeadlineExceeded)) - - expect = map[string]float64{ - "one.lastvalue//": 1, - } - require.EqualValues(t, expect, exporter.exporter.Values()) - - // Collect again - mock.Add(time.Second) - expect = map[string]float64{ - "one.lastvalue//": 2, - } - require.EqualValues(t, expect, exporter.exporter.Values()) - - err = testHandler.Flush() - require.NoError(t, err) -} - -func TestCollectAfterStopThenStartAgain(t *testing.T) { - exp := processortest.New( - aggregation.CumulativeTemporalitySelector(), - attribute.DefaultEncoder(), - ) - cont := controller.New( - processor.NewFactory( - processortest.AggregatorSelector(), - exp, - ), - controller.WithCollectPeriod(time.Second), - controller.WithExporter(exp), - controller.WithResource(resource.Empty()), - ) - mock := controllertest.NewMockClock() - cont.SetClock(mock) - - meter := cont.Meter("go.opentelemetry.io/otel/sdk/metric/controller/basic_test#CollectAfterStopThenStartAgain") - - calls := 0 - counterObserver, err := meter.AsyncInt64().Counter("one.lastvalue") - 
require.NoError(t, err) - - err = meter.RegisterCallback([]instrument.Asynchronous{counterObserver}, func(ctx context.Context) { - calls++ - counterObserver.Observe(ctx, int64(calls)) - }) - require.NoError(t, err) - - // No collections happen (because mock clock does not advance): - require.NoError(t, cont.Start(context.Background())) - require.True(t, cont.IsRunning()) - - // There's one collection run by Stop(): - require.NoError(t, cont.Stop(context.Background())) - - require.EqualValues(t, map[string]float64{ - "one.lastvalue//": 1, - }, exp.Values()) - require.NoError(t, testHandler.Flush()) - - // Manual collect after Stop still works, subject to - // CollectPeriod. - require.NoError(t, cont.Collect(context.Background())) - require.EqualValues(t, map[string]float64{ - "one.lastvalue//": 2, - }, getMap(t, cont)) - - require.NoError(t, testHandler.Flush()) - require.False(t, cont.IsRunning()) - - // Start again, see that collection proceeds. However, - // explicit collection should still fail. - require.NoError(t, cont.Start(context.Background())) - require.True(t, cont.IsRunning()) - err = cont.Collect(context.Background()) - require.Error(t, err) - require.Equal(t, controller.ErrControllerStarted, err) - - require.NoError(t, cont.Stop(context.Background())) - require.EqualValues(t, map[string]float64{ - "one.lastvalue//": 3, - }, exp.Values()) - require.False(t, cont.IsRunning()) - - // Time has not advanced yet. Now let the ticker perform - // collection: - require.NoError(t, cont.Start(context.Background())) - mock.Add(time.Second) - require.EqualValues(t, map[string]float64{ - "one.lastvalue//": 4, - }, exp.Values()) - - mock.Add(time.Second) - require.EqualValues(t, map[string]float64{ - "one.lastvalue//": 5, - }, exp.Values()) - require.NoError(t, cont.Stop(context.Background())) - require.EqualValues(t, map[string]float64{ - "one.lastvalue//": 6, - }, exp.Values()) -} - -func TestRegistryFunction(t *testing.T) { - exp := processortest.New( - aggregation.CumulativeTemporalitySelector(), - attribute.DefaultEncoder(), - ) - cont := controller.New( - processor.NewFactory( - processortest.AggregatorSelector(), - exp, - ), - controller.WithCollectPeriod(time.Second), - controller.WithExporter(exp), - controller.WithResource(resource.Empty()), - ) - - m1 := cont.Meter("test") - m2 := cont.Meter("test") - - require.NotNil(t, m1) - require.Equal(t, m1, m2) - - c1, err := m1.SyncInt64().Counter("counter.sum") - require.NoError(t, err) - - c2, err := m1.SyncInt64().Counter("counter.sum") - require.NoError(t, err) - - require.Equal(t, c1, c2) - - ctx := context.Background() - - require.NoError(t, cont.Start(ctx)) - - c1.Add(ctx, 10) - c2.Add(ctx, 10) - - require.NoError(t, cont.Stop(ctx)) - - require.EqualValues(t, map[string]float64{ - "counter.sum//": 20, - }, exp.Values()) -} diff --git a/sdk/metric/controller/basic/pull_test.go b/sdk/metric/controller/basic/pull_test.go deleted file mode 100644 index 98496f6e780..00000000000 --- a/sdk/metric/controller/basic/pull_test.go +++ /dev/null @@ -1,121 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package basic_test - -import ( - "context" - "runtime" - "testing" - "time" - - "github.com/stretchr/testify/require" - - "go.opentelemetry.io/otel/attribute" - controller "go.opentelemetry.io/otel/sdk/metric/controller/basic" - "go.opentelemetry.io/otel/sdk/metric/controller/controllertest" - "go.opentelemetry.io/otel/sdk/metric/export/aggregation" - processor "go.opentelemetry.io/otel/sdk/metric/processor/basic" - "go.opentelemetry.io/otel/sdk/metric/processor/processortest" - "go.opentelemetry.io/otel/sdk/resource" -) - -func TestPullNoCollect(t *testing.T) { - puller := controller.New( - processor.NewFactory( - processortest.AggregatorSelector(), - aggregation.CumulativeTemporalitySelector(), - processor.WithMemory(true), - ), - controller.WithCollectPeriod(0), - controller.WithResource(resource.Empty()), - ) - - ctx := context.Background() - meter := puller.Meter("nocache") - counter, err := meter.SyncInt64().Counter("counter.sum") - require.NoError(t, err) - - counter.Add(ctx, 10, attribute.String("A", "B")) - - require.NoError(t, puller.Collect(ctx)) - records := processortest.NewOutput(attribute.DefaultEncoder()) - require.NoError(t, controllertest.ReadAll(puller, aggregation.CumulativeTemporalitySelector(), records.AddInstrumentationLibraryRecord)) - - require.EqualValues(t, map[string]float64{ - "counter.sum/A=B/": 10, - }, records.Map()) - - counter.Add(ctx, 10, attribute.String("A", "B")) - - require.NoError(t, puller.Collect(ctx)) - records = processortest.NewOutput(attribute.DefaultEncoder()) - require.NoError(t, controllertest.ReadAll(puller, aggregation.CumulativeTemporalitySelector(), records.AddInstrumentationLibraryRecord)) - - require.EqualValues(t, map[string]float64{ - "counter.sum/A=B/": 20, - }, records.Map()) -} - -func TestPullWithCollect(t *testing.T) { - puller := controller.New( - processor.NewFactory( - processortest.AggregatorSelector(), - aggregation.CumulativeTemporalitySelector(), - processor.WithMemory(true), - ), - controller.WithCollectPeriod(time.Second), - controller.WithResource(resource.Empty()), - ) - mock := controllertest.NewMockClock() - puller.SetClock(mock) - - ctx := context.Background() - meter := puller.Meter("nocache") - counter, err := meter.SyncInt64().Counter("counter.sum") - require.NoError(t, err) - - counter.Add(ctx, 10, attribute.String("A", "B")) - - require.NoError(t, puller.Collect(ctx)) - records := processortest.NewOutput(attribute.DefaultEncoder()) - require.NoError(t, controllertest.ReadAll(puller, aggregation.CumulativeTemporalitySelector(), records.AddInstrumentationLibraryRecord)) - - require.EqualValues(t, map[string]float64{ - "counter.sum/A=B/": 10, - }, records.Map()) - - counter.Add(ctx, 10, attribute.String("A", "B")) - - // Cached value! 
- require.NoError(t, puller.Collect(ctx)) - records = processortest.NewOutput(attribute.DefaultEncoder()) - require.NoError(t, controllertest.ReadAll(puller, aggregation.CumulativeTemporalitySelector(), records.AddInstrumentationLibraryRecord)) - - require.EqualValues(t, map[string]float64{ - "counter.sum/A=B/": 10, - }, records.Map()) - - mock.Add(time.Second) - runtime.Gosched() - - // Re-computed value! - require.NoError(t, puller.Collect(ctx)) - records = processortest.NewOutput(attribute.DefaultEncoder()) - require.NoError(t, controllertest.ReadAll(puller, aggregation.CumulativeTemporalitySelector(), records.AddInstrumentationLibraryRecord)) - - require.EqualValues(t, map[string]float64{ - "counter.sum/A=B/": 20, - }, records.Map()) -} diff --git a/sdk/metric/controller/basic/push_test.go b/sdk/metric/controller/basic/push_test.go deleted file mode 100644 index 67bbddfdc84..00000000000 --- a/sdk/metric/controller/basic/push_test.go +++ /dev/null @@ -1,230 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package basic_test - -import ( - "context" - "errors" - "fmt" - "runtime" - "sync" - "testing" - "time" - - "github.com/stretchr/testify/require" - - "go.opentelemetry.io/otel" - "go.opentelemetry.io/otel/attribute" - controller "go.opentelemetry.io/otel/sdk/metric/controller/basic" - "go.opentelemetry.io/otel/sdk/metric/controller/controllertest" - "go.opentelemetry.io/otel/sdk/metric/export" - "go.opentelemetry.io/otel/sdk/metric/export/aggregation" - processor "go.opentelemetry.io/otel/sdk/metric/processor/basic" - "go.opentelemetry.io/otel/sdk/metric/processor/processortest" - "go.opentelemetry.io/otel/sdk/resource" -) - -var testResource = resource.NewSchemaless(attribute.String("R", "V")) - -type handler struct { - sync.Mutex - err error -} - -func (h *handler) Handle(err error) { - h.Lock() - h.err = err - h.Unlock() -} - -func (h *handler) Flush() error { - h.Lock() - err := h.err - h.err = nil - h.Unlock() - return err -} - -var testHandler *handler - -func init() { - testHandler = new(handler) - otel.SetErrorHandler(testHandler) -} - -func newExporter() *processortest.Exporter { - return processortest.New( - aggregation.StatelessTemporalitySelector(), - attribute.DefaultEncoder(), - ) -} - -func newCheckpointerFactory() export.CheckpointerFactory { - return processortest.NewCheckpointerFactory( - processortest.AggregatorSelector(), - attribute.DefaultEncoder(), - ) -} - -func TestPushDoubleStop(t *testing.T) { - ctx := context.Background() - exporter := newExporter() - checkpointer := newCheckpointerFactory() - p := controller.New(checkpointer, controller.WithExporter(exporter)) - require.NoError(t, p.Start(ctx)) - require.NoError(t, p.Stop(ctx)) - require.NoError(t, p.Stop(ctx)) -} - -func TestPushDoubleStart(t *testing.T) { - ctx := context.Background() - exporter := newExporter() - checkpointer := newCheckpointerFactory() - p := controller.New(checkpointer, controller.WithExporter(exporter)) - require.NoError(t, 
p.Start(ctx)) - err := p.Start(ctx) - require.Error(t, err) - require.True(t, errors.Is(err, controller.ErrControllerStarted)) - require.NoError(t, p.Stop(ctx)) -} - -func TestPushTicker(t *testing.T) { - exporter := newExporter() - checkpointer := newCheckpointerFactory() - p := controller.New( - checkpointer, - controller.WithExporter(exporter), - controller.WithCollectPeriod(time.Second), - controller.WithResource(testResource), - ) - meter := p.Meter("name") - - mock := controllertest.NewMockClock() - p.SetClock(mock) - - ctx := context.Background() - - counter, err := meter.SyncInt64().Counter("counter.sum") - require.NoError(t, err) - - require.NoError(t, p.Start(ctx)) - - counter.Add(ctx, 3) - - require.EqualValues(t, map[string]float64{}, exporter.Values()) - - mock.Add(time.Second) - runtime.Gosched() - - require.EqualValues(t, map[string]float64{ - "counter.sum//R=V": 3, - }, exporter.Values()) - - require.Equal(t, 1, exporter.ExportCount()) - exporter.Reset() - - counter.Add(ctx, 7) - - mock.Add(time.Second) - runtime.Gosched() - - require.EqualValues(t, map[string]float64{ - "counter.sum//R=V": 10, - }, exporter.Values()) - - require.Equal(t, 1, exporter.ExportCount()) - exporter.Reset() - - require.NoError(t, p.Stop(ctx)) -} - -func TestPushExportError(t *testing.T) { - injector := func(name string, e error) func(r export.Record) error { - return func(r export.Record) error { - if r.Descriptor().Name() == name { - return e - } - return nil - } - } - var errAggregator = fmt.Errorf("unexpected error") - var tests = []struct { - name string - injectedError error - expected map[string]float64 - expectedError error - }{ - {"errNone", nil, map[string]float64{ - "counter1.sum/X=Y/R=V": 3, - "counter2.sum//R=V": 5, - }, nil}, - {"errNoData", aggregation.ErrNoData, map[string]float64{ - "counter2.sum//R=V": 5, - }, nil}, - {"errUnexpected", errAggregator, map[string]float64{}, errAggregator}, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - exporter := newExporter() - exporter.InjectErr = injector("counter1.sum", tt.injectedError) - - // This test validates the error handling - // behavior of the basic Processor is honored - // by the push processor. 
- checkpointer := processor.NewFactory(processortest.AggregatorSelector(), exporter) - p := controller.New( - checkpointer, - controller.WithExporter(exporter), - controller.WithCollectPeriod(time.Second), - controller.WithResource(testResource), - ) - - mock := controllertest.NewMockClock() - p.SetClock(mock) - - ctx := context.Background() - - meter := p.Meter("name") - counter1, err := meter.SyncInt64().Counter("counter1.sum") - require.NoError(t, err) - counter2, err := meter.SyncInt64().Counter("counter2.sum") - require.NoError(t, err) - - require.NoError(t, p.Start(ctx)) - runtime.Gosched() - - counter1.Add(ctx, 3, attribute.String("X", "Y")) - counter2.Add(ctx, 5) - - require.Equal(t, 0, exporter.ExportCount()) - require.Nil(t, testHandler.Flush()) - - mock.Add(time.Second) - runtime.Gosched() - - require.Equal(t, 1, exporter.ExportCount()) - if tt.expectedError == nil { - require.EqualValues(t, tt.expected, exporter.Values()) - require.NoError(t, testHandler.Flush()) - } else { - err := testHandler.Flush() - require.Error(t, err) - require.Equal(t, tt.expectedError, err) - } - - require.NoError(t, p.Stop(ctx)) - }) - } -} diff --git a/sdk/metric/controller/controllertest/controller_test.go b/sdk/metric/controller/controllertest/controller_test.go deleted file mode 100644 index 0f85ca37f9c..00000000000 --- a/sdk/metric/controller/controllertest/controller_test.go +++ /dev/null @@ -1,66 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package controllertest // import "go.opentelemetry.io/otel/sdk/metric/controller/controllertest" - -import ( - "context" - "sync" - "testing" - - "github.com/stretchr/testify/require" - - "go.opentelemetry.io/otel" - "go.opentelemetry.io/otel/metric/global" - "go.opentelemetry.io/otel/metric/instrument" - controller "go.opentelemetry.io/otel/sdk/metric/controller/basic" - "go.opentelemetry.io/otel/sdk/metric/export/aggregation" - "go.opentelemetry.io/otel/sdk/metric/processor/basic" - "go.opentelemetry.io/otel/sdk/metric/selector/simple" -) - -type errorCatcher struct { - lock sync.Mutex - errors []error -} - -func (e *errorCatcher) Handle(err error) { - e.lock.Lock() - defer e.lock.Unlock() - - e.errors = append(e.errors, err) -} - -func TestEndToEnd(t *testing.T) { - h := &errorCatcher{} - otel.SetErrorHandler(h) - - meter := global.Meter("go.opentelemetry.io/otel/sdk/metric/controller/controllertest_EndToEnd") - gauge, err := meter.AsyncInt64().Gauge("test") - require.NoError(t, err) - err = meter.RegisterCallback([]instrument.Asynchronous{gauge}, func(context.Context) {}) - require.NoError(t, err) - - c := controller.New(basic.NewFactory(simple.NewWithInexpensiveDistribution(), aggregation.CumulativeTemporalitySelector())) - - global.SetMeterProvider(c) - - gauge, err = meter.AsyncInt64().Gauge("test2") - require.NoError(t, err) - err = meter.RegisterCallback([]instrument.Asynchronous{gauge}, func(context.Context) {}) - require.NoError(t, err) - - h.lock.Lock() - require.Len(t, h.errors, 0) -} diff --git a/sdk/metric/controller/controllertest/test.go b/sdk/metric/controller/controllertest/test.go deleted file mode 100644 index 9c1a3421972..00000000000 --- a/sdk/metric/controller/controllertest/test.go +++ /dev/null @@ -1,85 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package controllertest // import "go.opentelemetry.io/otel/sdk/metric/controller/controllertest" - -import ( - "time" - - "github.com/benbjohnson/clock" - - "go.opentelemetry.io/otel/sdk/instrumentation" - controllerTime "go.opentelemetry.io/otel/sdk/metric/controller/time" - "go.opentelemetry.io/otel/sdk/metric/export" - "go.opentelemetry.io/otel/sdk/metric/export/aggregation" -) - -// MockClock is a Clock used for testing. -type MockClock struct { - mock *clock.Mock -} - -// MockTicker is a Ticker used for testing. -type MockTicker struct { - ticker *clock.Ticker -} - -var _ controllerTime.Clock = MockClock{} -var _ controllerTime.Ticker = MockTicker{} - -// NewMockClock returns a new unset MockClock. -func NewMockClock() MockClock { - return MockClock{clock.NewMock()} -} - -// Now returns the current time. -func (c MockClock) Now() time.Time { - return c.mock.Now() -} - -// Ticker creates a new instance of a Ticker. -func (c MockClock) Ticker(period time.Duration) controllerTime.Ticker { - return MockTicker{c.mock.Ticker(period)} -} - -// Add moves the current time of the MockClock forward by the specified -// duration. 
-func (c MockClock) Add(d time.Duration) { - c.mock.Add(d) -} - -// Stop turns off the MockTicker. -func (t MockTicker) Stop() { - t.ticker.Stop() -} - -// C returns a channel that receives the current time when MockTicker ticks. -func (t MockTicker) C() <-chan time.Time { - return t.ticker.C -} - -// ReadAll is a helper for tests that want a flat iterator over all -// metrics instead of a two-level iterator (instrumentation library, -// metric). -func ReadAll( - reader export.InstrumentationLibraryReader, - kind aggregation.TemporalitySelector, - apply func(instrumentation.Library, export.Record) error, -) error { - return reader.ForEach(func(library instrumentation.Library, reader export.Reader) error { - return reader.ForEach(kind, func(record export.Record) error { - return apply(library, record) - }) - }) -} diff --git a/sdk/metric/controller/time/time.go b/sdk/metric/controller/time/time.go deleted file mode 100644 index 10b3cd8726f..00000000000 --- a/sdk/metric/controller/time/time.go +++ /dev/null @@ -1,67 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package time // import "go.opentelemetry.io/otel/sdk/metric/controller/time" - -import ( - "time" -) - -// Several types below are created to match "github.com/benbjohnson/clock" -// so that it remains a test-only dependency. - -// Clock keeps track of time for a metric SDK. -type Clock interface { - Now() time.Time - Ticker(duration time.Duration) Ticker -} - -// Ticker signals time intervals. -type Ticker interface { - Stop() - C() <-chan time.Time -} - -// RealClock wraps the time package and uses the system time to tell time. -type RealClock struct { -} - -// RealTicker wraps the time package and uses system time to tick time -// intervals. -type RealTicker struct { - ticker *time.Ticker -} - -var _ Clock = RealClock{} -var _ Ticker = RealTicker{} - -// Now returns the current time. -func (RealClock) Now() time.Time { - return time.Now() -} - -// Ticker creates a new RealTicker that will tick with period. -func (RealClock) Ticker(period time.Duration) Ticker { - return RealTicker{time.NewTicker(period)} -} - -// Stop turns off the RealTicker. -func (t RealTicker) Stop() { - t.ticker.Stop() -} - -// C returns a channel that receives the current time when RealTicker ticks. -func (t RealTicker) C() <-chan time.Time { - return t.ticker.C -} diff --git a/sdk/metric/correct_test.go b/sdk/metric/correct_test.go deleted file mode 100644 index 944570375ae..00000000000 --- a/sdk/metric/correct_test.go +++ /dev/null @@ -1,572 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
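The Clock and Ticker seams defined above exist so tests can drive a controller's collection loop deterministically instead of sleeping. A minimal sketch of that pattern, matching the push-controller tests elsewhere in this patch; the helper name driveOneTick is invented here, and the *controller.Controller type is assumed from the removed controller/basic package:

    package basic_test

    import (
        "context"
        "runtime"
        "testing"
        "time"

        "github.com/stretchr/testify/require"

        controller "go.opentelemetry.io/otel/sdk/metric/controller/basic"
        "go.opentelemetry.io/otel/sdk/metric/controller/controllertest"
    )

    // driveOneTick installs a MockClock before Start, so the controller's
    // ticker is built from the mock, then advances the clock by one collect
    // period to fire the ticker exactly once.
    func driveOneTick(t *testing.T, cont *controller.Controller, period time.Duration) {
        mock := controllertest.NewMockClock()
        cont.SetClock(mock)
        require.NoError(t, cont.Start(context.Background()))
        mock.Add(period)  // fires the controller's ticker once
        runtime.Gosched() // give the collection goroutine a chance to run
        require.NoError(t, cont.Stop(context.Background()))
    }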
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package metric_test - -import ( - "context" - "fmt" - "math" - "sync" - "testing" - - "github.com/stretchr/testify/require" - - "go.opentelemetry.io/otel" - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/metric" - "go.opentelemetry.io/otel/metric/instrument" - "go.opentelemetry.io/otel/metric/instrument/asyncint64" - metricsdk "go.opentelemetry.io/otel/sdk/metric" - "go.opentelemetry.io/otel/sdk/metric/aggregator" - "go.opentelemetry.io/otel/sdk/metric/export" - "go.opentelemetry.io/otel/sdk/metric/export/aggregation" - "go.opentelemetry.io/otel/sdk/metric/processor/processortest" - "go.opentelemetry.io/otel/sdk/metric/sdkapi" -) - -type handler struct { - sync.Mutex - err error -} - -func (h *handler) Handle(err error) { - h.Lock() - h.err = err - h.Unlock() -} - -func (h *handler) Reset() { - h.Lock() - h.err = nil - h.Unlock() -} - -func (h *handler) Flush() error { - h.Lock() - err := h.err - h.err = nil - h.Unlock() - return err -} - -var testHandler *handler - -func init() { - testHandler = new(handler) - otel.SetErrorHandler(testHandler) -} - -type testSelector struct { - selector export.AggregatorSelector - newAggCount int -} - -func (ts *testSelector) AggregatorFor(desc *sdkapi.Descriptor, aggPtrs ...*aggregator.Aggregator) { - ts.newAggCount += len(aggPtrs) - processortest.AggregatorSelector().AggregatorFor(desc, aggPtrs...) 
-} - -func newSDK(t *testing.T) (metric.Meter, *metricsdk.Accumulator, *testSelector, *processortest.Processor) { - testHandler.Reset() - testSelector := &testSelector{selector: processortest.AggregatorSelector()} - processor := processortest.NewProcessor( - testSelector, - attribute.DefaultEncoder(), - ) - accum := metricsdk.NewAccumulator( - processor, - ) - meter := sdkapi.WrapMeterImpl(accum) - return meter, accum, testSelector, processor -} - -func TestInputRangeCounter(t *testing.T) { - ctx := context.Background() - meter, sdk, _, processor := newSDK(t) - - counter, err := meter.SyncInt64().Counter("name.sum") - require.NoError(t, err) - - counter.Add(ctx, -1) - require.Equal(t, aggregation.ErrNegativeInput, testHandler.Flush()) - - checkpointed := sdk.Collect(ctx) - require.Equal(t, 0, checkpointed) - - processor.Reset() - counter.Add(ctx, 1) - checkpointed = sdk.Collect(ctx) - require.Equal(t, map[string]float64{ - "name.sum//": 1, - }, processor.Values()) - require.Equal(t, 1, checkpointed) - require.Nil(t, testHandler.Flush()) -} - -func TestInputRangeUpDownCounter(t *testing.T) { - ctx := context.Background() - meter, sdk, _, processor := newSDK(t) - - counter, err := meter.SyncInt64().UpDownCounter("name.sum") - require.NoError(t, err) - - counter.Add(ctx, -1) - counter.Add(ctx, -1) - counter.Add(ctx, 2) - counter.Add(ctx, 1) - - checkpointed := sdk.Collect(ctx) - require.Equal(t, map[string]float64{ - "name.sum//": 1, - }, processor.Values()) - require.Equal(t, 1, checkpointed) - require.Nil(t, testHandler.Flush()) -} - -func TestInputRangeHistogram(t *testing.T) { - ctx := context.Background() - meter, sdk, _, processor := newSDK(t) - - histogram, err := meter.SyncFloat64().Histogram("name.histogram") - require.NoError(t, err) - - histogram.Record(ctx, math.NaN()) - require.Equal(t, aggregation.ErrNaNInput, testHandler.Flush()) - - checkpointed := sdk.Collect(ctx) - require.Equal(t, 0, checkpointed) - - histogram.Record(ctx, 1) - histogram.Record(ctx, 2) - - processor.Reset() - checkpointed = sdk.Collect(ctx) - - require.Equal(t, map[string]float64{ - "name.histogram//": 3, - }, processor.Values()) - require.Equal(t, 1, checkpointed) - require.Nil(t, testHandler.Flush()) -} - -func TestDisabledInstrument(t *testing.T) { - ctx := context.Background() - meter, sdk, _, processor := newSDK(t) - - histogram, err := meter.SyncFloat64().Histogram("name.disabled") - require.NoError(t, err) - - histogram.Record(ctx, -1) - checkpointed := sdk.Collect(ctx) - - require.Equal(t, 0, checkpointed) - require.Equal(t, map[string]float64{}, processor.Values()) -} - -func TestRecordNaN(t *testing.T) { - ctx := context.Background() - meter, _, _, _ := newSDK(t) - - c, err := meter.SyncFloat64().Counter("name.sum") - require.NoError(t, err) - - require.Nil(t, testHandler.Flush()) - c.Add(ctx, math.NaN()) - require.Error(t, testHandler.Flush()) -} - -func TestSDKAttrsDeduplication(t *testing.T) { - ctx := context.Background() - meter, sdk, _, processor := newSDK(t) - - counter, err := meter.SyncInt64().Counter("name.sum") - require.NoError(t, err) - - const ( - maxKeys = 21 - keySets = 2 - repeats = 3 - ) - var keysA []attribute.Key - var keysB []attribute.Key - - for i := 0; i < maxKeys; i++ { - keysA = append(keysA, attribute.Key(fmt.Sprintf("A%03d", i))) - keysB = append(keysB, attribute.Key(fmt.Sprintf("B%03d", i))) - } - - allExpect := map[string]float64{} - for numKeys := 0; numKeys < maxKeys; numKeys++ { - var kvsA []attribute.KeyValue - var kvsB []attribute.KeyValue - for r := 0; r < 
repeats; r++ { - for i := 0; i < numKeys; i++ { - kvsA = append(kvsA, keysA[i].Int(r)) - kvsB = append(kvsB, keysB[i].Int(r)) - } - } - - var expectA []attribute.KeyValue - var expectB []attribute.KeyValue - for i := 0; i < numKeys; i++ { - expectA = append(expectA, keysA[i].Int(repeats-1)) - expectB = append(expectB, keysB[i].Int(repeats-1)) - } - - counter.Add(ctx, 1, kvsA...) - counter.Add(ctx, 1, kvsA...) - format := func(attrs []attribute.KeyValue) string { - str := attribute.DefaultEncoder().Encode(newSetIter(attrs...)) - return fmt.Sprint("name.sum/", str, "/") - } - allExpect[format(expectA)] += 2 - - if numKeys != 0 { - // In this case A and B sets are the same. - counter.Add(ctx, 1, kvsB...) - counter.Add(ctx, 1, kvsB...) - allExpect[format(expectB)] += 2 - } - } - - sdk.Collect(ctx) - - require.EqualValues(t, allExpect, processor.Values()) -} - -func newSetIter(kvs ...attribute.KeyValue) attribute.Iterator { - attrs := attribute.NewSet(kvs...) - return attrs.Iter() -} - -func TestDefaultAttributeEncoder(t *testing.T) { - encoder := attribute.DefaultEncoder() - - encoded := encoder.Encode(newSetIter(attribute.String("A", "B"), attribute.String("C", "D"))) - require.Equal(t, `A=B,C=D`, encoded) - - encoded = encoder.Encode(newSetIter(attribute.String("A", "B,c=d"), attribute.String(`C\`, "D"))) - require.Equal(t, `A=B\,c\=d,C\\=D`, encoded) - - encoded = encoder.Encode(newSetIter(attribute.String(`\`, `=`), attribute.String(`,`, `\`))) - require.Equal(t, `\,=\\,\\=\=`, encoded) - - // Note: the attr encoder does not sort or de-dup values, - // that is done in Attributes(...). - encoded = encoder.Encode(newSetIter( - attribute.Int("I", 1), - attribute.Int64("I64", 1), - attribute.Float64("F64", 1), - attribute.Float64("F64", 1), - attribute.String("S", "1"), - attribute.Bool("B", true), - )) - require.Equal(t, "B=true,F64=1,I=1,I64=1,S=1", encoded) -} - -func TestObserverCollection(t *testing.T) { - ctx := context.Background() - meter, sdk, _, processor := newSDK(t) - mult := 1 - - gaugeF, err := meter.AsyncFloat64().Gauge("float.gauge.lastvalue") - require.NoError(t, err) - err = meter.RegisterCallback([]instrument.Asynchronous{ - gaugeF, - }, func(ctx context.Context) { - gaugeF.Observe(ctx, float64(mult), attribute.String("A", "B")) - // last value wins - gaugeF.Observe(ctx, float64(-mult), attribute.String("A", "B")) - gaugeF.Observe(ctx, float64(-mult), attribute.String("C", "D")) - }) - require.NoError(t, err) - - gaugeI, err := meter.AsyncInt64().Gauge("int.gauge.lastvalue") - require.NoError(t, err) - err = meter.RegisterCallback([]instrument.Asynchronous{ - gaugeI, - }, func(ctx context.Context) { - gaugeI.Observe(ctx, int64(-mult), attribute.String("A", "B")) - gaugeI.Observe(ctx, int64(mult)) - // last value wins - gaugeI.Observe(ctx, int64(mult), attribute.String("A", "B")) - gaugeI.Observe(ctx, int64(mult)) - }) - require.NoError(t, err) - - counterF, err := meter.AsyncFloat64().Counter("float.counterobserver.sum") - require.NoError(t, err) - err = meter.RegisterCallback([]instrument.Asynchronous{ - counterF, - }, func(ctx context.Context) { - counterF.Observe(ctx, float64(mult), attribute.String("A", "B")) - counterF.Observe(ctx, float64(2*mult), attribute.String("A", "B")) - counterF.Observe(ctx, float64(mult), attribute.String("C", "D")) - }) - require.NoError(t, err) - - counterI, err := meter.AsyncInt64().Counter("int.counterobserver.sum") - require.NoError(t, err) - err = meter.RegisterCallback([]instrument.Asynchronous{ - counterI, - }, func(ctx context.Context) 
{ - counterI.Observe(ctx, int64(2*mult), attribute.String("A", "B")) - counterI.Observe(ctx, int64(mult)) - // last value wins - counterI.Observe(ctx, int64(mult), attribute.String("A", "B")) - counterI.Observe(ctx, int64(mult)) - }) - require.NoError(t, err) - - updowncounterF, err := meter.AsyncFloat64().UpDownCounter("float.updowncounterobserver.sum") - require.NoError(t, err) - err = meter.RegisterCallback([]instrument.Asynchronous{ - updowncounterF, - }, func(ctx context.Context) { - updowncounterF.Observe(ctx, float64(mult), attribute.String("A", "B")) - updowncounterF.Observe(ctx, float64(-2*mult), attribute.String("A", "B")) - updowncounterF.Observe(ctx, float64(mult), attribute.String("C", "D")) - }) - require.NoError(t, err) - - updowncounterI, err := meter.AsyncInt64().UpDownCounter("int.updowncounterobserver.sum") - require.NoError(t, err) - err = meter.RegisterCallback([]instrument.Asynchronous{ - updowncounterI, - }, func(ctx context.Context) { - updowncounterI.Observe(ctx, int64(2*mult), attribute.String("A", "B")) - updowncounterI.Observe(ctx, int64(mult)) - // last value wins - updowncounterI.Observe(ctx, int64(mult), attribute.String("A", "B")) - updowncounterI.Observe(ctx, int64(-mult)) - }) - require.NoError(t, err) - - unused, err := meter.AsyncInt64().Gauge("empty.gauge.sum") - require.NoError(t, err) - err = meter.RegisterCallback([]instrument.Asynchronous{ - unused, - }, func(ctx context.Context) { - }) - require.NoError(t, err) - - for mult = 0; mult < 3; mult++ { - processor.Reset() - - collected := sdk.Collect(ctx) - require.Equal(t, collected, len(processor.Values())) - - mult := float64(mult) - require.EqualValues(t, map[string]float64{ - "float.gauge.lastvalue/A=B/": -mult, - "float.gauge.lastvalue/C=D/": -mult, - "int.gauge.lastvalue//": mult, - "int.gauge.lastvalue/A=B/": mult, - - "float.counterobserver.sum/A=B/": 3 * mult, - "float.counterobserver.sum/C=D/": mult, - "int.counterobserver.sum//": 2 * mult, - "int.counterobserver.sum/A=B/": 3 * mult, - - "float.updowncounterobserver.sum/A=B/": -mult, - "float.updowncounterobserver.sum/C=D/": mult, - "int.updowncounterobserver.sum//": 0, - "int.updowncounterobserver.sum/A=B/": 3 * mult, - }, processor.Values()) - } -} - -func TestCounterObserverInputRange(t *testing.T) { - ctx := context.Background() - meter, sdk, _, processor := newSDK(t) - - // TODO: these tests are testing for negative values, not for _descending values_. Fix. 
- counterF, _ := meter.AsyncFloat64().Counter("float.counterobserver.sum") - err := meter.RegisterCallback([]instrument.Asynchronous{ - counterF, - }, func(ctx context.Context) { - counterF.Observe(ctx, -2, attribute.String("A", "B")) - require.Equal(t, aggregation.ErrNegativeInput, testHandler.Flush()) - counterF.Observe(ctx, -1, attribute.String("C", "D")) - require.Equal(t, aggregation.ErrNegativeInput, testHandler.Flush()) - }) - require.NoError(t, err) - counterI, _ := meter.AsyncInt64().Counter("int.counterobserver.sum") - err = meter.RegisterCallback([]instrument.Asynchronous{ - counterI, - }, func(ctx context.Context) { - counterI.Observe(ctx, -1, attribute.String("A", "B")) - require.Equal(t, aggregation.ErrNegativeInput, testHandler.Flush()) - counterI.Observe(ctx, -1) - require.Equal(t, aggregation.ErrNegativeInput, testHandler.Flush()) - }) - require.NoError(t, err) - - collected := sdk.Collect(ctx) - - require.Equal(t, 0, collected) - require.EqualValues(t, map[string]float64{}, processor.Values()) - - // check that the error condition was reset - require.NoError(t, testHandler.Flush()) -} - -func TestObserverBatch(t *testing.T) { - ctx := context.Background() - meter, sdk, _, processor := newSDK(t) - - floatGaugeObs, _ := meter.AsyncFloat64().Gauge("float.gauge.lastvalue") - intGaugeObs, _ := meter.AsyncInt64().Gauge("int.gauge.lastvalue") - floatCounterObs, _ := meter.AsyncFloat64().Counter("float.counterobserver.sum") - intCounterObs, _ := meter.AsyncInt64().Counter("int.counterobserver.sum") - floatUpDownCounterObs, _ := meter.AsyncFloat64().UpDownCounter("float.updowncounterobserver.sum") - intUpDownCounterObs, _ := meter.AsyncInt64().UpDownCounter("int.updowncounterobserver.sum") - - err := meter.RegisterCallback([]instrument.Asynchronous{ - floatGaugeObs, - intGaugeObs, - floatCounterObs, - intCounterObs, - floatUpDownCounterObs, - intUpDownCounterObs, - }, func(ctx context.Context) { - ab := attribute.String("A", "B") - floatGaugeObs.Observe(ctx, 1, ab) - floatGaugeObs.Observe(ctx, -1, ab) - intGaugeObs.Observe(ctx, -1, ab) - intGaugeObs.Observe(ctx, 1, ab) - floatCounterObs.Observe(ctx, 1000, ab) - intCounterObs.Observe(ctx, 100, ab) - floatUpDownCounterObs.Observe(ctx, -1000, ab) - intUpDownCounterObs.Observe(ctx, -100, ab) - - cd := attribute.String("C", "D") - floatGaugeObs.Observe(ctx, -1, cd) - floatCounterObs.Observe(ctx, -1, cd) - floatUpDownCounterObs.Observe(ctx, -1, cd) - - intGaugeObs.Observe(ctx, 1) - intGaugeObs.Observe(ctx, 1) - intCounterObs.Observe(ctx, 10) - floatCounterObs.Observe(ctx, 1.1) - intUpDownCounterObs.Observe(ctx, 10) - }) - require.NoError(t, err) - - collected := sdk.Collect(ctx) - - require.Equal(t, collected, len(processor.Values())) - - require.EqualValues(t, map[string]float64{ - "float.counterobserver.sum//": 1.1, - "float.counterobserver.sum/A=B/": 1000, - "int.counterobserver.sum//": 10, - "int.counterobserver.sum/A=B/": 100, - - "int.updowncounterobserver.sum/A=B/": -100, - "float.updowncounterobserver.sum/A=B/": -1000, - "int.updowncounterobserver.sum//": 10, - "float.updowncounterobserver.sum/C=D/": -1, - - "float.gauge.lastvalue/A=B/": -1, - "float.gauge.lastvalue/C=D/": -1, - "int.gauge.lastvalue//": 1, - "int.gauge.lastvalue/A=B/": 1, - }, processor.Values()) -} - -// TestRecordPersistence ensures that a direct-called instrument that is -// repeatedly used each interval results in a persistent record, so that its -// encoded attribute will be cached across collection intervals. 
-func TestRecordPersistence(t *testing.T) { - ctx := context.Background() - meter, sdk, selector, _ := newSDK(t) - - c, err := meter.SyncFloat64().Counter("name.sum") - require.NoError(t, err) - - uk := attribute.String("bound", "false") - - for i := 0; i < 100; i++ { - c.Add(ctx, 1, uk) - sdk.Collect(ctx) - } - - require.Equal(t, 2, selector.newAggCount) -} - -func TestIncorrectInstruments(t *testing.T) { - // The Batch observe/record APIs are susceptible to - // uninitialized instruments. - var observer asyncint64.Gauge - - ctx := context.Background() - meter, sdk, _, processor := newSDK(t) - - // Now try with uninitialized instruments. - err := meter.RegisterCallback([]instrument.Asynchronous{ - observer, - }, func(ctx context.Context) { - observer.Observe(ctx, 1) - }) - require.ErrorIs(t, err, metricsdk.ErrBadInstrument) - - collected := sdk.Collect(ctx) - err = testHandler.Flush() - require.NoError(t, err) - require.Equal(t, 0, collected) - - // Now try with instruments from another SDK. - noopMeter := metric.NewNoopMeter() - observer, _ = noopMeter.AsyncInt64().Gauge("observer") - - err = meter.RegisterCallback( - []instrument.Asynchronous{observer}, - func(ctx context.Context) { - observer.Observe(ctx, 1) - }, - ) - require.ErrorIs(t, err, metricsdk.ErrBadInstrument) - - collected = sdk.Collect(ctx) - require.Equal(t, 0, collected) - require.EqualValues(t, map[string]float64{}, processor.Values()) - - err = testHandler.Flush() - require.NoError(t, err) -} - -func TestSyncInAsync(t *testing.T) { - ctx := context.Background() - meter, sdk, _, processor := newSDK(t) - - counter, _ := meter.SyncFloat64().Counter("counter.sum") - gauge, _ := meter.AsyncInt64().Gauge("observer.lastvalue") - - err := meter.RegisterCallback([]instrument.Asynchronous{ - gauge, - }, func(ctx context.Context) { - gauge.Observe(ctx, 10) - counter.Add(ctx, 100) - }) - require.NoError(t, err) - - sdk.Collect(ctx) - - require.EqualValues(t, map[string]float64{ - "counter.sum//": 100, - "observer.lastvalue//": 10, - }, processor.Values()) -} diff --git a/sdk/metric/doc.go b/sdk/metric/doc.go index bcb641ee446..6cbc7fdfc0d 100644 --- a/sdk/metric/doc.go +++ b/sdk/metric/doc.go @@ -12,119 +12,35 @@ // See the License for the specific language governing permissions and // limitations under the License. -/* -Package metric implements the OpenTelemetry metric API. - -This package is currently in a pre-GA phase. Backwards incompatible changes -may be introduced in subsequent minor version releases as we work to track the -evolving OpenTelemetry specification and user feedback. - -The Accumulator type supports configurable metrics export behavior through a -collection of export interfaces that support various export strategies, -described below. - -The OpenTelemetry metric API consists of methods for constructing synchronous -and asynchronous instruments. There are two constructors per instrument for -the two kinds of number (int64, float64). - -Synchronous instruments are managed by a sync.Map containing a *record -with the current state for each synchronous instrument. A lock-free -algorithm is used to protect against races when adding and removing -items from the sync.Map. - -Asynchronous instruments are managed by an internal -AsyncInstrumentState, which coordinates calling batch and single -instrument callbacks. - -# Internal Structure - -Each observer also has its own kind of record stored in the SDK. This -record contains a set of recorders for every specific attribute set used in -the callback. 
- -A sync.Map maintains the mapping of current instruments and attribute sets to -internal records. To find a record, the SDK consults the Map to locate an -existing record, otherwise it constructs a new record. The SDK maintains a -count of the number of references to each record, ensuring that records are -not reclaimed from the Map while they are still active from the user's -perspective. - -Metric collection is performed via a single-threaded call to Collect that -sweeps through all records in the SDK, checkpointing their state. When a -record is discovered that has no references and has not been updated since -the prior collection pass, it is removed from the Map. - -Both synchronous and asynchronous instruments have an associated -aggregator, which maintains the current state resulting from all metric -events since its last checkpoint. Aggregators may be lock-free or they may -use locking, but they should expect to be called concurrently. Aggregators -must be capable of merging with another aggregator of the same type. - -# Export Pipeline - -While the SDK serves to maintain a current set of records and -coordinate collection, the behavior of a metrics export pipeline is -configured through the export types in -go.opentelemetry.io/otel/sdk/metric/export. It is important to keep -in mind the context these interfaces are called from. There are two -contexts, instrumentation context, where a user-level goroutine that -enters the SDK resulting in a new record, and collection context, -where a system-level thread performs a collection pass through the -SDK. - -Descriptor is a struct that describes the metric instrument to the -export pipeline, containing the name, units, description, metric kind, -number kind (int64 or float64). A Descriptor accompanies metric data -as it passes through the export pipeline. - -The AggregatorSelector interface supports choosing the method of -aggregation to apply to a particular instrument, by delegating the -construction of an Aggregator to this interface. Given the Descriptor, -the AggregatorFor method returns an implementation of Aggregator. If this -interface returns nil, the metric will be disabled. The aggregator should -be matched to the capabilities of the exporter. Selecting the aggregator -for Adding instruments is relatively straightforward, but many options -are available for aggregating distributions from Grouping instruments. - -Aggregator is an interface which implements a concrete strategy for -aggregating metric updates. Several Aggregator implementations are -provided by the SDK. Aggregators may be lock-free or use locking, -depending on their structure and semantics. Aggregators implement an -Update method, called in instrumentation context, to receive a single -metric event. Aggregators implement a Checkpoint method, called in -collection context, to save a checkpoint of the current state. -Aggregators implement a Merge method, also called in collection -context, that combines state from two aggregators into one. Each SDK -record has an associated aggregator. - -Processor is an interface which sits between the SDK and an exporter. -The Processor embeds an AggregatorSelector, used by the SDK to assign -new Aggregators. The Processor supports a Process() API for submitting -checkpointed aggregators to the processor, and a Reader() API -for producing a complete checkpoint for the exporter. 
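To make the pipeline just described concrete, here is a minimal sketch of how the removed pieces were wired together, using only APIs exercised by the controller tests in this patch; the in-memory processortest exporter stands in for a real one:

    package main

    import (
        "context"
        "time"

        "go.opentelemetry.io/otel/attribute"
        controller "go.opentelemetry.io/otel/sdk/metric/controller/basic"
        "go.opentelemetry.io/otel/sdk/metric/export/aggregation"
        processor "go.opentelemetry.io/otel/sdk/metric/processor/basic"
        "go.opentelemetry.io/otel/sdk/metric/processor/processortest"
        "go.opentelemetry.io/otel/sdk/resource"
    )

    func main() {
        ctx := context.Background()

        // Exporter: the final stage. The processortest exporter keeps
        // values in memory instead of sending them anywhere.
        exp := processortest.New(
            aggregation.CumulativeTemporalitySelector(),
            attribute.DefaultEncoder(),
        )

        // Checkpointer factory: pairs an AggregatorSelector (which picks
        // the Aggregator per instrument) with the basic Processor.
        factory := processor.NewFactory(processortest.AggregatorSelector(), exp)

        // Controller: orchestrates Collect -> checkpoint -> Export on a
        // timer, and implements the public metric.MeterProvider API.
        cont := controller.New(
            factory,
            controller.WithExporter(exp),
            controller.WithCollectPeriod(time.Minute),
            controller.WithResource(resource.Empty()),
        )

        if err := cont.Start(ctx); err != nil {
            panic(err)
        }
        counter, err := cont.Meter("pipeline-demo").SyncInt64().Counter("requests.sum")
        if err != nil {
            panic(err)
        }
        counter.Add(ctx, 1)

        // Stop performs one final collect-and-export.
        if err := cont.Stop(ctx); err != nil {
            panic(err)
        }
    }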
Two default
-Processor implementations are provided, the "defaultkeys" Processor groups
-aggregate metrics by their recommended Descriptor.Keys(), the
-"simple" Processor aggregates metrics at full dimensionality.
-
-Reader is an interface between the Processor and the Exporter.
-After completing a collection pass, the Processor.Reader() method
-returns a Reader, which the Exporter uses to iterate over all
-the updated metrics.
-
-Record is a struct containing the state of an individual exported
-metric. This is the result of one collection interface for one
-instrument and one attribute set.
-
-Exporter is the final stage of an export pipeline. It is called with
-a Reader capable of enumerating all the updated metrics.
-
-Controller is not an export interface per se, but it orchestrates the
-export pipeline. For example, a "push" controller will establish a
-periodic timer to regularly collect and export metrics. A "pull"
-controller will await a pull request before initiating metric
-collection. Either way, the job of the controller is to call the SDK
-Collect() method, then read the checkpoint, then invoke the exporter.
-Controllers are expected to implement the public metric.MeterProvider
-API, meaning they can be installed as the global Meter provider.
-*/
+// Package metric provides an implementation of the OpenTelemetry metric SDK.
+//
+// See https://opentelemetry.io/docs/concepts/signals/metrics/ for information
+// about the concept of OpenTelemetry metrics and
+// https://opentelemetry.io/docs/concepts/components/ for more information
+// about OpenTelemetry SDKs.
+//
+// The entry point for the metric package is the MeterProvider. It is the
+// object that all API calls use to create Meters, instruments, and ultimately
+// make metric measurements. It is also the object used to control the
+// life-cycle (start, flush, and shutdown) of the SDK.
+//
+// A MeterProvider must be configured to export the measured data; this is
+// done by registering a Reader implementation (using the WithReader
+// MeterProviderOption). Readers take two forms: ones that push to an endpoint
+// (NewPeriodicReader), and ones that an endpoint pulls from. See the
+// go.opentelemetry.io/otel/exporters package for exporters that can be used as
+// or with these Readers.
+//
+// Each Reader, when registered with the MeterProvider, can be augmented with a
+// View. Views allow users who run OpenTelemetry instrumented code to modify
+// the data that instrumentation generates. See the
+// go.opentelemetry.io/otel/sdk/metric/view package for more information about
+// Views.
+//
+// The data generated by a MeterProvider needs to include information about its
+// origin. A MeterProvider needs to be configured with a Resource, using the
+// WithResource MeterProviderOption, to include this information. This Resource
+// should be used to describe the unique runtime environment the instrumented
+// code runs on. That way, when multiple instances of the code are collected
+// at a single endpoint, their origin is decipherable.
 package metric // import "go.opentelemetry.io/otel/sdk/metric"
diff --git a/sdk/metric/example_test.go b/sdk/metric/example_test.go
new file mode 100644
index 00000000000..eabe781738a
--- /dev/null
+++ b/sdk/metric/example_test.go
@@ -0,0 +1,63 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build go1.18 +// +build go1.18 + +package metric_test + +import ( + "context" + "log" + + "go.opentelemetry.io/otel/metric/global" + "go.opentelemetry.io/otel/sdk/metric" + "go.opentelemetry.io/otel/sdk/resource" + semconv "go.opentelemetry.io/otel/semconv/v1.12.0" +) + +func Example() { + // This reader is used as a stand-in for a reader that will actually export + // data. See exporters in the go.opentelemetry.io/otel/exporters package + // for more information. + reader := metric.NewManualReader() + + // See the go.opentelemetry.io/otel/sdk/resource package for more + // information about how to create and use Resources. + res := resource.NewWithAttributes( + semconv.SchemaURL, + semconv.ServiceNameKey.String("my-service"), + semconv.ServiceVersionKey.String("v0.1.0"), + ) + + meterProvider := metric.NewMeterProvider( + metric.WithResource(res), + metric.WithReader(reader), + ) + global.SetMeterProvider(meterProvider) + defer func() { + err := meterProvider.Shutdown(context.Background()) + if err != nil { + log.Fatalln(err) + } + }() + // The MeterProvider is configured and registered globally. You can now run + // your code instrumented with the OpenTelemetry API that uses the global + // MeterProvider without having to pass this MeterProvider instance. Or, + // you can pass this instance directly to your instrumented code if it + // accepts a MeterProvider instance. + // + // See the go.opentelemetry.io/otel/metric package for more information + // about the metric API. +} diff --git a/sdk/metric/export/aggregation/aggregation.go b/sdk/metric/export/aggregation/aggregation.go deleted file mode 100644 index c43651c5889..00000000000 --- a/sdk/metric/export/aggregation/aggregation.go +++ /dev/null @@ -1,119 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package aggregation // import "go.opentelemetry.io/otel/sdk/metric/export/aggregation" - -import ( - "fmt" - "time" - - "go.opentelemetry.io/otel/sdk/metric/number" -) - -// These interfaces describe the various ways to access state from an -// Aggregation. - -type ( - // Aggregation is an interface returned by the Aggregator - // containing an interval of metric data. - Aggregation interface { - // Kind returns a short identifying string to identify - // the Aggregator that was used to produce the - // Aggregation (e.g., "Sum"). - Kind() Kind - } - - // Sum returns an aggregated sum. - Sum interface { - Aggregation - Sum() (number.Number, error) - } - - // Count returns the number of values that were aggregated. 
- Count interface { - Aggregation - Count() (uint64, error) - } - - // LastValue returns the latest value that was aggregated. - LastValue interface { - Aggregation - LastValue() (number.Number, time.Time, error) - } - - // Buckets represents histogram buckets boundaries and counts. - // - // For a Histogram with N defined boundaries, e.g, [x, y, z]. - // There are N+1 counts: [-inf, x), [x, y), [y, z), [z, +inf]. - Buckets struct { - // Boundaries are floating point numbers, even when - // aggregating integers. - Boundaries []float64 - - // Counts holds the count in each bucket. - Counts []uint64 - } - - // Histogram returns the count of events in pre-determined buckets. - Histogram interface { - Aggregation - Count() (uint64, error) - Sum() (number.Number, error) - Histogram() (Buckets, error) - } -) - -type ( - // Kind is a short name for the Aggregator that produces an - // Aggregation, used for descriptive purpose only. Kind is a - // string to allow user-defined Aggregators. - // - // When deciding how to handle an Aggregation, Exporters are - // encouraged to decide based on conversion to the above - // interfaces based on strength, not on Kind value, when - // deciding how to expose metric data. This enables - // user-supplied Aggregators to replace builtin Aggregators. - // - // For example, test for a Histogram before testing for a - // Sum, and so on. - Kind string -) - -// Kind description constants. -const ( - SumKind Kind = "Sum" - HistogramKind Kind = "Histogram" - LastValueKind Kind = "Lastvalue" -) - -// Sentinel errors for Aggregation interface. -var ( - ErrNegativeInput = fmt.Errorf("negative value is out of range for this instrument") - ErrNaNInput = fmt.Errorf("invalid input value: NaN") - ErrInconsistentType = fmt.Errorf("inconsistent aggregator types") - - // ErrNoCumulativeToDelta is returned when requesting delta - // export kind for a precomputed sum instrument. - ErrNoCumulativeToDelta = fmt.Errorf("cumulative to delta not implemented") - - // ErrNoData is returned when (due to a race with collection) - // the Aggregator is check-pointed before the first value is set. - // The aggregator should simply be skipped in this case. - ErrNoData = fmt.Errorf("no data collected by this aggregator") -) - -// String returns the string value of Kind. -func (k Kind) String() string { - return string(k) -} diff --git a/sdk/metric/export/aggregation/temporality.go b/sdk/metric/export/aggregation/temporality.go deleted file mode 100644 index 0612fe06af0..00000000000 --- a/sdk/metric/export/aggregation/temporality.go +++ /dev/null @@ -1,117 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:generate stringer -type=Temporality - -package aggregation // import "go.opentelemetry.io/otel/sdk/metric/export/aggregation" - -import ( - "go.opentelemetry.io/otel/sdk/metric/sdkapi" -) - -// Temporality indicates the temporal aggregation exported by an exporter. 
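As the Kind comment above recommends, exporters should dispatch on interface conversion, strongest first: a Histogram also satisfies Sum and Count, so it must be tested before Sum. A sketch of that dispatch follows; the dump helper is invented here, and CoerceToFloat64 and NumberKind are assumptions about the removed number and sdkapi packages, not shown in this patch:

    package main

    import (
        "fmt"

        "go.opentelemetry.io/otel/sdk/metric/export/aggregation"
        "go.opentelemetry.io/otel/sdk/metric/sdkapi"
    )

    // dump dispatches on the aggregation interfaces in strength order:
    // Histogram also satisfies Sum and Count, so it is checked first.
    func dump(desc *sdkapi.Descriptor, agg aggregation.Aggregation) error {
        switch a := agg.(type) {
        case aggregation.Histogram:
            b, err := a.Histogram()
            if err != nil {
                return err
            }
            fmt.Println(desc.Name(), "histogram with", len(b.Counts), "buckets")
        case aggregation.Sum:
            v, err := a.Sum()
            if err != nil {
                return err
            }
            // A number.Number is interpreted with the instrument's number
            // kind; CoerceToFloat64 is assumed from the removed number package.
            fmt.Println(desc.Name(), "sum:", v.CoerceToFloat64(desc.NumberKind()))
        case aggregation.LastValue:
            v, ts, err := a.LastValue()
            if err != nil {
                return err
            }
            fmt.Println(desc.Name(), "last value:", v.CoerceToFloat64(desc.NumberKind()), "at", ts)
        default:
            fmt.Println(desc.Name(), "unhandled aggregation kind:", agg.Kind())
        }
        return nil
    }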
-// These bits may be OR-d together when multiple exporters are in use. -type Temporality uint8 - -const ( - // CumulativeTemporality indicates that an Exporter expects a - // Cumulative Aggregation. - CumulativeTemporality Temporality = 1 - - // DeltaTemporality indicates that an Exporter expects a - // Delta Aggregation. - DeltaTemporality Temporality = 2 -) - -// Includes returns if t includes support for other temporality. -func (t Temporality) Includes(other Temporality) bool { - return t&other != 0 -} - -// MemoryRequired returns whether an exporter of this temporality requires -// memory to export correctly. -func (t Temporality) MemoryRequired(mkind sdkapi.InstrumentKind) bool { - switch mkind { - case sdkapi.HistogramInstrumentKind, sdkapi.GaugeObserverInstrumentKind, - sdkapi.CounterInstrumentKind, sdkapi.UpDownCounterInstrumentKind: - // Delta-oriented instruments: - return t.Includes(CumulativeTemporality) - - case sdkapi.CounterObserverInstrumentKind, sdkapi.UpDownCounterObserverInstrumentKind: - // Cumulative-oriented instruments: - return t.Includes(DeltaTemporality) - } - // Something unexpected is happening--we could panic. This - // will become an error when the exporter tries to access a - // checkpoint, presumably, so let it be. - return false -} - -type ( - constantTemporalitySelector Temporality - statelessTemporalitySelector struct{} -) - -var ( - _ TemporalitySelector = constantTemporalitySelector(0) - _ TemporalitySelector = statelessTemporalitySelector{} -) - -// ConstantTemporalitySelector returns an TemporalitySelector that returns -// a constant Temporality. -func ConstantTemporalitySelector(t Temporality) TemporalitySelector { - return constantTemporalitySelector(t) -} - -// CumulativeTemporalitySelector returns an TemporalitySelector that -// always returns CumulativeTemporality. -func CumulativeTemporalitySelector() TemporalitySelector { - return ConstantTemporalitySelector(CumulativeTemporality) -} - -// DeltaTemporalitySelector returns an TemporalitySelector that -// always returns DeltaTemporality. -func DeltaTemporalitySelector() TemporalitySelector { - return ConstantTemporalitySelector(DeltaTemporality) -} - -// StatelessTemporalitySelector returns an TemporalitySelector that -// always returns the Temporality that avoids long-term memory -// requirements. -func StatelessTemporalitySelector() TemporalitySelector { - return statelessTemporalitySelector{} -} - -// TemporalityFor implements TemporalitySelector. -func (c constantTemporalitySelector) TemporalityFor(_ *sdkapi.Descriptor, _ Kind) Temporality { - return Temporality(c) -} - -// TemporalityFor implements TemporalitySelector. -func (s statelessTemporalitySelector) TemporalityFor(desc *sdkapi.Descriptor, kind Kind) Temporality { - if kind == SumKind && desc.InstrumentKind().PrecomputedSum() { - return CumulativeTemporality - } - return DeltaTemporality -} - -// TemporalitySelector is a sub-interface of Exporter used to indicate -// whether the Processor should compute Delta or Cumulative -// Aggregations. -type TemporalitySelector interface { - // TemporalityFor should return the correct Temporality that - // should be used when exporting data for the given metric - // instrument and Aggregator kind. 
- TemporalityFor(descriptor *sdkapi.Descriptor, aggregationKind Kind) Temporality -} diff --git a/sdk/metric/export/aggregation/temporality_test.go b/sdk/metric/export/aggregation/temporality_test.go deleted file mode 100644 index ab1682b729d..00000000000 --- a/sdk/metric/export/aggregation/temporality_test.go +++ /dev/null @@ -1,73 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package aggregation - -import ( - "testing" - - "github.com/stretchr/testify/require" - - "go.opentelemetry.io/otel/sdk/metric/number" - "go.opentelemetry.io/otel/sdk/metric/sdkapi" -) - -func TestTemporalityIncludes(t *testing.T) { - require.True(t, CumulativeTemporality.Includes(CumulativeTemporality)) - require.True(t, DeltaTemporality.Includes(CumulativeTemporality|DeltaTemporality)) -} - -var deltaMemoryTemporalties = []sdkapi.InstrumentKind{ - sdkapi.CounterObserverInstrumentKind, - sdkapi.UpDownCounterObserverInstrumentKind, -} - -var cumulativeMemoryTemporalties = []sdkapi.InstrumentKind{ - sdkapi.HistogramInstrumentKind, - sdkapi.GaugeObserverInstrumentKind, - sdkapi.CounterInstrumentKind, - sdkapi.UpDownCounterInstrumentKind, -} - -func TestTemporalityMemoryRequired(t *testing.T) { - for _, kind := range deltaMemoryTemporalties { - require.True(t, DeltaTemporality.MemoryRequired(kind)) - require.False(t, CumulativeTemporality.MemoryRequired(kind)) - } - - for _, kind := range cumulativeMemoryTemporalties { - require.True(t, CumulativeTemporality.MemoryRequired(kind)) - require.False(t, DeltaTemporality.MemoryRequired(kind)) - } -} - -func TestTemporalitySelectors(t *testing.T) { - cAggTemp := CumulativeTemporalitySelector() - dAggTemp := DeltaTemporalitySelector() - sAggTemp := StatelessTemporalitySelector() - - for _, ikind := range append(deltaMemoryTemporalties, cumulativeMemoryTemporalties...) { - desc := sdkapi.NewDescriptor("instrument", ikind, number.Int64Kind, "", "") - - var akind Kind - if ikind.Adding() { - akind = SumKind - } else { - akind = HistogramKind - } - require.Equal(t, CumulativeTemporality, cAggTemp.TemporalityFor(&desc, akind)) - require.Equal(t, DeltaTemporality, dAggTemp.TemporalityFor(&desc, akind)) - require.False(t, sAggTemp.TemporalityFor(&desc, akind).MemoryRequired(ikind)) - } -} diff --git a/sdk/metric/export/metric.go b/sdk/metric/export/metric.go deleted file mode 100644 index 6168ca445ba..00000000000 --- a/sdk/metric/export/metric.go +++ /dev/null @@ -1,280 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
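TemporalitySelector is a single-method interface, so policies beyond the three built-in selectors are easy to express. A sketch of one such custom policy, requesting Delta only for true (non-precomputed) sums; it relies on the same PrecomputedSum helper used by statelessTemporalitySelector above, and the deltaForSums name is invented here:

    package main

    import (
        "go.opentelemetry.io/otel/sdk/metric/export/aggregation"
        "go.opentelemetry.io/otel/sdk/metric/sdkapi"
    )

    // deltaForSums asks for Delta temporality for sums coming from
    // synchronous (non-precomputed) instruments and Cumulative otherwise.
    type deltaForSums struct{}

    var _ aggregation.TemporalitySelector = deltaForSums{}

    func (deltaForSums) TemporalityFor(desc *sdkapi.Descriptor, kind aggregation.Kind) aggregation.Temporality {
        if kind == aggregation.SumKind && !desc.InstrumentKind().PrecomputedSum() {
            return aggregation.DeltaTemporality
        }
        return aggregation.CumulativeTemporality
    }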
-// See the License for the specific language governing permissions and -// limitations under the License. - -package export // import "go.opentelemetry.io/otel/sdk/metric/export" - -import ( - "context" - "sync" - "time" - - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/sdk/instrumentation" - "go.opentelemetry.io/otel/sdk/metric/aggregator" - "go.opentelemetry.io/otel/sdk/metric/export/aggregation" - "go.opentelemetry.io/otel/sdk/metric/sdkapi" - "go.opentelemetry.io/otel/sdk/resource" -) - -// Processor is responsible for deciding which kind of aggregation to -// use (via AggregatorSelector), gathering exported results from the -// SDK during collection, and deciding over which dimensions to group -// the exported data. -// -// The SDK supports binding only one of these interfaces, as it has -// the sole responsibility of determining which Aggregator to use for -// each record. -// -// The embedded AggregatorSelector interface is called (concurrently) -// in instrumentation context to select the appropriate Aggregator for -// an instrument. -// -// The `Process` method is called during collection in a -// single-threaded context from the SDK, after the aggregator is -// checkpointed, allowing the processor to build the set of metrics -// currently being exported. -type Processor interface { - // AggregatorSelector is responsible for selecting the - // concrete type of Aggregator used for a metric in the SDK. - // - // This may be a static decision based on fields of the - // Descriptor, or it could use an external configuration - // source to customize the treatment of each metric - // instrument. - // - // The result from AggregatorSelector.AggregatorFor should be - // the same type for a given Descriptor or else nil. The same - // type should be returned for a given descriptor, because - // Aggregators only know how to Merge with their own type. If - // the result is nil, the metric instrument will be disabled. - // - // Note that the SDK only calls AggregatorFor when new records - // require an Aggregator. This does not provide a way to - // disable metrics with active records. - AggregatorSelector - - // Process is called by the SDK once per internal record, passing the - // export Accumulation (a Descriptor, the corresponding attributes, and - // the checkpointed Aggregator). This call has no Context argument because - // it is expected to perform only computation. An SDK is not expected to - // call exporters from with Process, use a controller for that (see - // ./controllers/{pull,push}. - Process(accum Accumulation) error -} - -// AggregatorSelector supports selecting the kind of Aggregator to -// use at runtime for a specific metric instrument. -type AggregatorSelector interface { - // AggregatorFor allocates a variable number of aggregators of - // a kind suitable for the requested export. This method - // initializes a `...*Aggregator`, to support making a single - // allocation. - // - // When the call returns without initializing the *Aggregator - // to a non-nil value, the metric instrument is explicitly - // disabled. - // - // This must return a consistent type to avoid confusion in - // later stages of the metrics export process, i.e., when - // Merging or Checkpointing aggregators for a specific - // instrument. - // - // Note: This is context-free because the aggregator should - // not relate to the incoming context. This call should not - // block. 
- AggregatorFor(descriptor *sdkapi.Descriptor, agg ...*aggregator.Aggregator) -} - -// Checkpointer is the interface used by a Controller to coordinate -// the Processor with Accumulator(s) and Exporter(s). The -// StartCollection() and FinishCollection() methods start and finish a -// collection interval. Controllers call the Accumulator(s) during -// collection to process Accumulations. -type Checkpointer interface { - // Processor processes metric data for export. The Process - // method is bracketed by StartCollection and FinishCollection - // calls. The embedded AggregatorSelector can be called at - // any time. - Processor - - // Reader returns the current data set. This may be - // called before and after collection. The - // implementation is required to return the same value - // throughout its lifetime, since Reader exposes a - // sync.Locker interface. The caller is responsible for - // locking the Reader before initiating collection. - Reader() Reader - - // StartCollection begins a collection interval. - StartCollection() - - // FinishCollection ends a collection interval. - FinishCollection() error -} - -// CheckpointerFactory is an interface for producing configured -// Checkpointer instances. -type CheckpointerFactory interface { - NewCheckpointer() Checkpointer -} - -// Exporter handles presentation of the checkpoint of aggregate -// metrics. This is the final stage of a metrics export pipeline, -// where metric data are formatted for a specific system. -type Exporter interface { - // Export is called immediately after completing a collection - // pass in the SDK. - // - // The Context comes from the controller that initiated - // collection. - // - // The InstrumentationLibraryReader interface refers to the - // Processor that just completed collection. - Export(ctx context.Context, res *resource.Resource, reader InstrumentationLibraryReader) error - - // TemporalitySelector is an interface used by the Processor - // in deciding whether to compute Delta or Cumulative - // Aggregations when passing Records to this Exporter. - aggregation.TemporalitySelector -} - -// InstrumentationLibraryReader is an interface for exporters to iterate -// over one instrumentation library of metric data at a time. -type InstrumentationLibraryReader interface { - // ForEach calls the passed function once per instrumentation library, - // allowing the caller to emit metrics grouped by the library that - // produced them. - ForEach(readerFunc func(instrumentation.Library, Reader) error) error -} - -// Reader allows a controller to access a complete checkpoint of -// aggregated metrics from the Processor for a single library of -// metric data. This is passed to the Exporter which may then use -// ForEach to iterate over the collection of aggregated metrics. -type Reader interface { - // ForEach iterates over aggregated checkpoints for all - // metrics that were updated during the last collection - // period. Each aggregated checkpoint returned by the - // function parameter may return an error. - // - // The TemporalitySelector argument is used to determine - // whether the Record is computed using Delta or Cumulative - // aggregation. - // - // ForEach tolerates ErrNoData silently, as this is - // expected from the Meter implementation. Any other kind - // of error will immediately halt ForEach and return - // the error to the caller. - ForEach(tempSelector aggregation.TemporalitySelector, recordFunc func(Record) error) error - - // Locker supports locking the checkpoint set. 
Collection - // into the checkpoint set cannot take place (in case of a - // stateful processor) while it is locked. - // - // The Processor attached to the Accumulator MUST be called - // with the lock held. - sync.Locker - - // RLock acquires a read lock corresponding to this Locker. - RLock() - // RUnlock releases a read lock corresponding to this Locker. - RUnlock() -} - -// Metadata contains the common elements for exported metric data that -// are shared by the Accumulator->Processor and Processor->Exporter -// steps. -type Metadata struct { - descriptor *sdkapi.Descriptor - attrs *attribute.Set -} - -// Accumulation contains the exported data for a single metric instrument -// and attribute set, as prepared by an Accumulator for the Processor. -type Accumulation struct { - Metadata - aggregator aggregator.Aggregator -} - -// Record contains the exported data for a single metric instrument -// and attribute set, as prepared by the Processor for the Exporter. -// This includes the effective start and end time for the aggregation. -type Record struct { - Metadata - aggregation aggregation.Aggregation - start time.Time - end time.Time -} - -// Descriptor describes the metric instrument being exported. -func (m Metadata) Descriptor() *sdkapi.Descriptor { - return m.descriptor -} - -// Attributes returns the attribute set associated with the instrument and the -// aggregated data. -func (m Metadata) Attributes() *attribute.Set { - return m.attrs -} - -// NewAccumulation allows Accumulator implementations to construct new -// Accumulations to send to Processors. The Descriptor, attributes, and -// Aggregator represent aggregate metric events received over a single -// collection period. -func NewAccumulation(descriptor *sdkapi.Descriptor, attrs *attribute.Set, agg aggregator.Aggregator) Accumulation { - return Accumulation{ - Metadata: Metadata{ - descriptor: descriptor, - attrs: attrs, - }, - aggregator: agg, - } -} - -// Aggregator returns the checkpointed aggregator. It is safe to -// access the checkpointed state without locking. -func (r Accumulation) Aggregator() aggregator.Aggregator { - return r.aggregator -} - -// NewRecord allows Processor implementations to construct export records. -// The Descriptor, attributes, and Aggregator represent aggregate metric -// events received over a single collection period. -func NewRecord(descriptor *sdkapi.Descriptor, attrs *attribute.Set, agg aggregation.Aggregation, start, end time.Time) Record { - return Record{ - Metadata: Metadata{ - descriptor: descriptor, - attrs: attrs, - }, - aggregation: agg, - start: start, - end: end, - } -} - -// Aggregation returns the aggregation, an interface to the record and -// its aggregator, dependent on the kind of both the input and exporter. -func (r Record) Aggregation() aggregation.Aggregation { - return r.aggregation -} - -// StartTime is the start time of the interval covered by this aggregation. -func (r Record) StartTime() time.Time { - return r.start -} - -// EndTime is the end time of the interval covered by this aggregation. -func (r Record) EndTime() time.Time { - return r.end -} diff --git a/sdk/metric/export/metric_test.go b/sdk/metric/export/metric_test.go deleted file mode 100644 index 4a6b803b0c2..00000000000 --- a/sdk/metric/export/metric_test.go +++ /dev/null @@ -1,71 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package export
-
-import (
- "testing"
-
- "github.com/stretchr/testify/require"
-
- "go.opentelemetry.io/otel/attribute"
-)
-
-var testSlice = []attribute.KeyValue{
- attribute.String("bar", "baz"),
- attribute.Int("foo", 42),
-}
-
-func newIter(slice []attribute.KeyValue) attribute.Iterator {
- attrs := attribute.NewSet(slice...)
- return attrs.Iter()
-}
-
-func TestAttributeIterator(t *testing.T) {
- iter := newIter(testSlice)
- require.Equal(t, 2, iter.Len())
-
- require.True(t, iter.Next())
- require.Equal(t, attribute.String("bar", "baz"), iter.Attribute())
- idx, kv := iter.IndexedAttribute()
- require.Equal(t, 0, idx)
- require.Equal(t, attribute.String("bar", "baz"), kv)
- require.Equal(t, 2, iter.Len())
-
- require.True(t, iter.Next())
- require.Equal(t, attribute.Int("foo", 42), iter.Attribute())
- idx, kv = iter.IndexedAttribute()
- require.Equal(t, 1, idx)
- require.Equal(t, attribute.Int("foo", 42), kv)
- require.Equal(t, 2, iter.Len())
-
- require.False(t, iter.Next())
- require.Equal(t, 2, iter.Len())
-}
-
-func TestEmptyAttributeIterator(t *testing.T) {
- iter := newIter(nil)
- require.Equal(t, 0, iter.Len())
- require.False(t, iter.Next())
-}
-
-func TestIteratorToSlice(t *testing.T) {
- iter := newIter(testSlice)
- got := iter.ToSlice()
- require.Equal(t, testSlice, got)
-
- iter = newIter(nil)
- got = iter.ToSlice()
- require.Nil(t, got)
-}
diff --git a/sdk/metric/exporter.go b/sdk/metric/exporter.go
new file mode 100644
index 00000000000..309381fe8d3
--- /dev/null
+++ b/sdk/metric/exporter.go
@@ -0,0 +1,61 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build go1.18
+// +build go1.18
+
+package metric // import "go.opentelemetry.io/otel/sdk/metric"
+
+import (
+ "context"
+ "fmt"
+
+ "go.opentelemetry.io/otel/sdk/metric/metricdata"
+)
+
+// ErrExporterShutdown is returned if Export or Shutdown is called after an
+// Exporter has been shut down.
+var ErrExporterShutdown = fmt.Errorf("exporter is shutdown")
+
+// Exporter handles the delivery of metric data to external receivers. This is
+// the final component in the metric push pipeline.
+type Exporter interface {
+ // Export serializes and transmits metric data to a receiver.
+ //
+ // This is called synchronously; there is no concurrency safety
+ // requirement. Because of this, it is critical that all timeouts and
+ // cancellations of the passed context be honored.
+ //
+ // All retry logic must be contained in this function. The SDK does not
+ // implement any retry logic.
All errors returned by this function are + // considered unrecoverable and will be reported to a configured error + // Handler. + Export(context.Context, metricdata.ResourceMetrics) error + + // ForceFlush flushes any metric data held by an exporter. + // + // The deadline or cancellation of the passed context must be honored. An + // appropriate error should be returned in these situations. + ForceFlush(context.Context) error + + // Shutdown flushes all metric data held by an exporter and releases any + // held computational resources. + // + // The deadline or cancellation of the passed context must be honored. An + // appropriate error should be returned in these situations. + // + // After Shutdown is called, calls to Export will perform no operation and + // instead will return an error indicating the shutdown state. + Shutdown(context.Context) error +} diff --git a/sdk/metric/go.mod b/sdk/metric/go.mod index 7a5922ae9a9..1c96d819855 100644 --- a/sdk/metric/go.mod +++ b/sdk/metric/go.mod @@ -1,29 +1,28 @@ module go.opentelemetry.io/otel/sdk/metric -go 1.17 - -replace go.opentelemetry.io/otel => ../.. - -replace go.opentelemetry.io/otel/metric => ../../metric - -replace go.opentelemetry.io/otel/sdk => ../ - -replace go.opentelemetry.io/otel/trace => ../../trace +go 1.18 require ( - github.com/benbjohnson/clock v1.3.0 + github.com/go-logr/logr v1.2.3 github.com/stretchr/testify v1.7.1 go.opentelemetry.io/otel v1.10.0 - go.opentelemetry.io/otel/metric v0.31.0 - go.opentelemetry.io/otel/sdk v1.10.0 + go.opentelemetry.io/otel/metric v0.0.0-00010101000000-000000000000 + go.opentelemetry.io/otel/sdk v0.0.0-00010101000000-000000000000 ) require ( github.com/davecgh/go-spew v1.1.0 // indirect - github.com/go-logr/logr v1.2.3 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect go.opentelemetry.io/otel/trace v1.10.0 // indirect golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7 // indirect gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c // indirect ) + +replace go.opentelemetry.io/otel => ../.. + +replace go.opentelemetry.io/otel/metric => ../../metric + +replace go.opentelemetry.io/otel/trace => ../../trace + +replace go.opentelemetry.io/otel/sdk => ../ diff --git a/sdk/metric/go.sum b/sdk/metric/go.sum index 4e67ced5ad4..2e2aed63d24 100644 --- a/sdk/metric/go.sum +++ b/sdk/metric/go.sum @@ -1,5 +1,3 @@ -github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= -github.com/benbjohnson/clock v1.3.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= diff --git a/sdk/metric/histogram_stress_test.go b/sdk/metric/histogram_stress_test.go deleted file mode 100644 index abc8b967c60..00000000000 --- a/sdk/metric/histogram_stress_test.go +++ /dev/null @@ -1,67 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
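To make the Exporter contract defined in exporter.go above concrete, here is a minimal sketch of a conforming implementation; the discardExporter name, its mutex guard, and the imports (context, sync, metricdata) are illustrative assumptions, while the interface, ErrExporterShutdown, and metricdata.ResourceMetrics come from this patch:

// discardExporter drops all data while still honoring the documented
// contract: context errors are surfaced, and use after Shutdown reports
// ErrExporterShutdown.
type discardExporter struct {
    mu       sync.Mutex
    shutdown bool
}

var _ Exporter = (*discardExporter)(nil)

func (e *discardExporter) Export(ctx context.Context, rm metricdata.ResourceMetrics) error {
    e.mu.Lock()
    down := e.shutdown
    e.mu.Unlock()
    if down {
        return ErrExporterShutdown
    }
    // Honor the deadline or cancellation of the passed context.
    if err := ctx.Err(); err != nil {
        return err
    }
    // A real exporter would serialize and transmit rm here, together with
    // any retry logic, since the SDK itself never retries.
    return nil
}

func (e *discardExporter) ForceFlush(ctx context.Context) error {
    // Nothing is buffered; only the context state matters.
    return ctx.Err()
}

func (e *discardExporter) Shutdown(ctx context.Context) error {
    e.mu.Lock()
    e.shutdown = true
    e.mu.Unlock()
    return ctx.Err()
}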
-// See the License for the specific language governing permissions and -// limitations under the License. - -package metric_test - -import ( - "context" - "math/rand" - "testing" - "time" - - "github.com/stretchr/testify/require" - - "go.opentelemetry.io/otel/sdk/metric/aggregator/histogram" - "go.opentelemetry.io/otel/sdk/metric/metrictest" - "go.opentelemetry.io/otel/sdk/metric/number" - "go.opentelemetry.io/otel/sdk/metric/sdkapi" -) - -func TestStressInt64Histogram(t *testing.T) { - desc := metrictest.NewDescriptor("some_metric", sdkapi.HistogramInstrumentKind, number.Int64Kind) - - alloc := histogram.New(2, &desc, histogram.WithExplicitBoundaries([]float64{25, 50, 75})) - h, ckpt := &alloc[0], &alloc[1] - - ctx, cancelFunc := context.WithCancel(context.Background()) - defer cancelFunc() - go func() { - rnd := rand.New(rand.NewSource(time.Now().Unix())) - for { - select { - case <-ctx.Done(): - return - default: - _ = h.Update(ctx, number.NewInt64Number(rnd.Int63()%100), &desc) - } - } - }() - - startTime := time.Now() - for time.Since(startTime) < time.Second { - require.NoError(t, h.SynchronizedMove(ckpt, &desc)) - - b, _ := ckpt.Histogram() - c, _ := ckpt.Count() - - var realCount uint64 - for _, c := range b.Counts { - realCount += c - } - - if realCount != c { - t.Fail() - } - } -} diff --git a/sdk/metric/instrument.go b/sdk/metric/instrument.go new file mode 100644 index 00000000000..19f3840887a --- /dev/null +++ b/sdk/metric/instrument.go @@ -0,0 +1,76 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+//go:build go1.18
+// +build go1.18
+
+package metric // import "go.opentelemetry.io/otel/sdk/metric"
+
+import (
+ "context"
+
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/metric/instrument"
+ "go.opentelemetry.io/otel/metric/instrument/asyncfloat64"
+ "go.opentelemetry.io/otel/metric/instrument/asyncint64"
+ "go.opentelemetry.io/otel/metric/instrument/syncfloat64"
+ "go.opentelemetry.io/otel/metric/instrument/syncint64"
+ "go.opentelemetry.io/otel/sdk/metric/internal"
+)
+
+type instrumentImpl[N int64 | float64] struct {
+ instrument.Asynchronous
+ instrument.Synchronous
+
+ aggregators []internal.Aggregator[N]
+}
+
+var _ asyncfloat64.Counter = &instrumentImpl[float64]{}
+var _ asyncfloat64.UpDownCounter = &instrumentImpl[float64]{}
+var _ asyncfloat64.Gauge = &instrumentImpl[float64]{}
+var _ asyncint64.Counter = &instrumentImpl[int64]{}
+var _ asyncint64.UpDownCounter = &instrumentImpl[int64]{}
+var _ asyncint64.Gauge = &instrumentImpl[int64]{}
+var _ syncfloat64.Counter = &instrumentImpl[float64]{}
+var _ syncfloat64.UpDownCounter = &instrumentImpl[float64]{}
+var _ syncfloat64.Histogram = &instrumentImpl[float64]{}
+var _ syncint64.Counter = &instrumentImpl[int64]{}
+var _ syncint64.UpDownCounter = &instrumentImpl[int64]{}
+var _ syncint64.Histogram = &instrumentImpl[int64]{}
+
+func (i *instrumentImpl[N]) Observe(ctx context.Context, val N, attrs ...attribute.KeyValue) {
+ // Only record a value if this is being called from the MeterProvider.
+ _, ok := ctx.Value(produceKey).(struct{})
+ if !ok {
+ return
+ }
+ i.aggregate(ctx, val, attrs)
+}
+
+func (i *instrumentImpl[N]) Add(ctx context.Context, val N, attrs ...attribute.KeyValue) {
+ i.aggregate(ctx, val, attrs)
+}
+
+func (i *instrumentImpl[N]) Record(ctx context.Context, val N, attrs ...attribute.KeyValue) {
+ i.aggregate(ctx, val, attrs)
+}
+
+func (i *instrumentImpl[N]) aggregate(ctx context.Context, val N, attrs []attribute.KeyValue) {
+ if err := ctx.Err(); err != nil {
+ return
+ }
+ for _, agg := range i.aggregators {
+ agg.Aggregate(val, attribute.NewSet(attrs...))
+ }
+}
diff --git a/sdk/metric/instrument_provider.go b/sdk/metric/instrument_provider.go
new file mode 100644
index 00000000000..fd79aa74d91
--- /dev/null
+++ b/sdk/metric/instrument_provider.go
@@ -0,0 +1,275 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
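A rough usage sketch of the instrumentImpl type from instrument.go above, hand-wired here for illustration only; real instruments are built by the providers in instrument_provider.go, and NewLastValue is defined later in this patch:

// Fan one instrument's measurements out to two aggregators.
lv1 := internal.NewLastValue[int64]()
lv2 := internal.NewLastValue[int64]()
inst := &instrumentImpl[int64]{aggregators: []internal.Aggregator[int64]{lv1, lv2}}

// Both aggregators record 42 under the attribute set {user=alice}.
inst.Add(context.Background(), 42, attribute.String("user", "alice"))

// A no-op: this context does not carry produceKey, which only the SDK sets
// while running a collection cycle, so asynchronous observations made
// outside collection are dropped.
inst.Observe(context.Background(), 7)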
+ +//go:build go1.18 +// +build go1.18 + +package metric // import "go.opentelemetry.io/otel/sdk/metric" + +import ( + "fmt" + + "go.opentelemetry.io/otel/metric/instrument" + "go.opentelemetry.io/otel/metric/instrument/asyncfloat64" + "go.opentelemetry.io/otel/metric/instrument/asyncint64" + "go.opentelemetry.io/otel/metric/instrument/syncfloat64" + "go.opentelemetry.io/otel/metric/instrument/syncint64" + "go.opentelemetry.io/otel/sdk/instrumentation" + "go.opentelemetry.io/otel/sdk/metric/view" +) + +type asyncInt64Provider struct { + scope instrumentation.Scope + registry *pipelineRegistry +} + +var _ asyncint64.InstrumentProvider = asyncInt64Provider{} + +// Counter creates an instrument for recording increasing values. +func (p asyncInt64Provider) Counter(name string, opts ...instrument.Option) (asyncint64.Counter, error) { + cfg := instrument.NewConfig(opts...) + + aggs, err := createAggregators[int64](p.registry, view.Instrument{ + Scope: p.scope, + Name: name, + Description: cfg.Description(), + Kind: view.AsyncCounter, + }, cfg.Unit()) + if len(aggs) == 0 && err != nil { + err = fmt.Errorf("instrument does not match any view: %w", err) + } + + return &instrumentImpl[int64]{ + aggregators: aggs, + }, err +} + +// UpDownCounter creates an instrument for recording changes of a value. +func (p asyncInt64Provider) UpDownCounter(name string, opts ...instrument.Option) (asyncint64.UpDownCounter, error) { + cfg := instrument.NewConfig(opts...) + + aggs, err := createAggregators[int64](p.registry, view.Instrument{ + Scope: p.scope, + Name: name, + Description: cfg.Description(), + Kind: view.AsyncUpDownCounter, + }, cfg.Unit()) + if len(aggs) == 0 && err != nil { + err = fmt.Errorf("instrument does not match any view: %w", err) + } + return &instrumentImpl[int64]{ + aggregators: aggs, + }, err +} + +// Gauge creates an instrument for recording the current value. +func (p asyncInt64Provider) Gauge(name string, opts ...instrument.Option) (asyncint64.Gauge, error) { + cfg := instrument.NewConfig(opts...) + + aggs, err := createAggregators[int64](p.registry, view.Instrument{ + Scope: p.scope, + Name: name, + Description: cfg.Description(), + Kind: view.AsyncGauge, + }, cfg.Unit()) + if len(aggs) == 0 && err != nil { + err = fmt.Errorf("instrument does not match any view: %w", err) + } + return &instrumentImpl[int64]{ + aggregators: aggs, + }, err +} + +type asyncFloat64Provider struct { + scope instrumentation.Scope + registry *pipelineRegistry +} + +var _ asyncfloat64.InstrumentProvider = asyncFloat64Provider{} + +// Counter creates an instrument for recording increasing values. +func (p asyncFloat64Provider) Counter(name string, opts ...instrument.Option) (asyncfloat64.Counter, error) { + cfg := instrument.NewConfig(opts...) + + aggs, err := createAggregators[float64](p.registry, view.Instrument{ + Scope: p.scope, + Name: name, + Description: cfg.Description(), + Kind: view.AsyncCounter, + }, cfg.Unit()) + if len(aggs) == 0 && err != nil { + err = fmt.Errorf("instrument does not match any view: %w", err) + } + return &instrumentImpl[float64]{ + aggregators: aggs, + }, err +} + +// UpDownCounter creates an instrument for recording changes of a value. +func (p asyncFloat64Provider) UpDownCounter(name string, opts ...instrument.Option) (asyncfloat64.UpDownCounter, error) { + cfg := instrument.NewConfig(opts...) 
+
+ aggs, err := createAggregators[float64](p.registry, view.Instrument{
+ Scope: p.scope,
+ Name: name,
+ Description: cfg.Description(),
+ Kind: view.AsyncUpDownCounter,
+ }, cfg.Unit())
+ if len(aggs) == 0 && err != nil {
+ err = fmt.Errorf("instrument does not match any view: %w", err)
+ }
+ return &instrumentImpl[float64]{
+ aggregators: aggs,
+ }, err
+}
+
+// Gauge creates an instrument for recording the current value.
+func (p asyncFloat64Provider) Gauge(name string, opts ...instrument.Option) (asyncfloat64.Gauge, error) {
+ cfg := instrument.NewConfig(opts...)
+
+ aggs, err := createAggregators[float64](p.registry, view.Instrument{
+ Scope: p.scope,
+ Name: name,
+ Description: cfg.Description(),
+ Kind: view.AsyncGauge,
+ }, cfg.Unit())
+ if len(aggs) == 0 && err != nil {
+ err = fmt.Errorf("instrument does not match any view: %w", err)
+ }
+ return &instrumentImpl[float64]{
+ aggregators: aggs,
+ }, err
+}
+
+type syncInt64Provider struct {
+ scope instrumentation.Scope
+ registry *pipelineRegistry
+}
+
+var _ syncint64.InstrumentProvider = syncInt64Provider{}
+
+// Counter creates an instrument for recording increasing values.
+func (p syncInt64Provider) Counter(name string, opts ...instrument.Option) (syncint64.Counter, error) {
+ cfg := instrument.NewConfig(opts...)
+
+ aggs, err := createAggregators[int64](p.registry, view.Instrument{
+ Scope: p.scope,
+ Name: name,
+ Description: cfg.Description(),
+ Kind: view.SyncCounter,
+ }, cfg.Unit())
+ if len(aggs) == 0 && err != nil {
+ err = fmt.Errorf("instrument does not match any view: %w", err)
+ }
+ return &instrumentImpl[int64]{
+ aggregators: aggs,
+ }, err
+}
+
+// UpDownCounter creates an instrument for recording changes of a value.
+func (p syncInt64Provider) UpDownCounter(name string, opts ...instrument.Option) (syncint64.UpDownCounter, error) {
+ cfg := instrument.NewConfig(opts...)
+
+ aggs, err := createAggregators[int64](p.registry, view.Instrument{
+ Scope: p.scope,
+ Name: name,
+ Description: cfg.Description(),
+ Kind: view.SyncUpDownCounter,
+ }, cfg.Unit())
+ if len(aggs) == 0 && err != nil {
+ err = fmt.Errorf("instrument does not match any view: %w", err)
+ }
+ return &instrumentImpl[int64]{
+ aggregators: aggs,
+ }, err
+}
+
+// Histogram creates an instrument for recording a distribution of values.
+func (p syncInt64Provider) Histogram(name string, opts ...instrument.Option) (syncint64.Histogram, error) {
+ cfg := instrument.NewConfig(opts...)
+
+ aggs, err := createAggregators[int64](p.registry, view.Instrument{
+ Scope: p.scope,
+ Name: name,
+ Description: cfg.Description(),
+ Kind: view.SyncHistogram,
+ }, cfg.Unit())
+ if len(aggs) == 0 && err != nil {
+ err = fmt.Errorf("instrument does not match any view: %w", err)
+ }
+ return &instrumentImpl[int64]{
+ aggregators: aggs,
+ }, err
+}
+
+type syncFloat64Provider struct {
+ scope instrumentation.Scope
+ registry *pipelineRegistry
+}
+
+var _ syncfloat64.InstrumentProvider = syncFloat64Provider{}
+
+// Counter creates an instrument for recording increasing values.
+func (p syncFloat64Provider) Counter(name string, opts ...instrument.Option) (syncfloat64.Counter, error) {
+ cfg := instrument.NewConfig(opts...)
+
+ aggs, err := createAggregators[float64](p.registry, view.Instrument{
+ Scope: p.scope,
+ Name: name,
+ Description: cfg.Description(),
+ Kind: view.SyncCounter,
+ }, cfg.Unit())
+ if len(aggs) == 0 && err != nil {
+ err = fmt.Errorf("instrument does not match any view: %w", err)
+ }
+ return &instrumentImpl[float64]{
+ aggregators: aggs,
+ }, err
+}
+
+// UpDownCounter creates an instrument for recording changes of a value.
+func (p syncFloat64Provider) UpDownCounter(name string, opts ...instrument.Option) (syncfloat64.UpDownCounter, error) {
+ cfg := instrument.NewConfig(opts...)
+
+ aggs, err := createAggregators[float64](p.registry, view.Instrument{
+ Scope: p.scope,
+ Name: name,
+ Description: cfg.Description(),
+ Kind: view.SyncUpDownCounter,
+ }, cfg.Unit())
+ if len(aggs) == 0 && err != nil {
+ err = fmt.Errorf("instrument does not match any view: %w", err)
+ }
+ return &instrumentImpl[float64]{
+ aggregators: aggs,
+ }, err
+}
+
+// Histogram creates an instrument for recording a distribution of values.
+func (p syncFloat64Provider) Histogram(name string, opts ...instrument.Option) (syncfloat64.Histogram, error) {
+ cfg := instrument.NewConfig(opts...)
+
+ aggs, err := createAggregators[float64](p.registry, view.Instrument{
+ Scope: p.scope,
+ Name: name,
+ Description: cfg.Description(),
+ Kind: view.SyncHistogram,
+ }, cfg.Unit())
+ if len(aggs) == 0 && err != nil {
+ err = fmt.Errorf("instrument does not match any view: %w", err)
+ }
+ return &instrumentImpl[float64]{
+ aggregators: aggs,
+ }, err
+}
diff --git a/sdk/metric/internal/aggregator.go b/sdk/metric/internal/aggregator.go
new file mode 100644
index 00000000000..e9068a4b936
--- /dev/null
+++ b/sdk/metric/internal/aggregator.go
@@ -0,0 +1,40 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build go1.18
+// +build go1.18
+
+package internal // import "go.opentelemetry.io/otel/sdk/metric/internal"
+
+import (
+ "time"
+
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/sdk/metric/metricdata"
+)
+
+// now is used to return the current local time while allowing tests to
+// override the default time.Now function.
+var now = time.Now
+
+// Aggregator forms an aggregation from a collection of recorded measurements.
+type Aggregator[N int64 | float64] interface {
+ // Aggregate records the measurement, scoped by attr, and aggregates it
+ // into an aggregation.
+ Aggregate(measurement N, attr attribute.Set)
+
+ // Aggregation returns an Aggregation, for all the aggregated
+ // measurements made and ends an aggregation cycle.
+ Aggregation() metricdata.Aggregation
+}
diff --git a/sdk/metric/internal/aggregator_example_test.go b/sdk/metric/internal/aggregator_example_test.go
new file mode 100644
index 00000000000..dc4a0cd499e
--- /dev/null
+++ b/sdk/metric/internal/aggregator_example_test.go
@@ -0,0 +1,122 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build go1.18
+// +build go1.18
+
+package internal
+
+import (
+ "context"
+ "fmt"
+
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/metric/instrument"
+ "go.opentelemetry.io/otel/metric/instrument/syncint64"
+ "go.opentelemetry.io/otel/sdk/metric/aggregation"
+ "go.opentelemetry.io/otel/sdk/metric/metricdata"
+)
+
+type meter struct {
+ // When a reader initiates a collection, the meter would collect
+ // aggregations from each of these functions.
+ aggregations []metricdata.Aggregation
+}
+
+func (m *meter) SyncInt64() syncint64.InstrumentProvider {
+ // The same would be done for all the other instrument providers.
+ return (*syncInt64Provider)(m)
+}
+
+type syncInt64Provider meter
+
+func (p *syncInt64Provider) Counter(string, ...instrument.Option) (syncint64.Counter, error) {
+ // This is an example of how a synchronous int64 provider would create an
+ // aggregator for a new counter. At this point the provider would
+ // determine the aggregation and temporality to use based on the Reader
+ // and View configuration. Assume here these are determined to be a
+ // cumulative sum.
+
+ aggregator := NewCumulativeSum[int64](true)
+ count := inst{aggregateFunc: aggregator.Aggregate}
+
+ p.aggregations = append(p.aggregations, aggregator.Aggregation())
+
+ fmt.Printf("using %T aggregator for counter\n", aggregator)
+
+ return count, nil
+}
+
+func (p *syncInt64Provider) UpDownCounter(string, ...instrument.Option) (syncint64.UpDownCounter, error) {
+ // This is an example of how a synchronous int64 provider would create an
+ // aggregator for a new up-down counter. At this point the provider would
+ // determine the aggregation and temporality to use based on the Reader
+ // and View configuration. Assume here these are determined to be a
+ // last-value aggregation (the temporality does not affect the produced
+ // aggregations).
+
+ aggregator := NewLastValue[int64]()
+ upDownCount := inst{aggregateFunc: aggregator.Aggregate}
+
+ p.aggregations = append(p.aggregations, aggregator.Aggregation())
+
+ fmt.Printf("using %T aggregator for up-down counter\n", aggregator)
+
+ return upDownCount, nil
+}
+
+func (p *syncInt64Provider) Histogram(string, ...instrument.Option) (syncint64.Histogram, error) {
+ // This is an example of how a synchronous int64 provider would create an
+ // aggregator for a new histogram. At this point the provider would
+ // determine the aggregation and temporality to use based on the Reader
+ // and View configuration. Assume here these are determined to be a delta
+ // explicit-bucket histogram.
+ + aggregator := NewDeltaHistogram[int64](aggregation.ExplicitBucketHistogram{ + Boundaries: []float64{0, 5, 10, 25, 50, 75, 100, 250, 500, 1000}, + NoMinMax: false, + }) + hist := inst{aggregateFunc: aggregator.Aggregate} + + p.aggregations = append(p.aggregations, aggregator.Aggregation()) + + fmt.Printf("using %T aggregator for histogram\n", aggregator) + + return hist, nil +} + +// inst is a generalized int64 synchronous counter, up-down counter, and +// histogram used for demonstration purposes only. +type inst struct { + instrument.Synchronous + + aggregateFunc func(int64, attribute.Set) +} + +func (inst) Add(context.Context, int64, ...attribute.KeyValue) {} +func (inst) Record(context.Context, int64, ...attribute.KeyValue) {} + +func Example() { + m := meter{} + provider := m.SyncInt64() + + _, _ = provider.Counter("counter example") + _, _ = provider.UpDownCounter("up-down counter example") + _, _ = provider.Histogram("histogram example") + + // Output: + // using *internal.cumulativeSum[int64] aggregator for counter + // using *internal.lastValue[int64] aggregator for up-down counter + // using *internal.deltaHistogram[int64] aggregator for histogram +} diff --git a/sdk/metric/internal/aggregator_test.go b/sdk/metric/internal/aggregator_test.go new file mode 100644 index 00000000000..e93d643b642 --- /dev/null +++ b/sdk/metric/internal/aggregator_test.go @@ -0,0 +1,155 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build go1.18 +// +build go1.18 + +package internal // import "go.opentelemetry.io/otel/sdk/metric/internal" + +import ( + "strconv" + "sync" + "testing" + "time" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/sdk/metric/metricdata" + "go.opentelemetry.io/otel/sdk/metric/metricdata/metricdatatest" +) + +const ( + defaultGoroutines = 5 + defaultMeasurements = 30 + defaultCycles = 3 +) + +var ( + alice = attribute.NewSet(attribute.String("user", "alice"), attribute.Bool("admin", true)) + bob = attribute.NewSet(attribute.String("user", "bob"), attribute.Bool("admin", false)) + carol = attribute.NewSet(attribute.String("user", "carol"), attribute.Bool("admin", false)) + + monoIncr = setMap{alice: 1, bob: 10, carol: 2} + nonMonoIncr = setMap{alice: 1, bob: -1, carol: 2} + + // Sat Jan 01 2000 00:00:00 GMT+0000. + staticTime = time.Unix(946684800, 0) + staticNowFunc = func() time.Time { return staticTime } + // Pass to t.Cleanup to override the now function with staticNowFunc and + // revert once the test completes. E.g. t.Cleanup(mockTime(now)). + mockTime = func(orig func() time.Time) (cleanup func()) { + now = staticNowFunc + return func() { now = orig } + } +) + +// setMap maps attribute sets to a number. +type setMap map[attribute.Set]int + +// expectFunc is a function that returns an Aggregation of expected values for +// a cycle that contains m measurements (total across all goroutines). Each +// call advances the cycle. 
+type expectFunc func(m int) metricdata.Aggregation
+
+// aggregatorTester runs an acceptance test on an Aggregator. It will ask an
+// Aggregator to aggregate a set of values as if they were real measurements
+// made MeasurementN number of times. This will be done in GoroutineN number
+// of different goroutines. After the Aggregator has been asked to aggregate
+// all these measurements, it is validated using a passed expectFunc. This
+// set of operations is a single cycle, and the aggregatorTester will run
+// CycleN number of cycles.
+type aggregatorTester[N int64 | float64] struct {
+ // GoroutineN is the number of goroutines aggregatorTester will use to run
+ // the test.
+ GoroutineN int
+ // MeasurementN is the number of measurements that are made each cycle a
+ // goroutine runs the test.
+ MeasurementN int
+ // CycleN is the number of times a goroutine will make a set of
+ // measurements.
+ CycleN int
+}
+
+func (at *aggregatorTester[N]) Run(a Aggregator[N], incr setMap, eFunc expectFunc) func(*testing.T) {
+ m := at.MeasurementN * at.GoroutineN
+ return func(t *testing.T) {
+ for i := 0; i < at.CycleN; i++ {
+ var wg sync.WaitGroup
+ wg.Add(at.GoroutineN)
+ for i := 0; i < at.GoroutineN; i++ {
+ go func() {
+ defer wg.Done()
+ for j := 0; j < at.MeasurementN; j++ {
+ for attrs, n := range incr {
+ a.Aggregate(N(n), attrs)
+ }
+ }
+ }()
+ }
+ wg.Wait()
+
+ metricdatatest.AssertAggregationsEqual(t, eFunc(m), a.Aggregation())
+ }
+ }
+}
+
+var bmarkResults metricdata.Aggregation
+
+func benchmarkAggregatorN[N int64 | float64](b *testing.B, factory func() Aggregator[N], count int) {
+ attrs := make([]attribute.Set, count)
+ for i := range attrs {
+ attrs[i] = attribute.NewSet(attribute.Int("value", i))
+ }
+
+ b.Run("Aggregate", func(b *testing.B) {
+ agg := factory()
+ b.ReportAllocs()
+ b.ResetTimer()
+
+ for n := 0; n < b.N; n++ {
+ for _, attr := range attrs {
+ agg.Aggregate(1, attr)
+ }
+ }
+ bmarkResults = agg.Aggregation()
+ })
+
+ b.Run("Aggregations", func(b *testing.B) {
+ aggs := make([]Aggregator[N], b.N)
+ for n := range aggs {
+ a := factory()
+ for _, attr := range attrs {
+ a.Aggregate(1, attr)
+ }
+ aggs[n] = a
+ }
+
+ b.ReportAllocs()
+ b.ResetTimer()
+
+ for n := 0; n < b.N; n++ {
+ bmarkResults = aggs[n].Aggregation()
+ }
+ })
+}
+
+func benchmarkAggregator[N int64 | float64](factory func() Aggregator[N]) func(*testing.B) {
+ counts := []int{1, 10, 100}
+ return func(b *testing.B) {
+ for _, n := range counts {
+ b.Run(strconv.Itoa(n), func(b *testing.B) {
+ benchmarkAggregatorN(b, factory, n)
+ })
+ }
+ }
+}
diff --git a/sdk/metric/internal/doc.go b/sdk/metric/internal/doc.go
new file mode 100644
index 00000000000..e1aa11ab2e1
--- /dev/null
+++ b/sdk/metric/internal/doc.go
@@ -0,0 +1,18 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package internal provides types and functionality used to aggregate and
+// cycle the state of metric measurements made by the SDK. These types and
+// functionality are meant only for internal SDK use.
+package internal // import "go.opentelemetry.io/otel/sdk/metric/internal"
diff --git a/sdk/metric/internal/filter.go b/sdk/metric/internal/filter.go
new file mode 100644
index 00000000000..2407d016e90
--- /dev/null
+++ b/sdk/metric/internal/filter.go
@@ -0,0 +1,67 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build go1.18
+// +build go1.18
+
+package internal // import "go.opentelemetry.io/otel/sdk/metric/internal"
+
+import (
+ "sync"
+
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/sdk/metric/metricdata"
+)
+
+// filter is an aggregator that applies an attribute filter when aggregating. A
+// filter does not have any backing memory and must be constructed with a
+// backing Aggregator.
+type filter[N int64 | float64] struct {
+ filter func(attribute.Set) attribute.Set
+ aggregator Aggregator[N]
+
+ sync.Mutex
+ seen map[attribute.Set]attribute.Set
+}
+
+// NewFilter wraps an Aggregator with an attribute filtering function.
+func NewFilter[N int64 | float64](agg Aggregator[N], fn func(attribute.Set) attribute.Set) Aggregator[N] {
+ if fn == nil {
+ return agg
+ }
+ return &filter[N]{
+ filter: fn,
+ aggregator: agg,
+ seen: map[attribute.Set]attribute.Set{},
+ }
+}
+
+// Aggregate records the measurement, scoped by attr, and aggregates it
+// into an aggregation.
+func (f *filter[N]) Aggregate(measurement N, attr attribute.Set) {
+ // TODO (#3006): drop stale attributes from seen.
+ f.Lock()
+ defer f.Unlock()
+ fAttr, ok := f.seen[attr]
+ if !ok {
+ fAttr = f.filter(attr)
+ f.seen[attr] = fAttr
+ }
+ f.aggregator.Aggregate(measurement, fAttr)
+}
+
+// Aggregation returns an Aggregation, for all the aggregated
+// measurements made and ends an aggregation cycle.
+func (f *filter[N]) Aggregation() metricdata.Aggregation {
+ return f.aggregator.Aggregation()
+}
diff --git a/sdk/metric/internal/filter_test.go b/sdk/metric/internal/filter_test.go
new file mode 100644
index 00000000000..8ce2747a375
--- /dev/null
+++ b/sdk/metric/internal/filter_test.go
@@ -0,0 +1,202 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
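A short in-package sketch of composing NewFilter with a backing aggregator; NewLastValue is defined later in this patch, and the "user" allow-list is an arbitrary example:

// Keep only the "user" attribute; everything else is stripped before the
// backing aggregator sees the measurement.
agg := NewFilter[float64](NewLastValue[float64](), func(s attribute.Set) attribute.Set {
    out, _ := s.Filter(func(kv attribute.KeyValue) bool { return kv.Key == "user" })
    return out
})

agg.Aggregate(1.5, attribute.NewSet(
    attribute.String("user", "alice"),
    attribute.Bool("admin", true), // dropped by the filter
))
// The backing lastValue records 1.5 scoped by {user=alice} only, and the
// filtered set for this input is cached in filter.seen.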
+ +//go:build go1.18 +// +build go1.18 + +package internal // import "go.opentelemetry.io/otel/sdk/metric/internal" + +import ( + "sync" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/sdk/metric/metricdata" +) + +// This is an aggregator that has a stable output, used for testing. It does not +// follow any spec prescribed aggregation. +type testStableAggregator[N int64 | float64] struct { + sync.Mutex + values []metricdata.DataPoint[N] +} + +// Aggregate records the measurement, scoped by attr, and aggregates it +// into an aggregation. +func (a *testStableAggregator[N]) Aggregate(measurement N, attr attribute.Set) { + a.Lock() + defer a.Unlock() + + a.values = append(a.values, metricdata.DataPoint[N]{ + Attributes: attr, + Value: measurement, + }) +} + +// Aggregation returns an Aggregation, for all the aggregated +// measurements made and ends an aggregation cycle. +func (a *testStableAggregator[N]) Aggregation() metricdata.Aggregation { + return metricdata.Gauge[N]{ + DataPoints: a.values, + } +} + +func testNewFilterNoFilter[N int64 | float64](t *testing.T, agg Aggregator[N]) { + filter := NewFilter(agg, nil) + assert.Equal(t, agg, filter) +} + +func testNewFilter[N int64 | float64](t *testing.T, agg Aggregator[N]) { + f := NewFilter(agg, testAttributeFilter) + require.IsType(t, &filter[N]{}, f) + filt := f.(*filter[N]) + assert.Equal(t, agg, filt.aggregator) +} + +func testAttributeFilter(input attribute.Set) attribute.Set { + out, _ := input.Filter(func(kv attribute.KeyValue) bool { + return kv.Key == "power-level" + }) + return out +} + +func TestNewFilter(t *testing.T) { + t.Run("int64", func(t *testing.T) { + agg := &testStableAggregator[int64]{} + testNewFilterNoFilter[int64](t, agg) + testNewFilter[int64](t, agg) + }) + t.Run("float64", func(t *testing.T) { + agg := &testStableAggregator[float64]{} + testNewFilterNoFilter[float64](t, agg) + testNewFilter[float64](t, agg) + }) +} + +func testDataPoint[N int64 | float64](attr attribute.Set) metricdata.DataPoint[N] { + return metricdata.DataPoint[N]{ + Attributes: attr, + Value: 1, + } +} + +func testFilterAggregate[N int64 | float64](t *testing.T) { + testCases := []struct { + name string + inputAttr []attribute.Set + output []metricdata.DataPoint[N] + }{ + { + name: "Will filter all out", + inputAttr: []attribute.Set{ + attribute.NewSet( + attribute.String("foo", "bar"), + attribute.Float64("lifeUniverseEverything", 42.0), + ), + }, + output: []metricdata.DataPoint[N]{ + testDataPoint[N](*attribute.EmptySet()), + }, + }, + { + name: "Will keep appropriate attributes", + inputAttr: []attribute.Set{ + attribute.NewSet( + attribute.String("foo", "bar"), + attribute.Int("power-level", 9001), + attribute.Float64("lifeUniverseEverything", 42.0), + ), + attribute.NewSet( + attribute.String("foo", "bar"), + attribute.Int("power-level", 9001), + ), + }, + output: []metricdata.DataPoint[N]{ + // A real Aggregator will combine these, the testAggregator doesn't for list stability. 
+ testDataPoint[N](attribute.NewSet(attribute.Int("power-level", 9001))), + testDataPoint[N](attribute.NewSet(attribute.Int("power-level", 9001))), + }, + }, + { + name: "Will combine Aggregations", + inputAttr: []attribute.Set{ + attribute.NewSet( + attribute.String("foo", "bar"), + ), + attribute.NewSet( + attribute.Float64("lifeUniverseEverything", 42.0), + ), + }, + output: []metricdata.DataPoint[N]{ + // A real Aggregator will combine these, the testAggregator doesn't for list stability. + testDataPoint[N](*attribute.EmptySet()), + testDataPoint[N](*attribute.EmptySet()), + }, + }, + } + + for _, tt := range testCases { + t.Run(tt.name, func(t *testing.T) { + f := NewFilter[N](&testStableAggregator[N]{}, testAttributeFilter) + for _, set := range tt.inputAttr { + f.Aggregate(1, set) + } + out := f.Aggregation().(metricdata.Gauge[N]) + assert.Equal(t, tt.output, out.DataPoints) + }) + } +} + +func TestFilterAggregate(t *testing.T) { + t.Run("int64", func(t *testing.T) { + testFilterAggregate[int64](t) + }) + t.Run("float64", func(t *testing.T) { + testFilterAggregate[float64](t) + }) +} + +func testFilterConcurrent[N int64 | float64](t *testing.T) { + f := NewFilter[N](&testStableAggregator[N]{}, testAttributeFilter) + wg := &sync.WaitGroup{} + wg.Add(2) + + go func() { + f.Aggregate(1, attribute.NewSet( + attribute.String("foo", "bar"), + )) + wg.Done() + }() + + go func() { + f.Aggregate(1, attribute.NewSet( + attribute.Int("power-level", 9001), + )) + wg.Done() + }() + + wg.Wait() +} + +func TestFilterConcurrent(t *testing.T) { + t.Run("int64", func(t *testing.T) { + testFilterConcurrent[int64](t) + }) + t.Run("float64", func(t *testing.T) { + testFilterConcurrent[float64](t) + }) +} diff --git a/sdk/metric/internal/histogram.go b/sdk/metric/internal/histogram.go new file mode 100644 index 00000000000..e5298e22d6f --- /dev/null +++ b/sdk/metric/internal/histogram.go @@ -0,0 +1,243 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build go1.18 +// +build go1.18 + +package internal // import "go.opentelemetry.io/otel/sdk/metric/internal" + +import ( + "sort" + "sync" + "time" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/sdk/metric/aggregation" + "go.opentelemetry.io/otel/sdk/metric/metricdata" +) + +type buckets struct { + counts []uint64 + count uint64 + sum float64 + min, max float64 +} + +// newBuckets returns buckets with n bins. +func newBuckets(n int) *buckets { + return &buckets{counts: make([]uint64, n)} +} + +func (b *buckets) bin(idx int, value float64) { + b.counts[idx]++ + b.count++ + b.sum += value + if value < b.min { + b.min = value + } else if value > b.max { + b.max = value + } +} + +// histValues summarizes a set of measurements as an histValues with +// explicitly defined buckets. 
+type histValues[N int64 | float64] struct { + bounds []float64 + + values map[attribute.Set]*buckets + valuesMu sync.Mutex +} + +func newHistValues[N int64 | float64](bounds []float64) *histValues[N] { + // The responsibility of keeping all buckets correctly associated with the + // passed boundaries is ultimately this type's responsibility. Make a copy + // here so we can always guarantee this. Or, in the case of failure, have + // complete control over the fix. + b := make([]float64, len(bounds)) + copy(b, bounds) + sort.Float64s(b) + return &histValues[N]{ + bounds: b, + values: make(map[attribute.Set]*buckets), + } +} + +// Aggregate records the measurement value, scoped by attr, and aggregates it +// into a histogram. +func (s *histValues[N]) Aggregate(value N, attr attribute.Set) { + // Accept all types to satisfy the Aggregator interface. However, since + // the Aggregation produced by this Aggregator is only float64, convert + // here to only use this type. + v := float64(value) + + // This search will return an index in the range [0, len(s.bounds)], where + // it will return len(s.bounds) if value is greater than the last element + // of s.bounds. This aligns with the buckets in that the length of buckets + // is len(s.bounds)+1, with the last bucket representing: + // (s.bounds[len(s.bounds)-1], +∞). + idx := sort.SearchFloat64s(s.bounds, v) + + s.valuesMu.Lock() + defer s.valuesMu.Unlock() + + b, ok := s.values[attr] + if !ok { + // N+1 buckets. For example: + // + // bounds = [0, 5, 10] + // + // Then, + // + // buckets = (-∞, 0], (0, 5.0], (5.0, 10.0], (10.0, +∞) + b = newBuckets(len(s.bounds) + 1) + // Ensure min and max are recorded values (not zero), for new buckets. + b.min, b.max = v, v + s.values[attr] = b + } + b.bin(idx, v) +} + +// NewDeltaHistogram returns an Aggregator that summarizes a set of +// measurements as an histogram. Each histogram is scoped by attributes and +// the aggregation cycle the measurements were made in. +// +// Each aggregation cycle is treated independently. When the returned +// Aggregator's Aggregations method is called it will reset all histogram +// counts to zero. +func NewDeltaHistogram[N int64 | float64](cfg aggregation.ExplicitBucketHistogram) Aggregator[N] { + return &deltaHistogram[N]{ + histValues: newHistValues[N](cfg.Boundaries), + noMinMax: cfg.NoMinMax, + start: now(), + } +} + +// deltaHistogram summarizes a set of measurements made in a single +// aggregation cycle as an histogram with explicitly defined buckets. +type deltaHistogram[N int64 | float64] struct { + *histValues[N] + + noMinMax bool + start time.Time +} + +func (s *deltaHistogram[N]) Aggregation() metricdata.Aggregation { + h := metricdata.Histogram{Temporality: metricdata.DeltaTemporality} + + s.valuesMu.Lock() + defer s.valuesMu.Unlock() + + if len(s.values) == 0 { + return h + } + + // Do not allow modification of our copy of bounds. + bounds := make([]float64, len(s.bounds)) + copy(bounds, s.bounds) + t := now() + h.DataPoints = make([]metricdata.HistogramDataPoint, 0, len(s.values)) + for a, b := range s.values { + hdp := metricdata.HistogramDataPoint{ + Attributes: a, + StartTime: s.start, + Time: t, + Count: b.count, + Bounds: bounds, + BucketCounts: b.counts, + Sum: b.sum, + } + if !s.noMinMax { + hdp.Min = &b.min + hdp.Max = &b.max + } + h.DataPoints = append(h.DataPoints, hdp) + + // Unused attribute sets do not report. + delete(s.values, a) + } + // The delta collection cycle resets. 
+ s.start = t + return h +} + +// NewCumulativeHistogram returns an Aggregator that summarizes a set of +// measurements as an histogram. Each histogram is scoped by attributes. +// +// Each aggregation cycle builds from the previous, the histogram counts are +// the bucketed counts of all values aggregated since the returned Aggregator +// was created. +func NewCumulativeHistogram[N int64 | float64](cfg aggregation.ExplicitBucketHistogram) Aggregator[N] { + return &cumulativeHistogram[N]{ + histValues: newHistValues[N](cfg.Boundaries), + noMinMax: cfg.NoMinMax, + start: now(), + } +} + +// cumulativeHistogram summarizes a set of measurements made over all +// aggregation cycles as an histogram with explicitly defined buckets. +type cumulativeHistogram[N int64 | float64] struct { + *histValues[N] + + noMinMax bool + start time.Time +} + +func (s *cumulativeHistogram[N]) Aggregation() metricdata.Aggregation { + h := metricdata.Histogram{Temporality: metricdata.CumulativeTemporality} + + s.valuesMu.Lock() + defer s.valuesMu.Unlock() + + if len(s.values) == 0 { + return h + } + + // Do not allow modification of our copy of bounds. + bounds := make([]float64, len(s.bounds)) + copy(bounds, s.bounds) + t := now() + h.DataPoints = make([]metricdata.HistogramDataPoint, 0, len(s.values)) + for a, b := range s.values { + // The HistogramDataPoint field values returned need to be copies of + // the buckets value as we will keep updating them. + // + // TODO (#3047): Making copies for bounds and counts incurs a large + // memory allocation footprint. Alternatives should be explored. + counts := make([]uint64, len(b.counts)) + copy(counts, b.counts) + + hdp := metricdata.HistogramDataPoint{ + Attributes: a, + StartTime: s.start, + Time: t, + Count: b.count, + Bounds: bounds, + BucketCounts: counts, + Sum: b.sum, + } + if !s.noMinMax { + // Similar to counts, make a copy. + min, max := b.min, b.max + hdp.Min = &min + hdp.Max = &max + } + h.DataPoints = append(h.DataPoints, hdp) + // TODO (#3006): This will use an unbounded amount of memory if there + // are unbounded number of attribute sets being aggregated. Attribute + // sets that become "stale" need to be forgotten so this will not + // overload the system. + } + return h +} diff --git a/sdk/metric/internal/histogram_test.go b/sdk/metric/internal/histogram_test.go new file mode 100644 index 00000000000..edeaf8a6945 --- /dev/null +++ b/sdk/metric/internal/histogram_test.go @@ -0,0 +1,203 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
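The bucket selection in histValues.Aggregate above relies on sort.SearchFloat64s returning the index of the first boundary greater than or equal to the value, which maps every measurement onto one of the len(bounds)+1 buckets. A small worked example:

bounds := []float64{0, 5, 10} // buckets: (-∞, 0], (0, 5], (5, 10], (10, +∞)

fmt.Println(sort.SearchFloat64s(bounds, -1)) // 0: lands in (-∞, 0]
fmt.Println(sort.SearchFloat64s(bounds, 5))  // 1: boundary values belong to the lower bucket, (0, 5]
fmt.Println(sort.SearchFloat64s(bounds, 7))  // 2: lands in (5, 10]
fmt.Println(sort.SearchFloat64s(bounds, 12)) // 3: len(bounds), the overflow bucket (10, +∞)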
+
+//go:build go1.18
+// +build go1.18
+
+package internal // import "go.opentelemetry.io/otel/sdk/metric/internal"
+
+import (
+ "sort"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/sdk/metric/aggregation"
+ "go.opentelemetry.io/otel/sdk/metric/metricdata"
+ "go.opentelemetry.io/otel/sdk/metric/metricdata/metricdatatest"
+)
+
+var (
+ bounds = []float64{1, 5}
+ histConf = aggregation.ExplicitBucketHistogram{
+ Boundaries: bounds,
+ NoMinMax: false,
+ }
+)
+
+func TestHistogram(t *testing.T) {
+ t.Cleanup(mockTime(now))
+ t.Run("Int64", testHistogram[int64])
+ t.Run("Float64", testHistogram[float64])
+}
+
+func testHistogram[N int64 | float64](t *testing.T) {
+ tester := &aggregatorTester[N]{
+ GoroutineN: defaultGoroutines,
+ MeasurementN: defaultMeasurements,
+ CycleN: defaultCycles,
+ }
+
+ incr := monoIncr
+ eFunc := deltaHistExpecter(incr)
+ t.Run("Delta", tester.Run(NewDeltaHistogram[N](histConf), incr, eFunc))
+ eFunc = cumuHistExpecter(incr)
+ t.Run("Cumulative", tester.Run(NewCumulativeHistogram[N](histConf), incr, eFunc))
+}
+
+func deltaHistExpecter(incr setMap) expectFunc {
+ h := metricdata.Histogram{Temporality: metricdata.DeltaTemporality}
+ return func(m int) metricdata.Aggregation {
+ h.DataPoints = make([]metricdata.HistogramDataPoint, 0, len(incr))
+ for a, v := range incr {
+ h.DataPoints = append(h.DataPoints, hPoint(a, float64(v), uint64(m)))
+ }
+ return h
+ }
+}
+
+func cumuHistExpecter(incr setMap) expectFunc {
+ var cycle int
+ h := metricdata.Histogram{Temporality: metricdata.CumulativeTemporality}
+ return func(m int) metricdata.Aggregation {
+ cycle++
+ h.DataPoints = make([]metricdata.HistogramDataPoint, 0, len(incr))
+ for a, v := range incr {
+ h.DataPoints = append(h.DataPoints, hPoint(a, float64(v), uint64(cycle*m)))
+ }
+ return h
+ }
+}
+
+// hPoint returns a HistogramDataPoint that started and ended now, built from
+// multi measurements of value v. It includes a min and max (set to v).
+func hPoint(a attribute.Set, v float64, multi uint64) metricdata.HistogramDataPoint {
+	idx := sort.SearchFloat64s(bounds, v)
+	counts := make([]uint64, len(bounds)+1)
+	counts[idx] += multi
+	return metricdata.HistogramDataPoint{
+		Attributes:   a,
+		StartTime:    now(),
+		Time:         now(),
+		Count:        multi,
+		Bounds:       bounds,
+		BucketCounts: counts,
+		Min:          &v,
+		Max:          &v,
+		Sum:          v * float64(multi),
+	}
+}
+
+func TestBucketsBin(t *testing.T) {
+	b := newBuckets(3)
+	assertB := func(counts []uint64, count uint64, sum, min, max float64) {
+		assert.Equal(t, counts, b.counts)
+		assert.Equal(t, count, b.count)
+		assert.Equal(t, sum, b.sum)
+		assert.Equal(t, min, b.min)
+		assert.Equal(t, max, b.max)
+	}
+
+	assertB([]uint64{0, 0, 0}, 0, 0, 0, 0)
+	b.bin(1, 2)
+	assertB([]uint64{0, 1, 0}, 1, 2, 0, 2)
+	b.bin(0, -1)
+	assertB([]uint64{1, 1, 0}, 2, 1, -1, 2)
+}
+
+func testHistImmutableBounds[N int64 | float64](newA func(aggregation.ExplicitBucketHistogram) Aggregator[N], getBounds func(Aggregator[N]) []float64) func(t *testing.T) {
+	b := []float64{0, 1, 2}
+	cpB := make([]float64, len(b))
+	copy(cpB, b)
+
+	a := newA(aggregation.ExplicitBucketHistogram{Boundaries: b})
+	return func(t *testing.T) {
+		require.Equal(t, cpB, getBounds(a))
+
+		b[0] = 10
+		assert.Equal(t, cpB, getBounds(a), "modifying the bounds argument should not change the bounds")
+
+		a.Aggregate(5, alice)
+		hdp := a.Aggregation().(metricdata.Histogram).DataPoints[0]
+		hdp.Bounds[1] = 10
+		assert.Equal(t, cpB, getBounds(a), "modifying the Aggregation bounds should not change the bounds")
+	}
+}
+
+func TestHistogramImmutableBounds(t *testing.T) {
+	t.Run("Delta", testHistImmutableBounds[int64](
+		NewDeltaHistogram[int64],
+		func(a Aggregator[int64]) []float64 {
+			deltaH := a.(*deltaHistogram[int64])
+			return deltaH.bounds
+		},
+	))
+
+	t.Run("Cumulative", testHistImmutableBounds[int64](
+		NewCumulativeHistogram[int64],
+		func(a Aggregator[int64]) []float64 {
+			cumuH := a.(*cumulativeHistogram[int64])
+			return cumuH.bounds
+		},
+	))
+}
+
+func TestCumulativeHistogramImmutableCounts(t *testing.T) {
+	a := NewCumulativeHistogram[int64](histConf)
+	a.Aggregate(5, alice)
+	hdp := a.Aggregation().(metricdata.Histogram).DataPoints[0]
+
+	cumuH := a.(*cumulativeHistogram[int64])
+	require.Equal(t, hdp.BucketCounts, cumuH.values[alice].counts)
+
+	cpCounts := make([]uint64, len(hdp.BucketCounts))
+	copy(cpCounts, hdp.BucketCounts)
+	hdp.BucketCounts[0] = 10
+	assert.Equal(t, cpCounts, cumuH.values[alice].counts, "modifying the Aggregator bucket counts should not change the Aggregator")
+}
+
+func TestDeltaHistogramReset(t *testing.T) {
+	t.Cleanup(mockTime(now))
+
+	expect := metricdata.Histogram{Temporality: metricdata.DeltaTemporality}
+	a := NewDeltaHistogram[int64](histConf)
+	metricdatatest.AssertAggregationsEqual(t, expect, a.Aggregation())
+
+	a.Aggregate(1, alice)
+	expect.DataPoints = []metricdata.HistogramDataPoint{hPoint(alice, 1, 1)}
+	metricdatatest.AssertAggregationsEqual(t, expect, a.Aggregation())
+
+	// The attr set should be forgotten once Aggregation is called.
+	expect.DataPoints = nil
+	metricdatatest.AssertAggregationsEqual(t, expect, a.Aggregation())
+
+	// Aggregating another set should not affect the original (alice).
+ a.Aggregate(1, bob) + expect.DataPoints = []metricdata.HistogramDataPoint{hPoint(bob, 1, 1)} + metricdatatest.AssertAggregationsEqual(t, expect, a.Aggregation()) +} + +func BenchmarkHistogram(b *testing.B) { + b.Run("Int64", benchmarkHistogram[int64]) + b.Run("Float64", benchmarkHistogram[float64]) +} + +func benchmarkHistogram[N int64 | float64](b *testing.B) { + factory := func() Aggregator[N] { return NewDeltaHistogram[N](histConf) } + b.Run("Delta", benchmarkAggregator(factory)) + factory = func() Aggregator[N] { return NewCumulativeHistogram[N](histConf) } + b.Run("Cumulative", benchmarkAggregator(factory)) +} diff --git a/sdk/metric/internal/lastvalue.go b/sdk/metric/internal/lastvalue.go new file mode 100644 index 00000000000..48e1b426c76 --- /dev/null +++ b/sdk/metric/internal/lastvalue.go @@ -0,0 +1,77 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build go1.18 +// +build go1.18 + +package internal // import "go.opentelemetry.io/otel/sdk/metric/internal" + +import ( + "sync" + "time" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/sdk/metric/metricdata" +) + +// datapoint is timestamped measurement data. +type datapoint[N int64 | float64] struct { + timestamp time.Time + value N +} + +// lastValue summarizes a set of measurements as the last one made. +type lastValue[N int64 | float64] struct { + sync.Mutex + + values map[attribute.Set]datapoint[N] +} + +// NewLastValue returns an Aggregator that summarizes a set of measurements as +// the last one made. +func NewLastValue[N int64 | float64]() Aggregator[N] { + return &lastValue[N]{values: make(map[attribute.Set]datapoint[N])} +} + +func (s *lastValue[N]) Aggregate(value N, attr attribute.Set) { + d := datapoint[N]{timestamp: now(), value: value} + s.Lock() + s.values[attr] = d + s.Unlock() +} + +func (s *lastValue[N]) Aggregation() metricdata.Aggregation { + gauge := metricdata.Gauge[N]{} + + s.Lock() + defer s.Unlock() + + if len(s.values) == 0 { + return gauge + } + + gauge.DataPoints = make([]metricdata.DataPoint[N], 0, len(s.values)) + for a, v := range s.values { + gauge.DataPoints = append(gauge.DataPoints, metricdata.DataPoint[N]{ + Attributes: a, + // The event time is the only meaningful timestamp, StartTime is + // ignored. + Time: v.timestamp, + Value: v.value, + }) + // Do not report stale values. + delete(s.values, a) + } + return gauge +} diff --git a/sdk/metric/internal/lastvalue_test.go b/sdk/metric/internal/lastvalue_test.go new file mode 100644 index 00000000000..41b75877fe3 --- /dev/null +++ b/sdk/metric/internal/lastvalue_test.go @@ -0,0 +1,91 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build go1.18
+// +build go1.18
+
+package internal // import "go.opentelemetry.io/otel/sdk/metric/internal"
+
+import (
+	"testing"
+
+	"go.opentelemetry.io/otel/sdk/metric/metricdata"
+	"go.opentelemetry.io/otel/sdk/metric/metricdata/metricdatatest"
+)
+
+func TestLastValue(t *testing.T) {
+	t.Cleanup(mockTime(now))
+
+	t.Run("Int64", testLastValue[int64]())
+	t.Run("Float64", testLastValue[float64]())
+}
+
+func testLastValue[N int64 | float64]() func(*testing.T) {
+	tester := &aggregatorTester[N]{
+		GoroutineN:   defaultGoroutines,
+		MeasurementN: defaultMeasurements,
+		CycleN:       defaultCycles,
+	}
+
+	eFunc := func(increments setMap) expectFunc {
+		data := make([]metricdata.DataPoint[N], 0, len(increments))
+		for a, v := range increments {
+			point := metricdata.DataPoint[N]{Attributes: a, Time: now(), Value: N(v)}
+			data = append(data, point)
+		}
+		gauge := metricdata.Gauge[N]{DataPoints: data}
+		return func(int) metricdata.Aggregation { return gauge }
+	}
+	incr := monoIncr
+	return tester.Run(NewLastValue[N](), incr, eFunc(incr))
+}
+
+func testLastValueReset[N int64 | float64](t *testing.T) {
+	t.Cleanup(mockTime(now))
+
+	a := NewLastValue[N]()
+	expect := metricdata.Gauge[N]{}
+	metricdatatest.AssertAggregationsEqual(t, expect, a.Aggregation())
+
+	a.Aggregate(1, alice)
+	expect.DataPoints = []metricdata.DataPoint[N]{{
+		Attributes: alice,
+		Time:       now(),
+		Value:      1,
+	}}
+	metricdatatest.AssertAggregationsEqual(t, expect, a.Aggregation())
+
+	// The attr set should be forgotten once Aggregation is called.
+	expect.DataPoints = nil
+	metricdatatest.AssertAggregationsEqual(t, expect, a.Aggregation())
+
+	// Aggregating another set should not affect the original (alice).
+	a.Aggregate(1, bob)
+	expect.DataPoints = []metricdata.DataPoint[N]{{
+		Attributes: bob,
+		Time:       now(),
+		Value:      1,
+	}}
+	metricdatatest.AssertAggregationsEqual(t, expect, a.Aggregation())
+}
+
+func TestLastValueReset(t *testing.T) {
+	t.Run("Int64", testLastValueReset[int64])
+	t.Run("Float64", testLastValueReset[float64])
+}
+
+func BenchmarkLastValue(b *testing.B) {
+	b.Run("Int64", benchmarkAggregator(NewLastValue[int64]))
+	b.Run("Float64", benchmarkAggregator(NewLastValue[float64]))
+}
diff --git a/sdk/metric/internal/sum.go b/sdk/metric/internal/sum.go
new file mode 100644
index 00000000000..b80dcd9c40b
--- /dev/null
+++ b/sdk/metric/internal/sum.go
@@ -0,0 +1,156 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
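// Editor's sketch (not part of this patch): the delta and cumulative
// temporalities implemented by sum.go below, simulated with plain integers.
// A delta sum reports what accumulated since the last collection and then
// resets; a cumulative sum reports the running total since it was created.
// All names in this sketch are illustrative only.
package main

import "fmt"

func main() {
	delta, cumulative := 0, 0
	for cycle := 1; cycle <= 3; cycle++ {
		// Two measurements of value 1 arrive during each cycle.
		delta += 2
		cumulative += 2

		// Collection: a delta aggregator reports then resets its state; a
		// cumulative aggregator reports and keeps it.
		fmt.Println("delta:", delta, "cumulative:", cumulative)
		delta = 0
	}
	// Output:
	// delta: 2 cumulative: 2
	// delta: 2 cumulative: 4
	// delta: 2 cumulative: 6
}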
+
+//go:build go1.18
+// +build go1.18
+
+package internal // import "go.opentelemetry.io/otel/sdk/metric/internal"
+
+import (
+	"sync"
+	"time"
+
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/sdk/metric/metricdata"
+)
+
+// valueMap is the storage for all sums.
+type valueMap[N int64 | float64] struct {
+	sync.Mutex
+	values map[attribute.Set]N
+}
+
+func newValueMap[N int64 | float64]() *valueMap[N] {
+	return &valueMap[N]{values: make(map[attribute.Set]N)}
+}
+
+func (s *valueMap[N]) Aggregate(value N, attr attribute.Set) {
+	s.Lock()
+	s.values[attr] += value
+	s.Unlock()
+}
+
+// NewDeltaSum returns an Aggregator that summarizes a set of measurements as
+// their arithmetic sum. Each sum is scoped by attributes and the aggregation
+// cycle the measurements were made in.
+//
+// The monotonic value is used to communicate whether the produced Aggregation
+// is monotonic. The returned Aggregator does not make any guarantees this
+// value is accurate. It is up to the caller to ensure it.
+//
+// Each aggregation cycle is treated independently. When the returned
+// Aggregator's Aggregation method is called, it will reset all sums to zero.
+func NewDeltaSum[N int64 | float64](monotonic bool) Aggregator[N] {
+	return &deltaSum[N]{
+		valueMap:  newValueMap[N](),
+		monotonic: monotonic,
+		start:     now(),
+	}
+}
+
+// deltaSum summarizes a set of measurements made in a single aggregation
+// cycle as their arithmetic sum.
+type deltaSum[N int64 | float64] struct {
+	*valueMap[N]
+
+	monotonic bool
+	start     time.Time
+}
+
+func (s *deltaSum[N]) Aggregation() metricdata.Aggregation {
+	out := metricdata.Sum[N]{
+		Temporality: metricdata.DeltaTemporality,
+		IsMonotonic: s.monotonic,
+	}
+
+	s.Lock()
+	defer s.Unlock()
+
+	if len(s.values) == 0 {
+		return out
+	}
+
+	t := now()
+	out.DataPoints = make([]metricdata.DataPoint[N], 0, len(s.values))
+	for attr, value := range s.values {
+		out.DataPoints = append(out.DataPoints, metricdata.DataPoint[N]{
+			Attributes: attr,
+			StartTime:  s.start,
+			Time:       t,
+			Value:      value,
+		})
+		// Unused attribute sets do not report.
+		delete(s.values, attr)
+	}
+	// The delta collection cycle resets.
+	s.start = t
+	return out
+}
+
+// NewCumulativeSum returns an Aggregator that summarizes a set of
+// measurements as their arithmetic sum. Each sum is scoped by attributes.
+//
+// The monotonic value is used to communicate whether the produced Aggregation
+// is monotonic. The returned Aggregator does not make any guarantees this
+// value is accurate. It is up to the caller to ensure it.
+//
+// Each aggregation cycle builds on the previous one: the sums are the
+// arithmetic sum of all values aggregated since the returned Aggregator was
+// created.
+func NewCumulativeSum[N int64 | float64](monotonic bool) Aggregator[N] {
+	return &cumulativeSum[N]{
+		valueMap:  newValueMap[N](),
+		monotonic: monotonic,
+		start:     now(),
+	}
+}
+
+// cumulativeSum summarizes a set of measurements made over all aggregation
+// cycles as their arithmetic sum.
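+//
+// Unlike deltaSum, the values map is never cleared on collection; see the
+// TODO (#3006) note in Aggregation below.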
+type cumulativeSum[N int64 | float64] struct {
+	*valueMap[N]
+
+	monotonic bool
+	start     time.Time
+}
+
+func (s *cumulativeSum[N]) Aggregation() metricdata.Aggregation {
+	out := metricdata.Sum[N]{
+		Temporality: metricdata.CumulativeTemporality,
+		IsMonotonic: s.monotonic,
+	}
+
+	s.Lock()
+	defer s.Unlock()
+
+	if len(s.values) == 0 {
+		return out
+	}
+
+	t := now()
+	out.DataPoints = make([]metricdata.DataPoint[N], 0, len(s.values))
+	for attr, value := range s.values {
+		out.DataPoints = append(out.DataPoints, metricdata.DataPoint[N]{
+			Attributes: attr,
+			StartTime:  s.start,
+			Time:       t,
+			Value:      value,
+		})
+		// TODO (#3006): This will use an unbounded amount of memory if there
+		// is an unbounded number of attribute sets being aggregated. Attribute
+		// sets that become "stale" need to be forgotten so this will not
+		// overload the system.
+	}
+	return out
+}
diff --git a/sdk/metric/internal/sum_test.go b/sdk/metric/internal/sum_test.go
new file mode 100644
index 00000000000..668afd1a022
--- /dev/null
+++ b/sdk/metric/internal/sum_test.go
@@ -0,0 +1,135 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build go1.18
+// +build go1.18
+
+package internal // import "go.opentelemetry.io/otel/sdk/metric/internal"
+
+import (
+	"testing"
+
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/sdk/metric/metricdata"
+	"go.opentelemetry.io/otel/sdk/metric/metricdata/metricdatatest"
+)
+
+func TestSum(t *testing.T) {
+	t.Cleanup(mockTime(now))
+	t.Run("Int64", testSum[int64])
+	t.Run("Float64", testSum[float64])
+}
+
+func testSum[N int64 | float64](t *testing.T) {
+	tester := &aggregatorTester[N]{
+		GoroutineN:   defaultGoroutines,
+		MeasurementN: defaultMeasurements,
+		CycleN:       defaultCycles,
+	}
+
+	t.Run("Delta", func(t *testing.T) {
+		incr, mono := monoIncr, true
+		eFunc := deltaExpecter[N](incr, mono)
+		t.Run("Monotonic", tester.Run(NewDeltaSum[N](mono), incr, eFunc))
+
+		incr, mono = nonMonoIncr, false
+		eFunc = deltaExpecter[N](incr, mono)
+		t.Run("NonMonotonic", tester.Run(NewDeltaSum[N](mono), incr, eFunc))
+	})
+
+	t.Run("Cumulative", func(t *testing.T) {
+		incr, mono := monoIncr, true
+		eFunc := cumuExpecter[N](incr, mono)
+		t.Run("Monotonic", tester.Run(NewCumulativeSum[N](mono), incr, eFunc))
+
+		incr, mono = nonMonoIncr, false
+		eFunc = cumuExpecter[N](incr, mono)
+		t.Run("NonMonotonic", tester.Run(NewCumulativeSum[N](mono), incr, eFunc))
+	})
+}
+
+func deltaExpecter[N int64 | float64](incr setMap, mono bool) expectFunc {
+	sum := metricdata.Sum[N]{Temporality: metricdata.DeltaTemporality, IsMonotonic: mono}
+	return func(m int) metricdata.Aggregation {
+		sum.DataPoints = make([]metricdata.DataPoint[N], 0, len(incr))
+		for a, v := range incr {
+			sum.DataPoints = append(sum.DataPoints, point[N](a, N(v*m)))
+		}
+		return sum
+	}
+}
+
+func cumuExpecter[N int64 | float64](incr setMap, mono bool) expectFunc {
+	var cycle int
+	sum := metricdata.Sum[N]{Temporality: metricdata.CumulativeTemporality, IsMonotonic: mono}
+	return func(m int) metricdata.Aggregation {
+		cycle++
+		sum.DataPoints = make([]metricdata.DataPoint[N], 0, len(incr))
+		for a, v := range incr {
+			sum.DataPoints = append(sum.DataPoints, point[N](a, N(v*cycle*m)))
+		}
+		return sum
+	}
+}
+
+// point returns a DataPoint that started and ended now.
+func point[N int64 | float64](a attribute.Set, v N) metricdata.DataPoint[N] {
+	return metricdata.DataPoint[N]{
+		Attributes: a,
+		StartTime:  now(),
+		Time:       now(),
+		Value:      N(v),
+	}
+}
+
+func testDeltaSumReset[N int64 | float64](t *testing.T) {
+	t.Cleanup(mockTime(now))
+
+	expect := metricdata.Sum[N]{Temporality: metricdata.DeltaTemporality}
+	a := NewDeltaSum[N](false)
+	metricdatatest.AssertAggregationsEqual(t, expect, a.Aggregation())
+
+	a.Aggregate(1, alice)
+	expect.DataPoints = []metricdata.DataPoint[N]{point[N](alice, 1)}
+	metricdatatest.AssertAggregationsEqual(t, expect, a.Aggregation())
+
+	// The attr set should be forgotten once Aggregation is called.
+	expect.DataPoints = nil
+	metricdatatest.AssertAggregationsEqual(t, expect, a.Aggregation())
+
+	// Aggregating another set should not affect the original (alice).
+	a.Aggregate(1, bob)
+	expect.DataPoints = []metricdata.DataPoint[N]{point[N](bob, 1)}
+	metricdatatest.AssertAggregationsEqual(t, expect, a.Aggregation())
+}
+
+func TestDeltaSumReset(t *testing.T) {
+	t.Run("Int64", testDeltaSumReset[int64])
+	t.Run("Float64", testDeltaSumReset[float64])
+}
+
+func BenchmarkSum(b *testing.B) {
+	b.Run("Int64", benchmarkSum[int64])
+	b.Run("Float64", benchmarkSum[float64])
+}
+
+func benchmarkSum[N int64 | float64](b *testing.B) {
+	// The monotonic argument is only used to annotate the Sum returned from
+	// the Aggregation method. It should not affect operational performance;
+	// therefore, only monotonic=false is benchmarked here.
+	factory := func() Aggregator[N] { return NewDeltaSum[N](false) }
+	b.Run("Delta", benchmarkAggregator(factory))
+	factory = func() Aggregator[N] { return NewCumulativeSum[N](false) }
+	b.Run("Cumulative", benchmarkAggregator(factory))
+}
diff --git a/sdk/metric/manual_reader.go b/sdk/metric/manual_reader.go
new file mode 100644
index 00000000000..ec985332188
--- /dev/null
+++ b/sdk/metric/manual_reader.go
@@ -0,0 +1,134 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build go1.18
+// +build go1.18
+
+package metric // import "go.opentelemetry.io/otel/sdk/metric"
+
+import (
+	"context"
+	"fmt"
+	"sync"
+	"sync/atomic"
+
+	"go.opentelemetry.io/otel/internal/global"
+	"go.opentelemetry.io/otel/sdk/metric/aggregation"
+	"go.opentelemetry.io/otel/sdk/metric/metricdata"
+	"go.opentelemetry.io/otel/sdk/metric/view"
+)
+
+// manualReader is a simple Reader that allows an application to
+// read metrics on demand.
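+// It runs no background goroutine; collection happens only when its Collect
+// method is called.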
+type manualReader struct {
+	producer     atomic.Value
+	shutdownOnce sync.Once
+
+	temporalitySelector TemporalitySelector
+	aggregationSelector AggregationSelector
+}
+
+// Compile-time check the manualReader implements Reader and is comparable.
+var _ = map[Reader]struct{}{&manualReader{}: {}}
+
+// NewManualReader returns a Reader which is directly called to collect metrics.
+func NewManualReader(opts ...ManualReaderOption) Reader {
+	cfg := newManualReaderConfig(opts)
+	return &manualReader{
+		temporalitySelector: cfg.temporalitySelector,
+		aggregationSelector: cfg.aggregationSelector,
+	}
+}
+
+// register stores the Producer which enables the caller to read
+// metrics on demand.
+func (mr *manualReader) register(p producer) {
+	// Only register once. If producer is already set, do nothing.
+	if !mr.producer.CompareAndSwap(nil, produceHolder{produce: p.produce}) {
+		msg := "did not register manual reader"
+		global.Error(errDuplicateRegister, msg)
+	}
+}
+
+// temporality reports the Temporality for the instrument kind provided.
+func (mr *manualReader) temporality(kind view.InstrumentKind) metricdata.Temporality {
+	return mr.temporalitySelector(kind)
+}
+
+// aggregation returns what Aggregation to use for kind.
+func (mr *manualReader) aggregation(kind view.InstrumentKind) aggregation.Aggregation { // nolint:revive // import-shadow for method scoped by type.
+	return mr.aggregationSelector(kind)
+}
+
+// ForceFlush is a no-op, it always returns nil.
+func (mr *manualReader) ForceFlush(context.Context) error {
+	return nil
+}
+
+// Shutdown closes any connections and frees any resources used by the reader.
+func (mr *manualReader) Shutdown(context.Context) error {
+	err := ErrReaderShutdown
+	mr.shutdownOnce.Do(func() {
+		// Any future call to Collect will now return ErrReaderShutdown.
+		mr.producer.Store(produceHolder{
+			produce: shutdownProducer{}.produce,
+		})
+		err = nil
+	})
+	return err
+}
+
+// Collect gathers all metrics from the SDK, calling any callbacks necessary.
+// Collect will return an error if called after shutdown.
+func (mr *manualReader) Collect(ctx context.Context) (metricdata.ResourceMetrics, error) {
+	p := mr.producer.Load()
+	if p == nil {
+		return metricdata.ResourceMetrics{}, ErrReaderNotRegistered
+	}
+
+	ph, ok := p.(produceHolder)
+	if !ok {
+		// The atomic.Value is entirely in the manualReader's control so
+		// this should never happen. In the unforeseen case that it does,
+		// return an error instead of panicking so a user's code does not
+		// halt in the process.
+		err := fmt.Errorf("manual reader: invalid producer: %T", p)
+		return metricdata.ResourceMetrics{}, err
+	}
+
+	return ph.produce(ctx)
+}
+
+// manualReaderConfig contains configuration options for a ManualReader.
+type manualReaderConfig struct {
+	temporalitySelector TemporalitySelector
+	aggregationSelector AggregationSelector
+}
+
+// newManualReaderConfig returns a manualReaderConfig configured with options.
+func newManualReaderConfig(opts []ManualReaderOption) manualReaderConfig {
+	cfg := manualReaderConfig{
+		temporalitySelector: DefaultTemporalitySelector,
+		aggregationSelector: DefaultAggregationSelector,
+	}
+	for _, opt := range opts {
+		cfg = opt.applyManual(cfg)
+	}
+	return cfg
+}
+
+// ManualReaderOption applies a configuration option value to a ManualReader.
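+// WithTemporalitySelector, exercised in the tests below, is one such option.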
+type ManualReaderOption interface {
+	applyManual(manualReaderConfig) manualReaderConfig
+}
diff --git a/sdk/metric/manual_reader_test.go b/sdk/metric/manual_reader_test.go
new file mode 100644
index 00000000000..58b1a85cb1d
--- /dev/null
+++ b/sdk/metric/manual_reader_test.go
@@ -0,0 +1,77 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build go1.18
+// +build go1.18
+
+package metric // import "go.opentelemetry.io/otel/sdk/metric"
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/suite"
+
+	"go.opentelemetry.io/otel/sdk/metric/metricdata"
+	"go.opentelemetry.io/otel/sdk/metric/view"
+)
+
+func TestManualReader(t *testing.T) {
+	suite.Run(t, &readerTestSuite{Factory: func() Reader { return NewManualReader() }})
+}
+
+func BenchmarkManualReader(b *testing.B) {
+	b.Run("Collect", benchReaderCollectFunc(NewManualReader()))
+}
+
+var deltaTemporalitySelector = func(view.InstrumentKind) metricdata.Temporality { return metricdata.DeltaTemporality }
+var cumulativeTemporalitySelector = func(view.InstrumentKind) metricdata.Temporality { return metricdata.CumulativeTemporality }
+
+func TestManualReaderTemporality(t *testing.T) {
+	tests := []struct {
+		name    string
+		options []ManualReaderOption
+		// Currently only testing constant temporality. This should be
+		// expanded if we put more advanced selection in the SDK.
+		wantTemporality metricdata.Temporality
+	}{
+		{
+			name:            "default",
+			wantTemporality: metricdata.CumulativeTemporality,
+		},
+		{
+			name: "delta",
+			options: []ManualReaderOption{
+				WithTemporalitySelector(deltaTemporalitySelector),
+			},
+			wantTemporality: metricdata.DeltaTemporality,
+		},
+		{
+			name: "repeats overwrite",
+			options: []ManualReaderOption{
+				WithTemporalitySelector(deltaTemporalitySelector),
+				WithTemporalitySelector(cumulativeTemporalitySelector),
+			},
+			wantTemporality: metricdata.CumulativeTemporality,
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			var undefinedInstrument view.InstrumentKind
+			rdr := NewManualReader(tt.options...)
+			assert.Equal(t, tt.wantTemporality, rdr.temporality(undefinedInstrument))
+		})
+	}
+}
diff --git a/sdk/metric/meter.go b/sdk/metric/meter.go
new file mode 100644
index 00000000000..4f51c03308b
--- /dev/null
+++ b/sdk/metric/meter.go
@@ -0,0 +1,135 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
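// Editor's sketch (not part of this patch): on-demand collection with the
// public API this merge introduces. A ManualReader is registered with a
// MeterProvider via WithReader, a synchronous counter records a value, and
// Collect returns the aggregated metrics. Error handling is elided for
// brevity; the import path matches the patch.
package main

import (
	"context"
	"fmt"

	sdkmetric "go.opentelemetry.io/otel/sdk/metric"
)

func main() {
	rdr := sdkmetric.NewManualReader()
	mp := sdkmetric.NewMeterProvider(sdkmetric.WithReader(rdr))

	// Create a synchronous, monotonic counter and record a measurement.
	ctr, _ := mp.Meter("example").SyncInt64().Counter("requests")
	ctr.Add(context.Background(), 5)

	// Read the metrics on demand; no background collection is running.
	rm, _ := rdr.Collect(context.Background())
	fmt.Println(len(rm.ScopeMetrics)) // 1
}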
+
+//go:build go1.18
+// +build go1.18
+
+package metric // import "go.opentelemetry.io/otel/sdk/metric"
+
+import (
+	"context"
+	"sync"
+
+	"go.opentelemetry.io/otel/metric"
+	"go.opentelemetry.io/otel/metric/instrument"
+	"go.opentelemetry.io/otel/metric/instrument/asyncfloat64"
+	"go.opentelemetry.io/otel/metric/instrument/asyncint64"
+	"go.opentelemetry.io/otel/metric/instrument/syncfloat64"
+	"go.opentelemetry.io/otel/metric/instrument/syncint64"
+	"go.opentelemetry.io/otel/sdk/instrumentation"
+)
+
+// meterRegistry keeps a record of initialized meters for instrumentation
+// scopes. A meter is unique to an instrumentation scope, and if multiple
+// requests for that meter are made, a meterRegistry ensures the same instance
+// is used.
+//
+// The zero meterRegistry is empty and ready for use.
+//
+// A meterRegistry must not be copied after first use.
+//
+// All methods of a meterRegistry are safe to call concurrently.
+type meterRegistry struct {
+	sync.Mutex
+
+	meters map[instrumentation.Scope]*meter
+
+	registry *pipelineRegistry
+}
+
+// Get returns a registered meter matching the instrumentation scope if it
+// exists in the meterRegistry. Otherwise, a new meter configured for the
+// instrumentation scope is registered and then returned.
+//
+// Get is safe to call concurrently.
+func (r *meterRegistry) Get(s instrumentation.Scope) *meter {
+	r.Lock()
+	defer r.Unlock()
+
+	if r.meters == nil {
+		m := &meter{
+			Scope:    s,
+			registry: r.registry,
+		}
+		r.meters = map[instrumentation.Scope]*meter{s: m}
+		return m
+	}
+
+	m, ok := r.meters[s]
+	if ok {
+		return m
+	}
+
+	m = &meter{
+		Scope:    s,
+		registry: r.registry,
+	}
+	r.meters[s] = m
+	return m
+}
+
+// Range calls f sequentially for each meter present in the meterRegistry. If
+// f returns false, the iteration is stopped.
+//
+// Range is safe to call concurrently.
+func (r *meterRegistry) Range(f func(*meter) bool) {
+	r.Lock()
+	defer r.Unlock()
+
+	for _, m := range r.meters {
+		if !f(m) {
+			return
+		}
+	}
+}
+
+// meter handles the creation and coordination of all metric instruments. A
+// meter represents a single instrumentation scope; all metric telemetry
+// produced by an instrumentation scope will use metric instruments from a
+// single meter.
+type meter struct {
+	instrumentation.Scope
+
+	registry *pipelineRegistry
+}
+
+// Compile-time check meter implements metric.Meter.
+var _ metric.Meter = (*meter)(nil)
+
+// AsyncInt64 returns the asynchronous integer instrument provider.
+func (m *meter) AsyncInt64() asyncint64.InstrumentProvider {
+	return asyncInt64Provider{scope: m.Scope, registry: m.registry}
+}
+
+// AsyncFloat64 returns the asynchronous floating-point instrument provider.
+func (m *meter) AsyncFloat64() asyncfloat64.InstrumentProvider {
+	return asyncFloat64Provider{scope: m.Scope, registry: m.registry}
+}
+
+// RegisterCallback registers the function f to be called when any of the
+// instruments in insts are collected.
+func (m *meter) RegisterCallback(insts []instrument.Asynchronous, f func(context.Context)) error {
+	m.registry.registerCallback(f)
+	return nil
+}
+
+// SyncInt64 returns the synchronous integer instrument provider.
+func (m *meter) SyncInt64() syncint64.InstrumentProvider {
+	return syncInt64Provider{scope: m.Scope, registry: m.registry}
+}
+
+// SyncFloat64 returns the synchronous floating-point instrument provider.
+func (m *meter) SyncFloat64() syncfloat64.InstrumentProvider {
+	return syncFloat64Provider{scope: m.Scope, registry: m.registry}
+}
diff --git a/sdk/metric/meter_test.go b/sdk/metric/meter_test.go
new file mode 100644
index 00000000000..8efcbd6b261
--- /dev/null
+++ b/sdk/metric/meter_test.go
@@ -0,0 +1,517 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build go1.18
+// +build go1.18
+
+package metric
+
+import (
+	"context"
+	"sync"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/metric"
+	"go.opentelemetry.io/otel/metric/instrument"
+	"go.opentelemetry.io/otel/sdk/instrumentation"
+	"go.opentelemetry.io/otel/sdk/metric/metricdata"
+	"go.opentelemetry.io/otel/sdk/metric/metricdata/metricdatatest"
+)
+
+func TestMeterRegistry(t *testing.T) {
+	is0 := instrumentation.Scope{Name: "zero"}
+	is1 := instrumentation.Scope{Name: "one"}
+
+	r := meterRegistry{}
+	var m0 *meter
+	t.Run("ZeroValueGetDoesNotPanic", func(t *testing.T) {
+		assert.NotPanics(t, func() { m0 = r.Get(is0) })
+		assert.Equal(t, is0, m0.Scope, "uninitialized meter returned")
+	})
+
+	m01 := r.Get(is0)
+	t.Run("GetSameMeter", func(t *testing.T) {
+		assert.Samef(t, m0, m01, "returned different meters: %v", is0)
+	})
+
+	m1 := r.Get(is1)
+	t.Run("GetDifferentMeter", func(t *testing.T) {
+		assert.NotSamef(t, m0, m1, "returned same meters: %v", is1)
+	})
+
+	t.Run("RangeComplete", func(t *testing.T) {
+		var got []*meter
+		r.Range(func(m *meter) bool {
+			got = append(got, m)
+			return true
+		})
+		assert.ElementsMatch(t, []*meter{m0, m1}, got)
+	})
+
+	t.Run("RangeStopIteration", func(t *testing.T) {
+		var i int
+		r.Range(func(m *meter) bool {
+			i++
+			return false
+		})
+		assert.Equal(t, 1, i, "iteration not stopped after first false return")
+	})
+}
+
+// A meter should be able to make instruments concurrently.
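+// Twelve goroutines below create one instrument of each kind in parallel.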
+func TestMeterInstrumentConcurrency(t *testing.T) {
+	wg := &sync.WaitGroup{}
+	wg.Add(12)
+
+	m := NewMeterProvider().Meter("inst-concurrency")
+
+	go func() {
+		_, _ = m.AsyncFloat64().Counter("AFCounter")
+		wg.Done()
+	}()
+	go func() {
+		_, _ = m.AsyncFloat64().UpDownCounter("AFUpDownCounter")
+		wg.Done()
+	}()
+	go func() {
+		_, _ = m.AsyncFloat64().Gauge("AFGauge")
+		wg.Done()
+	}()
+	go func() {
+		_, _ = m.AsyncInt64().Counter("AICounter")
+		wg.Done()
+	}()
+	go func() {
+		_, _ = m.AsyncInt64().UpDownCounter("AIUpDownCounter")
+		wg.Done()
+	}()
+	go func() {
+		_, _ = m.AsyncInt64().Gauge("AIGauge")
+		wg.Done()
+	}()
+	go func() {
+		_, _ = m.SyncFloat64().Counter("SFCounter")
+		wg.Done()
+	}()
+	go func() {
+		_, _ = m.SyncFloat64().UpDownCounter("SFUpDownCounter")
+		wg.Done()
+	}()
+	go func() {
+		_, _ = m.SyncFloat64().Histogram("SFHistogram")
+		wg.Done()
+	}()
+	go func() {
+		_, _ = m.SyncInt64().Counter("SICounter")
+		wg.Done()
+	}()
+	go func() {
+		_, _ = m.SyncInt64().UpDownCounter("SIUpDownCounter")
+		wg.Done()
+	}()
+	go func() {
+		_, _ = m.SyncInt64().Histogram("SIHistogram")
+		wg.Done()
+	}()
+
+	wg.Wait()
+}
+
+// A Meter should be able to register Callbacks concurrently.
+func TestMeterCallbackCreationConcurrency(t *testing.T) {
+	wg := &sync.WaitGroup{}
+	wg.Add(2)
+
+	m := NewMeterProvider().Meter("callback-concurrency")
+
+	go func() {
+		_ = m.RegisterCallback([]instrument.Asynchronous{}, func(ctx context.Context) {})
+		wg.Done()
+	}()
+	go func() {
+		_ = m.RegisterCallback([]instrument.Asynchronous{}, func(ctx context.Context) {})
+		wg.Done()
+	}()
+	wg.Wait()
+}
+
+// Instruments should produce correct ResourceMetrics.
+func TestMeterCreatesInstruments(t *testing.T) {
+	var seven float64 = 7.0
+	testCases := []struct {
+		name string
+		fn   func(*testing.T, metric.Meter)
+		want metricdata.Metrics
+	}{
+		{
+			name: "AsyncInt64Count",
+			fn: func(t *testing.T, m metric.Meter) {
+				ctr, err := m.AsyncInt64().Counter("aint")
+				assert.NoError(t, err)
+				err = m.RegisterCallback([]instrument.Asynchronous{ctr}, func(ctx context.Context) {
+					ctr.Observe(ctx, 3)
+				})
+				assert.NoError(t, err)
+
+				// Observed outside of a callback, it should be ignored.
+				ctr.Observe(context.Background(), 19)
+			},
+			want: metricdata.Metrics{
+				Name: "aint",
+				Data: metricdata.Sum[int64]{
+					Temporality: metricdata.CumulativeTemporality,
+					IsMonotonic: true,
+					DataPoints: []metricdata.DataPoint[int64]{
+						{Value: 3},
+					},
+				},
+			},
+		},
+		{
+			name: "AsyncInt64UpDownCount",
+			fn: func(t *testing.T, m metric.Meter) {
+				ctr, err := m.AsyncInt64().UpDownCounter("aint")
+				assert.NoError(t, err)
+				err = m.RegisterCallback([]instrument.Asynchronous{ctr}, func(ctx context.Context) {
+					ctr.Observe(ctx, 11)
+				})
+				assert.NoError(t, err)
+
+				// Observed outside of a callback, it should be ignored.
+				ctr.Observe(context.Background(), 19)
+			},
+			want: metricdata.Metrics{
+				Name: "aint",
+				Data: metricdata.Sum[int64]{
+					Temporality: metricdata.CumulativeTemporality,
+					IsMonotonic: false,
+					DataPoints: []metricdata.DataPoint[int64]{
+						{Value: 11},
+					},
+				},
+			},
+		},
+		{
+			name: "AsyncInt64Gauge",
+			fn: func(t *testing.T, m metric.Meter) {
+				gauge, err := m.AsyncInt64().Gauge("agauge")
+				assert.NoError(t, err)
+				err = m.RegisterCallback([]instrument.Asynchronous{gauge}, func(ctx context.Context) {
+					gauge.Observe(ctx, 11)
+				})
+				assert.NoError(t, err)
+
+				// Observed outside of a callback, it should be ignored.
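+				// (The SDK collects asynchronous observations only when they
+				// are made inside a registered callback during collection.)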
+ gauge.Observe(context.Background(), 19) + }, + want: metricdata.Metrics{ + Name: "agauge", + Data: metricdata.Gauge[int64]{ + DataPoints: []metricdata.DataPoint[int64]{ + {Value: 11}, + }, + }, + }, + }, + { + name: "AsyncFloat64Count", + fn: func(t *testing.T, m metric.Meter) { + ctr, err := m.AsyncFloat64().Counter("afloat") + assert.NoError(t, err) + err = m.RegisterCallback([]instrument.Asynchronous{ctr}, func(ctx context.Context) { + ctr.Observe(ctx, 3) + }) + assert.NoError(t, err) + + // Observed outside of a callback, it should be ignored. + ctr.Observe(context.Background(), 19) + }, + want: metricdata.Metrics{ + Name: "afloat", + Data: metricdata.Sum[float64]{ + Temporality: metricdata.CumulativeTemporality, + IsMonotonic: true, + DataPoints: []metricdata.DataPoint[float64]{ + {Value: 3}, + }, + }, + }, + }, + { + name: "AsyncFloat64UpDownCount", + fn: func(t *testing.T, m metric.Meter) { + ctr, err := m.AsyncFloat64().UpDownCounter("afloat") + assert.NoError(t, err) + err = m.RegisterCallback([]instrument.Asynchronous{ctr}, func(ctx context.Context) { + ctr.Observe(ctx, 11) + }) + assert.NoError(t, err) + + // Observed outside of a callback, it should be ignored. + ctr.Observe(context.Background(), 19) + }, + want: metricdata.Metrics{ + Name: "afloat", + Data: metricdata.Sum[float64]{ + Temporality: metricdata.CumulativeTemporality, + IsMonotonic: false, + DataPoints: []metricdata.DataPoint[float64]{ + {Value: 11}, + }, + }, + }, + }, + { + name: "AsyncFloat64Gauge", + fn: func(t *testing.T, m metric.Meter) { + gauge, err := m.AsyncFloat64().Gauge("agauge") + assert.NoError(t, err) + err = m.RegisterCallback([]instrument.Asynchronous{gauge}, func(ctx context.Context) { + gauge.Observe(ctx, 11) + }) + assert.NoError(t, err) + + // Observed outside of a callback, it should be ignored. 
+ gauge.Observe(context.Background(), 19) + }, + want: metricdata.Metrics{ + Name: "agauge", + Data: metricdata.Gauge[float64]{ + DataPoints: []metricdata.DataPoint[float64]{ + {Value: 11}, + }, + }, + }, + }, + + { + name: "SyncInt64Count", + fn: func(t *testing.T, m metric.Meter) { + ctr, err := m.SyncInt64().Counter("sint") + assert.NoError(t, err) + + ctr.Add(context.Background(), 3) + }, + want: metricdata.Metrics{ + Name: "sint", + Data: metricdata.Sum[int64]{ + Temporality: metricdata.CumulativeTemporality, + IsMonotonic: true, + DataPoints: []metricdata.DataPoint[int64]{ + {Value: 3}, + }, + }, + }, + }, + { + name: "SyncInt64UpDownCount", + fn: func(t *testing.T, m metric.Meter) { + ctr, err := m.SyncInt64().UpDownCounter("sint") + assert.NoError(t, err) + + ctr.Add(context.Background(), 11) + }, + want: metricdata.Metrics{ + Name: "sint", + Data: metricdata.Sum[int64]{ + Temporality: metricdata.CumulativeTemporality, + IsMonotonic: false, + DataPoints: []metricdata.DataPoint[int64]{ + {Value: 11}, + }, + }, + }, + }, + { + name: "SyncInt64Histogram", + fn: func(t *testing.T, m metric.Meter) { + gauge, err := m.SyncInt64().Histogram("histogram") + assert.NoError(t, err) + + gauge.Record(context.Background(), 7) + }, + want: metricdata.Metrics{ + Name: "histogram", + Data: metricdata.Histogram{ + Temporality: metricdata.CumulativeTemporality, + DataPoints: []metricdata.HistogramDataPoint{ + { + Attributes: attribute.Set{}, + Count: 1, + Bounds: []float64{0, 5, 10, 25, 50, 75, 100, 250, 500, 1000}, + BucketCounts: []uint64{0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0}, + Min: &seven, + Max: &seven, + Sum: 7.0, + }, + }, + }, + }, + }, + { + name: "SyncFloat64Count", + fn: func(t *testing.T, m metric.Meter) { + ctr, err := m.SyncFloat64().Counter("sfloat") + assert.NoError(t, err) + + ctr.Add(context.Background(), 3) + }, + want: metricdata.Metrics{ + Name: "sfloat", + Data: metricdata.Sum[float64]{ + Temporality: metricdata.CumulativeTemporality, + IsMonotonic: true, + DataPoints: []metricdata.DataPoint[float64]{ + {Value: 3}, + }, + }, + }, + }, + { + name: "SyncFloat64UpDownCount", + fn: func(t *testing.T, m metric.Meter) { + ctr, err := m.SyncFloat64().UpDownCounter("sfloat") + assert.NoError(t, err) + + ctr.Add(context.Background(), 11) + }, + want: metricdata.Metrics{ + Name: "sfloat", + Data: metricdata.Sum[float64]{ + Temporality: metricdata.CumulativeTemporality, + IsMonotonic: false, + DataPoints: []metricdata.DataPoint[float64]{ + {Value: 11}, + }, + }, + }, + }, + { + name: "SyncFloat64Histogram", + fn: func(t *testing.T, m metric.Meter) { + gauge, err := m.SyncFloat64().Histogram("histogram") + assert.NoError(t, err) + + gauge.Record(context.Background(), 7) + }, + want: metricdata.Metrics{ + Name: "histogram", + Data: metricdata.Histogram{ + Temporality: metricdata.CumulativeTemporality, + DataPoints: []metricdata.HistogramDataPoint{ + { + Attributes: attribute.Set{}, + Count: 1, + Bounds: []float64{0, 5, 10, 25, 50, 75, 100, 250, 500, 1000}, + BucketCounts: []uint64{0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0}, + Min: &seven, + Max: &seven, + Sum: 7.0, + }, + }, + }, + }, + }, + } + + for _, tt := range testCases { + t.Run(tt.name, func(t *testing.T) { + rdr := NewManualReader() + m := NewMeterProvider(WithReader(rdr)).Meter("testInstruments") + + tt.fn(t, m) + + rm, err := rdr.Collect(context.Background()) + assert.NoError(t, err) + + require.Len(t, rm.ScopeMetrics, 1) + sm := rm.ScopeMetrics[0] + require.Len(t, sm.Metrics, 1) + got := sm.Metrics[0] + metricdatatest.AssertEqual(t, tt.want, 
got, metricdatatest.IgnoreTimestamp())
+		})
+	}
+}
+
+func TestMetersProvideScope(t *testing.T) {
+	rdr := NewManualReader()
+	mp := NewMeterProvider(WithReader(rdr))
+
+	m1 := mp.Meter("scope1")
+	ctr1, err := m1.AsyncFloat64().Counter("ctr1")
+	assert.NoError(t, err)
+	err = m1.RegisterCallback([]instrument.Asynchronous{ctr1}, func(ctx context.Context) {
+		ctr1.Observe(ctx, 5)
+	})
+	assert.NoError(t, err)
+
+	m2 := mp.Meter("scope2")
+	ctr2, err := m2.AsyncInt64().Counter("ctr2")
+	assert.NoError(t, err)
+	err = m2.RegisterCallback([]instrument.Asynchronous{ctr2}, func(ctx context.Context) {
+		ctr2.Observe(ctx, 7)
+	})
+	assert.NoError(t, err)
+
+	want := metricdata.ResourceMetrics{
+		ScopeMetrics: []metricdata.ScopeMetrics{
+			{
+				Scope: instrumentation.Scope{
+					Name: "scope1",
+				},
+				Metrics: []metricdata.Metrics{
+					{
+						Name: "ctr1",
+						Data: metricdata.Sum[float64]{
+							Temporality: metricdata.CumulativeTemporality,
+							IsMonotonic: true,
+							DataPoints: []metricdata.DataPoint[float64]{
+								{
+									Value: 5,
+								},
+							},
+						},
+					},
+				},
+			},
+			{
+				Scope: instrumentation.Scope{
+					Name: "scope2",
+				},
+				Metrics: []metricdata.Metrics{
+					{
+						Name: "ctr2",
+						Data: metricdata.Sum[int64]{
+							Temporality: metricdata.CumulativeTemporality,
+							IsMonotonic: true,
+							DataPoints: []metricdata.DataPoint[int64]{
+								{
+									Value: 7,
+								},
+							},
+						},
+					},
+				},
+			},
+		},
+	}
+
+	got, err := rdr.Collect(context.Background())
+	assert.NoError(t, err)
+	metricdatatest.AssertEqual(t, want, got, metricdatatest.IgnoreTimestamp())
+}
diff --git a/sdk/metric/metricdata/data.go b/sdk/metric/metricdata/data.go
new file mode 100644
index 00000000000..effaf71c73a
--- /dev/null
+++ b/sdk/metric/metricdata/data.go
@@ -0,0 +1,133 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build go1.18
+// +build go1.18
+
+package metricdata // import "go.opentelemetry.io/otel/sdk/metric/metricdata"
+
+import (
+	"time"
+
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/metric/unit"
+	"go.opentelemetry.io/otel/sdk/instrumentation"
+	"go.opentelemetry.io/otel/sdk/resource"
+)
+
+// ResourceMetrics is a collection of ScopeMetrics and the associated Resource
+// that created them.
+type ResourceMetrics struct {
+	// Resource represents the entity that collected the metrics.
+	Resource *resource.Resource
+	// ScopeMetrics are the collection of metrics with unique Scopes.
+	ScopeMetrics []ScopeMetrics
+}
+
+// ScopeMetrics is a collection of Metrics produced by a Meter.
+type ScopeMetrics struct {
+	// Scope is the Scope that the Meter was created with.
+	Scope instrumentation.Scope
+	// Metrics are a list of aggregations created by the Meter.
+	Metrics []Metrics
+}
+
+// Metrics is a collection of one or more aggregated timeseries from an Instrument.
+type Metrics struct {
+	// Name is the name of the Instrument that created this data.
+	Name string
+	// Description is the description of the Instrument, which can be used in documentation.
+	Description string
+	// Unit is the unit in which the Instrument reports.
+	Unit unit.Unit
+	// Data is the aggregated data from an Instrument.
+	Data Aggregation
+}
+
+// Aggregation is the store of data reported by an Instrument.
+// It will be one of: Gauge, Sum, Histogram.
+type Aggregation interface {
+	privateAggregation()
+}
+
+// Gauge represents a measurement of the current value of an instrument.
+type Gauge[N int64 | float64] struct {
+	// DataPoints represents individual aggregated measurements with unique Attributes.
+	DataPoints []DataPoint[N]
+}
+
+func (Gauge[N]) privateAggregation() {}
+
+// Sum represents the sum of all measurements of values from an instrument.
+type Sum[N int64 | float64] struct {
+	// DataPoints represents individual aggregated measurements with unique Attributes.
+	DataPoints []DataPoint[N]
+	// Temporality describes if the aggregation is reported as the change from the
+	// last report time, or the cumulative changes since a fixed start time.
+	Temporality Temporality
+	// IsMonotonic represents if this aggregation only increases or decreases.
+	IsMonotonic bool
+}
+
+func (Sum[N]) privateAggregation() {}
+
+// DataPoint is a single data point in a timeseries.
+type DataPoint[N int64 | float64] struct {
+	// Attributes is the set of key value pairs that uniquely identify the
+	// timeseries.
+	Attributes attribute.Set
+	// StartTime is when the timeseries was started. (optional)
+	StartTime time.Time `json:",omitempty"`
+	// Time is the time when the timeseries was recorded. (optional)
+	Time time.Time `json:",omitempty"`
+	// Value is the value of this data point.
+	Value N
+}
+
+// Histogram represents the histogram of all measurements of values from an instrument.
+type Histogram struct {
+	// DataPoints represents individual aggregated measurements with unique Attributes.
+	DataPoints []HistogramDataPoint
+	// Temporality describes if the aggregation is reported as the change from the
+	// last report time, or the cumulative changes since a fixed start time.
+	Temporality Temporality
+}
+
+func (Histogram) privateAggregation() {}
+
+// HistogramDataPoint is a single histogram data point in a timeseries.
+type HistogramDataPoint struct {
+	// Attributes is the set of key value pairs that uniquely identify the
+	// timeseries.
+	Attributes attribute.Set
+	// StartTime is when the timeseries was started.
+	StartTime time.Time
+	// Time is the time when the timeseries was recorded.
+	Time time.Time
+
+	// Count is the number of updates this histogram has been calculated with.
+	Count uint64
+	// Bounds are the upper bounds of the buckets of the histogram. The final
+	// +infinity boundary is implied and not included.
+	Bounds []float64
+	// BucketCounts is the count of each of the buckets.
+	BucketCounts []uint64
+
+	// Min is the minimum value recorded. (optional)
+	Min *float64 `json:",omitempty"`
+	// Max is the maximum value recorded. (optional)
+	Max *float64 `json:",omitempty"`
+	// Sum is the sum of the values recorded.
+	Sum float64
+}
diff --git a/sdk/metric/metricdata/metricdatatest/assertion.go b/sdk/metric/metricdata/metricdatatest/assertion.go
new file mode 100644
index 00000000000..4c519d15d49
--- /dev/null
+++ b/sdk/metric/metricdata/metricdatatest/assertion.go
@@ -0,0 +1,134 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build go1.18
+// +build go1.18
+
+// Package metricdatatest provides testing functionality for use with the
+// metricdata package.
+package metricdatatest // import "go.opentelemetry.io/otel/sdk/metric/metricdata/metricdatatest"
+
+import (
+	"fmt"
+	"testing"
+
+	"go.opentelemetry.io/otel/sdk/metric/metricdata"
+)
+
+// Datatypes are the concrete data-types the metricdata package provides.
+type Datatypes interface {
+	metricdata.DataPoint[float64] |
+		metricdata.DataPoint[int64] |
+		metricdata.Gauge[float64] |
+		metricdata.Gauge[int64] |
+		metricdata.Histogram |
+		metricdata.HistogramDataPoint |
+		metricdata.Metrics |
+		metricdata.ResourceMetrics |
+		metricdata.ScopeMetrics |
+		metricdata.Sum[float64] |
+		metricdata.Sum[int64]
+
+	// Interface types are not allowed in union types; therefore, the
+	// Aggregation and Value types from metricdata are not included here.
+}
+
+type config struct {
+	ignoreTimestamp bool
+}
+
+// Option allows for fine-grained control over how AssertEqual operates.
+type Option interface {
+	apply(cfg config) config
+}
+
+type fnOption func(cfg config) config
+
+func (fn fnOption) apply(cfg config) config {
+	return fn(cfg)
+}
+
+// IgnoreTimestamp disables checking if timestamps are different.
+func IgnoreTimestamp() Option {
+	return fnOption(func(cfg config) config {
+		cfg.ignoreTimestamp = true
+		return cfg
+	})
+}
+
+// AssertEqual asserts that the two concrete data-types from the metricdata
+// package are equal.
+func AssertEqual[T Datatypes](t *testing.T, expected, actual T, opts ...Option) bool {
+	t.Helper()
+
+	cfg := config{}
+	for _, opt := range opts {
+		cfg = opt.apply(cfg)
+	}
+
+	// Generic types cannot be type asserted. Use an interface instead.
+	aIface := interface{}(actual)
+
+	var r []string
+	switch e := interface{}(expected).(type) {
+	case metricdata.DataPoint[int64]:
+		r = equalDataPoints(e, aIface.(metricdata.DataPoint[int64]), cfg)
+	case metricdata.DataPoint[float64]:
+		r = equalDataPoints(e, aIface.(metricdata.DataPoint[float64]), cfg)
+	case metricdata.Gauge[int64]:
+		r = equalGauges(e, aIface.(metricdata.Gauge[int64]), cfg)
+	case metricdata.Gauge[float64]:
+		r = equalGauges(e, aIface.(metricdata.Gauge[float64]), cfg)
+	case metricdata.Histogram:
+		r = equalHistograms(e, aIface.(metricdata.Histogram), cfg)
+	case metricdata.HistogramDataPoint:
+		r = equalHistogramDataPoints(e, aIface.(metricdata.HistogramDataPoint), cfg)
+	case metricdata.Metrics:
+		r = equalMetrics(e, aIface.(metricdata.Metrics), cfg)
+	case metricdata.ResourceMetrics:
+		r = equalResourceMetrics(e, aIface.(metricdata.ResourceMetrics), cfg)
+	case metricdata.ScopeMetrics:
+		r = equalScopeMetrics(e, aIface.(metricdata.ScopeMetrics), cfg)
+	case metricdata.Sum[int64]:
+		r = equalSums(e, aIface.(metricdata.Sum[int64]), cfg)
+	case metricdata.Sum[float64]:
+		r = equalSums(e, aIface.(metricdata.Sum[float64]), cfg)
+	default:
+		// We control all types passed to this function; panic to signal
+		// developers early that they changed things in an incompatible way.
+ panic(fmt.Sprintf("unknown types: %T", expected)) + } + + if len(r) > 0 { + t.Error(r) + return false + } + return true +} + +// AssertAggregationsEqual asserts that two Aggregations are equal. +func AssertAggregationsEqual(t *testing.T, expected, actual metricdata.Aggregation, opts ...Option) bool { + t.Helper() + + cfg := config{} + for _, opt := range opts { + cfg = opt.apply(cfg) + } + + if r := equalAggregations(expected, actual, cfg); len(r) > 0 { + t.Error(r) + return false + } + return true +} diff --git a/sdk/metric/metricdata/metricdatatest/assertion_fail_test.go b/sdk/metric/metricdata/metricdatatest/assertion_fail_test.go new file mode 100644 index 00000000000..fffbe6421be --- /dev/null +++ b/sdk/metric/metricdata/metricdatatest/assertion_fail_test.go @@ -0,0 +1,59 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build go1.18 && tests_fail +// +build go1.18,tests_fail + +package metricdatatest // import "go.opentelemetry.io/otel/sdk/metric/metricdata/metricdatatest" + +import ( + "testing" +) + +// These tests are used to develop the failure messages of this package's +// assertions. They can be run with the following. +// +// go test -tags tests_fail ./... + +func testFailDatatype[T Datatypes](a, b T) func(*testing.T) { + return func(t *testing.T) { + AssertEqual(t, a, b) + } +} + +func TestFailAssertEqual(t *testing.T) { + t.Run("ResourceMetrics", testFailDatatype(resourceMetricsA, resourceMetricsB)) + t.Run("ScopeMetrics", testFailDatatype(scopeMetricsA, scopeMetricsB)) + t.Run("Metrics", testFailDatatype(metricsA, metricsB)) + t.Run("Histogram", testFailDatatype(histogramA, histogramB)) + t.Run("SumInt64", testFailDatatype(sumInt64A, sumInt64B)) + t.Run("SumFloat64", testFailDatatype(sumFloat64A, sumFloat64B)) + t.Run("GaugeInt64", testFailDatatype(gaugeInt64A, gaugeInt64B)) + t.Run("GaugeFloat64", testFailDatatype(gaugeFloat64A, gaugeFloat64B)) + t.Run("HistogramDataPoint", testFailDatatype(histogramDataPointA, histogramDataPointB)) + t.Run("DataPointInt64", testFailDatatype(dataPointInt64A, dataPointInt64B)) + t.Run("DataPointFloat64", testFailDatatype(dataPointFloat64A, dataPointFloat64B)) + +} + +func TestFailAssertAggregationsEqual(t *testing.T) { + AssertAggregationsEqual(t, sumInt64A, nil) + AssertAggregationsEqual(t, sumFloat64A, gaugeFloat64A) + AssertAggregationsEqual(t, unknownAggregation{}, unknownAggregation{}) + AssertAggregationsEqual(t, sumInt64A, sumInt64B) + AssertAggregationsEqual(t, sumFloat64A, sumFloat64B) + AssertAggregationsEqual(t, gaugeInt64A, gaugeInt64B) + AssertAggregationsEqual(t, gaugeFloat64A, gaugeFloat64B) + AssertAggregationsEqual(t, histogramA, histogramB) +} diff --git a/sdk/metric/metricdata/metricdatatest/assertion_test.go b/sdk/metric/metricdata/metricdatatest/assertion_test.go new file mode 100644 index 00000000000..71331f5a945 --- /dev/null +++ b/sdk/metric/metricdata/metricdatatest/assertion_test.go @@ -0,0 +1,319 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed 
under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build go1.18 +// +build go1.18 + +package metricdatatest // import "go.opentelemetry.io/otel/sdk/metric/metricdata/metricdatatest" + +import ( + "testing" + "time" + + "github.com/stretchr/testify/assert" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/metric/unit" + "go.opentelemetry.io/otel/sdk/instrumentation" + "go.opentelemetry.io/otel/sdk/metric/metricdata" + "go.opentelemetry.io/otel/sdk/resource" +) + +var ( + attrA = attribute.NewSet(attribute.Bool("A", true)) + attrB = attribute.NewSet(attribute.Bool("B", true)) + + startA = time.Now() + startB = startA.Add(time.Millisecond) + endA = startA.Add(time.Second) + endB = startB.Add(time.Second) + + dataPointInt64A = metricdata.DataPoint[int64]{ + Attributes: attrA, + StartTime: startA, + Time: endA, + Value: -1, + } + dataPointFloat64A = metricdata.DataPoint[float64]{ + Attributes: attrA, + StartTime: startA, + Time: endA, + Value: -1.0, + } + dataPointInt64B = metricdata.DataPoint[int64]{ + Attributes: attrB, + StartTime: startB, + Time: endB, + Value: 2, + } + dataPointFloat64B = metricdata.DataPoint[float64]{ + Attributes: attrB, + StartTime: startB, + Time: endB, + Value: 2.0, + } + dataPointInt64C = metricdata.DataPoint[int64]{ + Attributes: attrA, + StartTime: startB, + Time: endB, + Value: -1, + } + dataPointFloat64C = metricdata.DataPoint[float64]{ + Attributes: attrA, + StartTime: startB, + Time: endB, + Value: -1.0, + } + + max, min = 99.0, 3. 
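+	// max and min are referenced by pointer in histogramDataPointB below.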
+ histogramDataPointA = metricdata.HistogramDataPoint{ + Attributes: attrA, + StartTime: startA, + Time: endA, + Count: 2, + Bounds: []float64{0, 10}, + BucketCounts: []uint64{1, 1}, + Sum: 2, + } + histogramDataPointB = metricdata.HistogramDataPoint{ + Attributes: attrB, + StartTime: startB, + Time: endB, + Count: 3, + Bounds: []float64{0, 10, 100}, + BucketCounts: []uint64{1, 1, 1}, + Max: &max, + Min: &min, + Sum: 3, + } + histogramDataPointC = metricdata.HistogramDataPoint{ + Attributes: attrA, + StartTime: startB, + Time: endB, + Count: 2, + Bounds: []float64{0, 10}, + BucketCounts: []uint64{1, 1}, + Sum: 2, + } + + gaugeInt64A = metricdata.Gauge[int64]{ + DataPoints: []metricdata.DataPoint[int64]{dataPointInt64A}, + } + gaugeFloat64A = metricdata.Gauge[float64]{ + DataPoints: []metricdata.DataPoint[float64]{dataPointFloat64A}, + } + gaugeInt64B = metricdata.Gauge[int64]{ + DataPoints: []metricdata.DataPoint[int64]{dataPointInt64B}, + } + gaugeFloat64B = metricdata.Gauge[float64]{ + DataPoints: []metricdata.DataPoint[float64]{dataPointFloat64B}, + } + gaugeInt64C = metricdata.Gauge[int64]{ + DataPoints: []metricdata.DataPoint[int64]{dataPointInt64C}, + } + gaugeFloat64C = metricdata.Gauge[float64]{ + DataPoints: []metricdata.DataPoint[float64]{dataPointFloat64C}, + } + + sumInt64A = metricdata.Sum[int64]{ + Temporality: metricdata.CumulativeTemporality, + IsMonotonic: true, + DataPoints: []metricdata.DataPoint[int64]{dataPointInt64A}, + } + sumFloat64A = metricdata.Sum[float64]{ + Temporality: metricdata.CumulativeTemporality, + IsMonotonic: true, + DataPoints: []metricdata.DataPoint[float64]{dataPointFloat64A}, + } + sumInt64B = metricdata.Sum[int64]{ + Temporality: metricdata.CumulativeTemporality, + IsMonotonic: true, + DataPoints: []metricdata.DataPoint[int64]{dataPointInt64B}, + } + sumFloat64B = metricdata.Sum[float64]{ + Temporality: metricdata.CumulativeTemporality, + IsMonotonic: true, + DataPoints: []metricdata.DataPoint[float64]{dataPointFloat64B}, + } + sumInt64C = metricdata.Sum[int64]{ + Temporality: metricdata.CumulativeTemporality, + IsMonotonic: true, + DataPoints: []metricdata.DataPoint[int64]{dataPointInt64C}, + } + sumFloat64C = metricdata.Sum[float64]{ + Temporality: metricdata.CumulativeTemporality, + IsMonotonic: true, + DataPoints: []metricdata.DataPoint[float64]{dataPointFloat64C}, + } + + histogramA = metricdata.Histogram{ + Temporality: metricdata.CumulativeTemporality, + DataPoints: []metricdata.HistogramDataPoint{histogramDataPointA}, + } + histogramB = metricdata.Histogram{ + Temporality: metricdata.DeltaTemporality, + DataPoints: []metricdata.HistogramDataPoint{histogramDataPointB}, + } + histogramC = metricdata.Histogram{ + Temporality: metricdata.CumulativeTemporality, + DataPoints: []metricdata.HistogramDataPoint{histogramDataPointC}, + } + + metricsA = metricdata.Metrics{ + Name: "A", + Description: "A desc", + Unit: unit.Dimensionless, + Data: sumInt64A, + } + metricsB = metricdata.Metrics{ + Name: "B", + Description: "B desc", + Unit: unit.Bytes, + Data: gaugeFloat64B, + } + metricsC = metricdata.Metrics{ + Name: "A", + Description: "A desc", + Unit: unit.Dimensionless, + Data: sumInt64C, + } + + scopeMetricsA = metricdata.ScopeMetrics{ + Scope: instrumentation.Scope{Name: "A"}, + Metrics: []metricdata.Metrics{metricsA}, + } + scopeMetricsB = metricdata.ScopeMetrics{ + Scope: instrumentation.Scope{Name: "B"}, + Metrics: []metricdata.Metrics{metricsB}, + } + scopeMetricsC = metricdata.ScopeMetrics{ + Scope: instrumentation.Scope{Name: "A"}, + 
Metrics: []metricdata.Metrics{metricsC}, + } + + resourceMetricsA = metricdata.ResourceMetrics{ + Resource: resource.NewSchemaless(attribute.String("resource", "A")), + ScopeMetrics: []metricdata.ScopeMetrics{scopeMetricsA}, + } + resourceMetricsB = metricdata.ResourceMetrics{ + Resource: resource.NewSchemaless(attribute.String("resource", "B")), + ScopeMetrics: []metricdata.ScopeMetrics{scopeMetricsB}, + } + resourceMetricsC = metricdata.ResourceMetrics{ + Resource: resource.NewSchemaless(attribute.String("resource", "A")), + ScopeMetrics: []metricdata.ScopeMetrics{scopeMetricsC}, + } +) + +type equalFunc[T Datatypes] func(T, T, config) []string + +func testDatatype[T Datatypes](a, b T, f equalFunc[T]) func(*testing.T) { + return func(t *testing.T) { + AssertEqual(t, a, a) + AssertEqual(t, b, b) + + r := f(a, b, config{}) + assert.Greaterf(t, len(r), 0, "%v == %v", a, b) + } +} + +func testDatatypeIgnoreTime[T Datatypes](a, b T, f equalFunc[T]) func(*testing.T) { + return func(t *testing.T) { + AssertEqual(t, a, a) + AssertEqual(t, b, b) + + r := f(a, b, config{ignoreTimestamp: true}) + assert.Equalf(t, len(r), 0, "%v == %v", a, b) + } +} + +func TestAssertEqual(t *testing.T) { + t.Run("ResourceMetrics", testDatatype(resourceMetricsA, resourceMetricsB, equalResourceMetrics)) + t.Run("ScopeMetrics", testDatatype(scopeMetricsA, scopeMetricsB, equalScopeMetrics)) + t.Run("Metrics", testDatatype(metricsA, metricsB, equalMetrics)) + t.Run("Histogram", testDatatype(histogramA, histogramB, equalHistograms)) + t.Run("SumInt64", testDatatype(sumInt64A, sumInt64B, equalSums[int64])) + t.Run("SumFloat64", testDatatype(sumFloat64A, sumFloat64B, equalSums[float64])) + t.Run("GaugeInt64", testDatatype(gaugeInt64A, gaugeInt64B, equalGauges[int64])) + t.Run("GaugeFloat64", testDatatype(gaugeFloat64A, gaugeFloat64B, equalGauges[float64])) + t.Run("HistogramDataPoint", testDatatype(histogramDataPointA, histogramDataPointB, equalHistogramDataPoints)) + t.Run("DataPointInt64", testDatatype(dataPointInt64A, dataPointInt64B, equalDataPoints[int64])) + t.Run("DataPointFloat64", testDatatype(dataPointFloat64A, dataPointFloat64B, equalDataPoints[float64])) +} + +func TestAssertEqualIgnoreTime(t *testing.T) { + t.Run("ResourceMetrics", testDatatypeIgnoreTime(resourceMetricsA, resourceMetricsC, equalResourceMetrics)) + t.Run("ScopeMetrics", testDatatypeIgnoreTime(scopeMetricsA, scopeMetricsC, equalScopeMetrics)) + t.Run("Metrics", testDatatypeIgnoreTime(metricsA, metricsC, equalMetrics)) + t.Run("Histogram", testDatatypeIgnoreTime(histogramA, histogramC, equalHistograms)) + t.Run("SumInt64", testDatatypeIgnoreTime(sumInt64A, sumInt64C, equalSums[int64])) + t.Run("SumFloat64", testDatatypeIgnoreTime(sumFloat64A, sumFloat64C, equalSums[float64])) + t.Run("GaugeInt64", testDatatypeIgnoreTime(gaugeInt64A, gaugeInt64C, equalGauges[int64])) + t.Run("GaugeFloat64", testDatatypeIgnoreTime(gaugeFloat64A, gaugeFloat64C, equalGauges[float64])) + t.Run("HistogramDataPoint", testDatatypeIgnoreTime(histogramDataPointA, histogramDataPointC, equalHistogramDataPoints)) + t.Run("DataPointInt64", testDatatypeIgnoreTime(dataPointInt64A, dataPointInt64C, equalDataPoints[int64])) + t.Run("DataPointFloat64", testDatatypeIgnoreTime(dataPointFloat64A, dataPointFloat64C, equalDataPoints[float64])) +} + +type unknownAggregation struct { + metricdata.Aggregation +} + +func TestAssertAggregationsEqual(t *testing.T) { + AssertAggregationsEqual(t, nil, nil) + AssertAggregationsEqual(t, sumInt64A, sumInt64A) + AssertAggregationsEqual(t, 
sumFloat64A, sumFloat64A) + AssertAggregationsEqual(t, gaugeInt64A, gaugeInt64A) + AssertAggregationsEqual(t, gaugeFloat64A, gaugeFloat64A) + AssertAggregationsEqual(t, histogramA, histogramA) + + r := equalAggregations(sumInt64A, nil, config{}) + assert.Len(t, r, 1, "should return nil comparison mismatch only") + + r = equalAggregations(sumInt64A, gaugeInt64A, config{}) + assert.Len(t, r, 1, "should return with type mismatch only") + + r = equalAggregations(unknownAggregation{}, unknownAggregation{}, config{}) + assert.Len(t, r, 1, "should return with unknown aggregation only") + + r = equalAggregations(sumInt64A, sumInt64B, config{}) + assert.Greaterf(t, len(r), 0, "%v == %v", sumInt64A, sumInt64B) + + r = equalAggregations(sumInt64A, sumInt64C, config{ignoreTimestamp: true}) + assert.Equalf(t, len(r), 0, "%v == %v", sumInt64A, sumInt64C) + + r = equalAggregations(sumFloat64A, sumFloat64B, config{}) + assert.Greaterf(t, len(r), 0, "%v == %v", sumFloat64A, sumFloat64B) + + r = equalAggregations(sumFloat64A, sumFloat64C, config{ignoreTimestamp: true}) + assert.Equalf(t, len(r), 0, "%v == %v", sumFloat64A, sumFloat64C) + + r = equalAggregations(gaugeInt64A, gaugeInt64B, config{}) + assert.Greaterf(t, len(r), 0, "%v == %v", gaugeInt64A, gaugeInt64B) + + r = equalAggregations(gaugeInt64A, gaugeInt64C, config{ignoreTimestamp: true}) + assert.Equalf(t, len(r), 0, "%v == %v", gaugeInt64A, gaugeInt64C) + + r = equalAggregations(gaugeFloat64A, gaugeFloat64B, config{}) + assert.Greaterf(t, len(r), 0, "%v == %v", gaugeFloat64A, gaugeFloat64B) + + r = equalAggregations(gaugeFloat64A, gaugeFloat64C, config{ignoreTimestamp: true}) + assert.Equalf(t, len(r), 0, "%v == %v", gaugeFloat64A, gaugeFloat64C) + + r = equalAggregations(histogramA, histogramB, config{}) + assert.Greaterf(t, len(r), 0, "%v == %v", histogramA, histogramB) + + r = equalAggregations(histogramA, histogramC, config{ignoreTimestamp: true}) + assert.Equalf(t, len(r), 0, "%v == %v", histogramA, histogramC) +} diff --git a/sdk/metric/metricdata/metricdatatest/comparisons.go b/sdk/metric/metricdata/metricdatatest/comparisons.go new file mode 100644 index 00000000000..4c40f3bf67d --- /dev/null +++ b/sdk/metric/metricdata/metricdatatest/comparisons.go @@ -0,0 +1,363 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build go1.18 +// +build go1.18 + +package metricdatatest // import "go.opentelemetry.io/otel/sdk/metric/metricdata/metricdatatest" + +import ( + "bytes" + "fmt" + "reflect" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/sdk/metric/metricdata" +) + +// equalResourceMetrics returns reasons ResourceMetrics are not equal. If they +// are equal, the returned reasons will be empty. +// +// The ScopeMetrics each ResourceMetrics contains are compared based on +// containing the same ScopeMetrics, not the order they are stored in. 
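+//
+// Any expected ScopeMetrics missing from the actual ResourceMetrics, and any
+// actual ScopeMetrics not present in the expected one, are reported as
+// reasons.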
+func equalResourceMetrics(a, b metricdata.ResourceMetrics, cfg config) (reasons []string) { + if !a.Resource.Equal(b.Resource) { + reasons = append(reasons, notEqualStr("Resources", a.Resource, b.Resource)) + } + + r := compareDiff(diffSlices( + a.ScopeMetrics, + b.ScopeMetrics, + func(a, b metricdata.ScopeMetrics) bool { + r := equalScopeMetrics(a, b, cfg) + return len(r) == 0 + }, + )) + if r != "" { + reasons = append(reasons, fmt.Sprintf("ResourceMetrics ScopeMetrics not equal:\n%s", r)) + } + return reasons +} + +// equalScopeMetrics returns reasons ScopeMetrics are not equal. If they are +// equal, the returned reasons will be empty. +// +// The Metrics each ScopeMetrics contains are compared based on containing the +// same Metrics, not the order they are stored in. +func equalScopeMetrics(a, b metricdata.ScopeMetrics, cfg config) (reasons []string) { + if a.Scope != b.Scope { + reasons = append(reasons, notEqualStr("Scope", a.Scope, b.Scope)) + } + + r := compareDiff(diffSlices( + a.Metrics, + b.Metrics, + func(a, b metricdata.Metrics) bool { + r := equalMetrics(a, b, cfg) + return len(r) == 0 + }, + )) + if r != "" { + reasons = append(reasons, fmt.Sprintf("ScopeMetrics Metrics not equal:\n%s", r)) + } + return reasons +} + +// equalMetrics returns reasons Metrics are not equal. If they are equal, the +// returned reasons will be empty. +func equalMetrics(a, b metricdata.Metrics, cfg config) (reasons []string) { + if a.Name != b.Name { + reasons = append(reasons, notEqualStr("Name", a.Name, b.Name)) + } + if a.Description != b.Description { + reasons = append(reasons, notEqualStr("Description", a.Description, b.Description)) + } + if a.Unit != b.Unit { + reasons = append(reasons, notEqualStr("Unit", a.Unit, b.Unit)) + } + + r := equalAggregations(a.Data, b.Data, cfg) + if len(r) > 0 { + reasons = append(reasons, "Metrics Data not equal:") + reasons = append(reasons, r...) + } + return reasons +} + +// equalAggregations returns reasons a and b are not equal. If they are equal, +// the returned reasons will be empty. +func equalAggregations(a, b metricdata.Aggregation, cfg config) (reasons []string) { + if a == nil || b == nil { + if a != b { + return []string{notEqualStr("Aggregation", a, b)} + } + return reasons + } + + if reflect.TypeOf(a) != reflect.TypeOf(b) { + return []string{fmt.Sprintf("Aggregation types not equal:\nexpected: %T\nactual: %T", a, b)} + } + + switch v := a.(type) { + case metricdata.Gauge[int64]: + r := equalGauges(v, b.(metricdata.Gauge[int64]), cfg) + if len(r) > 0 { + reasons = append(reasons, "Gauge[int64] not equal:") + reasons = append(reasons, r...) + } + case metricdata.Gauge[float64]: + r := equalGauges(v, b.(metricdata.Gauge[float64]), cfg) + if len(r) > 0 { + reasons = append(reasons, "Gauge[float64] not equal:") + reasons = append(reasons, r...) + } + case metricdata.Sum[int64]: + r := equalSums(v, b.(metricdata.Sum[int64]), cfg) + if len(r) > 0 { + reasons = append(reasons, "Sum[int64] not equal:") + reasons = append(reasons, r...) + } + case metricdata.Sum[float64]: + r := equalSums(v, b.(metricdata.Sum[float64]), cfg) + if len(r) > 0 { + reasons = append(reasons, "Sum[float64] not equal:") + reasons = append(reasons, r...) + } + case metricdata.Histogram: + r := equalHistograms(v, b.(metricdata.Histogram), cfg) + if len(r) > 0 { + reasons = append(reasons, "Histogram not equal:") + reasons = append(reasons, r...) 
+		}
+	default:
+		reasons = append(reasons, fmt.Sprintf("Aggregation of unknown type %T", a))
+	}
+	return reasons
+}
+
+// equalGauges returns reasons Gauges are not equal. If they are equal, the
+// returned reasons will be empty.
+//
+// The DataPoints each Gauge contains are compared based on containing the
+// same DataPoints, not the order they are stored in.
+func equalGauges[N int64 | float64](a, b metricdata.Gauge[N], cfg config) (reasons []string) {
+	r := compareDiff(diffSlices(
+		a.DataPoints,
+		b.DataPoints,
+		func(a, b metricdata.DataPoint[N]) bool {
+			r := equalDataPoints(a, b, cfg)
+			return len(r) == 0
+		},
+	))
+	if r != "" {
+		reasons = append(reasons, fmt.Sprintf("Gauge DataPoints not equal:\n%s", r))
+	}
+	return reasons
+}
+
+// equalSums returns reasons Sums are not equal. If they are equal, the
+// returned reasons will be empty.
+//
+// The DataPoints each Sum contains are compared based on containing the same
+// DataPoints, not the order they are stored in.
+func equalSums[N int64 | float64](a, b metricdata.Sum[N], cfg config) (reasons []string) {
+	if a.Temporality != b.Temporality {
+		reasons = append(reasons, notEqualStr("Temporality", a.Temporality, b.Temporality))
+	}
+	if a.IsMonotonic != b.IsMonotonic {
+		reasons = append(reasons, notEqualStr("IsMonotonic", a.IsMonotonic, b.IsMonotonic))
+	}
+
+	r := compareDiff(diffSlices(
+		a.DataPoints,
+		b.DataPoints,
+		func(a, b metricdata.DataPoint[N]) bool {
+			r := equalDataPoints(a, b, cfg)
+			return len(r) == 0
+		},
+	))
+	if r != "" {
+		reasons = append(reasons, fmt.Sprintf("Sum DataPoints not equal:\n%s", r))
+	}
+	return reasons
+}
+
+// equalHistograms returns reasons Histograms are not equal. If they are
+// equal, the returned reasons will be empty.
+//
+// The DataPoints each Histogram contains are compared based on containing the
+// same HistogramDataPoint, not the order they are stored in.
+func equalHistograms(a, b metricdata.Histogram, cfg config) (reasons []string) {
+	if a.Temporality != b.Temporality {
+		reasons = append(reasons, notEqualStr("Temporality", a.Temporality, b.Temporality))
+	}
+
+	r := compareDiff(diffSlices(
+		a.DataPoints,
+		b.DataPoints,
+		func(a, b metricdata.HistogramDataPoint) bool {
+			r := equalHistogramDataPoints(a, b, cfg)
+			return len(r) == 0
+		},
+	))
+	if r != "" {
+		reasons = append(reasons, fmt.Sprintf("Histogram DataPoints not equal:\n%s", r))
+	}
+	return reasons
+}
+
+// equalDataPoints returns reasons DataPoints are not equal. If they are
+// equal, the returned reasons will be empty.
+func equalDataPoints[N int64 | float64](a, b metricdata.DataPoint[N], cfg config) (reasons []string) { // nolint: revive // Intentional internal control flag
+	if !a.Attributes.Equals(&b.Attributes) {
+		reasons = append(reasons, notEqualStr(
+			"Attributes",
+			a.Attributes.Encoded(attribute.DefaultEncoder()),
+			b.Attributes.Encoded(attribute.DefaultEncoder()),
+		))
+	}
+
+	if !cfg.ignoreTimestamp {
+		if !a.StartTime.Equal(b.StartTime) {
+			reasons = append(reasons, notEqualStr("StartTime", a.StartTime.UnixNano(), b.StartTime.UnixNano()))
+		}
+		if !a.Time.Equal(b.Time) {
+			reasons = append(reasons, notEqualStr("Time", a.Time.UnixNano(), b.Time.UnixNano()))
+		}
+	}
+
+	if a.Value != b.Value {
+		reasons = append(reasons, notEqualStr("Value", a.Value, b.Value))
+	}
+	return reasons
+}
+
+// equalHistogramDataPoints returns reasons HistogramDataPoints are not equal.
+// If they are equal, the returned reasons will be empty.
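+//
+// The optional Min and Max fields are compared by pointed-to value: they are
+// equal only when both are nil, or both are non-nil and hold equal values.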
+func equalHistogramDataPoints(a, b metricdata.HistogramDataPoint, cfg config) (reasons []string) { // nolint: revive // Intentional internal control flag
+	if !a.Attributes.Equals(&b.Attributes) {
+		reasons = append(reasons, notEqualStr(
+			"Attributes",
+			a.Attributes.Encoded(attribute.DefaultEncoder()),
+			b.Attributes.Encoded(attribute.DefaultEncoder()),
+		))
+	}
+	if !cfg.ignoreTimestamp {
+		if !a.StartTime.Equal(b.StartTime) {
+			reasons = append(reasons, notEqualStr("StartTime", a.StartTime.UnixNano(), b.StartTime.UnixNano()))
+		}
+		if !a.Time.Equal(b.Time) {
+			reasons = append(reasons, notEqualStr("Time", a.Time.UnixNano(), b.Time.UnixNano()))
+		}
+	}
+	if a.Count != b.Count {
+		reasons = append(reasons, notEqualStr("Count", a.Count, b.Count))
+	}
+	if !equalSlices(a.Bounds, b.Bounds) {
+		reasons = append(reasons, notEqualStr("Bounds", a.Bounds, b.Bounds))
+	}
+	if !equalSlices(a.BucketCounts, b.BucketCounts) {
+		reasons = append(reasons, notEqualStr("BucketCounts", a.BucketCounts, b.BucketCounts))
+	}
+	if !equalPtrValues(a.Min, b.Min) {
+		reasons = append(reasons, notEqualStr("Min", a.Min, b.Min))
+	}
+	if !equalPtrValues(a.Max, b.Max) {
+		reasons = append(reasons, notEqualStr("Max", a.Max, b.Max))
+	}
+	if a.Sum != b.Sum {
+		reasons = append(reasons, notEqualStr("Sum", a.Sum, b.Sum))
+	}
+	return reasons
+}
+
+func notEqualStr(prefix string, expected, actual interface{}) string {
+	return fmt.Sprintf("%s not equal:\nexpected: %v\nactual: %v", prefix, expected, actual)
+}
+
+func equalSlices[T comparable](a, b []T) bool {
+	if len(a) != len(b) {
+		return false
+	}
+	for i, v := range a {
+		if v != b[i] {
+			return false
+		}
+	}
+	return true
+}
+
+func equalPtrValues[T comparable](a, b *T) bool {
+	if a == nil || b == nil {
+		return a == b
+	}
+
+	return *a == *b
+}
+
+// diffSlices compares a and b as unordered sets, pairing each element of a
+// with at most one equal element of b. It returns the unpaired elements of a
+// (extraA) and the unpaired elements of b (extraB).
+func diffSlices[T any](a, b []T, equal func(T, T) bool) (extraA, extraB []T) {
+	visited := make([]bool, len(b))
+	for i := 0; i < len(a); i++ {
+		found := false
+		for j := 0; j < len(b); j++ {
+			if visited[j] {
+				continue
+			}
+			if equal(a[i], b[j]) {
+				visited[j] = true
+				found = true
+				break
+			}
+		}
+		if !found {
+			extraA = append(extraA, a[i])
+		}
+	}
+
+	for j := 0; j < len(b); j++ {
+		if visited[j] {
+			continue
+		}
+		extraB = append(extraB, b[j])
+	}
+
+	return extraA, extraB
+}
+
+// compareDiff formats the output of diffSlices into a human-readable report
+// of missing expected values and unexpected additional values. An empty
+// string means there was no difference.
+func compareDiff[T any](extraExpected, extraActual []T) string {
+	if len(extraExpected) == 0 && len(extraActual) == 0 {
+		return ""
+	}
+
+	formatter := func(v T) string {
+		return fmt.Sprintf("%#v", v)
+	}
+
+	var msg bytes.Buffer
+	if len(extraExpected) > 0 {
+		_, _ = msg.WriteString("missing expected values:\n")
+		for _, v := range extraExpected {
+			_, _ = msg.WriteString(formatter(v) + "\n")
+		}
+	}
+
+	if len(extraActual) > 0 {
+		_, _ = msg.WriteString("unexpected additional values:\n")
+		for _, v := range extraActual {
+			_, _ = msg.WriteString(formatter(v) + "\n")
+		}
+	}
+
+	return msg.String()
+}
diff --git a/sdk/metric/metricdata/temporality.go b/sdk/metric/metricdata/temporality.go
new file mode 100644
index 00000000000..5cf105b7947
--- /dev/null
+++ b/sdk/metric/metricdata/temporality.go
@@ -0,0 +1,43 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:generate stringer -type=Temporality +//go:build go1.17 +// +build go1.17 + +package metricdata // import "go.opentelemetry.io/otel/sdk/metric/metricdata" + +// Temporality defines the window that an aggregation was calculated over. +type Temporality uint8 + +const ( + // undefinedTemporality represents an unset Temporality. + //nolint:deadcode,unused,varcheck + undefinedTemporality Temporality = iota + + // CumulativeTemporality defines a measurement interval that continues to + // expand forward in time from a starting point. New measurements are + // added to all previous measurements since a start time. + CumulativeTemporality + + // DeltaTemporality defines a measurement interval that resets each cycle. + // Measurements from one cycle are recorded independently, measurements + // from other cycles do not affect them. + DeltaTemporality +) + +// MarshalText returns the byte encoded of t. +func (t Temporality) MarshalText() ([]byte, error) { + return []byte(t.String()), nil +} diff --git a/sdk/metric/export/aggregation/temporality_string.go b/sdk/metric/metricdata/temporality_string.go similarity index 66% rename from sdk/metric/export/aggregation/temporality_string.go rename to sdk/metric/metricdata/temporality_string.go index 3edbeb4592d..4da833cdce2 100644 --- a/sdk/metric/export/aggregation/temporality_string.go +++ b/sdk/metric/metricdata/temporality_string.go @@ -1,6 +1,6 @@ // Code generated by "stringer -type=Temporality"; DO NOT EDIT. -package aggregation +package metricdata import "strconv" @@ -8,18 +8,18 @@ func _() { // An "invalid array index" compiler error signifies that the constant values have changed. // Re-run the stringer command to generate them again. var x [1]struct{} + _ = x[undefinedTemporality-0] _ = x[CumulativeTemporality-1] _ = x[DeltaTemporality-2] } -const _Temporality_name = "CumulativeTemporalityDeltaTemporality" +const _Temporality_name = "undefinedTemporalityCumulativeTemporalityDeltaTemporality" -var _Temporality_index = [...]uint8{0, 21, 37} +var _Temporality_index = [...]uint8{0, 20, 41, 57} func (i Temporality) String() string { - i -= 1 if i >= Temporality(len(_Temporality_index)-1) { - return "Temporality(" + strconv.FormatInt(int64(i+1), 10) + ")" + return "Temporality(" + strconv.FormatInt(int64(i), 10) + ")" } return _Temporality_name[_Temporality_index[i]:_Temporality_index[i+1]] } diff --git a/sdk/metric/metrictest/config.go b/sdk/metric/metrictest/config.go deleted file mode 100644 index 4531b178fdc..00000000000 --- a/sdk/metric/metrictest/config.go +++ /dev/null @@ -1,57 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package metrictest // import "go.opentelemetry.io/otel/sdk/metric/metrictest" - -import "go.opentelemetry.io/otel/sdk/metric/export/aggregation" - -type config struct { - temporalitySelector aggregation.TemporalitySelector -} - -func newConfig(opts ...Option) config { - cfg := config{ - temporalitySelector: aggregation.CumulativeTemporalitySelector(), - } - for _, opt := range opts { - cfg = opt.apply(cfg) - } - return cfg -} - -// Option allow for control of details of the TestMeterProvider created. -type Option interface { - apply(config) config -} - -type functionOption func(config) config - -func (f functionOption) apply(cfg config) config { - return f(cfg) -} - -// WithTemporalitySelector allows for the use of either cumulative (default) or -// delta metrics. -// -// Warning: the current SDK does not convert async instruments into delta -// temporality. -func WithTemporalitySelector(ts aggregation.TemporalitySelector) Option { - return functionOption(func(cfg config) config { - if ts == nil { - return cfg - } - cfg.temporalitySelector = ts - return cfg - }) -} diff --git a/sdk/metric/metrictest/exporter.go b/sdk/metric/metrictest/exporter.go deleted file mode 100644 index c0926dd7cf6..00000000000 --- a/sdk/metric/metrictest/exporter.go +++ /dev/null @@ -1,200 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package metrictest // import "go.opentelemetry.io/otel/sdk/metric/metrictest" - -import ( - "context" - "fmt" - - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/metric" - "go.opentelemetry.io/otel/metric/instrument" - "go.opentelemetry.io/otel/sdk/instrumentation" - controller "go.opentelemetry.io/otel/sdk/metric/controller/basic" - "go.opentelemetry.io/otel/sdk/metric/export" - "go.opentelemetry.io/otel/sdk/metric/export/aggregation" - "go.opentelemetry.io/otel/sdk/metric/number" - processor "go.opentelemetry.io/otel/sdk/metric/processor/basic" - "go.opentelemetry.io/otel/sdk/metric/sdkapi" - selector "go.opentelemetry.io/otel/sdk/metric/selector/simple" -) - -// Exporter is a manually collected exporter for testing the SDK. It does not -// satisfy the `export.Exporter` interface because it is not intended to be -// used with the periodic collection of the SDK, instead the test should -// manually call `Collect()` -// -// Exporters are not thread safe, and should only be used for testing. -type Exporter struct { - // Records contains the last metrics collected. - Records []ExportRecord - - controller *controller.Controller - temporalitySelector aggregation.TemporalitySelector -} - -// NewTestMeterProvider creates a MeterProvider and Exporter to be used in tests. -func NewTestMeterProvider(opts ...Option) (metric.MeterProvider, *Exporter) { - cfg := newConfig(opts...) 
- - c := controller.New( - processor.NewFactory( - selector.NewWithHistogramDistribution(), - cfg.temporalitySelector, - ), - controller.WithCollectPeriod(0), - ) - exp := &Exporter{ - controller: c, - temporalitySelector: cfg.temporalitySelector, - } - - return c, exp -} - -// Deprecated: please use Scope instead. -type Library = Scope - -// Scope is the same as "sdk/instrumentation".Scope but there is -// a package cycle to use it so it is redeclared here. -type Scope struct { - InstrumentationName string - InstrumentationVersion string - SchemaURL string -} - -// ExportRecord represents one collected datapoint from the Exporter. -type ExportRecord struct { - InstrumentName string - InstrumentationLibrary Library - Attributes []attribute.KeyValue - AggregationKind aggregation.Kind - NumberKind number.Kind - Sum number.Number - Count uint64 - Histogram aggregation.Buckets - LastValue number.Number -} - -// Collect triggers the SDK's collect methods and then aggregates the data into -// ExportRecords. This will overwrite any previous collected metrics. -func (e *Exporter) Collect(ctx context.Context) error { - e.Records = []ExportRecord{} - - err := e.controller.Collect(ctx) - if err != nil { - return err - } - - return e.controller.ForEach(func(l instrumentation.Library, r export.Reader) error { - lib := Library{ - InstrumentationName: l.Name, - InstrumentationVersion: l.Version, - SchemaURL: l.SchemaURL, - } - - return r.ForEach(e.temporalitySelector, func(rec export.Record) error { - record := ExportRecord{ - InstrumentName: rec.Descriptor().Name(), - InstrumentationLibrary: lib, - Attributes: rec.Attributes().ToSlice(), - AggregationKind: rec.Aggregation().Kind(), - NumberKind: rec.Descriptor().NumberKind(), - } - - var err error - switch agg := rec.Aggregation().(type) { - case aggregation.Histogram: - record.AggregationKind = aggregation.HistogramKind - record.Histogram, err = agg.Histogram() - if err != nil { - return err - } - record.Sum, err = agg.Sum() - if err != nil { - return err - } - record.Count, err = agg.Count() - if err != nil { - return err - } - case aggregation.Count: - record.Count, err = agg.Count() - if err != nil { - return err - } - case aggregation.LastValue: - record.LastValue, _, err = agg.LastValue() - if err != nil { - return err - } - case aggregation.Sum: - record.Sum, err = agg.Sum() - if err != nil { - return err - } - } - - e.Records = append(e.Records, record) - return nil - }) - }) -} - -// GetRecords returns all Records found by the SDK. -func (e *Exporter) GetRecords() []ExportRecord { - return e.Records -} - -var errNotFound = fmt.Errorf("record not found") - -// GetByName returns the first Record with a matching instrument name. -func (e *Exporter) GetByName(name string) (ExportRecord, error) { - for _, rec := range e.Records { - if rec.InstrumentName == name { - return rec, nil - } - } - return ExportRecord{}, errNotFound -} - -// GetByNameAndAttributes returns the first Record with a matching name and the sub-set of attributes. -func (e *Exporter) GetByNameAndAttributes(name string, attributes []attribute.KeyValue) (ExportRecord, error) { - for _, rec := range e.Records { - if rec.InstrumentName == name && subSet(attributes, rec.Attributes) { - return rec, nil - } - } - return ExportRecord{}, errNotFound -} - -// subSet returns true if attributesA is a subset of attributesB. -func subSet(attributesA, attributesB []attribute.KeyValue) bool { - b := attribute.NewSet(attributesB...) 
- - for _, kv := range attributesA { - if v, found := b.Value(kv.Key); !found || v != kv.Value { - return false - } - } - return true -} - -// NewDescriptor is a test helper for constructing test metric -// descriptors using standard options. -func NewDescriptor(name string, ikind sdkapi.InstrumentKind, nkind number.Kind, opts ...instrument.Option) sdkapi.Descriptor { - cfg := instrument.NewConfig(opts...) - return sdkapi.NewDescriptor(name, ikind, nkind, cfg.Description(), cfg.Unit()) -} diff --git a/sdk/metric/metrictest/exporter_test.go b/sdk/metric/metrictest/exporter_test.go deleted file mode 100644 index 10f4fb4358c..00000000000 --- a/sdk/metric/metrictest/exporter_test.go +++ /dev/null @@ -1,414 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package metrictest_test // import "go.opentelemetry.io/otel/sdk/metric/metrictest" - -import ( - "context" - "fmt" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/metric/instrument" - "go.opentelemetry.io/otel/sdk/metric/export/aggregation" - "go.opentelemetry.io/otel/sdk/metric/metrictest" -) - -func TestSyncInstruments(t *testing.T) { - ctx := context.Background() - mp, exp := metrictest.NewTestMeterProvider() - meter := mp.Meter("go.opentelemetry.io/otel/sdk/metric/metrictest/exporter_TestSyncInstruments") - - t.Run("Float Counter", func(t *testing.T) { - fcnt, err := meter.SyncFloat64().Counter("fCount") - require.NoError(t, err) - - fcnt.Add(ctx, 2) - - err = exp.Collect(context.Background()) - require.NoError(t, err) - - out, err := exp.GetByName("fCount") - require.NoError(t, err) - assert.InDelta(t, 2.0, out.Sum.AsFloat64(), 0.0001) - assert.Equal(t, aggregation.SumKind, out.AggregationKind) - }) - - t.Run("Float UpDownCounter", func(t *testing.T) { - fudcnt, err := meter.SyncFloat64().UpDownCounter("fUDCount") - require.NoError(t, err) - - fudcnt.Add(ctx, 3) - - err = exp.Collect(context.Background()) - require.NoError(t, err) - - out, err := exp.GetByName("fUDCount") - require.NoError(t, err) - assert.InDelta(t, 3.0, out.Sum.AsFloat64(), 0.0001) - assert.Equal(t, aggregation.SumKind, out.AggregationKind) - }) - - t.Run("Float Histogram", func(t *testing.T) { - fhis, err := meter.SyncFloat64().Histogram("fHist") - require.NoError(t, err) - - fhis.Record(ctx, 4) - fhis.Record(ctx, 5) - - err = exp.Collect(context.Background()) - require.NoError(t, err) - - out, err := exp.GetByName("fHist") - require.NoError(t, err) - assert.InDelta(t, 9.0, out.Sum.AsFloat64(), 0.0001) - assert.EqualValues(t, 2, out.Count) - assert.Equal(t, aggregation.HistogramKind, out.AggregationKind) - }) - - t.Run("Int Counter", func(t *testing.T) { - icnt, err := meter.SyncInt64().Counter("iCount") - require.NoError(t, err) - - icnt.Add(ctx, 22) - - err = exp.Collect(context.Background()) - require.NoError(t, err) - - out, err := exp.GetByName("iCount") - require.NoError(t, err) - 
assert.EqualValues(t, 22, out.Sum.AsInt64()) - assert.Equal(t, aggregation.SumKind, out.AggregationKind) - }) - t.Run("Int UpDownCounter", func(t *testing.T) { - iudcnt, err := meter.SyncInt64().UpDownCounter("iUDCount") - require.NoError(t, err) - - iudcnt.Add(ctx, 23) - - err = exp.Collect(context.Background()) - require.NoError(t, err) - - out, err := exp.GetByName("iUDCount") - require.NoError(t, err) - assert.EqualValues(t, 23, out.Sum.AsInt64()) - assert.Equal(t, aggregation.SumKind, out.AggregationKind) - }) - t.Run("Int Histogram", func(t *testing.T) { - ihis, err := meter.SyncInt64().Histogram("iHist") - require.NoError(t, err) - - ihis.Record(ctx, 24) - ihis.Record(ctx, 25) - - err = exp.Collect(context.Background()) - require.NoError(t, err) - - out, err := exp.GetByName("iHist") - require.NoError(t, err) - assert.EqualValues(t, 49, out.Sum.AsInt64()) - assert.EqualValues(t, 2, out.Count) - assert.Equal(t, aggregation.HistogramKind, out.AggregationKind) - }) -} - -func TestSyncDeltaInstruments(t *testing.T) { - ctx := context.Background() - mp, exp := metrictest.NewTestMeterProvider(metrictest.WithTemporalitySelector(aggregation.DeltaTemporalitySelector())) - meter := mp.Meter("go.opentelemetry.io/otel/sdk/metric/metrictest/exporter_TestSyncDeltaInstruments") - - t.Run("Float Counter", func(t *testing.T) { - fcnt, err := meter.SyncFloat64().Counter("fCount") - require.NoError(t, err) - - fcnt.Add(ctx, 2) - - err = exp.Collect(context.Background()) - require.NoError(t, err) - - out, err := exp.GetByName("fCount") - require.NoError(t, err) - assert.InDelta(t, 2.0, out.Sum.AsFloat64(), 0.0001) - assert.Equal(t, aggregation.SumKind, out.AggregationKind) - }) - - t.Run("Float UpDownCounter", func(t *testing.T) { - fudcnt, err := meter.SyncFloat64().UpDownCounter("fUDCount") - require.NoError(t, err) - - fudcnt.Add(ctx, 3) - - err = exp.Collect(context.Background()) - require.NoError(t, err) - - out, err := exp.GetByName("fUDCount") - require.NoError(t, err) - assert.InDelta(t, 3.0, out.Sum.AsFloat64(), 0.0001) - assert.Equal(t, aggregation.SumKind, out.AggregationKind) - }) - - t.Run("Float Histogram", func(t *testing.T) { - fhis, err := meter.SyncFloat64().Histogram("fHist") - require.NoError(t, err) - - fhis.Record(ctx, 4) - fhis.Record(ctx, 5) - - err = exp.Collect(context.Background()) - require.NoError(t, err) - - out, err := exp.GetByName("fHist") - require.NoError(t, err) - assert.InDelta(t, 9.0, out.Sum.AsFloat64(), 0.0001) - assert.EqualValues(t, 2, out.Count) - assert.Equal(t, aggregation.HistogramKind, out.AggregationKind) - }) - - t.Run("Int Counter", func(t *testing.T) { - icnt, err := meter.SyncInt64().Counter("iCount") - require.NoError(t, err) - - icnt.Add(ctx, 22) - - err = exp.Collect(context.Background()) - require.NoError(t, err) - - out, err := exp.GetByName("iCount") - require.NoError(t, err) - assert.EqualValues(t, 22, out.Sum.AsInt64()) - assert.Equal(t, aggregation.SumKind, out.AggregationKind) - }) - t.Run("Int UpDownCounter", func(t *testing.T) { - iudcnt, err := meter.SyncInt64().UpDownCounter("iUDCount") - require.NoError(t, err) - - iudcnt.Add(ctx, 23) - - err = exp.Collect(context.Background()) - require.NoError(t, err) - - out, err := exp.GetByName("iUDCount") - require.NoError(t, err) - assert.EqualValues(t, 23, out.Sum.AsInt64()) - assert.Equal(t, aggregation.SumKind, out.AggregationKind) - }) - t.Run("Int Histogram", func(t *testing.T) { - ihis, err := meter.SyncInt64().Histogram("iHist") - require.NoError(t, err) - - ihis.Record(ctx, 24) - 
ihis.Record(ctx, 25) - - err = exp.Collect(context.Background()) - require.NoError(t, err) - - out, err := exp.GetByName("iHist") - require.NoError(t, err) - assert.EqualValues(t, 49, out.Sum.AsInt64()) - assert.EqualValues(t, 2, out.Count) - assert.Equal(t, aggregation.HistogramKind, out.AggregationKind) - }) -} - -func TestAsyncInstruments(t *testing.T) { - ctx := context.Background() - mp, exp := metrictest.NewTestMeterProvider() - - t.Run("Float Counter", func(t *testing.T) { - meter := mp.Meter("go.opentelemetry.io/otel/sdk/metric/metrictest/exporter_TestAsyncCounter_FloatCounter") - - fcnt, err := meter.AsyncFloat64().Counter("fCount") - require.NoError(t, err) - - err = meter.RegisterCallback( - []instrument.Asynchronous{ - fcnt, - }, func(context.Context) { - fcnt.Observe(ctx, 2) - }) - require.NoError(t, err) - - err = exp.Collect(context.Background()) - require.NoError(t, err) - - out, err := exp.GetByName("fCount") - require.NoError(t, err) - assert.InDelta(t, 2.0, out.Sum.AsFloat64(), 0.0001) - assert.Equal(t, aggregation.SumKind, out.AggregationKind) - }) - - t.Run("Float UpDownCounter", func(t *testing.T) { - meter := mp.Meter("go.opentelemetry.io/otel/sdk/metric/metrictest/exporter_TestAsyncCounter_FloatUpDownCounter") - - fudcnt, err := meter.AsyncFloat64().UpDownCounter("fUDCount") - require.NoError(t, err) - - err = meter.RegisterCallback( - []instrument.Asynchronous{ - fudcnt, - }, func(context.Context) { - fudcnt.Observe(ctx, 3) - }) - require.NoError(t, err) - - err = exp.Collect(context.Background()) - require.NoError(t, err) - - out, err := exp.GetByName("fUDCount") - require.NoError(t, err) - assert.InDelta(t, 3.0, out.Sum.AsFloat64(), 0.0001) - assert.Equal(t, aggregation.SumKind, out.AggregationKind) - }) - - t.Run("Float Gauge", func(t *testing.T) { - meter := mp.Meter("go.opentelemetry.io/otel/sdk/metric/metrictest/exporter_TestAsyncCounter_FloatGauge") - - fgauge, err := meter.AsyncFloat64().Gauge("fGauge") - require.NoError(t, err) - - err = meter.RegisterCallback( - []instrument.Asynchronous{ - fgauge, - }, func(context.Context) { - fgauge.Observe(ctx, 4) - }) - require.NoError(t, err) - - err = exp.Collect(context.Background()) - require.NoError(t, err) - - out, err := exp.GetByName("fGauge") - require.NoError(t, err) - assert.InDelta(t, 4.0, out.LastValue.AsFloat64(), 0.0001) - assert.Equal(t, aggregation.LastValueKind, out.AggregationKind) - }) - - t.Run("Int Counter", func(t *testing.T) { - meter := mp.Meter("go.opentelemetry.io/otel/sdk/metric/metrictest/exporter_TestAsyncCounter_IntCounter") - - icnt, err := meter.AsyncInt64().Counter("iCount") - require.NoError(t, err) - - err = meter.RegisterCallback( - []instrument.Asynchronous{ - icnt, - }, func(context.Context) { - icnt.Observe(ctx, 22) - }) - require.NoError(t, err) - - err = exp.Collect(context.Background()) - require.NoError(t, err) - - out, err := exp.GetByName("iCount") - require.NoError(t, err) - assert.EqualValues(t, 22, out.Sum.AsInt64()) - assert.Equal(t, aggregation.SumKind, out.AggregationKind) - }) - - t.Run("Int UpDownCounter", func(t *testing.T) { - meter := mp.Meter("go.opentelemetry.io/otel/sdk/metric/metrictest/exporter_TestAsyncCounter_IntUpDownCounter") - - iudcnt, err := meter.AsyncInt64().UpDownCounter("iUDCount") - require.NoError(t, err) - - err = meter.RegisterCallback( - []instrument.Asynchronous{ - iudcnt, - }, func(context.Context) { - iudcnt.Observe(ctx, 23) - }) - require.NoError(t, err) - - err = exp.Collect(context.Background()) - require.NoError(t, err) - - out, err 
:= exp.GetByName("iUDCount") - require.NoError(t, err) - assert.EqualValues(t, 23, out.Sum.AsInt64()) - assert.Equal(t, aggregation.SumKind, out.AggregationKind) - }) - t.Run("Int Gauge", func(t *testing.T) { - meter := mp.Meter("go.opentelemetry.io/otel/sdk/metric/metrictest/exporter_TestAsyncCounter_IntGauge") - - igauge, err := meter.AsyncInt64().Gauge("iGauge") - require.NoError(t, err) - - err = meter.RegisterCallback( - []instrument.Asynchronous{ - igauge, - }, func(context.Context) { - igauge.Observe(ctx, 25) - }) - require.NoError(t, err) - - err = exp.Collect(context.Background()) - require.NoError(t, err) - - out, err := exp.GetByName("iGauge") - require.NoError(t, err) - assert.EqualValues(t, 25, out.LastValue.AsInt64()) - assert.Equal(t, aggregation.LastValueKind, out.AggregationKind) - }) -} - -func ExampleExporter_GetByName() { - mp, exp := metrictest.NewTestMeterProvider() - meter := mp.Meter("go.opentelemetry.io/otel/sdk/metric/metrictest/exporter_ExampleExporter_GetByName") - - cnt, err := meter.SyncFloat64().Counter("fCount") - if err != nil { - panic("could not acquire counter") - } - - cnt.Add(context.Background(), 2.5) - - err = exp.Collect(context.Background()) - if err != nil { - panic("collection failed") - } - - out, _ := exp.GetByName("fCount") - - fmt.Println(out.Sum.AsFloat64()) - // Output: 2.5 -} - -func ExampleExporter_GetByNameAndAttributes() { - mp, exp := metrictest.NewTestMeterProvider() - meter := mp.Meter("go.opentelemetry.io/otel/sdk/metric/metrictest/exporter_ExampleExporter_GetByNameAndAttributes") - - cnt, err := meter.SyncFloat64().Counter("fCount") - if err != nil { - panic("could not acquire counter") - } - - cnt.Add(context.Background(), 4, attribute.String("foo", "bar"), attribute.Bool("found", false)) - - err = exp.Collect(context.Background()) - if err != nil { - panic("collection failed") - } - - out, err := exp.GetByNameAndAttributes("fCount", []attribute.KeyValue{attribute.String("foo", "bar")}) - if err != nil { - println(err.Error()) - } - - fmt.Println(out.Sum.AsFloat64()) - // Output: 4 -} diff --git a/sdk/metric/number/kind_string.go b/sdk/metric/number/kind_string.go deleted file mode 100644 index 6288c7ea295..00000000000 --- a/sdk/metric/number/kind_string.go +++ /dev/null @@ -1,24 +0,0 @@ -// Code generated by "stringer -type=Kind"; DO NOT EDIT. - -package number - -import "strconv" - -func _() { - // An "invalid array index" compiler error signifies that the constant values have changed. - // Re-run the stringer command to generate them again. - var x [1]struct{} - _ = x[Int64Kind-0] - _ = x[Float64Kind-1] -} - -const _Kind_name = "Int64KindFloat64Kind" - -var _Kind_index = [...]uint8{0, 9, 20} - -func (i Kind) String() string { - if i < 0 || i >= Kind(len(_Kind_index)-1) { - return "Kind(" + strconv.FormatInt(int64(i), 10) + ")" - } - return _Kind_name[_Kind_index[i]:_Kind_index[i+1]] -} diff --git a/sdk/metric/number/number.go b/sdk/metric/number/number.go deleted file mode 100644 index 6ba16112fde..00000000000 --- a/sdk/metric/number/number.go +++ /dev/null @@ -1,539 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package number // import "go.opentelemetry.io/otel/sdk/metric/number" - -//go:generate stringer -type=Kind - -import ( - "fmt" - "math" - "sync/atomic" - - "go.opentelemetry.io/otel/internal" -) - -// Kind describes the data type of the Number. -type Kind int8 - -const ( - // Int64Kind means that the Number stores int64. - Int64Kind Kind = iota - // Float64Kind means that the Number stores float64. - Float64Kind -) - -// Zero returns a zero value for a given Kind. -func (k Kind) Zero() Number { - switch k { - case Int64Kind: - return NewInt64Number(0) - case Float64Kind: - return NewFloat64Number(0.) - default: - return Number(0) - } -} - -// Minimum returns the minimum representable value -// for a given Kind. -func (k Kind) Minimum() Number { - switch k { - case Int64Kind: - return NewInt64Number(math.MinInt64) - case Float64Kind: - return NewFloat64Number(-1. * math.MaxFloat64) - default: - return Number(0) - } -} - -// Maximum returns the maximum representable value -// for a given Kind. -func (k Kind) Maximum() Number { - switch k { - case Int64Kind: - return NewInt64Number(math.MaxInt64) - case Float64Kind: - return NewFloat64Number(math.MaxFloat64) - default: - return Number(0) - } -} - -// Number represents either an integral or a floating point value. It -// needs to be accompanied with a source of Kind that describes -// the actual type of the value stored within Number. -type Number uint64 - -// - constructors - -// NewNumberFromRaw creates a new Number from a raw value. -func NewNumberFromRaw(r uint64) Number { - return Number(r) -} - -// NewInt64Number creates an integral Number. -func NewInt64Number(i int64) Number { - return NewNumberFromRaw(internal.Int64ToRaw(i)) -} - -// NewFloat64Number creates a floating point Number. -func NewFloat64Number(f float64) Number { - return NewNumberFromRaw(internal.Float64ToRaw(f)) -} - -// NewNumberSignChange returns a number with the same magnitude and -// the opposite sign. `kind` must describe the kind of number in `nn`. -func NewNumberSignChange(kind Kind, nn Number) Number { - switch kind { - case Int64Kind: - return NewInt64Number(-nn.AsInt64()) - case Float64Kind: - return NewFloat64Number(-nn.AsFloat64()) - } - return nn -} - -// - as x - -// AsNumber gets the Number. -func (n *Number) AsNumber() Number { - return *n -} - -// AsRaw gets the uninterpreted raw value. Might be useful for some -// atomic operations. -func (n *Number) AsRaw() uint64 { - return uint64(*n) -} - -// AsInt64 assumes that the value contains an int64 and returns it as -// such. -func (n *Number) AsInt64() int64 { - return internal.RawToInt64(n.AsRaw()) -} - -// AsFloat64 assumes that the measurement value contains a float64 and -// returns it as such. -func (n *Number) AsFloat64() float64 { - return internal.RawToFloat64(n.AsRaw()) -} - -// - as x atomic - -// AsNumberAtomic gets the Number atomically. -func (n *Number) AsNumberAtomic() Number { - return NewNumberFromRaw(n.AsRawAtomic()) -} - -// AsRawAtomic gets the uninterpreted raw value atomically. Might be -// useful for some atomic operations. 
-func (n *Number) AsRawAtomic() uint64 { - return atomic.LoadUint64(n.AsRawPtr()) -} - -// AsInt64Atomic assumes that the number contains an int64 and returns -// it as such atomically. -func (n *Number) AsInt64Atomic() int64 { - return atomic.LoadInt64(n.AsInt64Ptr()) -} - -// AsFloat64Atomic assumes that the measurement value contains a -// float64 and returns it as such atomically. -func (n *Number) AsFloat64Atomic() float64 { - return internal.RawToFloat64(n.AsRawAtomic()) -} - -// - as x ptr - -// AsRawPtr gets the pointer to the raw, uninterpreted raw -// value. Might be useful for some atomic operations. -func (n *Number) AsRawPtr() *uint64 { - return (*uint64)(n) -} - -// AsInt64Ptr assumes that the number contains an int64 and returns a -// pointer to it. -func (n *Number) AsInt64Ptr() *int64 { - return internal.RawPtrToInt64Ptr(n.AsRawPtr()) -} - -// AsFloat64Ptr assumes that the number contains a float64 and returns a -// pointer to it. -func (n *Number) AsFloat64Ptr() *float64 { - return internal.RawPtrToFloat64Ptr(n.AsRawPtr()) -} - -// - coerce - -// CoerceToInt64 casts the number to int64. May result in -// data/precision loss. -func (n *Number) CoerceToInt64(kind Kind) int64 { - switch kind { - case Int64Kind: - return n.AsInt64() - case Float64Kind: - return int64(n.AsFloat64()) - default: - // you get what you deserve - return 0 - } -} - -// CoerceToFloat64 casts the number to float64. May result in -// data/precision loss. -func (n *Number) CoerceToFloat64(kind Kind) float64 { - switch kind { - case Int64Kind: - return float64(n.AsInt64()) - case Float64Kind: - return n.AsFloat64() - default: - // you get what you deserve - return 0 - } -} - -// - set - -// SetNumber sets the number to the passed number. Both should be of -// the same kind. -func (n *Number) SetNumber(nn Number) { - *n.AsRawPtr() = nn.AsRaw() -} - -// SetRaw sets the number to the passed raw value. Both number and the -// raw number should represent the same kind. -func (n *Number) SetRaw(r uint64) { - *n.AsRawPtr() = r -} - -// SetInt64 assumes that the number contains an int64 and sets it to -// the passed value. -func (n *Number) SetInt64(i int64) { - *n.AsInt64Ptr() = i -} - -// SetFloat64 assumes that the number contains a float64 and sets it -// to the passed value. -func (n *Number) SetFloat64(f float64) { - *n.AsFloat64Ptr() = f -} - -// - set atomic - -// SetNumberAtomic sets the number to the passed number -// atomically. Both should be of the same kind. -func (n *Number) SetNumberAtomic(nn Number) { - atomic.StoreUint64(n.AsRawPtr(), nn.AsRaw()) -} - -// SetRawAtomic sets the number to the passed raw value -// atomically. Both number and the raw number should represent the -// same kind. -func (n *Number) SetRawAtomic(r uint64) { - atomic.StoreUint64(n.AsRawPtr(), r) -} - -// SetInt64Atomic assumes that the number contains an int64 and sets -// it to the passed value atomically. -func (n *Number) SetInt64Atomic(i int64) { - atomic.StoreInt64(n.AsInt64Ptr(), i) -} - -// SetFloat64Atomic assumes that the number contains a float64 and -// sets it to the passed value atomically. -func (n *Number) SetFloat64Atomic(f float64) { - atomic.StoreUint64(n.AsRawPtr(), internal.Float64ToRaw(f)) -} - -// - swap - -// SwapNumber sets the number to the passed number and returns the old -// number. Both this number and the passed number should be of the -// same kind. 
-func (n *Number) SwapNumber(nn Number) Number { - old := *n - n.SetNumber(nn) - return old -} - -// SwapRaw sets the number to the passed raw value and returns the old -// raw value. Both number and the raw number should represent the same -// kind. -func (n *Number) SwapRaw(r uint64) uint64 { - old := n.AsRaw() - n.SetRaw(r) - return old -} - -// SwapInt64 assumes that the number contains an int64, sets it to the -// passed value and returns the old int64 value. -func (n *Number) SwapInt64(i int64) int64 { - old := n.AsInt64() - n.SetInt64(i) - return old -} - -// SwapFloat64 assumes that the number contains an float64, sets it to -// the passed value and returns the old float64 value. -func (n *Number) SwapFloat64(f float64) float64 { - old := n.AsFloat64() - n.SetFloat64(f) - return old -} - -// - swap atomic - -// SwapNumberAtomic sets the number to the passed number and returns -// the old number atomically. Both this number and the passed number -// should be of the same kind. -func (n *Number) SwapNumberAtomic(nn Number) Number { - return NewNumberFromRaw(atomic.SwapUint64(n.AsRawPtr(), nn.AsRaw())) -} - -// SwapRawAtomic sets the number to the passed raw value and returns -// the old raw value atomically. Both number and the raw number should -// represent the same kind. -func (n *Number) SwapRawAtomic(r uint64) uint64 { - return atomic.SwapUint64(n.AsRawPtr(), r) -} - -// SwapInt64Atomic assumes that the number contains an int64, sets it -// to the passed value and returns the old int64 value atomically. -func (n *Number) SwapInt64Atomic(i int64) int64 { - return atomic.SwapInt64(n.AsInt64Ptr(), i) -} - -// SwapFloat64Atomic assumes that the number contains an float64, sets -// it to the passed value and returns the old float64 value -// atomically. -func (n *Number) SwapFloat64Atomic(f float64) float64 { - return internal.RawToFloat64(atomic.SwapUint64(n.AsRawPtr(), internal.Float64ToRaw(f))) -} - -// - add - -// AddNumber assumes that this and the passed number are of the passed -// kind and adds the passed number to this number. -func (n *Number) AddNumber(kind Kind, nn Number) { - switch kind { - case Int64Kind: - n.AddInt64(nn.AsInt64()) - case Float64Kind: - n.AddFloat64(nn.AsFloat64()) - } -} - -// AddRaw assumes that this number and the passed raw value are of the -// passed kind and adds the passed raw value to this number. -func (n *Number) AddRaw(kind Kind, r uint64) { - n.AddNumber(kind, NewNumberFromRaw(r)) -} - -// AddInt64 assumes that the number contains an int64 and adds the -// passed int64 to it. -func (n *Number) AddInt64(i int64) { - *n.AsInt64Ptr() += i -} - -// AddFloat64 assumes that the number contains a float64 and adds the -// passed float64 to it. -func (n *Number) AddFloat64(f float64) { - *n.AsFloat64Ptr() += f -} - -// - add atomic - -// AddNumberAtomic assumes that this and the passed number are of the -// passed kind and adds the passed number to this number atomically. -func (n *Number) AddNumberAtomic(kind Kind, nn Number) { - switch kind { - case Int64Kind: - n.AddInt64Atomic(nn.AsInt64()) - case Float64Kind: - n.AddFloat64Atomic(nn.AsFloat64()) - } -} - -// AddRawAtomic assumes that this number and the passed raw value are -// of the passed kind and adds the passed raw value to this number -// atomically. -func (n *Number) AddRawAtomic(kind Kind, r uint64) { - n.AddNumberAtomic(kind, NewNumberFromRaw(r)) -} - -// AddInt64Atomic assumes that the number contains an int64 and adds -// the passed int64 to it atomically. 
-func (n *Number) AddInt64Atomic(i int64) { - atomic.AddInt64(n.AsInt64Ptr(), i) -} - -// AddFloat64Atomic assumes that the number contains a float64 and -// adds the passed float64 to it atomically. -func (n *Number) AddFloat64Atomic(f float64) { - for { - o := n.AsFloat64Atomic() - if n.CompareAndSwapFloat64(o, o+f) { - break - } - } -} - -// - compare and swap (atomic only) - -// CompareAndSwapNumber does the atomic CAS operation on this -// number. This number and passed old and new numbers should be of the -// same kind. -func (n *Number) CompareAndSwapNumber(on, nn Number) bool { - return atomic.CompareAndSwapUint64(n.AsRawPtr(), on.AsRaw(), nn.AsRaw()) -} - -// CompareAndSwapRaw does the atomic CAS operation on this -// number. This number and passed old and new raw values should be of -// the same kind. -func (n *Number) CompareAndSwapRaw(or, nr uint64) bool { - return atomic.CompareAndSwapUint64(n.AsRawPtr(), or, nr) -} - -// CompareAndSwapInt64 assumes that this number contains an int64 and -// does the atomic CAS operation on it. -func (n *Number) CompareAndSwapInt64(oi, ni int64) bool { - return atomic.CompareAndSwapInt64(n.AsInt64Ptr(), oi, ni) -} - -// CompareAndSwapFloat64 assumes that this number contains a float64 and -// does the atomic CAS operation on it. -func (n *Number) CompareAndSwapFloat64(of, nf float64) bool { - return atomic.CompareAndSwapUint64(n.AsRawPtr(), internal.Float64ToRaw(of), internal.Float64ToRaw(nf)) -} - -// - compare - -// CompareNumber compares two Numbers given their kind. Both numbers -// should have the same kind. This returns: -// -// 0 if the numbers are equal -// -1 if the subject `n` is less than the argument `nn` -// +1 if the subject `n` is greater than the argument `nn` -func (n *Number) CompareNumber(kind Kind, nn Number) int { - switch kind { - case Int64Kind: - return n.CompareInt64(nn.AsInt64()) - case Float64Kind: - return n.CompareFloat64(nn.AsFloat64()) - default: - // you get what you deserve - return 0 - } -} - -// CompareRaw compares two numbers, where one is input as a raw -// uint64, interpreting both values as a `kind` of number. -func (n *Number) CompareRaw(kind Kind, r uint64) int { - return n.CompareNumber(kind, NewNumberFromRaw(r)) -} - -// CompareInt64 assumes that the Number contains an int64 and performs -// a comparison between the value and the other value. It returns the -// typical result of the compare function: -1 if the value is less -// than the other, 0 if both are equal, 1 if the value is greater than -// the other. -func (n *Number) CompareInt64(i int64) int { - this := n.AsInt64() - if this < i { - return -1 - } else if this > i { - return 1 - } - return 0 -} - -// CompareFloat64 assumes that the Number contains a float64 and -// performs a comparison between the value and the other value. It -// returns the typical result of the compare function: -1 if the value -// is less than the other, 0 if both are equal, 1 if the value is -// greater than the other. -// -// Do not compare NaN values. -func (n *Number) CompareFloat64(f float64) int { - this := n.AsFloat64() - if this < f { - return -1 - } else if this > f { - return 1 - } - return 0 -} - -// - relations to zero - -// IsPositive returns true if the actual value is greater than zero. -func (n *Number) IsPositive(kind Kind) bool { - return n.compareWithZero(kind) > 0 -} - -// IsNegative returns true if the actual value is less than zero. 
-func (n *Number) IsNegative(kind Kind) bool { - return n.compareWithZero(kind) < 0 -} - -// IsZero returns true if the actual value is equal to zero. -func (n *Number) IsZero(kind Kind) bool { - return n.compareWithZero(kind) == 0 -} - -// - misc - -// Emit returns a string representation of the raw value of the -// Number. A %d is used for integral values, %f for floating point -// values. -func (n *Number) Emit(kind Kind) string { - switch kind { - case Int64Kind: - return fmt.Sprintf("%d", n.AsInt64()) - case Float64Kind: - return fmt.Sprintf("%f", n.AsFloat64()) - default: - return "" - } -} - -// AsInterface returns the number as an interface{}, typically used -// for Kind-correct JSON conversion. -func (n *Number) AsInterface(kind Kind) interface{} { - switch kind { - case Int64Kind: - return n.AsInt64() - case Float64Kind: - return n.AsFloat64() - default: - return math.NaN() - } -} - -// - private stuff - -func (n *Number) compareWithZero(kind Kind) int { - switch kind { - case Int64Kind: - return n.CompareInt64(0) - case Float64Kind: - return n.CompareFloat64(0.) - default: - // you get what you deserve - return 0 - } -} diff --git a/sdk/metric/number/number_test.go b/sdk/metric/number/number_test.go deleted file mode 100644 index e8d675c7fc3..00000000000 --- a/sdk/metric/number/number_test.go +++ /dev/null @@ -1,212 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package number - -import ( - "math" - "testing" - "unsafe" - - "github.com/stretchr/testify/require" -) - -func TestNumber(t *testing.T) { - iNeg := NewInt64Number(-42) - iZero := NewInt64Number(0) - iPos := NewInt64Number(42) - i64Numbers := [3]Number{iNeg, iZero, iPos} - - for idx, i := range []int64{-42, 0, 42} { - n := i64Numbers[idx] - if got := n.AsInt64(); got != i { - t.Errorf("Number %#v (%s) int64 check failed, expected %d, got %d", n, n.Emit(Int64Kind), i, got) - } - } - - for _, n := range i64Numbers { - expected := unsafe.Pointer(&n) - got := unsafe.Pointer(n.AsRawPtr()) - if expected != got { - t.Errorf("Getting raw pointer failed, got %v, expected %v", got, expected) - } - } - - fNeg := NewFloat64Number(-42.) - fZero := NewFloat64Number(0.) - fPos := NewFloat64Number(42.) - f64Numbers := [3]Number{fNeg, fZero, fPos} - - for idx, f := range []float64{-42., 0., 42.} { - n := f64Numbers[idx] - if got := n.AsFloat64(); got != f { - t.Errorf("Number %#v (%s) float64 check failed, expected %f, got %f", n, n.Emit(Int64Kind), f, got) - } - } - - for _, n := range f64Numbers { - expected := unsafe.Pointer(&n) - got := unsafe.Pointer(n.AsRawPtr()) - if expected != got { - t.Errorf("Getting raw pointer failed, got %v, expected %v", got, expected) - } - } - - cmpsForNeg := [3]int{0, -1, -1} - cmpsForZero := [3]int{1, 0, -1} - cmpsForPos := [3]int{1, 1, 0} - - type testcase struct { - // n needs to be aligned for 64-bit atomic operations. - n Number - // nums needs to be aligned for 64-bit atomic operations. 
- nums [3]Number - kind Kind - pos bool - zero bool - neg bool - cmps [3]int - } - testcases := []testcase{ - { - n: iNeg, - kind: Int64Kind, - pos: false, - zero: false, - neg: true, - nums: i64Numbers, - cmps: cmpsForNeg, - }, - { - n: iZero, - kind: Int64Kind, - pos: false, - zero: true, - neg: false, - nums: i64Numbers, - cmps: cmpsForZero, - }, - { - n: iPos, - kind: Int64Kind, - pos: true, - zero: false, - neg: false, - nums: i64Numbers, - cmps: cmpsForPos, - }, - { - n: fNeg, - kind: Float64Kind, - pos: false, - zero: false, - neg: true, - nums: f64Numbers, - cmps: cmpsForNeg, - }, - { - n: fZero, - kind: Float64Kind, - pos: false, - zero: true, - neg: false, - nums: f64Numbers, - cmps: cmpsForZero, - }, - { - n: fPos, - kind: Float64Kind, - pos: true, - zero: false, - neg: false, - nums: f64Numbers, - cmps: cmpsForPos, - }, - } - for _, tt := range testcases { - if got := tt.n.IsPositive(tt.kind); got != tt.pos { - t.Errorf("Number %#v (%s) positive check failed, expected %v, got %v", tt.n, tt.n.Emit(tt.kind), tt.pos, got) - } - if got := tt.n.IsZero(tt.kind); got != tt.zero { - t.Errorf("Number %#v (%s) zero check failed, expected %v, got %v", tt.n, tt.n.Emit(tt.kind), tt.pos, got) - } - if got := tt.n.IsNegative(tt.kind); got != tt.neg { - t.Errorf("Number %#v (%s) negative check failed, expected %v, got %v", tt.n, tt.n.Emit(tt.kind), tt.pos, got) - } - for i := 0; i < 3; i++ { - if got := tt.n.CompareRaw(tt.kind, tt.nums[i].AsRaw()); got != tt.cmps[i] { - t.Errorf("Number %#v (%s) compare check with %#v (%s) failed, expected %d, got %d", tt.n, tt.n.Emit(tt.kind), tt.nums[i], tt.nums[i].Emit(tt.kind), tt.cmps[i], got) - } - } - } -} - -func TestNumberZero(t *testing.T) { - zero := Number(0) - zerof := NewFloat64Number(0) - zeroi := NewInt64Number(0) - - if zero != zerof || zero != zeroi { - t.Errorf("Invalid zero representations") - } -} - -func TestNumberAsInterface(t *testing.T) { - i64 := NewInt64Number(10) - f64 := NewFloat64Number(11.11) - require.Equal(t, int64(10), (&i64).AsInterface(Int64Kind).(int64)) - require.Equal(t, 11.11, (&f64).AsInterface(Float64Kind).(float64)) -} - -func TestNumberSignChange(t *testing.T) { - t.Run("Int64", func(t *testing.T) { - posInt := NewInt64Number(10) - negInt := NewInt64Number(-10) - - require.Equal(t, posInt, NewNumberSignChange(Int64Kind, negInt)) - require.Equal(t, negInt, NewNumberSignChange(Int64Kind, posInt)) - }) - - t.Run("Float64", func(t *testing.T) { - posFloat := NewFloat64Number(10) - negFloat := NewFloat64Number(-10) - - require.Equal(t, posFloat, NewNumberSignChange(Float64Kind, negFloat)) - require.Equal(t, negFloat, NewNumberSignChange(Float64Kind, posFloat)) - }) - - t.Run("Float64Zero", func(t *testing.T) { - posFloat := NewFloat64Number(0) - negFloat := NewFloat64Number(math.Copysign(0, -1)) - - require.Equal(t, posFloat, NewNumberSignChange(Float64Kind, negFloat)) - require.Equal(t, negFloat, NewNumberSignChange(Float64Kind, posFloat)) - }) - - t.Run("Float64Inf", func(t *testing.T) { - posFloat := NewFloat64Number(math.Inf(+1)) - negFloat := NewFloat64Number(math.Inf(-1)) - - require.Equal(t, posFloat, NewNumberSignChange(Float64Kind, negFloat)) - require.Equal(t, negFloat, NewNumberSignChange(Float64Kind, posFloat)) - }) - - t.Run("Float64NaN", func(t *testing.T) { - posFloat := NewFloat64Number(math.NaN()) - negFloat := NewFloat64Number(math.Copysign(math.NaN(), -1)) - - require.Equal(t, posFloat, NewNumberSignChange(Float64Kind, negFloat)) - require.Equal(t, negFloat, NewNumberSignChange(Float64Kind, posFloat)) - 
}) -} diff --git a/sdk/metric/periodic_reader.go b/sdk/metric/periodic_reader.go new file mode 100644 index 00000000000..9c66ac5d552 --- /dev/null +++ b/sdk/metric/periodic_reader.go @@ -0,0 +1,245 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build go1.18 +// +build go1.18 + +package metric // import "go.opentelemetry.io/otel/sdk/metric" + +import ( + "context" + "fmt" + "sync" + "sync/atomic" + "time" + + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/internal/global" + "go.opentelemetry.io/otel/sdk/metric/aggregation" + "go.opentelemetry.io/otel/sdk/metric/metricdata" + "go.opentelemetry.io/otel/sdk/metric/view" +) + +// Default periodic reader timing. +const ( + defaultTimeout = time.Millisecond * 30000 + defaultInterval = time.Millisecond * 60000 +) + +// periodicReaderConfig contains configuration options for a PeriodicReader. +type periodicReaderConfig struct { + interval time.Duration + timeout time.Duration + temporalitySelector TemporalitySelector + aggregationSelector AggregationSelector +} + +// newPeriodicReaderConfig returns a periodicReaderConfig configured with +// options. +func newPeriodicReaderConfig(options []PeriodicReaderOption) periodicReaderConfig { + c := periodicReaderConfig{ + interval: defaultInterval, + timeout: defaultTimeout, + temporalitySelector: DefaultTemporalitySelector, + aggregationSelector: DefaultAggregationSelector, + } + for _, o := range options { + c = o.applyPeriodic(c) + } + return c +} + +// PeriodicReaderOption applies a configuration option value to a PeriodicReader. +type PeriodicReaderOption interface { + applyPeriodic(periodicReaderConfig) periodicReaderConfig +} + +// periodicReaderOptionFunc applies a set of options to a periodicReaderConfig. +type periodicReaderOptionFunc func(periodicReaderConfig) periodicReaderConfig + +// applyPeriodic returns a periodicReaderConfig with option(s) applied. +func (o periodicReaderOptionFunc) applyPeriodic(conf periodicReaderConfig) periodicReaderConfig { + return o(conf) +} + +// WithTimeout configures the time a PeriodicReader waits for an export to +// complete before canceling it. +// +// If this option is not used or d is less than or equal to zero, 30 seconds +// is used as the default. +func WithTimeout(d time.Duration) PeriodicReaderOption { + return periodicReaderOptionFunc(func(conf periodicReaderConfig) periodicReaderConfig { + if d <= 0 { + return conf + } + conf.timeout = d + return conf + }) +} + +// WithInterval configures the intervening time between exports for a +// PeriodicReader. +// +// If this option is not used or d is less than or equal to zero, 60 seconds +// is used as the default. 
+func WithInterval(d time.Duration) PeriodicReaderOption { + return periodicReaderOptionFunc(func(conf periodicReaderConfig) periodicReaderConfig { + if d <= 0 { + return conf + } + conf.interval = d + return conf + }) +} + +// NewPeriodicReader returns a Reader that collects and exports metric data to +// the exporter at a defined interval. By default, the returned Reader will +// collect and export data every 60 seconds, and will cancel export attempts +// that exceed 30 seconds. The export time is not counted towards the interval +// between attempts. +// +// The Collect method of the returned Reader continues to gather and return +// metric data to the user. It will not automatically send that data to the +// exporter. That is left to the user to accomplish. +func NewPeriodicReader(exporter Exporter, options ...PeriodicReaderOption) Reader { + conf := newPeriodicReaderConfig(options) + ctx, cancel := context.WithCancel(context.Background()) + r := &periodicReader{ + timeout: conf.timeout, + exporter: exporter, + cancel: cancel, + + temporalitySelector: conf.temporalitySelector, + aggregationSelector: conf.aggregationSelector, + } + + r.wg.Add(1) + go func() { + defer r.wg.Done() + r.run(ctx, conf.interval) + }() + + return r +} + +// periodicReader is a Reader that continuously collects and exports metric +// data at a set interval. +type periodicReader struct { + producer atomic.Value + + timeout time.Duration + exporter Exporter + + temporalitySelector TemporalitySelector + aggregationSelector AggregationSelector + + wg sync.WaitGroup + cancel context.CancelFunc + shutdownOnce sync.Once +} + +// Compile-time check that the periodicReader implements Reader and is comparable. +var _ = map[Reader]struct{}{&periodicReader{}: {}} + +// newTicker allows testing override. +var newTicker = time.NewTicker + +// run continuously collects and exports metric data at the specified +// interval. This will run until ctx is canceled or times out. +func (r *periodicReader) run(ctx context.Context, interval time.Duration) { + ticker := newTicker(interval) + defer ticker.Stop() + + for { + select { + case <-ticker.C: + m, err := r.Collect(ctx) + if err == nil { + c, cancel := context.WithTimeout(ctx, r.timeout) + err = r.exporter.Export(c, m) + cancel() + } + if err != nil { + otel.Handle(err) + } + case <-ctx.Done(): + return + } + } +} + +// register registers p as the producer of this reader. +func (r *periodicReader) register(p producer) { + // Only register once. If producer is already set, do nothing. + if !r.producer.CompareAndSwap(nil, produceHolder{produce: p.produce}) { + msg := "did not register periodic reader" + global.Error(errDuplicateRegister, msg) + } +} + +// temporality reports the Temporality for the instrument kind provided. +func (r *periodicReader) temporality(kind view.InstrumentKind) metricdata.Temporality { + return r.temporalitySelector(kind) +} + +// aggregation returns what Aggregation to use for kind. +func (r *periodicReader) aggregation(kind view.InstrumentKind) aggregation.Aggregation { // nolint:revive // import-shadow for method scoped by type. + return r.aggregationSelector(kind) +} + +// Collect gathers and returns all metric data related to the Reader from +// the SDK. The returned metric data is not exported to the configured +// exporter; it is left to the caller to handle that if desired. +// +// An error is returned if this is called after Shutdown.
+func (r *periodicReader) Collect(ctx context.Context) (metricdata.ResourceMetrics, error) { + p := r.producer.Load() + if p == nil { + return metricdata.ResourceMetrics{}, ErrReaderNotRegistered + } + + ph, ok := p.(produceHolder) + if !ok { + // The atomic.Value is entirely in the periodicReader's control so + // this should never happen. In the unforeseen case that this does + // happen, return an error instead of panicking so a user's code does + // not halt in the process. + err := fmt.Errorf("periodic reader: invalid producer: %T", p) + return metricdata.ResourceMetrics{}, err + } + return ph.produce(ctx) +} + +// ForceFlush flushes the Exporter. +func (r *periodicReader) ForceFlush(ctx context.Context) error { + return r.exporter.ForceFlush(ctx) +} + +// Shutdown stops the export pipeline. +func (r *periodicReader) Shutdown(ctx context.Context) error { + err := ErrReaderShutdown + r.shutdownOnce.Do(func() { + // Stop the run loop. + r.cancel() + r.wg.Wait() + + // Any future call to Collect will now return ErrReaderShutdown. + r.producer.Store(produceHolder{ + produce: shutdownProducer{}.produce, + }) + + err = r.exporter.Shutdown(ctx) + }) + return err +} diff --git a/sdk/metric/periodic_reader_test.go b/sdk/metric/periodic_reader_test.go new file mode 100644 index 00000000000..5689aa11b85 --- /dev/null +++ b/sdk/metric/periodic_reader_test.go @@ -0,0 +1,225 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License.
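To make the periodic reader's surface concrete, here is a minimal usage sketch against the API added above. It assumes the import paths introduced by this patch; printExporter is a hypothetical stand-in mirroring the shape of the fnExporter test double below, not part of the SDK.

package main

import (
	"context"
	"fmt"
	"time"

	metric "go.opentelemetry.io/otel/sdk/metric"
	"go.opentelemetry.io/otel/sdk/metric/metricdata"
)

// printExporter is a hypothetical Exporter that only reports how much data
// each export call received.
type printExporter struct{}

func (printExporter) Export(_ context.Context, m metricdata.ResourceMetrics) error {
	fmt.Printf("exporting metrics for %d scopes\n", len(m.ScopeMetrics))
	return nil
}

func (printExporter) ForceFlush(context.Context) error { return nil }

func (printExporter) Shutdown(context.Context) error { return nil }

func main() {
	// Collect and export every 30s, canceling any export that runs past 5s.
	rdr := metric.NewPeriodicReader(printExporter{},
		metric.WithInterval(30*time.Second),
		metric.WithTimeout(5*time.Second),
	)
	// Shutdown stops the collect loop and shuts down the exporter.
	defer func() { _ = rdr.Shutdown(context.Background()) }()
}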
+ +//go:build go1.18 +// +build go1.18 + +package metric // import "go.opentelemetry.io/otel/sdk/metric" + +import ( + "context" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/suite" + + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/sdk/metric/metricdata" + "go.opentelemetry.io/otel/sdk/metric/view" +) + +const testDur = time.Second * 2 + +func TestWithTimeout(t *testing.T) { + test := func(d time.Duration) time.Duration { + opts := []PeriodicReaderOption{WithTimeout(d)} + return newPeriodicReaderConfig(opts).timeout + } + + assert.Equal(t, testDur, test(testDur)) + assert.Equal(t, defaultTimeout, newPeriodicReaderConfig(nil).timeout) + assert.Equal(t, defaultTimeout, test(time.Duration(0)), "invalid timeout should use default") + assert.Equal(t, defaultTimeout, test(time.Duration(-1)), "invalid timeout should use default") +} + +func TestWithInterval(t *testing.T) { + test := func(d time.Duration) time.Duration { + opts := []PeriodicReaderOption{WithInterval(d)} + return newPeriodicReaderConfig(opts).interval + } + + assert.Equal(t, testDur, test(testDur)) + assert.Equal(t, defaultInterval, newPeriodicReaderConfig(nil).interval) + assert.Equal(t, defaultInterval, test(time.Duration(0)), "invalid interval should use default") + assert.Equal(t, defaultInterval, test(time.Duration(-1)), "invalid interval should use default") +} + +type fnExporter struct { + exportFunc func(context.Context, metricdata.ResourceMetrics) error + flushFunc func(context.Context) error + shutdownFunc func(context.Context) error +} + +var _ Exporter = (*fnExporter)(nil) + +func (e *fnExporter) Export(ctx context.Context, m metricdata.ResourceMetrics) error { + if e.exportFunc != nil { + return e.exportFunc(ctx, m) + } + return nil +} + +func (e *fnExporter) ForceFlush(ctx context.Context) error { + if e.flushFunc != nil { + return e.flushFunc(ctx) + } + return nil +} + +func (e *fnExporter) Shutdown(ctx context.Context) error { + if e.shutdownFunc != nil { + return e.shutdownFunc(ctx) + } + return nil +} + +type periodicReaderTestSuite struct { + *readerTestSuite + + ErrReader Reader +} + +func (ts *periodicReaderTestSuite) SetupTest() { + ts.readerTestSuite.SetupTest() + + e := &fnExporter{ + exportFunc: func(context.Context, metricdata.ResourceMetrics) error { return assert.AnError }, + flushFunc: func(context.Context) error { return assert.AnError }, + shutdownFunc: func(context.Context) error { return assert.AnError }, + } + + ts.ErrReader = NewPeriodicReader(e) +} + +func (ts *periodicReaderTestSuite) TearDownTest() { + ts.readerTestSuite.TearDownTest() + + _ = ts.ErrReader.Shutdown(context.Background()) +} + +func (ts *periodicReaderTestSuite) TestForceFlushPropagated() { + ts.Equal(assert.AnError, ts.ErrReader.ForceFlush(context.Background())) +} + +func (ts *periodicReaderTestSuite) TestShutdownPropagated() { + ts.Equal(assert.AnError, ts.ErrReader.Shutdown(context.Background())) +} + +func TestPeriodicReader(t *testing.T) { + suite.Run(t, &periodicReaderTestSuite{ + readerTestSuite: &readerTestSuite{ + Factory: func() Reader { + return NewPeriodicReader(new(fnExporter)) + }, + }, + }) +} + +type chErrorHandler struct { + Err chan error +} + +func newChErrorHandler() *chErrorHandler { + return &chErrorHandler{ + Err: make(chan error, 1), + } +} + +func (eh chErrorHandler) Handle(err error) { + eh.Err <- err +} + +func TestPeriodicReaderRun(t *testing.T) { + // Override the ticker C chan so tests are not flaky and do not rely on timing.
+ defer func(orig func(time.Duration) *time.Ticker) { + newTicker = orig + }(newTicker) + // Keep this channel at size zero so that a send blocks until + // the select case receives it and the collection loop is started. + trigger := make(chan time.Time) + newTicker = func(d time.Duration) *time.Ticker { + ticker := time.NewTicker(d) + ticker.C = trigger + return ticker + } + + // Register an error handler to validate export errors are passed to + // otel.Handle. + defer func(orig otel.ErrorHandler) { + otel.SetErrorHandler(orig) + }(otel.GetErrorHandler()) + eh := newChErrorHandler() + otel.SetErrorHandler(eh) + + exp := &fnExporter{ + exportFunc: func(_ context.Context, m metricdata.ResourceMetrics) error { + // The testProducer produces testMetrics. + assert.Equal(t, testMetrics, m) + return assert.AnError + }, + } + + r := NewPeriodicReader(exp) + r.register(testProducer{}) + trigger <- time.Now() + assert.Equal(t, assert.AnError, <-eh.Err) + + // Ensure the Reader is allowed a clean up attempt. + _ = r.Shutdown(context.Background()) +} + +func BenchmarkPeriodicReader(b *testing.B) { + b.Run("Collect", benchReaderCollectFunc( + NewPeriodicReader(new(fnExporter)), + )) +} + +func TestPeriodicReaderTemporality(t *testing.T) { + tests := []struct { + name string + options []PeriodicReaderOption + // Currently only testing constant temporality. This should be expanded + // if we put more advanced selection in the SDK. + wantTemporality metricdata.Temporality + }{ + { + name: "default", + wantTemporality: metricdata.CumulativeTemporality, + }, + { + name: "delta", + options: []PeriodicReaderOption{ + WithTemporalitySelector(deltaTemporalitySelector), + }, + wantTemporality: metricdata.DeltaTemporality, + }, + { + name: "repeats overwrite", + options: []PeriodicReaderOption{ + WithTemporalitySelector(deltaTemporalitySelector), + WithTemporalitySelector(cumulativeTemporalitySelector), + }, + wantTemporality: metricdata.CumulativeTemporality, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + var undefinedInstrument view.InstrumentKind + rdr := NewPeriodicReader(new(fnExporter), tt.options...) + assert.Equal(t, tt.wantTemporality, rdr.temporality(undefinedInstrument)) + }) + } +} diff --git a/sdk/metric/pipeline.go b/sdk/metric/pipeline.go new file mode 100644 index 00000000000..d4f7d4f8082 --- /dev/null +++ b/sdk/metric/pipeline.go @@ -0,0 +1,349 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License.
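TestPeriodicReaderRun above is deterministic because it swaps the package-level newTicker hook for a ticker whose channel the test controls. The same seam works for any time-driven loop; a self-contained sketch of the pattern follows, where runLoop and the other names are illustrative, not part of the SDK.

package main

import (
	"fmt"
	"time"
)

// newTicker is the test seam: production code calls it instead of
// calling time.NewTicker directly.
var newTicker = time.NewTicker

// runLoop invokes work on every tick until stop is closed.
func runLoop(stop <-chan struct{}, work func()) {
	t := newTicker(time.Minute)
	defer t.Stop()
	for {
		select {
		case <-t.C:
			work()
		case <-stop:
			return
		}
	}
}

func main() {
	// In a test, replace the hook with a ticker whose C we control.
	trigger := make(chan time.Time)
	newTicker = func(d time.Duration) *time.Ticker {
		t := time.NewTicker(d)
		t.C = trigger // unbuffered: a send blocks until the loop receives it
		return t
	}

	stop := make(chan struct{})
	done := make(chan struct{})
	go func() { runLoop(stop, func() { fmt.Println("tick") }); close(done) }()

	trigger <- time.Now() // deterministically fire exactly one iteration
	close(stop)
	<-done
}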
+ +//go:build go1.18 +// +build go1.18 + +package metric // import "go.opentelemetry.io/otel/sdk/metric" + +import ( + "context" + "errors" + "fmt" + "strings" + "sync" + + "go.opentelemetry.io/otel/metric/unit" + "go.opentelemetry.io/otel/sdk/instrumentation" + "go.opentelemetry.io/otel/sdk/metric/aggregation" + "go.opentelemetry.io/otel/sdk/metric/internal" + "go.opentelemetry.io/otel/sdk/metric/metricdata" + "go.opentelemetry.io/otel/sdk/metric/view" + "go.opentelemetry.io/otel/sdk/resource" +) + +type aggregator interface { + Aggregation() metricdata.Aggregation +} +type instrumentKey struct { + name string + unit unit.Unit +} + +type instrumentValue struct { + description string + aggregator aggregator +} + +func newPipeline(res *resource.Resource) *pipeline { + if res == nil { + res = resource.Empty() + } + return &pipeline{ + resource: res, + aggregations: make(map[instrumentation.Scope]map[instrumentKey]instrumentValue), + } +} + +// pipeline connects all of the instruments created by a meter provider to a Reader. +// This is the object that will be registered with `Reader.register()` when a meter provider is created. +// +// As instruments are created, each should be checked against the views of the +// Reader, and for each match an aggregator should be added to the pipeline. +type pipeline struct { + resource *resource.Resource + + sync.Mutex + aggregations map[instrumentation.Scope]map[instrumentKey]instrumentValue + callbacks []func(context.Context) +} + +var errAlreadyRegistered = errors.New("instrument already registered") + +// addAggregator stores an aggregator with an instrument description. The aggregator +// is used when `produce()` is called. +func (p *pipeline) addAggregator(scope instrumentation.Scope, name, description string, instUnit unit.Unit, agg aggregator) error { + p.Lock() + defer p.Unlock() + if p.aggregations == nil { + p.aggregations = map[instrumentation.Scope]map[instrumentKey]instrumentValue{} + } + if p.aggregations[scope] == nil { + p.aggregations[scope] = map[instrumentKey]instrumentValue{} + } + inst := instrumentKey{ + name: name, + unit: instUnit, + } + if _, ok := p.aggregations[scope][inst]; ok { + return fmt.Errorf("%w: name %s, scope: %s", errAlreadyRegistered, name, scope) + } + + p.aggregations[scope][inst] = instrumentValue{ + description: description, + aggregator: agg, + } + return nil +} + +// addCallback registers a callback to be run when `produce()` is called. +func (p *pipeline) addCallback(callback func(context.Context)) { + p.Lock() + defer p.Unlock() + p.callbacks = append(p.callbacks, callback) +} + +// callbackKey is a context key type used to identify context that came from the SDK. +type callbackKey int + +// produceKey is the context key to tell if an Observe is called within a callback. +// Its value of zero is arbitrary. If this package defined other context keys, +// they would have different integer values. +const produceKey callbackKey = 0 + +// produce returns aggregated metrics from a single collection. +// +// This method is safe to call concurrently. +func (p *pipeline) produce(ctx context.Context) (metricdata.ResourceMetrics, error) { + p.Lock() + defer p.Unlock() + + ctx = context.WithValue(ctx, produceKey, struct{}{}) + + for _, callback := range p.callbacks { + // TODO make the callbacks parallel. ( #3034 ) + callback(ctx) + if err := ctx.Err(); err != nil { + // This means the context expired before we finished running callbacks.
+ return metricdata.ResourceMetrics{}, err + } + } + + sm := make([]metricdata.ScopeMetrics, 0, len(p.aggregations)) + for scope, instruments := range p.aggregations { + metrics := make([]metricdata.Metrics, 0, len(instruments)) + for inst, instValue := range instruments { + data := instValue.aggregator.Aggregation() + if data != nil { + metrics = append(metrics, metricdata.Metrics{ + Name: inst.name, + Description: instValue.description, + Unit: inst.unit, + Data: data, + }) + } + } + if len(metrics) > 0 { + sm = append(sm, metricdata.ScopeMetrics{ + Scope: scope, + Metrics: metrics, + }) + } + } + + return metricdata.ResourceMetrics{ + Resource: p.resource, + ScopeMetrics: sm, + }, nil +} + +// pipelineRegistry manages creating pipelines and aggregators. Meters retrieve +// new Aggregators from a pipelineRegistry. +type pipelineRegistry struct { + views map[Reader][]view.View + pipelines map[Reader]*pipeline +} + +func newPipelineRegistries(views map[Reader][]view.View) *pipelineRegistry { + pipelines := map[Reader]*pipeline{} + for rdr := range views { + pipe := &pipeline{} + rdr.register(pipe) + pipelines[rdr] = pipe + } + return &pipelineRegistry{ + views: views, + pipelines: pipelines, + } +} + +// TODO (#3053) Only register callbacks if any instrument matches in a view. +func (reg *pipelineRegistry) registerCallback(fn func(context.Context)) { + for _, pipe := range reg.pipelines { + pipe.addCallback(fn) + } +} + +// createAggregators will create all backing aggregators for an instrument. +// It will return an error if an instrument is registered more than once. +// Note: aggregators may still be returned along with a non-nil error. +func createAggregators[N int64 | float64](reg *pipelineRegistry, inst view.Instrument, instUnit unit.Unit) ([]internal.Aggregator[N], error) { + var aggs []internal.Aggregator[N] + + errs := &multierror{} + for rdr, views := range reg.views { + pipe := reg.pipelines[rdr] + rdrAggs, err := createAggregatorsForReader[N](rdr, views, inst) + if err != nil { + errs.append(err) + } + for inst, agg := range rdrAggs { + err := pipe.addAggregator(inst.scope, inst.name, inst.description, instUnit, agg) + if err != nil { + errs.append(err) + } + aggs = append(aggs, agg) + } + } + return aggs, errs.errorOrNil() +} + +type multierror struct { + wrapped error + errors []string +} + +func (m *multierror) errorOrNil() error { + if len(m.errors) == 0 { + return nil + } + return fmt.Errorf("%w: %s", m.wrapped, strings.Join(m.errors, "; ")) +} + +func (m *multierror) append(err error) { + m.errors = append(m.errors, err.Error()) +} + +// instrumentID is used to identify multiple instruments being mapped to the same aggregator, +// e.g. when an exact match view is used together with a name=* view. +// A view.Instrument cannot be used here because not all Aggregators are comparable.
+type instrumentID struct { + scope instrumentation.Scope + name string + description string +} + +var errCreatingAggregators = errors.New("could not create all aggregators") + +func createAggregatorsForReader[N int64 | float64](rdr Reader, views []view.View, inst view.Instrument) (map[instrumentID]internal.Aggregator[N], error) { + aggs := map[instrumentID]internal.Aggregator[N]{} + errs := &multierror{ + wrapped: errCreatingAggregators, + } + for _, v := range views { + inst, match := v.TransformInstrument(inst) + + ident := instrumentID{ + scope: inst.Scope, + name: inst.Name, + description: inst.Description, + } + + if _, ok := aggs[ident]; ok || !match { + continue + } + + if inst.Aggregation == nil { + inst.Aggregation = rdr.aggregation(inst.Kind) + } else if _, ok := inst.Aggregation.(aggregation.Default); ok { + inst.Aggregation = rdr.aggregation(inst.Kind) + } + + if err := isAggregatorCompatible(inst.Kind, inst.Aggregation); err != nil { + err = fmt.Errorf("creating aggregator with instrumentKind: %d, aggregation %v: %w", inst.Kind, inst.Aggregation, err) + errs.append(err) + continue + } + + agg := createAggregator[N](inst.Aggregation, rdr.temporality(inst.Kind), isMonotonic(inst.Kind)) + if agg != nil { + // TODO (#3011): If filtering is done at the instrument level add here. + // This is where the aggregator and the view are both in scope. + aggs[ident] = agg + } + } + return aggs, errs.errorOrNil() +} + +func isMonotonic(kind view.InstrumentKind) bool { + switch kind { + case view.AsyncCounter, view.SyncCounter, view.SyncHistogram: + return true + } + return false +} + +// createAggregator takes the config (Aggregation and Temporality) and produces a memory-backed Aggregator. +// TODO (#3011): If filtering is done by the Aggregator it should be passed here. +func createAggregator[N int64 | float64](agg aggregation.Aggregation, temporality metricdata.Temporality, monotonic bool) internal.Aggregator[N] { + switch agg := agg.(type) { + case aggregation.Drop: + return nil + case aggregation.LastValue: + return internal.NewLastValue[N]() + case aggregation.Sum: + if temporality == metricdata.CumulativeTemporality { + return internal.NewCumulativeSum[N](monotonic) + } + return internal.NewDeltaSum[N](monotonic) + case aggregation.ExplicitBucketHistogram: + if temporality == metricdata.CumulativeTemporality { + return internal.NewCumulativeHistogram[N](agg) + } + return internal.NewDeltaHistogram[N](agg) + } + return nil +} + +// TODO: review need for aggregation check after https://github.com/open-telemetry/opentelemetry-specification/issues/2710 +var errIncompatibleAggregation = errors.New("incompatible aggregation") +var errUnknownAggregation = errors.New("unrecognized aggregation") + +// isAggregatorCompatible checks if the aggregation can be used by the instrument. +// Current compatibility: +// +// | Instrument Kind | Drop | LastValue | Sum | Histogram | Exponential Histogram | +// |----------------------|------|-----------|-----|-----------|-----------------------| +// | Sync Counter | X | | X | X | X | +// | Sync UpDown Counter | X | | X | | | +// | Sync Histogram | X | | X | X | X | +// | Async Counter | X | | X | | | +// | Async UpDown Counter | X | | X | | | +// | Async Gauge | X | X | | | |.
+func isAggregatorCompatible(kind view.InstrumentKind, agg aggregation.Aggregation) error { + switch agg.(type) { + case aggregation.ExplicitBucketHistogram: + if kind == view.SyncCounter || kind == view.SyncHistogram { + return nil + } + return errIncompatibleAggregation + case aggregation.Sum: + switch kind { + case view.AsyncCounter, view.AsyncUpDownCounter, view.SyncCounter, view.SyncHistogram, view.SyncUpDownCounter: + return nil + default: + return errIncompatibleAggregation + } + case aggregation.LastValue: + if kind == view.AsyncGauge { + return nil + } + return errIncompatibleAggregation + case aggregation.Drop: + return nil + default: + // This is reached after checking for Default; it should be an error at this point. + return fmt.Errorf("%w: %v", errUnknownAggregation, agg) + } +} diff --git a/sdk/metric/pipeline_registry_test.go b/sdk/metric/pipeline_registry_test.go new file mode 100644 index 00000000000..d4e47f35eb5 --- /dev/null +++ b/sdk/metric/pipeline_registry_test.go @@ -0,0 +1,587 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build go1.18 +// +build go1.18 + +package metric // import "go.opentelemetry.io/otel/sdk/metric" + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "go.opentelemetry.io/otel/metric/unit" + "go.opentelemetry.io/otel/sdk/metric/aggregation" + "go.opentelemetry.io/otel/sdk/metric/internal" + "go.opentelemetry.io/otel/sdk/metric/view" +) + +type invalidAggregation struct { + aggregation.Aggregation +} + +func (invalidAggregation) Copy() aggregation.Aggregation { + return invalidAggregation{} +} +func (invalidAggregation) Err() error { + return nil +} + +func testCreateAggregators[N int64 | float64](t *testing.T) { + changeAggView, _ := view.New( + view.MatchInstrumentName("foo"), + view.WithSetAggregation(aggregation.ExplicitBucketHistogram{}), + ) + renameView, _ := view.New( + view.MatchInstrumentName("foo"), + view.WithRename("bar"), + ) + defaultAggView, _ := view.New( + view.MatchInstrumentName("foo"), + view.WithSetAggregation(aggregation.Default{}), + ) + invalidAggView, _ := view.New( + view.MatchInstrumentName("foo"), + view.WithSetAggregation(invalidAggregation{}), + ) + + instruments := []view.Instrument{ + {Name: "foo", Kind: view.InstrumentKind(0)}, // Unknown kind + {Name: "foo", Kind: view.SyncCounter}, + {Name: "foo", Kind: view.SyncUpDownCounter}, + {Name: "foo", Kind: view.SyncHistogram}, + {Name: "foo", Kind: view.AsyncCounter}, + {Name: "foo", Kind: view.AsyncUpDownCounter}, + {Name: "foo", Kind: view.AsyncGauge}, + } + + testcases := []struct { + name string + reader Reader + views []view.View + inst view.Instrument + wantKind internal.Aggregator[N] // Aggregators should match len and types + wantLen int + wantErr error + }{ + { + name: "drop should return 0 aggregators", + reader: NewManualReader(WithAggregationSelector(func(ik view.InstrumentKind) aggregation.Aggregation { return aggregation.Drop{} })), + views:
[]view.View{{}}, + inst: instruments[view.SyncCounter], + }, + { + name: "default agg should use reader", + reader: NewManualReader(WithTemporalitySelector(deltaTemporalitySelector)), + views: []view.View{defaultAggView}, + inst: instruments[view.SyncUpDownCounter], + wantKind: internal.NewDeltaSum[N](false), + wantLen: 1, + }, + { + name: "default agg should use reader", + reader: NewManualReader(WithTemporalitySelector(deltaTemporalitySelector)), + views: []view.View{defaultAggView}, + inst: instruments[view.SyncHistogram], + wantKind: internal.NewDeltaHistogram[N](aggregation.ExplicitBucketHistogram{}), + wantLen: 1, + }, + { + name: "default agg should use reader", + reader: NewManualReader(WithTemporalitySelector(deltaTemporalitySelector)), + views: []view.View{defaultAggView}, + inst: instruments[view.AsyncCounter], + wantKind: internal.NewDeltaSum[N](true), + wantLen: 1, + }, + { + name: "default agg should use reader", + reader: NewManualReader(WithTemporalitySelector(deltaTemporalitySelector)), + views: []view.View{defaultAggView}, + inst: instruments[view.AsyncUpDownCounter], + wantKind: internal.NewDeltaSum[N](false), + wantLen: 1, + }, + { + name: "default agg should use reader", + reader: NewManualReader(WithTemporalitySelector(deltaTemporalitySelector)), + views: []view.View{defaultAggView}, + inst: instruments[view.AsyncGauge], + wantKind: internal.NewLastValue[N](), + wantLen: 1, + }, + { + name: "default agg should use reader", + reader: NewManualReader(WithTemporalitySelector(deltaTemporalitySelector)), + views: []view.View{defaultAggView}, + inst: instruments[view.SyncCounter], + wantKind: internal.NewDeltaSum[N](true), + wantLen: 1, + }, + { + name: "reader should set default agg", + reader: NewManualReader(), + views: []view.View{{}}, + inst: instruments[view.SyncUpDownCounter], + wantKind: internal.NewCumulativeSum[N](false), + wantLen: 1, + }, + { + name: "reader should set default agg", + reader: NewManualReader(), + views: []view.View{{}}, + inst: instruments[view.SyncHistogram], + wantKind: internal.NewCumulativeHistogram[N](aggregation.ExplicitBucketHistogram{}), + wantLen: 1, + }, + { + name: "reader should set default agg", + reader: NewManualReader(), + views: []view.View{{}}, + inst: instruments[view.AsyncCounter], + wantKind: internal.NewCumulativeSum[N](true), + wantLen: 1, + }, + { + name: "reader should set default agg", + reader: NewManualReader(), + views: []view.View{{}}, + inst: instruments[view.AsyncUpDownCounter], + wantKind: internal.NewCumulativeSum[N](false), + wantLen: 1, + }, + { + name: "reader should set default agg", + reader: NewManualReader(), + views: []view.View{{}}, + inst: instruments[view.AsyncGauge], + wantKind: internal.NewLastValue[N](), + wantLen: 1, + }, + { + name: "reader should set default agg", + reader: NewManualReader(), + views: []view.View{{}}, + inst: instruments[view.SyncCounter], + wantKind: internal.NewCumulativeSum[N](true), + wantLen: 1, + }, + { + name: "view should overwrite reader", + reader: NewManualReader(), + views: []view.View{changeAggView}, + inst: instruments[view.SyncCounter], + wantKind: internal.NewCumulativeHistogram[N](aggregation.ExplicitBucketHistogram{}), + wantLen: 1, + }, + { + name: "multiple views should create multiple aggregators", + reader: NewManualReader(), + views: []view.View{{}, renameView}, + inst: instruments[view.SyncCounter], + wantKind: internal.NewCumulativeSum[N](true), + wantLen: 2, + }, + { + name: "reader with invalid aggregation should error", + reader: 
NewManualReader(WithAggregationSelector(func(ik view.InstrumentKind) aggregation.Aggregation { return aggregation.Default{} })), + views: []view.View{{}}, + inst: instruments[view.SyncCounter], + wantErr: errCreatingAggregators, + }, + { + name: "view with invalid aggregation should error", + reader: NewManualReader(), + views: []view.View{invalidAggView}, + inst: instruments[view.SyncCounter], + wantErr: errCreatingAggregators, + }, + } + for _, tt := range testcases { + t.Run(tt.name, func(t *testing.T) { + got, err := createAggregatorsForReader[N](tt.reader, tt.views, tt.inst) + assert.ErrorIs(t, err, tt.wantErr) + require.Len(t, got, tt.wantLen) + for _, agg := range got { + assert.IsType(t, tt.wantKind, agg) + } + }) + } +} + +func testInvalidInstrumentShouldPanic[N int64 | float64]() { + reader := NewManualReader() + views := []view.View{{}} + inst := view.Instrument{ + Name: "foo", + Kind: view.InstrumentKind(255), + } + _, _ = createAggregatorsForReader[N](reader, views, inst) +} + +func TestInvalidInstrumentShouldPanic(t *testing.T) { + assert.Panics(t, testInvalidInstrumentShouldPanic[int64]) + assert.Panics(t, testInvalidInstrumentShouldPanic[float64]) +} + +func TestCreateAggregators(t *testing.T) { + t.Run("Int64", testCreateAggregators[int64]) + t.Run("Float64", testCreateAggregators[float64]) +} + +func TestPipelineRegistryCreateAggregators(t *testing.T) { + renameView, _ := view.New( + view.MatchInstrumentName("foo"), + view.WithRename("bar"), + ) + testRdr := NewManualReader() + testRdrHistogram := NewManualReader(WithAggregationSelector(func(ik view.InstrumentKind) aggregation.Aggregation { return aggregation.ExplicitBucketHistogram{} })) + + testCases := []struct { + name string + views map[Reader][]view.View + inst view.Instrument + wantCount int + }{ + { + name: "No views gets no aggregators", + inst: view.Instrument{Name: "foo"}, + }, + { + name: "1 reader 1 view gets 1 aggregator", + inst: view.Instrument{Name: "foo"}, + views: map[Reader][]view.View{ + testRdr: { + {}, + }, + }, + wantCount: 1, + }, + { + name: "1 reader 2 views gets 2 aggregators", + inst: view.Instrument{Name: "foo"}, + views: map[Reader][]view.View{ + testRdr: { + {}, + renameView, + }, + }, + wantCount: 2, + }, + { + name: "2 readers 1 view each gets 2 aggregators", + inst: view.Instrument{Name: "foo"}, + views: map[Reader][]view.View{ + testRdr: { + {}, + }, + testRdrHistogram: { + {}, + }, + }, + wantCount: 2, + }, + { + name: "2 readers 2 views each gets 4 aggregators", + inst: view.Instrument{Name: "foo"}, + views: map[Reader][]view.View{ + testRdr: { + {}, + renameView, + }, + testRdrHistogram: { + {}, + renameView, + }, + }, + wantCount: 4, + }, + { + name: "An instrument duplicated in two views shares the same aggregator", + inst: view.Instrument{Name: "foo"}, + views: map[Reader][]view.View{ + testRdr: { + {}, + {}, + }, + }, + wantCount: 1, + }, + } + + for _, tt := range testCases { + t.Run(tt.name, func(t *testing.T) { + reg := newPipelineRegistries(tt.views) + testPipelineRegistryCreateIntAggregators(t, reg, tt.wantCount) + reg = newPipelineRegistries(tt.views) + testPipelineRegistryCreateFloatAggregators(t, reg, tt.wantCount) + }) + } +} + +func testPipelineRegistryCreateIntAggregators(t *testing.T, reg *pipelineRegistry, wantCount int) { + inst := view.Instrument{Name: "foo", Kind: view.SyncCounter} + + aggs, err := createAggregators[int64](reg, inst, unit.Dimensionless) + assert.NoError(t, err) + + require.Len(t, aggs, wantCount) +} + +func
testPipelineRegistryCreateFloatAggregators(t *testing.T, reg *pipelineRegistry, wantCount int) { + inst := view.Instrument{Name: "foo", Kind: view.SyncCounter} + + aggs, err := createAggregators[float64](reg, inst, unit.Dimensionless) + assert.NoError(t, err) + + require.Len(t, aggs, wantCount) +} + +func TestPipelineRegistryCreateAggregatorsIncompatibleInstrument(t *testing.T) { + testRdrHistogram := NewManualReader(WithAggregationSelector(func(ik view.InstrumentKind) aggregation.Aggregation { return aggregation.ExplicitBucketHistogram{} })) + + views := map[Reader][]view.View{ + testRdrHistogram: { + {}, + }, + } + reg := newPipelineRegistries(views) + inst := view.Instrument{Name: "foo", Kind: view.AsyncGauge} + + intAggs, err := createAggregators[int64](reg, inst, unit.Dimensionless) + assert.Error(t, err) + assert.Len(t, intAggs, 0) + + reg = newPipelineRegistries(views) + + floatAggs, err := createAggregators[float64](reg, inst, unit.Dimensionless) + assert.Error(t, err) + assert.Len(t, floatAggs, 0) +} + +func TestPipelineRegistryCreateAggregatorsDuplicateErrors(t *testing.T) { + renameView, _ := view.New( + view.MatchInstrumentName("bar"), + view.WithRename("foo"), + ) + views := map[Reader][]view.View{ + NewManualReader(): { + {}, + renameView, + }, + } + + fooInst := view.Instrument{Name: "foo", Kind: view.SyncCounter} + barInst := view.Instrument{Name: "bar", Kind: view.SyncCounter} + + reg := newPipelineRegistries(views) + + intAggs, err := createAggregators[int64](reg, fooInst, unit.Dimensionless) + assert.NoError(t, err) + assert.Len(t, intAggs, 1) + + // The Rename view should error, because it creates a foo instrument. + intAggs, err = createAggregators[int64](reg, barInst, unit.Dimensionless) + assert.Error(t, err) + assert.Len(t, intAggs, 2) + + // Creating a float foo instrument should error because there is an int foo instrument. 
+ floatAggs, err := createAggregators[float64](reg, fooInst, unit.Dimensionless) + assert.Error(t, err) + assert.Len(t, floatAggs, 1) + + fooInst = view.Instrument{Name: "foo-float", Kind: view.SyncCounter} + + _, err = createAggregators[float64](reg, fooInst, unit.Dimensionless) + assert.NoError(t, err) + + floatAggs, err = createAggregators[float64](reg, barInst, unit.Dimensionless) + assert.Error(t, err) + assert.Len(t, floatAggs, 2) +} + +func TestIsAggregatorCompatible(t *testing.T) { + var undefinedInstrument view.InstrumentKind + + testCases := []struct { + name string + kind view.InstrumentKind + agg aggregation.Aggregation + want error + }{ + { + name: "SyncCounter and Drop", + kind: view.SyncCounter, + agg: aggregation.Drop{}, + }, + { + name: "SyncCounter and LastValue", + kind: view.SyncCounter, + agg: aggregation.LastValue{}, + want: errIncompatibleAggregation, + }, + { + name: "SyncCounter and Sum", + kind: view.SyncCounter, + agg: aggregation.Sum{}, + }, + { + name: "SyncCounter and ExplicitBucketHistogram", + kind: view.SyncCounter, + agg: aggregation.ExplicitBucketHistogram{}, + }, + { + name: "SyncUpDownCounter and Drop", + kind: view.SyncUpDownCounter, + agg: aggregation.Drop{}, + }, + { + name: "SyncUpDownCounter and LastValue", + kind: view.SyncUpDownCounter, + agg: aggregation.LastValue{}, + want: errIncompatibleAggregation, + }, + { + name: "SyncUpDownCounter and Sum", + kind: view.SyncUpDownCounter, + agg: aggregation.Sum{}, + }, + { + name: "SyncUpDownCounter and ExplicitBucketHistogram", + kind: view.SyncUpDownCounter, + agg: aggregation.ExplicitBucketHistogram{}, + want: errIncompatibleAggregation, + }, + { + name: "SyncHistogram and Drop", + kind: view.SyncHistogram, + agg: aggregation.Drop{}, + }, + { + name: "SyncHistogram and LastValue", + kind: view.SyncHistogram, + agg: aggregation.LastValue{}, + want: errIncompatibleAggregation, + }, + { + name: "SyncHistogram and Sum", + kind: view.SyncHistogram, + agg: aggregation.Sum{}, + }, + { + name: "SyncHistogram and ExplicitBucketHistogram", + kind: view.SyncHistogram, + agg: aggregation.ExplicitBucketHistogram{}, + }, + { + name: "AsyncCounter and Drop", + kind: view.AsyncCounter, + agg: aggregation.Drop{}, + }, + { + name: "AsyncCounter and LastValue", + kind: view.AsyncCounter, + agg: aggregation.LastValue{}, + want: errIncompatibleAggregation, + }, + { + name: "AsyncCounter and Sum", + kind: view.AsyncCounter, + agg: aggregation.Sum{}, + }, + { + name: "AsyncCounter and ExplicitBucketHistogram", + kind: view.AsyncCounter, + agg: aggregation.ExplicitBucketHistogram{}, + want: errIncompatibleAggregation, + }, + { + name: "AsyncUpDownCounter and Drop", + kind: view.AsyncUpDownCounter, + agg: aggregation.Drop{}, + }, + { + name: "AsyncUpDownCounter and LastValue", + kind: view.AsyncUpDownCounter, + agg: aggregation.LastValue{}, + want: errIncompatibleAggregation, + }, + { + name: "AsyncUpDownCounter and Sum", + kind: view.AsyncUpDownCounter, + agg: aggregation.Sum{}, + }, + { + name: "AsyncUpDownCounter and ExplicitBucketHistogram", + kind: view.AsyncUpDownCounter, + agg: aggregation.ExplicitBucketHistogram{}, + want: errIncompatibleAggregation, + }, + { + name: "AsyncGauge and Drop", + kind: view.AsyncGauge, + agg: aggregation.Drop{}, + }, + { + name: "AsyncGauge and aggregation.LastValue{}", + kind: view.AsyncGauge, + agg: aggregation.LastValue{}, + }, + { + name: "AsyncGauge and Sum", + kind: view.AsyncGauge, + agg: aggregation.Sum{}, + want: errIncompatibleAggregation, + }, + { + name: "AsyncGauge and 
ExplicitBucketHistogram", + kind: view.AsyncGauge, + agg: aggregation.ExplicitBucketHistogram{}, + want: errIncompatibleAggregation, + }, + { + name: "Default aggregation should error", + kind: view.SyncCounter, + agg: aggregation.Default{}, + want: errUnknownAggregation, + }, + { + name: "unknown kind with Sum should error", + kind: undefinedInstrument, + agg: aggregation.Sum{}, + want: errIncompatibleAggregation, + }, + { + name: "unknown kind with LastValue should error", + kind: undefinedInstrument, + agg: aggregation.LastValue{}, + want: errIncompatibleAggregation, + }, + { + name: "unknown kind with Histogram should error", + kind: undefinedInstrument, + agg: aggregation.ExplicitBucketHistogram{}, + want: errIncompatibleAggregation, + }, + } + + for _, tt := range testCases { + t.Run(tt.name, func(t *testing.T) { + err := isAggregatorCompatible(tt.kind, tt.agg) + assert.ErrorIs(t, err, tt.want) + }) + } +} diff --git a/sdk/metric/pipeline_test.go b/sdk/metric/pipeline_test.go new file mode 100644 index 00000000000..91134fec517 --- /dev/null +++ b/sdk/metric/pipeline_test.go @@ -0,0 +1,216 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build go1.18 +// +build go1.18 + +package metric // import "go.opentelemetry.io/otel/sdk/metric" + +import ( + "context" + "sync" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/metric/unit" + "go.opentelemetry.io/otel/sdk/instrumentation" + "go.opentelemetry.io/otel/sdk/metric/metricdata" + "go.opentelemetry.io/otel/sdk/resource" +) + +type testSumAggregator struct{} + +func (testSumAggregator) Aggregation() metricdata.Aggregation { + return metricdata.Sum[int64]{ + Temporality: metricdata.CumulativeTemporality, + IsMonotonic: false, + DataPoints: []metricdata.DataPoint[int64]{}} +} + +func TestEmptyPipeline(t *testing.T) { + pipe := &pipeline{} + + output, err := pipe.produce(context.Background()) + require.NoError(t, err) + assert.Nil(t, output.Resource) + assert.Len(t, output.ScopeMetrics, 0) + + err = pipe.addAggregator(instrumentation.Scope{}, "name", "desc", unit.Dimensionless, testSumAggregator{}) + assert.NoError(t, err) + + require.NotPanics(t, func() { + pipe.addCallback(func(ctx context.Context) {}) + }) + + output, err = pipe.produce(context.Background()) + require.NoError(t, err) + assert.Nil(t, output.Resource) + require.Len(t, output.ScopeMetrics, 1) + require.Len(t, output.ScopeMetrics[0].Metrics, 1) +} + +func TestNewPipeline(t *testing.T) { + pipe := newPipeline(nil) + + output, err := pipe.produce(context.Background()) + require.NoError(t, err) + assert.Equal(t, resource.Empty(), output.Resource) + assert.Len(t, output.ScopeMetrics, 0) + + err = pipe.addAggregator(instrumentation.Scope{}, "name", "desc", unit.Dimensionless, testSumAggregator{}) + assert.NoError(t, err) + + require.NotPanics(t, func() { + pipe.addCallback(func(ctx context.Context) {}) + }) + + 
output, err = pipe.produce(context.Background()) + require.NoError(t, err) + assert.Equal(t, resource.Empty(), output.Resource) + require.Len(t, output.ScopeMetrics, 1) + require.Len(t, output.ScopeMetrics[0].Metrics, 1) +} + +func TestPipelineDuplicateRegistration(t *testing.T) { + type instrumentID struct { + scope instrumentation.Scope + name string + description string + unit unit.Unit + } + testCases := []struct { + name string + secondInst instrumentID + want error + wantScopeLen int + wantMetricsLen int + }{ + { + name: "exact should error", + secondInst: instrumentID{ + scope: instrumentation.Scope{}, + name: "name", + description: "desc", + unit: unit.Dimensionless, + }, + want: errAlreadyRegistered, + wantScopeLen: 1, + wantMetricsLen: 1, + }, + { + name: "description should not be identifying", + secondInst: instrumentID{ + scope: instrumentation.Scope{}, + name: "name", + description: "other desc", + unit: unit.Dimensionless, + }, + want: errAlreadyRegistered, + wantScopeLen: 1, + wantMetricsLen: 1, + }, + { + name: "scope should be identifying", + secondInst: instrumentID{ + scope: instrumentation.Scope{ + Name: "newScope", + }, + name: "name", + description: "desc", + unit: unit.Dimensionless, + }, + wantScopeLen: 2, + wantMetricsLen: 1, + }, + { + name: "name should be identifying", + secondInst: instrumentID{ + scope: instrumentation.Scope{}, + name: "newName", + description: "desc", + unit: unit.Dimensionless, + }, + wantScopeLen: 1, + wantMetricsLen: 2, + }, + { + name: "unit should be identifying", + secondInst: instrumentID{ + scope: instrumentation.Scope{}, + name: "name", + description: "desc", + unit: unit.Bytes, + }, + wantScopeLen: 1, + wantMetricsLen: 2, + }, + } + for _, tt := range testCases { + t.Run(tt.name, func(t *testing.T) { + pipe := newPipeline(nil) + err := pipe.addAggregator(instrumentation.Scope{}, "name", "desc", unit.Dimensionless, testSumAggregator{}) + require.NoError(t, err) + + err = pipe.addAggregator(tt.secondInst.scope, tt.secondInst.name, tt.secondInst.description, tt.secondInst.unit, testSumAggregator{}) + assert.ErrorIs(t, err, tt.want) + + if tt.wantScopeLen > 0 { + output, err := pipe.produce(context.Background()) + assert.NoError(t, err) + require.Len(t, output.ScopeMetrics, tt.wantScopeLen) + require.Len(t, output.ScopeMetrics[0].Metrics, tt.wantMetricsLen) + } + }) + } +} + +func TestPipelineUsesResource(t *testing.T) { + res := resource.NewWithAttributes("noSchema", attribute.String("test", "resource")) + pipe := newPipeline(res) + + output, err := pipe.produce(context.Background()) + assert.NoError(t, err) + assert.Equal(t, res, output.Resource) +} + +func TestPipelineConcurrency(t *testing.T) { + pipe := newPipeline(nil) + ctx := context.Background() + + var wg sync.WaitGroup + const threads = 2 + for i := 0; i < threads; i++ { + wg.Add(1) + go func() { + defer wg.Done() + _, _ = pipe.produce(ctx) + }() + + wg.Add(1) + go func() { + defer wg.Done() + _ = pipe.addAggregator(instrumentation.Scope{}, "name", "desc", unit.Dimensionless, testSumAggregator{}) + }() + + wg.Add(1) + go func() { + defer wg.Done() + pipe.addCallback(func(ctx context.Context) {}) + }() + } + wg.Wait() +} diff --git a/sdk/metric/processor/basic/basic.go b/sdk/metric/processor/basic/basic.go deleted file mode 100644 index 8ed07484bef..00000000000 --- a/sdk/metric/processor/basic/basic.go +++ /dev/null @@ -1,383 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file 
except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package basic // import "go.opentelemetry.io/otel/sdk/metric/processor/basic" - -import ( - "errors" - "fmt" - "sync" - "time" - - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/sdk/metric/aggregator" - "go.opentelemetry.io/otel/sdk/metric/export" - "go.opentelemetry.io/otel/sdk/metric/export/aggregation" - "go.opentelemetry.io/otel/sdk/metric/sdkapi" -) - -type ( - // Processor is a basic metric processor. - Processor struct { - aggregation.TemporalitySelector - export.AggregatorSelector - - state - } - - stateKey struct { - // TODO: This code is organized to support multiple - // accumulators which could theoretically produce the - // data for the same instrument, and this code has - // logic to combine data properly from multiple - // accumulators. However, the use of - // *sdkapi.Descriptor in the stateKey makes such - // combination impossible, because each accumulator - // allocates its own instruments. This can be fixed - // by using the instrument name and kind instead of - // the descriptor pointer. See - // https://github.com/open-telemetry/opentelemetry-go/issues/862. - descriptor *sdkapi.Descriptor - distinct attribute.Distinct - } - - stateValue struct { - // attrs corresponds to the stateKey.distinct field. - attrs *attribute.Set - - // updated indicates the last sequence number when this value had - // Process() called by an accumulator. - updated int64 - - // stateful indicates that a cumulative aggregation is - // being maintained, taken from the process start time. - stateful bool - - // currentOwned indicates that "current" was allocated - // by the processor in order to merge results from - // multiple Accumulators during a single collection - // round, which may happen either because: - // (1) multiple Accumulators output the same Accumulation. - // (2) one Accumulator is configured with dimensionality reduction. - currentOwned bool - - // current refers to the output from a single Accumulator - // (if !currentOwned) or it refers to an Aggregator - // owned by the processor used to accumulate multiple - // values in a single collection round. - current aggregator.Aggregator - - // cumulative, if non-nil, refers to an Aggregator owned - // by the processor used to store the last cumulative - // value. - cumulative aggregator.Aggregator - } - - state struct { - config config - - // RWMutex implements locking for the `Reader` interface. - sync.RWMutex - values map[stateKey]*stateValue - - processStart time.Time - intervalStart time.Time - intervalEnd time.Time - - // startedCollection and finishedCollection are the - // number of StartCollection() and FinishCollection() - // calls, used to ensure that the sequence of starts - // and finishes are correctly balanced. - - startedCollection int64 - finishedCollection int64 - } -) - -var _ export.Processor = &Processor{} -var _ export.Checkpointer = &Processor{} -var _ export.Reader = &state{} - -// ErrInconsistentState is returned when the sequence of collection's starts and finishes are incorrectly balanced. 
-var ErrInconsistentState = fmt.Errorf("inconsistent processor state") - -// ErrInvalidTemporality is returned for unknown metric.Temporality. -var ErrInvalidTemporality = fmt.Errorf("invalid aggregation temporality") - -// New returns a basic Processor that is also a Checkpointer using the provided -// AggregatorSelector to select Aggregators. The TemporalitySelector -// is consulted to determine the kind(s) of exporter that will consume -// data, so that this Processor can prepare to compute Cumulative Aggregations -// as needed. -func New(aselector export.AggregatorSelector, tselector aggregation.TemporalitySelector, opts ...Option) *Processor { - return NewFactory(aselector, tselector, opts...).NewCheckpointer().(*Processor) -} - -type factory struct { - aselector export.AggregatorSelector - tselector aggregation.TemporalitySelector - config config -} - -// NewFactory returns a new basic CheckpointerFactory. -func NewFactory(aselector export.AggregatorSelector, tselector aggregation.TemporalitySelector, opts ...Option) export.CheckpointerFactory { - var config config - for _, opt := range opts { - config = opt.applyProcessor(config) - } - return factory{ - aselector: aselector, - tselector: tselector, - config: config, - } -} - -var _ export.CheckpointerFactory = factory{} - -func (f factory) NewCheckpointer() export.Checkpointer { - now := time.Now() - p := &Processor{ - AggregatorSelector: f.aselector, - TemporalitySelector: f.tselector, - state: state{ - values: map[stateKey]*stateValue{}, - processStart: now, - intervalStart: now, - config: f.config, - }, - } - return p -} - -// Process implements export.Processor. -func (b *Processor) Process(accum export.Accumulation) error { - if b.startedCollection != b.finishedCollection+1 { - return ErrInconsistentState - } - desc := accum.Descriptor() - key := stateKey{ - descriptor: desc, - distinct: accum.Attributes().Equivalent(), - } - agg := accum.Aggregator() - - // Check if there is an existing value. - value, ok := b.state.values[key] - if !ok { - stateful := b.TemporalityFor(desc, agg.Aggregation().Kind()).MemoryRequired(desc.InstrumentKind()) - - newValue := &stateValue{ - attrs: accum.Attributes(), - updated: b.state.finishedCollection, - stateful: stateful, - current: agg, - } - if stateful { - if desc.InstrumentKind().PrecomputedSum() { - // To convert precomputed sums to - // deltas requires two aggregators to - // be allocated, one for the prior - // value and one for the output delta. - // This functionality was removed from - // the basic processor in PR #2350. - return aggregation.ErrNoCumulativeToDelta - } - // In this case allocate one aggregator to - // save the current state. - b.AggregatorFor(desc, &newValue.cumulative) - } - b.state.values[key] = newValue - return nil - } - - // Advance the update sequence number. - sameCollection := b.state.finishedCollection == value.updated - value.updated = b.state.finishedCollection - - // At this point in the code, we have located an existing - // value for some stateKey. This can be because: - // - // (a) stateful aggregation is being used, the entry was - // entered during a prior collection, and this is the first - // time processing an accumulation for this stateKey in the - // current collection. Since this is the first time - // processing an accumulation for this stateKey during this - // collection, we don't know yet whether there are multiple - // accumulators at work. If there are multiple accumulators, - // they'll hit case (b) the second time through. 
-	//
-	// (b) multiple accumulators are being used, whether stateful
-	// or not.
-	//
-	// Case (a) occurs when the instrument and the exporter
-	// require memory to work correctly, either because the
-	// instrument reports a PrecomputedSum to a DeltaExporter or
-	// the reverse, a non-PrecomputedSum instrument with a
-	// CumulativeExporter. This logic is encapsulated in
-	// Temporality.MemoryRequired(InstrumentKind).
-	//
-	// Case (b) occurs when the variable `sameCollection` is true,
-	// indicating that the stateKey for Accumulation has already
-	// been seen in the same collection. When this happens, it
-	// implies that multiple Accumulators are being used, or that
-	// a single Accumulator has been configured with an attribute key
-	// filter.
-
-	if !sameCollection {
-		if !value.currentOwned {
-			// This is the first Accumulation we've seen
-			// for this stateKey during this collection.
-			// Just keep a reference to the Accumulator's
-			// Aggregator. All the other cases copy
-			// Aggregator state.
-			value.current = agg
-			return nil
-		}
-		return agg.SynchronizedMove(value.current, desc)
-	}
-
-	// If the current is not owned, take ownership of a copy
-	// before merging below.
-	if !value.currentOwned {
-		tmp := value.current
-		b.AggregatorSelector.AggregatorFor(desc, &value.current)
-		value.currentOwned = true
-		if err := tmp.SynchronizedMove(value.current, desc); err != nil {
-			return err
-		}
-	}
-
-	// Combine this Accumulation with the prior Accumulation.
-	return value.current.Merge(agg, desc)
-}
-
-// Reader returns the associated Reader. Use the
-// Reader Locker interface to synchronize access to this
-// object. The Reader.ForEach() method cannot be called
-// concurrently with Process().
-func (b *Processor) Reader() export.Reader {
-	return &b.state
-}
-
-// StartCollection signals to the Processor that one or more Accumulators
-// will begin calling Process() during collection.
-func (b *Processor) StartCollection() {
-	if b.startedCollection != 0 {
-		b.intervalStart = b.intervalEnd
-	}
-	b.startedCollection++
-}
-
-// FinishCollection signals to the Processor that a complete
-// collection has finished and that ForEach will be called to access
-// the Reader.
-func (b *Processor) FinishCollection() error {
-	b.intervalEnd = time.Now()
-	if b.startedCollection != b.finishedCollection+1 {
-		return ErrInconsistentState
-	}
-	defer func() { b.finishedCollection++ }()
-
-	for key, value := range b.values {
-		mkind := key.descriptor.InstrumentKind()
-		stale := value.updated != b.finishedCollection
-		stateless := !value.stateful
-
-		// The following branch updates stateful aggregators. Skip
-		// these updates if the aggregator is not stateful or if the
-		// aggregator is stale.
-		if stale || stateless {
-			// If this processor does not require memory,
-			// stale, stateless entries can be removed.
-			// This implies that they were not updated
-			// over the previous full collection interval.
-			if stale && stateless && !b.config.Memory {
-				delete(b.values, key)
-			}
-			continue
-		}
-
-		// The only kind of aggregators that are not stateless
-		// are the ones needing delta to cumulative
-		// conversion. Merge aggregator state in this case.
- if !mkind.PrecomputedSum() { - // This line is equivalent to: - // value.cumulative = value.cumulative + value.current - if err := value.cumulative.Merge(value.current, key.descriptor); err != nil { - return err - } - } - } - return nil -} - -// ForEach iterates through the Reader, passing an -// export.Record with the appropriate Cumulative or Delta aggregation -// to an exporter. -func (b *state) ForEach(exporter aggregation.TemporalitySelector, f func(export.Record) error) error { - if b.startedCollection != b.finishedCollection { - return ErrInconsistentState - } - for key, value := range b.values { - mkind := key.descriptor.InstrumentKind() - - var agg aggregation.Aggregation - var start time.Time - - aggTemp := exporter.TemporalityFor(key.descriptor, value.current.Aggregation().Kind()) - - switch aggTemp { - case aggregation.CumulativeTemporality: - // If stateful, the sum has been computed. If stateless, the - // input was already cumulative. Either way, use the checkpointed - // value: - if value.stateful { - agg = value.cumulative.Aggregation() - } else { - agg = value.current.Aggregation() - } - start = b.processStart - - case aggregation.DeltaTemporality: - // Precomputed sums are a special case. - if mkind.PrecomputedSum() { - // This functionality was removed from - // the basic processor in PR #2350. - return aggregation.ErrNoCumulativeToDelta - } - agg = value.current.Aggregation() - start = b.intervalStart - - default: - return fmt.Errorf("%v: %w", aggTemp, ErrInvalidTemporality) - } - - // If the processor does not have Config.Memory and it was not updated - // in the prior round, do not visit this value. - if !b.config.Memory && value.updated != (b.finishedCollection-1) { - continue - } - - if err := f(export.NewRecord( - key.descriptor, - value.attrs, - agg, - start, - b.intervalEnd, - )); err != nil && !errors.Is(err, aggregation.ErrNoData) { - return err - } - } - return nil -} diff --git a/sdk/metric/processor/basic/basic_test.go b/sdk/metric/processor/basic/basic_test.go deleted file mode 100644 index 21d816b44a0..00000000000 --- a/sdk/metric/processor/basic/basic_test.go +++ /dev/null @@ -1,510 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
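For orientation before the tests that follow: the deleted basic processor enforced a strictly balanced collection cycle, returning ErrInconsistentState whenever StartCollection, Process, FinishCollection, and ForEach were sequenced incorrectly. A minimal sketch of one collection round, in the fragment style of this package's doc examples, assuming `accum` is an export.Accumulation produced elsewhere by an Accumulator:

	proc := basic.New(
		processortest.AggregatorSelector(),
		aggregation.CumulativeTemporalitySelector(),
	)

	proc.StartCollection() // must be balanced by FinishCollection
	if err := proc.Process(accum); err != nil { // only legal between Start/Finish
		// handle error
	}
	if err := proc.FinishCollection(); err != nil {
		// handle error
	}

	// Reading is only valid while no collection is in progress.
	_ = proc.Reader().ForEach(
		aggregation.CumulativeTemporalitySelector(),
		func(r export.Record) error {
			// consume r here
			return nil
		},
	)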
- -package basic_test - -import ( - "context" - "errors" - "fmt" - "strings" - "testing" - "time" - - "github.com/stretchr/testify/require" - - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/metric/instrument" - "go.opentelemetry.io/otel/sdk/instrumentation" - sdk "go.opentelemetry.io/otel/sdk/metric" - "go.opentelemetry.io/otel/sdk/metric/aggregator" - "go.opentelemetry.io/otel/sdk/metric/aggregator/aggregatortest" - "go.opentelemetry.io/otel/sdk/metric/export" - "go.opentelemetry.io/otel/sdk/metric/export/aggregation" - "go.opentelemetry.io/otel/sdk/metric/metrictest" - "go.opentelemetry.io/otel/sdk/metric/number" - "go.opentelemetry.io/otel/sdk/metric/processor/basic" - "go.opentelemetry.io/otel/sdk/metric/processor/processortest" - "go.opentelemetry.io/otel/sdk/metric/sdkapi" - "go.opentelemetry.io/otel/sdk/resource" -) - -func requireNotAfter(t *testing.T, t1, t2 time.Time) { - require.False(t, t1.After(t2), "expected %v ≤ %v", t1, t2) -} - -// TestProcessor tests all the non-error paths in this package. -func TestProcessor(t *testing.T) { - type exportCase struct { - kind aggregation.Temporality - } - type instrumentCase struct { - kind sdkapi.InstrumentKind - } - type numberCase struct { - kind number.Kind - } - type aggregatorCase struct { - kind aggregation.Kind - } - - for _, tc := range []exportCase{ - {kind: aggregation.CumulativeTemporality}, - {kind: aggregation.DeltaTemporality}, - } { - t.Run(tc.kind.String(), func(t *testing.T) { - for _, ic := range []instrumentCase{ - {kind: sdkapi.CounterInstrumentKind}, - {kind: sdkapi.UpDownCounterInstrumentKind}, - {kind: sdkapi.HistogramInstrumentKind}, - {kind: sdkapi.CounterObserverInstrumentKind}, - {kind: sdkapi.UpDownCounterObserverInstrumentKind}, - {kind: sdkapi.GaugeObserverInstrumentKind}, - } { - t.Run(ic.kind.String(), func(t *testing.T) { - for _, nc := range []numberCase{ - {kind: number.Int64Kind}, - {kind: number.Float64Kind}, - } { - t.Run(nc.kind.String(), func(t *testing.T) { - for _, ac := range []aggregatorCase{ - {kind: aggregation.SumKind}, - {kind: aggregation.HistogramKind}, - {kind: aggregation.LastValueKind}, - } { - t.Run(ac.kind.String(), func(t *testing.T) { - testProcessor( - t, - tc.kind, - ic.kind, - nc.kind, - ac.kind, - ) - }) - } - }) - } - }) - } - }) - } -} - -func asNumber(nkind number.Kind, value int64) number.Number { - if nkind == number.Int64Kind { - return number.NewInt64Number(value) - } - return number.NewFloat64Number(float64(value)) -} - -func updateFor(t *testing.T, desc *sdkapi.Descriptor, selector export.AggregatorSelector, value int64, labs ...attribute.KeyValue) export.Accumulation { - ls := attribute.NewSet(labs...) - var agg aggregator.Aggregator - selector.AggregatorFor(desc, &agg) - require.NoError(t, agg.Update(context.Background(), asNumber(desc.NumberKind(), value), desc)) - - return export.NewAccumulation(desc, &ls, agg) -} - -func testProcessor( - t *testing.T, - aggTemp aggregation.Temporality, - mkind sdkapi.InstrumentKind, - nkind number.Kind, - akind aggregation.Kind, -) { - // This code tests for errors when the export kind is Delta - // and the instrument kind is PrecomputedSum(). - expectConversion := !(aggTemp == aggregation.DeltaTemporality && mkind.PrecomputedSum()) - requireConversion := func(t *testing.T, err error) { - if expectConversion { - require.NoError(t, err) - } else { - require.Equal(t, aggregation.ErrNoCumulativeToDelta, err) - } - } - - // Note: this selector uses the instrument name to dictate - // aggregation kind. 
-	selector := processortest.AggregatorSelector()
-
-	labs1 := []attribute.KeyValue{attribute.String("L1", "V")}
-	labs2 := []attribute.KeyValue{attribute.String("L2", "V")}
-
-	testBody := func(t *testing.T, hasMemory bool, nAccum, nCheckpoint int) {
-		processor := basic.New(selector, aggregation.ConstantTemporalitySelector(aggTemp), basic.WithMemory(hasMemory))
-
-		instSuffix := fmt.Sprint(".", strings.ToLower(akind.String()))
-
-		desc1 := metrictest.NewDescriptor(fmt.Sprint("inst1", instSuffix), mkind, nkind)
-		desc2 := metrictest.NewDescriptor(fmt.Sprint("inst2", instSuffix), mkind, nkind)
-
-		for nc := 0; nc < nCheckpoint; nc++ {
-			// The input is 10 per update, scaled by
-			// the number of checkpoints for
-			// cumulative instruments:
-			input := int64(10)
-			cumulativeMultiplier := int64(nc + 1)
-			if mkind.PrecomputedSum() {
-				input *= cumulativeMultiplier
-			}
-
-			processor.StartCollection()
-
-			for na := 0; na < nAccum; na++ {
-				requireConversion(t, processor.Process(updateFor(t, &desc1, selector, input, labs1...)))
-				requireConversion(t, processor.Process(updateFor(t, &desc2, selector, input, labs2...)))
-			}
-
-			// Note: in case of !expectConversion, we still get no error here
-			// because Process() skipped entering state for those records.
-			require.NoError(t, processor.FinishCollection())
-
-			if nc < nCheckpoint-1 {
-				continue
-			}
-
-			reader := processor.Reader()
-
-			for _, repetitionAfterEmptyInterval := range []bool{false, true} {
-				if repetitionAfterEmptyInterval {
-					// We're repeating the test after another
-					// interval with no updates.
-					processor.StartCollection()
-					require.NoError(t, processor.FinishCollection())
-				}
-
-				// Test the final checkpoint state.
-				records1 := processortest.NewOutput(attribute.DefaultEncoder())
-				require.NoError(t, reader.ForEach(aggregation.ConstantTemporalitySelector(aggTemp), records1.AddRecord))
-
-				if !expectConversion {
-					require.EqualValues(t, map[string]float64{}, records1.Map())
-					continue
-				}
-
-				var multiplier int64
-
-				if mkind.Asynchronous() {
-					// Asynchronous tests accumulate results multiplied by the
-					// number of Accumulators, unless LastValue aggregation is used.
-					// If a precomputed sum, we expect cumulative inputs.
-					if mkind.PrecomputedSum() {
-						require.NotEqual(t, aggTemp, aggregation.DeltaTemporality)
-						if akind == aggregation.LastValueKind {
-							multiplier = cumulativeMultiplier
-						} else {
-							multiplier = cumulativeMultiplier * int64(nAccum)
-						}
-					} else {
-						if aggTemp == aggregation.CumulativeTemporality && akind != aggregation.LastValueKind {
-							multiplier = cumulativeMultiplier * int64(nAccum)
-						} else if akind == aggregation.LastValueKind {
-							multiplier = 1
-						} else {
-							multiplier = int64(nAccum)
-						}
-					}
-				} else {
-					// Synchronous instruments accumulate results from multiple accumulators;
-					// use that number as the baseline multiplier.
-					multiplier = int64(nAccum)
-					if aggTemp == aggregation.CumulativeTemporality {
-						// If a cumulative exporter, include prior checkpoints.
-						multiplier *= cumulativeMultiplier
-					}
-					if akind == aggregation.LastValueKind {
-						// If a last-value aggregator, set multiplier to 1.0.
-						multiplier = 1
-					}
-				}
-
-				exp := map[string]float64{}
-				if hasMemory || !repetitionAfterEmptyInterval {
-					exp = map[string]float64{
-						fmt.Sprintf("inst1%s/L1=V/", instSuffix): float64(multiplier * 10), // attrs1
-						fmt.Sprintf("inst2%s/L2=V/", instSuffix): float64(multiplier * 10), // attrs2
-					}
-				}
-
-				require.EqualValues(t, exp, records1.Map(), "with repetition=%v", repetitionAfterEmptyInterval)
-			}
-		}
-	}
-
-	for _, hasMem := range []bool{false, true} {
-		t.Run(fmt.Sprintf("HasMemory=%v", hasMem), func(t *testing.T) {
-			// For 1 to 3 accumulators:
-			for nAccum := 1; nAccum <= 3; nAccum++ {
-				t.Run(fmt.Sprintf("NumAccum=%d", nAccum), func(t *testing.T) {
-					// For 1 to 3 checkpoints:
-					for nCheckpoint := 1; nCheckpoint <= 3; nCheckpoint++ {
-						t.Run(fmt.Sprintf("NumCkpt=%d", nCheckpoint), func(t *testing.T) {
-							testBody(t, hasMem, nAccum, nCheckpoint)
-						})
-					}
-				})
-			}
-		})
-	}
-}
-
-type bogusExporter struct{}
-
-func (bogusExporter) TemporalityFor(*sdkapi.Descriptor, aggregation.Kind) aggregation.Temporality {
-	return 100
-}
-
-func (bogusExporter) Export(context.Context, export.Reader) error {
-	panic("Not called")
-}
-
-func TestBasicInconsistent(t *testing.T) {
-	// Test double-start
-	b := basic.New(processortest.AggregatorSelector(), aggregation.StatelessTemporalitySelector())
-
-	b.StartCollection()
-	b.StartCollection()
-	require.Equal(t, basic.ErrInconsistentState, b.FinishCollection())
-
-	// Test finish without start
-	b = basic.New(processortest.AggregatorSelector(), aggregation.StatelessTemporalitySelector())
-
-	require.Equal(t, basic.ErrInconsistentState, b.FinishCollection())
-
-	// Test no finish
-	b = basic.New(processortest.AggregatorSelector(), aggregation.StatelessTemporalitySelector())
-
-	b.StartCollection()
-	require.Equal(
-		t,
-		basic.ErrInconsistentState,
-		b.ForEach(
-			aggregation.StatelessTemporalitySelector(),
-			func(export.Record) error { return nil },
-		),
-	)
-
-	// Test no start
-	b = basic.New(processortest.AggregatorSelector(), aggregation.StatelessTemporalitySelector())
-
-	desc := metrictest.NewDescriptor("inst", sdkapi.CounterInstrumentKind, number.Int64Kind)
-	accum := export.NewAccumulation(&desc, attribute.EmptySet(), aggregatortest.NoopAggregator{})
-	require.Equal(t, basic.ErrInconsistentState, b.Process(accum))
-
-	// Test invalid kind:
-	b = basic.New(processortest.AggregatorSelector(), aggregation.StatelessTemporalitySelector())
-	b.StartCollection()
-	require.NoError(t, b.Process(accum))
-	require.NoError(t, b.FinishCollection())
-
-	err := b.ForEach(
-		bogusExporter{},
-		func(export.Record) error { return nil },
-	)
-	require.True(t, errors.Is(err, basic.ErrInvalidTemporality))
-}
-
-func TestBasicTimestamps(t *testing.T) {
-	beforeNew := time.Now()
-	time.Sleep(time.Nanosecond)
-	b := basic.New(processortest.AggregatorSelector(), aggregation.StatelessTemporalitySelector())
-	time.Sleep(time.Nanosecond)
-	afterNew := time.Now()
-
-	desc := metrictest.NewDescriptor("inst", sdkapi.CounterInstrumentKind, number.Int64Kind)
-	accum := export.NewAccumulation(&desc, attribute.EmptySet(), aggregatortest.NoopAggregator{})
-
-	b.StartCollection()
-	_ = b.Process(accum)
-	require.NoError(t, b.FinishCollection())
-
-	var start1, end1 time.Time
-
-	require.NoError(t, b.ForEach(aggregation.StatelessTemporalitySelector(), func(rec export.Record) error {
-		start1 = rec.StartTime()
-		end1 = rec.EndTime()
-		return nil
-	}))
-
-	// The first start time is set in the constructor.
- requireNotAfter(t, beforeNew, start1) - requireNotAfter(t, start1, afterNew) - - for i := 0; i < 2; i++ { - b.StartCollection() - require.NoError(t, b.Process(accum)) - require.NoError(t, b.FinishCollection()) - - var start2, end2 time.Time - - require.NoError(t, b.ForEach(aggregation.StatelessTemporalitySelector(), func(rec export.Record) error { - start2 = rec.StartTime() - end2 = rec.EndTime() - return nil - })) - - // Subsequent intervals have their start and end aligned. - require.Equal(t, start2, end1) - requireNotAfter(t, start1, end1) - requireNotAfter(t, start2, end2) - - start1 = start2 - end1 = end2 - } -} - -func TestStatefulNoMemoryCumulative(t *testing.T) { - aggTempSel := aggregation.CumulativeTemporalitySelector() - - desc := metrictest.NewDescriptor("inst.sum", sdkapi.CounterInstrumentKind, number.Int64Kind) - selector := processortest.AggregatorSelector() - - processor := basic.New(selector, aggTempSel, basic.WithMemory(false)) - reader := processor.Reader() - - for i := 1; i < 3; i++ { - // Empty interval - processor.StartCollection() - require.NoError(t, processor.FinishCollection()) - - // Verify zero elements - records := processortest.NewOutput(attribute.DefaultEncoder()) - require.NoError(t, reader.ForEach(aggTempSel, records.AddRecord)) - require.EqualValues(t, map[string]float64{}, records.Map()) - - // Add 10 - processor.StartCollection() - _ = processor.Process(updateFor(t, &desc, selector, 10, attribute.String("A", "B"))) - require.NoError(t, processor.FinishCollection()) - - // Verify one element - records = processortest.NewOutput(attribute.DefaultEncoder()) - require.NoError(t, reader.ForEach(aggTempSel, records.AddRecord)) - require.EqualValues(t, map[string]float64{ - "inst.sum/A=B/": float64(i * 10), - }, records.Map()) - } -} - -func TestMultiObserverSum(t *testing.T) { - for _, test := range []struct { - name string - aggregation.TemporalitySelector - expectProcessErr error - }{ - {"cumulative", aggregation.CumulativeTemporalitySelector(), nil}, - {"delta", aggregation.DeltaTemporalitySelector(), aggregation.ErrNoCumulativeToDelta}, - } { - t.Run(test.name, func(t *testing.T) { - aggTempSel := test.TemporalitySelector - desc := metrictest.NewDescriptor("observe.sum", sdkapi.CounterObserverInstrumentKind, number.Int64Kind) - selector := processortest.AggregatorSelector() - - processor := basic.New(selector, aggTempSel, basic.WithMemory(false)) - reader := processor.Reader() - - for i := 1; i < 3; i++ { - // Add i*10*3 times - processor.StartCollection() - require.True(t, errors.Is(processor.Process(updateFor(t, &desc, selector, int64(i*10), attribute.String("A", "B"))), test.expectProcessErr)) - require.True(t, errors.Is(processor.Process(updateFor(t, &desc, selector, int64(i*10), attribute.String("A", "B"))), test.expectProcessErr)) - require.True(t, errors.Is(processor.Process(updateFor(t, &desc, selector, int64(i*10), attribute.String("A", "B"))), test.expectProcessErr)) - require.NoError(t, processor.FinishCollection()) - - // Verify one element - records := processortest.NewOutput(attribute.DefaultEncoder()) - if test.expectProcessErr == nil { - require.NoError(t, reader.ForEach(aggTempSel, records.AddRecord)) - require.EqualValues(t, map[string]float64{ - "observe.sum/A=B/": float64(3 * 10 * i), - }, records.Map()) - } else { - require.NoError(t, reader.ForEach(aggTempSel, records.AddRecord)) - require.EqualValues(t, map[string]float64{}, records.Map()) - } - } - }) - } -} - -func TestCounterObserverEndToEnd(t *testing.T) { - ctx := 
context.Background() - eselector := aggregation.CumulativeTemporalitySelector() - proc := basic.New( - processortest.AggregatorSelector(), - eselector, - ) - accum := sdk.NewAccumulator(proc) - meter := sdkapi.WrapMeterImpl(accum) - - var calls int64 - ctr, err := meter.AsyncInt64().Counter("observer.sum") - require.NoError(t, err) - err = meter.RegisterCallback([]instrument.Asynchronous{ctr}, func(ctx context.Context) { - calls++ - ctr.Observe(ctx, calls) - }) - require.NoError(t, err) - reader := proc.Reader() - - var startTime [3]time.Time - var endTime [3]time.Time - - for i := range startTime { - data := proc.Reader() - data.Lock() - proc.StartCollection() - accum.Collect(ctx) - require.NoError(t, proc.FinishCollection()) - - exporter := processortest.New(eselector, attribute.DefaultEncoder()) - require.NoError(t, exporter.Export(ctx, resource.Empty(), processortest.OneInstrumentationLibraryReader( - instrumentation.Library{ - Name: "test", - }, reader))) - - require.EqualValues(t, map[string]float64{ - "observer.sum//": float64(i + 1), - }, exporter.Values()) - - var record export.Record - require.NoError(t, data.ForEach(eselector, func(r export.Record) error { - record = r - return nil - })) - - // Try again, but ask for a Delta - require.Equal( - t, - aggregation.ErrNoCumulativeToDelta, - data.ForEach( - aggregation.ConstantTemporalitySelector(aggregation.DeltaTemporality), - func(r export.Record) error { - t.Fail() - return nil - }, - ), - ) - - startTime[i] = record.StartTime() - endTime[i] = record.EndTime() - data.Unlock() - } - - require.Equal(t, startTime[0], startTime[1]) - require.Equal(t, startTime[0], startTime[2]) - requireNotAfter(t, endTime[0], endTime[1]) - requireNotAfter(t, endTime[1], endTime[2]) -} diff --git a/sdk/metric/processor/basic/config.go b/sdk/metric/processor/basic/config.go deleted file mode 100644 index ca8127629dc..00000000000 --- a/sdk/metric/processor/basic/config.go +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package basic // import "go.opentelemetry.io/otel/sdk/metric/processor/basic" - -// config contains the options for configuring a basic metric processor. -type config struct { - // Memory controls whether the processor remembers metric instruments and - // attribute sets that were previously reported. When Memory is true, - // Reader.ForEach() will visit metrics that were not updated in the most - // recent interval. - Memory bool -} - -// Option configures a basic processor configuration. -type Option interface { - applyProcessor(config) config -} - -// WithMemory sets the memory behavior of a Processor. If this is true, the -// processor will report metric instruments and attribute sets that were -// previously reported but not updated in the most recent interval. 
-func WithMemory(memory bool) Option {
-	return memoryOption(memory)
-}
-
-type memoryOption bool
-
-func (m memoryOption) applyProcessor(cfg config) config {
-	cfg.Memory = bool(m)
-	return cfg
-}
diff --git a/sdk/metric/processor/processortest/test.go b/sdk/metric/processor/processortest/test.go
deleted file mode 100644
index ce9318c3532..00000000000
--- a/sdk/metric/processor/processortest/test.go
+++ /dev/null
@@ -1,432 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package processortest // import "go.opentelemetry.io/otel/sdk/metric/processor/processortest"
-
-import (
-	"context"
-	"fmt"
-	"strings"
-	"sync"
-	"time"
-
-	"go.opentelemetry.io/otel/attribute"
-	"go.opentelemetry.io/otel/sdk/instrumentation"
-	"go.opentelemetry.io/otel/sdk/metric/aggregator"
-	"go.opentelemetry.io/otel/sdk/metric/aggregator/histogram"
-	"go.opentelemetry.io/otel/sdk/metric/aggregator/lastvalue"
-	"go.opentelemetry.io/otel/sdk/metric/aggregator/sum"
-	"go.opentelemetry.io/otel/sdk/metric/export"
-	"go.opentelemetry.io/otel/sdk/metric/export/aggregation"
-	"go.opentelemetry.io/otel/sdk/metric/sdkapi"
-	"go.opentelemetry.io/otel/sdk/resource"
-)
-
-type (
-	// mapKey is the unique key for a metric, consisting of its unique
-	// descriptor, distinct attributes, and distinct resource attributes.
-	mapKey struct {
-		desc     *sdkapi.Descriptor
-		attrs    attribute.Distinct
-		resource attribute.Distinct
-	}
-
-	// mapValue is the value stored in a processor used to produce a
-	// Reader.
-	mapValue struct {
-		attrs      *attribute.Set
-		resource   *resource.Resource
-		aggregator aggregator.Aggregator
-	}
-
-	// Output implements export.Reader.
-	Output struct {
-		m           map[mapKey]mapValue
-		attrEncoder attribute.Encoder
-		sync.RWMutex
-	}
-
-	// testAggregatorSelector returns aggregators consistent with
-	// the test variables below, needed for testing stateful
-	// processors, which clone Aggregators using AggregatorFor(desc).
-	testAggregatorSelector struct{}
-
-	// testCheckpointer is an export.Checkpointer.
-	testCheckpointer struct {
-		started  int
-		finished int
-		*Processor
-	}
-
-	// Processor is a testing implementation of export.Processor that
-	// assembles its results as a map[string]float64.
-	Processor struct {
-		export.AggregatorSelector
-		output *Output
-	}
-
-	// Exporter is a testing implementation of export.Exporter that
-	// assembles its results as a map[string]float64.
-	Exporter struct {
-		aggregation.TemporalitySelector
-		output      *Output
-		exportCount int
-
-		// InjectErr supports returning conditional errors from
-		// the Export() routine. This must be set before the
-		// Exporter is first used.
-		InjectErr func(export.Record) error
-	}
-)
-
-type testFactory struct {
-	selector export.AggregatorSelector
-	encoder  attribute.Encoder
-}
-
-// NewCheckpointerFactory returns a new CheckpointerFactory for the selector
-// and encoder pair.
-func NewCheckpointerFactory(selector export.AggregatorSelector, encoder attribute.Encoder) export.CheckpointerFactory { - return testFactory{ - selector: selector, - encoder: encoder, - } -} - -// NewCheckpointer returns a new Checkpointer for Processor p. -func NewCheckpointer(p *Processor) export.Checkpointer { - return &testCheckpointer{ - Processor: p, - } -} - -func (f testFactory) NewCheckpointer() export.Checkpointer { - return NewCheckpointer(NewProcessor(f.selector, f.encoder)) -} - -// NewProcessor returns a new testing Processor implementation. -// Verify expected outputs using Values(), e.g.: -// -// require.EqualValues(t, map[string]float64{ -// "counter.sum/A=1,B=2/R=V": 100, -// }, processor.Values()) -// -// Where in the example A=1,B=2 is the encoded attributes and R=V is the -// encoded resource value. -func NewProcessor(selector export.AggregatorSelector, encoder attribute.Encoder) *Processor { - return &Processor{ - AggregatorSelector: selector, - output: NewOutput(encoder), - } -} - -// Process implements export.Processor. -func (p *Processor) Process(accum export.Accumulation) error { - return p.output.AddAccumulation(accum) -} - -// Values returns the mapping from attribute set to point values for the -// accumulations that were processed. Point values are chosen as either the -// Sum or the LastValue, whichever is implemented. (All the built-in -// Aggregators implement one of these interfaces.) -func (p *Processor) Values() map[string]float64 { - return p.output.Map() -} - -// Reset clears the state of this test processor. -func (p *Processor) Reset() { - p.output.Reset() -} - -// StartCollection implements export.Checkpointer. -func (c *testCheckpointer) StartCollection() { - if c.started != c.finished { - panic(fmt.Sprintf("collection was already started: %d != %d", c.started, c.finished)) - } - - c.started++ -} - -// FinishCollection implements export.Checkpointer. -func (c *testCheckpointer) FinishCollection() error { - if c.started-1 != c.finished { - return fmt.Errorf("collection was not started: %d != %d", c.started, c.finished) - } - - c.finished++ - return nil -} - -// Reader implements export.Checkpointer. -func (c *testCheckpointer) Reader() export.Reader { - return c.Processor.output -} - -// AggregatorSelector returns a policy that is consistent with the -// test descriptors above. I.e., it returns sum.New() for counter -// instruments and lastvalue.New() for lastValue instruments. -func AggregatorSelector() export.AggregatorSelector { - return testAggregatorSelector{} -} - -// AggregatorFor implements export.AggregatorSelector. -func (testAggregatorSelector) AggregatorFor(desc *sdkapi.Descriptor, aggPtrs ...*aggregator.Aggregator) { - switch { - case strings.HasSuffix(desc.Name(), ".disabled"): - for i := range aggPtrs { - *aggPtrs[i] = nil - } - case strings.HasSuffix(desc.Name(), ".sum"): - aggs := sum.New(len(aggPtrs)) - for i := range aggPtrs { - *aggPtrs[i] = &aggs[i] - } - case strings.HasSuffix(desc.Name(), ".lastvalue"): - aggs := lastvalue.New(len(aggPtrs)) - for i := range aggPtrs { - *aggPtrs[i] = &aggs[i] - } - case strings.HasSuffix(desc.Name(), ".histogram"): - aggs := histogram.New(len(aggPtrs), desc) - for i := range aggPtrs { - *aggPtrs[i] = &aggs[i] - } - default: - panic(fmt.Sprint("Invalid instrument name for test AggregatorSelector: ", desc.Name())) - } -} - -// NewOutput is a helper for testing an expected set of Accumulations -// (from an Accumulator) or an expected set of Records (from a -// Processor). 
If testing with an Accumulator, it may be simpler to
-// use the test Processor in this package.
-func NewOutput(attrEncoder attribute.Encoder) *Output {
-	return &Output{
-		m:           make(map[mapKey]mapValue),
-		attrEncoder: attrEncoder,
-	}
-}
-
-// ForEach implements export.Reader.
-func (o *Output) ForEach(_ aggregation.TemporalitySelector, ff func(export.Record) error) error {
-	for key, value := range o.m {
-		if err := ff(export.NewRecord(
-			key.desc,
-			value.attrs,
-			value.aggregator.Aggregation(),
-			time.Time{},
-			time.Time{},
-		)); err != nil {
-			return err
-		}
-	}
-	return nil
-}
-
-// AddRecord adds a string representation of the exported metric data
-// to a map for use in testing. The value taken from the record is
-// either the Sum() or the LastValue() of its Aggregation(), whichever
-// is defined. Record timestamps are ignored.
-func (o *Output) AddRecord(rec export.Record) error {
-	return o.AddRecordWithResource(rec, resource.Empty())
-}
-
-// AddInstrumentationLibraryRecord merges rec into this Output, ignoring the
-// instrumentation library.
-func (o *Output) AddInstrumentationLibraryRecord(_ instrumentation.Library, rec export.Record) error {
-	return o.AddRecordWithResource(rec, resource.Empty())
-}
-
-// AddRecordWithResource merges rec into this Output scoping it with res.
-func (o *Output) AddRecordWithResource(rec export.Record, res *resource.Resource) error {
-	key := mapKey{
-		desc:     rec.Descriptor(),
-		attrs:    rec.Attributes().Equivalent(),
-		resource: res.Equivalent(),
-	}
-	if _, ok := o.m[key]; !ok {
-		var agg aggregator.Aggregator
-		testAggregatorSelector{}.AggregatorFor(rec.Descriptor(), &agg)
-		o.m[key] = mapValue{
-			aggregator: agg,
-			attrs:      rec.Attributes(),
-			resource:   res,
-		}
-	}
-	return o.m[key].aggregator.Merge(rec.Aggregation().(aggregator.Aggregator), rec.Descriptor())
-}
-
-// Map returns the calculated values for test validation from a set of
-// Accumulations or a set of Records. When mapping records or
-// accumulations into floating point values, the Sum() or LastValue()
-// is chosen, whichever is implemented by the underlying Aggregator.
-func (o *Output) Map() map[string]float64 {
-	r := make(map[string]float64)
-	err := o.ForEach(aggregation.StatelessTemporalitySelector(), func(record export.Record) error {
-		for key, entry := range o.m {
-			encoded := entry.attrs.Encoded(o.attrEncoder)
-			rencoded := entry.resource.Encoded(o.attrEncoder)
-			value := 0.0
-			if s, ok := entry.aggregator.(aggregation.Sum); ok {
-				sum, _ := s.Sum()
-				value = sum.CoerceToFloat64(key.desc.NumberKind())
-			} else if l, ok := entry.aggregator.(aggregation.LastValue); ok {
-				last, _, _ := l.LastValue()
-				value = last.CoerceToFloat64(key.desc.NumberKind())
-			} else {
-				panic(fmt.Sprintf("Unhandled aggregator type: %T", entry.aggregator))
-			}
-			name := fmt.Sprint(key.desc.Name(), "/", encoded, "/", rencoded)
-			r[name] = value
-		}
-		return nil
-	})
-	if err != nil {
-		panic(fmt.Sprint("Unexpected processor error: ", err))
-	}
-	return r
-}
-
-// Reset restores the Output to its initial state, with no accumulated
-// metric data.
-func (o *Output) Reset() {
-	o.m = map[mapKey]mapValue{}
-}
-
-// AddAccumulation adds a string representation of the exported metric
-// data to a map for use in testing. The value taken from the
-// accumulation is either the Sum() or the LastValue() of its
-// Aggregator().Aggregation(), whichever is defined.
-func (o *Output) AddAccumulation(acc export.Accumulation) error { - return o.AddRecord( - export.NewRecord( - acc.Descriptor(), - acc.Attributes(), - acc.Aggregator().Aggregation(), - time.Time{}, - time.Time{}, - ), - ) -} - -// New returns a new testing Exporter implementation. -// Verify exporter outputs using Values(), e.g.,: -// -// require.EqualValues(t, map[string]float64{ -// "counter.sum/A=1,B=2/R=V": 100, -// }, exporter.Values()) -// -// Where in the example A=1,B=2 is the encoded attributes and R=V is the -// encoded resource value. -func New(selector aggregation.TemporalitySelector, encoder attribute.Encoder) *Exporter { - return &Exporter{ - TemporalitySelector: selector, - output: NewOutput(encoder), - } -} - -// Export records all the measurements aggregated in ckpt for res. -func (e *Exporter) Export(_ context.Context, res *resource.Resource, ckpt export.InstrumentationLibraryReader) error { - e.output.Lock() - defer e.output.Unlock() - e.exportCount++ - return ckpt.ForEach(func(library instrumentation.Library, mr export.Reader) error { - return mr.ForEach(e.TemporalitySelector, func(r export.Record) error { - if e.InjectErr != nil { - if err := e.InjectErr(r); err != nil { - return err - } - } - return e.output.AddRecordWithResource(r, res) - }) - }) -} - -// Values returns the mapping from attribute set to point values for the -// accumulations that were processed. Point values are chosen as either the -// Sum or the LastValue, whichever is implemented. (All the built-in -// Aggregators implement one of these interfaces.) -func (e *Exporter) Values() map[string]float64 { - e.output.Lock() - defer e.output.Unlock() - return e.output.Map() -} - -// ExportCount returns the number of times Export() has been called -// since the last Reset(). -func (e *Exporter) ExportCount() int { - e.output.Lock() - defer e.output.Unlock() - return e.exportCount -} - -// Reset sets the exporter's output to the initial, empty state and -// resets the export count to zero. -func (e *Exporter) Reset() { - e.output.Lock() - defer e.output.Unlock() - e.output.Reset() - e.exportCount = 0 -} - -// OneInstrumentationLibraryReader returns an InstrumentationLibraryReader for -// a single instrumentation library. -func OneInstrumentationLibraryReader(l instrumentation.Library, r export.Reader) export.InstrumentationLibraryReader { - return oneLibraryReader{l, r} -} - -type oneLibraryReader struct { - library instrumentation.Library - reader export.Reader -} - -func (o oneLibraryReader) ForEach(readerFunc func(instrumentation.Library, export.Reader) error) error { - return readerFunc(o.library, o.reader) -} - -// MultiInstrumentationLibraryReader returns an InstrumentationLibraryReader -// for a group of records that came from multiple instrumentation libraries. 
-func MultiInstrumentationLibraryReader(records map[instrumentation.Library][]export.Record) export.InstrumentationLibraryReader { - return instrumentationLibraryReader{records: records} -} - -type instrumentationLibraryReader struct { - records map[instrumentation.Library][]export.Record -} - -var _ export.InstrumentationLibraryReader = instrumentationLibraryReader{} - -func (m instrumentationLibraryReader) ForEach(fn func(instrumentation.Library, export.Reader) error) error { - for library, records := range m.records { - if err := fn(library, &metricReader{records: records}); err != nil { - return err - } - } - return nil -} - -type metricReader struct { - sync.RWMutex - records []export.Record -} - -var _ export.Reader = &metricReader{} - -func (m *metricReader) ForEach(_ aggregation.TemporalitySelector, fn func(export.Record) error) error { - for _, record := range m.records { - if err := fn(record); err != nil && err != aggregation.ErrNoData { - return err - } - } - return nil -} diff --git a/sdk/metric/processor/processortest/test_test.go b/sdk/metric/processor/processortest/test_test.go deleted file mode 100644 index 98c15f2f763..00000000000 --- a/sdk/metric/processor/processortest/test_test.go +++ /dev/null @@ -1,90 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package processortest_test - -import ( - "context" - "testing" - - "github.com/stretchr/testify/require" - - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/metric/instrument" - "go.opentelemetry.io/otel/sdk/instrumentation" - metricsdk "go.opentelemetry.io/otel/sdk/metric" - "go.opentelemetry.io/otel/sdk/metric/export" - "go.opentelemetry.io/otel/sdk/metric/export/aggregation" - "go.opentelemetry.io/otel/sdk/metric/processor/processortest" - "go.opentelemetry.io/otel/sdk/metric/sdkapi" - "go.opentelemetry.io/otel/sdk/resource" -) - -func generateTestData(t *testing.T, proc export.Processor) { - ctx := context.Background() - accum := metricsdk.NewAccumulator(proc) - meter := sdkapi.WrapMeterImpl(accum) - - counter, err := meter.SyncFloat64().Counter("counter.sum") - require.NoError(t, err) - - counter.Add(ctx, 100, attribute.String("K1", "V1")) - counter.Add(ctx, 101, attribute.String("K1", "V2")) - - counterObserver, err := meter.AsyncInt64().Counter("observer.sum") - require.NoError(t, err) - - err = meter.RegisterCallback([]instrument.Asynchronous{counterObserver}, func(ctx context.Context) { - counterObserver.Observe(ctx, 10, attribute.String("K1", "V1")) - counterObserver.Observe(ctx, 11, attribute.String("K1", "V2")) - }) - require.NoError(t, err) - - accum.Collect(ctx) -} - -func TestProcessorTesting(t *testing.T) { - // Test the Processor test helper using a real Accumulator to - // generate Accumulations. 
- checkpointer := processortest.NewCheckpointer( - processortest.NewProcessor( - processortest.AggregatorSelector(), - attribute.DefaultEncoder(), - ), - ) - generateTestData(t, checkpointer) - - res := resource.NewSchemaless(attribute.String("R", "V")) - expect := map[string]float64{ - "counter.sum/K1=V1/R=V": 100, - "counter.sum/K1=V2/R=V": 101, - "observer.sum/K1=V1/R=V": 10, - "observer.sum/K1=V2/R=V": 11, - } - - // Export the data and validate it again. - exporter := processortest.New( - aggregation.StatelessTemporalitySelector(), - attribute.DefaultEncoder(), - ) - - err := exporter.Export(context.Background(), res, processortest.OneInstrumentationLibraryReader( - instrumentation.Library{ - Name: "test", - }, - checkpointer.Reader(), - )) - require.NoError(t, err) - require.EqualValues(t, expect, exporter.Values()) -} diff --git a/sdk/metric/processor/reducer/doc.go b/sdk/metric/processor/reducer/doc.go deleted file mode 100644 index e25079c526f..00000000000 --- a/sdk/metric/processor/reducer/doc.go +++ /dev/null @@ -1,60 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -/* -Package reducer implements a metrics Processor component to reduce attributes. - -This package is currently in a pre-GA phase. Backwards incompatible changes -may be introduced in subsequent minor version releases as we work to track the -evolving OpenTelemetry specification and user feedback. - -The metrics Processor component this package implements applies an -attribute.Filter to each processed export.Accumulation to remove attributes -before passing the result to another Processor. This Processor can be used to -reduce inherent dimensionality in the data, as a way to control the cost of -collecting high cardinality metric data. - -For example, to compose a push controller with a reducer and a basic -metric processor: - - type someFilter struct{ - // configuration for this filter - // ... - } - - func (someFilter) AttributeFilterFor(_ *sdkapi.Descriptor) attribute.Filter { - return func(attr kv.KeyValue) bool { - // return true to keep this attr, false to drop this attr. - // ... 
-	}
-	}
-
-	func setupMetrics(exporter export.Exporter) (stop func()) {
-		basicProcessorFactory := basic.NewFactory(
-			simple.NewWithHistogramDistribution(),
-			exporter,
-		)
-
-		reducerProcessor := reducer.NewFactory(someFilter{...}, basicProcessorFactory)
-
-		controller := controller.New(
-			reducerProcessor,
-			exporter,
-			opts...,
-		)
-		controller.Start()
-		global.SetMeterProvider(controller.Provider())
-		return controller.Stop
-	}
-*/
-package reducer // import "go.opentelemetry.io/otel/sdk/metric/processor/reducer"
diff --git a/sdk/metric/processor/reducer/reducer.go b/sdk/metric/processor/reducer/reducer.go
deleted file mode 100644
index cd6daeb19d4..00000000000
--- a/sdk/metric/processor/reducer/reducer.go
+++ /dev/null
@@ -1,66 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package reducer // import "go.opentelemetry.io/otel/sdk/metric/processor/reducer"
-
-import (
-	"go.opentelemetry.io/otel/attribute"
-	"go.opentelemetry.io/otel/sdk/metric/export"
-	"go.opentelemetry.io/otel/sdk/metric/sdkapi"
-)
-
-type (
-	// Processor implements "dimensionality reduction" by
-	// filtering keys from export attribute sets.
-	Processor struct {
-		export.Checkpointer
-		filterSelector AttributeFilterSelector
-	}
-
-	// AttributeFilterSelector selects an attribute filter based on the
-	// instrument described by the descriptor.
-	AttributeFilterSelector interface {
-		AttributeFilterFor(descriptor *sdkapi.Descriptor) attribute.Filter
-	}
-)
-
-var _ export.Processor = &Processor{}
-var _ export.Checkpointer = &Processor{}
-
-// New returns a dimensionality-reducing Processor that passes data to the
-// next stage in an export pipeline.
-func New(filterSelector AttributeFilterSelector, ckpter export.Checkpointer) *Processor {
-	return &Processor{
-		Checkpointer:   ckpter,
-		filterSelector: filterSelector,
-	}
-}
-
-// Process implements export.Processor.
-func (p *Processor) Process(accum export.Accumulation) error {
-	// Note: the removed attributes are returned and ignored here.
-	// Conceivably these inputs could be useful to a sampler.
-	reduced, _ := accum.Attributes().Filter(
-		p.filterSelector.AttributeFilterFor(
-			accum.Descriptor(),
-		),
-	)
-	return p.Checkpointer.Process(
-		export.NewAccumulation(
-			accum.Descriptor(),
-			&reduced,
-			accum.Aggregator(),
-		),
-	)
-}
diff --git a/sdk/metric/processor/reducer/reducer_test.go b/sdk/metric/processor/reducer/reducer_test.go
deleted file mode 100644
index 12fbb7f86e0..00000000000
--- a/sdk/metric/processor/reducer/reducer_test.go
+++ /dev/null
@@ -1,117 +0,0 @@
-// Copyright The OpenTelemetry Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package reducer_test - -import ( - "context" - "testing" - - "github.com/stretchr/testify/require" - - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/metric/instrument" - "go.opentelemetry.io/otel/sdk/instrumentation" - metricsdk "go.opentelemetry.io/otel/sdk/metric" - "go.opentelemetry.io/otel/sdk/metric/export/aggregation" - "go.opentelemetry.io/otel/sdk/metric/processor/basic" - "go.opentelemetry.io/otel/sdk/metric/processor/processortest" - "go.opentelemetry.io/otel/sdk/metric/processor/reducer" - "go.opentelemetry.io/otel/sdk/metric/sdkapi" - "go.opentelemetry.io/otel/sdk/resource" -) - -var ( - kvs1 = []attribute.KeyValue{ - attribute.Int("A", 1), - attribute.Int("B", 2), - attribute.Int("C", 3), - } - kvs2 = []attribute.KeyValue{ - attribute.Int("A", 1), - attribute.Int("B", 0), - attribute.Int("C", 3), - } -) - -type testFilter struct{} - -func (testFilter) AttributeFilterFor(_ *sdkapi.Descriptor) attribute.Filter { - return func(attr attribute.KeyValue) bool { - return attr.Key == "A" || attr.Key == "C" - } -} - -func generateData(t *testing.T, impl sdkapi.MeterImpl) { - ctx := context.Background() - meter := sdkapi.WrapMeterImpl(impl) - - counter, err := meter.SyncFloat64().Counter("counter.sum") - require.NoError(t, err) - counter.Add(ctx, 100, kvs1...) - counter.Add(ctx, 100, kvs2...) - - counterObserver, err := meter.AsyncInt64().Counter("observer.sum") - require.NoError(t, err) - err = meter.RegisterCallback([]instrument.Asynchronous{counterObserver}, func(ctx context.Context) { - counterObserver.Observe(ctx, 10, kvs1...) - counterObserver.Observe(ctx, 10, kvs2...) - }) - require.NoError(t, err) -} - -func TestFilterProcessor(t *testing.T) { - testProc := processortest.NewProcessor( - processortest.AggregatorSelector(), - attribute.DefaultEncoder(), - ) - accum := metricsdk.NewAccumulator( - reducer.New(testFilter{}, processortest.NewCheckpointer(testProc)), - ) - generateData(t, accum) - - accum.Collect(context.Background()) - - require.EqualValues(t, map[string]float64{ - "counter.sum/A=1,C=3/": 200, - "observer.sum/A=1,C=3/": 20, - }, testProc.Values()) -} - -// Test a filter with the ../basic Processor. 
-func TestFilterBasicProcessor(t *testing.T) {
-	basicProc := basic.New(processortest.AggregatorSelector(), aggregation.CumulativeTemporalitySelector())
-	accum := metricsdk.NewAccumulator(
-		reducer.New(testFilter{}, basicProc),
-	)
-	exporter := processortest.New(basicProc, attribute.DefaultEncoder())
-
-	generateData(t, accum)
-
-	basicProc.StartCollection()
-	accum.Collect(context.Background())
-	if err := basicProc.FinishCollection(); err != nil {
-		t.Error(err)
-	}
-
-	res := resource.NewSchemaless(attribute.String("R", "V"))
-	require.NoError(t, exporter.Export(context.Background(), res, processortest.OneInstrumentationLibraryReader(instrumentation.Library{
-		Name: "test",
-	}, basicProc.Reader())))
-
-	require.EqualValues(t, map[string]float64{
-		"counter.sum/A=1,C=3/R=V":  200,
-		"observer.sum/A=1,C=3/R=V": 20,
-	}, exporter.Values())
-}
diff --git a/sdk/metric/provider.go b/sdk/metric/provider.go
new file mode 100644
index 00000000000..7f8f32bf104
--- /dev/null
+++ b/sdk/metric/provider.go
@@ -0,0 +1,126 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build go1.18
+// +build go1.18
+
+package metric // import "go.opentelemetry.io/otel/sdk/metric"
+
+import (
+	"context"
+
+	"go.opentelemetry.io/otel/metric"
+	"go.opentelemetry.io/otel/sdk/instrumentation"
+	"go.opentelemetry.io/otel/sdk/resource"
+)
+
+// MeterProvider handles the creation and coordination of Meters. All Meters
+// created by a MeterProvider will be associated with the same Resource, have
+// the same Views applied to them, and have their produced metric telemetry
+// passed to the configured Readers.
+type MeterProvider struct {
+	res *resource.Resource
+
+	meters meterRegistry
+
+	forceFlush, shutdown func(context.Context) error
+}
+
+// Compile-time check MeterProvider implements metric.MeterProvider.
+var _ metric.MeterProvider = (*MeterProvider)(nil)
+
+// NewMeterProvider returns a new and configured MeterProvider.
+//
+// By default, the returned MeterProvider is configured with the default
+// Resource and no Readers. Readers cannot be added after a MeterProvider is
+// created. This means a MeterProvider created with no Readers will perform
+// no operations.
+func NewMeterProvider(options ...Option) *MeterProvider {
+	conf := newConfig(options)
+
+	flush, sdown := conf.readerSignals()
+
+	registry := newPipelineRegistries(conf.readers)
+
+	return &MeterProvider{
+		res: conf.res,
+
+		meters: meterRegistry{
+			registry: registry,
+		},
+
+		forceFlush: flush,
+		shutdown:   sdown,
+	}
+}
+
+// Meter returns a Meter with the given name and configured with options.
+//
+// The name should be the name of the instrumentation scope creating
+// telemetry. This name may be the same as the instrumented code only if that
+// code provides built-in instrumentation.
+//
+// If name is empty, the default (go.opentelemetry.io/otel/sdk/meter) will be
+// used.
+//
+// Calls to the Meter method after Shutdown has been called will return Meters
+// that perform no operations.
+//
+// This method is safe to call concurrently.
+func (mp *MeterProvider) Meter(name string, options ...metric.MeterOption) metric.Meter {
+	c := metric.NewMeterConfig(options...)
+	return mp.meters.Get(instrumentation.Scope{
+		Name:      name,
+		Version:   c.InstrumentationVersion(),
+		SchemaURL: c.SchemaURL(),
+	})
+}
+
+// ForceFlush flushes all pending telemetry.
+//
+// This method honors the deadline or cancellation of ctx. An appropriate
+// error will be returned in these situations. There is no guarantee that all
+// telemetry will be flushed or all resources released in these situations.
+//
+// This method is safe to call concurrently.
+func (mp *MeterProvider) ForceFlush(ctx context.Context) error {
+	if mp.forceFlush != nil {
+		return mp.forceFlush(ctx)
+	}
+	return nil
+}
+
+// Shutdown shuts down the MeterProvider flushing all pending telemetry and
+// releasing any held computational resources.
+//
+// This call is idempotent. The first call will perform all flush and
+// releasing operations. Subsequent calls will perform no action and will
+// return an error stating this.
+//
+// Measurements made by instruments from meters this MeterProvider created
+// will not be exported after Shutdown is called.
+//
+// This method honors the deadline or cancellation of ctx. An appropriate
+// error will be returned in these situations. There is no guarantee that all
+// telemetry will be flushed or all resources released in these situations.
+//
+// This method is safe to call concurrently.
+func (mp *MeterProvider) Shutdown(ctx context.Context) error {
+	if mp.shutdown != nil {
+		return mp.shutdown(ctx)
+	}
+	return nil
+}
diff --git a/sdk/metric/provider_test.go b/sdk/metric/provider_test.go
new file mode 100644
index 00000000000..aefb23f8690
--- /dev/null
+++ b/sdk/metric/provider_test.go
@@ -0,0 +1,79 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
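To make the new MeterProvider lifecycle concrete, a hedged usage sketch follows; NewManualReader, WithReader, and WithResource are assumed here from other parts of this change and are not defined in this file:

	reader := NewManualReader() // assumed constructor for the SDK's manual reader
	mp := NewMeterProvider(
		WithResource(resource.Default()), // assumed resource option
		WithReader(reader),               // assumed reader option; readers cannot be added later
	)

	meter := mp.Meter("example.com/instrumentation") // instrumentation scope name
	_ = meter // instruments would be created from this meter

	// Flush pending telemetry, then release resources; Shutdown is idempotent.
	_ = mp.ForceFlush(context.Background())
	_ = mp.Shutdown(context.Background())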
+ +//go:build go1.18 +// +build go1.18 + +package metric + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestMeterConcurrentSafe(t *testing.T) { + const name = "TestMeterConcurrentSafe meter" + mp := NewMeterProvider() + + go func() { + _ = mp.Meter(name) + }() + + _ = mp.Meter(name) +} + +func TestForceFlushConcurrentSafe(t *testing.T) { + mp := NewMeterProvider() + + go func() { + _ = mp.ForceFlush(context.Background()) + }() + + _ = mp.ForceFlush(context.Background()) +} + +func TestShutdownConcurrentSafe(t *testing.T) { + mp := NewMeterProvider() + + go func() { + _ = mp.Shutdown(context.Background()) + }() + + _ = mp.Shutdown(context.Background()) +} + +func TestMeterDoesNotPanicForEmptyMeterProvider(t *testing.T) { + mp := MeterProvider{} + assert.NotPanics(t, func() { _ = mp.Meter("") }) +} + +func TestForceFlushDoesNotPanicForEmptyMeterProvider(t *testing.T) { + mp := MeterProvider{} + assert.NotPanics(t, func() { _ = mp.ForceFlush(context.Background()) }) +} + +func TestShutdownDoesNotPanicForEmptyMeterProvider(t *testing.T) { + mp := MeterProvider{} + assert.NotPanics(t, func() { _ = mp.Shutdown(context.Background()) }) +} + +func TestMeterProviderReturnsSameMeter(t *testing.T) { + mp := MeterProvider{} + mtr := mp.Meter("") + + assert.Same(t, mtr, mp.Meter("")) + assert.NotSame(t, mtr, mp.Meter("diff")) +} diff --git a/sdk/metric/reader.go b/sdk/metric/reader.go new file mode 100644 index 00000000000..ff5f987070d --- /dev/null +++ b/sdk/metric/reader.go @@ -0,0 +1,216 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build go1.18 +// +build go1.18 + +package metric // import "go.opentelemetry.io/otel/sdk/metric" + +import ( + "context" + "fmt" + + "go.opentelemetry.io/otel/internal/global" + "go.opentelemetry.io/otel/sdk/metric/aggregation" + "go.opentelemetry.io/otel/sdk/metric/metricdata" + "go.opentelemetry.io/otel/sdk/metric/view" +) + +// errDuplicateRegister is logged by a Reader when an attempt to register it +// more than once occurs. +var errDuplicateRegister = fmt.Errorf("duplicate reader registration") + +// ErrReaderNotRegistered is returned if Collect or Shutdown are called before +// the reader is registered with a MeterProvider. +var ErrReaderNotRegistered = fmt.Errorf("reader is not registered") + +// ErrReaderShutdown is returned if Collect or Shutdown are called after a +// reader has been Shutdown once. +var ErrReaderShutdown = fmt.Errorf("reader is shutdown") + +// Reader is the interface used between the SDK and an +// exporter. Control flow is bi-directional through the +// Reader, since the SDK initiates ForceFlush and Shutdown +// while the exporter initiates collection. The Register() method here +// informs the Reader that it can begin reading, signaling the +// start of bi-directional control flow. +// +// Typically, push-based exporters that are periodic will +// implement PeriodicExporter themselves and construct a +// PeriodicReader to satisfy this interface.
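+// +// A push-based pipeline might therefore be assembled as follows (a sketch; +// exp stands for a configured exporter value accepted by NewPeriodicReader): +// +// reader := NewPeriodicReader(exp) +// provider := NewMeterProvider(WithReader(reader))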
+// +// Pull-based exporters will typically implement Register +// themselves, since they read on demand. +type Reader interface { + // register registers a Reader with a MeterProvider. + // The producer argument allows the Reader to signal the SDK to collect + // and send aggregated metric measurements. + register(producer) + + // temporality reports the Temporality for the instrument kind provided. + temporality(view.InstrumentKind) metricdata.Temporality + + // aggregation returns what Aggregation to use for an instrument kind. + aggregation(view.InstrumentKind) aggregation.Aggregation // nolint:revive // import-shadow for method scoped by type. + + // Collect gathers and returns all metric data related to the Reader from + // the SDK. An error is returned if this is called after Shutdown. + Collect(context.Context) (metricdata.ResourceMetrics, error) + + // ForceFlush flushes all metric measurements held in an export pipeline. + // + // The deadline or cancellation of the passed context is honored. An appropriate + // error will be returned in these situations. There is no guarantee that all + // telemetry will be flushed or all resources have been released in these + // situations. + ForceFlush(context.Context) error + + // Shutdown flushes all metric measurements held in an export pipeline and releases any + // held computational resources. + // + // The deadline or cancellation of the passed context is honored. An appropriate + // error will be returned in these situations. There is no guarantee that all + // telemetry will be flushed or all resources have been released in these + // situations. + // + // After Shutdown is called, calls to Collect will perform no operation and instead will return + // an error indicating the shutdown state. + Shutdown(context.Context) error +} + +// producer produces metrics for a Reader. +type producer interface { + // produce returns aggregated metrics from a single collection. + // + // This method is safe to call concurrently. + produce(context.Context) (metricdata.ResourceMetrics, error) +} + +// produceHolder is used as an atomic.Value to wrap the non-concrete producer +// type. +type produceHolder struct { + produce func(context.Context) (metricdata.ResourceMetrics, error) +} + +// shutdownProducer always produces an ErrReaderShutdown error. +type shutdownProducer struct{} + +// produce returns an ErrReaderShutdown error. +func (p shutdownProducer) produce(context.Context) (metricdata.ResourceMetrics, error) { + return metricdata.ResourceMetrics{}, ErrReaderShutdown +} + +// ReaderOption applies a configuration option value to either a ManualReader or +// a PeriodicReader. +type ReaderOption interface { + ManualReaderOption + PeriodicReaderOption +} + +// TemporalitySelector selects the temporality to use based on the InstrumentKind. +type TemporalitySelector func(view.InstrumentKind) metricdata.Temporality + +// DefaultTemporalitySelector is the default TemporalitySelector used if +// WithTemporalitySelector is not provided. CumulativeTemporality will be used +// for all instrument kinds if this TemporalitySelector is used. +func DefaultTemporalitySelector(view.InstrumentKind) metricdata.Temporality { + return metricdata.CumulativeTemporality +} + +// WithTemporalitySelector sets the TemporalitySelector a reader will use to +// determine the Temporality of an instrument based on its kind. If this +// option is not used, the reader will use the DefaultTemporalitySelector.
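+// +// For example, a selector that reports delta temporality for every +// instrument kind could be written as (a sketch): +// +// func deltaSelector(view.InstrumentKind) metricdata.Temporality { +// return metricdata.DeltaTemporality +// } +// +// and applied with WithTemporalitySelector(deltaSelector).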
+func WithTemporalitySelector(selector TemporalitySelector) ReaderOption { + return temporalitySelectorOption{selector: selector} +} + +type temporalitySelectorOption struct { + selector func(instrument view.InstrumentKind) metricdata.Temporality +} + +// applyManual returns a manualReaderConfig with the option applied. +func (t temporalitySelectorOption) applyManual(mrc manualReaderConfig) manualReaderConfig { + mrc.temporalitySelector = t.selector + return mrc +} + +// applyPeriodic returns a periodicReaderConfig with the option applied. +func (t temporalitySelectorOption) applyPeriodic(prc periodicReaderConfig) periodicReaderConfig { + prc.temporalitySelector = t.selector + return prc +} + +// AggregationSelector selects the aggregation and the parameters to use for +// that aggregation based on the InstrumentKind. +type AggregationSelector func(view.InstrumentKind) aggregation.Aggregation + +// DefaultAggregationSelector returns the default aggregation and parameters +// that will be used to summarize measurements made from an instrument of +// InstrumentKind. This AggregationSelector uses the following selection +// mapping: Counter ⇨ Sum, Asynchronous Counter ⇨ Sum, UpDownCounter ⇨ Sum, +// Asynchronous UpDownCounter ⇨ Sum, Asynchronous Gauge ⇨ LastValue, +// Histogram ⇨ ExplicitBucketHistogram. +func DefaultAggregationSelector(ik view.InstrumentKind) aggregation.Aggregation { + switch ik { + case view.SyncCounter, view.SyncUpDownCounter, view.AsyncCounter, view.AsyncUpDownCounter: + return aggregation.Sum{} + case view.AsyncGauge: + return aggregation.LastValue{} + case view.SyncHistogram: + return aggregation.ExplicitBucketHistogram{ + Boundaries: []float64{0, 5, 10, 25, 50, 75, 100, 250, 500, 1000}, + NoMinMax: false, + } + } + panic("unknown instrument kind") +} + +// WithAggregationSelector sets the AggregationSelector a reader will use to +// determine the aggregation to use for an instrument based on its kind. If +// this option is not used, the reader will use the DefaultAggregationSelector +// or the aggregation explicitly passed for a view matching an instrument. +func WithAggregationSelector(selector AggregationSelector) ReaderOption { + // Deep copy and validate before using. + wrapped := func(ik view.InstrumentKind) aggregation.Aggregation { + a := selector(ik) + cpA := a.Copy() + if err := cpA.Err(); err != nil { + cpA = DefaultAggregationSelector(ik) + global.Error( + err, "using default aggregation instead", + "aggregation", a, + "replacement", cpA, + ) + } + return cpA + } + + return aggregationSelectorOption{selector: wrapped} +} + +type aggregationSelectorOption struct { + selector AggregationSelector +} + +// applyManual returns a manualReaderConfig with the option applied. +func (t aggregationSelectorOption) applyManual(c manualReaderConfig) manualReaderConfig { + c.aggregationSelector = t.selector + return c +} + +// applyPeriodic returns a periodicReaderConfig with the option applied. +func (t aggregationSelectorOption) applyPeriodic(c periodicReaderConfig) periodicReaderConfig { + c.aggregationSelector = t.selector + return c +} diff --git a/sdk/metric/reader_test.go b/sdk/metric/reader_test.go new file mode 100644 index 00000000000..630d11a869b --- /dev/null +++ b/sdk/metric/reader_test.go @@ -0,0 +1,241 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build go1.18 +// +build go1.18 + +package metric + +import ( + "context" + "sync" + "testing" + "time" + + "github.com/go-logr/logr/testr" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/suite" + + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/metric/unit" + "go.opentelemetry.io/otel/sdk/instrumentation" + "go.opentelemetry.io/otel/sdk/metric/metricdata" + "go.opentelemetry.io/otel/sdk/metric/view" + "go.opentelemetry.io/otel/sdk/resource" +) + +type readerTestSuite struct { + suite.Suite + + Factory func() Reader + Reader Reader +} + +func (ts *readerTestSuite) SetupSuite() { + otel.SetLogger(testr.New(ts.T())) +} + +func (ts *readerTestSuite) SetupTest() { + ts.Reader = ts.Factory() +} + +func (ts *readerTestSuite) TearDownTest() { + // Ensure the Reader is allowed to attempt to clean up. + _ = ts.Reader.Shutdown(context.Background()) +} + +func (ts *readerTestSuite) TestErrorForNotRegistered() { + _, err := ts.Reader.Collect(context.Background()) + ts.ErrorIs(err, ErrReaderNotRegistered) +} + +func (ts *readerTestSuite) TestProducer() { + ts.Reader.register(testProducer{}) + m, err := ts.Reader.Collect(context.Background()) + ts.NoError(err) + ts.Equal(testMetrics, m) +} + +func (ts *readerTestSuite) TestCollectAfterShutdown() { + ctx := context.Background() + ts.Reader.register(testProducer{}) + ts.Require().NoError(ts.Reader.Shutdown(ctx)) + + m, err := ts.Reader.Collect(ctx) + ts.ErrorIs(err, ErrReaderShutdown) + ts.Equal(metricdata.ResourceMetrics{}, m) +} + +func (ts *readerTestSuite) TestShutdownTwice() { + ctx := context.Background() + ts.Reader.register(testProducer{}) + ts.Require().NoError(ts.Reader.Shutdown(ctx)) + ts.ErrorIs(ts.Reader.Shutdown(ctx), ErrReaderShutdown) +} + +func (ts *readerTestSuite) TestMultipleForceFlush() { + ctx := context.Background() + ts.Reader.register(testProducer{}) + ts.Require().NoError(ts.Reader.ForceFlush(ctx)) + ts.NoError(ts.Reader.ForceFlush(ctx)) +} + +func (ts *readerTestSuite) TestMultipleRegister() { + p0 := testProducer{ + produceFunc: func(ctx context.Context) (metricdata.ResourceMetrics, error) { + // Differentiate this producer from the second by returning an + // error. + return testMetrics, assert.AnError + }, + } + p1 := testProducer{} + + ts.Reader.register(p0) + // This should be ignored. + ts.Reader.register(p1) + + _, err := ts.Reader.Collect(context.Background()) + ts.Equal(assert.AnError, err) +} + +func (ts *readerTestSuite) TestMethodConcurrency() { + // Requires the race-detector (a default test option for the project). + + // All reader methods should be concurrent-safe.
+ ts.Reader.register(testProducer{}) + ctx := context.Background() + + var wg sync.WaitGroup + const threads = 2 + for i := 0; i < threads; i++ { + wg.Add(1) + go func() { + defer wg.Done() + _, _ = ts.Reader.Collect(ctx) + }() + + wg.Add(1) + go func() { + defer wg.Done() + _ = ts.Reader.ForceFlush(ctx) + }() + + wg.Add(1) + go func() { + defer wg.Done() + _ = ts.Reader.Shutdown(ctx) + }() + } + wg.Wait() +} + +func (ts *readerTestSuite) TestShutdownBeforeRegister() { + ctx := context.Background() + ts.Require().NoError(ts.Reader.Shutdown(ctx)) + // Registering after shutdown should not revert the shutdown. + ts.Reader.register(testProducer{}) + + m, err := ts.Reader.Collect(ctx) + ts.ErrorIs(err, ErrReaderShutdown) + ts.Equal(metricdata.ResourceMetrics{}, m) +} + +var testMetrics = metricdata.ResourceMetrics{ + Resource: resource.NewSchemaless(attribute.String("test", "Reader")), + ScopeMetrics: []metricdata.ScopeMetrics{{ + Scope: instrumentation.Scope{Name: "sdk/metric/test/reader"}, + Metrics: []metricdata.Metrics{{ + Name: "fake data", + Description: "Data used to test a reader", + Unit: unit.Dimensionless, + Data: metricdata.Sum[int64]{ + Temporality: metricdata.CumulativeTemporality, + IsMonotonic: true, + DataPoints: []metricdata.DataPoint[int64]{{ + Attributes: attribute.NewSet(attribute.String("user", "alice")), + StartTime: time.Now(), + Time: time.Now().Add(time.Second), + Value: -1, + }}, + }, + }}, + }}, +} + +type testProducer struct { + produceFunc func(context.Context) (metricdata.ResourceMetrics, error) +} + +func (p testProducer) produce(ctx context.Context) (metricdata.ResourceMetrics, error) { + if p.produceFunc != nil { + return p.produceFunc(ctx) + } + return testMetrics, nil +} + +func benchReaderCollectFunc(r Reader) func(*testing.B) { + ctx := context.Background() + r.register(testProducer{}) + + // Store benchmark results in a closure to prevent the compiler from + // inlining and skipping the function. + var ( + collectedMetrics metricdata.ResourceMetrics + err error + ) + + return func(b *testing.B) { + b.ReportAllocs() + b.ResetTimer() + + for n := 0; n < b.N; n++ { + collectedMetrics, err = r.Collect(ctx) + assert.Equalf(b, testMetrics, collectedMetrics, "unexpected Collect response: (%#v, %v)", collectedMetrics, err) + } + } +} + +func TestDefaultAggregationSelector(t *testing.T) { + var undefinedInstrument view.InstrumentKind + assert.Panics(t, func() { DefaultAggregationSelector(undefinedInstrument) }) + + iKinds := []view.InstrumentKind{ + view.SyncCounter, + view.SyncUpDownCounter, + view.SyncHistogram, + view.AsyncCounter, + view.AsyncUpDownCounter, + view.AsyncGauge, + } + + for _, ik := range iKinds { + assert.NoError(t, DefaultAggregationSelector(ik).Err(), ik) + } +} + +func TestDefaultTemporalitySelector(t *testing.T) { + var undefinedInstrument view.InstrumentKind + for _, ik := range []view.InstrumentKind{ + undefinedInstrument, + view.SyncCounter, + view.SyncUpDownCounter, + view.SyncHistogram, + view.AsyncCounter, + view.AsyncUpDownCounter, + view.AsyncGauge, + } { + assert.Equal(t, metricdata.CumulativeTemporality, DefaultTemporalitySelector(ik)) + } +} diff --git a/sdk/metric/refcount_mapped.go b/sdk/metric/refcount_mapped.go deleted file mode 100644 index d9d2cb701c2..00000000000 --- a/sdk/metric/refcount_mapped.go +++ /dev/null @@ -1,60 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package metric // import "go.opentelemetry.io/otel/sdk/metric" - -import ( - "sync/atomic" -) - -// refcountMapped atomically counts the number of references (usages) of an entry -// while also keeping a state of mapped/unmapped into a different data structure -// (an external map or list for example). -// -// refcountMapped uses an atomic value where the least significant bit is used to -// keep the state of mapping ('1' is used for unmapped and '0' is for mapped) and -// the rest of the bits are used for refcounting. -type refcountMapped struct { - // refcount has to be aligned for 64-bit atomic operations. - value int64 -} - -// ref returns true if the entry is still mapped and increases the -// reference usages, if unmapped returns false. -func (rm *refcountMapped) ref() bool { - // Check if this entry was marked as unmapped between the moment - // we got a reference to it (or will be removed very soon) and here. - return atomic.AddInt64(&rm.value, 2)&1 == 0 -} - -func (rm *refcountMapped) unref() { - atomic.AddInt64(&rm.value, -2) -} - -// tryUnmap flips the mapped bit to "unmapped" state and returns true if both of the -// following conditions are true upon entry to this function: -// - There are no active references; -// - The mapped bit is in "mapped" state. -// -// Otherwise no changes are done to mapped bit and false is returned. -func (rm *refcountMapped) tryUnmap() bool { - if atomic.LoadInt64(&rm.value) != 0 { - return false - } - return atomic.CompareAndSwapInt64( - &rm.value, - 0, - 1, - ) -} diff --git a/sdk/metric/registry/doc.go b/sdk/metric/registry/doc.go deleted file mode 100644 index b401408beef..00000000000 --- a/sdk/metric/registry/doc.go +++ /dev/null @@ -1,24 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -/* -Package registry provides a non-standalone implementation of -MeterProvider that adds uniqueness checking for instrument descriptors -on top of other MeterProvider it wraps. - -This package is currently in a pre-GA phase. Backwards incompatible changes -may be introduced in subsequent minor version releases as we work to track the -evolving OpenTelemetry specification and user feedback. 
-*/ -package registry // import "go.opentelemetry.io/otel/sdk/metric/registry" diff --git a/sdk/metric/registry/registry.go b/sdk/metric/registry/registry.go deleted file mode 100644 index 4d339ab7d69..00000000000 --- a/sdk/metric/registry/registry.go +++ /dev/null @@ -1,139 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package registry // import "go.opentelemetry.io/otel/sdk/metric/registry" - -import ( - "context" - "fmt" - "sync" - - "go.opentelemetry.io/otel/metric/instrument" - "go.opentelemetry.io/otel/sdk/metric/sdkapi" -) - -// UniqueInstrumentMeterImpl implements the metric.MeterImpl interface, adding -// uniqueness checking for instrument descriptors. -type UniqueInstrumentMeterImpl struct { - lock sync.Mutex - impl sdkapi.MeterImpl - state map[string]sdkapi.InstrumentImpl -} - -var _ sdkapi.MeterImpl = (*UniqueInstrumentMeterImpl)(nil) - -// ErrMetricKindMismatch is the standard error for mismatched metric -// instrument definitions. -var ErrMetricKindMismatch = fmt.Errorf( - "a metric was already registered by this name with another kind or number type") - -// NewUniqueInstrumentMeterImpl returns a wrapped metric.MeterImpl -// with the addition of instrument name uniqueness checking. -func NewUniqueInstrumentMeterImpl(impl sdkapi.MeterImpl) *UniqueInstrumentMeterImpl { - return &UniqueInstrumentMeterImpl{ - impl: impl, - state: map[string]sdkapi.InstrumentImpl{}, - } -} - -// MeterImpl gives the caller access to the underlying MeterImpl -// used by this UniqueInstrumentMeterImpl. -func (u *UniqueInstrumentMeterImpl) MeterImpl() sdkapi.MeterImpl { - return u.impl -} - -// NewMetricKindMismatchError formats an error that describes a -// mismatched metric instrument definition. -func NewMetricKindMismatchError(desc sdkapi.Descriptor) error { - return fmt.Errorf("metric %s registered as %s %s: %w", - desc.Name(), - desc.NumberKind(), - desc.InstrumentKind(), - ErrMetricKindMismatch) -} - -// Compatible determines whether two sdkapi.Descriptors are considered -// the same for the purpose of uniqueness checking. -func Compatible(candidate, existing sdkapi.Descriptor) bool { - return candidate.InstrumentKind() == existing.InstrumentKind() && - candidate.NumberKind() == existing.NumberKind() -} - -// checkUniqueness returns an ErrMetricKindMismatch error if there is -// a conflict between a descriptor that was already registered and the -// `descriptor` argument. If there is an existing compatible -// registration, this returns the already-registered instrument. If -// there is no conflict and no prior registration, returns (nil, nil). -func (u *UniqueInstrumentMeterImpl) checkUniqueness(descriptor sdkapi.Descriptor) (sdkapi.InstrumentImpl, error) { - impl, ok := u.state[descriptor.Name()] - if !ok { - return nil, nil - } - - if !Compatible(descriptor, impl.Descriptor()) { - return nil, NewMetricKindMismatchError(impl.Descriptor()) - } - - return impl, nil -} - -// NewSyncInstrument implements sdkapi.MeterImpl. 
-func (u *UniqueInstrumentMeterImpl) NewSyncInstrument(descriptor sdkapi.Descriptor) (sdkapi.SyncImpl, error) { - u.lock.Lock() - defer u.lock.Unlock() - - impl, err := u.checkUniqueness(descriptor) - - if err != nil { - return nil, err - } else if impl != nil { - return impl.(sdkapi.SyncImpl), nil - } - - syncInst, err := u.impl.NewSyncInstrument(descriptor) - if err != nil { - return nil, err - } - u.state[descriptor.Name()] = syncInst - return syncInst, nil -} - -// NewAsyncInstrument implements sdkapi.MeterImpl. -func (u *UniqueInstrumentMeterImpl) NewAsyncInstrument(descriptor sdkapi.Descriptor) (sdkapi.AsyncImpl, error) { - u.lock.Lock() - defer u.lock.Unlock() - - impl, err := u.checkUniqueness(descriptor) - - if err != nil { - return nil, err - } else if impl != nil { - return impl.(sdkapi.AsyncImpl), nil - } - - asyncInst, err := u.impl.NewAsyncInstrument(descriptor) - if err != nil { - return nil, err - } - u.state[descriptor.Name()] = asyncInst - return asyncInst, nil -} - -// RegisterCallback registers callback with insts. -func (u *UniqueInstrumentMeterImpl) RegisterCallback(insts []instrument.Asynchronous, callback func(context.Context)) error { - u.lock.Lock() - defer u.lock.Unlock() - - return u.impl.RegisterCallback(insts, callback) -} diff --git a/sdk/metric/registry/registry_test.go b/sdk/metric/registry/registry_test.go deleted file mode 100644 index 3fe8d296045..00000000000 --- a/sdk/metric/registry/registry_test.go +++ /dev/null @@ -1,112 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package registry_test - -import ( - "errors" - "testing" - - "github.com/stretchr/testify/require" - - "go.opentelemetry.io/otel/metric" - metricsdk "go.opentelemetry.io/otel/sdk/metric" - "go.opentelemetry.io/otel/sdk/metric/registry" - "go.opentelemetry.io/otel/sdk/metric/sdkapi" -) - -type ( - newFunc func(m metric.Meter, name string) (sdkapi.InstrumentImpl, error) -) - -var ( - allNew = map[string]newFunc{ - "counter.int64": func(m metric.Meter, name string) (sdkapi.InstrumentImpl, error) { - return unwrap(m.SyncInt64().Counter(name)) - }, - "counter.float64": func(m metric.Meter, name string) (sdkapi.InstrumentImpl, error) { - return unwrap(m.SyncFloat64().Counter(name)) - }, - "histogram.int64": func(m metric.Meter, name string) (sdkapi.InstrumentImpl, error) { - return unwrap(m.SyncInt64().Histogram(name)) - }, - "histogram.float64": func(m metric.Meter, name string) (sdkapi.InstrumentImpl, error) { - return unwrap(m.SyncFloat64().Histogram(name)) - }, - "gaugeobserver.int64": func(m metric.Meter, name string) (sdkapi.InstrumentImpl, error) { - return unwrap(m.AsyncInt64().Gauge(name)) - }, - "gaugeobserver.float64": func(m metric.Meter, name string) (sdkapi.InstrumentImpl, error) { - return unwrap(m.AsyncFloat64().Gauge(name)) - }, - } -) - -func unwrap(impl interface{}, err error) (sdkapi.InstrumentImpl, error) { - if impl == nil { - return nil, err - } - if s, ok := impl.(interface { - SyncImpl() sdkapi.SyncImpl - }); ok { - return s.SyncImpl(), err - } - if a, ok := impl.(interface { - AsyncImpl() sdkapi.AsyncImpl - }); ok { - return a.AsyncImpl(), err - } - return nil, err -} - -// TODO Replace with controller. -func testMeterWithRegistry(name string) metric.Meter { - return sdkapi.WrapMeterImpl( - registry.NewUniqueInstrumentMeterImpl( - metricsdk.NewAccumulator(nil), - ), - ) -} - -func TestRegistrySameInstruments(t *testing.T) { - for _, nf := range allNew { - meter := testMeterWithRegistry("meter") - inst1, err1 := nf(meter, "this") - inst2, err2 := nf(meter, "this") - - require.NoError(t, err1) - require.NoError(t, err2) - require.Equal(t, inst1, inst2) - } -} - -func TestRegistryDiffInstruments(t *testing.T) { - for origName, origf := range allNew { - meter := testMeterWithRegistry("meter") - - _, err := origf(meter, "this") - require.NoError(t, err) - - for newName, nf := range allNew { - if newName == origName { - continue - } - - other, err := nf(meter, "this") - require.Error(t, err) - require.Nil(t, other) - require.True(t, errors.Is(err, registry.ErrMetricKindMismatch)) - } - } -} diff --git a/sdk/metric/sdk.go b/sdk/metric/sdk.go deleted file mode 100644 index a942f86f2d4..00000000000 --- a/sdk/metric/sdk.go +++ /dev/null @@ -1,423 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package metric // import "go.opentelemetry.io/otel/sdk/metric" - -import ( - "context" - "fmt" - "runtime" - "sync" - "sync/atomic" - - "go.opentelemetry.io/otel" - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/metric/instrument" - "go.opentelemetry.io/otel/sdk/metric/aggregator" - "go.opentelemetry.io/otel/sdk/metric/export" - "go.opentelemetry.io/otel/sdk/metric/number" - "go.opentelemetry.io/otel/sdk/metric/sdkapi" -) - -type ( - // Accumulator implements the OpenTelemetry Meter API. The - // Accumulator is bound to a single export.Processor in - // `NewAccumulator()`. - // - // The Accumulator supports a Collect() API to gather and export - // current data. Collect() should be arranged according to - // the processor model. Push-based processors will setup a - // timer to call Collect() periodically. Pull-based processors - // will call Collect() when a pull request arrives. - Accumulator struct { - // current maps `mapkey` to *record. - current sync.Map - - callbackLock sync.Mutex - callbacks map[*callback]struct{} - - // currentEpoch is the current epoch number. It is - // incremented in `Collect()`. - currentEpoch int64 - - // processor is the configured processor+configuration. - processor export.Processor - - // collectLock prevents simultaneous calls to Collect(). - collectLock sync.Mutex - } - - callback struct { - insts map[*asyncInstrument]struct{} - f func(context.Context) - } - - asyncContextKey struct{} - - asyncInstrument struct { - baseInstrument - instrument.Asynchronous - } - - syncInstrument struct { - baseInstrument - instrument.Synchronous - } - - // mapkey uniquely describes a metric instrument in terms of its - // InstrumentID and the encoded form of its attributes. - mapkey struct { - descriptor *sdkapi.Descriptor - ordered attribute.Distinct - } - - // record maintains the state of one metric instrument. Due - // the use of lock-free algorithms, there may be more than one - // `record` in existence at a time, although at most one can - // be referenced from the `Accumulator.current` map. - record struct { - // refMapped keeps track of refcounts and the mapping state to the - // Accumulator.current map. - refMapped refcountMapped - - // updateCount is incremented on every Update. - updateCount int64 - - // collectedCount is set to updateCount on collection, - // supports checking for no updates during a round. - collectedCount int64 - - // attrs is the stored attribute set for this record, except in cases - // where a attribute set is shared due to batch recording. - attrs attribute.Set - - // sortSlice has a single purpose - as a temporary place for sorting - // during attributes creation to avoid allocation. - sortSlice attribute.Sortable - - // inst is a pointer to the corresponding instrument. - inst *baseInstrument - - // current implements the actual RecordOne() API, - // depending on the type of aggregation. If nil, the - // metric was disabled by the exporter. - current aggregator.Aggregator - checkpoint aggregator.Aggregator - } - - baseInstrument struct { - meter *Accumulator - descriptor sdkapi.Descriptor - } -) - -var ( - _ sdkapi.MeterImpl = &Accumulator{} - - // ErrUninitializedInstrument is returned when an instrument is used when uninitialized. - ErrUninitializedInstrument = fmt.Errorf("use of an uninitialized instrument") - - // ErrBadInstrument is returned when an instrument from another SDK is - // attempted to be registered with this SDK. 
- ErrBadInstrument = fmt.Errorf("use of a instrument from another SDK") -) - -func (b *baseInstrument) Descriptor() sdkapi.Descriptor { - return b.descriptor -} - -func (a *asyncInstrument) Implementation() interface{} { - return a -} - -func (s *syncInstrument) Implementation() interface{} { - return s -} - -// acquireHandle gets or creates a `*record` corresponding to `kvs`, -// the input attributes. -func (b *baseInstrument) acquireHandle(kvs []attribute.KeyValue) *record { - // This memory allocation may not be used, but it's - // needed for the `sortSlice` field, to avoid an - // allocation while sorting. - rec := &record{} - rec.attrs = attribute.NewSetWithSortable(kvs, &rec.sortSlice) - - // Create lookup key for sync.Map (one allocation, as this - // passes through an interface{}) - mk := mapkey{ - descriptor: &b.descriptor, - ordered: rec.attrs.Equivalent(), - } - - if actual, ok := b.meter.current.Load(mk); ok { - // Existing record case. - existingRec := actual.(*record) - if existingRec.refMapped.ref() { - // At this moment it is guaranteed that the entry is in - // the map and will not be removed. - return existingRec - } - // This entry is no longer mapped, try to add a new entry. - } - - rec.refMapped = refcountMapped{value: 2} - rec.inst = b - - b.meter.processor.AggregatorFor(&b.descriptor, &rec.current, &rec.checkpoint) - - for { - // Load/Store: there's a memory allocation to place `mk` into - // an interface here. - if actual, loaded := b.meter.current.LoadOrStore(mk, rec); loaded { - // Existing record case. Cannot change rec here because if fail - // will try to add rec again to avoid new allocations. - oldRec := actual.(*record) - if oldRec.refMapped.ref() { - // At this moment it is guaranteed that the entry is in - // the map and will not be removed. - return oldRec - } - // This loaded entry is marked as unmapped (so Collect will remove - // it from the map immediately), try again - this is a busy waiting - // strategy to wait until Collect() removes this entry from the map. - // - // This can be improved by having a list of "Unmapped" entries for - // one time only usages, OR we can make this a blocking path and use - // a Mutex that protects the delete operation (delete only if the old - // record is associated with the key). - - // Let collector get work done to remove the entry from the map. - runtime.Gosched() - continue - } - // The new entry was added to the map, good to go. - return rec - } -} - -// RecordOne captures a single synchronous metric event. -// -// The order of the input array `kvs` may be sorted after the function is called. -func (s *syncInstrument) RecordOne(ctx context.Context, num number.Number, kvs []attribute.KeyValue) { - h := s.acquireHandle(kvs) - defer h.unbind() - h.captureOne(ctx, num) -} - -// ObserveOne captures a single asynchronous metric event. - -// The order of the input array `kvs` may be sorted after the function is called. -func (a *asyncInstrument) ObserveOne(ctx context.Context, num number.Number, attrs []attribute.KeyValue) { - h := a.acquireHandle(attrs) - defer h.unbind() - h.captureOne(ctx, num) -} - -// NewAccumulator constructs a new Accumulator for the given -// processor. This Accumulator supports only a single processor. -// -// The Accumulator does not start any background process to collect itself -// periodically, this responsibility lies with the processor, typically, -// depending on the type of export. 
For example, a pull-based -// processor will call Collect() when it receives a request to scrape -// current metric values. A push-based processor should configure its -// own periodic collection. -func NewAccumulator(processor export.Processor) *Accumulator { - return &Accumulator{ - processor: processor, - callbacks: map[*callback]struct{}{}, - } -} - -var _ sdkapi.MeterImpl = &Accumulator{} - -// NewSyncInstrument implements sdkapi.MetricImpl. -func (m *Accumulator) NewSyncInstrument(descriptor sdkapi.Descriptor) (sdkapi.SyncImpl, error) { - return &syncInstrument{ - baseInstrument: baseInstrument{ - descriptor: descriptor, - meter: m, - }, - }, nil -} - -// NewAsyncInstrument implements sdkapi.MetricImpl. -func (m *Accumulator) NewAsyncInstrument(descriptor sdkapi.Descriptor) (sdkapi.AsyncImpl, error) { - a := &asyncInstrument{ - baseInstrument: baseInstrument{ - descriptor: descriptor, - meter: m, - }, - } - return a, nil -} - -// RegisterCallback registers f to be called for insts. -func (m *Accumulator) RegisterCallback(insts []instrument.Asynchronous, f func(context.Context)) error { - cb := &callback{ - insts: map[*asyncInstrument]struct{}{}, - f: f, - } - for _, inst := range insts { - impl, ok := inst.(sdkapi.AsyncImpl) - if !ok { - return ErrBadInstrument - } - - ai, err := m.fromAsync(impl) - if err != nil { - return err - } - cb.insts[ai] = struct{}{} - } - - m.callbackLock.Lock() - defer m.callbackLock.Unlock() - m.callbacks[cb] = struct{}{} - return nil -} - -// Collect traverses the list of active records and observers and -// exports data for each active instrument. Collect() may not be -// called concurrently. -// -// During the collection pass, the export.Processor will receive -// one Export() call per current aggregation. -// -// Returns the number of records that were checkpointed. -func (m *Accumulator) Collect(ctx context.Context) int { - m.collectLock.Lock() - defer m.collectLock.Unlock() - - m.runAsyncCallbacks(ctx) - checkpointed := m.collectInstruments() - m.currentEpoch++ - - return checkpointed -} - -func (m *Accumulator) collectInstruments() int { - checkpointed := 0 - - m.current.Range(func(key interface{}, value interface{}) bool { - // Note: always continue to iterate over the entire - // map by returning `true` in this function. - inuse := value.(*record) - - mods := atomic.LoadInt64(&inuse.updateCount) - coll := inuse.collectedCount - - if mods != coll { - // Updates happened in this interval, - // checkpoint and continue. - checkpointed += m.checkpointRecord(inuse) - inuse.collectedCount = mods - return true - } - - // Having no updates since last collection, try to unmap: - if unmapped := inuse.refMapped.tryUnmap(); !unmapped { - // The record is referenced by a binding, continue. - return true - } - - // If any other goroutines are now trying to re-insert this - // entry in the map, they are busy calling Gosched() awaiting - // this deletion: - m.current.Delete(inuse.mapkey()) - - // There's a potential race between `LoadInt64` and - // `tryUnmap` in this function. 
Since this is the - // last we'll see of this record, checkpoint - mods = atomic.LoadInt64(&inuse.updateCount) - if mods != coll { - checkpointed += m.checkpointRecord(inuse) - } - return true - }) - - return checkpointed -} - -func (m *Accumulator) runAsyncCallbacks(ctx context.Context) { - m.callbackLock.Lock() - defer m.callbackLock.Unlock() - - ctx = context.WithValue(ctx, asyncContextKey{}, m) - - for cb := range m.callbacks { - cb.f(ctx) - } -} - -func (m *Accumulator) checkpointRecord(r *record) int { - if r.current == nil { - return 0 - } - err := r.current.SynchronizedMove(r.checkpoint, &r.inst.descriptor) - if err != nil { - otel.Handle(err) - return 0 - } - - a := export.NewAccumulation(&r.inst.descriptor, &r.attrs, r.checkpoint) - err = m.processor.Process(a) - if err != nil { - otel.Handle(err) - } - return 1 -} - -func (r *record) captureOne(ctx context.Context, num number.Number) { - if r.current == nil { - // The instrument is disabled according to the AggregatorSelector. - return - } - if err := aggregator.RangeTest(num, &r.inst.descriptor); err != nil { - otel.Handle(err) - return - } - if err := r.current.Update(ctx, num, &r.inst.descriptor); err != nil { - otel.Handle(err) - return - } - // Record was modified, inform the Collect() that things need - // to be collected while the record is still mapped. - atomic.AddInt64(&r.updateCount, 1) -} - -func (r *record) unbind() { - r.refMapped.unref() -} - -func (r *record) mapkey() mapkey { - return mapkey{ - descriptor: &r.inst.descriptor, - ordered: r.attrs.Equivalent(), - } -} - -// fromSync gets an async implementation object, checking for -// uninitialized instruments and instruments created by another SDK. -func (m *Accumulator) fromAsync(async sdkapi.AsyncImpl) (*asyncInstrument, error) { - if async == nil { - return nil, ErrUninitializedInstrument - } - inst, ok := async.Implementation().(*asyncInstrument) - if !ok { - return nil, ErrBadInstrument - } - return inst, nil -} diff --git a/sdk/metric/sdkapi/descriptor.go b/sdk/metric/sdkapi/descriptor.go deleted file mode 100644 index 778e9321eea..00000000000 --- a/sdk/metric/sdkapi/descriptor.go +++ /dev/null @@ -1,70 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package sdkapi // import "go.opentelemetry.io/otel/sdk/metric/sdkapi" - -import ( - "go.opentelemetry.io/otel/metric/unit" - "go.opentelemetry.io/otel/sdk/metric/number" -) - -// Descriptor contains all the settings that describe an instrument, -// including its name, metric kind, number kind, and the configurable -// options. -type Descriptor struct { - name string - instrumentKind InstrumentKind - numberKind number.Kind - description string - unit unit.Unit -} - -// NewDescriptor returns a Descriptor with the given contents. 
-func NewDescriptor(name string, ikind InstrumentKind, nkind number.Kind, description string, u unit.Unit) Descriptor { - return Descriptor{ - name: name, - instrumentKind: ikind, - numberKind: nkind, - description: description, - unit: u, - } -} - -// Name returns the metric instrument's name. -func (d Descriptor) Name() string { - return d.name -} - -// InstrumentKind returns the specific kind of instrument. -func (d Descriptor) InstrumentKind() InstrumentKind { - return d.instrumentKind -} - -// Description provides a human-readable description of the metric -// instrument. -func (d Descriptor) Description() string { - return d.description -} - -// Unit describes the units of the metric instrument. Unitless -// metrics return the empty string. -func (d Descriptor) Unit() unit.Unit { - return d.unit -} - -// NumberKind returns whether this instrument is declared over int64, -// float64, or uint64 values. -func (d Descriptor) NumberKind() number.Kind { - return d.numberKind -} diff --git a/sdk/metric/sdkapi/descriptor_test.go b/sdk/metric/sdkapi/descriptor_test.go deleted file mode 100644 index 1f084472535..00000000000 --- a/sdk/metric/sdkapi/descriptor_test.go +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package sdkapi - -import ( - "testing" - - "github.com/stretchr/testify/require" - - "go.opentelemetry.io/otel/metric/unit" - "go.opentelemetry.io/otel/sdk/metric/number" -) - -func TestDescriptorGetters(t *testing.T) { - d := NewDescriptor("name", HistogramInstrumentKind, number.Int64Kind, "my description", "my unit") - require.Equal(t, "name", d.Name()) - require.Equal(t, HistogramInstrumentKind, d.InstrumentKind()) - require.Equal(t, number.Int64Kind, d.NumberKind()) - require.Equal(t, "my description", d.Description()) - require.Equal(t, unit.Unit("my unit"), d.Unit()) -} diff --git a/sdk/metric/sdkapi/instrumentkind.go b/sdk/metric/sdkapi/instrumentkind.go deleted file mode 100644 index c7406a3e49a..00000000000 --- a/sdk/metric/sdkapi/instrumentkind.go +++ /dev/null @@ -1,80 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:generate stringer -type=InstrumentKind - -package sdkapi // import "go.opentelemetry.io/otel/sdk/metric/sdkapi" - -// InstrumentKind describes the kind of instrument. -type InstrumentKind int8 - -const ( - // HistogramInstrumentKind indicates a Histogram instrument. 
- HistogramInstrumentKind InstrumentKind = iota - // GaugeObserverInstrumentKind indicates an GaugeObserver instrument. - GaugeObserverInstrumentKind - - // CounterInstrumentKind indicates a Counter instrument. - CounterInstrumentKind - // UpDownCounterInstrumentKind indicates a UpDownCounter instrument. - UpDownCounterInstrumentKind - - // CounterObserverInstrumentKind indicates a CounterObserver instrument. - CounterObserverInstrumentKind - // UpDownCounterObserverInstrumentKind indicates a UpDownCounterObserver - // instrument. - UpDownCounterObserverInstrumentKind -) - -// Synchronous returns whether this is a synchronous kind of instrument. -func (k InstrumentKind) Synchronous() bool { - switch k { - case CounterInstrumentKind, UpDownCounterInstrumentKind, HistogramInstrumentKind: - return true - } - return false -} - -// Asynchronous returns whether this is an asynchronous kind of instrument. -func (k InstrumentKind) Asynchronous() bool { - return !k.Synchronous() -} - -// Adding returns whether this kind of instrument adds its inputs (as opposed to Grouping). -func (k InstrumentKind) Adding() bool { - switch k { - case CounterInstrumentKind, UpDownCounterInstrumentKind, CounterObserverInstrumentKind, UpDownCounterObserverInstrumentKind: - return true - } - return false -} - -// Grouping returns whether this kind of instrument groups its inputs (as opposed to Adding). -func (k InstrumentKind) Grouping() bool { - return !k.Adding() -} - -// Monotonic returns whether this kind of instrument exposes a non-decreasing sum. -func (k InstrumentKind) Monotonic() bool { - switch k { - case CounterInstrumentKind, CounterObserverInstrumentKind: - return true - } - return false -} - -// PrecomputedSum returns whether this kind of instrument receives precomputed sums. -func (k InstrumentKind) PrecomputedSum() bool { - return k.Adding() && k.Asynchronous() -} diff --git a/sdk/metric/sdkapi/instrumentkind_string.go b/sdk/metric/sdkapi/instrumentkind_string.go deleted file mode 100644 index 3a2e79d823e..00000000000 --- a/sdk/metric/sdkapi/instrumentkind_string.go +++ /dev/null @@ -1,28 +0,0 @@ -// Code generated by "stringer -type=InstrumentKind"; DO NOT EDIT. - -package sdkapi - -import "strconv" - -func _() { - // An "invalid array index" compiler error signifies that the constant values have changed. - // Re-run the stringer command to generate them again. 
- var x [1]struct{} - _ = x[HistogramInstrumentKind-0] - _ = x[GaugeObserverInstrumentKind-1] - _ = x[CounterInstrumentKind-2] - _ = x[UpDownCounterInstrumentKind-3] - _ = x[CounterObserverInstrumentKind-4] - _ = x[UpDownCounterObserverInstrumentKind-5] -} - -const _InstrumentKind_name = "HistogramInstrumentKindGaugeObserverInstrumentKindCounterInstrumentKindUpDownCounterInstrumentKindCounterObserverInstrumentKindUpDownCounterObserverInstrumentKind" - -var _InstrumentKind_index = [...]uint8{0, 23, 50, 71, 98, 127, 162} - -func (i InstrumentKind) String() string { - if i < 0 || i >= InstrumentKind(len(_InstrumentKind_index)-1) { - return "InstrumentKind(" + strconv.FormatInt(int64(i), 10) + ")" - } - return _InstrumentKind_name[_InstrumentKind_index[i]:_InstrumentKind_index[i+1]] -} diff --git a/sdk/metric/sdkapi/instrumentkind_test.go b/sdk/metric/sdkapi/instrumentkind_test.go deleted file mode 100644 index cd1db02a898..00000000000 --- a/sdk/metric/sdkapi/instrumentkind_test.go +++ /dev/null @@ -1,32 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package sdkapi_test - -import ( - "testing" - - "github.com/stretchr/testify/require" - - "go.opentelemetry.io/otel/sdk/metric/sdkapi" -) - -func TestInstrumentKinds(t *testing.T) { - require.Equal(t, sdkapi.HistogramInstrumentKind.String(), "HistogramInstrumentKind") - require.Equal(t, sdkapi.GaugeObserverInstrumentKind.String(), "GaugeObserverInstrumentKind") - require.Equal(t, sdkapi.CounterInstrumentKind.String(), "CounterInstrumentKind") - require.Equal(t, sdkapi.UpDownCounterInstrumentKind.String(), "UpDownCounterInstrumentKind") - require.Equal(t, sdkapi.CounterObserverInstrumentKind.String(), "CounterObserverInstrumentKind") - require.Equal(t, sdkapi.UpDownCounterObserverInstrumentKind.String(), "UpDownCounterObserverInstrumentKind") -} diff --git a/sdk/metric/sdkapi/noop.go b/sdk/metric/sdkapi/noop.go deleted file mode 100644 index 64a28d7b35d..00000000000 --- a/sdk/metric/sdkapi/noop.go +++ /dev/null @@ -1,83 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package sdkapi // import "go.opentelemetry.io/otel/sdk/metric/sdkapi" - -import ( - "context" - - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/metric/instrument" - "go.opentelemetry.io/otel/sdk/metric/number" -) // import ( -// "context" - -// "go.opentelemetry.io/otel/attribute" -// "go.opentelemetry.io/otel/sdk/metric/number" -// ) - -type noopInstrument struct { - descriptor Descriptor -} -type noopSyncInstrument struct { - noopInstrument - - instrument.Synchronous -} -type noopAsyncInstrument struct { - noopInstrument - - instrument.Asynchronous -} - -var _ SyncImpl = noopSyncInstrument{} -var _ AsyncImpl = noopAsyncInstrument{} - -// NewNoopSyncInstrument returns a No-op implementation of the -// synchronous instrument interface. -func NewNoopSyncInstrument() SyncImpl { - return noopSyncInstrument{ - noopInstrument: noopInstrument{ - descriptor: Descriptor{ - instrumentKind: CounterInstrumentKind, - }, - }, - } -} - -// NewNoopAsyncInstrument returns a No-op implementation of the -// asynchronous instrument interface. -func NewNoopAsyncInstrument() AsyncImpl { - return noopAsyncInstrument{ - noopInstrument: noopInstrument{ - descriptor: Descriptor{ - instrumentKind: CounterObserverInstrumentKind, - }, - }, - } -} - -func (noopInstrument) Implementation() interface{} { - return nil -} - -func (n noopInstrument) Descriptor() Descriptor { - return n.descriptor -} - -func (noopSyncInstrument) RecordOne(context.Context, number.Number, []attribute.KeyValue) { -} - -func (noopAsyncInstrument) ObserveOne(context.Context, number.Number, []attribute.KeyValue) { -} diff --git a/sdk/metric/sdkapi/sdkapi.go b/sdk/metric/sdkapi/sdkapi.go deleted file mode 100644 index 86226c456db..00000000000 --- a/sdk/metric/sdkapi/sdkapi.go +++ /dev/null @@ -1,162 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package sdkapi // import "go.opentelemetry.io/otel/sdk/metric/sdkapi" - -import ( - "context" - - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/metric/instrument" - "go.opentelemetry.io/otel/sdk/metric/number" -) - -// MeterImpl is the interface an SDK must implement to supply a Meter -// implementation. -type MeterImpl interface { - // NewSyncInstrument returns a newly constructed - // synchronous instrument implementation or an error, should - // one occur. - NewSyncInstrument(descriptor Descriptor) (SyncImpl, error) - - // NewAsyncInstrument returns a newly constructed - // asynchronous instrument implementation or an error, should - // one occur. - NewAsyncInstrument(descriptor Descriptor) (AsyncImpl, error) - - // Etc. - RegisterCallback(insts []instrument.Asynchronous, callback func(context.Context)) error -} - -// InstrumentImpl is a common interface for synchronous and -// asynchronous instruments. 
-type InstrumentImpl interface { - // Implementation returns the underlying implementation of the - // instrument, which allows the implementation to gain access - // to its own representation especially from a `Measurement`. - Implementation() interface{} - - // Descriptor returns a copy of the instrument's Descriptor. - Descriptor() Descriptor -} - -// SyncImpl is the implementation-level interface to a generic -// synchronous instrument (e.g., Histogram and Counter instruments). -type SyncImpl interface { - InstrumentImpl - instrument.Synchronous - - // RecordOne captures a single synchronous metric event. - RecordOne(ctx context.Context, n number.Number, attrs []attribute.KeyValue) -} - -// AsyncImpl is an implementation-level interface to an -// asynchronous instrument (e.g., Observer instruments). -type AsyncImpl interface { - InstrumentImpl - instrument.Asynchronous - - // ObserveOne captures a single synchronous metric event. - ObserveOne(ctx context.Context, n number.Number, attrs []attribute.KeyValue) -} - -// AsyncRunner is expected to convert into an AsyncSingleRunner or an -// AsyncBatchRunner. SDKs will encounter an error if the AsyncRunner -// does not satisfy one of these interfaces. -type AsyncRunner interface { - // AnyRunner is a non-exported method with no functional use - // other than to make this a non-empty interface. - AnyRunner() -} - -// AsyncSingleRunner is an interface implemented by single-observer -// callbacks. -type AsyncSingleRunner interface { - // Run accepts a single instrument and function for capturing - // observations of that instrument. Each call to the function - // receives one captured observation. (The function accepts - // multiple observations so the same implementation can be - // used for batch runners.) - Run(ctx context.Context, single AsyncImpl, capture func([]attribute.KeyValue, ...Observation)) - - AsyncRunner -} - -// AsyncBatchRunner is an interface implemented by batch-observer -// callbacks. -type AsyncBatchRunner interface { - // Run accepts a function for capturing observations of - // multiple instruments. - Run(ctx context.Context, capture func([]attribute.KeyValue, ...Observation)) - - AsyncRunner -} - -// NewMeasurement constructs a single observation, a binding between -// an asynchronous instrument and a number. -func NewMeasurement(inst SyncImpl, n number.Number) Measurement { - return Measurement{ - instrument: inst, - number: n, - } -} - -// Measurement is a low-level type used with synchronous instruments -// as a direct interface to the SDK via `RecordBatch`. -type Measurement struct { - // number needs to be aligned for 64-bit atomic operations. - number number.Number - instrument SyncImpl -} - -// SyncImpl returns the instrument that created this measurement. -// This returns an implementation-level object for use by the SDK, -// users should not refer to this. -func (m Measurement) SyncImpl() SyncImpl { - return m.instrument -} - -// Number returns a number recorded in this measurement. -func (m Measurement) Number() number.Number { - return m.number -} - -// NewObservation constructs a single observation, a binding between -// an asynchronous instrument and a number. -func NewObservation(inst AsyncImpl, n number.Number) Observation { - return Observation{ - instrument: inst, - number: n, - } -} - -// Observation is a low-level type used with asynchronous instruments -// as a direct interface to the SDK via `BatchObserver`. -type Observation struct { - // number needs to be aligned for 64-bit atomic operations. 
- number number.Number - instrument AsyncImpl -} - -// AsyncImpl returns the instrument that created this observation. -// This returns an implementation-level object for use by the SDK, -// users should not refer to this. -func (m Observation) AsyncImpl() AsyncImpl { - return m.instrument -} - -// Number returns a number recorded in this observation. -func (m Observation) Number() number.Number { - return m.number -} diff --git a/sdk/metric/sdkapi/sdkapi_test.go b/sdk/metric/sdkapi/sdkapi_test.go deleted file mode 100644 index 69fec0fe692..00000000000 --- a/sdk/metric/sdkapi/sdkapi_test.go +++ /dev/null @@ -1,41 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package sdkapi - -import ( - "testing" - - "github.com/stretchr/testify/require" - - "go.opentelemetry.io/otel/sdk/metric/number" -) - -func TestMeasurementGetters(t *testing.T) { - num := number.NewFloat64Number(1.5) - si := NewNoopSyncInstrument() - meas := NewMeasurement(si, num) - - require.Equal(t, si, meas.SyncImpl()) - require.Equal(t, num, meas.Number()) -} - -func TestObservationGetters(t *testing.T) { - num := number.NewFloat64Number(1.5) - ai := NewNoopAsyncInstrument() - obs := NewObservation(ai, num) - - require.Equal(t, ai, obs.AsyncImpl()) - require.Equal(t, num, obs.Number()) -} diff --git a/sdk/metric/sdkapi/wrap.go b/sdk/metric/sdkapi/wrap.go deleted file mode 100644 index aa6356f7e8f..00000000000 --- a/sdk/metric/sdkapi/wrap.go +++ /dev/null @@ -1,183 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package sdkapi // import "go.opentelemetry.io/otel/sdk/metric/sdkapi" - -import ( - "context" - - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/metric" - "go.opentelemetry.io/otel/metric/instrument" - "go.opentelemetry.io/otel/metric/instrument/asyncfloat64" - "go.opentelemetry.io/otel/metric/instrument/asyncint64" - "go.opentelemetry.io/otel/metric/instrument/syncfloat64" - "go.opentelemetry.io/otel/metric/instrument/syncint64" - "go.opentelemetry.io/otel/sdk/metric/number" -) - -type ( - meter struct{ MeterImpl } - sfMeter struct{ meter } - siMeter struct{ meter } - afMeter struct{ meter } - aiMeter struct{ meter } - - iAdder struct{ SyncImpl } - fAdder struct{ SyncImpl } - iRecorder struct{ SyncImpl } - fRecorder struct{ SyncImpl } - iObserver struct{ AsyncImpl } - fObserver struct{ AsyncImpl } -) - -// WrapMeterImpl wraps impl to be a full implementation of a Meter. 
-func WrapMeterImpl(impl MeterImpl) metric.Meter { - return meter{impl} -} - -// UnwrapMeterImpl unwraps the Meter to its bare MeterImpl. -func UnwrapMeterImpl(m metric.Meter) MeterImpl { - mm, ok := m.(meter) - if !ok { - return nil - } - return mm.MeterImpl -} - -func (m meter) AsyncFloat64() asyncfloat64.InstrumentProvider { - return afMeter{m} -} - -func (m meter) AsyncInt64() asyncint64.InstrumentProvider { - return aiMeter{m} -} - -func (m meter) SyncFloat64() syncfloat64.InstrumentProvider { - return sfMeter{m} -} - -func (m meter) SyncInt64() syncint64.InstrumentProvider { - return siMeter{m} -} - -func (m meter) RegisterCallback(insts []instrument.Asynchronous, cb func(ctx context.Context)) error { - return m.MeterImpl.RegisterCallback(insts, cb) -} - -func (m meter) newSync(name string, ikind InstrumentKind, nkind number.Kind, opts []instrument.Option) (SyncImpl, error) { - cfg := instrument.NewConfig(opts...) - return m.NewSyncInstrument(NewDescriptor(name, ikind, nkind, cfg.Description(), cfg.Unit())) -} - -func (m meter) newAsync(name string, ikind InstrumentKind, nkind number.Kind, opts []instrument.Option) (AsyncImpl, error) { - cfg := instrument.NewConfig(opts...) - return m.NewAsyncInstrument(NewDescriptor(name, ikind, nkind, cfg.Description(), cfg.Unit())) -} - -func (m afMeter) Counter(name string, opts ...instrument.Option) (asyncfloat64.Counter, error) { - inst, err := m.newAsync(name, CounterObserverInstrumentKind, number.Float64Kind, opts) - return fObserver{inst}, err -} - -func (m afMeter) UpDownCounter(name string, opts ...instrument.Option) (asyncfloat64.UpDownCounter, error) { - inst, err := m.newAsync(name, UpDownCounterObserverInstrumentKind, number.Float64Kind, opts) - return fObserver{inst}, err -} - -func (m afMeter) Gauge(name string, opts ...instrument.Option) (asyncfloat64.Gauge, error) { - inst, err := m.newAsync(name, GaugeObserverInstrumentKind, number.Float64Kind, opts) - return fObserver{inst}, err -} - -func (m aiMeter) Counter(name string, opts ...instrument.Option) (asyncint64.Counter, error) { - inst, err := m.newAsync(name, CounterObserverInstrumentKind, number.Int64Kind, opts) - return iObserver{inst}, err -} - -func (m aiMeter) UpDownCounter(name string, opts ...instrument.Option) (asyncint64.UpDownCounter, error) { - inst, err := m.newAsync(name, UpDownCounterObserverInstrumentKind, number.Int64Kind, opts) - return iObserver{inst}, err -} - -func (m aiMeter) Gauge(name string, opts ...instrument.Option) (asyncint64.Gauge, error) { - inst, err := m.newAsync(name, GaugeObserverInstrumentKind, number.Int64Kind, opts) - return iObserver{inst}, err -} - -func (m sfMeter) Counter(name string, opts ...instrument.Option) (syncfloat64.Counter, error) { - inst, err := m.newSync(name, CounterInstrumentKind, number.Float64Kind, opts) - return fAdder{inst}, err -} - -func (m sfMeter) UpDownCounter(name string, opts ...instrument.Option) (syncfloat64.UpDownCounter, error) { - inst, err := m.newSync(name, UpDownCounterInstrumentKind, number.Float64Kind, opts) - return fAdder{inst}, err -} - -func (m sfMeter) Histogram(name string, opts ...instrument.Option) (syncfloat64.Histogram, error) { - inst, err := m.newSync(name, HistogramInstrumentKind, number.Float64Kind, opts) - return fRecorder{inst}, err -} - -func (m siMeter) Counter(name string, opts ...instrument.Option) (syncint64.Counter, error) { - inst, err := m.newSync(name, CounterInstrumentKind, number.Int64Kind, opts) - return iAdder{inst}, err -} - -func (m siMeter) UpDownCounter(name string, opts 
...instrument.Option) (syncint64.UpDownCounter, error) { - inst, err := m.newSync(name, UpDownCounterInstrumentKind, number.Int64Kind, opts) - return iAdder{inst}, err -} - -func (m siMeter) Histogram(name string, opts ...instrument.Option) (syncint64.Histogram, error) { - inst, err := m.newSync(name, HistogramInstrumentKind, number.Int64Kind, opts) - return iRecorder{inst}, err -} - -func (a fAdder) Add(ctx context.Context, value float64, attrs ...attribute.KeyValue) { - if a.SyncImpl != nil { - a.SyncImpl.RecordOne(ctx, number.NewFloat64Number(value), attrs) - } -} - -func (a iAdder) Add(ctx context.Context, value int64, attrs ...attribute.KeyValue) { - if a.SyncImpl != nil { - a.SyncImpl.RecordOne(ctx, number.NewInt64Number(value), attrs) - } -} - -func (a fRecorder) Record(ctx context.Context, value float64, attrs ...attribute.KeyValue) { - if a.SyncImpl != nil { - a.SyncImpl.RecordOne(ctx, number.NewFloat64Number(value), attrs) - } -} - -func (a iRecorder) Record(ctx context.Context, value int64, attrs ...attribute.KeyValue) { - if a.SyncImpl != nil { - a.SyncImpl.RecordOne(ctx, number.NewInt64Number(value), attrs) - } -} - -func (a fObserver) Observe(ctx context.Context, value float64, attrs ...attribute.KeyValue) { - if a.AsyncImpl != nil { - a.AsyncImpl.ObserveOne(ctx, number.NewFloat64Number(value), attrs) - } -} - -func (a iObserver) Observe(ctx context.Context, value int64, attrs ...attribute.KeyValue) { - if a.AsyncImpl != nil { - a.AsyncImpl.ObserveOne(ctx, number.NewInt64Number(value), attrs) - } -} diff --git a/sdk/metric/selector/simple/simple.go b/sdk/metric/selector/simple/simple.go deleted file mode 100644 index 5451072f607..00000000000 --- a/sdk/metric/selector/simple/simple.go +++ /dev/null @@ -1,94 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package simple // import "go.opentelemetry.io/otel/sdk/metric/selector/simple" - -import ( - "go.opentelemetry.io/otel/sdk/metric/aggregator" - "go.opentelemetry.io/otel/sdk/metric/aggregator/histogram" - "go.opentelemetry.io/otel/sdk/metric/aggregator/lastvalue" - "go.opentelemetry.io/otel/sdk/metric/aggregator/sum" - "go.opentelemetry.io/otel/sdk/metric/export" - "go.opentelemetry.io/otel/sdk/metric/sdkapi" -) - -type ( - selectorInexpensive struct{} - selectorHistogram struct { - options []histogram.Option - } -) - -var ( - _ export.AggregatorSelector = selectorInexpensive{} - _ export.AggregatorSelector = selectorHistogram{} -) - -// NewWithInexpensiveDistribution returns a simple aggregator selector -// that uses minmaxsumcount aggregators for `Histogram` -// instruments. This selector is faster and uses less memory than the -// others in this package because minmaxsumcount aggregators maintain -// the least information about the distribution among these choices. 
-func NewWithInexpensiveDistribution() export.AggregatorSelector { - return selectorInexpensive{} -} - -// NewWithHistogramDistribution returns a simple aggregator selector -// that uses histogram aggregators for `Histogram` instruments. -// This selector is a good default choice for most metric exporters. -func NewWithHistogramDistribution(options ...histogram.Option) export.AggregatorSelector { - return selectorHistogram{options: options} -} - -func sumAggs(aggPtrs []*aggregator.Aggregator) { - aggs := sum.New(len(aggPtrs)) - for i := range aggPtrs { - *aggPtrs[i] = &aggs[i] - } -} - -func lastValueAggs(aggPtrs []*aggregator.Aggregator) { - aggs := lastvalue.New(len(aggPtrs)) - for i := range aggPtrs { - *aggPtrs[i] = &aggs[i] - } -} - -func (selectorInexpensive) AggregatorFor(descriptor *sdkapi.Descriptor, aggPtrs ...*aggregator.Aggregator) { - switch descriptor.InstrumentKind() { - case sdkapi.GaugeObserverInstrumentKind: - lastValueAggs(aggPtrs) - case sdkapi.HistogramInstrumentKind: - sumAggs(aggPtrs) - default: - sumAggs(aggPtrs) - } -} - -func (s selectorHistogram) AggregatorFor(descriptor *sdkapi.Descriptor, aggPtrs ...*aggregator.Aggregator) { - switch descriptor.InstrumentKind() { - case sdkapi.GaugeObserverInstrumentKind: - lastValueAggs(aggPtrs) - case sdkapi.HistogramInstrumentKind: - aggs := histogram.New(len(aggPtrs), descriptor, s.options...) - for i := range aggPtrs { - *aggPtrs[i] = &aggs[i] - } - default: - sumAggs(aggPtrs) - } -} diff --git a/sdk/metric/selector/simple/simple_test.go b/sdk/metric/selector/simple/simple_test.go deleted file mode 100644 index 9e946864e53..00000000000 --- a/sdk/metric/selector/simple/simple_test.go +++ /dev/null @@ -1,66 +0,0 @@ -// Copyright The OpenTelemetry Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License.
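The selectors removed above pick one aggregator implementation per instrument kind: last-value for gauge observers, sum or histogram for histograms, and sum for everything else. For context, a minimal sketch of driving that API directly, mirroring the oneAgg helper in the test file that follows; it assumes the pre-removal import paths, which this patch deletes:

package main

import (
	"fmt"

	"go.opentelemetry.io/otel/sdk/metric/aggregator"
	"go.opentelemetry.io/otel/sdk/metric/metrictest"
	"go.opentelemetry.io/otel/sdk/metric/number"
	"go.opentelemetry.io/otel/sdk/metric/sdkapi"
	"go.opentelemetry.io/otel/sdk/metric/selector/simple"
)

func main() {
	// Describe a histogram instrument, the same way the tests below do.
	desc := metrictest.NewDescriptor("latency", sdkapi.HistogramInstrumentKind, number.Float64Kind)

	// Each selector fills one aggregator slot per provided pointer.
	var cheap, dist aggregator.Aggregator
	simple.NewWithInexpensiveDistribution().AggregatorFor(&desc, &cheap)
	simple.NewWithHistogramDistribution().AggregatorFor(&desc, &dist)

	fmt.Printf("%T %T\n", cheap, dist) // *sum.Aggregator *histogram.Aggregator
}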
- -package simple_test - -import ( - "testing" - - "github.com/stretchr/testify/require" - - "go.opentelemetry.io/otel/sdk/metric/aggregator" - "go.opentelemetry.io/otel/sdk/metric/aggregator/histogram" - "go.opentelemetry.io/otel/sdk/metric/aggregator/lastvalue" - "go.opentelemetry.io/otel/sdk/metric/aggregator/sum" - "go.opentelemetry.io/otel/sdk/metric/export" - "go.opentelemetry.io/otel/sdk/metric/metrictest" - "go.opentelemetry.io/otel/sdk/metric/number" - "go.opentelemetry.io/otel/sdk/metric/sdkapi" - "go.opentelemetry.io/otel/sdk/metric/selector/simple" -) - -var ( - testCounterDesc = metrictest.NewDescriptor("counter", sdkapi.CounterInstrumentKind, number.Int64Kind) - testUpDownCounterDesc = metrictest.NewDescriptor("updowncounter", sdkapi.UpDownCounterInstrumentKind, number.Int64Kind) - testCounterObserverDesc = metrictest.NewDescriptor("counterobserver", sdkapi.CounterObserverInstrumentKind, number.Int64Kind) - testUpDownCounterObserverDesc = metrictest.NewDescriptor("updowncounterobserver", sdkapi.UpDownCounterObserverInstrumentKind, number.Int64Kind) - testHistogramDesc = metrictest.NewDescriptor("histogram", sdkapi.HistogramInstrumentKind, number.Int64Kind) - testGaugeObserverDesc = metrictest.NewDescriptor("gauge", sdkapi.GaugeObserverInstrumentKind, number.Int64Kind) -) - -func oneAgg(sel export.AggregatorSelector, desc *sdkapi.Descriptor) aggregator.Aggregator { - var agg aggregator.Aggregator - sel.AggregatorFor(desc, &agg) - return agg -} - -func testFixedSelectors(t *testing.T, sel export.AggregatorSelector) { - require.IsType(t, (*lastvalue.Aggregator)(nil), oneAgg(sel, &testGaugeObserverDesc)) - require.IsType(t, (*sum.Aggregator)(nil), oneAgg(sel, &testCounterDesc)) - require.IsType(t, (*sum.Aggregator)(nil), oneAgg(sel, &testUpDownCounterDesc)) - require.IsType(t, (*sum.Aggregator)(nil), oneAgg(sel, &testCounterObserverDesc)) - require.IsType(t, (*sum.Aggregator)(nil), oneAgg(sel, &testUpDownCounterObserverDesc)) -} - -func TestInexpensiveDistribution(t *testing.T) { - inex := simple.NewWithInexpensiveDistribution() - require.IsType(t, (*sum.Aggregator)(nil), oneAgg(inex, &testHistogramDesc)) - testFixedSelectors(t, inex) -} - -func TestHistogramDistribution(t *testing.T) { - hist := simple.NewWithHistogramDistribution() - require.IsType(t, (*histogram.Aggregator)(nil), oneAgg(hist, &testHistogramDesc)) - testFixedSelectors(t, hist) -} diff --git a/sdk/metric/view/doc.go b/sdk/metric/view/doc.go new file mode 100644 index 00000000000..e92e57aed10 --- /dev/null +++ b/sdk/metric/view/doc.go @@ -0,0 +1,20 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package view provides types and functionality that customize the metric +// telemetry an SDK will produce. The View type is used when a Reader is +// registered with a MeterProvider in the go.opentelemetry.io/otel/sdk/metric +// package. See the WithReader option in that package for more information on +// how this registration takes place. 
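The package comment above points at WithReader for registering a View. A rough sketch of that wiring, assuming the in-development SDK API at this point of the branch (metric.NewManualReader and a metric.WithReader option that accepts trailing views; both are subject to change as the branch evolves):

package main

import (
	"go.opentelemetry.io/otel/sdk/metric"
	"go.opentelemetry.io/otel/sdk/metric/aggregation"
	"go.opentelemetry.io/otel/sdk/metric/view"
)

func main() {
	// Drop everything the noisy instrument records.
	v, err := view.New(
		view.MatchInstrumentName("active-users"),
		view.WithSetAggregation(aggregation.Drop{}),
	)
	if err != nil {
		panic(err)
	}

	// The view applies to instruments collected through this reader.
	reader := metric.NewManualReader()
	provider := metric.NewMeterProvider(metric.WithReader(reader, v))
	_ = provider.Meter("example")
}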
+package view // import "go.opentelemetry.io/otel/sdk/metric/view" diff --git a/sdk/metric/view/example_test.go b/sdk/metric/view/example_test.go new file mode 100644 index 00000000000..bf2480055fc --- /dev/null +++ b/sdk/metric/view/example_test.go @@ -0,0 +1,199 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build go1.18 +// +build go1.18 + +package view // import "go.opentelemetry.io/otel/sdk/metric/view" + +import ( + "fmt" + + "go.opentelemetry.io/otel/sdk/instrumentation" + "go.opentelemetry.io/otel/sdk/metric/aggregation" +) + +func Example() { + // The "active-users" instrument created by the + // "github.com/super/noisy/instrumentation/package" your project includes + // has a bug: it records a measurement any time a user has any activity. + // This is causing a lot of strain on your program without providing any + // value to you. The next version of + // "github.com/super/noisy/instrumentation/package" corrects the + // instrumentation to only record a value when a user logs in, but it + // isn't out yet. + // + // Use a View to drop these measurements while you wait for the fix to come + // from upstream. + + v, err := New( + MatchInstrumentName("active-users"), + MatchInstrumentationScope(instrumentation.Scope{ + Name: "github.com/super/noisy/instrumentation/package", + Version: "v0.22.0", // Only match the problematic instrumentation version. + }), + WithSetAggregation(aggregation.Drop{}), + ) + if err != nil { + panic(err) + } + + // The SDK this view is registered with calls TransformInstrument when an + // instrument is created. Test that our fix will work as intended. + i, _ := v.TransformInstrument(Instrument{ + Name: "active-users", + Scope: instrumentation.Scope{ + Name: "github.com/super/noisy/instrumentation/package", + Version: "v0.22.0", + }, + Aggregation: aggregation.LastValue{}, + }) + fmt.Printf("Instrument{%q: %s}: %#v\n", i.Name, i.Scope.Version, i.Aggregation) + + // Also, ensure the next version will not be transformed. + _, ok := v.TransformInstrument(Instrument{ + Name: "active-users", + Scope: instrumentation.Scope{ + Name: "github.com/super/noisy/instrumentation/package", + Version: "v0.23.0", + }, + Aggregation: aggregation.LastValue{}, + }) + fmt.Printf("Instrument{\"active-users\": v0.23.0} matched: %t\n", ok) + // Output: + // + // Instrument{"active-users": v0.22.0}: aggregation.Drop{} + // Instrument{"active-users": v0.23.0} matched: false +} + +func ExampleMatchInstrumentName() { + v, err := New(MatchInstrumentName("request-*")) // Wildcard match. + if err != nil { + panic(err) + } + + for _, i := range []Instrument{ + {Name: "request-count"}, + {Name: "request-rate"}, + {Name: "latency"}, + } { + // The SDK calls TransformInstrument when an instrument is created.
+ _, ok := v.TransformInstrument(i) + fmt.Printf("Instrument{%q} matched: %t\n", i.Name, ok) + } + // Output: + // Instrument{"request-count"} matched: true + // Instrument{"request-rate"} matched: true + // Instrument{"latency"} matched: false +} + +func ExampleMatchInstrumentKind() { + v, err := New(MatchInstrumentKind(SyncCounter)) + if err != nil { + panic(err) + } + + for _, i := range []Instrument{ + {Name: "synchronous counter", Kind: SyncCounter}, + {Name: "synchronous histogram", Kind: SyncHistogram}, + {Name: "asynchronous counter", Kind: AsyncCounter}, + } { + // The SDK calls TransformInstrument when an instrument is created. + _, ok := v.TransformInstrument(i) + fmt.Printf("Instrument{%q} matched: %t\n", i.Name, ok) + } + // Output: + // Instrument{"synchronous counter"} matched: true + // Instrument{"synchronous histogram"} matched: false + // Instrument{"asynchronous counter"} matched: false +} + +func ExampleMatchInstrumentationScope() { + v, err := New(MatchInstrumentationScope(instrumentation.Scope{ + Name: "custom/instrumentation/package", + Version: "v0.22.0", // Only match this version of instrumentation. + })) + if err != nil { + panic(err) + } + + for _, i := range []Instrument{ + {Name: "v1.0.0 instrumentation", Scope: instrumentation.Scope{ + Name: "custom/instrumentation/package", + Version: "v1.0.0", + }}, + {Name: "v0.22.0 instrumentation", Scope: instrumentation.Scope{ + Name: "custom/instrumentation/package", + Version: "v0.22.0", + }}, + } { + // The SDK calls TransformInstrument when an instrument is created. + _, ok := v.TransformInstrument(i) + fmt.Printf("Instrument{%q} matched: %t\n", i.Name, ok) + } + // Output: + // Instrument{"v1.0.0 instrumentation"} matched: false + // Instrument{"v0.22.0 instrumentation"} matched: true +} + +func ExampleWithRename() { + v, err := New(MatchInstrumentName("bad-name"), WithRename("good-name")) + if err != nil { + panic(err) + } + + // The SDK calls TransformInstrument when an instrument is created. + i, _ := v.TransformInstrument(Instrument{Name: "bad-name"}) + fmt.Printf("Instrument{%q}\n", i.Name) + // Output: Instrument{"good-name"} +} + +func ExampleWithSetDescription() { + v, err := New( + MatchInstrumentName("requests"), + WithSetDescription("Number of requests received"), + ) + if err != nil { + panic(err) + } + + // The SDK calls TransformInstrument when an instrument is created. + i, _ := v.TransformInstrument(Instrument{ + Name: "requests", + Description: "incorrect description", + }) + fmt.Printf("Instrument{%q: %s}\n", i.Name, i.Description) + // Output: Instrument{"requests": Number of requests received} +} + +func ExampleWithSetAggregation() { + v, err := New(MatchInstrumentationScope(instrumentation.Scope{ + Name: "super/noisy/instrumentation/package", + }), WithSetAggregation(aggregation.Drop{})) + if err != nil { + panic(err) + } + + // The SDK calls TransformInstrument when an instrument is created. 
+ i, _ := v.TransformInstrument(Instrument{ + Name: "active-users", + Scope: instrumentation.Scope{ + Name: "super/noisy/instrumentation/package", + Version: "v0.5.0", + }, + Aggregation: aggregation.LastValue{}, + }) + fmt.Printf("Instrument{%q}: %#v\n", i.Name, i.Aggregation) + // Output: Instrument{"active-users"}: aggregation.Drop{} +} diff --git a/exporters/otlp/otlpmetric/internal/metrictransform/resource.go b/sdk/metric/view/instrument.go similarity index 59% rename from exporters/otlp/otlpmetric/internal/metrictransform/resource.go rename to sdk/metric/view/instrument.go index dbf0c5e490a..e024b37a6ff 100644 --- a/exporters/otlp/otlpmetric/internal/metrictransform/resource.go +++ b/sdk/metric/view/instrument.go @@ -12,17 +12,22 @@ // See the License for the specific language governing permissions and // limitations under the License. -package metrictransform // import "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal/metrictransform" +//go:build go1.18 +// +build go1.18 + +package view // import "go.opentelemetry.io/otel/sdk/metric/view" import ( - "go.opentelemetry.io/otel/sdk/resource" - resourcepb "go.opentelemetry.io/proto/otlp/resource/v1" + "go.opentelemetry.io/otel/sdk/instrumentation" + "go.opentelemetry.io/otel/sdk/metric/aggregation" ) -// Resource transforms a Resource into an OTLP Resource. -func Resource(r *resource.Resource) *resourcepb.Resource { - if r == nil { - return nil - } - return &resourcepb.Resource{Attributes: ResourceAttributes(r)} +// Instrument uniquely identifies an instrument within a meter. +type Instrument struct { + Scope instrumentation.Scope + + Name string + Description string + Kind InstrumentKind + Aggregation aggregation.Aggregation } diff --git a/sdk/metric/view/instrumentkind.go b/sdk/metric/view/instrumentkind.go new file mode 100644 index 00000000000..d5fc67953f9 --- /dev/null +++ b/sdk/metric/view/instrumentkind.go @@ -0,0 +1,46 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build go1.18 +// +build go1.18 + +package view // import "go.opentelemetry.io/otel/sdk/metric/view" + +// InstrumentKind describes the kind of instrument a Meter can create. +type InstrumentKind uint8 + +// These are all the instrument kinds supported by the SDK. +const ( + // undefinedInstrument is an uninitialized instrument kind, should not be used. + //nolint:deadcode,varcheck + undefinedInstrument InstrumentKind = iota + // SyncCounter is an instrument kind that records increasing values + // synchronously in application code. + SyncCounter + // SyncUpDownCounter is an instrument kind that records increasing and + // decreasing values synchronously in application code. + SyncUpDownCounter + // SyncHistogram is an instrument kind that records a distribution of + // values synchronously in application code. + SyncHistogram + // AsyncCounter is an instrument kind that records increasing values in an + // asynchronous callback. 
+ AsyncCounter + // AsyncUpDownCounter is an instrument kind that records increasing and + // decreasing values in an asynchronous callback. + AsyncUpDownCounter + // AsyncGauge is an instrument kind that records current values in an + // asynchronous callback. + AsyncGauge +) diff --git a/sdk/metric/view/view.go b/sdk/metric/view/view.go new file mode 100644 index 00000000000..946d7755230 --- /dev/null +++ b/sdk/metric/view/view.go @@ -0,0 +1,235 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build go1.18 +// +build go1.18 + +package view // import "go.opentelemetry.io/otel/sdk/metric/view" + +import ( + "fmt" + "regexp" + "strings" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/internal/global" + "go.opentelemetry.io/otel/sdk/instrumentation" + "go.opentelemetry.io/otel/sdk/metric/aggregation" +) + +// View provides users with the flexibility to customize the metrics that are +// output by the SDK. A View can be used to ignore, change the name, +// description, and aggregation of, and customize which attribute(s) are to be +// reported by Instruments. +// +// An empty View will match all instruments, and do no transformations. +type View struct { + instrumentName *regexp.Regexp + hasWildcard bool + scope instrumentation.Scope + instrumentKind InstrumentKind + + filter attribute.Filter + name string + description string + agg aggregation.Aggregation +} + +// New returns a new configured View. If there are any duplicate Options passed, +// the last one passed will take precedence. The unique, de-duplicated, +// Options are all applied to the View. An instrument needs to match all of +// the match Options passed for the View to be applied to it. Similarly, all +// transform operation Options are applied to matched Instruments. +func New(opts ...Option) (View, error) { + v := View{} + + for _, opt := range opts { + v = opt.apply(v) + } + + emptyScope := instrumentation.Scope{} + if v.instrumentName == nil && + v.scope == emptyScope && + v.instrumentKind == undefinedInstrument { + return View{}, fmt.Errorf("must provide at least 1 match option") + } + + if v.hasWildcard && v.name != "" { + return View{}, fmt.Errorf("invalid view: view name specified for multiple instruments") + } + + return v, nil +} + +// TransformInstrument will check if an instrument matches this view +// and will convert it if it does. +func (v View) TransformInstrument(inst Instrument) (transformed Instrument, match bool) { + if !v.match(inst) { + return Instrument{}, false + } + if v.name != "" { + inst.Name = v.name + } + if v.description != "" { + inst.Description = v.description + } + if v.agg != nil { + inst.Aggregation = v.agg + } + return inst, true +} + +// AttributeFilter returns a function that returns only attributes specified by +// WithFilterAttributes. If no filter was provided nil is returned. 
+func (v View) AttributeFilter() func(attribute.Set) attribute.Set { + if v.filter == nil { + return nil + } + return func(input attribute.Set) attribute.Set { + out, _ := input.Filter(v.filter) + return out + } +} + +func (v View) matchName(name string) bool { + return v.instrumentName == nil || v.instrumentName.MatchString(name) +} + +func (v View) matchScopeName(name string) bool { + return v.scope.Name == "" || name == v.scope.Name +} + +func (v View) matchScopeVersion(version string) bool { + return v.scope.Version == "" || version == v.scope.Version +} + +func (v View) matchScopeSchemaURL(schemaURL string) bool { + return v.scope.SchemaURL == "" || schemaURL == v.scope.SchemaURL +} + +func (v View) matchInstrumentKind(kind InstrumentKind) bool { + return v.instrumentKind == undefinedInstrument || kind == v.instrumentKind +} + +func (v View) match(i Instrument) bool { + return v.matchName(i.Name) && + v.matchScopeName(i.Scope.Name) && + v.matchScopeSchemaURL(i.Scope.SchemaURL) && + v.matchScopeVersion(i.Scope.Version) && + v.matchInstrumentKind(i.Kind) +} + +// Option applies a configuration option value to a View. +type Option interface { + apply(View) View +} + +type optionFunc func(View) View + +func (f optionFunc) apply(v View) View { + return f(v) +} + +// MatchInstrumentName will match an instrument based on its name. +// This will accept wildcards of * for zero or more characters, and ? for +// exactly one character. A name of "*" (default) will match all instruments. +func MatchInstrumentName(name string) Option { + return optionFunc(func(v View) View { + if strings.ContainsAny(name, "*?") { + v.hasWildcard = true + } + name = regexp.QuoteMeta(name) + name = "^" + name + "$" + name = strings.ReplaceAll(name, "\\?", ".") + name = strings.ReplaceAll(name, "\\*", ".*") + v.instrumentName = regexp.MustCompile(name) + return v + }) +} + +// MatchInstrumentKind will match an instrument based on the instrument's kind. +// The default is to match all instrument kinds. +func MatchInstrumentKind(kind InstrumentKind) Option { + return optionFunc(func(v View) View { + v.instrumentKind = kind + return v + }) +} + +// MatchInstrumentationScope will do an exact match on any +// instrumentation.Scope field that is non-empty (not ""). The default is to match all +// instrumentation scopes. +func MatchInstrumentationScope(scope instrumentation.Scope) Option { + return optionFunc(func(v View) View { + v.scope = scope + return v + }) +} + +// WithRename will rename the instrument the view matches. If not used or empty the +// instrument name will not be changed. Must be used with a non-wildcard +// instrument name match. +func WithRename(name string) Option { + return optionFunc(func(v View) View { + v.name = name + return v + }) +} + +// WithSetDescription will change the description of the instruments the view +// matches to desc. If not used or empty the description will not be changed. +func WithSetDescription(desc string) Option { + return optionFunc(func(v View) View { + v.description = desc + return v + }) +} + +// WithFilterAttributes will select attributes that have a matching key. If not used +// or empty no filter will be applied.
+func WithFilterAttributes(keys ...attribute.Key) Option { + return optionFunc(func(v View) View { + if len(keys) == 0 { + return v + } + filterKeys := map[attribute.Key]struct{}{} + for _, key := range keys { + filterKeys[key] = struct{}{} + } + + v.filter = attribute.Filter(func(kv attribute.KeyValue) bool { + _, ok := filterKeys[kv.Key] + return ok + }) + return v + }) +} + +// WithSetAggregation will use the aggregation a for matching instruments. If +// this option is not provided, the reader defined aggregation for the +// instrument will be used. +// +// If a is misconfigured, it will not be used and an error will be logged. +func WithSetAggregation(a aggregation.Aggregation) Option { + cpA := a.Copy() + if err := cpA.Err(); err != nil { + global.Error(err, "not using aggregation with view", "aggregation", a) + return optionFunc(func(v View) View { return v }) + } + + return optionFunc(func(v View) View { + v.agg = cpA + return v + }) +} diff --git a/sdk/metric/view/view_test.go b/sdk/metric/view/view_test.go new file mode 100644 index 00000000000..c461bf14a9b --- /dev/null +++ b/sdk/metric/view/view_test.go @@ -0,0 +1,447 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build go1.18 +// +build go1.18 + +package view // import "go.opentelemetry.io/otel/sdk/metric/view" + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/sdk/instrumentation" +) + +var matchInstrument = Instrument{ + Scope: instrumentation.Scope{ + Name: "bar", + Version: "v1.0.0", + SchemaURL: "stuff.test/", + }, + Name: "foo", + Kind: SyncCounter, + Description: "", +} + +var noMatchInstrument = Instrument{ + Scope: instrumentation.Scope{ + Name: "notfoo", + Version: "v0.x.0", + SchemaURL: "notstuff.test/", + }, + Name: "notstuff", + Description: "", + Kind: undefinedInstrument, +} + +var emptyDescription = Instrument{} + +func TestViewTransformInstrument(t *testing.T) { + tests := []struct { + name string + options []Option + match Instrument + notMatch Instrument + }{ + { + name: "instrument name", + options: []Option{ + MatchInstrumentName("foo"), + }, + match: matchInstrument, + notMatch: emptyDescription, + }, + { + name: "Scope name", + options: []Option{ + MatchInstrumentationScope(instrumentation.Scope{ + Name: "bar", + }), + }, + match: matchInstrument, + notMatch: emptyDescription, + }, + { + name: "Scope version", + options: []Option{ + MatchInstrumentationScope(instrumentation.Scope{ + Version: "v1.0.0", + }), + }, + + match: matchInstrument, + notMatch: emptyDescription, + }, + { + name: "Scope SchemaURL", + options: []Option{ + MatchInstrumentationScope(instrumentation.Scope{ + SchemaURL: "stuff.test/", + }), + }, + match: matchInstrument, + notMatch: emptyDescription, + }, { + name: "instrument kind", + options: []Option{ + MatchInstrumentKind(SyncCounter), + }, + match: matchInstrument, + notMatch: emptyDescription, + }, + 
{ + name: "Expands *", + options: []Option{ + MatchInstrumentName("f*"), + }, + match: matchInstrument, + notMatch: emptyDescription, + }, + { + name: "composite literal name", + options: []Option{ + MatchInstrumentName("foo"), + MatchInstrumentationScope(instrumentation.Scope{ + Name: "bar", + Version: "v1.0.0", + SchemaURL: "stuff.test/", + }), + }, + match: matchInstrument, + notMatch: emptyDescription, + }, + { + name: "rename", + options: []Option{ + MatchInstrumentName("foo"), + WithRename("baz"), + }, + match: Instrument{ + Scope: instrumentation.Scope{ + Name: "bar", + Version: "v1.0.0", + SchemaURL: "stuff.test/", + }, + Name: "baz", + Description: "", + Kind: SyncCounter, + }, + notMatch: emptyDescription, + }, + { + name: "change description", + options: []Option{ + MatchInstrumentName("foo"), + WithSetDescription("descriptive stuff"), + }, + match: Instrument{ + Scope: instrumentation.Scope{ + Name: "bar", + Version: "v1.0.0", + SchemaURL: "stuff.test/", + }, + Name: "foo", + Description: "descriptive stuff", + Kind: SyncCounter, + }, + notMatch: emptyDescription, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + v, err := New(tt.options...) + require.NoError(t, err) + + t.Run("match", func(t *testing.T) { + got, match := v.TransformInstrument(matchInstrument) + assert.Equal(t, tt.match, got) + assert.True(t, match) + }) + + t.Run("does not match", func(t *testing.T) { + got, match := v.TransformInstrument(noMatchInstrument) + assert.Equal(t, tt.notMatch, got) + assert.False(t, match) + }) + }) + } +} + +func TestViewMatchName(t *testing.T) { + tests := []struct { + name string + matchName string + matches []string + notMatches []string + hasWildcard bool + }{ + { + name: "exact", + matchName: "foo", + matches: []string{"foo"}, + notMatches: []string{"foobar", "barfoo", "barfoobaz"}, + hasWildcard: false, + }, + { + name: "*", + matchName: "*", + matches: []string{"foo", "foobar", "barfoo", "barfoobaz", ""}, + notMatches: []string{}, + hasWildcard: true, + }, + { + name: "front ?", + matchName: "?foo", + matches: []string{"1foo", "afoo"}, + notMatches: []string{"foo", "foobar", "barfoo", "barfoobaz"}, + hasWildcard: true, + }, + { + name: "back ?", + matchName: "foo?", + matches: []string{"foo1", "fooz"}, + notMatches: []string{"foo", "foobar", "barfoo", "barfoobaz"}, + hasWildcard: true, + }, + { + name: "front *", + matchName: "*foo", + matches: []string{"foo", "barfoo"}, + notMatches: []string{"foobar", "barfoobaz"}, + hasWildcard: true, + }, + { + name: "back *", + matchName: "foo*", + matches: []string{"foo", "foobar"}, + notMatches: []string{"barfoo", "barfoobaz"}, + hasWildcard: true, + }, + { + name: "both *", + matchName: "*foo*", + matches: []string{"foo", "foobar", "barfoo", "barfoobaz"}, + notMatches: []string{"baz"}, + hasWildcard: true, + }, + { + name: "front **", + matchName: "**foo", + matches: []string{"foo", "barfoo", "1foo", "afoo"}, + notMatches: []string{"foobar", "barfoobaz", "baz", "foo1", "fooz"}, + hasWildcard: true, + }, + { + name: "back **", + matchName: "foo**", + matches: []string{"foo", "foobar", "foo1", "fooz"}, + notMatches: []string{"barfoo", "barfoobaz", "baz", "1foo", "afoo"}, + hasWildcard: true, + }, + { + name: "front *?", + matchName: "*?foo", + matches: []string{"barfoo", "1foo", "afoo"}, + notMatches: []string{"foo", "foobar", "barfoobaz", "baz", "foo1", "fooz"}, + hasWildcard: true, + }, + { + name: "front ?*", + matchName: "?*foo", + matches: []string{"barfoo", "1foo", "afoo"}, + notMatches: 
[]string{"foo", "foobar", "barfoobaz", "baz", "foo1", "fooz"}, + hasWildcard: true, + }, + { + name: "back *?", + matchName: "foo*?", + matches: []string{"foobar", "foo1", "fooz"}, + notMatches: []string{"foo", "barfoo", "barfoobaz", "baz", "1foo", "afoo"}, + hasWildcard: true, + }, + { + name: "back ?*", + matchName: "foo?*", + matches: []string{"foobar", "foo1", "fooz"}, + notMatches: []string{"foo", "barfoo", "barfoobaz", "baz", "1foo", "afoo"}, + hasWildcard: true, + }, + { + name: "middle *", + matchName: "foo*bar", + matches: []string{"foobar", "foo1bar", "foomanybar"}, + notMatches: []string{"foo", "barfoo", "barfoobaz", "baz", "1foo", "afoo", "foo1", "fooz"}, + hasWildcard: true, + }, + { + name: "middle ?", + matchName: "foo?bar", + matches: []string{"foo1bar", "fooabar"}, + notMatches: []string{"foobar", "foo", "barfoo", "barfoobaz", "baz", "1foo", "afoo", "foo1", "fooz", "foomanybar"}, + hasWildcard: true, + }, + { + name: "meta chars", + matchName: ".+()|[]{}^$-_", + matches: []string{".+()|[]{}^$-_"}, // Note this is not a valid name. + notMatches: []string{"foobar", "foo", "barfoo", "barfoobaz", "baz", "1foo", "afoo", "foo1", "fooz", "foomanybar", "foo1bar", "fooabar"}, + hasWildcard: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + v, err := New(MatchInstrumentName(tt.matchName)) + require.NoError(t, err) + + t.Log(v.instrumentName.String()) + assert.Equal(t, tt.hasWildcard, v.hasWildcard) + for _, name := range tt.matches { + assert.Truef(t, v.matchName(name), "name: %s", name) + } + for _, name := range tt.notMatches { + assert.Falsef(t, v.matchName(name), "name: %s", name) + } + }) + } +} + +func TestViewAttributeFilterNoFilter(t *testing.T) { + v, err := New( + MatchInstrumentName("*"), + ) + require.NoError(t, err) + filter := v.AttributeFilter() + assert.Nil(t, filter) + + v, err = New( + MatchInstrumentName("*"), + WithFilterAttributes(), + ) + require.NoError(t, err) + filter = v.AttributeFilter() + assert.Nil(t, filter) + + v, err = New( + MatchInstrumentName("*"), + WithFilterAttributes([]attribute.Key{}...), + ) + require.NoError(t, err) + filter = v.AttributeFilter() + assert.Nil(t, filter) +} + +func TestViewAttributeFilter(t *testing.T) { + inputSet := attribute.NewSet( + attribute.String("foo", "bar"), + attribute.Int("power-level", 9001), + attribute.Float64("lifeUniverseEverything", 42.0), + ) + + tests := []struct { + name string + filter []attribute.Key + want attribute.Set + }{ + { + name: "Match 1", + filter: []attribute.Key{ + attribute.Key("power-level"), + }, + want: attribute.NewSet( + attribute.Int("power-level", 9001), + ), + }, + { + name: "Match 2", + filter: []attribute.Key{ + attribute.Key("foo"), + attribute.Key("lifeUniverseEverything"), + }, + want: attribute.NewSet( + attribute.Float64("lifeUniverseEverything", 42.0), + attribute.String("foo", "bar"), + ), + }, + { + name: "Don't match", + filter: []attribute.Key{ + attribute.Key("nothing"), + }, + want: attribute.NewSet(), + }, + { + name: "Match some", + filter: []attribute.Key{ + attribute.Key("power-level"), + attribute.Key("nothing"), + }, + want: attribute.NewSet( + attribute.Int("power-level", 9001), + ), + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + v, err := New( + MatchInstrumentName("*"), + WithFilterAttributes(tt.filter...), + ) + require.NoError(t, err) + filter := v.AttributeFilter() + require.NotNil(t, filter) + + got := filter(inputSet) + assert.Equal(t, got.Equivalent(), tt.want.Equivalent()) + }) + } +} + 
+func TestNewErrors(t *testing.T) { + tests := []struct { + name string + options []Option + }{ + { + name: "No Match Option", + options: []Option{}, + }, + { + name: "Match * with view name", + options: []Option{ + MatchInstrumentName("*"), + WithRename("newName"), + }, + }, + { + name: "Match expand * with view name", + options: []Option{ + MatchInstrumentName("old*"), + WithRename("newName"), + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := New(tt.options...) + + assert.Equal(t, View{}, got) + assert.Error(t, err) + }) + } +}
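A short sketch of the two validation rules TestNewErrors pins down: New requires at least one match Option, and a wildcard name match cannot be combined with WithRename, since many instruments would collapse into a single name. The error strings follow view.go above:

package main

import (
	"fmt"

	"go.opentelemetry.io/otel/sdk/metric/view"
)

func main() {
	// At least one match Option is required.
	_, err := view.New()
	fmt.Println(err) // must provide at least 1 match option

	// A wildcard name match combined with a rename is rejected.
	_, err = view.New(
		view.MatchInstrumentName("request-*"),
		view.WithRename("request"),
	)
	fmt.Println(err) // invalid view: view name specified for multiple instruments
}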