diff --git a/connector/elasticapmconnector/README.md b/connector/elasticapmconnector/README.md index 72fa5985f..af13b409c 100644 --- a/connector/elasticapmconnector/README.md +++ b/connector/elasticapmconnector/README.md @@ -39,11 +39,27 @@ By default, aggregated metrics will be exported without any client metadata. It is possible to propagate client metadata from input to exported metrics by specifying a list of metadata keys in `elasticapm::aggregation::metadata_keys`. +By default, the cardinality of aggregated metrics is limited. +Each limit defines a `max_cardinality`. Four limits can be configured: +- `elasticapm::aggregation::limit::resource`: configures the max cardinality of resources +- `elasticapm::aggregation::limit::scope`: configures the max cardinality of scopes within a resource +- `elasticapm::aggregation::limit::metric`: configures the max cardinality of metrics within a scope +- `elasticapm::aggregation::limit::datapoint`: configures the max cardinality of datapoints within a metric + ```yaml elasticapm: aggregation: directory: /path/to/aggregation/directory metadata_keys: [list, of, metadata, keys] + limit: + resource: + max_cardinality: 8000 + scope: + max_cardinality: 4000 + metric: + max_cardinality: 4000 + datapoint: + max_cardinality: 4000 ``` ### Metrics produced by the connector diff --git a/connector/elasticapmconnector/config.go b/connector/elasticapmconnector/config.go index fbc56d98b..35af4a16a 100644 --- a/connector/elasticapmconnector/config.go +++ b/connector/elasticapmconnector/config.go @@ -21,9 +21,10 @@ import ( "fmt" "time" - lsmconfig "github.com/elastic/opentelemetry-collector-components/processor/lsmintervalprocessor/config" signaltometricsconfig "github.com/open-telemetry/opentelemetry-collector-contrib/connector/signaltometricsconnector/config" "go.opentelemetry.io/collector/component" + + lsmconfig "github.com/elastic/opentelemetry-collector-components/processor/lsmintervalprocessor/config" ) var _ component.Config = (*Config)(nil) @@ -64,6 +65,27 @@ type AggregationConfig struct { // in all other cases -- using this configuration may lead to invalid behavior, // and will not be supported.
Intervals []time.Duration `mapstructure:"intervals"` + + // Limit holds optional cardinality limits for aggregated metrics + Limit AggregationLimitConfig `mapstructure:"limit"` +} + +type AggregationLimitConfig struct { + // ResourceLimit defines the max cardinality of resources + ResourceLimit LimitConfig `mapstructure:"resource"` + + // ScopeLimit defines the max cardinality of scopes within a resource + ScopeLimit LimitConfig `mapstructure:"scope"` + + // MetricLimit defines the max cardinality of metrics within a scope + MetricLimit LimitConfig `mapstructure:"metric"` + + // DatapointLimit defines the max cardinality of datapoints within a metric + DatapointLimit LimitConfig `mapstructure:"datapoint"` +} + +type LimitConfig struct { + MaxCardinality int64 `mapstructure:"max_cardinality"` } func (cfg Config) Validate() error { @@ -87,13 +109,48 @@ func (cfg Config) lsmConfig() *lsmconfig.Config { }, }) } + lsmConfig := &lsmconfig.Config{ Intervals: intervalsConfig, ExponentialHistogramMaxBuckets: 160, } + if cfg.Aggregation != nil { lsmConfig.Directory = cfg.Aggregation.Directory lsmConfig.MetadataKeys = cfg.Aggregation.MetadataKeys + lsmConfig.ResourceLimit = lsmconfig.LimitConfig{ + MaxCardinality: cfg.Aggregation.Limit.ResourceLimit.MaxCardinality, + Overflow: lsmconfig.OverflowConfig{ + Attributes: []lsmconfig.Attribute{ + {Key: "service.name", Value: "_other"}, // Specific attribute required for APM UI compatibility + {Key: "overflow", Value: "resource"}, + }, + }, + } + lsmConfig.ScopeLimit = lsmconfig.LimitConfig{ + MaxCardinality: cfg.Aggregation.Limit.ScopeLimit.MaxCardinality, + Overflow: lsmconfig.OverflowConfig{ + Attributes: []lsmconfig.Attribute{ + {Key: "overflow", Value: "scope"}, + }, + }, + } + lsmConfig.MetricLimit = lsmconfig.LimitConfig{ + MaxCardinality: cfg.Aggregation.Limit.MetricLimit.MaxCardinality, + Overflow: lsmconfig.OverflowConfig{ + Attributes: []lsmconfig.Attribute{ + {Key: "overflow", Value: "metric"}, + }, + }, + } + lsmConfig.DatapointLimit = lsmconfig.LimitConfig{ + MaxCardinality: cfg.Aggregation.Limit.DatapointLimit.MaxCardinality, + Overflow: lsmconfig.OverflowConfig{ + Attributes: []lsmconfig.Attribute{ + {Key: "overflow", Value: "datapoint"}, + }, + }, + } } return lsmConfig } diff --git a/connector/elasticapmconnector/config_test.go b/connector/elasticapmconnector/config_test.go index 41eca1261..5a54a2bd1 100644 --- a/connector/elasticapmconnector/config_test.go +++ b/connector/elasticapmconnector/config_test.go @@ -22,12 +22,13 @@ import ( "testing" "time" - "github.com/elastic/opentelemetry-collector-components/connector/elasticapmconnector/internal/metadata" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "go.opentelemetry.io/collector/component" "go.opentelemetry.io/collector/confmap/confmaptest" "go.opentelemetry.io/collector/confmap/xconfmap" + + "github.com/elastic/opentelemetry-collector-components/connector/elasticapmconnector/internal/metadata" ) func TestConfig(t *testing.T) { @@ -47,6 +48,20 @@ func TestConfig(t *testing.T) { Directory: "/path/to/aggregation/state", MetadataKeys: []string{"a", "B", "c"}, Intervals: []time.Duration{time.Second, time.Minute}, + Limit: AggregationLimitConfig{ + ResourceLimit: LimitConfig{ + MaxCardinality: 1, + }, + ScopeLimit: LimitConfig{ + MaxCardinality: 1, + }, + MetricLimit: LimitConfig{ + MaxCardinality: 1, + }, + DatapointLimit: LimitConfig{ + MaxCardinality: 1, + }, + }, }, }, }, diff
--git a/connector/elasticapmconnector/connector_test.go b/connector/elasticapmconnector/connector_test.go index e14dadfa3..ec1d69d28 100644 --- a/connector/elasticapmconnector/connector_test.go +++ b/connector/elasticapmconnector/connector_test.go @@ -24,7 +24,8 @@ import ( "path/filepath" "testing" - "github.com/elastic/opentelemetry-collector-components/connector/elasticapmconnector/internal/metadata" + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/golden" + "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest/pmetrictest" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "go.opentelemetry.io/collector/client" @@ -36,25 +37,43 @@ import ( "go.opentelemetry.io/collector/pdata/plog" "go.opentelemetry.io/collector/pdata/pmetric" - "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/golden" - "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest/pmetrictest" + "github.com/elastic/opentelemetry-collector-components/connector/elasticapmconnector/internal/metadata" ) var update = flag.Bool("update", false, "Update golden files") func TestConnector_LogsToMetrics(t *testing.T) { + oneCardinalityLimitConfig := LimitConfig{ + MaxCardinality: 1, + } + oneCardinalityAggregationConfig := Config{ + Aggregation: &AggregationConfig{ + Limit: AggregationLimitConfig{ + ResourceLimit: oneCardinalityLimitConfig, + ScopeLimit: oneCardinalityLimitConfig, + MetricLimit: oneCardinalityLimitConfig, + DatapointLimit: oneCardinalityLimitConfig, + }, + }, + } + testCases := []struct { name string + cfg *Config }{ - {name: "logs/service_summary"}, + // output should remain the same for all provided configs + {name: "logs/service_summary", cfg: &Config{}}, + {name: "logs/service_summary", cfg: &oneCardinalityAggregationConfig}, + + // output should show overflow behavior + {name: "logs/service_summary_overflow", cfg: &oneCardinalityAggregationConfig}, } for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { nextMetrics := &consumertest.MetricsSink{} - cfg := &Config{} - l2m := newLogsToMetrics(t, connectortest.NewNopSettings(metadata.Type), cfg, nextMetrics) + l2m := newLogsToMetrics(t, connectortest.NewNopSettings(metadata.Type), tc.cfg, nextMetrics) dir := filepath.Join("testdata", tc.name) input, err := golden.ReadLogs(filepath.Join(dir, "input.yaml")) @@ -76,18 +95,37 @@ func TestConnector_LogsToMetrics(t *testing.T) { } func TestConnector_MetricsToMetrics(t *testing.T) { + oneCardinalityLimitConfig := LimitConfig{ + MaxCardinality: 1, + } + oneCardinalityAggregationConfig := Config{ + Aggregation: &AggregationConfig{ + Limit: AggregationLimitConfig{ + ResourceLimit: oneCardinalityLimitConfig, + ScopeLimit: oneCardinalityLimitConfig, + MetricLimit: oneCardinalityLimitConfig, + DatapointLimit: oneCardinalityLimitConfig, + }, + }, + } + testCases := []struct { name string + cfg *Config }{ - {name: "metrics/service_summary"}, + // output should remain the same for all provided configs + {name: "metrics/service_summary", cfg: &Config{}}, + {name: "metrics/service_summary", cfg: &oneCardinalityAggregationConfig}, + + // output should show overflow behavior + {name: "metrics/service_summary_overflow", cfg: &oneCardinalityAggregationConfig}, } for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { nextMetrics := &consumertest.MetricsSink{} - cfg := &Config{}
- m2m := newMetricsConnector(t, connectortest.NewNopSettings(metadata.Type), cfg, nextMetrics) + m2m := newMetricsConnector(t, connectortest.NewNopSettings(metadata.Type), tc.cfg, nextMetrics) dir := filepath.Join("testdata", tc.name) input, err := golden.ReadMetrics(filepath.Join(dir, "input.yaml")) @@ -109,19 +147,53 @@ func TestConnector_MetricsToMetrics(t *testing.T) { } func TestConnector_TracesToMetrics(t *testing.T) { + fourCardinalityLimitConfig := LimitConfig{ + MaxCardinality: 4, // minimum limit that prevents overflow behavior + } + fourCardinalityAggregationConfig := Config{ + Aggregation: &AggregationConfig{ + Limit: AggregationLimitConfig{ + ResourceLimit: fourCardinalityLimitConfig, + ScopeLimit: fourCardinalityLimitConfig, + MetricLimit: fourCardinalityLimitConfig, + DatapointLimit: fourCardinalityLimitConfig, + }, + }, + } + + oneCardinalityLimitConfig := LimitConfig{ + MaxCardinality: 1, + } + oneCardinalityAggregationConfig := Config{ + Aggregation: &AggregationConfig{ + Limit: AggregationLimitConfig{ + ResourceLimit: oneCardinalityLimitConfig, + ScopeLimit: oneCardinalityLimitConfig, + MetricLimit: oneCardinalityLimitConfig, + DatapointLimit: oneCardinalityLimitConfig, + }, + }, + } + testCases := []struct { name string + cfg *Config }{ - {name: "traces/transaction_metrics"}, - {name: "traces/span_metrics"}, + // output should remain the same for all provided configs + {name: "traces/transaction_metrics", cfg: &Config{}}, + {name: "traces/transaction_metrics", cfg: &fourCardinalityAggregationConfig}, + {name: "traces/span_metrics", cfg: &Config{}}, + {name: "traces/span_metrics", cfg: &fourCardinalityAggregationConfig}, + + // output should show overflow behavior + {name: "traces/span_metrics_overflow", cfg: &oneCardinalityAggregationConfig}, } for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { nextMetrics := &consumertest.MetricsSink{} - cfg := &Config{} - t2m := newTracesConnector(t, connectortest.NewNopSettings(metadata.Type), cfg, nextMetrics) + t2m := newTracesConnector(t, connectortest.NewNopSettings(metadata.Type), tc.cfg, nextMetrics) dir := filepath.Join("testdata", tc.name) input, err := golden.ReadTraces(filepath.Join(dir, "input.yaml")) @@ -159,6 +231,7 @@ func TestConnector_AggregationDirectory(t *testing.T) { require.NoError(t, err) require.NotEmpty(t, entries) } + func TestConnector_AggregationMetadataKeys(t *testing.T) { cfg := &Config{Aggregation: &AggregationConfig{MetadataKeys: []string{"k"}}} diff --git a/connector/elasticapmconnector/factory.go b/connector/elasticapmconnector/factory.go index 41e869d1a..600ceb2e8 100644 --- a/connector/elasticapmconnector/factory.go +++ b/connector/elasticapmconnector/factory.go @@ -48,7 +48,24 @@ func NewFactory() connector.Factory { // createDefaultConfig creates the default configuration.
func createDefaultConfig() component.Config { - return &Config{} + return &Config{ + Aggregation: &AggregationConfig{ + Limit: AggregationLimitConfig{ + ResourceLimit: LimitConfig{ + MaxCardinality: 8000, + }, + ScopeLimit: LimitConfig{ + MaxCardinality: 4000, + }, + MetricLimit: LimitConfig{ + MaxCardinality: 4000, + }, + DatapointLimit: LimitConfig{ + MaxCardinality: 4000, + }, + }, + }, + } } func createLogsToMetrics( diff --git a/connector/elasticapmconnector/testdata/config/full.yaml b/connector/elasticapmconnector/testdata/config/full.yaml index c36ef82c0..a0f60c454 100644 --- a/connector/elasticapmconnector/testdata/config/full.yaml +++ b/connector/elasticapmconnector/testdata/config/full.yaml @@ -3,3 +3,12 @@ elasticapm: directory: /path/to/aggregation/state metadata_keys: [a, B, c] intervals: [1s, 1m] + limit: + resource: + max_cardinality: 1 + scope: + max_cardinality: 1 + metric: + max_cardinality: 1 + datapoint: + max_cardinality: 1 \ No newline at end of file diff --git a/connector/elasticapmconnector/testdata/logs/service_summary_overflow/aggregated_metrics.yaml b/connector/elasticapmconnector/testdata/logs/service_summary_overflow/aggregated_metrics.yaml new file mode 100644 index 000000000..d8abaa55e --- /dev/null +++ b/connector/elasticapmconnector/testdata/logs/service_summary_overflow/aggregated_metrics.yaml @@ -0,0 +1,139 @@ +resourceMetrics: + - resource: + attributes: + - key: service.name + value: + stringValue: foo + - key: deployment.environment + value: + stringValue: qa + - key: telemetry.sdk.language + value: + stringValue: go + - key: agent.name + value: + stringValue: unknown + scopeMetrics: + - scope: + name: github.com/open-telemetry/opentelemetry-collector-contrib/connector/signaltometricsconnector + metrics: + - name: service_summary + sum: + aggregationTemporality: 1 + dataPoints: + - asInt: 1 + attributes: + - key: data_stream.dataset + value: + stringValue: service_summary.1m + - key: metricset.name + value: + stringValue: service_summary + - key: metricset.interval + value: + stringValue: "1m" + - key: processor.event + value: + stringValue: metric + - name: service_summary + sum: + aggregationTemporality: 1 + dataPoints: + - asInt: 1 + attributes: + - key: data_stream.dataset + value: + stringValue: service_summary.10m + - key: metricset.name + value: + stringValue: service_summary + - key: metricset.interval + value: + stringValue: "10m" + - key: processor.event + value: + stringValue: metric + - name: service_summary + sum: + aggregationTemporality: 1 + dataPoints: + - asInt: 1 + attributes: + - key: data_stream.dataset + value: + stringValue: service_summary.60m + - key: metricset.name + value: + stringValue: service_summary + - key: metricset.interval + value: + stringValue: "60m" + - key: processor.event + value: + stringValue: metric + - resource: + attributes: + - key: service.name + value: + stringValue: "_other" + - key: overflow + value: + stringValue: resource + scopeMetrics: + - scope: + name: github.com/open-telemetry/opentelemetry-collector-contrib/connector/signaltometricsconnector + metrics: + - name: service_summary + sum: + aggregationTemporality: 1 + dataPoints: + - asInt: 1 + attributes: + - key: data_stream.dataset + value: + stringValue: service_summary.1m + - key: metricset.name + value: + stringValue: service_summary + - key: metricset.interval + value: + stringValue: "1m" + - key: processor.event + value: + stringValue: metric + - name: service_summary + sum: + aggregationTemporality: 1 
+ dataPoints: + - asInt: 1 + attributes: + - key: data_stream.dataset + value: + stringValue: service_summary.10m + - key: metricset.name + value: + stringValue: service_summary + - key: metricset.interval + value: + stringValue: "10m" + - key: processor.event + value: + stringValue: metric + - name: service_summary + sum: + aggregationTemporality: 1 + dataPoints: + - asInt: 1 + attributes: + - key: data_stream.dataset + value: + stringValue: service_summary.60m + - key: metricset.name + value: + stringValue: service_summary + - key: metricset.interval + value: + stringValue: "60m" + - key: processor.event + value: + stringValue: metric diff --git a/connector/elasticapmconnector/testdata/logs/service_summary_overflow/input.yaml b/connector/elasticapmconnector/testdata/logs/service_summary_overflow/input.yaml new file mode 100644 index 000000000..55e57212f --- /dev/null +++ b/connector/elasticapmconnector/testdata/logs/service_summary_overflow/input.yaml @@ -0,0 +1,38 @@ +resourceLogs: + - resource: + attributes: + - key: service.name + value: + stringValue: foo + - key: deployment.environment + value: + stringValue: qa + - key: telemetry.sdk.language + value: + stringValue: go + scopeLogs: + - scope: + name: my.library1 + logRecords: + - body: + stringValue: This is a log message + timeUnixNano: "1581452773000000789" + - resource: + attributes: + - key: service.name + value: + stringValue: bar + - key: deployment.environment + value: + stringValue: qa + - key: telemetry.sdk.language + value: + stringValue: go + scopeLogs: + - scope: + name: my.library1 + logRecords: + - body: + stringValue: This is a log message + timeUnixNano: "1581452773000000789" + diff --git a/connector/elasticapmconnector/testdata/metrics/service_summary_overflow/aggregated_metrics.yaml b/connector/elasticapmconnector/testdata/metrics/service_summary_overflow/aggregated_metrics.yaml new file mode 100644 index 000000000..6e86394b0 --- /dev/null +++ b/connector/elasticapmconnector/testdata/metrics/service_summary_overflow/aggregated_metrics.yaml @@ -0,0 +1,145 @@ +resourceMetrics: + - resource: + attributes: + - key: deployment.environment + value: + stringValue: qa + - key: service.name + value: + stringValue: foo + - key: telemetry.sdk.language + value: + stringValue: go + - key: agent.name + value: + stringValue: unknown + scopeMetrics: + - metrics: + - name: service_summary + sum: + aggregationTemporality: 1 + dataPoints: + - asInt: "1" + attributes: + - key: data_stream.dataset + value: + stringValue: service_summary.1m + - key: metricset.interval + value: + stringValue: 1m + - key: metricset.name + value: + stringValue: service_summary + - key: processor.event + value: + stringValue: metric + timeUnixNano: "1000000" + - name: service_summary + sum: + aggregationTemporality: 1 + dataPoints: + - asInt: "1" + attributes: + - key: data_stream.dataset + value: + stringValue: service_summary.10m + - key: metricset.interval + value: + stringValue: 10m + - key: metricset.name + value: + stringValue: service_summary + - key: processor.event + value: + stringValue: metric + timeUnixNano: "1000000" + - name: service_summary + sum: + aggregationTemporality: 1 + dataPoints: + - asInt: "1" + attributes: + - key: data_stream.dataset + value: + stringValue: service_summary.60m + - key: metricset.interval + value: + stringValue: 60m + - key: metricset.name + value: + stringValue: service_summary + - key: processor.event + value: + stringValue: metric + timeUnixNano: "1000000" + scope: + name: 
github.com/open-telemetry/opentelemetry-collector-contrib/connector/signaltometricsconnector + - resource: + attributes: + - key: service.name + value: + stringValue: "_other" + - key: overflow + value: + stringValue: resource + scopeMetrics: + - metrics: + - name: service_summary + sum: + aggregationTemporality: 1 + dataPoints: + - asInt: "1" + attributes: + - key: data_stream.dataset + value: + stringValue: service_summary.1m + - key: metricset.interval + value: + stringValue: 1m + - key: metricset.name + value: + stringValue: service_summary + - key: processor.event + value: + stringValue: metric + timeUnixNano: "1000000" + - name: service_summary + sum: + aggregationTemporality: 1 + dataPoints: + - asInt: "1" + attributes: + - key: data_stream.dataset + value: + stringValue: service_summary.10m + - key: metricset.interval + value: + stringValue: 10m + - key: metricset.name + value: + stringValue: service_summary + - key: processor.event + value: + stringValue: metric + timeUnixNano: "1000000" + - name: service_summary + sum: + aggregationTemporality: 1 + dataPoints: + - asInt: "1" + attributes: + - key: data_stream.dataset + value: + stringValue: service_summary.60m + - key: metricset.interval + value: + stringValue: 60m + - key: metricset.name + value: + stringValue: service_summary + - key: processor.event + value: + stringValue: metric + timeUnixNano: "1000000" + scope: + name: github.com/open-telemetry/opentelemetry-collector-contrib/connector/signaltometricsconnector diff --git a/connector/elasticapmconnector/testdata/metrics/service_summary_overflow/input.yaml b/connector/elasticapmconnector/testdata/metrics/service_summary_overflow/input.yaml new file mode 100644 index 000000000..44b311d44 --- /dev/null +++ b/connector/elasticapmconnector/testdata/metrics/service_summary_overflow/input.yaml @@ -0,0 +1,37 @@ +resourceMetrics: + - resource: + attributes: + - key: service.name + value: + stringValue: foo + - key: deployment.environment + value: + stringValue: qa + - key: telemetry.sdk.language + value: + stringValue: go + scopeMetrics: + - scope: {} + metrics: + - name: foo + sum: + dataPoints: + - asInt: 1 + - resource: + attributes: + - key: service.name + value: + stringValue: bar + - key: deployment.environment + value: + stringValue: qa + - key: telemetry.sdk.language + value: + stringValue: go + scopeMetrics: + - scope: {} + metrics: + - name: foo + sum: + dataPoints: + - asInt: 1 \ No newline at end of file diff --git a/connector/elasticapmconnector/testdata/traces/span_metrics_overflow/aggregated_metrics.yaml b/connector/elasticapmconnector/testdata/traces/span_metrics_overflow/aggregated_metrics.yaml new file mode 100644 index 000000000..32efa794b --- /dev/null +++ b/connector/elasticapmconnector/testdata/traces/span_metrics_overflow/aggregated_metrics.yaml @@ -0,0 +1,133 @@ +resourceMetrics: + - resource: + attributes: + - key: agent.name + value: + stringValue: otlp/go + - key: deployment.environment + value: + stringValue: qa + - key: service.name + value: + stringValue: foo + - key: telemetry.sdk.language + value: + stringValue: go + scopeMetrics: + - metrics: + - description: Overflow metric count due to metric limit + name: _overflow_metric + sum: + aggregationTemporality: 1 + dataPoints: + - asInt: "2" + attributes: + - key: data_stream.dataset + value: + stringValue: .1m + - key: metricset.interval + value: + stringValue: 1m + - key: processor.event + value: + stringValue: metric + - key: overflow + value: + stringValue: 
metric + - description: Overflow metric count due to metric limit + name: _overflow_metric + sum: + aggregationTemporality: 1 + dataPoints: + - asInt: "2" + attributes: + - key: data_stream.dataset + value: + stringValue: .10m + - key: metricset.interval + value: + stringValue: 10m + - key: processor.event + value: + stringValue: metric + - key: overflow + value: + stringValue: metric + - description: Overflow metric count due to metric limit + name: _overflow_metric + sum: + aggregationTemporality: 1 + dataPoints: + - asInt: "2" + attributes: + - key: data_stream.dataset + value: + stringValue: .60m + - key: metricset.interval + value: + stringValue: 60m + - key: processor.event + value: + stringValue: metric + - key: overflow + value: + stringValue: metric + - name: service_summary + sum: + aggregationTemporality: 1 + dataPoints: + - asInt: "2" + attributes: + - key: data_stream.dataset + value: + stringValue: service_summary.1m + - key: metricset.interval + value: + stringValue: 1m + - key: metricset.name + value: + stringValue: service_summary + - key: processor.event + value: + stringValue: metric + timeUnixNano: "1000000" + - name: service_summary + sum: + aggregationTemporality: 1 + dataPoints: + - asInt: "2" + attributes: + - key: data_stream.dataset + value: + stringValue: service_summary.10m + - key: metricset.interval + value: + stringValue: 10m + - key: metricset.name + value: + stringValue: service_summary + - key: processor.event + value: + stringValue: metric + timeUnixNano: "1000000" + - name: service_summary + sum: + aggregationTemporality: 1 + dataPoints: + - asInt: "2" + attributes: + - key: data_stream.dataset + value: + stringValue: service_summary.60m + - key: metricset.interval + value: + stringValue: 60m + - key: metricset.name + value: + stringValue: service_summary + - key: processor.event + value: + stringValue: metric + timeUnixNano: "1000000" + scope: + name: github.com/open-telemetry/opentelemetry-collector-contrib/connector/signaltometricsconnector diff --git a/connector/elasticapmconnector/testdata/traces/span_metrics_overflow/input.yaml b/connector/elasticapmconnector/testdata/traces/span_metrics_overflow/input.yaml new file mode 100644 index 000000000..b06f3b10b --- /dev/null +++ b/connector/elasticapmconnector/testdata/traces/span_metrics_overflow/input.yaml @@ -0,0 +1,69 @@ +resourceSpans: + - resource: + attributes: + - key: service.name + value: + stringValue: foo + - key: deployment.environment + value: + stringValue: qa + - key: telemetry.sdk.language + value: + stringValue: go + - key: agent.name + value: + stringValue: otlp/go + - key: agent.version + value: + stringValue: unknown + scopeSpans: + - scope: {} + spans: + - attributes: + - key: db.name + value: + stringValue: main + - key: db.system + value: + stringValue: mysql + - key: event.outcome + value: + stringValue: success + - key: event.success_count + value: + intValue: 1 + - key: processor.event + value: + stringValue: span + - key: service.target.name + value: + stringValue: main + - key: service.target.type + value: + stringValue: mysql + - key: span.duration.us + value: + intValue: 500000 + - key: span.name + value: + stringValue: th-value-8 + - key: span.representative_count + value: + doubleValue: 1.0 # Should be 2, elastictrace doesn't handle ot=th at time of writing + - key: span.type + value: + stringValue: db + - key: span.destination.service.resource + value: + stringValue: mysql + - key: span.subtype + value: + stringValue: mysql + - key: timestamp.us + 
value: + intValue: 1581452772000000 + endTimeUnixNano: "1581452772500000804" + name: th-value-8 # represents 2 sampled spans + parentSpanId: "bcff497b5a47310f" + startTimeUnixNano: "1581452772000000381" + traceState: "ot=th:8"
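
---

For reviewers unfamiliar with the overflow semantics exercised by the new testdata, here is a minimal, self-contained Go sketch of the idea behind these limits: once the number of distinct resources exceeds `max_cardinality`, data for any further resources is folded into a single overflow bucket, which the connector exports with the configured attributes (`service.name: _other`, `overflow: resource`). The `aggregator`, `limit`, and `observe` names are hypothetical illustrations only, not part of the lsmintervalprocessor API.

```go
package main

import "fmt"

// limit mirrors the max_cardinality knob introduced in this diff.
type limit struct {
	maxCardinality int
}

// aggregator is a hypothetical stand-in for the processor's bookkeeping;
// it is not the real lsmintervalprocessor implementation.
type aggregator struct {
	resourceLimit limit
	tracked       map[string]int // service.name -> aggregated count
	overflow      int            // count folded into the "_other" resource
}

// observe records one input for a resource, overflowing once the resource
// cardinality limit has been reached for previously unseen resources.
func (a *aggregator) observe(serviceName string) {
	if _, ok := a.tracked[serviceName]; ok {
		a.tracked[serviceName]++
		return
	}
	if len(a.tracked) >= a.resourceLimit.maxCardinality {
		// A new resource beyond the limit: its data is aggregated into a
		// single overflow resource, exported with the attributes
		// service.name="_other" and overflow="resource".
		a.overflow++
		return
	}
	a.tracked[serviceName] = 1
}

func main() {
	a := &aggregator{
		resourceLimit: limit{maxCardinality: 1},
		tracked:       map[string]int{},
	}
	// Two distinct services against a resource limit of one, as in the
	// logs/service_summary_overflow test case above.
	for _, svc := range []string{"foo", "bar"} {
		a.observe(svc)
	}
	fmt.Println("tracked resources:", a.tracked) // map[foo:1]
	fmt.Println("overflow count:", a.overflow)   // 1 -> exported as service.name="_other"
}
```

With a resource limit of one and inputs for services `foo` and `bar`, this reproduces the shape of the `logs/service_summary_overflow` golden file: one concrete resource plus one `_other` overflow resource carrying the same aggregated metrics.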