-
Notifications
You must be signed in to change notification settings - Fork 692
Generator performance #4232
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Generator performance #4232
Changes from all commits
11f1f4a
dc9a05b
a55131f
1e654b4
1a761d7
4f6115d
6687d1a
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -2,16 +2,24 @@ package generator | |
|
|
||
| import ( | ||
| "context" | ||
| "flag" | ||
| "fmt" | ||
| "os" | ||
| "path/filepath" | ||
| "runtime" | ||
| "strconv" | ||
| "testing" | ||
| "time" | ||
|
|
||
| "github.com/go-kit/log" | ||
| "github.com/grafana/dskit/services" | ||
| "github.com/grafana/tempo/modules/generator/processor/spanmetrics" | ||
| "github.com/grafana/tempo/modules/generator/storage" | ||
| "github.com/grafana/tempo/modules/overrides" | ||
| "github.com/grafana/tempo/pkg/tempopb" | ||
| common_v1 "github.com/grafana/tempo/pkg/tempopb/common/v1" | ||
| trace_v1 "github.com/grafana/tempo/pkg/tempopb/trace/v1" | ||
| "github.com/grafana/tempo/pkg/util/test" | ||
| "github.com/prometheus/client_golang/prometheus" | ||
| "github.com/prometheus/common/model" | ||
| "github.com/stretchr/testify/assert" | ||
|
|
@@ -129,3 +137,140 @@ func (l testLogger) Log(keyvals ...interface{}) error { | |
| l.t.Log(keyvals...) | ||
| return nil | ||
| } | ||
|
|
||
| func BenchmarkPushSpans(b *testing.B) { | ||
| var ( | ||
| tenant = "test-tenant" | ||
| reg = prometheus.NewRegistry() | ||
| ctx = context.Background() | ||
| log = log.NewNopLogger() | ||
| cfg = &Config{} | ||
|
|
||
| walcfg = &storage.Config{ | ||
| Path: b.TempDir(), | ||
| } | ||
|
|
||
| o = &mockOverrides{ | ||
| processors: map[string]struct{}{ | ||
| "span-metrics": {}, | ||
| "service-graphs": {}, | ||
| }, | ||
| spanMetricsEnableTargetInfo: true, | ||
| spanMetricsTargetInfoExcludedDimensions: []string{"excluded}"}, | ||
| } | ||
| ) | ||
|
|
||
| cfg.RegisterFlagsAndApplyDefaults("", &flag.FlagSet{}) | ||
|
|
||
| wal, err := storage.New(walcfg, o, tenant, reg, log) | ||
| require.NoError(b, err) | ||
|
|
||
| inst, err := newInstance(cfg, tenant, o, wal, reg, log, nil, nil) | ||
| require.NoError(b, err) | ||
| defer inst.shutdown() | ||
|
|
||
| req := &tempopb.PushSpansRequest{ | ||
| Batches: []*trace_v1.ResourceSpans{ | ||
| test.MakeBatch(100, nil), | ||
| test.MakeBatch(100, nil), | ||
| test.MakeBatch(100, nil), | ||
| test.MakeBatch(100, nil), | ||
| }, | ||
| } | ||
|
|
||
| // Add more resource attributes to get closer to real data | ||
| // Add integer to increase cardinality. | ||
| // Currently this is about 80 active series | ||
| // TODO - Get more series | ||
| for i, b := range req.Batches { | ||
| b.Resource.Attributes = append(b.Resource.Attributes, []*common_v1.KeyValue{ | ||
| {Key: "k8s.cluster.name", Value: &common_v1.AnyValue{Value: &common_v1.AnyValue_StringValue{StringValue: "test" + strconv.Itoa(i)}}}, | ||
| {Key: "k8s.namespace.name", Value: &common_v1.AnyValue{Value: &common_v1.AnyValue_StringValue{StringValue: "test" + strconv.Itoa(i)}}}, | ||
| {Key: "k8s.node.name", Value: &common_v1.AnyValue{Value: &common_v1.AnyValue_StringValue{StringValue: "test" + strconv.Itoa(i)}}}, | ||
| {Key: "k8s.pod.ip", Value: &common_v1.AnyValue{Value: &common_v1.AnyValue_StringValue{StringValue: "test" + strconv.Itoa(i)}}}, | ||
| {Key: "k8s.pod.name", Value: &common_v1.AnyValue{Value: &common_v1.AnyValue_StringValue{StringValue: "test" + strconv.Itoa(i)}}}, | ||
| {Key: "excluded", Value: &common_v1.AnyValue{Value: &common_v1.AnyValue_StringValue{StringValue: "test" + strconv.Itoa(i)}}}, | ||
| }...) | ||
| } | ||
|
|
||
| b.ResetTimer() | ||
| for i := 0; i < b.N; i++ { | ||
| inst.pushSpans(ctx, req) | ||
| } | ||
|
|
||
| b.StopTimer() | ||
| runtime.GC() | ||
|
Contributor
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Why force a GC after the benchmark has been timed? Is this to avoid impact on later benchmarks, or to get accurate memory summary below?
Contributor
Author
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. In this case I was trying to see if the benchmarks could help measure inuse memory, so it is recording |
||
| mem := runtime.MemStats{} | ||
| runtime.ReadMemStats(&mem) | ||
| b.ReportMetric(float64(mem.HeapInuse), "heap_in_use") | ||
| } | ||
|
|
||
| func BenchmarkCollect(b *testing.B) { | ||
| var ( | ||
| tenant = "test-tenant" | ||
| reg = prometheus.NewRegistry() | ||
| ctx = context.Background() | ||
| log = log.NewNopLogger() | ||
| cfg = &Config{} | ||
|
|
||
| walcfg = &storage.Config{ | ||
| Path: b.TempDir(), | ||
| } | ||
|
|
||
| o = &mockOverrides{ | ||
| processors: map[string]struct{}{ | ||
| "span-metrics": {}, | ||
| "service-graphs": {}, | ||
| }, | ||
| spanMetricsDimensions: []string{"k8s.cluster.name", "k8s.namespace.name"}, | ||
| spanMetricsEnableTargetInfo: true, | ||
| spanMetricsTargetInfoExcludedDimensions: []string{"excluded}"}, | ||
| // nativeHistograms: overrides.HistogramMethodBoth, | ||
| } | ||
| ) | ||
|
|
||
| cfg.RegisterFlagsAndApplyDefaults("", &flag.FlagSet{}) | ||
|
|
||
| wal, err := storage.New(walcfg, o, tenant, reg, log) | ||
| require.NoError(b, err) | ||
|
|
||
| inst, err := newInstance(cfg, tenant, o, wal, reg, log, nil, nil) | ||
| require.NoError(b, err) | ||
| defer inst.shutdown() | ||
|
|
||
| req := &tempopb.PushSpansRequest{ | ||
| Batches: []*trace_v1.ResourceSpans{ | ||
| test.MakeBatch(100, nil), | ||
| test.MakeBatch(100, nil), | ||
| test.MakeBatch(100, nil), | ||
| test.MakeBatch(100, nil), | ||
| }, | ||
| } | ||
|
|
||
| // Add more resource attributes to get closer to real data | ||
| // Add integer to increase cardinality. | ||
| // Currently this is about 80 active series | ||
| // TODO - Get more series | ||
| for i, b := range req.Batches { | ||
| b.Resource.Attributes = append(b.Resource.Attributes, []*common_v1.KeyValue{ | ||
| {Key: "k8s.cluster.name", Value: &common_v1.AnyValue{Value: &common_v1.AnyValue_StringValue{StringValue: "test" + strconv.Itoa(i)}}}, | ||
| {Key: "k8s.namespace.name", Value: &common_v1.AnyValue{Value: &common_v1.AnyValue_StringValue{StringValue: "test" + strconv.Itoa(i)}}}, | ||
| {Key: "k8s.node.name", Value: &common_v1.AnyValue{Value: &common_v1.AnyValue_StringValue{StringValue: "test" + strconv.Itoa(i)}}}, | ||
| {Key: "k8s.pod.ip", Value: &common_v1.AnyValue{Value: &common_v1.AnyValue_StringValue{StringValue: "test" + strconv.Itoa(i)}}}, | ||
| {Key: "k8s.pod.name", Value: &common_v1.AnyValue{Value: &common_v1.AnyValue_StringValue{StringValue: "test" + strconv.Itoa(i)}}}, | ||
| {Key: "excluded", Value: &common_v1.AnyValue{Value: &common_v1.AnyValue_StringValue{StringValue: "test" + strconv.Itoa(i)}}}, | ||
| }...) | ||
| } | ||
| inst.pushSpans(ctx, req) | ||
|
|
||
| b.ResetTimer() | ||
| for i := 0; i < b.N; i++ { | ||
| inst.registry.CollectMetrics(ctx) | ||
| } | ||
|
|
||
| b.StopTimer() | ||
| runtime.GC() | ||
| mem := runtime.MemStats{} | ||
| runtime.ReadMemStats(&mem) | ||
| b.ReportMetric(float64(mem.HeapInuse), "heap_in_use") | ||
| } | ||
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more.
I love the full stack benchmark. This is something we might want to do for other components as well.