forked from grafana/tempo
-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathconfig.go
More file actions
160 lines (137 loc) · 5.57 KB
/
config.go
File metadata and controls
160 lines (137 loc) · 5.57 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
package frontend
import (
"flag"
"time"
"github.com/go-kit/log"
"github.com/prometheus/client_golang/prometheus"
"github.com/grafana/tempo/modules/frontend/pipeline"
v1 "github.com/grafana/tempo/modules/frontend/v1"
"github.com/grafana/tempo/pkg/usagestats"
)
// statVersion is a usage-stats string reporting the frontend version;
// InitFrontend sets it to "v1".
var statVersion = usagestats.NewString("frontend_version")
// Config is the overall configuration for the query-frontend module.
// The embedded v1.Config is inlined, so its YAML keys appear at this level.
type Config struct {
	Config                    v1.Config              `yaml:",inline"`
	MaxRetries                int                    `yaml:"max_retries,omitempty"`
	Search                    SearchConfig           `yaml:"search"`
	TraceByID                 TraceByIDConfig        `yaml:"trace_by_id"`
	Metrics                   MetricsConfig          `yaml:"metrics"`
	MultiTenantQueriesEnabled bool                   `yaml:"multi_tenant_queries_enabled"`
	ResponseConsumers         int                    `yaml:"response_consumers"`
	Weights                   pipeline.WeightsConfig `yaml:"weights"`
	MCPServer                 MCPServerConfig        `yaml:"mcp_server"`

	// APITimeout is the maximum time limit that tempo will work on an api
	// request. This includes both grpc and http requests and applies to all
	// "api" frontend query endpoints such as traceql, tag search, tag value
	// search, trace by id and all streaming gRPC endpoints. 0 disables it.
	APITimeout time.Duration `yaml:"api_timeout,omitempty"`

	// URLDenyList is a list of regexes for black listing requests; these
	// apply for every request regardless of the endpoint.
	URLDenyList []string `yaml:"url_deny_list,omitempty"`

	// MaxQueryExpressionSizeBytes is the maximum allowed size of the raw
	// TraceQL query expression, in bytes.
	MaxQueryExpressionSizeBytes int `yaml:"max_query_expression_size_bytes,omitempty"`

	// AllowedHeaders lists the headers allowed through the HTTP pipeline.
	// Everything else will be stripped.
	AllowedHeaders []string `yaml:"allowed_headers,omitempty"`

	// RF1After specifies the time after which RF1 logic is applied.
	RF1After time.Time `yaml:"rf1_after" category:"advanced"`

	// QueryEndCutoff prevents querying incomplete recent data.
	QueryEndCutoff time.Duration `yaml:"query_end_cutoff,omitempty"`
}
// MCPServerConfig controls the frontend's MCP server.
type MCPServerConfig struct {
	// Enabled turns the MCP server on; it defaults to off and must be
	// enabled explicitly (see RegisterFlagsAndApplyDefaults).
	Enabled bool `yaml:"enabled"`
}
// SearchConfig configures the search path of the frontend. Sharder and
// SLO are inlined so their YAML keys appear at this level.
type SearchConfig struct {
	Timeout     time.Duration       `yaml:"timeout,omitempty"`
	Sharder     SearchSharderConfig `yaml:",inline"`
	SLO         SLOConfig           `yaml:",inline"`
	MetadataSLO SLOConfig           `yaml:"metadata_slo,omitempty"`
}
// TraceByIDConfig configures trace-by-id lookups.
type TraceByIDConfig struct {
	QueryShards      int       `yaml:"query_shards,omitempty"`
	ConcurrentShards int       `yaml:"concurrent_shards,omitempty"`
	SLO              SLOConfig `yaml:",inline"`

	// RF1After specifies the time after which RF1 logic is applied,
	// injected by the configuration or determined at runtime based on
	// search request parameters. Not serialized to YAML.
	RF1After time.Time `yaml:"-"`
}
// MetricsConfig configures the metrics (query-range) path of the
// frontend. Sharder and SLO are inlined at this YAML level.
type MetricsConfig struct {
	Sharder      QueryRangeSharderConfig `yaml:",inline"`
	SLO          SLOConfig               `yaml:",inline"`
	MaxIntervals uint64                  `yaml:"max_intervals,omitempty"`
}
type SLOConfig struct {
DurationSLO time.Duration `yaml:"duration_slo,omitempty"`
ThroughputBytesSLO float64 `yaml:"throughput_bytes_slo,omitempty"`
}
// RegisterFlagsAndApplyDefaults populates cfg with the frontend's default
// configuration. The prefix and flag-set arguments are currently unused:
// no flags are registered here.
func (cfg *Config) RegisterFlagsAndApplyDefaults(string, *flag.FlagSet) {
	// Zero-valued SLO shared by the search, trace-by-id and metrics paths.
	defaultSLO := SLOConfig{}

	cfg.Config.MaxOutstandingPerTenant = 2000
	cfg.Config.MaxBatchSize = 7
	cfg.MaxRetries = 2
	cfg.ResponseConsumers = 10

	cfg.Search = SearchConfig{
		Sharder: SearchSharderConfig{
			QueryBackendAfter:      15 * time.Minute,
			QueryIngestersUntil:    30 * time.Minute,
			DefaultLimit:           20,
			MaxLimit:               0,
			MaxDuration:            168 * time.Hour, // 1 week
			ConcurrentRequests:     defaultConcurrentRequests,
			TargetBytesPerRequest:  defaultTargetBytesPerRequest,
			MostRecentShards:       defaultMostRecentShards,
			IngesterShards:         3,
			DefaultSpansPerSpanSet: 3,
			MaxSpansPerSpanSet:     100,
		},
		SLO: defaultSLO,
	}

	cfg.TraceByID = TraceByIDConfig{
		QueryShards: 50,
		SLO:         defaultSLO,
	}

	cfg.Metrics = MetricsConfig{
		Sharder: QueryRangeSharderConfig{
			MaxDuration:           3 * time.Hour,
			QueryBackendAfter:     30 * time.Minute,
			ConcurrentRequests:    defaultConcurrentRequests,
			TargetBytesPerRequest: defaultTargetBytesPerRequest,
			Interval:              5 * time.Minute,
			MaxExemplars:          100,
			MaxResponseSeries:     0,
			StreamingShards:       defaultStreamingShards,
		},
		SLO:          defaultSLO,
		MaxIntervals: 10_000,
	}

	cfg.Weights = pipeline.WeightsConfig{
		RequestWithWeights:   true,
		RetryWithWeights:     true,
		MaxRegexConditions:   1,
		MaxTraceQLConditions: 4,
	}

	// enabling an mcp server opens the door to send tracing data to an LLM.
	// it should require explicit enabling
	cfg.MCPServer = MCPServerConfig{Enabled: false}

	// set default max query size to 128 KiB, queries larger than this will
	// be rejected
	cfg.MaxQueryExpressionSizeBytes = 128 * 1024

	// enable multi tenant queries by default
	cfg.MultiTenantQueriesEnabled = true
}
// CortexNoQuerierLimits is an empty marker type. It is not referenced
// anywhere in this file; presumably it satisfies a querier-limits
// interface inherited from Cortex — TODO(review) confirm against callers.
type CortexNoQuerierLimits struct{}
// InitFrontend initializes the V1 frontend and records the frontend
// version in usage stats.
//
// The returned RoundTripper can be wrapped in more round-tripper
// middlewares and then eventually registered into an HTTP server using
// the Handler from this package. The returned RoundTripper is always
// non-nil (if there are no errors), and it uses the returned frontend
// (if any).
func InitFrontend(cfg v1.Config, log log.Logger, reg prometheus.Registerer) (pipeline.RoundTripper, *v1.Frontend, error) {
	statVersion.Set("v1")

	// No scheduler = use original frontend.
	frontend, err := v1.New(cfg, log, reg)
	if err != nil {
		return nil, nil, err
	}

	return frontend, frontend, nil
}