Skip to content
Merged
Show file tree
Hide file tree
Changes from 1 commit
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
10 changes: 10 additions & 0 deletions modules/querier/worker/frontend_processor.go
Original file line number Diff line number Diff line change
Expand Up @@ -15,6 +15,8 @@ import (
"github.com/grafana/dskit/httpgrpc"
"github.com/grafana/tempo/pkg/util/httpgrpcutil"
"github.com/opentracing/opentracing-go"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
Expand All @@ -27,6 +29,12 @@ var processorBackoffConfig = backoff.Config{
MaxBackoff: 1 * time.Second,
}

var metricWorkerRequests = promauto.NewCounter(prometheus.CounterOpts{
Copy link
Copy Markdown
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I don't mind this, but do you expect it to be significantly different from the querier request rates?

Copy link
Copy Markdown
Member Author

@electron0zero electron0zero Mar 27, 2024

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Yup, I think the request rate is for batches, and this is at the job level (a single request). We can apply a rate to this counter to capture how much work each querier is doing.

Namespace: "tempo",
Name: "querier_worker_request_executed_total",
Help: "The total number of requests executed by the querier worker.",
})

func newFrontendProcessor(cfg Config, handler RequestHandler, log log.Logger) processor {
return &frontendProcessor{
log: log,
Expand Down Expand Up @@ -185,6 +193,8 @@ func (fp *frontendProcessor) runRequest(ctx context.Context, request *httpgrpc.H
level.Error(fp.log).Log("msg", "error processing query", "err", errMsg)
}

metricWorkerRequests.Inc()

return response
}

Expand Down
7 changes: 7 additions & 0 deletions modules/querier/worker/frontend_processor_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -11,6 +11,7 @@ import (
"github.com/go-kit/log"
"github.com/grafana/dskit/grpcclient"
"github.com/grafana/dskit/httpgrpc"
dto "github.com/prometheus/client_model/go"
"github.com/stretchr/testify/require"
)

Expand Down Expand Up @@ -46,6 +47,12 @@ func TestRunRequests(t *testing.T) {
for i, resp := range resps {
require.Equal(t, []byte{byte(i)}, resp.Body)
}

// check that counter metric is working
var m = &dto.Metric{}
err := metricWorkerRequests.Write(m)
require.NoError(t, err)
require.Equal(t, float64(totalRequests), m.Counter.GetValue())
}

func TestHandleSendError(t *testing.T) {
Expand Down
10 changes: 10 additions & 0 deletions modules/querier/worker/worker.go
Original file line number Diff line number Diff line change
Expand Up @@ -15,6 +15,7 @@ import (
"github.com/grafana/dskit/httpgrpc"
"github.com/grafana/dskit/services"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
"google.golang.org/grpc"

"github.com/grafana/tempo/pkg/util"
Expand All @@ -33,6 +34,12 @@ type Config struct {
GRPCClientConfig grpcclient.Config `yaml:"grpc_client_config"`
}

// metricConcurrency is a gauge of the querier worker's current total
// concurrency; it is set from totalConcurrency in resetConcurrency each
// time the worker recomputes its per-connection concurrency.
var metricConcurrency = promauto.NewGauge(prometheus.GaugeOpts{
Namespace: "tempo",
Name: "querier_actual_concurrency",
Help: "The actual value of concurrency.",
})

func (cfg *Config) RegisterFlags(f *flag.FlagSet) {
f.StringVar(&cfg.FrontendAddress, "querier.frontend-address", "", "Address of query frontend service, in host:port format. If -querier.scheduler-address is set as well, querier will use scheduler instead. Only one of -querier.frontend-address or -querier.scheduler-address can be set. If neither is set, queries are only received via HTTP endpoint.")

Expand Down Expand Up @@ -242,6 +249,9 @@ func (w *querierWorker) resetConcurrency() {
if totalConcurrency > w.cfg.MaxConcurrentRequests {
level.Warn(w.log).Log("msg", "total worker concurrency is greater than promql max concurrency. Queries may be queued in the querier which reduces QOS")
}

// capture the current concurrency metric
metricConcurrency.Set(float64(totalConcurrency))
Comment thread
electron0zero marked this conversation as resolved.
Outdated
}

func (w *querierWorker) connect(ctx context.Context, address string) (*grpc.ClientConn, error) {
Expand Down