update vendor
Signed-off-by: Jess Frazelle <acidburn@microsoft.com>
This commit is contained in:
parent
19a32db84d
commit
94d1cfbfbf
10501 changed files with 2307943 additions and 29279 deletions
11
vendor/github.com/prometheus/client_golang/.travis.yml
generated
vendored
11
vendor/github.com/prometheus/client_golang/.travis.yml
generated
vendored
|
@ -2,9 +2,12 @@ sudo: false
|
|||
language: go
|
||||
|
||||
go:
|
||||
- 1.7.x
|
||||
- 1.8.x
|
||||
- 1.9.x
|
||||
- 1.7.x # See README.md for current minimum version.
|
||||
- 1.8.x
|
||||
- 1.9.x
|
||||
- 1.10.x
|
||||
- 1.11.x
|
||||
|
||||
script:
|
||||
- go test -short ./...
|
||||
- make check_license style unused test-short
|
||||
- if [[ ! $TRAVIS_GO_VERSION =~ ^1\.(7|8|9)\.[x0-9]+$ ]]; then make staticcheck; fi
|
||||
|
|
2
vendor/github.com/prometheus/client_golang/CONTRIBUTING.md
generated
vendored
2
vendor/github.com/prometheus/client_golang/CONTRIBUTING.md
generated
vendored
|
@ -16,3 +16,5 @@ Prometheus uses GitHub to manage reviews of pull requests.
|
|||
and the _Formatting and style_ section of Peter Bourgon's [Go: Best
|
||||
Practices for Production
|
||||
Environments](http://peter.bourgon.org/go-in-production/#formatting-and-style).
|
||||
|
||||
* Be sure to sign off on the [DCO](https://github.com/probot/dco#how-it-works)
|
||||
|
|
23
vendor/github.com/prometheus/client_golang/Dockerfile
generated
vendored
Normal file
23
vendor/github.com/prometheus/client_golang/Dockerfile
generated
vendored
Normal file
|
@ -0,0 +1,23 @@
|
|||
# This Dockerfile builds an image for a client_golang example.
|
||||
#
|
||||
# Use as (from the root for the client_golang repository):
|
||||
# docker build -f examples/$name/Dockerfile -t prometheus/golang-example-$name .
|
||||
|
||||
# Builder image, where we build the example.
|
||||
FROM golang:1 AS builder
|
||||
WORKDIR /go/src/github.com/prometheus/client_golang
|
||||
COPY . .
|
||||
WORKDIR /go/src/github.com/prometheus/client_golang/prometheus
|
||||
RUN go get -d
|
||||
WORKDIR /go/src/github.com/prometheus/client_golang/examples/random
|
||||
RUN CGO_ENABLED=0 GOOS=linux go build -a -tags netgo -ldflags '-w'
|
||||
WORKDIR /go/src/github.com/prometheus/client_golang/examples/simple
|
||||
RUN CGO_ENABLED=0 GOOS=linux go build -a -tags netgo -ldflags '-w'
|
||||
|
||||
# Final image.
|
||||
FROM prom/busybox
|
||||
LABEL maintainer="The Prometheus Authors <prometheus-developers@googlegroups.com>"
|
||||
COPY --from=builder /go/src/github.com/prometheus/client_golang/examples/random \
|
||||
/go/src/github.com/prometheus/client_golang/examples/simple ./
|
||||
EXPOSE 8080
|
||||
CMD echo Please run an example. Either /random or /simple
|
3
vendor/github.com/prometheus/client_golang/MAINTAINERS.md
generated
vendored
3
vendor/github.com/prometheus/client_golang/MAINTAINERS.md
generated
vendored
|
@ -1 +1,2 @@
|
|||
* Björn Rabenstein <beorn@soundcloud.com>
|
||||
* Krasi Georgiev <kgeorgie@redhat.com> for `api/...`
|
||||
* Björn Rabenstein <beorn@soundcloud.com> for everything else
|
||||
|
|
32
vendor/github.com/prometheus/client_golang/Makefile
generated
vendored
Normal file
32
vendor/github.com/prometheus/client_golang/Makefile
generated
vendored
Normal file
|
@ -0,0 +1,32 @@
|
|||
# Copyright 2018 The Prometheus Authors
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
include Makefile.common
|
||||
|
||||
# http.CloseNotifier is deprecated but we don't want to remove support
|
||||
# from client_golang to not break anybody still using it.
|
||||
STATICCHECK_IGNORE = \
|
||||
github.com/prometheus/client_golang/prometheus/promhttp/delegator*.go:SA1019 \
|
||||
github.com/prometheus/client_golang/prometheus/promhttp/instrument_server_test.go:SA1019 \
|
||||
github.com/prometheus/client_golang/prometheus/http.go:SA1019
|
||||
|
||||
.PHONY: get_dep
|
||||
get_dep:
|
||||
@echo ">> getting dependencies"
|
||||
$(GO) get -t ./...
|
||||
|
||||
.PHONY: test
|
||||
test: get_dep common-test
|
||||
|
||||
.PHONY: test-short
|
||||
test-short: get_dep common-test-short
|
132
vendor/github.com/prometheus/client_golang/Makefile.common
generated
vendored
Normal file
132
vendor/github.com/prometheus/client_golang/Makefile.common
generated
vendored
Normal file
|
@ -0,0 +1,132 @@
|
|||
# Copyright 2018 The Prometheus Authors
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
|
||||
# A common Makefile that includes rules to be reused in different prometheus projects.
|
||||
# !!! Open PRs only against the prometheus/prometheus/Makefile.common repository!
|
||||
|
||||
# Example usage :
|
||||
# Create the main Makefile in the root project directory.
|
||||
# include Makefile.common
|
||||
# customTarget:
|
||||
# @echo ">> Running customTarget"
|
||||
#
|
||||
|
||||
# Ensure GOBIN is not set during build so that promu is installed to the correct path
|
||||
unexport GOBIN
|
||||
|
||||
GO ?= go
|
||||
GOFMT ?= $(GO)fmt
|
||||
FIRST_GOPATH := $(firstword $(subst :, ,$(shell $(GO) env GOPATH)))
|
||||
PROMU := $(FIRST_GOPATH)/bin/promu
|
||||
STATICCHECK := $(FIRST_GOPATH)/bin/staticcheck
|
||||
GOVENDOR := $(FIRST_GOPATH)/bin/govendor
|
||||
pkgs = ./...
|
||||
|
||||
PREFIX ?= $(shell pwd)
|
||||
BIN_DIR ?= $(shell pwd)
|
||||
DOCKER_IMAGE_TAG ?= $(subst /,-,$(shell git rev-parse --abbrev-ref HEAD))
|
||||
DOCKER_REPO ?= prom
|
||||
|
||||
.PHONY: all
|
||||
all: style staticcheck unused build test
|
||||
|
||||
# This rule is used to forward a target like "build" to "common-build". This
|
||||
# allows a new "build" target to be defined in a Makefile which includes this
|
||||
# one and override "common-build" without override warnings.
|
||||
%: common-% ;
|
||||
|
||||
.PHONY: common-style
|
||||
common-style:
|
||||
@echo ">> checking code style"
|
||||
@fmtRes=$$($(GOFMT) -d $$(find . -path ./vendor -prune -o -name '*.go' -print)); \
|
||||
if [ -n "$${fmtRes}" ]; then \
|
||||
echo "gofmt checking failed!"; echo "$${fmtRes}"; echo; \
|
||||
echo "Please ensure you are using $$($(GO) version) for formatting code."; \
|
||||
exit 1; \
|
||||
fi
|
||||
|
||||
.PHONY: common-check_license
|
||||
common-check_license:
|
||||
@echo ">> checking license header"
|
||||
@licRes=$$(for file in $$(find . -type f -iname '*.go' ! -path './vendor/*') ; do \
|
||||
awk 'NR<=3' $$file | grep -Eq "(Copyright|generated|GENERATED)" || echo $$file; \
|
||||
done); \
|
||||
if [ -n "$${licRes}" ]; then \
|
||||
echo "license header checking failed:"; echo "$${licRes}"; \
|
||||
exit 1; \
|
||||
fi
|
||||
|
||||
.PHONY: common-test-short
|
||||
common-test-short:
|
||||
@echo ">> running short tests"
|
||||
$(GO) test -short $(pkgs)
|
||||
|
||||
.PHONY: common-test
|
||||
common-test:
|
||||
@echo ">> running all tests"
|
||||
$(GO) test -race $(pkgs)
|
||||
|
||||
.PHONY: common-format
|
||||
common-format:
|
||||
@echo ">> formatting code"
|
||||
$(GO) fmt $(pkgs)
|
||||
|
||||
.PHONY: common-vet
|
||||
common-vet:
|
||||
@echo ">> vetting code"
|
||||
$(GO) vet $(pkgs)
|
||||
|
||||
.PHONY: common-staticcheck
|
||||
common-staticcheck: $(STATICCHECK)
|
||||
@echo ">> running staticcheck"
|
||||
$(STATICCHECK) -ignore "$(STATICCHECK_IGNORE)" $(pkgs)
|
||||
|
||||
.PHONY: common-unused
|
||||
common-unused: $(GOVENDOR)
|
||||
@echo ">> running check for unused packages"
|
||||
@$(GOVENDOR) list +unused | grep . && exit 1 || echo 'No unused packages'
|
||||
|
||||
.PHONY: common-build
|
||||
common-build: promu
|
||||
@echo ">> building binaries"
|
||||
$(PROMU) build --prefix $(PREFIX)
|
||||
|
||||
.PHONY: common-tarball
|
||||
common-tarball: promu
|
||||
@echo ">> building release tarball"
|
||||
$(PROMU) tarball --prefix $(PREFIX) $(BIN_DIR)
|
||||
|
||||
.PHONY: common-docker
|
||||
common-docker:
|
||||
docker build -t "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(DOCKER_IMAGE_TAG)" .
|
||||
|
||||
.PHONY: common-docker-publish
|
||||
common-docker-publish:
|
||||
docker push "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME)"
|
||||
|
||||
.PHONY: common-docker-tag-latest
|
||||
common-docker-tag-latest:
|
||||
docker tag "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):$(DOCKER_IMAGE_TAG)" "$(DOCKER_REPO)/$(DOCKER_IMAGE_NAME):latest"
|
||||
|
||||
.PHONY: promu
|
||||
promu:
|
||||
GOOS= GOARCH= $(GO) get -u github.com/prometheus/promu
|
||||
|
||||
.PHONY: $(STATICCHECK)
|
||||
$(STATICCHECK):
|
||||
GOOS= GOARCH= $(GO) get -u honnef.co/go/tools/cmd/staticcheck
|
||||
|
||||
.PHONY: $(GOVENDOR)
|
||||
$(GOVENDOR):
|
||||
GOOS= GOARCH= $(GO) get -u github.com/kardianos/govendor
|
131
vendor/github.com/prometheus/client_golang/api/client.go
generated
vendored
Normal file
131
vendor/github.com/prometheus/client_golang/api/client.go
generated
vendored
Normal file
|
@ -0,0 +1,131 @@
|
|||
// Copyright 2015 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// +build go1.7
|
||||
|
||||
// Package api provides clients for the HTTP APIs.
|
||||
package api
|
||||
|
||||
import (
|
||||
"context"
|
||||
"io/ioutil"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"path"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// DefaultRoundTripper is used if no RoundTripper is set in Config.
|
||||
var DefaultRoundTripper http.RoundTripper = &http.Transport{
|
||||
Proxy: http.ProxyFromEnvironment,
|
||||
DialContext: (&net.Dialer{
|
||||
Timeout: 30 * time.Second,
|
||||
KeepAlive: 30 * time.Second,
|
||||
}).DialContext,
|
||||
TLSHandshakeTimeout: 10 * time.Second,
|
||||
}
|
||||
|
||||
// Config defines configuration parameters for a new client.
|
||||
type Config struct {
|
||||
// The address of the Prometheus to connect to.
|
||||
Address string
|
||||
|
||||
// RoundTripper is used by the Client to drive HTTP requests. If not
|
||||
// provided, DefaultRoundTripper will be used.
|
||||
RoundTripper http.RoundTripper
|
||||
}
|
||||
|
||||
func (cfg *Config) roundTripper() http.RoundTripper {
|
||||
if cfg.RoundTripper == nil {
|
||||
return DefaultRoundTripper
|
||||
}
|
||||
return cfg.RoundTripper
|
||||
}
|
||||
|
||||
// Client is the interface for an API client.
|
||||
type Client interface {
|
||||
URL(ep string, args map[string]string) *url.URL
|
||||
Do(context.Context, *http.Request) (*http.Response, []byte, error)
|
||||
}
|
||||
|
||||
// NewClient returns a new Client.
|
||||
//
|
||||
// It is safe to use the returned Client from multiple goroutines.
|
||||
func NewClient(cfg Config) (Client, error) {
|
||||
u, err := url.Parse(cfg.Address)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
u.Path = strings.TrimRight(u.Path, "/")
|
||||
|
||||
return &httpClient{
|
||||
endpoint: u,
|
||||
client: http.Client{Transport: cfg.roundTripper()},
|
||||
}, nil
|
||||
}
|
||||
|
||||
type httpClient struct {
|
||||
endpoint *url.URL
|
||||
client http.Client
|
||||
}
|
||||
|
||||
func (c *httpClient) URL(ep string, args map[string]string) *url.URL {
|
||||
p := path.Join(c.endpoint.Path, ep)
|
||||
|
||||
for arg, val := range args {
|
||||
arg = ":" + arg
|
||||
p = strings.Replace(p, arg, val, -1)
|
||||
}
|
||||
|
||||
u := *c.endpoint
|
||||
u.Path = p
|
||||
|
||||
return &u
|
||||
}
|
||||
|
||||
func (c *httpClient) Do(ctx context.Context, req *http.Request) (*http.Response, []byte, error) {
|
||||
if ctx != nil {
|
||||
req = req.WithContext(ctx)
|
||||
}
|
||||
resp, err := c.client.Do(req)
|
||||
defer func() {
|
||||
if resp != nil {
|
||||
resp.Body.Close()
|
||||
}
|
||||
}()
|
||||
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
var body []byte
|
||||
done := make(chan struct{})
|
||||
go func() {
|
||||
body, err = ioutil.ReadAll(resp.Body)
|
||||
close(done)
|
||||
}()
|
||||
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
err = resp.Body.Close()
|
||||
<-done
|
||||
if err == nil {
|
||||
err = ctx.Err()
|
||||
}
|
||||
case <-done:
|
||||
}
|
||||
|
||||
return resp, body, err
|
||||
}
|
115
vendor/github.com/prometheus/client_golang/api/client_test.go
generated
vendored
Normal file
115
vendor/github.com/prometheus/client_golang/api/client_test.go
generated
vendored
Normal file
|
@ -0,0 +1,115 @@
|
|||
// Copyright 2015 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// +build go1.7
|
||||
|
||||
package api
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"net/url"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestConfig(t *testing.T) {
|
||||
c := Config{}
|
||||
if c.roundTripper() != DefaultRoundTripper {
|
||||
t.Fatalf("expected default roundtripper for nil RoundTripper field")
|
||||
}
|
||||
}
|
||||
|
||||
func TestClientURL(t *testing.T) {
|
||||
tests := []struct {
|
||||
address string
|
||||
endpoint string
|
||||
args map[string]string
|
||||
expected string
|
||||
}{
|
||||
{
|
||||
address: "http://localhost:9090",
|
||||
endpoint: "/test",
|
||||
expected: "http://localhost:9090/test",
|
||||
},
|
||||
{
|
||||
address: "http://localhost",
|
||||
endpoint: "/test",
|
||||
expected: "http://localhost/test",
|
||||
},
|
||||
{
|
||||
address: "http://localhost:9090",
|
||||
endpoint: "test",
|
||||
expected: "http://localhost:9090/test",
|
||||
},
|
||||
{
|
||||
address: "http://localhost:9090/prefix",
|
||||
endpoint: "/test",
|
||||
expected: "http://localhost:9090/prefix/test",
|
||||
},
|
||||
{
|
||||
address: "https://localhost:9090/",
|
||||
endpoint: "/test/",
|
||||
expected: "https://localhost:9090/test",
|
||||
},
|
||||
{
|
||||
address: "http://localhost:9090",
|
||||
endpoint: "/test/:param",
|
||||
args: map[string]string{
|
||||
"param": "content",
|
||||
},
|
||||
expected: "http://localhost:9090/test/content",
|
||||
},
|
||||
{
|
||||
address: "http://localhost:9090",
|
||||
endpoint: "/test/:param/more/:param",
|
||||
args: map[string]string{
|
||||
"param": "content",
|
||||
},
|
||||
expected: "http://localhost:9090/test/content/more/content",
|
||||
},
|
||||
{
|
||||
address: "http://localhost:9090",
|
||||
endpoint: "/test/:param/more/:foo",
|
||||
args: map[string]string{
|
||||
"param": "content",
|
||||
"foo": "bar",
|
||||
},
|
||||
expected: "http://localhost:9090/test/content/more/bar",
|
||||
},
|
||||
{
|
||||
address: "http://localhost:9090",
|
||||
endpoint: "/test/:param",
|
||||
args: map[string]string{
|
||||
"nonexistent": "content",
|
||||
},
|
||||
expected: "http://localhost:9090/test/:param",
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
ep, err := url.Parse(test.address)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
hclient := &httpClient{
|
||||
endpoint: ep,
|
||||
client: http.Client{Transport: DefaultRoundTripper},
|
||||
}
|
||||
|
||||
u := hclient.URL(test.endpoint, test.args)
|
||||
if u.String() != test.expected {
|
||||
t.Errorf("unexpected result: got %s, want %s", u, test.expected)
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
502
vendor/github.com/prometheus/client_golang/api/prometheus/v1/api.go
generated
vendored
Normal file
502
vendor/github.com/prometheus/client_golang/api/prometheus/v1/api.go
generated
vendored
Normal file
|
@ -0,0 +1,502 @@
|
|||
// Copyright 2017 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// +build go1.7
|
||||
|
||||
// Package v1 provides bindings to the Prometheus HTTP API v1:
|
||||
// http://prometheus.io/docs/querying/api/
|
||||
package v1
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/prometheus/client_golang/api"
|
||||
"github.com/prometheus/common/model"
|
||||
)
|
||||
|
||||
const (
|
||||
statusAPIError = 422
|
||||
|
||||
apiPrefix = "/api/v1"
|
||||
|
||||
epAlertManagers = apiPrefix + "/alertmanagers"
|
||||
epQuery = apiPrefix + "/query"
|
||||
epQueryRange = apiPrefix + "/query_range"
|
||||
epLabelValues = apiPrefix + "/label/:name/values"
|
||||
epSeries = apiPrefix + "/series"
|
||||
epTargets = apiPrefix + "/targets"
|
||||
epSnapshot = apiPrefix + "/admin/tsdb/snapshot"
|
||||
epDeleteSeries = apiPrefix + "/admin/tsdb/delete_series"
|
||||
epCleanTombstones = apiPrefix + "/admin/tsdb/clean_tombstones"
|
||||
epConfig = apiPrefix + "/status/config"
|
||||
epFlags = apiPrefix + "/status/flags"
|
||||
)
|
||||
|
||||
// ErrorType models the different API error types.
|
||||
type ErrorType string
|
||||
|
||||
// HealthStatus models the health status of a scrape target.
|
||||
type HealthStatus string
|
||||
|
||||
const (
|
||||
// Possible values for ErrorType.
|
||||
ErrBadData ErrorType = "bad_data"
|
||||
ErrTimeout = "timeout"
|
||||
ErrCanceled = "canceled"
|
||||
ErrExec = "execution"
|
||||
ErrBadResponse = "bad_response"
|
||||
|
||||
// Possible values for HealthStatus.
|
||||
HealthGood HealthStatus = "up"
|
||||
HealthUnknown HealthStatus = "unknown"
|
||||
HealthBad HealthStatus = "down"
|
||||
)
|
||||
|
||||
// Error is an error returned by the API.
|
||||
type Error struct {
|
||||
Type ErrorType
|
||||
Msg string
|
||||
}
|
||||
|
||||
func (e *Error) Error() string {
|
||||
return fmt.Sprintf("%s: %s", e.Type, e.Msg)
|
||||
}
|
||||
|
||||
// Range represents a sliced time range.
|
||||
type Range struct {
|
||||
// The boundaries of the time range.
|
||||
Start, End time.Time
|
||||
// The maximum time between two slices within the boundaries.
|
||||
Step time.Duration
|
||||
}
|
||||
|
||||
// API provides bindings for Prometheus's v1 API.
|
||||
type API interface {
|
||||
// AlertManagers returns an overview of the current state of the Prometheus alert manager discovery.
|
||||
AlertManagers(ctx context.Context) (AlertManagersResult, error)
|
||||
// CleanTombstones removes the deleted data from disk and cleans up the existing tombstones.
|
||||
CleanTombstones(ctx context.Context) error
|
||||
// Config returns the current Prometheus configuration.
|
||||
Config(ctx context.Context) (ConfigResult, error)
|
||||
// DeleteSeries deletes data for a selection of series in a time range.
|
||||
DeleteSeries(ctx context.Context, matches []string, startTime time.Time, endTime time.Time) error
|
||||
// Flags returns the flag values that Prometheus was launched with.
|
||||
Flags(ctx context.Context) (FlagsResult, error)
|
||||
// LabelValues performs a query for the values of the given label.
|
||||
LabelValues(ctx context.Context, label string) (model.LabelValues, error)
|
||||
// Query performs a query for the given time.
|
||||
Query(ctx context.Context, query string, ts time.Time) (model.Value, error)
|
||||
// QueryRange performs a query for the given range.
|
||||
QueryRange(ctx context.Context, query string, r Range) (model.Value, error)
|
||||
// Series finds series by label matchers.
|
||||
Series(ctx context.Context, matches []string, startTime time.Time, endTime time.Time) ([]model.LabelSet, error)
|
||||
// Snapshot creates a snapshot of all current data into snapshots/<datetime>-<rand>
|
||||
// under the TSDB's data directory and returns the directory as response.
|
||||
Snapshot(ctx context.Context, skipHead bool) (SnapshotResult, error)
|
||||
// Targets returns an overview of the current state of the Prometheus target discovery.
|
||||
Targets(ctx context.Context) (TargetsResult, error)
|
||||
}
|
||||
|
||||
// AlertManagersResult contains the result from querying the alertmanagers endpoint.
|
||||
type AlertManagersResult struct {
|
||||
Active []AlertManager `json:"activeAlertManagers"`
|
||||
Dropped []AlertManager `json:"droppedAlertManagers"`
|
||||
}
|
||||
|
||||
// AlertManager models a configured Alert Manager.
|
||||
type AlertManager struct {
|
||||
URL string `json:"url"`
|
||||
}
|
||||
|
||||
// ConfigResult contains the result from querying the config endpoint.
|
||||
type ConfigResult struct {
|
||||
YAML string `json:"yaml"`
|
||||
}
|
||||
|
||||
// FlagsResult contains the result from querying the flag endpoint.
|
||||
type FlagsResult map[string]string
|
||||
|
||||
// SnapshotResult contains the result from querying the snapshot endpoint.
|
||||
type SnapshotResult struct {
|
||||
Name string `json:"name"`
|
||||
}
|
||||
|
||||
// TargetsResult contains the result from querying the targets endpoint.
|
||||
type TargetsResult struct {
|
||||
Active []ActiveTarget `json:"activeTargets"`
|
||||
Dropped []DroppedTarget `json:"droppedTargets"`
|
||||
}
|
||||
|
||||
// ActiveTarget models an active Prometheus scrape target.
|
||||
type ActiveTarget struct {
|
||||
DiscoveredLabels model.LabelSet `json:"discoveredLabels"`
|
||||
Labels model.LabelSet `json:"labels"`
|
||||
ScrapeURL string `json:"scrapeUrl"`
|
||||
LastError string `json:"lastError"`
|
||||
LastScrape time.Time `json:"lastScrape"`
|
||||
Health HealthStatus `json:"health"`
|
||||
}
|
||||
|
||||
// DroppedTarget models a dropped Prometheus scrape target.
|
||||
type DroppedTarget struct {
|
||||
DiscoveredLabels model.LabelSet `json:"discoveredLabels"`
|
||||
}
|
||||
|
||||
// queryResult contains result data for a query.
|
||||
type queryResult struct {
|
||||
Type model.ValueType `json:"resultType"`
|
||||
Result interface{} `json:"result"`
|
||||
|
||||
// The decoded value.
|
||||
v model.Value
|
||||
}
|
||||
|
||||
func (qr *queryResult) UnmarshalJSON(b []byte) error {
|
||||
v := struct {
|
||||
Type model.ValueType `json:"resultType"`
|
||||
Result json.RawMessage `json:"result"`
|
||||
}{}
|
||||
|
||||
err := json.Unmarshal(b, &v)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
switch v.Type {
|
||||
case model.ValScalar:
|
||||
var sv model.Scalar
|
||||
err = json.Unmarshal(v.Result, &sv)
|
||||
qr.v = &sv
|
||||
|
||||
case model.ValVector:
|
||||
var vv model.Vector
|
||||
err = json.Unmarshal(v.Result, &vv)
|
||||
qr.v = vv
|
||||
|
||||
case model.ValMatrix:
|
||||
var mv model.Matrix
|
||||
err = json.Unmarshal(v.Result, &mv)
|
||||
qr.v = mv
|
||||
|
||||
default:
|
||||
err = fmt.Errorf("unexpected value type %q", v.Type)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// NewAPI returns a new API for the client.
|
||||
//
|
||||
// It is safe to use the returned API from multiple goroutines.
|
||||
func NewAPI(c api.Client) API {
|
||||
return &httpAPI{client: apiClient{c}}
|
||||
}
|
||||
|
||||
type httpAPI struct {
|
||||
client api.Client
|
||||
}
|
||||
|
||||
func (h *httpAPI) AlertManagers(ctx context.Context) (AlertManagersResult, error) {
|
||||
u := h.client.URL(epAlertManagers, nil)
|
||||
|
||||
req, err := http.NewRequest(http.MethodGet, u.String(), nil)
|
||||
if err != nil {
|
||||
return AlertManagersResult{}, err
|
||||
}
|
||||
|
||||
_, body, err := h.client.Do(ctx, req)
|
||||
if err != nil {
|
||||
return AlertManagersResult{}, err
|
||||
}
|
||||
|
||||
var res AlertManagersResult
|
||||
err = json.Unmarshal(body, &res)
|
||||
return res, err
|
||||
}
|
||||
|
||||
func (h *httpAPI) CleanTombstones(ctx context.Context) error {
|
||||
u := h.client.URL(epCleanTombstones, nil)
|
||||
|
||||
req, err := http.NewRequest(http.MethodPost, u.String(), nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_, _, err = h.client.Do(ctx, req)
|
||||
return err
|
||||
}
|
||||
|
||||
func (h *httpAPI) Config(ctx context.Context) (ConfigResult, error) {
|
||||
u := h.client.URL(epConfig, nil)
|
||||
|
||||
req, err := http.NewRequest(http.MethodGet, u.String(), nil)
|
||||
if err != nil {
|
||||
return ConfigResult{}, err
|
||||
}
|
||||
|
||||
_, body, err := h.client.Do(ctx, req)
|
||||
if err != nil {
|
||||
return ConfigResult{}, err
|
||||
}
|
||||
|
||||
var res ConfigResult
|
||||
err = json.Unmarshal(body, &res)
|
||||
return res, err
|
||||
}
|
||||
|
||||
func (h *httpAPI) DeleteSeries(ctx context.Context, matches []string, startTime time.Time, endTime time.Time) error {
|
||||
u := h.client.URL(epDeleteSeries, nil)
|
||||
q := u.Query()
|
||||
|
||||
for _, m := range matches {
|
||||
q.Add("match[]", m)
|
||||
}
|
||||
|
||||
q.Set("start", startTime.Format(time.RFC3339Nano))
|
||||
q.Set("end", endTime.Format(time.RFC3339Nano))
|
||||
|
||||
u.RawQuery = q.Encode()
|
||||
|
||||
req, err := http.NewRequest(http.MethodPost, u.String(), nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_, _, err = h.client.Do(ctx, req)
|
||||
return err
|
||||
}
|
||||
|
||||
func (h *httpAPI) Flags(ctx context.Context) (FlagsResult, error) {
|
||||
u := h.client.URL(epFlags, nil)
|
||||
|
||||
req, err := http.NewRequest(http.MethodGet, u.String(), nil)
|
||||
if err != nil {
|
||||
return FlagsResult{}, err
|
||||
}
|
||||
|
||||
_, body, err := h.client.Do(ctx, req)
|
||||
if err != nil {
|
||||
return FlagsResult{}, err
|
||||
}
|
||||
|
||||
var res FlagsResult
|
||||
err = json.Unmarshal(body, &res)
|
||||
return res, err
|
||||
}
|
||||
|
||||
func (h *httpAPI) LabelValues(ctx context.Context, label string) (model.LabelValues, error) {
|
||||
u := h.client.URL(epLabelValues, map[string]string{"name": label})
|
||||
req, err := http.NewRequest(http.MethodGet, u.String(), nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
_, body, err := h.client.Do(ctx, req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var labelValues model.LabelValues
|
||||
err = json.Unmarshal(body, &labelValues)
|
||||
return labelValues, err
|
||||
}
|
||||
|
||||
func (h *httpAPI) Query(ctx context.Context, query string, ts time.Time) (model.Value, error) {
|
||||
u := h.client.URL(epQuery, nil)
|
||||
q := u.Query()
|
||||
|
||||
q.Set("query", query)
|
||||
if !ts.IsZero() {
|
||||
q.Set("time", ts.Format(time.RFC3339Nano))
|
||||
}
|
||||
|
||||
u.RawQuery = q.Encode()
|
||||
|
||||
req, err := http.NewRequest(http.MethodGet, u.String(), nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
_, body, err := h.client.Do(ctx, req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var qres queryResult
|
||||
err = json.Unmarshal(body, &qres)
|
||||
|
||||
return model.Value(qres.v), err
|
||||
}
|
||||
|
||||
func (h *httpAPI) QueryRange(ctx context.Context, query string, r Range) (model.Value, error) {
|
||||
u := h.client.URL(epQueryRange, nil)
|
||||
q := u.Query()
|
||||
|
||||
var (
|
||||
start = r.Start.Format(time.RFC3339Nano)
|
||||
end = r.End.Format(time.RFC3339Nano)
|
||||
step = strconv.FormatFloat(r.Step.Seconds(), 'f', 3, 64)
|
||||
)
|
||||
|
||||
q.Set("query", query)
|
||||
q.Set("start", start)
|
||||
q.Set("end", end)
|
||||
q.Set("step", step)
|
||||
|
||||
u.RawQuery = q.Encode()
|
||||
|
||||
req, err := http.NewRequest(http.MethodGet, u.String(), nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
_, body, err := h.client.Do(ctx, req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var qres queryResult
|
||||
err = json.Unmarshal(body, &qres)
|
||||
|
||||
return model.Value(qres.v), err
|
||||
}
|
||||
|
||||
func (h *httpAPI) Series(ctx context.Context, matches []string, startTime time.Time, endTime time.Time) ([]model.LabelSet, error) {
|
||||
u := h.client.URL(epSeries, nil)
|
||||
q := u.Query()
|
||||
|
||||
for _, m := range matches {
|
||||
q.Add("match[]", m)
|
||||
}
|
||||
|
||||
q.Set("start", startTime.Format(time.RFC3339Nano))
|
||||
q.Set("end", endTime.Format(time.RFC3339Nano))
|
||||
|
||||
u.RawQuery = q.Encode()
|
||||
|
||||
req, err := http.NewRequest(http.MethodGet, u.String(), nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
_, body, err := h.client.Do(ctx, req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var mset []model.LabelSet
|
||||
err = json.Unmarshal(body, &mset)
|
||||
return mset, err
|
||||
}
|
||||
|
||||
func (h *httpAPI) Snapshot(ctx context.Context, skipHead bool) (SnapshotResult, error) {
|
||||
u := h.client.URL(epSnapshot, nil)
|
||||
q := u.Query()
|
||||
|
||||
q.Set("skip_head", strconv.FormatBool(skipHead))
|
||||
|
||||
u.RawQuery = q.Encode()
|
||||
|
||||
req, err := http.NewRequest(http.MethodPost, u.String(), nil)
|
||||
if err != nil {
|
||||
return SnapshotResult{}, err
|
||||
}
|
||||
|
||||
_, body, err := h.client.Do(ctx, req)
|
||||
if err != nil {
|
||||
return SnapshotResult{}, err
|
||||
}
|
||||
|
||||
var res SnapshotResult
|
||||
err = json.Unmarshal(body, &res)
|
||||
return res, err
|
||||
}
|
||||
|
||||
func (h *httpAPI) Targets(ctx context.Context) (TargetsResult, error) {
|
||||
u := h.client.URL(epTargets, nil)
|
||||
|
||||
req, err := http.NewRequest(http.MethodGet, u.String(), nil)
|
||||
if err != nil {
|
||||
return TargetsResult{}, err
|
||||
}
|
||||
|
||||
_, body, err := h.client.Do(ctx, req)
|
||||
if err != nil {
|
||||
return TargetsResult{}, err
|
||||
}
|
||||
|
||||
var res TargetsResult
|
||||
err = json.Unmarshal(body, &res)
|
||||
return res, err
|
||||
}
|
||||
|
||||
// apiClient wraps a regular client and processes successful API responses.
|
||||
// Successful also includes responses that errored at the API level.
|
||||
type apiClient struct {
|
||||
api.Client
|
||||
}
|
||||
|
||||
type apiResponse struct {
|
||||
Status string `json:"status"`
|
||||
Data json.RawMessage `json:"data"`
|
||||
ErrorType ErrorType `json:"errorType"`
|
||||
Error string `json:"error"`
|
||||
}
|
||||
|
||||
func apiError(code int) bool {
|
||||
// These are the codes that Prometheus sends when it returns an error.
|
||||
return code == statusAPIError || code == http.StatusBadRequest
|
||||
}
|
||||
|
||||
func (c apiClient) Do(ctx context.Context, req *http.Request) (*http.Response, []byte, error) {
|
||||
resp, body, err := c.Client.Do(ctx, req)
|
||||
if err != nil {
|
||||
return resp, body, err
|
||||
}
|
||||
|
||||
code := resp.StatusCode
|
||||
|
||||
if code/100 != 2 && !apiError(code) {
|
||||
return resp, body, &Error{
|
||||
Type: ErrBadResponse,
|
||||
Msg: fmt.Sprintf("bad response code %d", resp.StatusCode),
|
||||
}
|
||||
}
|
||||
|
||||
var result apiResponse
|
||||
|
||||
if err = json.Unmarshal(body, &result); err != nil {
|
||||
return resp, body, &Error{
|
||||
Type: ErrBadResponse,
|
||||
Msg: err.Error(),
|
||||
}
|
||||
}
|
||||
|
||||
if apiError(code) != (result.Status == "error") {
|
||||
err = &Error{
|
||||
Type: ErrBadResponse,
|
||||
Msg: "inconsistent body for response code",
|
||||
}
|
||||
}
|
||||
|
||||
if apiError(code) && result.Status == "error" {
|
||||
err = &Error{
|
||||
Type: result.ErrorType,
|
||||
Msg: result.Error,
|
||||
}
|
||||
}
|
||||
|
||||
return resp, []byte(result.Data), err
|
||||
}
|
706
vendor/github.com/prometheus/client_golang/api/prometheus/v1/api_test.go
generated
vendored
Normal file
706
vendor/github.com/prometheus/client_golang/api/prometheus/v1/api_test.go
generated
vendored
Normal file
|
@ -0,0 +1,706 @@
|
|||
// Copyright 2017 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// +build go1.7
|
||||
|
||||
package v1
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"reflect"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/prometheus/common/model"
|
||||
)
|
||||
|
||||
type apiTest struct {
|
||||
do func() (interface{}, error)
|
||||
inErr error
|
||||
inRes interface{}
|
||||
|
||||
reqPath string
|
||||
reqParam url.Values
|
||||
reqMethod string
|
||||
res interface{}
|
||||
err error
|
||||
}
|
||||
|
||||
type apiTestClient struct {
|
||||
*testing.T
|
||||
curTest apiTest
|
||||
}
|
||||
|
||||
func (c *apiTestClient) URL(ep string, args map[string]string) *url.URL {
|
||||
path := ep
|
||||
for k, v := range args {
|
||||
path = strings.Replace(path, ":"+k, v, -1)
|
||||
}
|
||||
u := &url.URL{
|
||||
Host: "test:9090",
|
||||
Path: path,
|
||||
}
|
||||
return u
|
||||
}
|
||||
|
||||
func (c *apiTestClient) Do(ctx context.Context, req *http.Request) (*http.Response, []byte, error) {
|
||||
|
||||
test := c.curTest
|
||||
|
||||
if req.URL.Path != test.reqPath {
|
||||
c.Errorf("unexpected request path: want %s, got %s", test.reqPath, req.URL.Path)
|
||||
}
|
||||
if req.Method != test.reqMethod {
|
||||
c.Errorf("unexpected request method: want %s, got %s", test.reqMethod, req.Method)
|
||||
}
|
||||
|
||||
b, err := json.Marshal(test.inRes)
|
||||
if err != nil {
|
||||
c.Fatal(err)
|
||||
}
|
||||
|
||||
resp := &http.Response{}
|
||||
if test.inErr != nil {
|
||||
resp.StatusCode = statusAPIError
|
||||
} else {
|
||||
resp.StatusCode = http.StatusOK
|
||||
}
|
||||
|
||||
return resp, b, test.inErr
|
||||
}
|
||||
|
||||
func TestAPIs(t *testing.T) {
|
||||
|
||||
testTime := time.Now()
|
||||
|
||||
client := &apiTestClient{T: t}
|
||||
|
||||
promAPI := &httpAPI{
|
||||
client: client,
|
||||
}
|
||||
|
||||
doAlertManagers := func() func() (interface{}, error) {
|
||||
return func() (interface{}, error) {
|
||||
return promAPI.AlertManagers(context.Background())
|
||||
}
|
||||
}
|
||||
|
||||
doCleanTombstones := func() func() (interface{}, error) {
|
||||
return func() (interface{}, error) {
|
||||
return nil, promAPI.CleanTombstones(context.Background())
|
||||
}
|
||||
}
|
||||
|
||||
doConfig := func() func() (interface{}, error) {
|
||||
return func() (interface{}, error) {
|
||||
return promAPI.Config(context.Background())
|
||||
}
|
||||
}
|
||||
|
||||
doDeleteSeries := func(matcher string, startTime time.Time, endTime time.Time) func() (interface{}, error) {
|
||||
return func() (interface{}, error) {
|
||||
return nil, promAPI.DeleteSeries(context.Background(), []string{matcher}, startTime, endTime)
|
||||
}
|
||||
}
|
||||
|
||||
doFlags := func() func() (interface{}, error) {
|
||||
return func() (interface{}, error) {
|
||||
return promAPI.Flags(context.Background())
|
||||
}
|
||||
}
|
||||
|
||||
doLabelValues := func(label string) func() (interface{}, error) {
|
||||
return func() (interface{}, error) {
|
||||
return promAPI.LabelValues(context.Background(), label)
|
||||
}
|
||||
}
|
||||
|
||||
doQuery := func(q string, ts time.Time) func() (interface{}, error) {
|
||||
return func() (interface{}, error) {
|
||||
return promAPI.Query(context.Background(), q, ts)
|
||||
}
|
||||
}
|
||||
|
||||
doQueryRange := func(q string, rng Range) func() (interface{}, error) {
|
||||
return func() (interface{}, error) {
|
||||
return promAPI.QueryRange(context.Background(), q, rng)
|
||||
}
|
||||
}
|
||||
|
||||
doSeries := func(matcher string, startTime time.Time, endTime time.Time) func() (interface{}, error) {
|
||||
return func() (interface{}, error) {
|
||||
return promAPI.Series(context.Background(), []string{matcher}, startTime, endTime)
|
||||
}
|
||||
}
|
||||
|
||||
doSnapshot := func(skipHead bool) func() (interface{}, error) {
|
||||
return func() (interface{}, error) {
|
||||
return promAPI.Snapshot(context.Background(), skipHead)
|
||||
}
|
||||
}
|
||||
|
||||
doTargets := func() func() (interface{}, error) {
|
||||
return func() (interface{}, error) {
|
||||
return promAPI.Targets(context.Background())
|
||||
}
|
||||
}
|
||||
|
||||
queryTests := []apiTest{
|
||||
{
|
||||
do: doQuery("2", testTime),
|
||||
inRes: &queryResult{
|
||||
Type: model.ValScalar,
|
||||
Result: &model.Scalar{
|
||||
Value: 2,
|
||||
Timestamp: model.TimeFromUnix(testTime.Unix()),
|
||||
},
|
||||
},
|
||||
|
||||
reqMethod: "GET",
|
||||
reqPath: "/api/v1/query",
|
||||
reqParam: url.Values{
|
||||
"query": []string{"2"},
|
||||
"time": []string{testTime.Format(time.RFC3339Nano)},
|
||||
},
|
||||
res: &model.Scalar{
|
||||
Value: 2,
|
||||
Timestamp: model.TimeFromUnix(testTime.Unix()),
|
||||
},
|
||||
},
|
||||
{
|
||||
do: doQuery("2", testTime),
|
||||
inErr: fmt.Errorf("some error"),
|
||||
|
||||
reqMethod: "GET",
|
||||
reqPath: "/api/v1/query",
|
||||
reqParam: url.Values{
|
||||
"query": []string{"2"},
|
||||
"time": []string{testTime.Format(time.RFC3339Nano)},
|
||||
},
|
||||
err: fmt.Errorf("some error"),
|
||||
},
|
||||
|
||||
{
|
||||
do: doQueryRange("2", Range{
|
||||
Start: testTime.Add(-time.Minute),
|
||||
End: testTime,
|
||||
Step: time.Minute,
|
||||
}),
|
||||
inErr: fmt.Errorf("some error"),
|
||||
|
||||
reqMethod: "GET",
|
||||
reqPath: "/api/v1/query_range",
|
||||
reqParam: url.Values{
|
||||
"query": []string{"2"},
|
||||
"start": []string{testTime.Add(-time.Minute).Format(time.RFC3339Nano)},
|
||||
"end": []string{testTime.Format(time.RFC3339Nano)},
|
||||
"step": []string{time.Minute.String()},
|
||||
},
|
||||
err: fmt.Errorf("some error"),
|
||||
},
|
||||
|
||||
{
|
||||
do: doLabelValues("mylabel"),
|
||||
inRes: []string{"val1", "val2"},
|
||||
reqMethod: "GET",
|
||||
reqPath: "/api/v1/label/mylabel/values",
|
||||
res: model.LabelValues{"val1", "val2"},
|
||||
},
|
||||
|
||||
{
|
||||
do: doLabelValues("mylabel"),
|
||||
inErr: fmt.Errorf("some error"),
|
||||
reqMethod: "GET",
|
||||
reqPath: "/api/v1/label/mylabel/values",
|
||||
err: fmt.Errorf("some error"),
|
||||
},
|
||||
|
||||
{
|
||||
do: doSeries("up", testTime.Add(-time.Minute), testTime),
|
||||
inRes: []map[string]string{
|
||||
{
|
||||
"__name__": "up",
|
||||
"job": "prometheus",
|
||||
"instance": "localhost:9090"},
|
||||
},
|
||||
reqMethod: "GET",
|
||||
reqPath: "/api/v1/series",
|
||||
reqParam: url.Values{
|
||||
"match": []string{"up"},
|
||||
"start": []string{testTime.Add(-time.Minute).Format(time.RFC3339Nano)},
|
||||
"end": []string{testTime.Format(time.RFC3339Nano)},
|
||||
},
|
||||
res: []model.LabelSet{
|
||||
model.LabelSet{
|
||||
"__name__": "up",
|
||||
"job": "prometheus",
|
||||
"instance": "localhost:9090",
|
||||
},
|
||||
},
|
||||
},
|
||||
|
||||
{
|
||||
do: doSeries("up", testTime.Add(-time.Minute), testTime),
|
||||
inErr: fmt.Errorf("some error"),
|
||||
reqMethod: "GET",
|
||||
reqPath: "/api/v1/series",
|
||||
reqParam: url.Values{
|
||||
"match": []string{"up"},
|
||||
"start": []string{testTime.Add(-time.Minute).Format(time.RFC3339Nano)},
|
||||
"end": []string{testTime.Format(time.RFC3339Nano)},
|
||||
},
|
||||
err: fmt.Errorf("some error"),
|
||||
},
|
||||
|
||||
{
|
||||
do: doSnapshot(true),
|
||||
inRes: map[string]string{
|
||||
"name": "20171210T211224Z-2be650b6d019eb54",
|
||||
},
|
||||
reqMethod: "POST",
|
||||
reqPath: "/api/v1/admin/tsdb/snapshot",
|
||||
reqParam: url.Values{
|
||||
"skip_head": []string{"true"},
|
||||
},
|
||||
res: SnapshotResult{
|
||||
Name: "20171210T211224Z-2be650b6d019eb54",
|
||||
},
|
||||
},
|
||||
|
||||
{
|
||||
do: doSnapshot(true),
|
||||
inErr: fmt.Errorf("some error"),
|
||||
reqMethod: "POST",
|
||||
reqPath: "/api/v1/admin/tsdb/snapshot",
|
||||
err: fmt.Errorf("some error"),
|
||||
},
|
||||
|
||||
{
|
||||
do: doCleanTombstones(),
|
||||
reqMethod: "POST",
|
||||
reqPath: "/api/v1/admin/tsdb/clean_tombstones",
|
||||
},
|
||||
|
||||
{
|
||||
do: doCleanTombstones(),
|
||||
inErr: fmt.Errorf("some error"),
|
||||
reqMethod: "POST",
|
||||
reqPath: "/api/v1/admin/tsdb/clean_tombstones",
|
||||
err: fmt.Errorf("some error"),
|
||||
},
|
||||
|
||||
{
|
||||
do: doDeleteSeries("up", testTime.Add(-time.Minute), testTime),
|
||||
inRes: []map[string]string{
|
||||
{
|
||||
"__name__": "up",
|
||||
"job": "prometheus",
|
||||
"instance": "localhost:9090"},
|
||||
},
|
||||
reqMethod: "POST",
|
||||
reqPath: "/api/v1/admin/tsdb/delete_series",
|
||||
reqParam: url.Values{
|
||||
"match": []string{"up"},
|
||||
"start": []string{testTime.Add(-time.Minute).Format(time.RFC3339Nano)},
|
||||
"end": []string{testTime.Format(time.RFC3339Nano)},
|
||||
},
|
||||
},
|
||||
|
||||
{
|
||||
do: doDeleteSeries("up", testTime.Add(-time.Minute), testTime),
|
||||
inErr: fmt.Errorf("some error"),
|
||||
reqMethod: "POST",
|
||||
reqPath: "/api/v1/admin/tsdb/delete_series",
|
||||
reqParam: url.Values{
|
||||
"match": []string{"up"},
|
||||
"start": []string{testTime.Add(-time.Minute).Format(time.RFC3339Nano)},
|
||||
"end": []string{testTime.Format(time.RFC3339Nano)},
|
||||
},
|
||||
err: fmt.Errorf("some error"),
|
||||
},
|
||||
|
||||
{
|
||||
do: doConfig(),
|
||||
reqMethod: "GET",
|
||||
reqPath: "/api/v1/status/config",
|
||||
inRes: map[string]string{
|
||||
"yaml": "<content of the loaded config file in YAML>",
|
||||
},
|
||||
res: ConfigResult{
|
||||
YAML: "<content of the loaded config file in YAML>",
|
||||
},
|
||||
},
|
||||
|
||||
{
|
||||
do: doConfig(),
|
||||
reqMethod: "GET",
|
||||
reqPath: "/api/v1/status/config",
|
||||
inErr: fmt.Errorf("some error"),
|
||||
err: fmt.Errorf("some error"),
|
||||
},
|
||||
|
||||
{
|
||||
do: doFlags(),
|
||||
reqMethod: "GET",
|
||||
reqPath: "/api/v1/status/flags",
|
||||
inRes: map[string]string{
|
||||
"alertmanager.notification-queue-capacity": "10000",
|
||||
"alertmanager.timeout": "10s",
|
||||
"log.level": "info",
|
||||
"query.lookback-delta": "5m",
|
||||
"query.max-concurrency": "20",
|
||||
},
|
||||
res: FlagsResult{
|
||||
"alertmanager.notification-queue-capacity": "10000",
|
||||
"alertmanager.timeout": "10s",
|
||||
"log.level": "info",
|
||||
"query.lookback-delta": "5m",
|
||||
"query.max-concurrency": "20",
|
||||
},
|
||||
},
|
||||
|
||||
{
|
||||
do: doFlags(),
|
||||
reqMethod: "GET",
|
||||
reqPath: "/api/v1/status/flags",
|
||||
inErr: fmt.Errorf("some error"),
|
||||
err: fmt.Errorf("some error"),
|
||||
},
|
||||
|
||||
{
|
||||
do: doAlertManagers(),
|
||||
reqMethod: "GET",
|
||||
reqPath: "/api/v1/alertmanagers",
|
||||
inRes: map[string]interface{}{
|
||||
"activeAlertManagers": []map[string]string{
|
||||
{
|
||||
"url": "http://127.0.0.1:9091/api/v1/alerts",
|
||||
},
|
||||
},
|
||||
"droppedAlertManagers": []map[string]string{
|
||||
{
|
||||
"url": "http://127.0.0.1:9092/api/v1/alerts",
|
||||
},
|
||||
},
|
||||
},
|
||||
res: AlertManagersResult{
|
||||
Active: []AlertManager{
|
||||
{
|
||||
URL: "http://127.0.0.1:9091/api/v1/alerts",
|
||||
},
|
||||
},
|
||||
Dropped: []AlertManager{
|
||||
{
|
||||
URL: "http://127.0.0.1:9092/api/v1/alerts",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
|
||||
{
|
||||
do: doAlertManagers(),
|
||||
reqMethod: "GET",
|
||||
reqPath: "/api/v1/alertmanagers",
|
||||
inErr: fmt.Errorf("some error"),
|
||||
err: fmt.Errorf("some error"),
|
||||
},
|
||||
|
||||
{
|
||||
do: doTargets(),
|
||||
reqMethod: "GET",
|
||||
reqPath: "/api/v1/targets",
|
||||
inRes: map[string]interface{}{
|
||||
"activeTargets": []map[string]interface{}{
|
||||
{
|
||||
"discoveredLabels": map[string]string{
|
||||
"__address__": "127.0.0.1:9090",
|
||||
"__metrics_path__": "/metrics",
|
||||
"__scheme__": "http",
|
||||
"job": "prometheus",
|
||||
},
|
||||
"labels": map[string]string{
|
||||
"instance": "127.0.0.1:9090",
|
||||
"job": "prometheus",
|
||||
},
|
||||
"scrapeUrl": "http://127.0.0.1:9090",
|
||||
"lastError": "error while scraping target",
|
||||
"lastScrape": testTime.UTC().Format(time.RFC3339Nano),
|
||||
"health": "up",
|
||||
},
|
||||
},
|
||||
"droppedTargets": []map[string]interface{}{
|
||||
{
|
||||
"discoveredLabels": map[string]string{
|
||||
"__address__": "127.0.0.1:9100",
|
||||
"__metrics_path__": "/metrics",
|
||||
"__scheme__": "http",
|
||||
"job": "node",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
res: TargetsResult{
|
||||
Active: []ActiveTarget{
|
||||
{
|
||||
DiscoveredLabels: model.LabelSet{
|
||||
"__address__": "127.0.0.1:9090",
|
||||
"__metrics_path__": "/metrics",
|
||||
"__scheme__": "http",
|
||||
"job": "prometheus",
|
||||
},
|
||||
Labels: model.LabelSet{
|
||||
"instance": "127.0.0.1:9090",
|
||||
"job": "prometheus",
|
||||
},
|
||||
ScrapeURL: "http://127.0.0.1:9090",
|
||||
LastError: "error while scraping target",
|
||||
LastScrape: testTime.UTC(),
|
||||
Health: HealthGood,
|
||||
},
|
||||
},
|
||||
Dropped: []DroppedTarget{
|
||||
{
|
||||
DiscoveredLabels: model.LabelSet{
|
||||
"__address__": "127.0.0.1:9100",
|
||||
"__metrics_path__": "/metrics",
|
||||
"__scheme__": "http",
|
||||
"job": "node",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
|
||||
{
|
||||
do: doTargets(),
|
||||
reqMethod: "GET",
|
||||
reqPath: "/api/v1/targets",
|
||||
inErr: fmt.Errorf("some error"),
|
||||
err: fmt.Errorf("some error"),
|
||||
},
|
||||
}
|
||||
|
||||
var tests []apiTest
|
||||
tests = append(tests, queryTests...)
|
||||
|
||||
for _, test := range tests {
|
||||
client.curTest = test
|
||||
|
||||
res, err := test.do()
|
||||
|
||||
if test.err != nil {
|
||||
if err == nil {
|
||||
t.Errorf("expected error %q but got none", test.err)
|
||||
continue
|
||||
}
|
||||
if err.Error() != test.err.Error() {
|
||||
t.Errorf("unexpected error: want %s, got %s", test.err, err)
|
||||
}
|
||||
continue
|
||||
}
|
||||
if err != nil {
|
||||
t.Errorf("unexpected error: %s", err)
|
||||
continue
|
||||
}
|
||||
|
||||
if !reflect.DeepEqual(res, test.res) {
|
||||
t.Errorf("unexpected result: want %v, got %v", test.res, res)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
type testClient struct {
|
||||
*testing.T
|
||||
|
||||
ch chan apiClientTest
|
||||
req *http.Request
|
||||
}
|
||||
|
||||
type apiClientTest struct {
|
||||
code int
|
||||
response interface{}
|
||||
expected string
|
||||
err *Error
|
||||
}
|
||||
|
||||
func (c *testClient) URL(ep string, args map[string]string) *url.URL {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *testClient) Do(ctx context.Context, req *http.Request) (*http.Response, []byte, error) {
|
||||
if ctx == nil {
|
||||
c.Fatalf("context was not passed down")
|
||||
}
|
||||
if req != c.req {
|
||||
c.Fatalf("request was not passed down")
|
||||
}
|
||||
|
||||
test := <-c.ch
|
||||
|
||||
var b []byte
|
||||
var err error
|
||||
|
||||
switch v := test.response.(type) {
|
||||
case string:
|
||||
b = []byte(v)
|
||||
default:
|
||||
b, err = json.Marshal(v)
|
||||
if err != nil {
|
||||
c.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
resp := &http.Response{
|
||||
StatusCode: test.code,
|
||||
}
|
||||
|
||||
return resp, b, nil
|
||||
}
|
||||
|
||||
func TestAPIClientDo(t *testing.T) {
|
||||
tests := []apiClientTest{
|
||||
{
|
||||
response: &apiResponse{
|
||||
Status: "error",
|
||||
Data: json.RawMessage(`null`),
|
||||
ErrorType: ErrBadData,
|
||||
Error: "failed",
|
||||
},
|
||||
err: &Error{
|
||||
Type: ErrBadData,
|
||||
Msg: "failed",
|
||||
},
|
||||
code: statusAPIError,
|
||||
expected: `null`,
|
||||
},
|
||||
{
|
||||
response: &apiResponse{
|
||||
Status: "error",
|
||||
Data: json.RawMessage(`"test"`),
|
||||
ErrorType: ErrTimeout,
|
||||
Error: "timed out",
|
||||
},
|
||||
err: &Error{
|
||||
Type: ErrTimeout,
|
||||
Msg: "timed out",
|
||||
},
|
||||
code: statusAPIError,
|
||||
expected: `test`,
|
||||
},
|
||||
{
|
||||
response: "bad json",
|
||||
err: &Error{
|
||||
Type: ErrBadResponse,
|
||||
Msg: "bad response code 500",
|
||||
},
|
||||
code: http.StatusInternalServerError,
|
||||
},
|
||||
{
|
||||
response: &apiResponse{
|
||||
Status: "error",
|
||||
Data: json.RawMessage(`null`),
|
||||
ErrorType: ErrBadData,
|
||||
Error: "end timestamp must not be before start time",
|
||||
},
|
||||
err: &Error{
|
||||
Type: ErrBadData,
|
||||
Msg: "end timestamp must not be before start time",
|
||||
},
|
||||
code: http.StatusBadRequest,
|
||||
},
|
||||
{
|
||||
response: "bad json",
|
||||
err: &Error{
|
||||
Type: ErrBadResponse,
|
||||
Msg: "invalid character 'b' looking for beginning of value",
|
||||
},
|
||||
code: statusAPIError,
|
||||
},
|
||||
{
|
||||
response: &apiResponse{
|
||||
Status: "success",
|
||||
Data: json.RawMessage(`"test"`),
|
||||
},
|
||||
err: &Error{
|
||||
Type: ErrBadResponse,
|
||||
Msg: "inconsistent body for response code",
|
||||
},
|
||||
code: statusAPIError,
|
||||
},
|
||||
{
|
||||
response: &apiResponse{
|
||||
Status: "success",
|
||||
Data: json.RawMessage(`"test"`),
|
||||
ErrorType: ErrTimeout,
|
||||
Error: "timed out",
|
||||
},
|
||||
err: &Error{
|
||||
Type: ErrBadResponse,
|
||||
Msg: "inconsistent body for response code",
|
||||
},
|
||||
code: statusAPIError,
|
||||
},
|
||||
{
|
||||
response: &apiResponse{
|
||||
Status: "error",
|
||||
Data: json.RawMessage(`"test"`),
|
||||
ErrorType: ErrTimeout,
|
||||
Error: "timed out",
|
||||
},
|
||||
err: &Error{
|
||||
Type: ErrBadResponse,
|
||||
Msg: "inconsistent body for response code",
|
||||
},
|
||||
code: http.StatusOK,
|
||||
},
|
||||
}
|
||||
|
||||
tc := &testClient{
|
||||
T: t,
|
||||
ch: make(chan apiClientTest, 1),
|
||||
req: &http.Request{},
|
||||
}
|
||||
client := &apiClient{tc}
|
||||
|
||||
for _, test := range tests {
|
||||
|
||||
tc.ch <- test
|
||||
|
||||
_, body, err := client.Do(context.Background(), tc.req)
|
||||
|
||||
if test.err != nil {
|
||||
if err == nil {
|
||||
t.Errorf("expected error %q but got none", test.err)
|
||||
continue
|
||||
}
|
||||
if test.err.Error() != err.Error() {
|
||||
t.Errorf("unexpected error: want %q, got %q", test.err, err)
|
||||
}
|
||||
continue
|
||||
}
|
||||
if err != nil {
|
||||
t.Errorf("unexpeceted error %s", err)
|
||||
continue
|
||||
}
|
||||
|
||||
want, got := test.expected, string(body)
|
||||
if want != got {
|
||||
t.Errorf("unexpected body: want %q, got %q", want, got)
|
||||
}
|
||||
}
|
||||
}
|
106
vendor/github.com/prometheus/client_golang/examples/random/main.go
generated
vendored
Normal file
106
vendor/github.com/prometheus/client_golang/examples/random/main.go
generated
vendored
Normal file
|
@ -0,0 +1,106 @@
|
|||
// Copyright 2015 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// A simple example exposing fictional RPC latencies with different types of
|
||||
// random distributions (uniform, normal, and exponential) as Prometheus
|
||||
// metrics.
|
||||
package main
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"log"
|
||||
"math"
|
||||
"math/rand"
|
||||
"net/http"
|
||||
"time"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/promhttp"
|
||||
)
|
||||
|
||||
var (
|
||||
addr = flag.String("listen-address", ":8080", "The address to listen on for HTTP requests.")
|
||||
uniformDomain = flag.Float64("uniform.domain", 0.0002, "The domain for the uniform distribution.")
|
||||
normDomain = flag.Float64("normal.domain", 0.0002, "The domain for the normal distribution.")
|
||||
normMean = flag.Float64("normal.mean", 0.00001, "The mean for the normal distribution.")
|
||||
oscillationPeriod = flag.Duration("oscillation-period", 10*time.Minute, "The duration of the rate oscillation period.")
|
||||
)
|
||||
|
||||
var (
|
||||
// Create a summary to track fictional interservice RPC latencies for three
|
||||
// distinct services with different latency distributions. These services are
|
||||
// differentiated via a "service" label.
|
||||
rpcDurations = prometheus.NewSummaryVec(
|
||||
prometheus.SummaryOpts{
|
||||
Name: "rpc_durations_seconds",
|
||||
Help: "RPC latency distributions.",
|
||||
Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
|
||||
},
|
||||
[]string{"service"},
|
||||
)
|
||||
// The same as above, but now as a histogram, and only for the normal
|
||||
// distribution. The buckets are targeted to the parameters of the
|
||||
// normal distribution, with 20 buckets centered on the mean, each
|
||||
// half-sigma wide.
|
||||
rpcDurationsHistogram = prometheus.NewHistogram(prometheus.HistogramOpts{
|
||||
Name: "rpc_durations_histogram_seconds",
|
||||
Help: "RPC latency distributions.",
|
||||
Buckets: prometheus.LinearBuckets(*normMean-5**normDomain, .5**normDomain, 20),
|
||||
})
|
||||
)
|
||||
|
||||
func init() {
|
||||
// Register the summary and the histogram with Prometheus's default registry.
|
||||
prometheus.MustRegister(rpcDurations)
|
||||
prometheus.MustRegister(rpcDurationsHistogram)
|
||||
}
|
||||
|
||||
func main() {
|
||||
flag.Parse()
|
||||
|
||||
start := time.Now()
|
||||
|
||||
oscillationFactor := func() float64 {
|
||||
return 2 + math.Sin(math.Sin(2*math.Pi*float64(time.Since(start))/float64(*oscillationPeriod)))
|
||||
}
|
||||
|
||||
// Periodically record some sample latencies for the three services.
|
||||
go func() {
|
||||
for {
|
||||
v := rand.Float64() * *uniformDomain
|
||||
rpcDurations.WithLabelValues("uniform").Observe(v)
|
||||
time.Sleep(time.Duration(100*oscillationFactor()) * time.Millisecond)
|
||||
}
|
||||
}()
|
||||
|
||||
go func() {
|
||||
for {
|
||||
v := (rand.NormFloat64() * *normDomain) + *normMean
|
||||
rpcDurations.WithLabelValues("normal").Observe(v)
|
||||
rpcDurationsHistogram.Observe(v)
|
||||
time.Sleep(time.Duration(75*oscillationFactor()) * time.Millisecond)
|
||||
}
|
||||
}()
|
||||
|
||||
go func() {
|
||||
for {
|
||||
v := rand.ExpFloat64() / 1e6
|
||||
rpcDurations.WithLabelValues("exponential").Observe(v)
|
||||
time.Sleep(time.Duration(50*oscillationFactor()) * time.Millisecond)
|
||||
}
|
||||
}()
|
||||
|
||||
// Expose the registered metrics via HTTP.
|
||||
http.Handle("/metrics", promhttp.Handler())
|
||||
log.Fatal(http.ListenAndServe(*addr, nil))
|
||||
}
|
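As a side note on the bucket layout above, here is a minimal illustrative sketch (not from the vendored sources) of what that LinearBuckets call evaluates to with the default flag values, i.e. 20 buckets each half a sigma wide, starting five sigma below the mean.

package main

import (
    "fmt"

    "github.com/prometheus/client_golang/prometheus"
)

func main() {
    // Default flag values from the example above.
    normMean, normDomain := 0.00001, 0.0002

    // 20 linear buckets, each half a sigma wide, centered on the mean.
    buckets := prometheus.LinearBuckets(normMean-5*normDomain, .5*normDomain, 20)
    fmt.Println(len(buckets), buckets[0], buckets[len(buckets)-1])
    // Prints roughly: 20 -0.00099 0.00091 (modulo floating-point rounding)
}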
31
vendor/github.com/prometheus/client_golang/examples/simple/main.go
generated
vendored
Normal file
|
@ -0,0 +1,31 @@
|
|||
// Copyright 2015 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// A minimal example of how to include Prometheus instrumentation.
|
||||
package main
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"log"
|
||||
"net/http"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus/promhttp"
|
||||
)
|
||||
|
||||
var addr = flag.String("listen-address", ":8080", "The address to listen on for HTTP requests.")
|
||||
|
||||
func main() {
|
||||
flag.Parse()
|
||||
http.Handle("/metrics", promhttp.Handler())
|
||||
log.Fatal(http.ListenAndServe(*addr, nil))
|
||||
}
|
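As a hedged sketch of how the minimal example above is typically grown into real instrumentation, one can register an application metric alongside the handler. The counter name app_requests_total and the root handler below are made up for illustration.

package main

import (
    "flag"
    "log"
    "net/http"

    "github.com/prometheus/client_golang/prometheus"
    "github.com/prometheus/client_golang/prometheus/promhttp"
)

var addr = flag.String("listen-address", ":8080", "The address to listen on for HTTP requests.")

// requestCount is a hypothetical application metric added for illustration.
var requestCount = prometheus.NewCounter(prometheus.CounterOpts{
    Name: "app_requests_total",
    Help: "Total HTTP requests handled by the example app.",
})

func main() {
    flag.Parse()
    prometheus.MustRegister(requestCount)
    http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
        requestCount.Inc()
        w.Write([]byte("ok"))
    })
    http.Handle("/metrics", promhttp.Handler())
    log.Fatal(http.ListenAndServe(*addr, nil))
}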
72
vendor/github.com/prometheus/client_golang/prometheus/collector.go
generated
vendored
|
@ -29,27 +29,71 @@ type Collector interface {
|
|||
// collected by this Collector to the provided channel and returns once
|
||||
// the last descriptor has been sent. The sent descriptors fulfill the
|
||||
// consistency and uniqueness requirements described in the Desc
|
||||
// documentation. (It is valid if one and the same Collector sends
|
||||
// duplicate descriptors. Those duplicates are simply ignored. However,
|
||||
// two different Collectors must not send duplicate descriptors.) This
|
||||
// method idempotently sends the same descriptors throughout the
|
||||
// lifetime of the Collector. If a Collector encounters an error while
|
||||
// executing this method, it must send an invalid descriptor (created
|
||||
// with NewInvalidDesc) to signal the error to the registry.
|
||||
// documentation.
|
||||
//
|
||||
// It is valid if one and the same Collector sends duplicate
|
||||
// descriptors. Those duplicates are simply ignored. However, two
|
||||
// different Collectors must not send duplicate descriptors.
|
||||
//
|
||||
// Sending no descriptor at all marks the Collector as “unchecked”,
|
||||
// i.e. no checks will be performed at registration time, and the
|
||||
// Collector may yield any Metric it sees fit in its Collect method.
|
||||
//
|
||||
// This method idempotently sends the same descriptors throughout the
|
||||
// lifetime of the Collector.
|
||||
//
|
||||
// If a Collector encounters an error while executing this method, it
|
||||
// must send an invalid descriptor (created with NewInvalidDesc) to
|
||||
// signal the error to the registry.
|
||||
Describe(chan<- *Desc)
|
||||
// Collect is called by the Prometheus registry when collecting
|
||||
// metrics. The implementation sends each collected metric via the
|
||||
// provided channel and returns once the last metric has been sent. The
|
||||
// descriptor of each sent metric is one of those returned by
|
||||
// Describe. Returned metrics that share the same descriptor must differ
|
||||
// in their variable label values. This method may be called
|
||||
// concurrently and must therefore be implemented in a concurrency safe
|
||||
// way. Blocking occurs at the expense of total performance of rendering
|
||||
// all registered metrics. Ideally, Collector implementations support
|
||||
// concurrent readers.
|
||||
// descriptor of each sent metric is one of those returned by Describe
|
||||
// (unless the Collector is unchecked, see above). Returned metrics that
|
||||
// share the same descriptor must differ in their variable label
|
||||
// values.
|
||||
//
|
||||
// This method may be called concurrently and must therefore be
|
||||
// implemented in a concurrency safe way. Blocking occurs at the expense
|
||||
// of total performance of rendering all registered metrics. Ideally,
|
||||
// Collector implementations support concurrent readers.
|
||||
Collect(chan<- Metric)
|
||||
}
|
||||
|
||||
// DescribeByCollect is a helper to implement the Describe method of a custom
|
||||
// Collector. It collects the metrics from the provided Collector and sends
|
||||
// their descriptors to the provided channel.
|
||||
//
|
||||
// If a Collector collects the same metrics throughout its lifetime, its
|
||||
// Describe method can simply be implemented as:
|
||||
//
|
||||
// func (c customCollector) Describe(ch chan<- *Desc) {
|
||||
// DescribeByCollect(c, ch)
|
||||
// }
|
||||
//
|
||||
// However, this will not work if the metrics collected change dynamically over
|
||||
// the lifetime of the Collector in a way that their combined set of descriptors
|
||||
// changes as well. The shortcut implementation will then violate the contract
|
||||
// of the Describe method. If a Collector sometimes collects no metrics at all
|
||||
// (for example vectors like CounterVec, GaugeVec, etc., which only collect
|
||||
// metrics after a metric with a fully specified label set has been accessed),
|
||||
// it might even get registered as an unchecked Collector (cf. the Register
|
||||
// method of the Registerer interface). Hence, only use this shortcut
|
||||
// implementation of Describe if you are certain to fulfill the contract.
|
||||
//
|
||||
// The Collector example demonstrates a use of DescribeByCollect.
|
||||
func DescribeByCollect(c Collector, descs chan<- *Desc) {
|
||||
metrics := make(chan Metric)
|
||||
go func() {
|
||||
c.Collect(metrics)
|
||||
close(metrics)
|
||||
}()
|
||||
for m := range metrics {
|
||||
descs <- m.Desc()
|
||||
}
|
||||
}
|
||||
|
||||
// selfCollector implements Collector for a single Metric so that the Metric
|
||||
// collects itself. Add it as an anonymous field to a struct that implements
|
||||
// Metric, and call init with the Metric itself as an argument.
|
||||
|
|
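To make the DescribeByCollect contract above concrete, here is a minimal sketch of a custom Collector that delegates Describe to it. The gatewayCollector type and its metric name are assumptions for illustration; the pattern is only safe when, as documented above, the collected metrics (and hence their descriptors) never change over the Collector's lifetime.

package main

import "github.com/prometheus/client_golang/prometheus"

// gatewayCollector is a hypothetical Collector that always yields the same
// single counter, so DescribeByCollect is safe to use for Describe.
type gatewayCollector struct {
    requests prometheus.Counter
}

func (g gatewayCollector) Describe(ch chan<- *prometheus.Desc) {
    // Delegate to Collect via the DescribeByCollect helper.
    prometheus.DescribeByCollect(g, ch)
}

func (g gatewayCollector) Collect(ch chan<- prometheus.Metric) {
    ch <- g.requests
}

func main() {
    c := gatewayCollector{
        requests: prometheus.NewCounter(prometheus.CounterOpts{
            Name: "gateway_requests_total",
            Help: "Total requests seen by the gateway.",
        }),
    }
    prometheus.MustRegister(c)
}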
62
vendor/github.com/prometheus/client_golang/prometheus/collector_test.go
generated
vendored
Normal file
|
@ -0,0 +1,62 @@
|
|||
// Copyright 2018 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package prometheus
|
||||
|
||||
import "testing"
|
||||
|
||||
type collectorDescribedByCollect struct {
|
||||
cnt Counter
|
||||
gge Gauge
|
||||
}
|
||||
|
||||
func (c collectorDescribedByCollect) Collect(ch chan<- Metric) {
|
||||
ch <- c.cnt
|
||||
ch <- c.gge
|
||||
}
|
||||
|
||||
func (c collectorDescribedByCollect) Describe(ch chan<- *Desc) {
|
||||
DescribeByCollect(c, ch)
|
||||
}
|
||||
|
||||
func TestDescribeByCollect(t *testing.T) {
|
||||
|
||||
goodCollector := collectorDescribedByCollect{
|
||||
cnt: NewCounter(CounterOpts{Name: "c1", Help: "help c1"}),
|
||||
gge: NewGauge(GaugeOpts{Name: "g1", Help: "help g1"}),
|
||||
}
|
||||
collidingCollector := collectorDescribedByCollect{
|
||||
cnt: NewCounter(CounterOpts{Name: "c2", Help: "help c2"}),
|
||||
gge: NewGauge(GaugeOpts{Name: "g1", Help: "help g1"}),
|
||||
}
|
||||
inconsistentCollector := collectorDescribedByCollect{
|
||||
cnt: NewCounter(CounterOpts{Name: "c3", Help: "help c3"}),
|
||||
gge: NewGauge(GaugeOpts{Name: "c3", Help: "help inconsistent"}),
|
||||
}
|
||||
|
||||
reg := NewPedanticRegistry()
|
||||
|
||||
if err := reg.Register(goodCollector); err != nil {
|
||||
t.Error("registration failed:", err)
|
||||
}
|
||||
if err := reg.Register(collidingCollector); err == nil {
|
||||
t.Error("registration unexpectedly succeeded")
|
||||
}
|
||||
if err := reg.Register(inconsistentCollector); err == nil {
|
||||
t.Error("registration unexpectedly succeeded")
|
||||
}
|
||||
|
||||
if _, err := reg.Gather(); err != nil {
|
||||
t.Error("gathering failed:", err)
|
||||
}
|
||||
}
|
8
vendor/github.com/prometheus/client_golang/prometheus/desc.go
generated
vendored
|
@ -67,7 +67,7 @@ type Desc struct {
|
|||
|
||||
// NewDesc allocates and initializes a new Desc. Errors are recorded in the Desc
|
||||
// and will be reported on registration time. variableLabels and constLabels can
|
||||
// be nil if no such labels should be set. fqName and help must not be empty.
|
||||
// be nil if no such labels should be set. fqName must not be empty.
|
||||
//
|
||||
// variableLabels only contain the label names. Their label values are variable
|
||||
// and therefore not part of the Desc. (They are managed within the Metric.)
|
||||
|
@ -80,10 +80,6 @@ func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) *
|
|||
help: help,
|
||||
variableLabels: variableLabels,
|
||||
}
|
||||
if help == "" {
|
||||
d.err = errors.New("empty help string")
|
||||
return d
|
||||
}
|
||||
if !model.IsValidMetricName(model.LabelValue(fqName)) {
|
||||
d.err = fmt.Errorf("%q is not a valid metric name", fqName)
|
||||
return d
|
||||
|
@ -156,7 +152,7 @@ func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) *
|
|||
Value: proto.String(v),
|
||||
})
|
||||
}
|
||||
sort.Sort(LabelPairSorter(d.constLabelPairs))
|
||||
sort.Sort(labelPairSorter(d.constLabelPairs))
|
||||
return d
|
||||
}
|
||||
|
||||
|
|
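A brief sketch of what the relaxed requirement above means in practice (metric names below are made up): an empty help string no longer invalidates a Desc, while an invalid fqName still does and is reported when the Desc is used, e.g. at registration time.

package main

import (
    "fmt"

    "github.com/prometheus/client_golang/prometheus"
)

func main() {
    // Empty help string: previously rejected by NewDesc, now tolerated
    // (though setting Help is still encouraged).
    d := prometheus.NewDesc("example_widgets_total", "", nil, nil)
    fmt.Println(d)

    // An invalid fqName still yields an invalid Desc; the recorded error
    // surfaces once the Desc is actually used.
    bad := prometheus.NewDesc("not a metric name", "", nil, nil)
    fmt.Println(bad)
}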
13
vendor/github.com/prometheus/client_golang/prometheus/desc_test.go
generated
vendored
|
@ -1,3 +1,16 @@
|
|||
// Copyright 2018 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package prometheus
|
||||
|
||||
import (
|
||||
|
|
12
vendor/github.com/prometheus/client_golang/prometheus/doc.go
generated
vendored
|
@ -121,7 +121,17 @@
|
|||
// NewConstSummary (and their respective Must… versions). That will happen in
|
||||
// the Collect method. The Describe method has to return separate Desc
|
||||
// instances, representative of the “throw-away” metrics to be created later.
|
||||
// NewDesc comes in handy to create those Desc instances.
|
||||
// NewDesc comes in handy to create those Desc instances. Alternatively, you
|
||||
// could return no Desc at all, which will mark the Collector “unchecked”. No
|
||||
// checks are performed at registration time, but metric consistency will still
|
||||
// be ensured at scrape time, i.e. any inconsistencies will lead to scrape
|
||||
// errors. Thus, with unchecked Collectors, the responsibility to not collect
|
||||
// metrics that lead to inconsistencies in the total scrape result lies with the
|
||||
// implementer of the Collector. While this is not a desirable state, it is
|
||||
// sometimes necessary. The typical use case is a situation where the exact
|
||||
// metrics to be returned by a Collector cannot be predicted at registration
|
||||
// time, but the implementer has sufficient knowledge of the whole system to
|
||||
// guarantee metric consistency.
|
||||
//
|
||||
// The Collector example illustrates the use case. You can also look at the
|
||||
// source code of the processCollector (mirroring process metrics), the
|
||||
|
|
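A minimal sketch of an unchecked Collector as described above, assuming a hypothetical queueCollector whose set of metrics is only known at scrape time; because Describe sends no descriptors, consistency is only enforced while gathering.

package main

import "github.com/prometheus/client_golang/prometheus"

// queueCollector is hypothetical: the set of queues (and therefore metrics)
// is only known when Collect runs, e.g. read from an external system.
type queueCollector struct {
    queueLengths func() map[string]float64
}

// Describe sends nothing, which marks the Collector as unchecked.
func (q queueCollector) Describe(chan<- *prometheus.Desc) {}

func (q queueCollector) Collect(ch chan<- prometheus.Metric) {
    for name, length := range q.queueLengths() {
        ch <- prometheus.MustNewConstMetric(
            prometheus.NewDesc("queue_length", "Current queue length.", []string{"queue"}, nil),
            prometheus.GaugeValue, length, name,
        )
    }
}

func main() {
    prometheus.MustRegister(queueCollector{
        queueLengths: func() map[string]float64 {
            return map[string]float64{"inbox": 3}
        },
    })
}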
93
vendor/github.com/prometheus/client_golang/prometheus/example_clustermanager_test.go
generated
vendored
|
@ -17,18 +17,18 @@ import "github.com/prometheus/client_golang/prometheus"
|
|||
|
||||
// ClusterManager is an example for a system that might have been built without
|
||||
// Prometheus in mind. It models a central manager of jobs running in a
|
||||
// cluster. To turn it into something that collects Prometheus metrics, we
|
||||
// simply add the two methods required for the Collector interface.
|
||||
// cluster. Thus, we implement a custom Collector called
|
||||
// ClusterManagerCollector, which collects information from a ClusterManager
|
||||
// using its provided methods and turns them into Prometheus Metrics for
|
||||
// collection.
|
||||
//
|
||||
// An additional challenge is that multiple instances of the ClusterManager are
|
||||
// run within the same binary, each in charge of a different zone. We need to
|
||||
// make use of ConstLabels to be able to register each ClusterManager instance
|
||||
// with Prometheus.
|
||||
// make use of wrapping Registerers to be able to register each
|
||||
// ClusterManagerCollector instance with Prometheus.
|
||||
type ClusterManager struct {
|
||||
Zone string
|
||||
OOMCountDesc *prometheus.Desc
|
||||
RAMUsageDesc *prometheus.Desc
|
||||
// ... many more fields
|
||||
Zone string
|
||||
// Contains many more fields not listed in this example.
|
||||
}
|
||||
|
||||
// ReallyExpensiveAssessmentOfTheSystemState is a mock for the data gathering a
|
||||
|
@ -50,10 +50,30 @@ func (c *ClusterManager) ReallyExpensiveAssessmentOfTheSystemState() (
|
|||
return
|
||||
}
|
||||
|
||||
// Describe simply sends the two Descs in the struct to the channel.
|
||||
func (c *ClusterManager) Describe(ch chan<- *prometheus.Desc) {
|
||||
ch <- c.OOMCountDesc
|
||||
ch <- c.RAMUsageDesc
|
||||
// ClusterManagerCollector implements the Collector interface.
|
||||
type ClusterManagerCollector struct {
|
||||
ClusterManager *ClusterManager
|
||||
}
|
||||
|
||||
// Descriptors used by the ClusterManagerCollector below.
|
||||
var (
|
||||
oomCountDesc = prometheus.NewDesc(
|
||||
"clustermanager_oom_crashes_total",
|
||||
"Number of OOM crashes.",
|
||||
[]string{"host"}, nil,
|
||||
)
|
||||
ramUsageDesc = prometheus.NewDesc(
|
||||
"clustermanager_ram_usage_bytes",
|
||||
"RAM usage as reported to the cluster manager.",
|
||||
[]string{"host"}, nil,
|
||||
)
|
||||
)
|
||||
|
||||
// Describe is implemented with DescribeByCollect. That's possible because the
|
||||
// Collect method will always return the same two metrics with the same two
|
||||
// descriptors.
|
||||
func (cc ClusterManagerCollector) Describe(ch chan<- *prometheus.Desc) {
|
||||
prometheus.DescribeByCollect(cc, ch)
|
||||
}
|
||||
|
||||
// Collect first triggers the ReallyExpensiveAssessmentOfTheSystemState. Then it
|
||||
|
@ -61,11 +81,11 @@ func (c *ClusterManager) Describe(ch chan<- *prometheus.Desc) {
|
|||
//
|
||||
// Note that Collect could be called concurrently, so we depend on
|
||||
// ReallyExpensiveAssessmentOfTheSystemState to be concurrency-safe.
|
||||
func (c *ClusterManager) Collect(ch chan<- prometheus.Metric) {
|
||||
oomCountByHost, ramUsageByHost := c.ReallyExpensiveAssessmentOfTheSystemState()
|
||||
func (cc ClusterManagerCollector) Collect(ch chan<- prometheus.Metric) {
|
||||
oomCountByHost, ramUsageByHost := cc.ClusterManager.ReallyExpensiveAssessmentOfTheSystemState()
|
||||
for host, oomCount := range oomCountByHost {
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.OOMCountDesc,
|
||||
oomCountDesc,
|
||||
prometheus.CounterValue,
|
||||
float64(oomCount),
|
||||
host,
|
||||
|
@ -73,7 +93,7 @@ func (c *ClusterManager) Collect(ch chan<- prometheus.Metric) {
|
|||
}
|
||||
for host, ramUsage := range ramUsageByHost {
|
||||
ch <- prometheus.MustNewConstMetric(
|
||||
c.RAMUsageDesc,
|
||||
ramUsageDesc,
|
||||
prometheus.GaugeValue,
|
||||
ramUsage,
|
||||
host,
|
||||
|
@ -81,38 +101,27 @@ func (c *ClusterManager) Collect(ch chan<- prometheus.Metric) {
|
|||
}
|
||||
}
|
||||
|
||||
// NewClusterManager creates the two Descs OOMCountDesc and RAMUsageDesc. Note
|
||||
// that the zone is set as a ConstLabel. (It's different in each instance of the
|
||||
// ClusterManager, but constant over the lifetime of an instance.) Then there is
|
||||
// a variable label "host", since we want to partition the collected metrics by
|
||||
// host. Since all Descs created in this way are consistent across instances,
|
||||
// with a guaranteed distinction by the "zone" label, we can register different
|
||||
// ClusterManager instances with the same registry.
|
||||
func NewClusterManager(zone string) *ClusterManager {
|
||||
return &ClusterManager{
|
||||
// NewClusterManager first creates a Prometheus-ignorant ClusterManager
|
||||
// instance. Then, it creates a ClusterManagerCollector for the just created
|
||||
// ClusterManager. Finally, it registers the ClusterManagerCollector with a
|
||||
// wrapping Registerer that adds the zone as a label. In this way, the metrics
|
||||
// collected by different ClusterManagerCollectors do not collide.
|
||||
func NewClusterManager(zone string, reg prometheus.Registerer) *ClusterManager {
|
||||
c := &ClusterManager{
|
||||
Zone: zone,
|
||||
OOMCountDesc: prometheus.NewDesc(
|
||||
"clustermanager_oom_crashes_total",
|
||||
"Number of OOM crashes.",
|
||||
[]string{"host"},
|
||||
prometheus.Labels{"zone": zone},
|
||||
),
|
||||
RAMUsageDesc: prometheus.NewDesc(
|
||||
"clustermanager_ram_usage_bytes",
|
||||
"RAM usage as reported to the cluster manager.",
|
||||
[]string{"host"},
|
||||
prometheus.Labels{"zone": zone},
|
||||
),
|
||||
}
|
||||
cc := ClusterManagerCollector{ClusterManager: c}
|
||||
prometheus.WrapRegistererWith(prometheus.Labels{"zone": zone}, reg).MustRegister(cc)
|
||||
return c
|
||||
}
|
||||
|
||||
func ExampleCollector() {
|
||||
workerDB := NewClusterManager("db")
|
||||
workerCA := NewClusterManager("ca")
|
||||
|
||||
// Since we are dealing with custom Collector implementations, it might
|
||||
// be a good idea to try it out with a pedantic registry.
|
||||
reg := prometheus.NewPedanticRegistry()
|
||||
reg.MustRegister(workerDB)
|
||||
reg.MustRegister(workerCA)
|
||||
|
||||
// Construct cluster managers. In real code, we would assign them to
|
||||
// variables to then do something with them.
|
||||
NewClusterManager("db", reg)
|
||||
NewClusterManager("ca", reg)
|
||||
}
|
||||
|
|
92
vendor/github.com/prometheus/client_golang/prometheus/examples_test.go
generated
vendored
|
@ -19,13 +19,13 @@ import (
|
|||
"math"
|
||||
"net/http"
|
||||
"runtime"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
dto "github.com/prometheus/client_model/go"
|
||||
"github.com/prometheus/common/expfmt"
|
||||
"time"
|
||||
|
||||
"github.com/golang/protobuf/proto"
|
||||
"github.com/prometheus/common/expfmt"
|
||||
|
||||
dto "github.com/prometheus/client_model/go"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
)
|
||||
|
@ -89,37 +89,6 @@ func ExampleGaugeFunc() {
|
|||
// GaugeFunc 'goroutines_count' registered.
|
||||
}
|
||||
|
||||
func ExampleCounter() {
|
||||
pushCounter := prometheus.NewCounter(prometheus.CounterOpts{
|
||||
Name: "repository_pushes", // Note: No help string...
|
||||
})
|
||||
err := prometheus.Register(pushCounter) // ... so this will return an error.
|
||||
if err != nil {
|
||||
fmt.Println("Push counter couldn't be registered, no counting will happen:", err)
|
||||
return
|
||||
}
|
||||
|
||||
// Try it once more, this time with a help string.
|
||||
pushCounter = prometheus.NewCounter(prometheus.CounterOpts{
|
||||
Name: "repository_pushes",
|
||||
Help: "Number of pushes to external repository.",
|
||||
})
|
||||
err = prometheus.Register(pushCounter)
|
||||
if err != nil {
|
||||
fmt.Println("Push counter couldn't be registered AGAIN, no counting will happen:", err)
|
||||
return
|
||||
}
|
||||
|
||||
pushComplete := make(chan struct{})
|
||||
// TODO: Start a goroutine that performs repository pushes and reports
|
||||
// each completion via the channel.
|
||||
for range pushComplete {
|
||||
pushCounter.Inc()
|
||||
}
|
||||
// Output:
|
||||
// Push counter couldn't be registered, no counting will happen: descriptor Desc{fqName: "repository_pushes", help: "", constLabels: {}, variableLabels: []} is invalid: empty help string
|
||||
}
|
||||
|
||||
func ExampleCounterVec() {
|
||||
httpReqs := prometheus.NewCounterVec(
|
||||
prometheus.CounterOpts{
|
||||
|
@ -167,19 +136,6 @@ func ExampleInstrumentHandler() {
|
|||
))
|
||||
}
|
||||
|
||||
func ExampleLabelPairSorter() {
|
||||
labelPairs := []*dto.LabelPair{
|
||||
{Name: proto.String("status"), Value: proto.String("404")},
|
||||
{Name: proto.String("method"), Value: proto.String("get")},
|
||||
}
|
||||
|
||||
sort.Sort(prometheus.LabelPairSorter(labelPairs))
|
||||
|
||||
fmt.Println(labelPairs)
|
||||
// Output:
|
||||
// [name:"method" value:"get" name:"status" value:"404" ]
|
||||
}
|
||||
|
||||
func ExampleRegister() {
|
||||
// Imagine you have a worker pool and want to count the tasks completed.
|
||||
taskCounter := prometheus.NewCounter(prometheus.CounterOpts{
|
||||
|
@ -712,7 +668,7 @@ humidity_percent{location="inside"} 33.2
|
|||
# HELP temperature_kelvin Temperature in Kelvin.
|
||||
# Duplicate metric:
|
||||
temperature_kelvin{location="outside"} 265.3
|
||||
# Wrong labels:
|
||||
# Missing location label (note that this is undesirable but valid):
|
||||
temperature_kelvin 4.5
|
||||
`
|
||||
|
||||
|
@ -740,15 +696,47 @@ temperature_kelvin 4.5
|
|||
// temperature_kelvin{location="outside"} 273.14
|
||||
// temperature_kelvin{location="somewhere else"} 4.5
|
||||
// ----------
|
||||
// 2 error(s) occurred:
|
||||
// * collected metric temperature_kelvin label:<name:"location" value:"outside" > gauge:<value:265.3 > was collected before with the same name and label values
|
||||
// * collected metric temperature_kelvin gauge:<value:4.5 > has label dimensions inconsistent with previously collected metrics in the same metric family
|
||||
// collected metric "temperature_kelvin" { label:<name:"location" value:"outside" > gauge:<value:265.3 > } was collected before with the same name and label values
|
||||
// # HELP humidity_percent Humidity in %.
|
||||
// # TYPE humidity_percent gauge
|
||||
// humidity_percent{location="inside"} 33.2
|
||||
// humidity_percent{location="outside"} 45.4
|
||||
// # HELP temperature_kelvin Temperature in Kelvin.
|
||||
// # TYPE temperature_kelvin gauge
|
||||
// temperature_kelvin 4.5
|
||||
// temperature_kelvin{location="inside"} 298.44
|
||||
// temperature_kelvin{location="outside"} 273.14
|
||||
}
|
||||
|
||||
func ExampleNewMetricWithTimestamp() {
|
||||
desc := prometheus.NewDesc(
|
||||
"temperature_kelvin",
|
||||
"Current temperature in Kelvin.",
|
||||
nil, nil,
|
||||
)
|
||||
|
||||
// Create a constant gauge from values we got from an external
|
||||
// temperature reporting system. Those values are reported with a slight
|
||||
// delay, so we want to add the timestamp of the actual measurement.
|
||||
temperatureReportedByExternalSystem := 298.15
|
||||
timeReportedByExternalSystem := time.Date(2009, time.November, 10, 23, 0, 0, 12345678, time.UTC)
|
||||
s := prometheus.NewMetricWithTimestamp(
|
||||
timeReportedByExternalSystem,
|
||||
prometheus.MustNewConstMetric(
|
||||
desc, prometheus.GaugeValue, temperatureReportedByExternalSystem,
|
||||
),
|
||||
)
|
||||
|
||||
// Just for demonstration, let's check the state of the gauge by
|
||||
// (ab)using its Write method (which is usually only used by Prometheus
|
||||
// internally).
|
||||
metric := &dto.Metric{}
|
||||
s.Write(metric)
|
||||
fmt.Println(proto.MarshalTextString(metric))
|
||||
|
||||
// Output:
|
||||
// gauge: <
|
||||
// value: 298.15
|
||||
// >
|
||||
// timestamp_ms: 1257894000012
|
||||
}
|
||||
|
|
2
vendor/github.com/prometheus/client_golang/prometheus/expvar_collector_test.go
generated
vendored
|
@ -78,7 +78,7 @@ func ExampleNewExpvarCollector() {
|
|||
close(metricChan)
|
||||
}()
|
||||
for m := range metricChan {
|
||||
if strings.Index(m.Desc().String(), "expvar_memstats") == -1 {
|
||||
if !strings.Contains(m.Desc().String(), "expvar_memstats") {
|
||||
metric.Reset()
|
||||
m.Write(&metric)
|
||||
metricStrings = append(metricStrings, metric.String())
|
||||
|
|
13
vendor/github.com/prometheus/client_golang/prometheus/fnv.go
generated
vendored
|
@ -1,3 +1,16 @@
|
|||
// Copyright 2018 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package prometheus
|
||||
|
||||
// Inline and byte-free variant of hash/fnv's fnv64a.
|
||||
|
|
23
vendor/github.com/prometheus/client_golang/prometheus/go_collector.go
generated
vendored
|
@ -1,3 +1,16 @@
|
|||
// Copyright 2018 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package prometheus
|
||||
|
||||
import (
|
||||
|
@ -17,8 +30,12 @@ type goCollector struct {
|
|||
metrics memStatsMetrics
|
||||
}
|
||||
|
||||
// NewGoCollector returns a collector which exports metrics about the current
|
||||
// go process.
|
||||
// NewGoCollector returns a collector which exports metrics about the current Go
|
||||
// process. This includes memory stats. To collect those, runtime.ReadMemStats
|
||||
// is called. This causes a stop-the-world, which is very short with Go1.9+
|
||||
// (~25µs). However, with older Go versions, the stop-the-world duration depends
|
||||
// on the heap size and can be quite significant (~1.7 ms/GiB as per
|
||||
// https://go-review.googlesource.com/c/go/+/34937).
|
||||
func NewGoCollector() Collector {
|
||||
return &goCollector{
|
||||
goroutinesDesc: NewDesc(
|
||||
|
@ -265,7 +282,7 @@ func (c *goCollector) Collect(ch chan<- Metric) {
|
|||
quantiles[float64(idx+1)/float64(len(stats.PauseQuantiles)-1)] = pq.Seconds()
|
||||
}
|
||||
quantiles[0.0] = stats.PauseQuantiles[0].Seconds()
|
||||
ch <- MustNewConstSummary(c.gcDesc, uint64(stats.NumGC), float64(stats.PauseTotal.Seconds()), quantiles)
|
||||
ch <- MustNewConstSummary(c.gcDesc, uint64(stats.NumGC), stats.PauseTotal.Seconds(), quantiles)
|
||||
|
||||
ch <- MustNewConstMetric(c.goInfoDesc, GaugeValue, 1)
|
||||
|
||||
|
|
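For context on the NewGoCollector note above, a short sketch (endpoint path and port are arbitrary) of exposing the Go runtime metrics from a dedicated registry, which keeps the runtime.ReadMemStats cost isolated from other collectors.

package main

import (
    "log"
    "net/http"

    "github.com/prometheus/client_golang/prometheus"
    "github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
    // A dedicated registry with only the Go runtime metrics.
    reg := prometheus.NewRegistry()
    reg.MustRegister(prometheus.NewGoCollector())

    // Expose it separately from the default registry.
    http.Handle("/metrics/runtime", promhttp.HandlerFor(reg, promhttp.HandlerOpts{}))
    log.Fatal(http.ListenAndServe(":8080", nil))
}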
13
vendor/github.com/prometheus/client_golang/prometheus/go_collector_test.go
generated
vendored
|
@ -1,3 +1,16 @@
|
|||
// Copyright 2018 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package prometheus
|
||||
|
||||
import (
|
||||
|
|
282
vendor/github.com/prometheus/client_golang/prometheus/graphite/bridge.go
generated
vendored
Normal file
|
@ -0,0 +1,282 @@
|
|||
// Copyright 2016 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Package graphite provides a bridge to push Prometheus metrics to a Graphite
|
||||
// server.
|
||||
package graphite
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
"sort"
|
||||
"time"
|
||||
|
||||
"github.com/prometheus/common/expfmt"
|
||||
"github.com/prometheus/common/model"
|
||||
"golang.org/x/net/context"
|
||||
|
||||
dto "github.com/prometheus/client_model/go"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
)
|
||||
|
||||
const (
|
||||
defaultInterval = 15 * time.Second
|
||||
millisecondsPerSecond = 1000
|
||||
)
|
||||
|
||||
// HandlerErrorHandling defines how a Handler serving metrics will handle
|
||||
// errors.
|
||||
type HandlerErrorHandling int
|
||||
|
||||
// These constants cause handlers serving metrics to behave as described if
|
||||
// errors are encountered.
|
||||
const (
|
||||
// Ignore errors and try to push as many metrics to Graphite as possible.
|
||||
ContinueOnError HandlerErrorHandling = iota
|
||||
|
||||
// Abort the push to Graphite upon the first error encountered.
|
||||
AbortOnError
|
||||
)
|
||||
|
||||
// Config defines the Graphite bridge config.
|
||||
type Config struct {
|
||||
// The url to push data to. Required.
|
||||
URL string
|
||||
|
||||
// The prefix for the pushed Graphite metrics. Defaults to empty string.
|
||||
Prefix string
|
||||
|
||||
// The interval to use for pushing data to Graphite. Defaults to 15 seconds.
|
||||
Interval time.Duration
|
||||
|
||||
// The timeout for pushing metrics to Graphite. Defaults to 15 seconds.
|
||||
Timeout time.Duration
|
||||
|
||||
// The Gatherer to use for metrics. Defaults to prometheus.DefaultGatherer.
|
||||
Gatherer prometheus.Gatherer
|
||||
|
||||
// The logger that messages are written to. Defaults to no logging.
|
||||
Logger Logger
|
||||
|
||||
// ErrorHandling defines how errors are handled. Note that errors are
|
||||
// logged regardless of the configured ErrorHandling, provided Logger
|
||||
// is not nil.
|
||||
ErrorHandling HandlerErrorHandling
|
||||
}
|
||||
|
||||
// Bridge pushes metrics to the configured Graphite server.
|
||||
type Bridge struct {
|
||||
url string
|
||||
prefix string
|
||||
interval time.Duration
|
||||
timeout time.Duration
|
||||
|
||||
errorHandling HandlerErrorHandling
|
||||
logger Logger
|
||||
|
||||
g prometheus.Gatherer
|
||||
}
|
||||
|
||||
// Logger is the minimal interface Bridge needs for logging. Note that
|
||||
// log.Logger from the standard library implements this interface, and it is
|
||||
// easy to implement by custom loggers, if they don't do so already anyway.
|
||||
type Logger interface {
|
||||
Println(v ...interface{})
|
||||
}
|
||||
|
||||
// NewBridge returns a pointer to a new Bridge struct.
|
||||
func NewBridge(c *Config) (*Bridge, error) {
|
||||
b := &Bridge{}
|
||||
|
||||
if c.URL == "" {
|
||||
return nil, errors.New("missing URL")
|
||||
}
|
||||
b.url = c.URL
|
||||
|
||||
if c.Gatherer == nil {
|
||||
b.g = prometheus.DefaultGatherer
|
||||
} else {
|
||||
b.g = c.Gatherer
|
||||
}
|
||||
|
||||
if c.Logger != nil {
|
||||
b.logger = c.Logger
|
||||
}
|
||||
|
||||
if c.Prefix != "" {
|
||||
b.prefix = c.Prefix
|
||||
}
|
||||
|
||||
var z time.Duration
|
||||
if c.Interval == z {
|
||||
b.interval = defaultInterval
|
||||
} else {
|
||||
b.interval = c.Interval
|
||||
}
|
||||
|
||||
if c.Timeout == z {
|
||||
b.timeout = defaultInterval
|
||||
} else {
|
||||
b.timeout = c.Timeout
|
||||
}
|
||||
|
||||
b.errorHandling = c.ErrorHandling
|
||||
|
||||
return b, nil
|
||||
}
|
||||
|
||||
// Run starts the event loop that pushes Prometheus metrics to Graphite at the
|
||||
// configured interval.
|
||||
func (b *Bridge) Run(ctx context.Context) {
|
||||
ticker := time.NewTicker(b.interval)
|
||||
defer ticker.Stop()
|
||||
for {
|
||||
select {
|
||||
case <-ticker.C:
|
||||
if err := b.Push(); err != nil && b.logger != nil {
|
||||
b.logger.Println("error pushing to Graphite:", err)
|
||||
}
|
||||
case <-ctx.Done():
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Push pushes Prometheus metrics to the configured Graphite server.
|
||||
func (b *Bridge) Push() error {
|
||||
mfs, err := b.g.Gather()
|
||||
if err != nil || len(mfs) == 0 {
|
||||
switch b.errorHandling {
|
||||
case AbortOnError:
|
||||
return err
|
||||
case ContinueOnError:
|
||||
if b.logger != nil {
|
||||
b.logger.Println("continue on error:", err)
|
||||
}
|
||||
default:
|
||||
panic("unrecognized error handling value")
|
||||
}
|
||||
}
|
||||
|
||||
conn, err := net.DialTimeout("tcp", b.url, b.timeout)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer conn.Close()
|
||||
|
||||
return writeMetrics(conn, mfs, b.prefix, model.Now())
|
||||
}
|
||||
|
||||
func writeMetrics(w io.Writer, mfs []*dto.MetricFamily, prefix string, now model.Time) error {
|
||||
vec, err := expfmt.ExtractSamples(&expfmt.DecodeOptions{
|
||||
Timestamp: now,
|
||||
}, mfs...)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
buf := bufio.NewWriter(w)
|
||||
for _, s := range vec {
|
||||
for _, c := range prefix {
|
||||
if _, err := buf.WriteRune(c); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if err := buf.WriteByte('.'); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := writeMetric(buf, s.Metric); err != nil {
|
||||
return err
|
||||
}
|
||||
if _, err := fmt.Fprintf(buf, " %g %d\n", s.Value, int64(s.Timestamp)/millisecondsPerSecond); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := buf.Flush(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func writeMetric(buf *bufio.Writer, m model.Metric) error {
|
||||
metricName, hasName := m[model.MetricNameLabel]
|
||||
numLabels := len(m) - 1
|
||||
if !hasName {
|
||||
numLabels = len(m)
|
||||
}
|
||||
|
||||
labelStrings := make([]string, 0, numLabels)
|
||||
for label, value := range m {
|
||||
if label != model.MetricNameLabel {
|
||||
labelStrings = append(labelStrings, fmt.Sprintf("%s %s", string(label), string(value)))
|
||||
}
|
||||
}
|
||||
|
||||
var err error
|
||||
switch numLabels {
|
||||
case 0:
|
||||
if hasName {
|
||||
return writeSanitized(buf, string(metricName))
|
||||
}
|
||||
default:
|
||||
sort.Strings(labelStrings)
|
||||
if err = writeSanitized(buf, string(metricName)); err != nil {
|
||||
return err
|
||||
}
|
||||
for _, s := range labelStrings {
|
||||
if err = buf.WriteByte('.'); err != nil {
|
||||
return err
|
||||
}
|
||||
if err = writeSanitized(buf, s); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func writeSanitized(buf *bufio.Writer, s string) error {
|
||||
prevUnderscore := false
|
||||
|
||||
for _, c := range s {
|
||||
c = replaceInvalidRune(c)
|
||||
if c == '_' {
|
||||
if prevUnderscore {
|
||||
continue
|
||||
}
|
||||
prevUnderscore = true
|
||||
} else {
|
||||
prevUnderscore = false
|
||||
}
|
||||
if _, err := buf.WriteRune(c); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func replaceInvalidRune(c rune) rune {
|
||||
if c == ' ' {
|
||||
return '.'
|
||||
}
|
||||
if !((c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') || c == '_' || c == ':' || c == '-' || (c >= '0' && c <= '9')) {
|
||||
return '_'
|
||||
}
|
||||
return c
|
||||
}
|
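A minimal usage sketch relying on the defaults documented in Config above (15s interval and timeout, DefaultGatherer, no logging); the Graphite address and prefix are placeholders. The fuller ExampleBridge in bridge_test.go below shows the explicit form.

package main

import (
    "golang.org/x/net/context"

    "github.com/prometheus/client_golang/prometheus/graphite"
)

func main() {
    // Only URL is required; everything else falls back to the defaults
    // described in the Config comments.
    b, err := graphite.NewBridge(&graphite.Config{
        URL:    "localhost:2003", // placeholder Graphite address
        Prefix: "myapp",
    })
    if err != nil {
        panic(err)
    }

    // Push once immediately, then keep pushing every interval; Run blocks
    // until the context is cancelled.
    if err := b.Push(); err != nil {
        panic(err)
    }
    b.Run(context.Background())
}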
338
vendor/github.com/prometheus/client_golang/prometheus/graphite/bridge_test.go
generated
vendored
Normal file
|
@ -0,0 +1,338 @@
|
|||
// Copyright 2018 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package graphite
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
"net"
|
||||
"os"
|
||||
"regexp"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/prometheus/common/model"
|
||||
"golang.org/x/net/context"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
)
|
||||
|
||||
func TestSanitize(t *testing.T) {
|
||||
testCases := []struct {
|
||||
in, out string
|
||||
}{
|
||||
{in: "hello", out: "hello"},
|
||||
{in: "hE/l1o", out: "hE_l1o"},
|
||||
{in: "he,*ll(.o", out: "he_ll_o"},
|
||||
{in: "hello_there%^&", out: "hello_there_"},
|
||||
{in: "hell-.o", out: "hell-_o"},
|
||||
}
|
||||
|
||||
var buf bytes.Buffer
|
||||
w := bufio.NewWriter(&buf)
|
||||
|
||||
for i, tc := range testCases {
|
||||
if err := writeSanitized(w, tc.in); err != nil {
|
||||
t.Fatalf("write failed: %v", err)
|
||||
}
|
||||
if err := w.Flush(); err != nil {
|
||||
t.Fatalf("flush failed: %v", err)
|
||||
}
|
||||
|
||||
if want, got := tc.out, buf.String(); want != got {
|
||||
t.Fatalf("test case index %d: got sanitized string %s, want %s", i, got, want)
|
||||
}
|
||||
|
||||
buf.Reset()
|
||||
}
|
||||
}
|
||||
|
||||
func TestWriteSummary(t *testing.T) {
|
||||
sumVec := prometheus.NewSummaryVec(
|
||||
prometheus.SummaryOpts{
|
||||
Name: "name",
|
||||
Help: "docstring",
|
||||
ConstLabels: prometheus.Labels{"constname": "constvalue"},
|
||||
Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
|
||||
},
|
||||
[]string{"labelname"},
|
||||
)
|
||||
|
||||
sumVec.WithLabelValues("val1").Observe(float64(10))
|
||||
sumVec.WithLabelValues("val1").Observe(float64(20))
|
||||
sumVec.WithLabelValues("val1").Observe(float64(30))
|
||||
sumVec.WithLabelValues("val2").Observe(float64(20))
|
||||
sumVec.WithLabelValues("val2").Observe(float64(30))
|
||||
sumVec.WithLabelValues("val2").Observe(float64(40))
|
||||
|
||||
reg := prometheus.NewRegistry()
|
||||
reg.MustRegister(sumVec)
|
||||
|
||||
mfs, err := reg.Gather()
|
||||
if err != nil {
|
||||
t.Fatalf("error: %v", err)
|
||||
}
|
||||
|
||||
testCases := []struct {
|
||||
prefix string
|
||||
}{
|
||||
{prefix: "prefix"},
|
||||
{prefix: "pre/fix"},
|
||||
{prefix: "pre.fix"},
|
||||
}
|
||||
|
||||
const want = `%s.name.constname.constvalue.labelname.val1.quantile.0_5 20 1477043
|
||||
%s.name.constname.constvalue.labelname.val1.quantile.0_9 30 1477043
|
||||
%s.name.constname.constvalue.labelname.val1.quantile.0_99 30 1477043
|
||||
%s.name_sum.constname.constvalue.labelname.val1 60 1477043
|
||||
%s.name_count.constname.constvalue.labelname.val1 3 1477043
|
||||
%s.name.constname.constvalue.labelname.val2.quantile.0_5 30 1477043
|
||||
%s.name.constname.constvalue.labelname.val2.quantile.0_9 40 1477043
|
||||
%s.name.constname.constvalue.labelname.val2.quantile.0_99 40 1477043
|
||||
%s.name_sum.constname.constvalue.labelname.val2 90 1477043
|
||||
%s.name_count.constname.constvalue.labelname.val2 3 1477043
|
||||
`
|
||||
for i, tc := range testCases {
|
||||
|
||||
now := model.Time(1477043083)
|
||||
var buf bytes.Buffer
|
||||
err = writeMetrics(&buf, mfs, tc.prefix, now)
|
||||
if err != nil {
|
||||
t.Fatalf("error: %v", err)
|
||||
}
|
||||
|
||||
wantWithPrefix := fmt.Sprintf(want,
|
||||
tc.prefix, tc.prefix, tc.prefix, tc.prefix, tc.prefix,
|
||||
tc.prefix, tc.prefix, tc.prefix, tc.prefix, tc.prefix,
|
||||
)
|
||||
if got := buf.String(); wantWithPrefix != got {
|
||||
t.Fatalf("test case index %d: wanted \n%s\n, got \n%s\n", i, wantWithPrefix, got)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestWriteHistogram(t *testing.T) {
|
||||
histVec := prometheus.NewHistogramVec(
|
||||
prometheus.HistogramOpts{
|
||||
Name: "name",
|
||||
Help: "docstring",
|
||||
ConstLabels: prometheus.Labels{"constname": "constvalue"},
|
||||
Buckets: []float64{0.01, 0.02, 0.05, 0.1},
|
||||
},
|
||||
[]string{"labelname"},
|
||||
)
|
||||
|
||||
histVec.WithLabelValues("val1").Observe(float64(10))
|
||||
histVec.WithLabelValues("val1").Observe(float64(20))
|
||||
histVec.WithLabelValues("val1").Observe(float64(30))
|
||||
histVec.WithLabelValues("val2").Observe(float64(20))
|
||||
histVec.WithLabelValues("val2").Observe(float64(30))
|
||||
histVec.WithLabelValues("val2").Observe(float64(40))
|
||||
|
||||
reg := prometheus.NewRegistry()
|
||||
reg.MustRegister(histVec)
|
||||
|
||||
mfs, err := reg.Gather()
|
||||
if err != nil {
|
||||
t.Fatalf("error: %v", err)
|
||||
}
|
||||
|
||||
now := model.Time(1477043083)
|
||||
var buf bytes.Buffer
|
||||
err = writeMetrics(&buf, mfs, "prefix", now)
|
||||
if err != nil {
|
||||
t.Fatalf("error: %v", err)
|
||||
}
|
||||
|
||||
want := `prefix.name_bucket.constname.constvalue.labelname.val1.le.0_01 0 1477043
|
||||
prefix.name_bucket.constname.constvalue.labelname.val1.le.0_02 0 1477043
|
||||
prefix.name_bucket.constname.constvalue.labelname.val1.le.0_05 0 1477043
|
||||
prefix.name_bucket.constname.constvalue.labelname.val1.le.0_1 0 1477043
|
||||
prefix.name_sum.constname.constvalue.labelname.val1 60 1477043
|
||||
prefix.name_count.constname.constvalue.labelname.val1 3 1477043
|
||||
prefix.name_bucket.constname.constvalue.labelname.val1.le._Inf 3 1477043
|
||||
prefix.name_bucket.constname.constvalue.labelname.val2.le.0_01 0 1477043
|
||||
prefix.name_bucket.constname.constvalue.labelname.val2.le.0_02 0 1477043
|
||||
prefix.name_bucket.constname.constvalue.labelname.val2.le.0_05 0 1477043
|
||||
prefix.name_bucket.constname.constvalue.labelname.val2.le.0_1 0 1477043
|
||||
prefix.name_sum.constname.constvalue.labelname.val2 90 1477043
|
||||
prefix.name_count.constname.constvalue.labelname.val2 3 1477043
|
||||
prefix.name_bucket.constname.constvalue.labelname.val2.le._Inf 3 1477043
|
||||
`
|
||||
if got := buf.String(); want != got {
|
||||
t.Fatalf("wanted \n%s\n, got \n%s\n", want, got)
|
||||
}
|
||||
}
|
||||
|
||||
func TestToReader(t *testing.T) {
|
||||
cntVec := prometheus.NewCounterVec(
|
||||
prometheus.CounterOpts{
|
||||
Name: "name",
|
||||
Help: "docstring",
|
||||
ConstLabels: prometheus.Labels{"constname": "constvalue"},
|
||||
},
|
||||
[]string{"labelname"},
|
||||
)
|
||||
cntVec.WithLabelValues("val1").Inc()
|
||||
cntVec.WithLabelValues("val2").Inc()
|
||||
|
||||
reg := prometheus.NewRegistry()
|
||||
reg.MustRegister(cntVec)
|
||||
|
||||
want := `prefix.name.constname.constvalue.labelname.val1 1 1477043
|
||||
prefix.name.constname.constvalue.labelname.val2 1 1477043
|
||||
`
|
||||
mfs, err := reg.Gather()
|
||||
if err != nil {
|
||||
t.Fatalf("error: %v", err)
|
||||
}
|
||||
|
||||
now := model.Time(1477043083)
|
||||
var buf bytes.Buffer
|
||||
err = writeMetrics(&buf, mfs, "prefix", now)
|
||||
if err != nil {
|
||||
t.Fatalf("error: %v", err)
|
||||
}
|
||||
|
||||
if got := buf.String(); want != got {
|
||||
t.Fatalf("wanted \n%s\n, got \n%s\n", want, got)
|
||||
}
|
||||
}
|
||||
|
||||
func TestPush(t *testing.T) {
|
||||
reg := prometheus.NewRegistry()
|
||||
cntVec := prometheus.NewCounterVec(
|
||||
prometheus.CounterOpts{
|
||||
Name: "name",
|
||||
Help: "docstring",
|
||||
ConstLabels: prometheus.Labels{"constname": "constvalue"},
|
||||
},
|
||||
[]string{"labelname"},
|
||||
)
|
||||
cntVec.WithLabelValues("val1").Inc()
|
||||
cntVec.WithLabelValues("val2").Inc()
|
||||
reg.MustRegister(cntVec)
|
||||
|
||||
host := "localhost"
|
||||
port := ":56789"
|
||||
b, err := NewBridge(&Config{
|
||||
URL: host + port,
|
||||
Gatherer: reg,
|
||||
Prefix: "prefix",
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatalf("error creating bridge: %v", err)
|
||||
}
|
||||
|
||||
nmg, err := newMockGraphite(port)
|
||||
if err != nil {
|
||||
t.Fatalf("error creating mock graphite: %v", err)
|
||||
}
|
||||
defer nmg.Close()
|
||||
|
||||
err = b.Push()
|
||||
if err != nil {
|
||||
t.Fatalf("error pushing: %v", err)
|
||||
}
|
||||
|
||||
wants := []string{
|
||||
"prefix.name.constname.constvalue.labelname.val1 1",
|
||||
"prefix.name.constname.constvalue.labelname.val2 1",
|
||||
}
|
||||
|
||||
select {
|
||||
case got := <-nmg.readc:
|
||||
for _, want := range wants {
|
||||
matched, err := regexp.MatchString(want, got)
|
||||
if err != nil {
|
||||
t.Fatalf("error pushing: %v", err)
|
||||
}
|
||||
if !matched {
|
||||
t.Fatalf("missing metric:\nno match for %s received by server:\n%s", want, got)
|
||||
}
|
||||
}
|
||||
return
|
||||
case err := <-nmg.errc:
|
||||
t.Fatalf("error reading push: %v", err)
|
||||
case <-time.After(50 * time.Millisecond):
|
||||
t.Fatalf("no result from graphite server")
|
||||
}
|
||||
}
|
||||
|
||||
func newMockGraphite(port string) (*mockGraphite, error) {
|
||||
readc := make(chan string)
|
||||
errc := make(chan error)
|
||||
ln, err := net.Listen("tcp", port)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
go func() {
|
||||
conn, err := ln.Accept()
|
||||
if err != nil {
|
||||
errc <- err
|
||||
}
|
||||
var b bytes.Buffer
|
||||
io.Copy(&b, conn)
|
||||
readc <- b.String()
|
||||
}()
|
||||
|
||||
return &mockGraphite{
|
||||
readc: readc,
|
||||
errc: errc,
|
||||
Listener: ln,
|
||||
}, nil
|
||||
}
|
||||
|
||||
type mockGraphite struct {
|
||||
readc chan string
|
||||
errc chan error
|
||||
|
||||
net.Listener
|
||||
}
|
||||
|
||||
func ExampleBridge() {
|
||||
b, err := NewBridge(&Config{
|
||||
URL: "graphite.example.org:3099",
|
||||
Gatherer: prometheus.DefaultGatherer,
|
||||
Prefix: "prefix",
|
||||
Interval: 15 * time.Second,
|
||||
Timeout: 10 * time.Second,
|
||||
ErrorHandling: AbortOnError,
|
||||
Logger: log.New(os.Stdout, "graphite bridge: ", log.Lshortfile),
|
||||
})
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
go func() {
|
||||
// Start something in a goroutine that uses metrics.
|
||||
}()
|
||||
|
||||
// Push initial metrics to Graphite. Fail fast if the push fails.
|
||||
if err := b.Push(); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
// Create a Context to control stopping the Run() loop that pushes
|
||||
// metrics to Graphite.
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
// Start pushing metrics to Graphite in the Run() loop.
|
||||
b.Run(ctx)
|
||||
}
|
155
vendor/github.com/prometheus/client_golang/prometheus/histogram.go
generated
vendored
|
@ -16,7 +16,9 @@ package prometheus
|
|||
import (
|
||||
"fmt"
|
||||
"math"
|
||||
"runtime"
|
||||
"sort"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
|
||||
"github.com/golang/protobuf/proto"
|
||||
|
@ -108,8 +110,9 @@ func ExponentialBuckets(start, factor float64, count int) []float64 {
|
|||
}
|
||||
|
||||
// HistogramOpts bundles the options for creating a Histogram metric. It is
|
||||
// mandatory to set Name and Help to a non-empty string. All other fields are
|
||||
// optional and can safely be left at their zero value.
|
||||
// mandatory to set Name to a non-empty string. All other fields are optional
|
||||
// and can safely be left at their zero value, although it is strongly
|
||||
// encouraged to set a Help string.
|
||||
type HistogramOpts struct {
|
||||
// Namespace, Subsystem, and Name are components of the fully-qualified
|
||||
// name of the Histogram (created by joining these components with
|
||||
|
@ -120,7 +123,7 @@ type HistogramOpts struct {
|
|||
Subsystem string
|
||||
Name string
|
||||
|
||||
// Help provides information about this Histogram. Mandatory!
|
||||
// Help provides information about this Histogram.
|
||||
//
|
||||
// Metrics with the same fully-qualified name must have the same Help
|
||||
// string.
|
||||
|
@ -184,6 +187,7 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogr
|
|||
desc: desc,
|
||||
upperBounds: opts.Buckets,
|
||||
labelPairs: makeLabelPairs(desc, labelValues),
|
||||
counts: [2]*histogramCounts{&histogramCounts{}, &histogramCounts{}},
|
||||
}
|
||||
for i, upperBound := range h.upperBounds {
|
||||
if i < len(h.upperBounds)-1 {
|
||||
|
@ -200,28 +204,53 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogr
|
|||
}
|
||||
}
|
||||
}
|
||||
// Finally we know the final length of h.upperBounds and can make counts.
|
||||
h.counts = make([]uint64, len(h.upperBounds))
|
||||
// Finally we know the final length of h.upperBounds and can make counts
|
||||
// for both states:
|
||||
h.counts[0].buckets = make([]uint64, len(h.upperBounds))
|
||||
h.counts[1].buckets = make([]uint64, len(h.upperBounds))
|
||||
|
||||
h.init(h) // Init self-collection.
|
||||
return h
|
||||
}
|
||||
|
||||
type histogram struct {
|
||||
type histogramCounts struct {
|
||||
// sumBits contains the bits of the float64 representing the sum of all
|
||||
// observations. sumBits and count have to go first in the struct to
|
||||
// guarantee alignment for atomic operations.
|
||||
// http://golang.org/pkg/sync/atomic/#pkg-note-BUG
|
||||
sumBits uint64
|
||||
count uint64
|
||||
buckets []uint64
|
||||
}
|
||||
|
||||
type histogram struct {
|
||||
// countAndHotIdx is a complicated one. For lock-free yet atomic
|
||||
// observations, we need to save the total count of observations again,
|
||||
// combined with the index of the currently-hot counts struct, so that
|
||||
// we can perform the operation on both values atomically. The least
|
||||
// significant bit defines the hot counts struct. The remaining 63 bits
|
||||
// represent the total count of observations. This happens under the
|
||||
// assumption that the 63bit count will never overflow. Rationale: An
|
||||
// observation takes about 30ns. Let's assume it could happen in
|
||||
// 10ns. Overflowing the counter will then take at least (2^63)*10ns,
|
||||
// which is about 3000 years.
|
||||
//
|
||||
// This has to be first in the struct for 64bit alignment. See
|
||||
// http://golang.org/pkg/sync/atomic/#pkg-note-BUG
|
||||
countAndHotIdx uint64
|
||||
|
||||
selfCollector
|
||||
// Note that there is no mutex required.
|
||||
|
||||
desc *Desc
|
||||
desc *Desc
|
||||
writeMtx sync.Mutex // Only used in the Write method.
|
||||
|
||||
upperBounds []float64
|
||||
counts []uint64
|
||||
|
||||
// Two counts, one is "hot" for lock-free observations, the other is
|
||||
// "cold" for writing out a dto.Metric. It has to be an array of
|
||||
// pointers to guarantee 64bit alignment of the histogramCounts, see
|
||||
// http://golang.org/pkg/sync/atomic/#pkg-note-BUG.
|
||||
counts [2]*histogramCounts
|
||||
hotIdx int // Index of currently-hot counts. Only used within Write.
|
||||
|
||||
labelPairs []*dto.LabelPair
|
||||
}
|
||||
|
@ -241,36 +270,113 @@ func (h *histogram) Observe(v float64) {
|
|||
// 100 buckets: 78.1 ns/op linear - binary 54.9 ns/op
|
||||
// 300 buckets: 154 ns/op linear - binary 61.6 ns/op
|
||||
i := sort.SearchFloat64s(h.upperBounds, v)
|
||||
if i < len(h.counts) {
|
||||
atomic.AddUint64(&h.counts[i], 1)
|
||||
|
||||
// We increment h.countAndHotIdx by 2 so that the counter in the upper
|
||||
// 63 bits gets incremented by 1. At the same time, we get the new value
|
||||
// back, which we can use to find the currently-hot counts.
|
||||
n := atomic.AddUint64(&h.countAndHotIdx, 2)
|
||||
hotCounts := h.counts[n%2]
|
||||
|
||||
if i < len(h.upperBounds) {
|
||||
atomic.AddUint64(&hotCounts.buckets[i], 1)
|
||||
}
|
||||
atomic.AddUint64(&h.count, 1)
|
||||
for {
|
||||
oldBits := atomic.LoadUint64(&h.sumBits)
|
||||
oldBits := atomic.LoadUint64(&hotCounts.sumBits)
|
||||
newBits := math.Float64bits(math.Float64frombits(oldBits) + v)
|
||||
if atomic.CompareAndSwapUint64(&h.sumBits, oldBits, newBits) {
|
||||
if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) {
|
||||
break
|
||||
}
|
||||
}
|
||||
// Increment count last as we take it as a signal that the observation
|
||||
// is complete.
|
||||
atomic.AddUint64(&hotCounts.count, 1)
|
||||
}
|
||||
|
||||
func (h *histogram) Write(out *dto.Metric) error {
|
||||
his := &dto.Histogram{}
|
||||
buckets := make([]*dto.Bucket, len(h.upperBounds))
|
||||
var (
|
||||
his = &dto.Histogram{}
|
||||
buckets = make([]*dto.Bucket, len(h.upperBounds))
|
||||
hotCounts, coldCounts *histogramCounts
|
||||
count uint64
|
||||
)
|
||||
|
||||
his.SampleSum = proto.Float64(math.Float64frombits(atomic.LoadUint64(&h.sumBits)))
|
||||
his.SampleCount = proto.Uint64(atomic.LoadUint64(&h.count))
|
||||
var count uint64
|
||||
// For simplicity, we mutex the rest of this method. It is not in the
|
||||
// hot path, i.e. Observe is called much more often than Write. The
|
||||
// complication of making Write lock-free isn't worth it.
|
||||
h.writeMtx.Lock()
|
||||
defer h.writeMtx.Unlock()
|
||||
|
||||
// This is a bit arcane, which is why the following spells out this if
// clause in English:
//
// If the currently-hot counts struct is #0, we atomically increment
// h.countAndHotIdx by 1 so that from now on Observe will use the counts
// struct #1. Furthermore, the atomic increment gives us the new value,
// which, in its most significant 63 bits, tells us the count of
// observations done so far up to and including currently ongoing
// observations still using the counts struct just changed from hot to
// cold. To have a normal uint64 for the count, we bitshift by 1 and
// save the result in count. We also set h.hotIdx to 1 for the next
// Write call, and we will refer to counts #1 as hotCounts and to counts
// #0 as coldCounts.
//
// If the currently-hot counts struct is #1, we do the corresponding
// things the other way round. We have to _decrement_ h.countAndHotIdx
// (which is a bit arcane in itself, as we have to express -1 with an
// unsigned int...).
if h.hotIdx == 0 {
|
||||
count = atomic.AddUint64(&h.countAndHotIdx, 1) >> 1
|
||||
h.hotIdx = 1
|
||||
hotCounts = h.counts[1]
|
||||
coldCounts = h.counts[0]
|
||||
} else {
|
||||
count = atomic.AddUint64(&h.countAndHotIdx, ^uint64(0)) >> 1 // Decrement.
|
||||
h.hotIdx = 0
|
||||
hotCounts = h.counts[0]
|
||||
coldCounts = h.counts[1]
|
||||
}
|
||||
|
||||
// Now we have to wait for the now-declared-cold counts to actually cool
|
||||
// down, i.e. wait for all observations still using it to finish. That's
|
||||
// the case once the count in the cold counts struct is the same as the
|
||||
// one atomically retrieved from the upper 63bits of h.countAndHotIdx.
|
||||
for {
|
||||
if count == atomic.LoadUint64(&coldCounts.count) {
|
||||
break
|
||||
}
|
||||
runtime.Gosched() // Let observations get work done.
|
||||
}
|
||||
|
||||
his.SampleCount = proto.Uint64(count)
|
||||
his.SampleSum = proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.sumBits)))
|
||||
var cumCount uint64
|
||||
for i, upperBound := range h.upperBounds {
|
||||
count += atomic.LoadUint64(&h.counts[i])
|
||||
cumCount += atomic.LoadUint64(&coldCounts.buckets[i])
|
||||
buckets[i] = &dto.Bucket{
|
||||
CumulativeCount: proto.Uint64(count),
|
||||
CumulativeCount: proto.Uint64(cumCount),
|
||||
UpperBound: proto.Float64(upperBound),
|
||||
}
|
||||
}
|
||||
|
||||
his.Bucket = buckets
|
||||
out.Histogram = his
|
||||
out.Label = h.labelPairs
|
||||
|
||||
// Finally add all the cold counts to the new hot counts and reset the cold counts.
|
||||
atomic.AddUint64(&hotCounts.count, count)
|
||||
atomic.StoreUint64(&coldCounts.count, 0)
|
||||
for {
|
||||
oldBits := atomic.LoadUint64(&hotCounts.sumBits)
|
||||
newBits := math.Float64bits(math.Float64frombits(oldBits) + his.GetSampleSum())
|
||||
if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) {
|
||||
atomic.StoreUint64(&coldCounts.sumBits, 0)
|
||||
break
|
||||
}
|
||||
}
|
||||
for i := range h.upperBounds {
|
||||
atomic.AddUint64(&hotCounts.buckets[i], atomic.LoadUint64(&coldCounts.buckets[i]))
|
||||
atomic.StoreUint64(&coldCounts.buckets[i], 0)
|
||||
}
|
||||
return nil
|
||||
}
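// The countAndHotIdx field packs two pieces of state into one uint64: bit 0
// selects the currently-hot counts struct, the upper 63 bits count the
// observations started so far. Below is a stripped-down, illustrative sketch
// of that double-buffer scheme with made-up names; the real Write above
// additionally merges buckets and the sum back into the hot counts. It only
// assumes the runtime, sync, and sync/atomic imports this file already uses.
type hotColdCounter struct {
	countAndHotIdx uint64 // bit 0: hot index; bits 1-63: observations started
	counts         [2]uint64
	hotIdx         int // only touched while holding writeMtx
	writeMtx       sync.Mutex
}

func (c *hotColdCounter) observe() {
	n := atomic.AddUint64(&c.countAndHotIdx, 2) // start an observation, learn the hot half
	// ... update the hot half's buckets and sum here ...
	atomic.AddUint64(&c.counts[n%2], 1) // mark the observation complete, last of all
}

func (c *hotColdCounter) snapshot() uint64 {
	c.writeMtx.Lock()
	defer c.writeMtx.Unlock()
	var n uint64
	if c.hotIdx == 0 {
		n = atomic.AddUint64(&c.countAndHotIdx, 1) // flip bit 0 from 0 to 1
		c.hotIdx = 1
	} else {
		n = atomic.AddUint64(&c.countAndHotIdx, ^uint64(0)) // flip bit 0 from 1 to 0
		c.hotIdx = 0
	}
	started := n >> 1
	cold := &c.counts[c.hotIdx^1] // the half that was hot before the flip
	for started != atomic.LoadUint64(cold) {
		runtime.Gosched() // wait for in-flight observations to finish
	}
	// Fold the cold half back into the new hot half and reset it, so that
	// "cold count == cumulative total" holds again at the next flip.
	atomic.AddUint64(&c.counts[c.hotIdx], started)
	atomic.StoreUint64(cold, 0)
	return started
}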
|
||||
|
||||
|
@ -454,7 +560,7 @@ func (h *constHistogram) Write(out *dto.Metric) error {
|
|||
// bucket.
|
||||
//
|
||||
// NewConstHistogram returns an error if the length of labelValues is not
|
||||
// consistent with the variable labels in Desc.
|
||||
// consistent with the variable labels in Desc or if Desc is invalid.
|
||||
func NewConstHistogram(
|
||||
desc *Desc,
|
||||
count uint64,
|
||||
|
@ -462,6 +568,9 @@ func NewConstHistogram(
|
|||
buckets map[float64]uint64,
|
||||
labelValues ...string,
|
||||
) (Metric, error) {
|
||||
if desc.err != nil {
|
||||
return nil, desc.err
|
||||
}
|
||||
if err := validateLabelValues(labelValues, len(desc.variableLabels)); err != nil {
|
||||
return nil, err
|
||||
}
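// A hedged usage sketch of NewConstHistogram for a custom collector that
// already has pre-aggregated bucket data (the desc, numbers, and function
// name below are made up for illustration):
func exampleConstHistogram() (Metric, error) {
	desc := NewDesc(
		"http_request_duration_seconds",
		"A histogram of request durations, aggregated elsewhere.",
		[]string{"handler"}, nil,
	)
	return NewConstHistogram(
		desc,
		4711,  // observation count
		403.4, // sum of all observed values
		map[float64]uint64{0.05: 1200, 0.1: 2403, 0.5: 4700}, // cumulative counts per upper bound
		"api", // value for the "handler" label
	)
}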
|
||||
|
|
45
vendor/github.com/prometheus/client_golang/prometheus/histogram_test.go
generated
vendored
|
@ -17,6 +17,7 @@ import (
|
|||
"math"
|
||||
"math/rand"
|
||||
"reflect"
|
||||
"runtime"
|
||||
"sort"
|
||||
"sync"
|
||||
"testing"
|
||||
|
@ -346,3 +347,47 @@ func TestBuckets(t *testing.T) {
|
|||
t.Errorf("linear buckets: got %v, want %v", got, want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestHistogramAtomicObserve(t *testing.T) {
|
||||
var (
|
||||
quit = make(chan struct{})
|
||||
his = NewHistogram(HistogramOpts{
|
||||
Buckets: []float64{0.5, 10, 20},
|
||||
})
|
||||
)
|
||||
|
||||
defer func() { close(quit) }()
|
||||
|
||||
observe := func() {
|
||||
for {
|
||||
select {
|
||||
case <-quit:
|
||||
return
|
||||
default:
|
||||
his.Observe(1)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
go observe()
|
||||
go observe()
|
||||
go observe()
|
||||
|
||||
for i := 0; i < 100; i++ {
|
||||
m := &dto.Metric{}
|
||||
if err := his.Write(m); err != nil {
|
||||
t.Fatal("unexpected error writing histogram:", err)
|
||||
}
|
||||
h := m.GetHistogram()
|
||||
if h.GetSampleCount() != uint64(h.GetSampleSum()) ||
|
||||
h.GetSampleCount() != h.GetBucket()[1].GetCumulativeCount() ||
|
||||
h.GetSampleCount() != h.GetBucket()[2].GetCumulativeCount() {
|
||||
t.Fatalf(
|
||||
"inconsistent counts in histogram: count=%d sum=%f buckets=[%d, %d]",
|
||||
h.GetSampleCount(), h.GetSampleSum(),
|
||||
h.GetBucket()[1].GetCumulativeCount(), h.GetBucket()[2].GetCumulativeCount(),
|
||||
)
|
||||
}
|
||||
runtime.Gosched()
|
||||
}
|
||||
}
|
||||
|
|
50
vendor/github.com/prometheus/client_golang/prometheus/http.go
generated
vendored
|
@ -61,15 +61,15 @@ func giveBuf(buf *bytes.Buffer) {
|
|||
// name).
|
||||
//
|
||||
// Deprecated: Please note the issues described in the doc comment of
|
||||
// InstrumentHandler. You might want to consider using
|
||||
// promhttp.InstrumentedHandler instead.
|
||||
// InstrumentHandler. You might want to consider using promhttp.Handler instead.
|
||||
func Handler() http.Handler {
|
||||
return InstrumentHandler("prometheus", UninstrumentedHandler())
|
||||
}
|
||||
|
||||
// UninstrumentedHandler returns an HTTP handler for the DefaultGatherer.
|
||||
//
|
||||
// Deprecated: Use promhttp.Handler instead. See there for further documentation.
|
||||
// Deprecated: Use promhttp.HandlerFor(DefaultGatherer, promhttp.HandlerOpts{})
|
||||
// instead. See there for further documentation.
|
||||
func UninstrumentedHandler() http.Handler {
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
|
||||
mfs, err := DefaultGatherer.Gather()
|
||||
|
@ -115,7 +115,7 @@ func decorateWriter(request *http.Request, writer io.Writer) (io.Writer, string)
|
|||
header := request.Header.Get(acceptEncodingHeader)
|
||||
parts := strings.Split(header, ",")
|
||||
for _, part := range parts {
|
||||
part := strings.TrimSpace(part)
|
||||
part = strings.TrimSpace(part)
|
||||
if part == "gzip" || strings.HasPrefix(part, "gzip;") {
|
||||
return gzip.NewWriter(writer), "gzip"
|
||||
}
|
||||
|
@ -139,16 +139,6 @@ var now nower = nowFunc(func() time.Time {
|
|||
return time.Now()
|
||||
})
|
||||
|
||||
func nowSeries(t ...time.Time) nower {
|
||||
return nowFunc(func() time.Time {
|
||||
defer func() {
|
||||
t = t[1:]
|
||||
}()
|
||||
|
||||
return t[0]
|
||||
})
|
||||
}
|
||||
|
||||
// InstrumentHandler wraps the given HTTP handler for instrumentation. It
|
||||
// registers four metric collectors (if not already done) and reports HTTP
|
||||
// metrics to the (newly or already) registered collectors: http_requests_total
|
||||
|
@ -159,21 +149,14 @@ func nowSeries(t ...time.Time) nower {
|
|||
// (label name "method") and HTTP status code (label name "code").
|
||||
//
|
||||
// Deprecated: InstrumentHandler has several issues. Use the tooling provided in
|
||||
// package promhttp instead. The issues are the following:
|
||||
//
|
||||
// - It uses Summaries rather than Histograms. Summaries are not useful if
|
||||
// aggregation across multiple instances is required.
|
||||
//
|
||||
// - It uses microseconds as unit, which is deprecated and should be replaced by
|
||||
// seconds.
|
||||
//
|
||||
// - The size of the request is calculated in a separate goroutine. Since this
|
||||
// calculator requires access to the request header, it creates a race with
|
||||
// any writes to the header performed during request handling.
|
||||
// httputil.ReverseProxy is a prominent example for a handler
|
||||
// performing such writes.
|
||||
//
|
||||
// - It has additional issues with HTTP/2, cf.
|
||||
// package promhttp instead. The issues are the following: (1) It uses Summaries
|
||||
// rather than Histograms. Summaries are not useful if aggregation across
|
||||
// multiple instances is required. (2) It uses microseconds as unit, which is
|
||||
// deprecated and should be replaced by seconds. (3) The size of the request is
|
||||
// calculated in a separate goroutine. Since this calculator requires access to
|
||||
// the request header, it creates a race with any writes to the header performed
|
||||
// during request handling. httputil.ReverseProxy is a prominent example for a
|
||||
// handler performing such writes. (4) It has additional issues with HTTP/2, cf.
|
||||
// https://github.com/prometheus/client_golang/issues/272.
|
||||
func InstrumentHandler(handlerName string, handler http.Handler) http.HandlerFunc {
|
||||
return InstrumentHandlerFunc(handlerName, handler.ServeHTTP)
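// A hedged caller-side sketch of the recommended promhttp replacement:
// Histograms instead of Summaries, seconds instead of microseconds, and no
// racy request-size goroutine. The metric names and the wrapper function are
// made up; the promhttp functions and their signatures are real.
//
//	func instrumentWithPromhttp(next http.Handler) http.Handler {
//		reqs := NewCounterVec(
//			CounterOpts{Name: "http_requests_total", Help: "Total HTTP requests."},
//			[]string{"code", "method"},
//		)
//		dur := NewHistogramVec(
//			HistogramOpts{Name: "http_request_duration_seconds", Help: "Request latencies.", Buckets: DefBuckets},
//			[]string{"code", "method"},
//		)
//		MustRegister(reqs, dur)
//		return promhttp.InstrumentHandlerDuration(dur,
//			promhttp.InstrumentHandlerCounter(reqs, next))
//	}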
|
||||
|
@ -317,7 +300,7 @@ func InstrumentHandlerFuncWithOpts(opts SummaryOpts, handlerFunc func(http.Respo
|
|||
}
|
||||
|
||||
func computeApproximateRequestSize(r *http.Request) <-chan int {
|
||||
// Get URL length in current go routine for avoiding a race condition.
|
||||
// Get URL length in current goroutine for avoiding a race condition.
|
||||
// HandlerFunc that runs in parallel may modify the URL.
|
||||
s := 0
|
||||
if r.URL != nil {
|
||||
|
@ -352,10 +335,9 @@ func computeApproximateRequestSize(r *http.Request) <-chan int {
|
|||
type responseWriterDelegator struct {
|
||||
http.ResponseWriter
|
||||
|
||||
handler, method string
|
||||
status int
|
||||
written int64
|
||||
wroteHeader bool
|
||||
status int
|
||||
written int64
|
||||
wroteHeader bool
|
||||
}
|
||||
|
||||
func (r *responseWriterDelegator) WriteHeader(code int) {
|
||||
|
|
18
vendor/github.com/prometheus/client_golang/prometheus/http_test.go
generated
vendored
|
@ -29,6 +29,16 @@ func (b respBody) ServeHTTP(w http.ResponseWriter, r *http.Request) {
|
|||
w.Write([]byte(b))
|
||||
}
|
||||
|
||||
func nowSeries(t ...time.Time) nower {
|
||||
return nowFunc(func() time.Time {
|
||||
defer func() {
|
||||
t = t[1:]
|
||||
}()
|
||||
|
||||
return t[0]
|
||||
})
|
||||
}
|
||||
|
||||
func TestInstrumentHandler(t *testing.T) {
|
||||
defer func(n nower) {
|
||||
now = n.(nower)
|
||||
|
@ -37,9 +47,9 @@ func TestInstrumentHandler(t *testing.T) {
|
|||
instant := time.Now()
|
||||
end := instant.Add(30 * time.Second)
|
||||
now = nowSeries(instant, end)
|
||||
respBody := respBody("Howdy there!")
|
||||
body := respBody("Howdy there!")
|
||||
|
||||
hndlr := InstrumentHandler("test-handler", respBody)
|
||||
hndlr := InstrumentHandler("test-handler", body)
|
||||
|
||||
opts := SummaryOpts{
|
||||
Subsystem: "http",
|
||||
|
@ -114,8 +124,8 @@ func TestInstrumentHandler(t *testing.T) {
|
|||
if resp.Code != http.StatusTeapot {
|
||||
t.Fatalf("expected status %d, got %d", http.StatusTeapot, resp.Code)
|
||||
}
|
||||
if string(resp.Body.Bytes()) != "Howdy there!" {
|
||||
t.Fatalf("expected body %s, got %s", "Howdy there!", string(resp.Body.Bytes()))
|
||||
if resp.Body.String() != "Howdy there!" {
|
||||
t.Fatalf("expected body %s, got %s", "Howdy there!", resp.Body.String())
|
||||
}
|
||||
|
||||
out := &dto.Metric{}
|
||||
|
|
85
vendor/github.com/prometheus/client_golang/prometheus/internal/metric.go
generated
vendored
Normal file
|
@ -0,0 +1,85 @@
|
|||
// Copyright 2018 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package internal
|
||||
|
||||
import (
|
||||
"sort"
|
||||
|
||||
dto "github.com/prometheus/client_model/go"
|
||||
)
|
||||
|
||||
// metricSorter is a sortable slice of *dto.Metric.
|
||||
type metricSorter []*dto.Metric
|
||||
|
||||
func (s metricSorter) Len() int {
|
||||
return len(s)
|
||||
}
|
||||
|
||||
func (s metricSorter) Swap(i, j int) {
|
||||
s[i], s[j] = s[j], s[i]
|
||||
}
|
||||
|
||||
func (s metricSorter) Less(i, j int) bool {
|
||||
if len(s[i].Label) != len(s[j].Label) {
|
||||
// This should not happen. The metrics are
|
||||
// inconsistent. However, we have to deal with the fact, as
|
||||
// people might use custom collectors or metric family injection
|
||||
// to create inconsistent metrics. So let's simply compare the
|
||||
// number of labels in this case. That will still yield
|
||||
// reproducible sorting.
|
||||
return len(s[i].Label) < len(s[j].Label)
|
||||
}
|
||||
for n, lp := range s[i].Label {
|
||||
vi := lp.GetValue()
|
||||
vj := s[j].Label[n].GetValue()
|
||||
if vi != vj {
|
||||
return vi < vj
|
||||
}
|
||||
}
|
||||
|
||||
// We should never arrive here. Multiple metrics with the same
|
||||
// label set in the same scrape will lead to undefined ingestion
|
||||
// behavior. However, as above, we have to provide stable sorting
|
||||
// here, even for inconsistent metrics. So sort equal metrics
|
||||
// by their timestamp, with missing timestamps (implying "now")
|
||||
// coming last.
|
||||
if s[i].TimestampMs == nil {
|
||||
return false
|
||||
}
|
||||
if s[j].TimestampMs == nil {
|
||||
return true
|
||||
}
|
||||
return s[i].GetTimestampMs() < s[j].GetTimestampMs()
|
||||
}
|
||||
|
||||
// NormalizeMetricFamilies returns a MetricFamily slice with empty
|
||||
// MetricFamilies pruned and the remaining MetricFamilies sorted by name within
|
||||
// the slice, with the contained Metrics sorted within each MetricFamily.
|
||||
func NormalizeMetricFamilies(metricFamiliesByName map[string]*dto.MetricFamily) []*dto.MetricFamily {
|
||||
for _, mf := range metricFamiliesByName {
|
||||
sort.Sort(metricSorter(mf.Metric))
|
||||
}
|
||||
names := make([]string, 0, len(metricFamiliesByName))
|
||||
for name, mf := range metricFamiliesByName {
|
||||
if len(mf.Metric) > 0 {
|
||||
names = append(names, name)
|
||||
}
|
||||
}
|
||||
sort.Strings(names)
|
||||
result := make([]*dto.MetricFamily, 0, len(names))
|
||||
for _, name := range names {
|
||||
result = append(result, metricFamiliesByName[name])
|
||||
}
|
||||
return result
|
||||
}
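// A hedged sketch of how a registry-style caller uses this helper; the data
// and the function name are made up, and the proto import
// ("github.com/golang/protobuf/proto") is assumed in addition to the ones above:
func exampleNormalize() []*dto.MetricFamily {
	byName := map[string]*dto.MetricFamily{
		"b_total": {Name: proto.String("b_total"), Metric: []*dto.Metric{{}}},
		"a_total": {Name: proto.String("a_total"), Metric: []*dto.Metric{{}}},
		"empty":   {Name: proto.String("empty")}, // no metrics, will be pruned
	}
	// Returns a_total before b_total, drops "empty", and sorts the metrics
	// within each family by label values (and timestamps).
	return NormalizeMetricFamilies(byName)
}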
|
13
vendor/github.com/prometheus/client_golang/prometheus/labels.go
generated
vendored
|
@ -1,3 +1,16 @@
|
|||
// Copyright 2018 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package prometheus
|
||||
|
||||
import (
|
||||
|
|
70
vendor/github.com/prometheus/client_golang/prometheus/metric.go
generated
vendored
|
@ -15,6 +15,9 @@ package prometheus
|
|||
|
||||
import (
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/golang/protobuf/proto"
|
||||
|
||||
dto "github.com/prometheus/client_model/go"
|
||||
)
|
||||
|
@ -43,9 +46,8 @@ type Metric interface {
|
|||
// While populating dto.Metric, it is the responsibility of the
|
||||
// implementation to ensure validity of the Metric protobuf (like valid
|
||||
// UTF-8 strings or syntactically valid metric and label names). It is
|
||||
// recommended to sort labels lexicographically. (Implementers may find
|
||||
// LabelPairSorter useful for that.) Callers of Write should still make
|
||||
// sure of sorting if they depend on it.
|
||||
// recommended to sort labels lexicographically. Callers of Write should
|
||||
// still make sure of sorting if they depend on it.
|
||||
Write(*dto.Metric) error
|
||||
// TODO(beorn7): The original rationale of passing in a pre-allocated
|
||||
// dto.Metric protobuf to save allocations has disappeared. The
|
||||
|
@ -57,8 +59,9 @@ type Metric interface {
|
|||
// implementation XXX has its own XXXOpts type, but in most cases, it is just
|
||||
// an alias of this type (which might change when the requirement arises.)
|
||||
//
|
||||
// It is mandatory to set Name and Help to a non-empty string. All other fields
|
||||
// are optional and can safely be left at their zero value.
|
||||
// It is mandatory to set Name to a non-empty string. All other fields are
|
||||
// optional and can safely be left at their zero value, although it is strongly
|
||||
// encouraged to set a Help string.
|
||||
type Opts struct {
|
||||
// Namespace, Subsystem, and Name are components of the fully-qualified
|
||||
// name of the Metric (created by joining these components with
|
||||
|
@ -69,7 +72,7 @@ type Opts struct {
|
|||
Subsystem string
|
||||
Name string
|
||||
|
||||
// Help provides information about this metric. Mandatory!
|
||||
// Help provides information about this metric.
|
||||
//
|
||||
// Metrics with the same fully-qualified name must have the same Help
|
||||
// string.
|
||||
|
@ -110,37 +113,22 @@ func BuildFQName(namespace, subsystem, name string) string {
|
|||
return name
|
||||
}
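// A quick sketch of how the name components from Opts are joined; empty
// components are simply skipped (the wrapper function is made up):
func exampleFQNames() []string {
	return []string{
		BuildFQName("myapp", "http", "requests_total"), // "myapp_http_requests_total"
		BuildFQName("", "http", "requests_total"),      // "http_requests_total"
		BuildFQName("", "", "up"),                      // "up"
	}
}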
|
||||
|
||||
// LabelPairSorter implements sort.Interface. It is used to sort a slice of
|
||||
// dto.LabelPair pointers. This is useful for implementing the Write method of
|
||||
// custom metrics.
|
||||
type LabelPairSorter []*dto.LabelPair
|
||||
// labelPairSorter implements sort.Interface. It is used to sort a slice of
|
||||
// dto.LabelPair pointers.
|
||||
type labelPairSorter []*dto.LabelPair
|
||||
|
||||
func (s LabelPairSorter) Len() int {
|
||||
func (s labelPairSorter) Len() int {
|
||||
return len(s)
|
||||
}
|
||||
|
||||
func (s LabelPairSorter) Swap(i, j int) {
|
||||
func (s labelPairSorter) Swap(i, j int) {
|
||||
s[i], s[j] = s[j], s[i]
|
||||
}
|
||||
|
||||
func (s LabelPairSorter) Less(i, j int) bool {
|
||||
func (s labelPairSorter) Less(i, j int) bool {
|
||||
return s[i].GetName() < s[j].GetName()
|
||||
}
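// With LabelPairSorter unexported, a custom Metric implementation that wants
// its labels sorted lexicographically in Write can use sort.Slice instead.
// A hedged sketch (assumes Go 1.8+ and the sort import; the helper name is
// made up):
func sortLabelPairs(pairs []*dto.LabelPair) {
	sort.Slice(pairs, func(i, j int) bool {
		return pairs[i].GetName() < pairs[j].GetName()
	})
}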
|
||||
|
||||
type hashSorter []uint64
|
||||
|
||||
func (s hashSorter) Len() int {
|
||||
return len(s)
|
||||
}
|
||||
|
||||
func (s hashSorter) Swap(i, j int) {
|
||||
s[i], s[j] = s[j], s[i]
|
||||
}
|
||||
|
||||
func (s hashSorter) Less(i, j int) bool {
|
||||
return s[i] < s[j]
|
||||
}
|
||||
|
||||
type invalidMetric struct {
|
||||
desc *Desc
|
||||
err error
|
||||
|
@ -156,3 +144,31 @@ func NewInvalidMetric(desc *Desc, err error) Metric {
|
|||
func (m *invalidMetric) Desc() *Desc { return m.desc }
|
||||
|
||||
func (m *invalidMetric) Write(*dto.Metric) error { return m.err }
|
||||
|
||||
type timestampedMetric struct {
|
||||
Metric
|
||||
t time.Time
|
||||
}
|
||||
|
||||
func (m timestampedMetric) Write(pb *dto.Metric) error {
|
||||
e := m.Metric.Write(pb)
|
||||
pb.TimestampMs = proto.Int64(m.t.Unix()*1000 + int64(m.t.Nanosecond()/1000000))
|
||||
return e
|
||||
}
|
||||
|
||||
// NewMetricWithTimestamp returns a new Metric wrapping the provided Metric in a
|
||||
// way that it has an explicit timestamp set to the provided Time. This is only
|
||||
// useful in rare cases as the timestamp of a Prometheus metric should usually
|
||||
// be set by the Prometheus server during scraping. Exceptions include mirroring
|
||||
// metrics with given timestamps from other metric
|
||||
// sources.
|
||||
//
|
||||
// NewMetricWithTimestamp works best with MustNewConstMetric,
|
||||
// MustNewConstHistogram, and MustNewConstSummary, see example.
|
||||
//
|
||||
// Currently, the exposition formats used by Prometheus are limited to
|
||||
// millisecond resolution. Thus, the provided time will be rounded down to the
|
||||
// next full millisecond value.
|
||||
func NewMetricWithTimestamp(t time.Time, m Metric) Metric {
|
||||
return timestampedMetric{Metric: m, t: t}
|
||||
}
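// A hedged usage sketch: mirroring a counter read from an external system
// together with the timestamp that system reported (the function name is
// made up; desc, value, and time come from the caller):
func exampleMirroredMetric(desc *Desc, value float64, reportedAt time.Time) Metric {
	return NewMetricWithTimestamp(
		reportedAt,
		MustNewConstMetric(desc, CounterValue, value),
	)
}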
|
||||
|
|
116
vendor/github.com/prometheus/client_golang/prometheus/process_collector.go
generated
vendored
|
@ -13,46 +13,74 @@
|
|||
|
||||
package prometheus
|
||||
|
||||
import "github.com/prometheus/procfs"
|
||||
import (
|
||||
"errors"
|
||||
"os"
|
||||
|
||||
"github.com/prometheus/procfs"
|
||||
)
|
||||
|
||||
type processCollector struct {
|
||||
pid int
|
||||
collectFn func(chan<- Metric)
|
||||
pidFn func() (int, error)
|
||||
reportErrors bool
|
||||
cpuTotal *Desc
|
||||
openFDs, maxFDs *Desc
|
||||
vsize, rss *Desc
|
||||
vsize, maxVsize *Desc
|
||||
rss *Desc
|
||||
startTime *Desc
|
||||
}
|
||||
|
||||
// ProcessCollectorOpts defines the behavior of a process metrics collector
|
||||
// created with NewProcessCollector.
|
||||
type ProcessCollectorOpts struct {
|
||||
// PidFn returns the PID of the process the collector collects metrics
|
||||
// for. It is called upon each collection. By default, the PID of the
|
||||
// current process is used, as determined on construction time by
|
||||
// calling os.Getpid().
|
||||
PidFn func() (int, error)
|
||||
// If non-empty, each of the collected metrics is prefixed by the
|
||||
// provided string and an underscore ("_").
|
||||
Namespace string
|
||||
// If true, any error encountered during collection is reported as an
|
||||
// invalid metric (see NewInvalidMetric). Otherwise, errors are ignored
|
||||
// and the collected metrics will be incomplete. (Possibly, no metrics
|
||||
// will be collected at all.) While that's usually not desired, it is
|
||||
// appropriate for the common "mix-in" of process metrics, where process
|
||||
// metrics are nice to have, but failing to collect them should not
|
||||
// disrupt the collection of the remaining metrics.
|
||||
ReportErrors bool
|
||||
}
|
||||
|
||||
// NewProcessCollector returns a collector which exports the current state of
|
||||
// process metrics including CPU, memory and file descriptor usage as well as
|
||||
// the process start time for the given process ID under the given namespace.
|
||||
// the process start time. The detailed behavior is defined by the provided
|
||||
// ProcessCollectorOpts. The zero value of ProcessCollectorOpts creates a
|
||||
// collector for the current process with an empty namespace string and no error
|
||||
// reporting.
|
||||
//
|
||||
// Currently, the collector depends on a Linux-style proc filesystem and
|
||||
// therefore only exports metrics for Linux.
|
||||
func NewProcessCollector(pid int, namespace string) Collector {
|
||||
return NewProcessCollectorPIDFn(
|
||||
func() (int, error) { return pid, nil },
|
||||
namespace,
|
||||
)
|
||||
}
|
||||
|
||||
// NewProcessCollectorPIDFn works like NewProcessCollector but the process ID is
|
||||
// determined on each collect anew by calling the given pidFn function.
|
||||
func NewProcessCollectorPIDFn(
|
||||
pidFn func() (int, error),
|
||||
namespace string,
|
||||
) Collector {
|
||||
//
|
||||
// Note: An older version of this function had the following signature:
|
||||
//
|
||||
// NewProcessCollector(pid int, namespace string) Collector
|
||||
//
|
||||
// Most commonly, it was called as
|
||||
//
|
||||
// NewProcessCollector(os.Getpid(), "")
|
||||
//
|
||||
// The following call of the current version is equivalent to the above:
|
||||
//
|
||||
// NewProcessCollector(ProcessCollectorOpts{})
|
||||
func NewProcessCollector(opts ProcessCollectorOpts) Collector {
|
||||
ns := ""
|
||||
if len(namespace) > 0 {
|
||||
ns = namespace + "_"
|
||||
if len(opts.Namespace) > 0 {
|
||||
ns = opts.Namespace + "_"
|
||||
}
|
||||
|
||||
c := processCollector{
|
||||
pidFn: pidFn,
|
||||
collectFn: func(chan<- Metric) {},
|
||||
|
||||
c := &processCollector{
|
||||
reportErrors: opts.ReportErrors,
|
||||
cpuTotal: NewDesc(
|
||||
ns+"process_cpu_seconds_total",
|
||||
"Total user and system CPU time spent in seconds.",
|
||||
|
@ -73,6 +101,11 @@ func NewProcessCollectorPIDFn(
|
|||
"Virtual memory size in bytes.",
|
||||
nil, nil,
|
||||
),
|
||||
maxVsize: NewDesc(
|
||||
ns+"process_virtual_memory_max_bytes",
|
||||
"Maximum amount of virtual memory available in bytes.",
|
||||
nil, nil,
|
||||
),
|
||||
rss: NewDesc(
|
||||
ns+"process_resident_memory_bytes",
|
||||
"Resident memory size in bytes.",
|
||||
|
@ -85,12 +118,23 @@ func NewProcessCollectorPIDFn(
|
|||
),
|
||||
}
|
||||
|
||||
if opts.PidFn == nil {
|
||||
pid := os.Getpid()
|
||||
c.pidFn = func() (int, error) { return pid, nil }
|
||||
} else {
|
||||
c.pidFn = opts.PidFn
|
||||
}
|
||||
|
||||
// Set up process metric collection if supported by the runtime.
|
||||
if _, err := procfs.NewStat(); err == nil {
|
||||
c.collectFn = c.processCollect
|
||||
} else {
|
||||
c.collectFn = func(ch chan<- Metric) {
|
||||
c.reportError(ch, nil, errors.New("process metrics not supported on this platform"))
|
||||
}
|
||||
}
|
||||
|
||||
return &c
|
||||
return c
|
||||
}
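// A hedged caller-side sketch of the new constructor (the function name and
// namespace are made up). The zero ProcessCollectorOpts value reproduces the
// old NewProcessCollector(os.Getpid(), "") behavior:
func registerProcessMetrics(reg Registerer) {
	reg.MustRegister(NewProcessCollector(ProcessCollectorOpts{}))
	// With a namespace prefix and error reporting enabled; the resulting
	// metric names carry a "myapp_" prefix, so both collectors can coexist:
	reg.MustRegister(NewProcessCollector(ProcessCollectorOpts{
		Namespace:    "myapp",
		ReportErrors: true,
	}))
}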
|
||||
|
||||
// Describe returns all descriptions of the collector.
|
||||
|
@ -99,6 +143,7 @@ func (c *processCollector) Describe(ch chan<- *Desc) {
|
|||
ch <- c.openFDs
|
||||
ch <- c.maxFDs
|
||||
ch <- c.vsize
|
||||
ch <- c.maxVsize
|
||||
ch <- c.rss
|
||||
ch <- c.startTime
|
||||
}
|
||||
|
@ -108,16 +153,16 @@ func (c *processCollector) Collect(ch chan<- Metric) {
|
|||
c.collectFn(ch)
|
||||
}
|
||||
|
||||
// TODO(ts): Bring back error reporting by reverting 7faf9e7 as soon as the
|
||||
// client allows users to configure the error behavior.
|
||||
func (c *processCollector) processCollect(ch chan<- Metric) {
|
||||
pid, err := c.pidFn()
|
||||
if err != nil {
|
||||
c.reportError(ch, nil, err)
|
||||
return
|
||||
}
|
||||
|
||||
p, err := procfs.NewProc(pid)
|
||||
if err != nil {
|
||||
c.reportError(ch, nil, err)
|
||||
return
|
||||
}
|
||||
|
||||
|
@ -127,14 +172,33 @@ func (c *processCollector) processCollect(ch chan<- Metric) {
|
|||
ch <- MustNewConstMetric(c.rss, GaugeValue, float64(stat.ResidentMemory()))
|
||||
if startTime, err := stat.StartTime(); err == nil {
|
||||
ch <- MustNewConstMetric(c.startTime, GaugeValue, startTime)
|
||||
} else {
|
||||
c.reportError(ch, c.startTime, err)
|
||||
}
|
||||
} else {
|
||||
c.reportError(ch, nil, err)
|
||||
}
|
||||
|
||||
if fds, err := p.FileDescriptorsLen(); err == nil {
|
||||
ch <- MustNewConstMetric(c.openFDs, GaugeValue, float64(fds))
|
||||
} else {
|
||||
c.reportError(ch, c.openFDs, err)
|
||||
}
|
||||
|
||||
if limits, err := p.NewLimits(); err == nil {
|
||||
ch <- MustNewConstMetric(c.maxFDs, GaugeValue, float64(limits.OpenFiles))
|
||||
ch <- MustNewConstMetric(c.maxVsize, GaugeValue, float64(limits.AddressSpace))
|
||||
} else {
|
||||
c.reportError(ch, nil, err)
|
||||
}
|
||||
}
|
||||
|
||||
func (c *processCollector) reportError(ch chan<- Metric, desc *Desc, err error) {
|
||||
if !c.reportErrors {
|
||||
return
|
||||
}
|
||||
if desc == nil {
|
||||
desc = NewInvalidDesc(err)
|
||||
}
|
||||
ch <- NewInvalidMetric(desc, err)
|
||||
}
|
||||
|
|
53
vendor/github.com/prometheus/client_golang/prometheus/process_collector_test.go
generated
vendored
|
@ -1,13 +1,31 @@
|
|||
// Copyright 2018 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// +build linux
|
||||
|
||||
package prometheus
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"os"
|
||||
"regexp"
|
||||
"testing"
|
||||
|
||||
"github.com/prometheus/common/expfmt"
|
||||
"github.com/prometheus/procfs"
|
||||
|
||||
dto "github.com/prometheus/client_model/go"
|
||||
)
|
||||
|
||||
func TestProcessCollector(t *testing.T) {
|
||||
|
@ -16,12 +34,14 @@ func TestProcessCollector(t *testing.T) {
|
|||
}
|
||||
|
||||
registry := NewRegistry()
|
||||
if err := registry.Register(NewProcessCollector(os.Getpid(), "")); err != nil {
|
||||
if err := registry.Register(NewProcessCollector(ProcessCollectorOpts{})); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := registry.Register(NewProcessCollectorPIDFn(
|
||||
func() (int, error) { return os.Getpid(), nil }, "foobar"),
|
||||
); err != nil {
|
||||
if err := registry.Register(NewProcessCollector(ProcessCollectorOpts{
|
||||
PidFn: func() (int, error) { return os.Getpid(), nil },
|
||||
Namespace: "foobar",
|
||||
ReportErrors: true, // No errors expected, just to see if none are reported.
|
||||
})); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
|
@ -41,12 +61,14 @@ func TestProcessCollector(t *testing.T) {
|
|||
regexp.MustCompile("\nprocess_cpu_seconds_total [0-9]"),
|
||||
regexp.MustCompile("\nprocess_max_fds [1-9]"),
|
||||
regexp.MustCompile("\nprocess_open_fds [1-9]"),
|
||||
regexp.MustCompile("\nprocess_virtual_memory_max_bytes (-1|[1-9])"),
|
||||
regexp.MustCompile("\nprocess_virtual_memory_bytes [1-9]"),
|
||||
regexp.MustCompile("\nprocess_resident_memory_bytes [1-9]"),
|
||||
regexp.MustCompile("\nprocess_start_time_seconds [0-9.]{10,}"),
|
||||
regexp.MustCompile("\nfoobar_process_cpu_seconds_total [0-9]"),
|
||||
regexp.MustCompile("\nfoobar_process_max_fds [1-9]"),
|
||||
regexp.MustCompile("\nfoobar_process_open_fds [1-9]"),
|
||||
regexp.MustCompile("\nfoobar_process_virtual_memory_max_bytes (-1|[1-9])"),
|
||||
regexp.MustCompile("\nfoobar_process_virtual_memory_bytes [1-9]"),
|
||||
regexp.MustCompile("\nfoobar_process_resident_memory_bytes [1-9]"),
|
||||
regexp.MustCompile("\nfoobar_process_start_time_seconds [0-9.]{10,}"),
|
||||
|
@ -55,4 +77,27 @@ func TestProcessCollector(t *testing.T) {
|
|||
t.Errorf("want body to match %s\n%s", re, buf.String())
|
||||
}
|
||||
}
|
||||
|
||||
brokenProcessCollector := NewProcessCollector(ProcessCollectorOpts{
|
||||
PidFn: func() (int, error) { return 0, errors.New("boo") },
|
||||
ReportErrors: true,
|
||||
})
|
||||
|
||||
ch := make(chan Metric)
|
||||
go func() {
|
||||
brokenProcessCollector.Collect(ch)
|
||||
close(ch)
|
||||
}()
|
||||
n := 0
|
||||
for m := range ch {
|
||||
n++
|
||||
pb := &dto.Metric{}
|
||||
err := m.Write(pb)
|
||||
if err == nil {
|
||||
t.Error("metric collected from broken process collector is unexpectedly valid")
|
||||
}
|
||||
}
|
||||
if n != 1 {
|
||||
t.Errorf("%d metrics collected, want 1", n)
|
||||
}
|
||||
}
|
||||
|
|
223
vendor/github.com/prometheus/client_golang/prometheus/promauto/auto.go
generated
vendored
Normal file
|
@ -0,0 +1,223 @@
|
|||
// Copyright 2018 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Package promauto provides constructors for the usual Prometheus metrics that
|
||||
// return them already registered with the global registry
|
||||
// (prometheus.DefaultRegisterer). This allows very compact code, avoiding any
|
||||
// references to the registry altogether, but all the constructors in this
|
||||
// package will panic if the registration fails.
|
||||
//
|
||||
// The following example is a complete program to create a histogram of normally
|
||||
// distributed random numbers from the math/rand package:
|
||||
//
|
||||
// package main
|
||||
//
|
||||
// import (
|
||||
// "math/rand"
|
||||
// "net/http"
|
||||
//
|
||||
// "github.com/prometheus/client_golang/prometheus"
|
||||
// "github.com/prometheus/client_golang/prometheus/promauto"
|
||||
// "github.com/prometheus/client_golang/prometheus/promhttp"
|
||||
// )
|
||||
//
|
||||
// var histogram = promauto.NewHistogram(prometheus.HistogramOpts{
|
||||
// Name: "random_numbers",
|
||||
// Help: "A histogram of normally distributed random numbers.",
|
||||
// Buckets: prometheus.LinearBuckets(-3, .1, 61),
|
||||
// })
|
||||
//
|
||||
// func Random() {
|
||||
// for {
|
||||
// histogram.Observe(rand.NormFloat64())
|
||||
// }
|
||||
// }
|
||||
//
|
||||
// func main() {
|
||||
// go Random()
|
||||
// http.Handle("/metrics", promhttp.Handler())
|
||||
// http.ListenAndServe(":1971", nil)
|
||||
// }
|
||||
//
|
||||
// Prometheus's version of a minimal hello-world program:
|
||||
//
|
||||
// package main
|
||||
//
|
||||
// import (
|
||||
// "fmt"
|
||||
// "net/http"
|
||||
//
|
||||
// "github.com/prometheus/client_golang/prometheus"
|
||||
// "github.com/prometheus/client_golang/prometheus/promauto"
|
||||
// "github.com/prometheus/client_golang/prometheus/promhttp"
|
||||
// )
|
||||
//
|
||||
// func main() {
|
||||
// http.Handle("/", promhttp.InstrumentHandlerCounter(
|
||||
// promauto.NewCounterVec(
|
||||
// prometheus.CounterOpts{
|
||||
// Name: "hello_requests_total",
|
||||
// Help: "Total number of hello-world requests by HTTP code.",
|
||||
// },
|
||||
// []string{"code"},
|
||||
// ),
|
||||
// http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
// fmt.Fprint(w, "Hello, world!")
|
||||
// }),
|
||||
// ))
|
||||
// http.Handle("/metrics", promhttp.Handler())
|
||||
// http.ListenAndServe(":1971", nil)
|
||||
// }
|
||||
//
|
||||
// This appears very handy. So why are these constructors locked away in a
|
||||
// separate package? There are two caveats:
|
||||
//
|
||||
// First, in more complex programs, global state is often quite problematic.
|
||||
// That's the reason why the metrics constructors in the prometheus package do
|
||||
// not interact with the global prometheus.DefaultRegisterer on their own. You
|
||||
// are free to use the Register or MustRegister functions to register them with
|
||||
// the global prometheus.DefaultRegisterer, but you could as well choose a local
|
||||
// Registerer (usually created with prometheus.NewRegistry, but there are other
|
||||
// scenarios, e.g. testing).
|
||||
//
|
||||
// The second issue is that registration may fail, e.g. if a metric inconsistent
|
||||
// with the newly to be registered one is already registered. But how to signal
|
||||
// and handle a failure in the automatic registration with the default registry?
|
||||
// The only way is panicking. While panicking on invalid input provided by the
|
||||
// programmer is certainly fine, things are a bit more subtle in this case: You
|
||||
// might just add another package to the program, and that package (in its init
|
||||
// function) happens to register a metric with the same name as your code. Now,
|
||||
// all of a sudden, either your code or the code of the newly imported package
|
||||
// panics, depending on initialization order, without any opportunity to handle
|
||||
// the case gracefully. Even worse is a scenario where registration happens
|
||||
// later during the runtime (e.g. upon loading some kind of plugin), where the
|
||||
// panic could be triggered long after the code has been deployed to
|
||||
// production. A possibility to panic should be explicitly called out by the
|
||||
// Must… idiom, cf. prometheus.MustRegister. But adding a separate set of
|
||||
// constructors in the prometheus package called MustRegisterNewCounterVec or
|
||||
// similar would be quite unwieldy. Adding an extra MustRegister method to each
|
||||
// metric, returning the registered metric, would result in nice code for those
|
||||
// using the method, but would pollute every single metric interface for
|
||||
// everybody avoiding the global registry.
|
||||
//
|
||||
// To address both issues, the problematic auto-registering and possibly
|
||||
// panicking constructors are all in this package with a clear warning
|
||||
// ahead. And whoever cares about avoiding global state and possibly panicking
|
||||
// function calls can simply ignore the existence of the promauto package
|
||||
// altogether.
|
||||
//
|
||||
// A final note: There is a similar case in the net/http package of the standard
|
||||
// library. It has DefaultServeMux as a global instance of ServeMux, and the
|
||||
// Handle function acts on it, panicking if a handler for the same pattern has
|
||||
// already been registered. However, one might argue that the whole HTTP routing
|
||||
// is usually set up closely together in the same package or file, while
|
||||
// Prometheus metrics tend to be spread widely over the codebase, increasing the
|
||||
// chance of surprising registration failures. Furthermore, the use of global
|
||||
// state in net/http has been criticized widely, and some avoid it altogether.
|
||||
package promauto
|
||||
|
||||
import "github.com/prometheus/client_golang/prometheus"
|
||||
|
||||
// NewCounter works like the function of the same name in the prometheus package
|
||||
// but it automatically registers the Counter with the
|
||||
// prometheus.DefaultRegisterer. If the registration fails, NewCounter panics.
|
||||
func NewCounter(opts prometheus.CounterOpts) prometheus.Counter {
|
||||
c := prometheus.NewCounter(opts)
|
||||
prometheus.MustRegister(c)
|
||||
return c
|
||||
}
|
||||
|
||||
// NewCounterVec works like the function of the same name in the prometheus
|
||||
// package but it automatically registers the CounterVec with the
|
||||
// prometheus.DefaultRegisterer. If the registration fails, NewCounterVec
|
||||
// panics.
|
||||
func NewCounterVec(opts prometheus.CounterOpts, labelNames []string) *prometheus.CounterVec {
|
||||
c := prometheus.NewCounterVec(opts, labelNames)
|
||||
prometheus.MustRegister(c)
|
||||
return c
|
||||
}
|
||||
|
||||
// NewCounterFunc works like the function of the same name in the prometheus
|
||||
// package but it automatically registers the CounterFunc with the
|
||||
// prometheus.DefaultRegisterer. If the registration fails, NewCounterFunc
|
||||
// panics.
|
||||
func NewCounterFunc(opts prometheus.CounterOpts, function func() float64) prometheus.CounterFunc {
|
||||
g := prometheus.NewCounterFunc(opts, function)
|
||||
prometheus.MustRegister(g)
|
||||
return g
|
||||
}
|
||||
|
||||
// NewGauge works like the function of the same name in the prometheus package
|
||||
// but it automatically registers the Gauge with the
|
||||
// prometheus.DefaultRegisterer. If the registration fails, NewGauge panics.
|
||||
func NewGauge(opts prometheus.GaugeOpts) prometheus.Gauge {
|
||||
g := prometheus.NewGauge(opts)
|
||||
prometheus.MustRegister(g)
|
||||
return g
|
||||
}
|
||||
|
||||
// NewGaugeVec works like the function of the same name in the prometheus
|
||||
// package but it automatically registers the GaugeVec with the
|
||||
// prometheus.DefaultRegisterer. If the registration fails, NewGaugeVec panics.
|
||||
func NewGaugeVec(opts prometheus.GaugeOpts, labelNames []string) *prometheus.GaugeVec {
|
||||
g := prometheus.NewGaugeVec(opts, labelNames)
|
||||
prometheus.MustRegister(g)
|
||||
return g
|
||||
}
|
||||
|
||||
// NewGaugeFunc works like the function of the same name in the prometheus
|
||||
// package but it automatically registers the GaugeFunc with the
|
||||
// prometheus.DefaultRegisterer. If the registration fails, NewGaugeFunc panics.
|
||||
func NewGaugeFunc(opts prometheus.GaugeOpts, function func() float64) prometheus.GaugeFunc {
|
||||
g := prometheus.NewGaugeFunc(opts, function)
|
||||
prometheus.MustRegister(g)
|
||||
return g
|
||||
}
|
||||
|
||||
// NewSummary works like the function of the same name in the prometheus package
|
||||
// but it automatically registers the Summary with the
|
||||
// prometheus.DefaultRegisterer. If the registration fails, NewSummary panics.
|
||||
func NewSummary(opts prometheus.SummaryOpts) prometheus.Summary {
|
||||
s := prometheus.NewSummary(opts)
|
||||
prometheus.MustRegister(s)
|
||||
return s
|
||||
}
|
||||
|
||||
// NewSummaryVec works like the function of the same name in the prometheus
|
||||
// package but it automatically registers the SummaryVec with the
|
||||
// prometheus.DefaultRegisterer. If the registration fails, NewSummaryVec
|
||||
// panics.
|
||||
func NewSummaryVec(opts prometheus.SummaryOpts, labelNames []string) *prometheus.SummaryVec {
|
||||
s := prometheus.NewSummaryVec(opts, labelNames)
|
||||
prometheus.MustRegister(s)
|
||||
return s
|
||||
}
|
||||
|
||||
// NewHistogram works like the function of the same name in the prometheus
|
||||
// package but it automatically registers the Histogram with the
|
||||
// prometheus.DefaultRegisterer. If the registration fails, NewHistogram panics.
|
||||
func NewHistogram(opts prometheus.HistogramOpts) prometheus.Histogram {
|
||||
h := prometheus.NewHistogram(opts)
|
||||
prometheus.MustRegister(h)
|
||||
return h
|
||||
}
|
||||
|
||||
// NewHistogramVec works like the function of the same name in the prometheus
|
||||
// package but it automatically registers the HistogramVec with the
|
||||
// prometheus.DefaultRegisterer. If the registration fails, NewHistogramVec
|
||||
// panics.
|
||||
func NewHistogramVec(opts prometheus.HistogramOpts, labelNames []string) *prometheus.HistogramVec {
|
||||
h := prometheus.NewHistogramVec(opts, labelNames)
|
||||
prometheus.MustRegister(h)
|
||||
return h
|
||||
}
|
36
vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go
generated
vendored
|
@ -76,16 +76,16 @@ type flusherDelegator struct{ *responseWriterDelegator }
|
|||
type hijackerDelegator struct{ *responseWriterDelegator }
|
||||
type readerFromDelegator struct{ *responseWriterDelegator }
|
||||
|
||||
func (d *closeNotifierDelegator) CloseNotify() <-chan bool {
|
||||
func (d closeNotifierDelegator) CloseNotify() <-chan bool {
|
||||
return d.ResponseWriter.(http.CloseNotifier).CloseNotify()
|
||||
}
|
||||
func (d *flusherDelegator) Flush() {
|
||||
func (d flusherDelegator) Flush() {
|
||||
d.ResponseWriter.(http.Flusher).Flush()
|
||||
}
|
||||
func (d *hijackerDelegator) Hijack() (net.Conn, *bufio.ReadWriter, error) {
|
||||
func (d hijackerDelegator) Hijack() (net.Conn, *bufio.ReadWriter, error) {
|
||||
return d.ResponseWriter.(http.Hijacker).Hijack()
|
||||
}
|
||||
func (d *readerFromDelegator) ReadFrom(re io.Reader) (int64, error) {
|
||||
func (d readerFromDelegator) ReadFrom(re io.Reader) (int64, error) {
|
||||
if !d.wroteHeader {
|
||||
d.WriteHeader(http.StatusOK)
|
||||
}
|
||||
|
@ -102,34 +102,34 @@ func init() {
|
|||
return d
|
||||
}
|
||||
pickDelegator[closeNotifier] = func(d *responseWriterDelegator) delegator { // 1
|
||||
return &closeNotifierDelegator{d}
|
||||
return closeNotifierDelegator{d}
|
||||
}
|
||||
pickDelegator[flusher] = func(d *responseWriterDelegator) delegator { // 2
|
||||
return &flusherDelegator{d}
|
||||
return flusherDelegator{d}
|
||||
}
|
||||
pickDelegator[flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 3
|
||||
return struct {
|
||||
*responseWriterDelegator
|
||||
http.Flusher
|
||||
http.CloseNotifier
|
||||
}{d, &flusherDelegator{d}, &closeNotifierDelegator{d}}
|
||||
}{d, flusherDelegator{d}, closeNotifierDelegator{d}}
|
||||
}
|
||||
pickDelegator[hijacker] = func(d *responseWriterDelegator) delegator { // 4
|
||||
return &hijackerDelegator{d}
|
||||
return hijackerDelegator{d}
|
||||
}
|
||||
pickDelegator[hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 5
|
||||
return struct {
|
||||
*responseWriterDelegator
|
||||
http.Hijacker
|
||||
http.CloseNotifier
|
||||
}{d, &hijackerDelegator{d}, &closeNotifierDelegator{d}}
|
||||
}{d, hijackerDelegator{d}, closeNotifierDelegator{d}}
|
||||
}
|
||||
pickDelegator[hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 6
|
||||
return struct {
|
||||
*responseWriterDelegator
|
||||
http.Hijacker
|
||||
http.Flusher
|
||||
}{d, &hijackerDelegator{d}, &flusherDelegator{d}}
|
||||
}{d, hijackerDelegator{d}, flusherDelegator{d}}
|
||||
}
|
||||
pickDelegator[hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 7
|
||||
return struct {
|
||||
|
@ -137,7 +137,7 @@ func init() {
|
|||
http.Hijacker
|
||||
http.Flusher
|
||||
http.CloseNotifier
|
||||
}{d, &hijackerDelegator{d}, &flusherDelegator{d}, &closeNotifierDelegator{d}}
|
||||
}{d, hijackerDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}}
|
||||
}
|
||||
pickDelegator[readerFrom] = func(d *responseWriterDelegator) delegator { // 8
|
||||
return readerFromDelegator{d}
|
||||
|
@ -147,14 +147,14 @@ func init() {
|
|||
*responseWriterDelegator
|
||||
io.ReaderFrom
|
||||
http.CloseNotifier
|
||||
}{d, &readerFromDelegator{d}, &closeNotifierDelegator{d}}
|
||||
}{d, readerFromDelegator{d}, closeNotifierDelegator{d}}
|
||||
}
|
||||
pickDelegator[readerFrom+flusher] = func(d *responseWriterDelegator) delegator { // 10
|
||||
return struct {
|
||||
*responseWriterDelegator
|
||||
io.ReaderFrom
|
||||
http.Flusher
|
||||
}{d, &readerFromDelegator{d}, &flusherDelegator{d}}
|
||||
}{d, readerFromDelegator{d}, flusherDelegator{d}}
|
||||
}
|
||||
pickDelegator[readerFrom+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 11
|
||||
return struct {
|
||||
|
@ -162,14 +162,14 @@ func init() {
|
|||
io.ReaderFrom
|
||||
http.Flusher
|
||||
http.CloseNotifier
|
||||
}{d, &readerFromDelegator{d}, &flusherDelegator{d}, &closeNotifierDelegator{d}}
|
||||
}{d, readerFromDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}}
|
||||
}
|
||||
pickDelegator[readerFrom+hijacker] = func(d *responseWriterDelegator) delegator { // 12
|
||||
return struct {
|
||||
*responseWriterDelegator
|
||||
io.ReaderFrom
|
||||
http.Hijacker
|
||||
}{d, &readerFromDelegator{d}, &hijackerDelegator{d}}
|
||||
}{d, readerFromDelegator{d}, hijackerDelegator{d}}
|
||||
}
|
||||
pickDelegator[readerFrom+hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 13
|
||||
return struct {
|
||||
|
@ -177,7 +177,7 @@ func init() {
|
|||
io.ReaderFrom
|
||||
http.Hijacker
|
||||
http.CloseNotifier
|
||||
}{d, &readerFromDelegator{d}, &hijackerDelegator{d}, &closeNotifierDelegator{d}}
|
||||
}{d, readerFromDelegator{d}, hijackerDelegator{d}, closeNotifierDelegator{d}}
|
||||
}
|
||||
pickDelegator[readerFrom+hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 14
|
||||
return struct {
|
||||
|
@ -185,7 +185,7 @@ func init() {
|
|||
io.ReaderFrom
|
||||
http.Hijacker
|
||||
http.Flusher
|
||||
}{d, &readerFromDelegator{d}, &hijackerDelegator{d}, &flusherDelegator{d}}
|
||||
}{d, readerFromDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}}
|
||||
}
|
||||
pickDelegator[readerFrom+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 15
|
||||
return struct {
|
||||
|
@ -194,6 +194,6 @@ func init() {
|
|||
http.Hijacker
|
||||
http.Flusher
|
||||
http.CloseNotifier
|
||||
}{d, &readerFromDelegator{d}, &hijackerDelegator{d}, &flusherDelegator{d}, &closeNotifierDelegator{d}}
|
||||
}{d, readerFromDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}}
|
||||
}
|
||||
}
|
||||
|
|
34
vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator_1_8.go
generated
vendored
|
@ -22,27 +22,27 @@ import (
|
|||
|
||||
type pusherDelegator struct{ *responseWriterDelegator }
|
||||
|
||||
func (d *pusherDelegator) Push(target string, opts *http.PushOptions) error {
|
||||
func (d pusherDelegator) Push(target string, opts *http.PushOptions) error {
|
||||
return d.ResponseWriter.(http.Pusher).Push(target, opts)
|
||||
}
|
||||
|
||||
func init() {
|
||||
pickDelegator[pusher] = func(d *responseWriterDelegator) delegator { // 16
|
||||
return &pusherDelegator{d}
|
||||
return pusherDelegator{d}
|
||||
}
|
||||
pickDelegator[pusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 17
|
||||
return struct {
|
||||
*responseWriterDelegator
|
||||
http.Pusher
|
||||
http.CloseNotifier
|
||||
}{d, &pusherDelegator{d}, &closeNotifierDelegator{d}}
|
||||
}{d, pusherDelegator{d}, closeNotifierDelegator{d}}
|
||||
}
|
||||
pickDelegator[pusher+flusher] = func(d *responseWriterDelegator) delegator { // 18
|
||||
return struct {
|
||||
*responseWriterDelegator
|
||||
http.Pusher
|
||||
http.Flusher
|
||||
}{d, &pusherDelegator{d}, &flusherDelegator{d}}
|
||||
}{d, pusherDelegator{d}, flusherDelegator{d}}
|
||||
}
|
||||
pickDelegator[pusher+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 19
|
||||
return struct {
|
||||
|
@ -50,14 +50,14 @@ func init() {
|
|||
http.Pusher
|
||||
http.Flusher
|
||||
http.CloseNotifier
|
||||
}{d, &pusherDelegator{d}, &flusherDelegator{d}, &closeNotifierDelegator{d}}
|
||||
}{d, pusherDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}}
|
||||
}
|
||||
pickDelegator[pusher+hijacker] = func(d *responseWriterDelegator) delegator { // 20
|
||||
return struct {
|
||||
*responseWriterDelegator
|
||||
http.Pusher
|
||||
http.Hijacker
|
||||
}{d, &pusherDelegator{d}, &hijackerDelegator{d}}
|
||||
}{d, pusherDelegator{d}, hijackerDelegator{d}}
|
||||
}
|
||||
pickDelegator[pusher+hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 21
|
||||
return struct {
|
||||
|
@ -65,7 +65,7 @@ func init() {
|
|||
http.Pusher
|
||||
http.Hijacker
|
||||
http.CloseNotifier
|
||||
}{d, &pusherDelegator{d}, &hijackerDelegator{d}, &closeNotifierDelegator{d}}
|
||||
}{d, pusherDelegator{d}, hijackerDelegator{d}, closeNotifierDelegator{d}}
|
||||
}
|
||||
pickDelegator[pusher+hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 22
|
||||
return struct {
|
||||
|
@ -73,7 +73,7 @@ func init() {
|
|||
http.Pusher
|
||||
http.Hijacker
|
||||
http.Flusher
|
||||
}{d, &pusherDelegator{d}, &hijackerDelegator{d}, &flusherDelegator{d}}
|
||||
}{d, pusherDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}}
|
||||
}
|
||||
pickDelegator[pusher+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { //23
|
||||
return struct {
|
||||
|
@ -82,14 +82,14 @@ func init() {
|
|||
http.Hijacker
|
||||
http.Flusher
|
||||
http.CloseNotifier
|
||||
}{d, &pusherDelegator{d}, &hijackerDelegator{d}, &flusherDelegator{d}, &closeNotifierDelegator{d}}
|
||||
}{d, pusherDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}}
|
||||
}
|
||||
pickDelegator[pusher+readerFrom] = func(d *responseWriterDelegator) delegator { // 24
|
||||
return struct {
|
||||
*responseWriterDelegator
|
||||
http.Pusher
|
||||
io.ReaderFrom
|
||||
}{d, &pusherDelegator{d}, &readerFromDelegator{d}}
|
||||
}{d, pusherDelegator{d}, readerFromDelegator{d}}
|
||||
}
|
||||
pickDelegator[pusher+readerFrom+closeNotifier] = func(d *responseWriterDelegator) delegator { // 25
|
||||
return struct {
|
||||
|
@ -97,7 +97,7 @@ func init() {
|
|||
http.Pusher
|
||||
io.ReaderFrom
|
||||
http.CloseNotifier
|
||||
}{d, &pusherDelegator{d}, &readerFromDelegator{d}, &closeNotifierDelegator{d}}
|
||||
}{d, pusherDelegator{d}, readerFromDelegator{d}, closeNotifierDelegator{d}}
|
||||
}
|
||||
pickDelegator[pusher+readerFrom+flusher] = func(d *responseWriterDelegator) delegator { // 26
|
||||
return struct {
|
||||
|
@ -105,7 +105,7 @@ func init() {
|
|||
http.Pusher
|
||||
io.ReaderFrom
|
||||
http.Flusher
|
||||
}{d, &pusherDelegator{d}, &readerFromDelegator{d}, &flusherDelegator{d}}
|
||||
}{d, pusherDelegator{d}, readerFromDelegator{d}, flusherDelegator{d}}
|
||||
}
|
||||
pickDelegator[pusher+readerFrom+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 27
|
||||
return struct {
|
||||
|
@ -114,7 +114,7 @@ func init() {
|
|||
io.ReaderFrom
|
||||
http.Flusher
|
||||
http.CloseNotifier
|
||||
}{d, &pusherDelegator{d}, &readerFromDelegator{d}, &flusherDelegator{d}, &closeNotifierDelegator{d}}
|
||||
}{d, pusherDelegator{d}, readerFromDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}}
|
||||
}
|
||||
pickDelegator[pusher+readerFrom+hijacker] = func(d *responseWriterDelegator) delegator { // 28
|
||||
return struct {
|
||||
|
@ -122,7 +122,7 @@ func init() {
|
|||
http.Pusher
|
||||
io.ReaderFrom
|
||||
http.Hijacker
|
||||
}{d, &pusherDelegator{d}, &readerFromDelegator{d}, &hijackerDelegator{d}}
|
||||
}{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}}
|
||||
}
|
||||
pickDelegator[pusher+readerFrom+hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 29
|
||||
return struct {
|
||||
|
@ -131,7 +131,7 @@ func init() {
|
|||
io.ReaderFrom
|
||||
http.Hijacker
|
||||
http.CloseNotifier
|
||||
}{d, &pusherDelegator{d}, &readerFromDelegator{d}, &hijackerDelegator{d}, &closeNotifierDelegator{d}}
|
||||
}{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}, closeNotifierDelegator{d}}
|
||||
}
|
||||
pickDelegator[pusher+readerFrom+hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 30
|
||||
return struct {
|
||||
|
@ -140,7 +140,7 @@ func init() {
|
|||
io.ReaderFrom
|
||||
http.Hijacker
|
||||
http.Flusher
|
||||
}{d, &pusherDelegator{d}, &readerFromDelegator{d}, &hijackerDelegator{d}, &flusherDelegator{d}}
|
||||
}{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}}
|
||||
}
|
||||
pickDelegator[pusher+readerFrom+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 31
|
||||
return struct {
|
||||
|
@ -150,7 +150,7 @@ func init() {
|
|||
http.Hijacker
|
||||
http.Flusher
|
||||
http.CloseNotifier
|
||||
}{d, &pusherDelegator{d}, &readerFromDelegator{d}, &hijackerDelegator{d}, &flusherDelegator{d}, &closeNotifierDelegator{d}}
|
||||
}{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}}
|
||||
}
|
||||
}
|
||||
|
||||
|
|
2
vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go
generated
vendored
|
@ -302,7 +302,7 @@ func decorateWriter(request *http.Request, writer io.Writer, compressionDisabled
|
|||
header := request.Header.Get(acceptEncodingHeader)
|
||||
parts := strings.Split(header, ",")
|
||||
for _, part := range parts {
|
||||
part := strings.TrimSpace(part)
|
||||
part = strings.TrimSpace(part)
|
||||
if part == "gzip" || strings.HasPrefix(part, "gzip;") {
|
||||
return gzip.NewWriter(writer), "gzip"
|
||||
}
|
||||
|
|
2
vendor/github.com/prometheus/client_golang/prometheus/promhttp/http_test.go
generated
vendored
|
@ -121,7 +121,7 @@ the_count 0
|
|||
t.Errorf("got HTTP status code %d, want %d", got, want)
|
||||
}
|
||||
if got := logBuf.String(); got != wantMsg {
|
||||
t.Errorf("got log message:\n%s\nwant log mesage:\n%s\n", got, wantMsg)
|
||||
t.Errorf("got log message:\n%s\nwant log message:\n%s\n", got, wantMsg)
|
||||
}
|
||||
if got := writer.Body.String(); got != wantErrorBody {
|
||||
t.Errorf("got body:\n%s\nwant body:\n%s\n", got, wantErrorBody)
|
||||
|
|
|
@ -81,8 +81,8 @@ func InstrumentRoundTripperTrace(it *InstrumentTrace, next http.RoundTripper) Ro
}
},
DNSDone: func(_ httptrace.DNSDoneInfo) {
if it.DNSStart != nil {
it.DNSStart(time.Since(start).Seconds())
if it.DNSDone != nil {
it.DNSDone(time.Since(start).Seconds())
}
},
ConnectStart: func(_, _ string) {
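
The hunk above fixes the DNSDone hook to call the DNSDone callback instead of DNSStart. A minimal sketch of the underlying net/http/httptrace hooks that this instrumentation is built on; durations are just printed here instead of being observed into Prometheus metrics, and the target URL is a placeholder.

package main

import (
	"fmt"
	"net/http"
	"net/http/httptrace"
	"time"
)

func main() {
	start := time.Now()
	trace := &httptrace.ClientTrace{
		DNSStart: func(_ httptrace.DNSStartInfo) {
			fmt.Println("dns_start at", time.Since(start).Seconds(), "s")
		},
		DNSDone: func(_ httptrace.DNSDoneInfo) {
			fmt.Println("dns_done at", time.Since(start).Seconds(), "s")
		},
	}
	req, err := http.NewRequest("GET", "http://example.org", nil)
	if err != nil {
		panic(err)
	}
	// Attach the trace to the request context; the transport invokes the hooks.
	req = req.WithContext(httptrace.WithClientTrace(req.Context(), trace))
	if _, err := http.DefaultTransport.RoundTrip(req); err != nil {
		fmt.Println("request failed:", err)
	}
}
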
172
vendor/github.com/prometheus/client_golang/prometheus/push/deprecated.go
generated
vendored
Normal file
@ -0,0 +1,172 @@
// Copyright 2018 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package push

// This file contains only deprecated code. Remove after v0.9 is released.

import (
"bytes"
"fmt"
"io/ioutil"
"net/http"
"net/url"
"os"
"strings"

"github.com/prometheus/common/expfmt"
"github.com/prometheus/common/model"

"github.com/prometheus/client_golang/prometheus"
)

// FromGatherer triggers a metric collection by the provided Gatherer (which is
// usually implemented by a prometheus.Registry) and pushes all gathered metrics
// to the Pushgateway specified by url, using the provided job name and the
// (optional) further grouping labels (the grouping map may be nil). See the
// Pushgateway documentation for detailed implications of the job and other
// grouping labels. Neither the job name nor any grouping label value may
// contain a "/". The metrics pushed must not contain a job label of their own
// nor any of the grouping labels.
//
// You can use just host:port or ip:port as url, in which case 'http://' is
// added automatically. You can also include the schema in the URL. However, do
// not include the '/metrics/jobs/...' part.
//
// Note that all previously pushed metrics with the same job and other grouping
// labels will be replaced with the metrics pushed by this call. (It uses HTTP
// method 'PUT' to push to the Pushgateway.)
//
// Deprecated: Please use a Pusher created with New instead.
func FromGatherer(job string, grouping map[string]string, url string, g prometheus.Gatherer) error {
return push(job, grouping, url, g, "PUT")
}

// AddFromGatherer works like FromGatherer, but only previously pushed metrics
// with the same name (and the same job and other grouping labels) will be
// replaced. (It uses HTTP method 'POST' to push to the Pushgateway.)
//
// Deprecated: Please use a Pusher created with New instead.
func AddFromGatherer(job string, grouping map[string]string, url string, g prometheus.Gatherer) error {
return push(job, grouping, url, g, "POST")
}

func push(job string, grouping map[string]string, pushURL string, g prometheus.Gatherer, method string) error {
if !strings.Contains(pushURL, "://") {
pushURL = "http://" + pushURL
}
if strings.HasSuffix(pushURL, "/") {
pushURL = pushURL[:len(pushURL)-1]
}

if strings.Contains(job, "/") {
return fmt.Errorf("job contains '/': %s", job)
}
urlComponents := []string{url.QueryEscape(job)}
for ln, lv := range grouping {
if !model.LabelName(ln).IsValid() {
return fmt.Errorf("grouping label has invalid name: %s", ln)
}
if strings.Contains(lv, "/") {
return fmt.Errorf("value of grouping label %s contains '/': %s", ln, lv)
}
urlComponents = append(urlComponents, ln, lv)
}
pushURL = fmt.Sprintf("%s/metrics/job/%s", pushURL, strings.Join(urlComponents, "/"))

mfs, err := g.Gather()
if err != nil {
return err
}
buf := &bytes.Buffer{}
enc := expfmt.NewEncoder(buf, expfmt.FmtProtoDelim)
// Check for pre-existing grouping labels:
for _, mf := range mfs {
for _, m := range mf.GetMetric() {
for _, l := range m.GetLabel() {
if l.GetName() == "job" {
return fmt.Errorf("pushed metric %s (%s) already contains a job label", mf.GetName(), m)
}
if _, ok := grouping[l.GetName()]; ok {
return fmt.Errorf(
"pushed metric %s (%s) already contains grouping label %s",
mf.GetName(), m, l.GetName(),
)
}
}
}
enc.Encode(mf)
}
req, err := http.NewRequest(method, pushURL, buf)
if err != nil {
return err
}
req.Header.Set(contentTypeHeader, string(expfmt.FmtProtoDelim))
resp, err := http.DefaultClient.Do(req)
if err != nil {
return err
}
defer resp.Body.Close()
if resp.StatusCode != 202 {
body, _ := ioutil.ReadAll(resp.Body) // Ignore any further error as this is for an error message only.
return fmt.Errorf("unexpected status code %d while pushing to %s: %s", resp.StatusCode, pushURL, body)
}
return nil
}

// Collectors works like FromGatherer, but it does not use a Gatherer. Instead,
// it collects from the provided collectors directly. It is a convenient way to
// push only a few metrics.
//
// Deprecated: Please use a Pusher created with New instead.
func Collectors(job string, grouping map[string]string, url string, collectors ...prometheus.Collector) error {
return pushCollectors(job, grouping, url, "PUT", collectors...)
}

// AddCollectors works like AddFromGatherer, but it does not use a Gatherer.
// Instead, it collects from the provided collectors directly. It is a
// convenient way to push only a few metrics.
//
// Deprecated: Please use a Pusher created with New instead.
func AddCollectors(job string, grouping map[string]string, url string, collectors ...prometheus.Collector) error {
return pushCollectors(job, grouping, url, "POST", collectors...)
}

func pushCollectors(job string, grouping map[string]string, url, method string, collectors ...prometheus.Collector) error {
r := prometheus.NewRegistry()
for _, collector := range collectors {
if err := r.Register(collector); err != nil {
return err
}
}
return push(job, grouping, url, r, method)
}

// HostnameGroupingKey returns a label map with the only entry
// {instance="<hostname>"}. This can be conveniently used as the grouping
// parameter if metrics should be pushed with the hostname as label. The
// returned map is created upon each call so that the caller is free to add more
// labels to the map.
//
// Deprecated: Usually, metrics pushed to the Pushgateway should not be
// host-centric. (You would use https://github.com/prometheus/node_exporter in
// that case.) If you have the need to add the hostname to the grouping key, you
// are probably doing something wrong. See
// https://prometheus.io/docs/practices/pushing/ for details.
func HostnameGroupingKey() map[string]string {
hostname, err := os.Hostname()
if err != nil {
return map[string]string{"instance": "unknown"}
}
return map[string]string{"instance": hostname}
}
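
A minimal sketch of migrating from the deprecated helpers above to the builder API introduced in this commit. The Pushgateway address and job name are placeholders; both call styles use only functions defined in this package.

package main

import (
	"log"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/push"
)

func main() {
	reg := prometheus.NewRegistry()

	// Old style (deprecated): free function, grouping passed as a map.
	if err := push.FromGatherer("my_job", push.HostnameGroupingKey(), "http://pushgateway:9091", reg); err != nil {
		log.Println("deprecated push failed:", err)
	}

	// New style: configure a Pusher, then Push (HTTP PUT) or Add (HTTP POST).
	if err := push.New("http://pushgateway:9091", "my_job").Gatherer(reg).Push(); err != nil {
		log.Println("push failed:", err)
	}
}
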
80
vendor/github.com/prometheus/client_golang/prometheus/push/example_add_from_gatherer_test.go
generated
vendored
Normal file
@ -0,0 +1,80 @@
// Copyright 2016 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package push_test

import (
"fmt"
"time"

"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/push"
)

var (
completionTime = prometheus.NewGauge(prometheus.GaugeOpts{
Name: "db_backup_last_completion_timestamp_seconds",
Help: "The timestamp of the last completion of a DB backup, successful or not.",
})
successTime = prometheus.NewGauge(prometheus.GaugeOpts{
Name: "db_backup_last_success_timestamp_seconds",
Help: "The timestamp of the last successful completion of a DB backup.",
})
duration = prometheus.NewGauge(prometheus.GaugeOpts{
Name: "db_backup_duration_seconds",
Help: "The duration of the last DB backup in seconds.",
})
records = prometheus.NewGauge(prometheus.GaugeOpts{
Name: "db_backup_records_processed",
Help: "The number of records processed in the last DB backup.",
})
)

func performBackup() (int, error) {
// Perform the backup and return the number of backed up records and any
// applicable error.
// ...
return 42, nil
}

func ExamplePusher_Add() {
// We use a registry here to benefit from the consistency checks that
// happen during registration.
registry := prometheus.NewRegistry()
registry.MustRegister(completionTime, duration, records)
// Note that successTime is not registered.

pusher := push.New("http://pushgateway:9091", "db_backup").Gatherer(registry)

start := time.Now()
n, err := performBackup()
records.Set(float64(n))
// Note that time.Since only uses a monotonic clock in Go1.9+.
duration.Set(time.Since(start).Seconds())
completionTime.SetToCurrentTime()
if err != nil {
fmt.Println("DB backup failed:", err)
} else {
// Add successTime to pusher only in case of success.
// We could as well register it with the registry.
// This example, however, demonstrates that you can
// mix Gatherers and Collectors when handling a Pusher.
pusher.Collector(successTime)
successTime.SetToCurrentTime()
}
// Add is used here rather than Push to not delete a previously pushed
// success timestamp in case of a failure of this backup.
if err := pusher.Add(); err != nil {
fmt.Println("Could not push to Pushgateway:", err)
}
}
35
vendor/github.com/prometheus/client_golang/prometheus/push/examples_test.go
generated
vendored
Normal file
@ -0,0 +1,35 @@
// Copyright 2016 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package push_test

import (
"fmt"

"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/push"
)

func ExamplePusher_Push() {
completionTime := prometheus.NewGauge(prometheus.GaugeOpts{
Name: "db_backup_last_completion_timestamp_seconds",
Help: "The timestamp of the last successful completion of a DB backup.",
})
completionTime.SetToCurrentTime()
if err := push.New("http://pushgateway:9091", "db_backup").
Collector(completionTime).
Grouping("db", "customers").
Push(); err != nil {
fmt.Println("Could not push completion time to Pushgateway:", err)
}
}
236
vendor/github.com/prometheus/client_golang/prometheus/push/push.go
generated
vendored
Normal file
@ -0,0 +1,236 @@
// Copyright 2015 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package push provides functions to push metrics to a Pushgateway. It uses a
// builder approach. Create a Pusher with New and then add the various options
// by using its methods, finally calling Add or Push, like this:
//
// // Easy case:
// push.New("http://example.org/metrics", "my_job").Gatherer(myRegistry).Push()
//
// // Complex case:
// push.New("http://example.org/metrics", "my_job").
// Collector(myCollector1).
// Collector(myCollector2).
// Grouping("zone", "xy").
// Client(&myHTTPClient).
// BasicAuth("top", "secret").
// Add()
//
// See the examples section for more detailed examples.
//
// See the documentation of the Pushgateway to understand the meaning of
// the grouping key and the differences between Push and Add:
// https://github.com/prometheus/pushgateway
package push

import (
"bytes"
"fmt"
"io/ioutil"
"net/http"
"net/url"
"strings"

"github.com/prometheus/common/expfmt"
"github.com/prometheus/common/model"

"github.com/prometheus/client_golang/prometheus"
)

const contentTypeHeader = "Content-Type"

// Pusher manages a push to the Pushgateway. Use New to create one, configure it
// with its methods, and finally use the Add or Push method to push.
type Pusher struct {
error error

url, job string
grouping map[string]string

gatherers prometheus.Gatherers
registerer prometheus.Registerer

client *http.Client
useBasicAuth bool
username, password string
}

// New creates a new Pusher to push to the provided URL with the provided job
// name. You can use just host:port or ip:port as url, in which case “http://”
// is added automatically. Alternatively, include the schema in the
// URL. However, do not include the “/metrics/jobs/…” part.
//
// Note that until https://github.com/prometheus/pushgateway/issues/97 is
// resolved, a “/” character in the job name is prohibited.
func New(url, job string) *Pusher {
var (
reg = prometheus.NewRegistry()
err error
)
if !strings.Contains(url, "://") {
url = "http://" + url
}
if strings.HasSuffix(url, "/") {
url = url[:len(url)-1]
}
if strings.Contains(job, "/") {
err = fmt.Errorf("job contains '/': %s", job)
}

return &Pusher{
error: err,
url: url,
job: job,
grouping: map[string]string{},
gatherers: prometheus.Gatherers{reg},
registerer: reg,
client: &http.Client{},
}
}

// Push collects/gathers all metrics from all Collectors and Gatherers added to
// this Pusher. Then, it pushes them to the Pushgateway configured while
// creating this Pusher, using the configured job name and any added grouping
// labels as grouping key. All previously pushed metrics with the same job and
// other grouping labels will be replaced with the metrics pushed by this
// call. (It uses HTTP method “PUT” to push to the Pushgateway.)
//
// Push returns the first error encountered by any method call (including this
// one) in the lifetime of the Pusher.
func (p *Pusher) Push() error {
return p.push("PUT")
}

// Add works like push, but only previously pushed metrics with the same name
// (and the same job and other grouping labels) will be replaced. (It uses HTTP
// method “POST” to push to the Pushgateway.)
func (p *Pusher) Add() error {
return p.push("POST")
}

// Gatherer adds a Gatherer to the Pusher, from which metrics will be gathered
// to push them to the Pushgateway. The gathered metrics must not contain a job
// label of their own.
//
// For convenience, this method returns a pointer to the Pusher itself.
func (p *Pusher) Gatherer(g prometheus.Gatherer) *Pusher {
p.gatherers = append(p.gatherers, g)
return p
}

// Collector adds a Collector to the Pusher, from which metrics will be
// collected to push them to the Pushgateway. The collected metrics must not
// contain a job label of their own.
//
// For convenience, this method returns a pointer to the Pusher itself.
func (p *Pusher) Collector(c prometheus.Collector) *Pusher {
if p.error == nil {
p.error = p.registerer.Register(c)
}
return p
}

// Grouping adds a label pair to the grouping key of the Pusher, replacing any
// previously added label pair with the same label name. Note that setting any
// labels in the grouping key that are already contained in the metrics to push
// will lead to an error.
//
// For convenience, this method returns a pointer to the Pusher itself.
//
// Note that until https://github.com/prometheus/pushgateway/issues/97 is
// resolved, this method does not allow a “/” character in the label value.
func (p *Pusher) Grouping(name, value string) *Pusher {
if p.error == nil {
if !model.LabelName(name).IsValid() {
p.error = fmt.Errorf("grouping label has invalid name: %s", name)
return p
}
if strings.Contains(value, "/") {
p.error = fmt.Errorf("value of grouping label %s contains '/': %s", name, value)
return p
}
p.grouping[name] = value
}
return p
}

// Client sets a custom HTTP client for the Pusher. For convenience, this method
// returns a pointer to the Pusher itself.
func (p *Pusher) Client(c *http.Client) *Pusher {
p.client = c
return p
}

// BasicAuth configures the Pusher to use HTTP Basic Authentication with the
// provided username and password. For convenience, this method returns a
// pointer to the Pusher itself.
func (p *Pusher) BasicAuth(username, password string) *Pusher {
p.useBasicAuth = true
p.username = username
p.password = password
return p
}

func (p *Pusher) push(method string) error {
if p.error != nil {
return p.error
}
urlComponents := []string{url.QueryEscape(p.job)}
for ln, lv := range p.grouping {
urlComponents = append(urlComponents, ln, lv)
}
pushURL := fmt.Sprintf("%s/metrics/job/%s", p.url, strings.Join(urlComponents, "/"))

mfs, err := p.gatherers.Gather()
if err != nil {
return err
}
buf := &bytes.Buffer{}
enc := expfmt.NewEncoder(buf, expfmt.FmtProtoDelim)
// Check for pre-existing grouping labels:
for _, mf := range mfs {
for _, m := range mf.GetMetric() {
for _, l := range m.GetLabel() {
if l.GetName() == "job" {
return fmt.Errorf("pushed metric %s (%s) already contains a job label", mf.GetName(), m)
}
if _, ok := p.grouping[l.GetName()]; ok {
return fmt.Errorf(
"pushed metric %s (%s) already contains grouping label %s",
mf.GetName(), m, l.GetName(),
)
}
}
}
enc.Encode(mf)
}
req, err := http.NewRequest(method, pushURL, buf)
if err != nil {
return err
}
if p.useBasicAuth {
req.SetBasicAuth(p.username, p.password)
}
req.Header.Set(contentTypeHeader, string(expfmt.FmtProtoDelim))
resp, err := p.client.Do(req)
if err != nil {
return err
}
defer resp.Body.Close()
if resp.StatusCode != 202 {
body, _ := ioutil.ReadAll(resp.Body) // Ignore any further error as this is for an error message only.
return fmt.Errorf("unexpected status code %d while pushing to %s: %s", resp.StatusCode, pushURL, body)
}
return nil
}
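
A minimal sketch exercising the builder options defined above (Collector, Grouping, Client, BasicAuth, Push). The gateway address, credentials, metric name, and grouping values are placeholders.

package main

import (
	"log"
	"net/http"
	"time"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/push"
)

func main() {
	temp := prometheus.NewGauge(prometheus.GaugeOpts{
		Name: "room_temperature_celsius",
		Help: "Current room temperature.",
	})
	temp.Set(21.5)

	err := push.New("https://pushgateway.example.org", "sensor_job").
		Collector(temp).                                // registered on the Pusher's own registry
		Grouping("room", "office").                     // becomes part of the URL grouping key
		Client(&http.Client{Timeout: 5 * time.Second}). // custom client, e.g. to set a timeout
		BasicAuth("user", "secret").                    // sent with the push request
		Push()                                          // PUT: replace the whole group
	if err != nil {
		log.Println("could not push:", err)
	}
}
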
194
vendor/github.com/prometheus/client_golang/prometheus/push/push_test.go
generated
vendored
Normal file
@ -0,0 +1,194 @@
// Copyright 2016 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package push

import (
"bytes"
"io/ioutil"
"net/http"
"net/http/httptest"
"testing"

"github.com/prometheus/common/expfmt"

"github.com/prometheus/client_golang/prometheus"
)

func TestPush(t *testing.T) {

var (
lastMethod string
lastBody []byte
lastPath string
)

// Fake a Pushgateway that always responds with 202.
pgwOK := httptest.NewServer(
http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
lastMethod = r.Method
var err error
lastBody, err = ioutil.ReadAll(r.Body)
if err != nil {
t.Fatal(err)
}
lastPath = r.URL.EscapedPath()
w.Header().Set("Content-Type", `text/plain; charset=utf-8`)
w.WriteHeader(http.StatusAccepted)
}),
)
defer pgwOK.Close()

// Fake a Pushgateway that always responds with 500.
pgwErr := httptest.NewServer(
http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
http.Error(w, "fake error", http.StatusInternalServerError)
}),
)
defer pgwErr.Close()

metric1 := prometheus.NewCounter(prometheus.CounterOpts{
Name: "testname1",
Help: "testhelp1",
})
metric2 := prometheus.NewGauge(prometheus.GaugeOpts{
Name: "testname2",
Help: "testhelp2",
ConstLabels: prometheus.Labels{"foo": "bar", "dings": "bums"},
})

reg := prometheus.NewRegistry()
reg.MustRegister(metric1)
reg.MustRegister(metric2)

mfs, err := reg.Gather()
if err != nil {
t.Fatal(err)
}

buf := &bytes.Buffer{}
enc := expfmt.NewEncoder(buf, expfmt.FmtProtoDelim)

for _, mf := range mfs {
if err := enc.Encode(mf); err != nil {
t.Fatal(err)
}
}
wantBody := buf.Bytes()

// Push some Collectors, all good.
if err := New(pgwOK.URL, "testjob").
Collector(metric1).
Collector(metric2).
Push(); err != nil {
t.Fatal(err)
}
if lastMethod != "PUT" {
t.Error("want method PUT for Push, got", lastMethod)
}
if bytes.Compare(lastBody, wantBody) != 0 {
t.Errorf("got body %v, want %v", lastBody, wantBody)
}
if lastPath != "/metrics/job/testjob" {
t.Error("unexpected path:", lastPath)
}

// Add some Collectors, with nil grouping, all good.
if err := New(pgwOK.URL, "testjob").
Collector(metric1).
Collector(metric2).
Add(); err != nil {
t.Fatal(err)
}
if lastMethod != "POST" {
t.Error("want method POST for Add, got", lastMethod)
}
if bytes.Compare(lastBody, wantBody) != 0 {
t.Errorf("got body %v, want %v", lastBody, wantBody)
}
if lastPath != "/metrics/job/testjob" {
t.Error("unexpected path:", lastPath)
}

// Push some Collectors with a broken PGW.
if err := New(pgwErr.URL, "testjob").
Collector(metric1).
Collector(metric2).
Push(); err == nil {
t.Error("push to broken Pushgateway succeeded")
} else {
if got, want := err.Error(), "unexpected status code 500 while pushing to "+pgwErr.URL+"/metrics/job/testjob: fake error\n"; got != want {
t.Errorf("got error %q, want %q", got, want)
}
}

// Push some Collectors with invalid grouping or job.
if err := New(pgwOK.URL, "testjob").
Grouping("foo", "bums").
Collector(metric1).
Collector(metric2).
Push(); err == nil {
t.Error("push with grouping contained in metrics succeeded")
}
if err := New(pgwOK.URL, "test/job").
Collector(metric1).
Collector(metric2).
Push(); err == nil {
t.Error("push with invalid job value succeeded")
}
if err := New(pgwOK.URL, "testjob").
Grouping("foobar", "bu/ms").
Collector(metric1).
Collector(metric2).
Push(); err == nil {
t.Error("push with invalid grouping succeeded")
}
if err := New(pgwOK.URL, "testjob").
Grouping("foo-bar", "bums").
Collector(metric1).
Collector(metric2).
Push(); err == nil {
t.Error("push with invalid grouping succeeded")
}

// Push registry, all good.
if err := New(pgwOK.URL, "testjob").
Gatherer(reg).
Push(); err != nil {
t.Fatal(err)
}
if lastMethod != "PUT" {
t.Error("want method PUT for Push, got", lastMethod)
}
if bytes.Compare(lastBody, wantBody) != 0 {
t.Errorf("got body %v, want %v", lastBody, wantBody)
}

// Add registry, all good.
if err := New(pgwOK.URL, "testjob").
Grouping("a", "x").
Grouping("b", "y").
Gatherer(reg).
Add(); err != nil {
t.Fatal(err)
}
if lastMethod != "POST" {
t.Error("want method POST for Add, got", lastMethod)
}
if bytes.Compare(lastBody, wantBody) != 0 {
t.Errorf("got body %v, want %v", lastBody, wantBody)
}
if lastPath != "/metrics/job/testjob/a/x/b/y" && lastPath != "/metrics/job/testjob/b/y/a/x" {
t.Error("unexpected path:", lastPath)
}
}
359
vendor/github.com/prometheus/client_golang/prometheus/registry.go
generated
vendored
@ -15,17 +15,18 @@ package prometheus
import (
"bytes"
"errors"
"fmt"
"os"
"runtime"
"sort"
"strings"
"sync"
"unicode/utf8"
"github.com/golang/protobuf/proto"
dto "github.com/prometheus/client_model/go"
"github.com/prometheus/client_golang/prometheus/internal"
)
const (
@ -38,12 +39,13 @@ const (
// Registerer and Gatherer interface a number of convenience functions in this
// package act on. Initially, both variables point to the same Registry, which
// has a process collector (currently on Linux only, see NewProcessCollector)
// and a Go collector (see NewGoCollector) already registered. This approach to
// keep default instances as global state mirrors the approach of other packages
// in the Go standard library. Note that there are caveats. Change the variables
// with caution and only if you understand the consequences. Users who want to
// avoid global state altogether should not use the convenience functions and
// act on custom instances instead.
// and a Go collector (see NewGoCollector, in particular the note about
// stop-the-world implication with Go versions older than 1.9) already
// registered. This approach to keep default instances as global state mirrors
// the approach of other packages in the Go standard library. Note that there
// are caveats. Change the variables with caution and only if you understand the
// consequences. Users who want to avoid global state altogether should not use
// the convenience functions and act on custom instances instead.
var (
defaultRegistry = NewRegistry()
DefaultRegisterer Registerer = defaultRegistry
@ -51,7 +53,7 @@ var (
)
func init() {
MustRegister(NewProcessCollector(os.Getpid(), ""))
MustRegister(NewProcessCollector(ProcessCollectorOpts{}))
MustRegister(NewGoCollector())
}
@ -67,7 +69,8 @@ func NewRegistry() *Registry {
// NewPedanticRegistry returns a registry that checks during collection if each
// collected Metric is consistent with its reported Desc, and if the Desc has
// actually been registered with the registry.
// actually been registered with the registry. Unchecked Collectors (those whose
// Describe methed does not yield any descriptors) are excluded from the check.
//
// Usually, a Registry will be happy as long as the union of all collected
// Metrics is consistent and valid even if some metrics are not consistent with
@ -97,6 +100,14 @@ type Registerer interface {
// returned error is an instance of AlreadyRegisteredError, which
// contains the previously registered Collector.
//
// A Collector whose Describe method does not yield any Desc is treated
// as unchecked. Registration will always succeed. No check for
// re-registering (see previous paragraph) is performed. Thus, the
// caller is responsible for not double-registering the same unchecked
// Collector, and for providing a Collector that will not cause
// inconsistent metrics on collection. (This would lead to scrape
// errors.)
//
// It is in general not safe to register the same Collector multiple
// times concurrently.
Register(Collector) error
@ -107,7 +118,9 @@ type Registerer interface {
// Unregister unregisters the Collector that equals the Collector passed
// in as an argument. (Two Collectors are considered equal if their
// Describe method yields the same set of descriptors.) The function
// returns whether a Collector was unregistered.
// returns whether a Collector was unregistered. Note that an unchecked
// Collector cannot be unregistered (as its Describe method does not
// yield any descriptor).
//
// Note that even after unregistering, it will not be possible to
// register a new Collector that is inconsistent with the unregistered
@ -125,15 +138,23 @@ type Registerer interface {
type Gatherer interface {
// Gather calls the Collect method of the registered Collectors and then
// gathers the collected metrics into a lexicographically sorted slice
// of MetricFamily protobufs. Even if an error occurs, Gather attempts
// to gather as many metrics as possible. Hence, if a non-nil error is
// returned, the returned MetricFamily slice could be nil (in case of a
// fatal error that prevented any meaningful metric collection) or
// contain a number of MetricFamily protobufs, some of which might be
// incomplete, and some might be missing altogether. The returned error
// (which might be a MultiError) explains the details. In scenarios
// where complete collection is critical, the returned MetricFamily
// protobufs should be disregarded if the returned error is non-nil.
// of uniquely named MetricFamily protobufs. Gather ensures that the
// returned slice is valid and self-consistent so that it can be used
// for valid exposition. As an exception to the strict consistency
// requirements described for metric.Desc, Gather will tolerate
// different sets of label names for metrics of the same metric family.
//
// Even if an error occurs, Gather attempts to gather as many metrics as
// possible. Hence, if a non-nil error is returned, the returned
// MetricFamily slice could be nil (in case of a fatal error that
// prevented any meaningful metric collection) or contain a number of
// MetricFamily protobufs, some of which might be incomplete, and some
// might be missing altogether. The returned error (which might be a
// MultiError) explains the details. Note that this is mostly useful for
// debugging purposes. If the gathered protobufs are to be used for
// exposition in actual monitoring, it is almost always better to not
// expose an incomplete result and instead disregard the returned
// MetricFamily protobufs in case the returned error is non-nil.
Gather() ([]*dto.MetricFamily, error)
}
@ -234,6 +255,7 @@ type Registry struct {
collectorsByID map[uint64]Collector // ID is a hash of the descIDs.
descIDs map[uint64]struct{}
dimHashesByName map[string]uint64
uncheckedCollectors []Collector
pedanticChecksEnabled bool
}
@ -291,9 +313,10 @@ func (r *Registry) Register(c Collector) error {
}
}
}
// Did anything happen at all?
// A Collector yielding no Desc at all is considered unchecked.
if len(newDescIDs) == 0 {
return errors.New("collector has no descriptors")
r.uncheckedCollectors = append(r.uncheckedCollectors, c)
return nil
}
if existing, exists := r.collectorsByID[collectorID]; exists {
return AlreadyRegisteredError{
@ -367,20 +390,24 @@ func (r *Registry) MustRegister(cs ...Collector) {
// Gather implements Gatherer.
func (r *Registry) Gather() ([]*dto.MetricFamily, error) {
var (
metricChan = make(chan Metric, capMetricChan)
metricHashes = map[uint64]struct{}{}
dimHashes = map[string]uint64{}
wg sync.WaitGroup
errs MultiError // The collected errors to return in the end.
registeredDescIDs map[uint64]struct{} // Only used for pedantic checks
checkedMetricChan = make(chan Metric, capMetricChan)
uncheckedMetricChan = make(chan Metric, capMetricChan)
metricHashes = map[uint64]struct{}{}
wg sync.WaitGroup
errs MultiError // The collected errors to return in the end.
registeredDescIDs map[uint64]struct{} // Only used for pedantic checks
)
r.mtx.RLock()
goroutineBudget := len(r.collectorsByID)
goroutineBudget := len(r.collectorsByID) + len(r.uncheckedCollectors)
metricFamiliesByName := make(map[string]*dto.MetricFamily, len(r.dimHashesByName))
collectors := make(chan Collector, len(r.collectorsByID))
checkedCollectors := make(chan Collector, len(r.collectorsByID))
uncheckedCollectors := make(chan Collector, len(r.uncheckedCollectors))
for _, collector := range r.collectorsByID {
collectors <- collector
checkedCollectors <- collector
}
for _, collector := range r.uncheckedCollectors {
uncheckedCollectors <- collector
}
// In case pedantic checks are enabled, we have to copy the map before
// giving up the RLock.
@ -397,12 +424,14 @@ func (r *Registry) Gather() ([]*dto.MetricFamily, error) {
collectWorker := func() {
for {
select {
case collector := <-collectors:
collector.Collect(metricChan)
wg.Done()
case collector := <-checkedCollectors:
collector.Collect(checkedMetricChan)
case collector := <-uncheckedCollectors:
collector.Collect(uncheckedMetricChan)
default:
return
}
wg.Done()
}
}
@ -410,53 +439,96 @@ func (r *Registry) Gather() ([]*dto.MetricFamily, error) {
go collectWorker()
goroutineBudget--
// Close the metricChan once all collectors are collected.
// Close checkedMetricChan and uncheckedMetricChan once all collectors
// are collected.
go func() {
wg.Wait()
close(metricChan)
close(checkedMetricChan)
close(uncheckedMetricChan)
}()
// Drain metricChan in case of premature return.
// Drain checkedMetricChan and uncheckedMetricChan in case of premature return.
defer func() {
for range metricChan {
if checkedMetricChan != nil {
for range checkedMetricChan {
}
}
if uncheckedMetricChan != nil {
for range uncheckedMetricChan {
}
}
}()
collectLoop:
// Copy the channel references so we can nil them out later to remove
// them from the select statements below.
cmc := checkedMetricChan
umc := uncheckedMetricChan
for {
select {
case metric, ok := <-metricChan:
case metric, ok := <-cmc:
if !ok {
// metricChan is closed, we are done.
break collectLoop
cmc = nil
break
}
errs.Append(processMetric(
metric, metricFamiliesByName,
metricHashes, dimHashes,
metricHashes,
registeredDescIDs,
))
case metric, ok := <-umc:
if !ok {
umc = nil
break
}
errs.Append(processMetric(
metric, metricFamiliesByName,
metricHashes,
nil,
))
default:
if goroutineBudget <= 0 || len(collectors) == 0 {
// All collectors are aleady being worked on or
if goroutineBudget <= 0 || len(checkedCollectors)+len(uncheckedCollectors) == 0 {
// All collectors are already being worked on or
// we have already as many goroutines started as
// there are collectors. Just process metrics
// from now on.
for metric := range metricChan {
// there are collectors. Do the same as above,
// just without the default.
select {
case metric, ok := <-cmc:
if !ok {
cmc = nil
break
}
errs.Append(processMetric(
metric, metricFamiliesByName,
metricHashes, dimHashes,
metricHashes,
registeredDescIDs,
))
case metric, ok := <-umc:
if !ok {
umc = nil
break
}
errs.Append(processMetric(
metric, metricFamiliesByName,
metricHashes,
nil,
))
}
break collectLoop
break
}
// Start more workers.
go collectWorker()
goroutineBudget--
runtime.Gosched()
}
// Once both checkedMetricChan and uncheckdMetricChan are closed
// and drained, the contraption above will nil out cmc and umc,
// and then we can leave the collect loop here.
if cmc == nil && umc == nil {
break
}
}
return normalizeMetricFamilies(metricFamiliesByName), errs.MaybeUnwrap()
return internal.NormalizeMetricFamilies(metricFamiliesByName), errs.MaybeUnwrap()
}
// processMetric is an internal helper method only used by the Gather method.
@ -464,16 +536,20 @@ func processMetric(
metric Metric,
metricFamiliesByName map[string]*dto.MetricFamily,
metricHashes map[uint64]struct{},
dimHashes map[string]uint64,
registeredDescIDs map[uint64]struct{},
) error {
desc := metric.Desc()
// Wrapped metrics collected by an unchecked Collector can have an
// invalid Desc.
if desc.err != nil {
return desc.err
}
dtoMetric := &dto.Metric{}
if err := metric.Write(dtoMetric); err != nil {
return fmt.Errorf("error collecting metric %v: %s", desc, err)
}
metricFamily, ok := metricFamiliesByName[desc.fqName]
if ok {
if ok { // Existing name.
if metricFamily.GetHelp() != desc.help {
return fmt.Errorf(
"collected metric %s %s has help %q but should have %q",
@ -520,7 +596,7 @@ func processMetric(
default:
panic("encountered MetricFamily with invalid type")
}
} else {
} else { // New name.
metricFamily = &dto.MetricFamily{}
metricFamily.Name = proto.String(desc.fqName)
metricFamily.Help = proto.String(desc.help)
@ -539,9 +615,12 @@ func processMetric(
default:
return fmt.Errorf("empty metric collected: %s", dtoMetric)
}
if err := checkSuffixCollisions(metricFamily, metricFamiliesByName); err != nil {
return err
}
metricFamiliesByName[desc.fqName] = metricFamily
}
if err := checkMetricConsistency(metricFamily, dtoMetric, metricHashes, dimHashes); err != nil {
if err := checkMetricConsistency(metricFamily, dtoMetric, metricHashes); err != nil {
return err
}
if registeredDescIDs != nil {
@ -583,7 +662,6 @@ func (gs Gatherers) Gather() ([]*dto.MetricFamily, error) {
var (
metricFamiliesByName = map[string]*dto.MetricFamily{}
metricHashes = map[uint64]struct{}{}
dimHashes = map[string]uint64{}
errs MultiError // The collected errors to return in the end.
)
@ -620,10 +698,14 @@ func (gs Gatherers) Gather() ([]*dto.MetricFamily, error) {
existingMF.Name = mf.Name
existingMF.Help = mf.Help
existingMF.Type = mf.Type
if err := checkSuffixCollisions(existingMF, metricFamiliesByName); err != nil {
errs = append(errs, err)
continue
}
metricFamiliesByName[mf.GetName()] = existingMF
}
for _, m := range mf.Metric {
if err := checkMetricConsistency(existingMF, m, metricHashes, dimHashes); err != nil {
if err := checkMetricConsistency(existingMF, m, metricHashes); err != nil {
errs = append(errs, err)
continue
}
@ -631,87 +713,77 @@ func (gs Gatherers) Gather() ([]*dto.MetricFamily, error) {
}
}
}
return normalizeMetricFamilies(metricFamiliesByName), errs.MaybeUnwrap()
return internal.NormalizeMetricFamilies(metricFamiliesByName), errs.MaybeUnwrap()
}
// metricSorter is a sortable slice of *dto.Metric.
type metricSorter []*dto.Metric
func (s metricSorter) Len() int {
return len(s)
}
func (s metricSorter) Swap(i, j int) {
s[i], s[j] = s[j], s[i]
}
func (s metricSorter) Less(i, j int) bool {
if len(s[i].Label) != len(s[j].Label) {
// This should not happen. The metrics are
// inconsistent. However, we have to deal with the fact, as
// people might use custom collectors or metric family injection
// to create inconsistent metrics. So let's simply compare the
// number of labels in this case. That will still yield
// reproducible sorting.
return len(s[i].Label) < len(s[j].Label)
// checkSuffixCollisions checks for collisions with the “magic” suffixes the
// Prometheus text format and the internal metric representation of the
// Prometheus server add while flattening Summaries and Histograms.
func checkSuffixCollisions(mf *dto.MetricFamily, mfs map[string]*dto.MetricFamily) error {
var (
newName = mf.GetName()
newType = mf.GetType()
newNameWithoutSuffix = ""
)
switch {
case strings.HasSuffix(newName, "_count"):
newNameWithoutSuffix = newName[:len(newName)-6]
case strings.HasSuffix(newName, "_sum"):
newNameWithoutSuffix = newName[:len(newName)-4]
case strings.HasSuffix(newName, "_bucket"):
newNameWithoutSuffix = newName[:len(newName)-7]
}
for n, lp := range s[i].Label {
vi := lp.GetValue()
vj := s[j].Label[n].GetValue()
if vi != vj {
return vi < vj
if newNameWithoutSuffix != "" {
if existingMF, ok := mfs[newNameWithoutSuffix]; ok {
switch existingMF.GetType() {
case dto.MetricType_SUMMARY:
if !strings.HasSuffix(newName, "_bucket") {
return fmt.Errorf(
"collected metric named %q collides with previously collected summary named %q",
newName, newNameWithoutSuffix,
)
}
case dto.MetricType_HISTOGRAM:
return fmt.Errorf(
"collected metric named %q collides with previously collected histogram named %q",
newName, newNameWithoutSuffix,
)
}
}
}
// We should never arrive here. Multiple metrics with the same
// label set in the same scrape will lead to undefined ingestion
// behavior. However, as above, we have to provide stable sorting
// here, even for inconsistent metrics. So sort equal metrics
// by their timestamp, with missing timestamps (implying "now")
// coming last.
if s[i].TimestampMs == nil {
return false
}
if s[j].TimestampMs == nil {
return true
}
return s[i].GetTimestampMs() < s[j].GetTimestampMs()
}
// normalizeMetricFamilies returns a MetricFamily slice with empty
// MetricFamilies pruned and the remaining MetricFamilies sorted by name within
// the slice, with the contained Metrics sorted within each MetricFamily.
func normalizeMetricFamilies(metricFamiliesByName map[string]*dto.MetricFamily) []*dto.MetricFamily {
for _, mf := range metricFamiliesByName {
sort.Sort(metricSorter(mf.Metric))
}
names := make([]string, 0, len(metricFamiliesByName))
for name, mf := range metricFamiliesByName {
if len(mf.Metric) > 0 {
names = append(names, name)
if newType == dto.MetricType_SUMMARY || newType == dto.MetricType_HISTOGRAM {
if _, ok := mfs[newName+"_count"]; ok {
return fmt.Errorf(
"collected histogram or summary named %q collides with previously collected metric named %q",
newName, newName+"_count",
)
}
if _, ok := mfs[newName+"_sum"]; ok {
return fmt.Errorf(
"collected histogram or summary named %q collides with previously collected metric named %q",
newName, newName+"_sum",
)
}
}
sort.Strings(names)
result := make([]*dto.MetricFamily, 0, len(names))
for _, name := range names {
result = append(result, metricFamiliesByName[name])
if newType == dto.MetricType_HISTOGRAM {
if _, ok := mfs[newName+"_bucket"]; ok {
return fmt.Errorf(
"collected histogram named %q collides with previously collected metric named %q",
newName, newName+"_bucket",
)
}
}
return result
return nil
}
// checkMetricConsistency checks if the provided Metric is consistent with the
// provided MetricFamily. It also hashed the Metric labels and the MetricFamily
// provided MetricFamily. It also hashes the Metric labels and the MetricFamily
// name. If the resulting hash is already in the provided metricHashes, an error
// is returned. If not, it is added to metricHashes. The provided dimHashes maps
// MetricFamily names to their dimHash (hashed sorted label names). If dimHashes
// doesn't yet contain a hash for the provided MetricFamily, it is
// added. Otherwise, an error is returned if the existing dimHashes in not equal
// the calculated dimHash.
// is returned. If not, it is added to metricHashes.
func checkMetricConsistency(
metricFamily *dto.MetricFamily,
dtoMetric *dto.Metric,
metricHashes map[uint64]struct{},
dimHashes map[string]uint64,
) error {
// Type consistency with metric family.
if metricFamily.GetType() == dto.MetricType_GAUGE && dtoMetric.Gauge == nil ||
@ -720,47 +792,50 @@ func checkMetricConsistency(
metricFamily.GetType() == dto.MetricType_HISTOGRAM && dtoMetric.Histogram == nil ||
metricFamily.GetType() == dto.MetricType_UNTYPED && dtoMetric.Untyped == nil {
return fmt.Errorf(
"collected metric %s %s is not a %s",
"collected metric %q { %s} is not a %s",
metricFamily.GetName(), dtoMetric, metricFamily.GetType(),
)
}
for _, labelPair := range dtoMetric.GetLabel() {
if !utf8.ValidString(*labelPair.Value) {
return fmt.Errorf("collected metric's label %s is not utf8: %#v", *labelPair.Name, *labelPair.Value)
if !checkLabelName(labelPair.GetName()) {
return fmt.Errorf(
"collected metric %q { %s} has a label with an invalid name: %s",
metricFamily.GetName(), dtoMetric, labelPair.GetName(),
)
}
if dtoMetric.Summary != nil && labelPair.GetName() == quantileLabel {
return fmt.Errorf(
"collected metric %q { %s} must not have an explicit %q label",
metricFamily.GetName(), dtoMetric, quantileLabel,
)
}
if !utf8.ValidString(labelPair.GetValue()) {
return fmt.Errorf(
"collected metric %q { %s} has a label named %q whose value is not utf8: %#v",
metricFamily.GetName(), dtoMetric, labelPair.GetName(), labelPair.GetValue())
}
}
// Is the metric unique (i.e. no other metric with the same name and the same label values)?
// Is the metric unique (i.e. no other metric with the same name and the same labels)?
h := hashNew()
h = hashAdd(h, metricFamily.GetName())
h = hashAddByte(h, separatorByte)
dh := hashNew()
// Make sure label pairs are sorted. We depend on it for the consistency
// check.
sort.Sort(LabelPairSorter(dtoMetric.Label))
sort.Sort(labelPairSorter(dtoMetric.Label))
for _, lp := range dtoMetric.Label {
h = hashAdd(h, lp.GetName())
h = hashAddByte(h, separatorByte)
h = hashAdd(h, lp.GetValue())
h = hashAddByte(h, separatorByte)
dh = hashAdd(dh, lp.GetName())
dh = hashAddByte(dh, separatorByte)
}
if _, exists := metricHashes[h]; exists {
return fmt.Errorf(
"collected metric %s %s was collected before with the same name and label values",
"collected metric %q { %s} was collected before with the same name and label values",
metricFamily.GetName(), dtoMetric,
)
}
if dimHash, ok := dimHashes[metricFamily.GetName()]; ok {
if dimHash != dh {
return fmt.Errorf(
"collected metric %s %s has label dimensions inconsistent with previously collected metrics in the same metric family",
metricFamily.GetName(), dtoMetric,
)
}
} else {
dimHashes[metricFamily.GetName()] = dh
}
metricHashes[h] = struct{}{}
return nil
}
@ -792,7 +867,7 @@ func checkDescConsistency(
metricFamily.GetName(), dtoMetric, desc,
)
}
sort.Sort(LabelPairSorter(lpsFromDesc))
sort.Sort(labelPairSorter(lpsFromDesc))
for i, lpFromDesc := range lpsFromDesc {
lpFromMetric := dtoMetric.Label[i]
if lpFromDesc.GetName() != lpFromMetric.GetName() ||
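
The reworked Gatherer documentation above describes how gathered MetricFamily slices are merged and checked for consistency. A minimal sketch of combining several Gatherers and exposing the merged result over HTTP, using only types and functions that appear in this diff; the registry names and listen address are illustrative.

package main

import (
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	appReg := prometheus.NewRegistry()
	jobReg := prometheus.NewPedanticRegistry()

	// Gatherers implements Gatherer by merging the output of its members;
	// duplicate or inconsistent metrics surface as errors at gather time.
	merged := prometheus.Gatherers{appReg, jobReg}

	http.Handle("/metrics", promhttp.HandlerFor(merged, promhttp.HandlerOpts{}))
	log.Fatal(http.ListenAndServe(":8080", nil))
}
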
156
vendor/github.com/prometheus/client_golang/prometheus/registry_test.go
generated
vendored
@ -34,7 +34,22 @@ import (
"github.com/prometheus/client_golang/prometheus/promhttp"
)
// uncheckedCollector wraps a Collector but its Describe method yields no Desc.
type uncheckedCollector struct {
c prometheus.Collector
}
func (u uncheckedCollector) Describe(_ chan<- *prometheus.Desc) {}
func (u uncheckedCollector) Collect(c chan<- prometheus.Metric) {
u.c.Collect(c)
}
func testHandler(t testing.TB) {
// TODO(beorn7): This test is a bit too "end-to-end". It tests quite a
// few moving parts that are not strongly coupled. They could/should be
// tested separately. However, the changes planned for v0.10 will
// require a major rework of this test anyway, at which time I will
// structure it in a better way.
metricVec := prometheus.NewCounterVec(
prometheus.CounterOpts{
@ -234,7 +249,64 @@ metric: <
expectedMetricFamilyInvalidLabelValueAsText := []byte(`An error has occurred during metrics gathering:
collected metric's label constname is not utf8: "\xff"
collected metric "name" { label:<name:"constname" value:"\377" > label:<name:"labelname" value:"different_val" > counter:<value:42 > } has a label named "constname" whose value is not utf8: "\xff"
`)
summary := prometheus.NewSummary(prometheus.SummaryOpts{
Name: "complex",
Help: "A metric to check collisions with _sum and _count.",
})
summaryAsText := []byte(`# HELP complex A metric to check collisions with _sum and _count.
# TYPE complex summary
complex{quantile="0.5"} NaN
complex{quantile="0.9"} NaN
complex{quantile="0.99"} NaN
complex_sum 0
complex_count 0
`)
histogram := prometheus.NewHistogram(prometheus.HistogramOpts{
Name: "complex",
Help: "A metric to check collisions with _sun, _count, and _bucket.",
})
externalMetricFamilyWithBucketSuffix := &dto.MetricFamily{
Name: proto.String("complex_bucket"),
Help: proto.String("externaldocstring"),
Type: dto.MetricType_COUNTER.Enum(),
Metric: []*dto.Metric{
{
Counter: &dto.Counter{
Value: proto.Float64(1),
},
},
},
}
externalMetricFamilyWithBucketSuffixAsText := []byte(`# HELP complex_bucket externaldocstring
# TYPE complex_bucket counter
complex_bucket 1
`)
externalMetricFamilyWithCountSuffix := &dto.MetricFamily{
Name: proto.String("complex_count"),
Help: proto.String("externaldocstring"),
Type: dto.MetricType_COUNTER.Enum(),
Metric: []*dto.Metric{
{
Counter: &dto.Counter{
Value: proto.Float64(1),
},
},
},
}
bucketCollisionMsg := []byte(`An error has occurred during metrics gathering:
collected metric named "complex_bucket" collides with previously collected histogram named "complex"
`)
summaryCountCollisionMsg := []byte(`An error has occurred during metrics gathering:
collected metric named "complex_count" collides with previously collected summary named "complex"
`)
histogramCountCollisionMsg := []byte(`An error has occurred during metrics gathering:
collected metric named "complex_count" collides with previously collected histogram named "complex"
`)
type output struct {
@ -496,6 +568,84 @@ collected metric's label constname is not utf8: "\xff"
externalMetricFamilyWithInvalidLabelValue,
},
},
{ // 17
headers: map[string]string{
"Accept": "text/plain",
},
out: output{
headers: map[string]string{
"Content-Type": `text/plain; version=0.0.4; charset=utf-8`,
},
body: expectedMetricFamilyAsText,
},
collector: uncheckedCollector{metricVec},
},
{ // 18
headers: map[string]string{
"Accept": "text/plain",
},
out: output{
headers: map[string]string{
"Content-Type": `text/plain; charset=utf-8`,
},
body: histogramCountCollisionMsg,
},
collector: histogram,
externalMF: []*dto.MetricFamily{
externalMetricFamilyWithCountSuffix,
},
},
{ // 19
headers: map[string]string{
"Accept": "text/plain",
},
out: output{
headers: map[string]string{
"Content-Type": `text/plain; charset=utf-8`,
},
body: bucketCollisionMsg,
},
collector: histogram,
externalMF: []*dto.MetricFamily{
externalMetricFamilyWithBucketSuffix,
},
},
{ // 20
headers: map[string]string{
"Accept": "text/plain",
},
out: output{
headers: map[string]string{
"Content-Type": `text/plain; charset=utf-8`,
},
body: summaryCountCollisionMsg,
},
collector: summary,
externalMF: []*dto.MetricFamily{
externalMetricFamilyWithCountSuffix,
},
},
{ // 21
headers: map[string]string{
"Accept": "text/plain",
},
out: output{
headers: map[string]string{
"Content-Type": `text/plain; version=0.0.4; charset=utf-8`,
},
body: bytes.Join(
[][]byte{
summaryAsText,
externalMetricFamilyWithBucketSuffixAsText,
},
[]byte{},
),
},
collector: summary,
externalMF: []*dto.MetricFamily{
externalMetricFamilyWithBucketSuffix,
},
},
}
for i, scenario := range scenarios {
registry := prometheus.NewPedanticRegistry()
@ -510,7 +660,7 @@ collected metric's label constname is not utf8: "\xff"
}
if scenario.collector != nil {
registry.Register(scenario.collector)
registry.MustRegister(scenario.collector)
}
writer := httptest.NewRecorder()
handler := prometheus.InstrumentHandler("prometheus", promhttp.HandlerFor(gatherer, promhttp.HandlerOpts{}))
@ -521,7 +671,7 @@ collected metric's label constname is not utf8: "\xff"
handler(writer, request)
for key, value := range scenario.out.headers {
|
||||
if writer.HeaderMap.Get(key) != value {
|
||||
if writer.Header().Get(key) != value {
|
||||
t.Errorf(
|
||||
"%d. expected %q for header %q, got %q",
|
||||
i, value, key, writer.Header().Get(key),
|
||||
|
|
31
vendor/github.com/prometheus/client_golang/prometheus/summary.go
generated
vendored
|
@ -37,7 +37,7 @@ const quantileLabel = "quantile"
|
|||
// A typical use-case is the observation of request latencies. By default, a
|
||||
// Summary provides the median, the 90th and the 99th percentile of the latency
|
||||
// as rank estimations. However, the default behavior will change in the
|
||||
// upcoming v0.10 of the library. There will be no rank estiamtions at all by
|
||||
// upcoming v0.10 of the library. There will be no rank estimations at all by
|
||||
// default. For a sane transition, it is recommended to set the desired rank
|
||||
// estimations explicitly.
|
||||
//
|
||||
|
@ -81,10 +81,10 @@ const (
|
|||
)
|
||||
|
||||
// SummaryOpts bundles the options for creating a Summary metric. It is
|
||||
// mandatory to set Name and Help to a non-empty string. While all other fields
|
||||
// are optional and can safely be left at their zero value, it is recommended to
|
||||
// explicitly set the Objectives field to the desired value as the default value
|
||||
// will change in the upcoming v0.10 of the library.
|
||||
// mandatory to set Name to a non-empty string. While all other fields are
|
||||
// optional and can safely be left at their zero value, it is recommended to set
|
||||
// a help string and to explicitly set the Objectives field to the desired value
|
||||
// as the default value will change in the upcoming v0.10 of the library.
|
||||
type SummaryOpts struct {
|
||||
// Namespace, Subsystem, and Name are components of the fully-qualified
|
||||
// name of the Summary (created by joining these components with
|
||||
|
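As an illustration of the recommendation above (not part of the diff), a Summary can pin its rank estimations explicitly so its behaviour does not change with the v0.10 default; the metric name is hypothetical:

package main

import "github.com/prometheus/client_golang/prometheus"

func main() {
	// Explicit Objectives (quantile -> allowed absolute error) keep the
	// output stable across the announced v0.10 default change.
	latency := prometheus.NewSummary(prometheus.SummaryOpts{
		Name:       "request_duration_seconds", // hypothetical name
		Help:       "Duration of requests.",
		Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
	})
	prometheus.MustRegister(latency)
	latency.Observe(0.42)
}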
@ -95,7 +95,7 @@ type SummaryOpts struct {
|
|||
Subsystem string
|
||||
Name string
|
||||
|
||||
// Help provides information about this Summary. Mandatory!
|
||||
// Help provides information about this Summary.
|
||||
//
|
||||
// Metrics with the same fully-qualified name must have the same Help
|
||||
// string.
|
||||
|
@ -105,6 +105,11 @@ type SummaryOpts struct {
|
|||
// with the same fully-qualified name must have the same label names in
|
||||
// their ConstLabels.
|
||||
//
|
||||
// Due to the way a Summary is represented in the Prometheus text format
|
||||
// and how it is handled by the Prometheus server internally, “quantile”
|
||||
// is an illegal label name. Construction of a Summary or SummaryVec
|
||||
// will panic if this label name is used in ConstLabels.
|
||||
//
|
||||
// ConstLabels are only used rarely. In particular, do not use them to
|
||||
// attach the same labels to all your metrics. Those use cases are
|
||||
// better covered by target labels set by the scraping Prometheus
|
||||
|
@ -402,7 +407,16 @@ type SummaryVec struct {
|
|||
|
||||
// NewSummaryVec creates a new SummaryVec based on the provided SummaryOpts and
|
||||
// partitioned by the given label names.
|
||||
//
|
||||
// Due to the way a Summary is represented in the Prometheus text format and how
|
||||
// it is handled by the Prometheus server internally, “quantile” is an illegal
|
||||
// label name. NewSummaryVec will panic if this label name is used.
|
||||
func NewSummaryVec(opts SummaryOpts, labelNames []string) *SummaryVec {
|
||||
for _, ln := range labelNames {
|
||||
if ln == quantileLabel {
|
||||
panic(errQuantileLabelNotAllowed)
|
||||
}
|
||||
}
|
||||
desc := NewDesc(
|
||||
BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
|
||||
opts.Help,
|
||||
|
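To restate the constraint documented above with a sketch (not from the diff): any variable label is acceptable except “quantile”, which NewSummaryVec rejects with a panic; the names below are made up:

package main

import "github.com/prometheus/client_golang/prometheus"

func main() {
	// Valid: partitioned by an ordinary label.
	durations := prometheus.NewSummaryVec(prometheus.SummaryOpts{
		Name:       "task_duration_seconds", // hypothetical name
		Help:       "Task durations by kind.",
		Objectives: map[float64]float64{0.5: 0.05, 0.99: 0.001},
	}, []string{"kind"})
	durations.WithLabelValues("cleanup").Observe(1.2)

	// Passing []string{"quantile"} instead would panic: that label is
	// reserved for the Summary's own quantile dimension.
}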
@ -572,7 +586,7 @@ func (s *constSummary) Write(out *dto.Metric) error {
|
|||
// map[float64]float64{0.5: 0.23, 0.99: 0.56}
|
||||
//
|
||||
// NewConstSummary returns an error if the length of labelValues is not
|
||||
// consistent with the variable labels in Desc.
|
||||
// consistent with the variable labels in Desc or if Desc is invalid.
|
||||
func NewConstSummary(
|
||||
desc *Desc,
|
||||
count uint64,
|
||||
|
@ -580,6 +594,9 @@ func NewConstSummary(
|
|||
quantiles map[float64]float64,
|
||||
labelValues ...string,
|
||||
) (Metric, error) {
|
||||
if desc.err != nil {
|
||||
return nil, desc.err
|
||||
}
|
||||
if err := validateLabelValues(labelValues, len(desc.variableLabels)); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
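A hedged sketch of how NewConstSummary is typically used inside a custom Collector's Collect method, mirroring the error conditions documented above; the descriptor, label, and numbers are invented:

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

var jobDesc = prometheus.NewDesc(
	"job_duration_seconds", "Duration of completed jobs.", // hypothetical
	[]string{"worker"}, nil,
)

func main() {
	s, err := prometheus.NewConstSummary(
		jobDesc,
		42,    // count
		123.4, // sum
		map[float64]float64{0.5: 2.1, 0.99: 9.8},
		"worker-1", // one value per variable label in the Desc
	)
	if err != nil {
		// Returned if labelValues do not match the Desc, or if the Desc is invalid.
		fmt.Println(err)
		return
	}
	_ = s // in real code, sent on the channel passed to Collect
}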
|
25
vendor/github.com/prometheus/client_golang/prometheus/summary_test.go
generated
vendored
|
@ -64,6 +64,31 @@ func TestSummaryWithoutObjectives(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestSummaryWithQuantileLabel(t *testing.T) {
|
||||
defer func() {
|
||||
if r := recover(); r == nil {
|
||||
t.Error("Attempt to create Summary with 'quantile' label did not panic.")
|
||||
}
|
||||
}()
|
||||
_ = NewSummary(SummaryOpts{
|
||||
Name: "test_summary",
|
||||
Help: "less",
|
||||
ConstLabels: Labels{"quantile": "test"},
|
||||
})
|
||||
}
|
||||
|
||||
func TestSummaryVecWithQuantileLabel(t *testing.T) {
|
||||
defer func() {
|
||||
if r := recover(); r == nil {
|
||||
t.Error("Attempt to create SummaryVec with 'quantile' label did not panic.")
|
||||
}
|
||||
}()
|
||||
_ = NewSummaryVec(SummaryOpts{
|
||||
Name: "test_summary",
|
||||
Help: "less",
|
||||
}, []string{"quantile"})
|
||||
}
|
||||
|
||||
func benchmarkSummaryObserve(w int, b *testing.B) {
|
||||
b.StopTimer()
|
||||
|
||||
|
|
184
vendor/github.com/prometheus/client_golang/prometheus/testutil/testutil.go
generated
vendored
Normal file
|
@ -0,0 +1,184 @@
|
|||
// Copyright 2018 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Package testutil provides helpers to test code using the prometheus package
|
||||
// of client_golang.
|
||||
//
|
||||
// While writing unit tests to verify correct instrumentation of your code, it's
|
||||
// a common mistake to mostly test the instrumentation library instead of your
|
||||
// own code. Rather than verifying that a prometheus.Counter's value has changed
|
||||
// as expected or that it shows up in the exposition after registration, it is
|
||||
// in general more robust and more faithful to the concept of unit tests to use
|
||||
// mock implementations of the prometheus.Counter and prometheus.Registerer
|
||||
// interfaces that simply assert that the Add or Register methods have been
|
||||
// called with the expected arguments. However, this might be overkill in simple
|
||||
// scenarios. The ToFloat64 function is provided for simple inspection of a
|
||||
// single-value metric, but it has to be used with caution.
|
||||
//
|
||||
// End-to-end tests to verify all or larger parts of the metrics exposition can
|
||||
// be implemented with the CollectAndCompare or GatherAndCompare functions. The
|
||||
// most appropriate use is not so much testing instrumentation of your code, but
|
||||
// testing custom prometheus.Collector implementations and in particular whole
|
||||
// exporters, i.e. programs that retrieve telemetry data from a 3rd party source
|
||||
// and convert it into Prometheus metrics.
|
||||
package testutil
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"reflect"
|
||||
|
||||
"github.com/prometheus/common/expfmt"
|
||||
|
||||
dto "github.com/prometheus/client_model/go"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/internal"
|
||||
)
|
||||
|
||||
// ToFloat64 collects all Metrics from the provided Collector. It expects that
|
||||
// this results in exactly one Metric being collected, which must be a Gauge,
|
||||
// Counter, or Untyped. In all other cases, ToFloat64 panics. ToFloat64 returns
|
||||
// the value of the collected Metric.
|
||||
//
|
||||
// The Collector provided is typically a simple instance of Gauge or Counter, or
|
||||
// – less commonly – a GaugeVec or CounterVec with exactly one element. But any
|
||||
// Collector fulfilling the prerequisites described above will do.
|
||||
//
|
||||
// Use this function with caution. It is computationally very expensive and thus
|
||||
// not suited at all to read values from Metrics in regular code. This is really
|
||||
// only for testing purposes, and even for testing, other approaches are often
|
||||
// more appropriate (see this package's documentation).
|
||||
//
|
||||
// A clear anti-pattern would be to use a metric type from the prometheus
|
||||
// package to track values that are also needed for something else than the
|
||||
// exposition of Prometheus metrics. For example, you would like to track the
|
||||
// number of items in a queue because your code should reject queuing further
|
||||
// items if a certain limit is reached. It is tempting to track the number of
|
||||
// items in a prometheus.Gauge, as it is then easily available as a metric for
|
||||
// exposition, too. However, then you would need to call ToFloat64 in your
|
||||
// regular code, potentially quite often. The recommended way is to track the
|
||||
// number of items conventionally (in the way you would have done it without
|
||||
// considering Prometheus metrics) and then expose the number with a
|
||||
// prometheus.GaugeFunc.
|
||||
func ToFloat64(c prometheus.Collector) float64 {
|
||||
var (
|
||||
m prometheus.Metric
|
||||
mCount int
|
||||
mChan = make(chan prometheus.Metric)
|
||||
done = make(chan struct{})
|
||||
)
|
||||
|
||||
go func() {
|
||||
for m = range mChan {
|
||||
mCount++
|
||||
}
|
||||
close(done)
|
||||
}()
|
||||
|
||||
c.Collect(mChan)
|
||||
close(mChan)
|
||||
<-done
|
||||
|
||||
if mCount != 1 {
|
||||
panic(fmt.Errorf("collected %d metrics instead of exactly 1", mCount))
|
||||
}
|
||||
|
||||
pb := &dto.Metric{}
|
||||
m.Write(pb)
|
||||
if pb.Gauge != nil {
|
||||
return pb.Gauge.GetValue()
|
||||
}
|
||||
if pb.Counter != nil {
|
||||
return pb.Counter.GetValue()
|
||||
}
|
||||
if pb.Untyped != nil {
|
||||
return pb.Untyped.GetValue()
|
||||
}
|
||||
panic(fmt.Errorf("collected a non-gauge/counter/untyped metric: %s", pb))
|
||||
}
|
||||
|
||||
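A minimal sketch of the intended (test-only) use of ToFloat64, with invented names; as the comment above stresses, it is far too expensive for regular code paths:

package example_test

import (
	"testing"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/testutil"
)

func TestHandlerIncrementsCounter(t *testing.T) {
	requests := prometheus.NewCounter(prometheus.CounterOpts{
		Name: "requests_total", Help: "Handled requests.", // hypothetical
	})

	requests.Inc() // stand-in for calling the code under test

	if got := testutil.ToFloat64(requests); got != 1 {
		t.Errorf("want 1 request counted, got %v", got)
	}
}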
// CollectAndCompare registers the provided Collector with a newly created
|
||||
// pedantic Registry. It then does the same as GatherAndCompare, gathering the
|
||||
// metrics from the pedantic Registry.
|
||||
func CollectAndCompare(c prometheus.Collector, expected io.Reader, metricNames ...string) error {
|
||||
reg := prometheus.NewPedanticRegistry()
|
||||
if err := reg.Register(c); err != nil {
|
||||
return fmt.Errorf("registering collector failed: %s", err)
|
||||
}
|
||||
return GatherAndCompare(reg, expected, metricNames...)
|
||||
}
|
||||
|
||||
// GatherAndCompare gathers all metrics from the provided Gatherer and compares
|
||||
// it to an expected output read from the provided Reader in the Prometheus text
|
||||
// exposition format. If any metricNames are provided, only metrics with those
|
||||
// names are compared.
|
||||
func GatherAndCompare(g prometheus.Gatherer, expected io.Reader, metricNames ...string) error {
|
||||
metrics, err := g.Gather()
|
||||
if err != nil {
|
||||
return fmt.Errorf("gathering metrics failed: %s", err)
|
||||
}
|
||||
if metricNames != nil {
|
||||
metrics = filterMetrics(metrics, metricNames)
|
||||
}
|
||||
var tp expfmt.TextParser
|
||||
expectedMetrics, err := tp.TextToMetricFamilies(expected)
|
||||
if err != nil {
|
||||
return fmt.Errorf("parsing expected metrics failed: %s", err)
|
||||
}
|
||||
|
||||
if !reflect.DeepEqual(metrics, internal.NormalizeMetricFamilies(expectedMetrics)) {
|
||||
// Encode the gathered output to the readable text format for comparison.
|
||||
var buf1 bytes.Buffer
|
||||
enc := expfmt.NewEncoder(&buf1, expfmt.FmtText)
|
||||
for _, mf := range metrics {
|
||||
if err := enc.Encode(mf); err != nil {
|
||||
return fmt.Errorf("encoding result failed: %s", err)
|
||||
}
|
||||
}
|
||||
// Encode normalized expected metrics again to generate them in the same ordering
|
||||
// the registry does to spot differences more easily.
|
||||
var buf2 bytes.Buffer
|
||||
enc = expfmt.NewEncoder(&buf2, expfmt.FmtText)
|
||||
for _, mf := range internal.NormalizeMetricFamilies(expectedMetrics) {
|
||||
if err := enc.Encode(mf); err != nil {
|
||||
return fmt.Errorf("encoding result failed: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
return fmt.Errorf(`
|
||||
metric output does not match expectation; want:
|
||||
|
||||
%s
|
||||
|
||||
got:
|
||||
|
||||
%s
|
||||
`, buf2.String(), buf1.String())
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func filterMetrics(metrics []*dto.MetricFamily, names []string) []*dto.MetricFamily {
|
||||
var filtered []*dto.MetricFamily
|
||||
for _, m := range metrics {
|
||||
for _, name := range names {
|
||||
if m.GetName() == name {
|
||||
filtered = append(filtered, m)
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
return filtered
|
||||
}
|
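For completeness, a sketch of GatherAndCompare used against a whole registry rather than a single Collector (metric names and the expected text are illustrative):

package example_test

import (
	"strings"
	"testing"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/testutil"
)

func TestExporterOutput(t *testing.T) {
	reg := prometheus.NewPedanticRegistry()
	c := prometheus.NewCounter(prometheus.CounterOpts{
		Name: "jobs_done_total", Help: "Completed jobs.", // hypothetical
	})
	reg.MustRegister(c)
	c.Inc()

	expected := `
# HELP jobs_done_total Completed jobs.
# TYPE jobs_done_total counter
jobs_done_total 1
`
	// Only jobs_done_total is compared; other registered metrics are ignored.
	if err := testutil.GatherAndCompare(reg, strings.NewReader(expected), "jobs_done_total"); err != nil {
		t.Error(err)
	}
}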
213
vendor/github.com/prometheus/client_golang/prometheus/testutil/testutil_test.go
generated
vendored
Normal file
|
@ -0,0 +1,213 @@
|
|||
// Copyright 2018 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package testutil
|
||||
|
||||
import (
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
)
|
||||
|
||||
type untypedCollector struct{}
|
||||
|
||||
func (u untypedCollector) Describe(c chan<- *prometheus.Desc) {
|
||||
c <- prometheus.NewDesc("name", "help", nil, nil)
|
||||
}
|
||||
|
||||
func (u untypedCollector) Collect(c chan<- prometheus.Metric) {
|
||||
c <- prometheus.MustNewConstMetric(
|
||||
prometheus.NewDesc("name", "help", nil, nil),
|
||||
prometheus.UntypedValue,
|
||||
2001,
|
||||
)
|
||||
}
|
||||
|
||||
func TestToFloat64(t *testing.T) {
|
||||
gaugeWithAValueSet := prometheus.NewGauge(prometheus.GaugeOpts{})
|
||||
gaugeWithAValueSet.Set(3.14)
|
||||
|
||||
counterVecWithOneElement := prometheus.NewCounterVec(prometheus.CounterOpts{}, []string{"foo"})
|
||||
counterVecWithOneElement.WithLabelValues("bar").Inc()
|
||||
|
||||
counterVecWithTwoElements := prometheus.NewCounterVec(prometheus.CounterOpts{}, []string{"foo"})
|
||||
counterVecWithTwoElements.WithLabelValues("bar").Add(42)
|
||||
counterVecWithTwoElements.WithLabelValues("baz").Inc()
|
||||
|
||||
histogramVecWithOneElement := prometheus.NewHistogramVec(prometheus.HistogramOpts{}, []string{"foo"})
|
||||
histogramVecWithOneElement.WithLabelValues("bar").Observe(2.7)
|
||||
|
||||
scenarios := map[string]struct {
|
||||
collector prometheus.Collector
|
||||
panics bool
|
||||
want float64
|
||||
}{
|
||||
"simple counter": {
|
||||
collector: prometheus.NewCounter(prometheus.CounterOpts{}),
|
||||
panics: false,
|
||||
want: 0,
|
||||
},
|
||||
"simple gauge": {
|
||||
collector: prometheus.NewGauge(prometheus.GaugeOpts{}),
|
||||
panics: false,
|
||||
want: 0,
|
||||
},
|
||||
"simple untyped": {
|
||||
collector: untypedCollector{},
|
||||
panics: false,
|
||||
want: 2001,
|
||||
},
|
||||
"simple histogram": {
|
||||
collector: prometheus.NewHistogram(prometheus.HistogramOpts{}),
|
||||
panics: true,
|
||||
},
|
||||
"simple summary": {
|
||||
collector: prometheus.NewSummary(prometheus.SummaryOpts{}),
|
||||
panics: true,
|
||||
},
|
||||
"simple gauge with an actual value set": {
|
||||
collector: gaugeWithAValueSet,
|
||||
panics: false,
|
||||
want: 3.14,
|
||||
},
|
||||
"counter vec with zero elements": {
|
||||
collector: prometheus.NewCounterVec(prometheus.CounterOpts{}, nil),
|
||||
panics: true,
|
||||
},
|
||||
"counter vec with one element": {
|
||||
collector: counterVecWithOneElement,
|
||||
panics: false,
|
||||
want: 1,
|
||||
},
|
||||
"counter vec with two elements": {
|
||||
collector: counterVecWithTwoElements,
|
||||
panics: true,
|
||||
},
|
||||
"histogram vec with one element": {
|
||||
collector: histogramVecWithOneElement,
|
||||
panics: true,
|
||||
},
|
||||
}
|
||||
|
||||
for n, s := range scenarios {
|
||||
t.Run(n, func(t *testing.T) {
|
||||
defer func() {
|
||||
r := recover()
|
||||
if r == nil && s.panics {
|
||||
t.Error("expected panic")
|
||||
} else if r != nil && !s.panics {
|
||||
t.Error("unexpected panic: ", r)
|
||||
}
|
||||
// Any other combination is the expected outcome.
|
||||
}()
|
||||
if got := ToFloat64(s.collector); got != s.want {
|
||||
t.Errorf("want %f, got %f", s.want, got)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestCollectAndCompare(t *testing.T) {
|
||||
const metadata = `
|
||||
# HELP some_total A value that represents a counter.
|
||||
# TYPE some_total counter
|
||||
`
|
||||
|
||||
c := prometheus.NewCounter(prometheus.CounterOpts{
|
||||
Name: "some_total",
|
||||
Help: "A value that represents a counter.",
|
||||
ConstLabels: prometheus.Labels{
|
||||
"label1": "value1",
|
||||
},
|
||||
})
|
||||
c.Inc()
|
||||
|
||||
expected := `
|
||||
|
||||
some_total{ label1 = "value1" } 1
|
||||
`
|
||||
|
||||
if err := CollectAndCompare(c, strings.NewReader(metadata+expected), "some_total"); err != nil {
|
||||
t.Errorf("unexpected collecting result:\n%s", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestNoMetricFilter(t *testing.T) {
|
||||
const metadata = `
|
||||
# HELP some_total A value that represents a counter.
|
||||
# TYPE some_total counter
|
||||
`
|
||||
|
||||
c := prometheus.NewCounter(prometheus.CounterOpts{
|
||||
Name: "some_total",
|
||||
Help: "A value that represents a counter.",
|
||||
ConstLabels: prometheus.Labels{
|
||||
"label1": "value1",
|
||||
},
|
||||
})
|
||||
c.Inc()
|
||||
|
||||
expected := `
|
||||
some_total{label1="value1"} 1
|
||||
`
|
||||
|
||||
if err := CollectAndCompare(c, strings.NewReader(metadata+expected)); err != nil {
|
||||
t.Errorf("unexpected collecting result:\n%s", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestMetricNotFound(t *testing.T) {
|
||||
const metadata = `
|
||||
# HELP some_other_metric A value that represents a counter.
|
||||
# TYPE some_other_metric counter
|
||||
`
|
||||
|
||||
c := prometheus.NewCounter(prometheus.CounterOpts{
|
||||
Name: "some_total",
|
||||
Help: "A value that represents a counter.",
|
||||
ConstLabels: prometheus.Labels{
|
||||
"label1": "value1",
|
||||
},
|
||||
})
|
||||
c.Inc()
|
||||
|
||||
expected := `
|
||||
some_other_metric{label1="value1"} 1
|
||||
`
|
||||
|
||||
expectedError := `
|
||||
metric output does not match expectation; want:
|
||||
|
||||
# HELP some_other_metric A value that represents a counter.
|
||||
# TYPE some_other_metric counter
|
||||
some_other_metric{label1="value1"} 1
|
||||
|
||||
|
||||
got:
|
||||
|
||||
# HELP some_total A value that represents a counter.
|
||||
# TYPE some_total counter
|
||||
some_total{label1="value1"} 1
|
||||
|
||||
`
|
||||
|
||||
err := CollectAndCompare(c, strings.NewReader(metadata+expected))
|
||||
if err == nil {
|
||||
t.Error("Expected error, got no error.")
|
||||
}
|
||||
|
||||
if err.Error() != expectedError {
|
||||
t.Errorf("Expected\n%#+v\nGot:\n%#+v\n", expectedError, err.Error())
|
||||
}
|
||||
}
|
16
vendor/github.com/prometheus/client_golang/prometheus/value.go
generated
vendored
|
@ -17,9 +17,9 @@ import (
|
|||
"fmt"
|
||||
"sort"
|
||||
|
||||
dto "github.com/prometheus/client_model/go"
|
||||
|
||||
"github.com/golang/protobuf/proto"
|
||||
|
||||
dto "github.com/prometheus/client_model/go"
|
||||
)
|
||||
|
||||
// ValueType is an enumeration of metric types that represent a simple value.
|
||||
|
@ -77,8 +77,12 @@ func (v *valueFunc) Write(out *dto.Metric) error {
|
|||
// operations. However, when implementing custom Collectors, it is useful as a
|
||||
// throw-away metric that is generated on the fly to send it to Prometheus in
|
||||
// the Collect method. NewConstMetric returns an error if the length of
|
||||
// labelValues is not consistent with the variable labels in Desc.
|
||||
// labelValues is not consistent with the variable labels in Desc or if Desc is
|
||||
// invalid.
|
||||
func NewConstMetric(desc *Desc, valueType ValueType, value float64, labelValues ...string) (Metric, error) {
|
||||
if desc.err != nil {
|
||||
return nil, desc.err
|
||||
}
|
||||
if err := validateLabelValues(labelValues, len(desc.variableLabels)); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
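As a sketch of the pattern the amended comment refers to: a throw-away const metric created on the fly inside Collect (the descriptor, label value, and number are invented):

package main

import "github.com/prometheus/client_golang/prometheus"

type queueCollector struct {
	desc *prometheus.Desc
}

func (c queueCollector) Describe(ch chan<- *prometheus.Desc) { ch <- c.desc }

func (c queueCollector) Collect(ch chan<- prometheus.Metric) {
	m, err := prometheus.NewConstMetric(c.desc, prometheus.GaugeValue, 17, "incoming")
	if err != nil {
		// Non-nil when labelValues do not match the Desc, or when the Desc is invalid.
		return
	}
	ch <- m
}

func main() {
	prometheus.MustRegister(queueCollector{
		desc: prometheus.NewDesc("queue_length", "Items queued.", []string{"queue"}, nil),
	})
}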
@ -152,9 +156,7 @@ func makeLabelPairs(desc *Desc, labelValues []string) []*dto.LabelPair {
|
|||
Value: proto.String(labelValues[i]),
|
||||
})
|
||||
}
|
||||
for _, lp := range desc.constLabelPairs {
|
||||
labelPairs = append(labelPairs, lp)
|
||||
}
|
||||
sort.Sort(LabelPairSorter(labelPairs))
|
||||
labelPairs = append(labelPairs, desc.constLabelPairs...)
|
||||
sort.Sort(labelPairSorter(labelPairs))
|
||||
return labelPairs
|
||||
}
|
||||
|
|
13
vendor/github.com/prometheus/client_golang/prometheus/value_test.go
generated
vendored
|
@ -1,3 +1,16 @@
|
|||
// Copyright 2018 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package prometheus
|
||||
|
||||
import (
|
||||
|
|
3
vendor/github.com/prometheus/client_golang/prometheus/vec.go
generated
vendored
|
@ -277,6 +277,9 @@ func (m *metricMap) deleteByHashWithLabelValues(
|
|||
func (m *metricMap) deleteByHashWithLabels(
|
||||
h uint64, labels Labels, curry []curriedLabelValue,
|
||||
) bool {
|
||||
m.mtx.Lock()
|
||||
defer m.mtx.Unlock()
|
||||
|
||||
metrics, ok := m.metrics[h]
|
||||
if !ok {
|
||||
return false
|
||||
|
|
179
vendor/github.com/prometheus/client_golang/prometheus/wrap.go
generated
vendored
Normal file
|
@ -0,0 +1,179 @@
|
|||
// Copyright 2018 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package prometheus
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sort"
|
||||
|
||||
"github.com/golang/protobuf/proto"
|
||||
|
||||
dto "github.com/prometheus/client_model/go"
|
||||
)
|
||||
|
||||
// WrapRegistererWith returns a Registerer wrapping the provided
|
||||
// Registerer. Collectors registered with the returned Registerer will be
|
||||
// registered with the wrapped Registerer in a modified way. The modified
|
||||
// Collector adds the provided Labels to all Metrics it collects (as
|
||||
// ConstLabels). The Metrics collected by the unmodified Collector must not
|
||||
// duplicate any of those labels.
|
||||
//
|
||||
// WrapRegistererWith provides a way to add fixed labels to a subset of
|
||||
// Collectors. It should not be used to add fixed labels to all metrics exposed.
|
||||
//
|
||||
// The Collector example demonstrates a use of WrapRegistererWith.
|
||||
func WrapRegistererWith(labels Labels, reg Registerer) Registerer {
|
||||
return &wrappingRegisterer{
|
||||
wrappedRegisterer: reg,
|
||||
labels: labels,
|
||||
}
|
||||
}
|
||||
|
||||
// WrapRegistererWithPrefix returns a Registerer wrapping the provided
|
||||
// Registerer. Collectors registered with the returned Registerer will be
|
||||
// registered with the wrapped Registerer in a modified way. The modified
|
||||
// Collector adds the provided prefix to the name of all Metrics it collects.
|
||||
//
|
||||
// WrapRegistererWithPrefix is useful to have one place to prefix all metrics of
|
||||
// a sub-system. To make this work, register metrics of the sub-system with the
|
||||
// wrapping Registerer returned by WrapRegistererWithPrefix. It is rarely useful
|
||||
// to use the same prefix for all metrics exposed. In particular, do not prefix
|
||||
// metric names that are standardized across applications, as that would break
|
||||
// horizontal monitoring, for example the metrics provided by the Go collector
|
||||
// (see NewGoCollector) and the process collector (see NewProcessCollector). (In
|
||||
// fact, those metrics are already prefixed with “go_” or “process_”,
|
||||
// respectively.)
|
||||
func WrapRegistererWithPrefix(prefix string, reg Registerer) Registerer {
|
||||
return &wrappingRegisterer{
|
||||
wrappedRegisterer: reg,
|
||||
prefix: prefix,
|
||||
}
|
||||
}
|
||||
|
||||
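A minimal sketch combining the two wrappers for one sub-system (the prefix and label values are made up):

package main

import "github.com/prometheus/client_golang/prometheus"

func main() {
	reg := prometheus.NewRegistry()

	// Everything registered through subReg gets the "sub_" prefix and a
	// component="cache" const label; collectors registered directly with
	// reg are untouched.
	subReg := prometheus.WrapRegistererWith(
		prometheus.Labels{"component": "cache"},
		prometheus.WrapRegistererWithPrefix("sub_", reg),
	)

	hits := prometheus.NewCounter(prometheus.CounterOpts{
		Name: "hits_total", Help: "Cache hits.", // exposed as sub_hits_total
	})
	subReg.MustRegister(hits)
	hits.Inc()
}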
type wrappingRegisterer struct {
|
||||
wrappedRegisterer Registerer
|
||||
prefix string
|
||||
labels Labels
|
||||
}
|
||||
|
||||
func (r *wrappingRegisterer) Register(c Collector) error {
|
||||
return r.wrappedRegisterer.Register(&wrappingCollector{
|
||||
wrappedCollector: c,
|
||||
prefix: r.prefix,
|
||||
labels: r.labels,
|
||||
})
|
||||
}
|
||||
|
||||
func (r *wrappingRegisterer) MustRegister(cs ...Collector) {
|
||||
for _, c := range cs {
|
||||
if err := r.Register(c); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (r *wrappingRegisterer) Unregister(c Collector) bool {
|
||||
return r.wrappedRegisterer.Unregister(&wrappingCollector{
|
||||
wrappedCollector: c,
|
||||
prefix: r.prefix,
|
||||
labels: r.labels,
|
||||
})
|
||||
}
|
||||
|
||||
type wrappingCollector struct {
|
||||
wrappedCollector Collector
|
||||
prefix string
|
||||
labels Labels
|
||||
}
|
||||
|
||||
func (c *wrappingCollector) Collect(ch chan<- Metric) {
|
||||
wrappedCh := make(chan Metric)
|
||||
go func() {
|
||||
c.wrappedCollector.Collect(wrappedCh)
|
||||
close(wrappedCh)
|
||||
}()
|
||||
for m := range wrappedCh {
|
||||
ch <- &wrappingMetric{
|
||||
wrappedMetric: m,
|
||||
prefix: c.prefix,
|
||||
labels: c.labels,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (c *wrappingCollector) Describe(ch chan<- *Desc) {
|
||||
wrappedCh := make(chan *Desc)
|
||||
go func() {
|
||||
c.wrappedCollector.Describe(wrappedCh)
|
||||
close(wrappedCh)
|
||||
}()
|
||||
for desc := range wrappedCh {
|
||||
ch <- wrapDesc(desc, c.prefix, c.labels)
|
||||
}
|
||||
}
|
||||
|
||||
type wrappingMetric struct {
|
||||
wrappedMetric Metric
|
||||
prefix string
|
||||
labels Labels
|
||||
}
|
||||
|
||||
func (m *wrappingMetric) Desc() *Desc {
|
||||
return wrapDesc(m.wrappedMetric.Desc(), m.prefix, m.labels)
|
||||
}
|
||||
|
||||
func (m *wrappingMetric) Write(out *dto.Metric) error {
|
||||
if err := m.wrappedMetric.Write(out); err != nil {
|
||||
return err
|
||||
}
|
||||
if len(m.labels) == 0 {
|
||||
// No wrapping labels.
|
||||
return nil
|
||||
}
|
||||
for ln, lv := range m.labels {
|
||||
out.Label = append(out.Label, &dto.LabelPair{
|
||||
Name: proto.String(ln),
|
||||
Value: proto.String(lv),
|
||||
})
|
||||
}
|
||||
sort.Sort(labelPairSorter(out.Label))
|
||||
return nil
|
||||
}
|
||||
|
||||
func wrapDesc(desc *Desc, prefix string, labels Labels) *Desc {
|
||||
constLabels := Labels{}
|
||||
for _, lp := range desc.constLabelPairs {
|
||||
constLabels[*lp.Name] = *lp.Value
|
||||
}
|
||||
for ln, lv := range labels {
|
||||
if _, alreadyUsed := constLabels[ln]; alreadyUsed {
|
||||
return &Desc{
|
||||
fqName: desc.fqName,
|
||||
help: desc.help,
|
||||
variableLabels: desc.variableLabels,
|
||||
constLabelPairs: desc.constLabelPairs,
|
||||
err: fmt.Errorf("attempted wrapping with already existing label name %q", ln),
|
||||
}
|
||||
}
|
||||
constLabels[ln] = lv
|
||||
}
|
||||
// NewDesc will do remaining validations.
|
||||
newDesc := NewDesc(prefix+desc.fqName, desc.help, desc.variableLabels, constLabels)
|
||||
// Propagate errors if there were any. This will override any error
|
||||
// created by NewDesc above, i.e. earlier errors get precedence.
|
||||
if desc.err != nil {
|
||||
newDesc.err = desc.err
|
||||
}
|
||||
return newDesc
|
||||
}
|
322
vendor/github.com/prometheus/client_golang/prometheus/wrap_test.go
generated
vendored
Normal file
|
@ -0,0 +1,322 @@
|
|||
// Copyright 2018 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package prometheus
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/gogo/protobuf/proto"
|
||||
|
||||
dto "github.com/prometheus/client_model/go"
|
||||
)
|
||||
|
||||
// uncheckedCollector wraps a Collector but its Describe method yields no Desc.
|
||||
type uncheckedCollector struct {
|
||||
c Collector
|
||||
}
|
||||
|
||||
func (u uncheckedCollector) Describe(_ chan<- *Desc) {}
|
||||
func (u uncheckedCollector) Collect(c chan<- Metric) {
|
||||
u.c.Collect(c)
|
||||
}
|
||||
|
||||
func toMetricFamilies(cs ...Collector) []*dto.MetricFamily {
|
||||
reg := NewRegistry()
|
||||
reg.MustRegister(cs...)
|
||||
out, err := reg.Gather()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
func TestWrap(t *testing.T) {
|
||||
|
||||
simpleCnt := NewCounter(CounterOpts{
|
||||
Name: "simpleCnt",
|
||||
Help: "helpSimpleCnt",
|
||||
})
|
||||
simpleCnt.Inc()
|
||||
|
||||
simpleGge := NewGauge(GaugeOpts{
|
||||
Name: "simpleGge",
|
||||
Help: "helpSimpleGge",
|
||||
})
|
||||
simpleGge.Set(3.14)
|
||||
|
||||
preCnt := NewCounter(CounterOpts{
|
||||
Name: "pre_simpleCnt",
|
||||
Help: "helpSimpleCnt",
|
||||
})
|
||||
preCnt.Inc()
|
||||
|
||||
barLabeledCnt := NewCounter(CounterOpts{
|
||||
Name: "simpleCnt",
|
||||
Help: "helpSimpleCnt",
|
||||
ConstLabels: Labels{"foo": "bar"},
|
||||
})
|
||||
barLabeledCnt.Inc()
|
||||
|
||||
bazLabeledCnt := NewCounter(CounterOpts{
|
||||
Name: "simpleCnt",
|
||||
Help: "helpSimpleCnt",
|
||||
ConstLabels: Labels{"foo": "baz"},
|
||||
})
|
||||
bazLabeledCnt.Inc()
|
||||
|
||||
labeledPreCnt := NewCounter(CounterOpts{
|
||||
Name: "pre_simpleCnt",
|
||||
Help: "helpSimpleCnt",
|
||||
ConstLabels: Labels{"foo": "bar"},
|
||||
})
|
||||
labeledPreCnt.Inc()
|
||||
|
||||
twiceLabeledPreCnt := NewCounter(CounterOpts{
|
||||
Name: "pre_simpleCnt",
|
||||
Help: "helpSimpleCnt",
|
||||
ConstLabels: Labels{"foo": "bar", "dings": "bums"},
|
||||
})
|
||||
twiceLabeledPreCnt.Inc()
|
||||
|
||||
barLabeledUncheckedCollector := uncheckedCollector{barLabeledCnt}
|
||||
|
||||
scenarios := map[string]struct {
|
||||
prefix string // First wrap with this prefix.
|
||||
labels Labels // Then wrap the result with these labels.
|
||||
labels2 Labels // If any, wrap the prefix-wrapped one again.
|
||||
preRegister []Collector
|
||||
toRegister []struct { // If there are any labels2, register every other with that one.
|
||||
collector Collector
|
||||
registrationFails bool
|
||||
}
|
||||
gatherFails bool
|
||||
output []Collector
|
||||
}{
|
||||
"wrap nothing": {
|
||||
prefix: "pre_",
|
||||
labels: Labels{"foo": "bar"},
|
||||
},
|
||||
"wrap with nothing": {
|
||||
preRegister: []Collector{simpleGge},
|
||||
toRegister: []struct {
|
||||
collector Collector
|
||||
registrationFails bool
|
||||
}{{simpleCnt, false}},
|
||||
output: []Collector{simpleGge, simpleCnt},
|
||||
},
|
||||
"wrap counter with prefix": {
|
||||
prefix: "pre_",
|
||||
preRegister: []Collector{simpleGge},
|
||||
toRegister: []struct {
|
||||
collector Collector
|
||||
registrationFails bool
|
||||
}{{simpleCnt, false}},
|
||||
output: []Collector{simpleGge, preCnt},
|
||||
},
|
||||
"wrap counter with label pair": {
|
||||
labels: Labels{"foo": "bar"},
|
||||
preRegister: []Collector{simpleGge},
|
||||
toRegister: []struct {
|
||||
collector Collector
|
||||
registrationFails bool
|
||||
}{{simpleCnt, false}},
|
||||
output: []Collector{simpleGge, barLabeledCnt},
|
||||
},
|
||||
"wrap counter with label pair and prefix": {
|
||||
prefix: "pre_",
|
||||
labels: Labels{"foo": "bar"},
|
||||
preRegister: []Collector{simpleGge},
|
||||
toRegister: []struct {
|
||||
collector Collector
|
||||
registrationFails bool
|
||||
}{{simpleCnt, false}},
|
||||
output: []Collector{simpleGge, labeledPreCnt},
|
||||
},
|
||||
"wrap counter with invalid prefix": {
|
||||
prefix: "1+1",
|
||||
preRegister: []Collector{simpleGge},
|
||||
toRegister: []struct {
|
||||
collector Collector
|
||||
registrationFails bool
|
||||
}{{simpleCnt, true}},
|
||||
output: []Collector{simpleGge},
|
||||
},
|
||||
"wrap counter with invalid label": {
|
||||
preRegister: []Collector{simpleGge},
|
||||
labels: Labels{"42": "bar"},
|
||||
toRegister: []struct {
|
||||
collector Collector
|
||||
registrationFails bool
|
||||
}{{simpleCnt, true}},
|
||||
output: []Collector{simpleGge},
|
||||
},
|
||||
"counter registered twice but wrapped with different label values": {
|
||||
labels: Labels{"foo": "bar"},
|
||||
labels2: Labels{"foo": "baz"},
|
||||
toRegister: []struct {
|
||||
collector Collector
|
||||
registrationFails bool
|
||||
}{{simpleCnt, false}, {simpleCnt, false}},
|
||||
output: []Collector{barLabeledCnt, bazLabeledCnt},
|
||||
},
|
||||
"counter registered twice but wrapped with different inconsistent label values": {
|
||||
labels: Labels{"foo": "bar"},
|
||||
labels2: Labels{"bar": "baz"},
|
||||
toRegister: []struct {
|
||||
collector Collector
|
||||
registrationFails bool
|
||||
}{{simpleCnt, false}, {simpleCnt, true}},
|
||||
output: []Collector{barLabeledCnt},
|
||||
},
|
||||
"wrap counter with prefix and two labels": {
|
||||
prefix: "pre_",
|
||||
labels: Labels{"foo": "bar", "dings": "bums"},
|
||||
preRegister: []Collector{simpleGge},
|
||||
toRegister: []struct {
|
||||
collector Collector
|
||||
registrationFails bool
|
||||
}{{simpleCnt, false}},
|
||||
output: []Collector{simpleGge, twiceLabeledPreCnt},
|
||||
},
|
||||
"wrap labeled counter with prefix and another label": {
|
||||
prefix: "pre_",
|
||||
labels: Labels{"dings": "bums"},
|
||||
preRegister: []Collector{simpleGge},
|
||||
toRegister: []struct {
|
||||
collector Collector
|
||||
registrationFails bool
|
||||
}{{barLabeledCnt, false}},
|
||||
output: []Collector{simpleGge, twiceLabeledPreCnt},
|
||||
},
|
||||
"wrap labeled counter with prefix and inconsistent label": {
|
||||
prefix: "pre_",
|
||||
labels: Labels{"foo": "bums"},
|
||||
preRegister: []Collector{simpleGge},
|
||||
toRegister: []struct {
|
||||
collector Collector
|
||||
registrationFails bool
|
||||
}{{barLabeledCnt, true}},
|
||||
output: []Collector{simpleGge},
|
||||
},
|
||||
"wrap labeled counter with prefix and the same label again": {
|
||||
prefix: "pre_",
|
||||
labels: Labels{"foo": "bar"},
|
||||
preRegister: []Collector{simpleGge},
|
||||
toRegister: []struct {
|
||||
collector Collector
|
||||
registrationFails bool
|
||||
}{{barLabeledCnt, true}},
|
||||
output: []Collector{simpleGge},
|
||||
},
|
||||
"wrap labeled unchecked collector with prefix and another label": {
|
||||
prefix: "pre_",
|
||||
labels: Labels{"dings": "bums"},
|
||||
preRegister: []Collector{simpleGge},
|
||||
toRegister: []struct {
|
||||
collector Collector
|
||||
registrationFails bool
|
||||
}{{barLabeledUncheckedCollector, false}},
|
||||
output: []Collector{simpleGge, twiceLabeledPreCnt},
|
||||
},
|
||||
"wrap labeled unchecked collector with prefix and inconsistent label": {
|
||||
prefix: "pre_",
|
||||
labels: Labels{"foo": "bums"},
|
||||
preRegister: []Collector{simpleGge},
|
||||
toRegister: []struct {
|
||||
collector Collector
|
||||
registrationFails bool
|
||||
}{{barLabeledUncheckedCollector, false}},
|
||||
gatherFails: true,
|
||||
output: []Collector{simpleGge},
|
||||
},
|
||||
"wrap labeled unchecked collector with prefix and the same label again": {
|
||||
prefix: "pre_",
|
||||
labels: Labels{"foo": "bar"},
|
||||
preRegister: []Collector{simpleGge},
|
||||
toRegister: []struct {
|
||||
collector Collector
|
||||
registrationFails bool
|
||||
}{{barLabeledUncheckedCollector, false}},
|
||||
gatherFails: true,
|
||||
output: []Collector{simpleGge},
|
||||
},
|
||||
"wrap labeled unchecked collector with prefix and another label resulting in collision with pre-registered counter": {
|
||||
prefix: "pre_",
|
||||
labels: Labels{"dings": "bums"},
|
||||
preRegister: []Collector{twiceLabeledPreCnt},
|
||||
toRegister: []struct {
|
||||
collector Collector
|
||||
registrationFails bool
|
||||
}{{barLabeledUncheckedCollector, false}},
|
||||
gatherFails: true,
|
||||
output: []Collector{twiceLabeledPreCnt},
|
||||
},
|
||||
}
|
||||
|
||||
for n, s := range scenarios {
|
||||
t.Run(n, func(t *testing.T) {
|
||||
reg := NewPedanticRegistry()
|
||||
for _, c := range s.preRegister {
|
||||
if err := reg.Register(c); err != nil {
|
||||
t.Fatal("error registering with unwrapped registry:", err)
|
||||
}
|
||||
}
|
||||
preReg := WrapRegistererWithPrefix(s.prefix, reg)
|
||||
lReg := WrapRegistererWith(s.labels, preReg)
|
||||
l2Reg := WrapRegistererWith(s.labels2, preReg)
|
||||
for i, tr := range s.toRegister {
|
||||
var err error
|
||||
if i%2 != 0 && len(s.labels2) != 0 {
|
||||
err = l2Reg.Register(tr.collector)
|
||||
} else {
|
||||
err = lReg.Register(tr.collector)
|
||||
}
|
||||
if tr.registrationFails && err == nil {
|
||||
t.Fatalf("registration with wrapping registry unexpectedly succeded for collector #%d", i)
|
||||
}
|
||||
if !tr.registrationFails && err != nil {
|
||||
t.Fatalf("registration with wrapping registry failed for collector #%d: %s", i, err)
|
||||
}
|
||||
}
|
||||
wantMF := toMetricFamilies(s.output...)
|
||||
gotMF, err := reg.Gather()
|
||||
if s.gatherFails && err == nil {
|
||||
t.Fatal("gathering unexpectedly succeded")
|
||||
}
|
||||
if !s.gatherFails && err != nil {
|
||||
t.Fatal("gathering failed:", err)
|
||||
}
|
||||
if !reflect.DeepEqual(gotMF, wantMF) {
|
||||
var want, got []string
|
||||
|
||||
for i, mf := range wantMF {
|
||||
want = append(want, fmt.Sprintf("%3d: %s", i, proto.MarshalTextString(mf)))
|
||||
}
|
||||
for i, mf := range gotMF {
|
||||
got = append(got, fmt.Sprintf("%3d: %s", i, proto.MarshalTextString(mf)))
|
||||
}
|
||||
|
||||
t.Fatalf(
|
||||
"unexpected output of gathering:\n\nWANT:\n%s\n\nGOT:\n%s\n",
|
||||
strings.Join(want, "\n"),
|
||||
strings.Join(got, "\n"),
|
||||
)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
}
|