vendor: github.com/prometheus/client_golang v1.12.1

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
Author: Sebastiaan van Stijn
Date:   2022-05-04 20:54:58 +02:00
parent 985711c1f4
commit ec47096efc
GPG key ID: 76698F39D527CE8C (no known key found for this signature in database)

574 changed files with 101741 additions and 22828 deletions

vendor/google.golang.org/api/internal/conn_pool.go (generated, vendored, new file, 30 lines added)

@@ -0,0 +1,30 @@
// Copyright 2020 Google LLC.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package internal
import (
"google.golang.org/grpc"
)
// ConnPool is a pool of grpc.ClientConns.
type ConnPool interface {
// Conn returns a ClientConn from the pool.
//
// Conns aren't returned to the pool.
Conn() *grpc.ClientConn
// Num returns the number of connections in the pool.
//
// It will always return the same value.
Num() int
// Close closes every ClientConn in the pool.
//
// The error returned by Close may be a single error or multiple errors.
Close() error
// ConnPool implements grpc.ClientConnInterface to enable it to be used directly with generated proto stubs.
grpc.ClientConnInterface
}
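
Because ConnPool embeds grpc.ClientConnInterface, a pool can be handed straight to generated gRPC stubs. As a rough sketch (not part of this diff, and not how the real pool implementations are built), a hypothetical singleConnPool backed by one connection could satisfy the interface like this:

package internal

import (
	"context"

	"google.golang.org/grpc"
)

// singleConnPool is a hypothetical ConnPool backed by exactly one ClientConn.
type singleConnPool struct {
	conn *grpc.ClientConn
}

func (p *singleConnPool) Conn() *grpc.ClientConn { return p.conn }
func (p *singleConnPool) Num() int               { return 1 }
func (p *singleConnPool) Close() error           { return p.conn.Close() }

// Invoke and NewStream satisfy grpc.ClientConnInterface by delegating to the
// underlying connection, so generated stubs can call the pool directly.
func (p *singleConnPool) Invoke(ctx context.Context, method string, args, reply interface{}, opts ...grpc.CallOption) error {
	return p.conn.Invoke(ctx, method, args, reply, opts...)
}

func (p *singleConnPool) NewStream(ctx context.Context, desc *grpc.StreamDesc, method string, opts ...grpc.CallOption) (grpc.ClientStream, error) {
	return p.conn.NewStream(ctx, desc, method, opts...)
}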

vendor/google.golang.org/api/internal/creds.go (generated, vendored, new file, 105 lines added)

@@ -0,0 +1,105 @@
// Copyright 2017 Google LLC.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package internal
import (
"context"
"encoding/json"
"fmt"
"io/ioutil"
"golang.org/x/oauth2"
"golang.org/x/oauth2/google"
)
// Creds returns credential information obtained from DialSettings, or if none, then
// it returns default credential information.
func Creds(ctx context.Context, ds *DialSettings) (*google.Credentials, error) {
if ds.Credentials != nil {
return ds.Credentials, nil
}
if ds.CredentialsJSON != nil {
return credentialsFromJSON(ctx, ds.CredentialsJSON, ds.Endpoint, ds.Scopes, ds.Audiences)
}
if ds.CredentialsFile != "" {
data, err := ioutil.ReadFile(ds.CredentialsFile)
if err != nil {
return nil, fmt.Errorf("cannot read credentials file: %v", err)
}
return credentialsFromJSON(ctx, data, ds.Endpoint, ds.Scopes, ds.Audiences)
}
if ds.TokenSource != nil {
return &google.Credentials{TokenSource: ds.TokenSource}, nil
}
cred, err := google.FindDefaultCredentials(ctx, ds.Scopes...)
if err != nil {
return nil, err
}
if len(cred.JSON) > 0 {
return credentialsFromJSON(ctx, cred.JSON, ds.Endpoint, ds.Scopes, ds.Audiences)
}
// For GAE and GCE, the JSON is empty so return the default credentials directly.
return cred, nil
}
// JSON key file type.
const (
serviceAccountKey = "service_account"
)
// credentialsFromJSON returns a google.Credentials based on the input.
//
// - If the JSON is a service account and no scopes are provided, it returns the self-signed JWT auth flow.
// - Otherwise, it returns the OAuth 2.0 flow.
func credentialsFromJSON(ctx context.Context, data []byte, endpoint string, scopes []string, audiences []string) (*google.Credentials, error) {
cred, err := google.CredentialsFromJSON(ctx, data, scopes...)
if err != nil {
return nil, err
}
if len(data) > 0 && len(scopes) == 0 {
var f struct {
Type string `json:"type"`
// The remaining JSON fields are omitted because they are not used.
}
if err := json.Unmarshal(cred.JSON, &f); err != nil {
return nil, err
}
if f.Type == serviceAccountKey {
ts, err := selfSignedJWTTokenSource(data, endpoint, audiences)
if err != nil {
return nil, err
}
cred.TokenSource = ts
}
}
return cred, err
}
func selfSignedJWTTokenSource(data []byte, endpoint string, audiences []string) (oauth2.TokenSource, error) {
// Use the API endpoint as the default audience
audience := endpoint
if len(audiences) > 0 {
// TODO(shinfan): Update golang oauth to support multiple audiences.
if len(audiences) > 1 {
return nil, fmt.Errorf("multiple audiences support is not implemented")
}
audience = audiences[0]
}
return google.JWTAccessTokenSourceFromJSON(data, audience)
}
// QuotaProjectFromCreds returns the quota project from the JSON blob in the provided credentials.
//
// NOTE(cbro): consider promoting this to a field on google.Credentials.
func QuotaProjectFromCreds(cred *google.Credentials) string {
var v struct {
QuotaProject string `json:"quota_project_id"`
}
if err := json.Unmarshal(cred.JSON, &v); err != nil {
return ""
}
return v.QuotaProject
}
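
For reference (not part of the vendored code), the resolution order in Creds is: explicit Credentials, then CredentialsJSON, then CredentialsFile, then TokenSource, then Application Default Credentials. A minimal sketch from inside package internal, with a hypothetical key file path:

package internal

import (
	"context"
	"log"
)

func exampleCreds() {
	ctx := context.Background()
	// CredentialsFile wins here because no Credentials or CredentialsJSON is set.
	creds, err := Creds(ctx, &DialSettings{
		CredentialsFile: "/path/to/service-account.json", // hypothetical path
		Scopes:          []string{"https://www.googleapis.com/auth/cloud-platform"},
	})
	if err != nil {
		log.Fatal(err)
	}
	_ = creds.TokenSource // use the resulting token source
}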


@@ -0,0 +1,79 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gensupport
import (
"bytes"
"io"
"google.golang.org/api/googleapi"
)
// MediaBuffer buffers data from an io.Reader to support uploading media in
// retryable chunks. It should be created with NewMediaBuffer.
type MediaBuffer struct {
media io.Reader
chunk []byte // The current chunk which is pending upload. The capacity is the chunk size.
err error // Any error generated when populating chunk by reading media.
// The absolute position of chunk in the underlying media.
off int64
}
// NewMediaBuffer initializes a MediaBuffer.
func NewMediaBuffer(media io.Reader, chunkSize int) *MediaBuffer {
return &MediaBuffer{media: media, chunk: make([]byte, 0, chunkSize)}
}
// Chunk returns the current buffered chunk, the offset in the underlying media
// from which the chunk is drawn, and the size of the chunk.
// Successive calls to Chunk return the same chunk between calls to Next.
func (mb *MediaBuffer) Chunk() (chunk io.Reader, off int64, size int, err error) {
// There may already be data in chunk if Next has not been called since the previous call to Chunk.
if mb.err == nil && len(mb.chunk) == 0 {
mb.err = mb.loadChunk()
}
return bytes.NewReader(mb.chunk), mb.off, len(mb.chunk), mb.err
}
// loadChunk will read from media into chunk, up to the capacity of chunk.
func (mb *MediaBuffer) loadChunk() error {
bufSize := cap(mb.chunk)
mb.chunk = mb.chunk[:bufSize]
read := 0
var err error
for err == nil && read < bufSize {
var n int
n, err = mb.media.Read(mb.chunk[read:])
read += n
}
mb.chunk = mb.chunk[:read]
return err
}
// Next advances to the next chunk, which will be returned by the next call to Chunk.
// Calls to Next without a corresponding prior call to Chunk will have no effect.
func (mb *MediaBuffer) Next() {
mb.off += int64(len(mb.chunk))
mb.chunk = mb.chunk[0:0]
}
type readerTyper struct {
io.Reader
googleapi.ContentTyper
}
// ReaderAtToReader adapts a ReaderAt to be used as a Reader.
// If ra implements googleapi.ContentTyper, then the returned reader
// will also implement googleapi.ContentTyper, delegating to ra.
func ReaderAtToReader(ra io.ReaderAt, size int64) io.Reader {
r := io.NewSectionReader(ra, 0, size)
if typer, ok := ra.(googleapi.ContentTyper); ok {
return readerTyper{r, typer}
}
return r
}
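
A short sketch (not in the diff) of the Chunk/Next protocol described above: Chunk returns the same buffered chunk until Next is called, and io.EOF from Chunk marks the final chunk. The helper name drainChunks is illustrative only:

package gensupport

import (
	"bytes"
	"io"
)

// drainChunks copies every chunk of r into a buffer, chunkSize bytes at a time.
func drainChunks(r io.Reader, chunkSize int) ([]byte, error) {
	mb := NewMediaBuffer(r, chunkSize)
	var out bytes.Buffer
	for {
		chunk, _, _, err := mb.Chunk()
		if err != nil && err != io.EOF {
			return nil, err
		}
		if _, cerr := io.Copy(&out, chunk); cerr != nil {
			return nil, cerr
		}
		if err == io.EOF { // the chunk just copied was the last one
			return out.Bytes(), nil
		}
		mb.Next() // advance so the next Chunk call loads fresh data
	}
}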


@@ -0,0 +1,10 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package gensupport is an internal implementation detail used by code
// generated by the google-api-go-generator tool.
//
// This package may be modified at any time without regard for backwards
// compatibility. It should not be used directly by API users.
package gensupport


@@ -0,0 +1,211 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gensupport
import (
"encoding/json"
"fmt"
"reflect"
"strings"
)
// MarshalJSON returns a JSON encoding of schema containing only selected fields.
// A field is selected if any of the following is true:
// * it has a non-empty value
// * its field name is present in forceSendFields and it is not a nil pointer or nil interface
// * its field name is present in nullFields.
// The JSON key for each selected field is taken from the field's json: struct tag.
func MarshalJSON(schema interface{}, forceSendFields, nullFields []string) ([]byte, error) {
if len(forceSendFields) == 0 && len(nullFields) == 0 {
return json.Marshal(schema)
}
mustInclude := make(map[string]bool)
for _, f := range forceSendFields {
mustInclude[f] = true
}
useNull := make(map[string]bool)
useNullMaps := make(map[string]map[string]bool)
for _, nf := range nullFields {
parts := strings.SplitN(nf, ".", 2)
field := parts[0]
if len(parts) == 1 {
useNull[field] = true
} else {
if useNullMaps[field] == nil {
useNullMaps[field] = map[string]bool{}
}
useNullMaps[field][parts[1]] = true
}
}
dataMap, err := schemaToMap(schema, mustInclude, useNull, useNullMaps)
if err != nil {
return nil, err
}
return json.Marshal(dataMap)
}
func schemaToMap(schema interface{}, mustInclude, useNull map[string]bool, useNullMaps map[string]map[string]bool) (map[string]interface{}, error) {
m := make(map[string]interface{})
s := reflect.ValueOf(schema)
st := s.Type()
for i := 0; i < s.NumField(); i++ {
jsonTag := st.Field(i).Tag.Get("json")
if jsonTag == "" {
continue
}
tag, err := parseJSONTag(jsonTag)
if err != nil {
return nil, err
}
if tag.ignore {
continue
}
v := s.Field(i)
f := st.Field(i)
if useNull[f.Name] {
if !isEmptyValue(v) {
return nil, fmt.Errorf("field %q in NullFields has non-empty value", f.Name)
}
m[tag.apiName] = nil
continue
}
if !includeField(v, f, mustInclude) {
continue
}
// If map fields are explicitly set to null, use a map[string]interface{}.
if f.Type.Kind() == reflect.Map && useNullMaps[f.Name] != nil {
ms, ok := v.Interface().(map[string]string)
if !ok {
return nil, fmt.Errorf("field %q has keys in NullFields but is not a map[string]string", f.Name)
}
mi := map[string]interface{}{}
for k, v := range ms {
mi[k] = v
}
for k := range useNullMaps[f.Name] {
mi[k] = nil
}
m[tag.apiName] = mi
continue
}
// nil maps are treated as empty maps.
if f.Type.Kind() == reflect.Map && v.IsNil() {
m[tag.apiName] = map[string]string{}
continue
}
// nil slices are treated as empty slices.
if f.Type.Kind() == reflect.Slice && v.IsNil() {
m[tag.apiName] = []bool{}
continue
}
if tag.stringFormat {
m[tag.apiName] = formatAsString(v, f.Type.Kind())
} else {
m[tag.apiName] = v.Interface()
}
}
return m, nil
}
// formatAsString returns a string representation of v, dereferencing it first if possible.
func formatAsString(v reflect.Value, kind reflect.Kind) string {
if kind == reflect.Ptr && !v.IsNil() {
v = v.Elem()
}
return fmt.Sprintf("%v", v.Interface())
}
// jsonTag represents a restricted version of the struct tag format used by encoding/json.
// It is used to describe the JSON encoding of fields in a Schema struct.
type jsonTag struct {
apiName string
stringFormat bool
ignore bool
}
// parseJSONTag parses a restricted version of the struct tag format used by encoding/json.
// The format of the tag must match that generated by the Schema.writeSchemaStruct method
// in the api generator.
func parseJSONTag(val string) (jsonTag, error) {
if val == "-" {
return jsonTag{ignore: true}, nil
}
var tag jsonTag
i := strings.Index(val, ",")
if i == -1 || val[:i] == "" {
return tag, fmt.Errorf("malformed json tag: %s", val)
}
tag = jsonTag{
apiName: val[:i],
}
switch val[i+1:] {
case "omitempty":
case "omitempty,string":
tag.stringFormat = true
default:
return tag, fmt.Errorf("malformed json tag: %s", val)
}
return tag, nil
}
// Reports whether the struct field "f" with value "v" should be included in JSON output.
func includeField(v reflect.Value, f reflect.StructField, mustInclude map[string]bool) bool {
// The regular JSON encoding of a nil pointer is "null", which means "delete this field".
// Therefore, we could enable field deletion by honoring pointer fields' presence in the mustInclude set.
// However, many fields are not pointers, so there would be no way to delete these fields.
// Rather than partially supporting field deletion, we ignore mustInclude for nil pointer fields.
// Deletion will be handled by a separate mechanism.
if f.Type.Kind() == reflect.Ptr && v.IsNil() {
return false
}
// The "any" type is represented as an interface{}. If this interface
// is nil, there is no reasonable representation to send. We ignore
// these fields, for the same reasons as given above for pointers.
if f.Type.Kind() == reflect.Interface && v.IsNil() {
return false
}
return mustInclude[f.Name] || !isEmptyValue(v)
}
// isEmptyValue reports whether v is the empty value for its type. This
// implementation is based on that of the encoding/json package, but its
// correctness does not depend on it being identical. What's important is that
// this function return false in situations where v should not be sent as part
// of a PATCH operation.
func isEmptyValue(v reflect.Value) bool {
switch v.Kind() {
case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
return v.Len() == 0
case reflect.Bool:
return !v.Bool()
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
return v.Int() == 0
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
return v.Uint() == 0
case reflect.Float32, reflect.Float64:
return v.Float() == 0
case reflect.Interface, reflect.Ptr:
return v.IsNil()
}
return false
}
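
To make the selection rules above concrete, here is a hedged sketch (not part of the diff) using a hypothetical schema struct shaped like generated API types, with ForceSendFields and NullFields tagged json:"-" so MarshalJSON skips them:

package gensupport

import "fmt"

type book struct {
	Title string `json:"title,omitempty"`
	Pages int    `json:"pages,omitempty"`
	Notes string `json:"notes,omitempty"`

	ForceSendFields []string `json:"-"`
	NullFields      []string `json:"-"`
}

func exampleMarshalJSON() {
	b := book{Title: "Go", ForceSendFields: []string{"Pages"}, NullFields: []string{"Notes"}}
	data, err := MarshalJSON(b, b.ForceSendFields, b.NullFields)
	if err != nil {
		panic(err)
	}
	// Pages is zero but force-sent; Notes is emitted as an explicit null.
	fmt.Println(string(data)) // {"notes":null,"pages":0,"title":"Go"}
}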


@@ -0,0 +1,47 @@
// Copyright 2016 Google LLC.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gensupport
import (
"encoding/json"
"errors"
"fmt"
"math"
)
// JSONFloat64 is a float64 that supports proper unmarshaling of special float
// values in JSON, according to
// https://developers.google.com/protocol-buffers/docs/proto3#json. Although
// that is a proto-to-JSON spec, it applies to all Google APIs.
//
// The jsonpb package
// (https://github.com/golang/protobuf/blob/master/jsonpb/jsonpb.go) has
// similar functionality, but only for direct translation from proto messages
// to JSON.
type JSONFloat64 float64
func (f *JSONFloat64) UnmarshalJSON(data []byte) error {
var ff float64
if err := json.Unmarshal(data, &ff); err == nil {
*f = JSONFloat64(ff)
return nil
}
var s string
if err := json.Unmarshal(data, &s); err == nil {
switch s {
case "NaN":
ff = math.NaN()
case "Infinity":
ff = math.Inf(1)
case "-Infinity":
ff = math.Inf(-1)
default:
return fmt.Errorf("google.golang.org/api/internal: bad float string %q", s)
}
*f = JSONFloat64(ff)
return nil
}
return errors.New("google.golang.org/api/internal: data not float or string")
}
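
A small sketch (not in the diff) of the behavior: plain JSON numbers and the quoted special values from the proto3 JSON mapping both unmarshal into JSONFloat64:

package gensupport

import (
	"encoding/json"
	"fmt"
	"math"
)

func exampleJSONFloat64() {
	var v struct {
		Score JSONFloat64 `json:"score"`
	}
	_ = json.Unmarshal([]byte(`{"score": 1.5}`), &v)
	fmt.Println(v.Score) // 1.5
	_ = json.Unmarshal([]byte(`{"score": "Infinity"}`), &v)
	fmt.Println(math.IsInf(float64(v.Score), 1)) // true
}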


@@ -0,0 +1,372 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gensupport
import (
"bytes"
"fmt"
"io"
"io/ioutil"
"mime"
"mime/multipart"
"net/http"
"net/textproto"
"strings"
"sync"
"google.golang.org/api/googleapi"
)
const sniffBuffSize = 512
func newContentSniffer(r io.Reader) *contentSniffer {
return &contentSniffer{r: r}
}
// contentSniffer wraps a Reader, and reports the content type determined by sniffing up to 512 bytes from the Reader.
type contentSniffer struct {
r io.Reader
start []byte // buffer for the sniffed bytes.
err error // set to any error encountered while reading bytes to be sniffed.
ctype string // set on first sniff.
sniffed bool // set to true on first sniff.
}
func (cs *contentSniffer) Read(p []byte) (n int, err error) {
// Ensure that the content type is sniffed before any data is consumed from Reader.
_, _ = cs.ContentType()
if len(cs.start) > 0 {
n := copy(p, cs.start)
cs.start = cs.start[n:]
return n, nil
}
// We may have read some bytes into start while sniffing, even if the read ended in an error.
// We should first return those bytes, then the error.
if cs.err != nil {
return 0, cs.err
}
// Now we have handled all bytes that were buffered while sniffing. Now just delegate to the underlying reader.
return cs.r.Read(p)
}
// ContentType returns the sniffed content type, and whether the content type was successfully sniffed.
func (cs *contentSniffer) ContentType() (string, bool) {
if cs.sniffed {
return cs.ctype, cs.ctype != ""
}
cs.sniffed = true
// If ReadAll hits EOF, it returns err==nil.
cs.start, cs.err = ioutil.ReadAll(io.LimitReader(cs.r, sniffBuffSize))
// Don't try to detect the content type based on possibly incomplete data.
if cs.err != nil {
return "", false
}
cs.ctype = http.DetectContentType(cs.start)
return cs.ctype, true
}
// DetermineContentType determines the content type of the supplied reader.
// If the content type is already known, it can be specified via ctype.
// Otherwise, the content of media will be sniffed to determine the content type.
// If media implements googleapi.ContentTyper (deprecated), this will be used
// instead of sniffing the content.
// After calling DetermineContentType the caller must not perform further reads on
// media, but rather read from the Reader that is returned.
func DetermineContentType(media io.Reader, ctype string) (io.Reader, string) {
// Note: callers could avoid calling DetermineContentType if ctype != "",
// but doing the check inside this function reduces the amount of
// generated code.
if ctype != "" {
return media, ctype
}
// For backwards compatibility, allow clients to set content
// type by providing a ContentTyper for media.
if typer, ok := media.(googleapi.ContentTyper); ok {
return media, typer.ContentType()
}
sniffer := newContentSniffer(media)
if ctype, ok := sniffer.ContentType(); ok {
return sniffer, ctype
}
// If content type could not be sniffed, reads from sniffer will eventually fail with an error.
return sniffer, ""
}
type typeReader struct {
io.Reader
typ string
}
// multipartReader combines the contents of multiple readers to create a multipart/related HTTP body.
// Close must be called if reads from the multipartReader are abandoned before reaching EOF.
type multipartReader struct {
pr *io.PipeReader
ctype string
mu sync.Mutex
pipeOpen bool
}
// boundary optionally specifies the MIME boundary to use.
func newMultipartReader(parts []typeReader, boundary string) *multipartReader {
mp := &multipartReader{pipeOpen: true}
var pw *io.PipeWriter
mp.pr, pw = io.Pipe()
mpw := multipart.NewWriter(pw)
if boundary != "" {
mpw.SetBoundary(boundary)
}
mp.ctype = "multipart/related; boundary=" + mpw.Boundary()
go func() {
for _, part := range parts {
w, err := mpw.CreatePart(typeHeader(part.typ))
if err != nil {
mpw.Close()
pw.CloseWithError(fmt.Errorf("googleapi: CreatePart failed: %v", err))
return
}
_, err = io.Copy(w, part.Reader)
if err != nil {
mpw.Close()
pw.CloseWithError(fmt.Errorf("googleapi: Copy failed: %v", err))
return
}
}
mpw.Close()
pw.Close()
}()
return mp
}
func (mp *multipartReader) Read(data []byte) (n int, err error) {
return mp.pr.Read(data)
}
func (mp *multipartReader) Close() error {
mp.mu.Lock()
if !mp.pipeOpen {
mp.mu.Unlock()
return nil
}
mp.pipeOpen = false
mp.mu.Unlock()
return mp.pr.Close()
}
// CombineBodyMedia combines a JSON body with media content to create a multipart/related HTTP body.
// It returns a ReadCloser containing the combined body, and the overall "multipart/related" content type, with a random boundary.
//
// The caller must call Close on the returned ReadCloser if reads are abandoned before reaching EOF.
func CombineBodyMedia(body io.Reader, bodyContentType string, media io.Reader, mediaContentType string) (io.ReadCloser, string) {
return combineBodyMedia(body, bodyContentType, media, mediaContentType, "")
}
// combineBodyMedia is CombineBodyMedia but with an optional mimeBoundary field.
func combineBodyMedia(body io.Reader, bodyContentType string, media io.Reader, mediaContentType, mimeBoundary string) (io.ReadCloser, string) {
mp := newMultipartReader([]typeReader{
{body, bodyContentType},
{media, mediaContentType},
}, mimeBoundary)
return mp, mp.ctype
}
func typeHeader(contentType string) textproto.MIMEHeader {
h := make(textproto.MIMEHeader)
if contentType != "" {
h.Set("Content-Type", contentType)
}
return h
}
// PrepareUpload determines whether the data in the supplied reader should be
// uploaded in a single request, or in sequential chunks.
// chunkSize is the size of the chunk that media should be split into.
//
// If chunkSize is zero, media is returned as the first value, and the other
// two return values are nil, true.
//
// Otherwise, a MediaBuffer is returned, along with a bool indicating whether the
// contents of media fit in a single chunk.
//
// After PrepareUpload has been called, media should no longer be used: the
// media content should be accessed via one of the return values.
func PrepareUpload(media io.Reader, chunkSize int) (r io.Reader, mb *MediaBuffer, singleChunk bool) {
if chunkSize == 0 { // do not chunk
return media, nil, true
}
mb = NewMediaBuffer(media, chunkSize)
_, _, _, err := mb.Chunk()
// If err is io.EOF, we can upload this in a single request. Otherwise, err is
// either nil or a non-EOF error. If it is the latter, then the next call to
// mb.Chunk will return the same error. Returning a MediaBuffer ensures that this
// error will be handled at some point.
return nil, mb, err == io.EOF
}
// MediaInfo holds information for media uploads. It is intended for use by generated
// code only.
type MediaInfo struct {
// At most one of Media and MediaBuffer will be set.
media io.Reader
buffer *MediaBuffer
singleChunk bool
mType string
size int64 // mediaSize, if known. Used only for calls to progressUpdater_.
progressUpdater googleapi.ProgressUpdater
}
// NewInfoFromMedia should be invoked from the Media method of a call. It returns a
// MediaInfo populated with chunk size and content type, and a reader or MediaBuffer
// if needed.
func NewInfoFromMedia(r io.Reader, options []googleapi.MediaOption) *MediaInfo {
mi := &MediaInfo{}
opts := googleapi.ProcessMediaOptions(options)
if !opts.ForceEmptyContentType {
r, mi.mType = DetermineContentType(r, opts.ContentType)
}
mi.media, mi.buffer, mi.singleChunk = PrepareUpload(r, opts.ChunkSize)
return mi
}
// NewInfoFromResumableMedia should be invoked from the ResumableMedia method of a
// call. It returns a MediaInfo using the given reader, size and media type.
func NewInfoFromResumableMedia(r io.ReaderAt, size int64, mediaType string) *MediaInfo {
rdr := ReaderAtToReader(r, size)
rdr, mType := DetermineContentType(rdr, mediaType)
return &MediaInfo{
size: size,
mType: mType,
buffer: NewMediaBuffer(rdr, googleapi.DefaultUploadChunkSize),
media: nil,
singleChunk: false,
}
}
// SetProgressUpdater sets the progress updater for the media info.
func (mi *MediaInfo) SetProgressUpdater(pu googleapi.ProgressUpdater) {
if mi != nil {
mi.progressUpdater = pu
}
}
// UploadType determines the type of upload: a single request, or a resumable
// series of requests.
func (mi *MediaInfo) UploadType() string {
if mi.singleChunk {
return "multipart"
}
return "resumable"
}
// UploadRequest sets up an HTTP request for media upload. It adds headers
// as necessary, and returns a replacement for the body and a function for http.Request.GetBody.
func (mi *MediaInfo) UploadRequest(reqHeaders http.Header, body io.Reader) (newBody io.Reader, getBody func() (io.ReadCloser, error), cleanup func()) {
cleanup = func() {}
if mi == nil {
return body, nil, cleanup
}
var media io.Reader
if mi.media != nil {
// This only happens when the caller has turned off chunking. In that
// case, we write all of media in a single non-retryable request.
media = mi.media
} else if mi.singleChunk {
// The data fits in a single chunk, which has now been read into the MediaBuffer.
// We obtain that chunk so we can write it in a single request. The request can
// be retried because the data is stored in the MediaBuffer.
media, _, _, _ = mi.buffer.Chunk()
}
if media != nil {
fb := readerFunc(body)
fm := readerFunc(media)
combined, ctype := CombineBodyMedia(body, "application/json", media, mi.mType)
toCleanup := []io.Closer{
combined,
}
if fb != nil && fm != nil {
getBody = func() (io.ReadCloser, error) {
rb := ioutil.NopCloser(fb())
rm := ioutil.NopCloser(fm())
var mimeBoundary string
if _, params, err := mime.ParseMediaType(ctype); err == nil {
mimeBoundary = params["boundary"]
}
r, _ := combineBodyMedia(rb, "application/json", rm, mi.mType, mimeBoundary)
toCleanup = append(toCleanup, r)
return r, nil
}
}
cleanup = func() {
for _, closer := range toCleanup {
_ = closer.Close()
}
}
reqHeaders.Set("Content-Type", ctype)
body = combined
}
if mi.buffer != nil && mi.mType != "" && !mi.singleChunk {
reqHeaders.Set("X-Upload-Content-Type", mi.mType)
}
return body, getBody, cleanup
}
// readerFunc returns a function that always returns an io.Reader that has the same
// contents as r, provided that can be done without consuming r. Otherwise, it
// returns nil.
// See http.NewRequest (in net/http/request.go).
func readerFunc(r io.Reader) func() io.Reader {
switch r := r.(type) {
case *bytes.Buffer:
buf := r.Bytes()
return func() io.Reader { return bytes.NewReader(buf) }
case *bytes.Reader:
snapshot := *r
return func() io.Reader { r := snapshot; return &r }
case *strings.Reader:
snapshot := *r
return func() io.Reader { r := snapshot; return &r }
default:
return nil
}
}
// ResumableUpload returns an appropriately configured ResumableUpload value if the
// upload is resumable, or nil otherwise.
func (mi *MediaInfo) ResumableUpload(locURI string) *ResumableUpload {
if mi == nil || mi.singleChunk {
return nil
}
return &ResumableUpload{
URI: locURI,
Media: mi.buffer,
MediaType: mi.mType,
Callback: func(curr int64) {
if mi.progressUpdater != nil {
mi.progressUpdater(curr, mi.size)
}
},
}
}
// SetGetBody sets the GetBody field of req to f. This was once needed
// to gracefully support Go 1.7 and earlier which didn't have that
// field.
//
// Deprecated: the code generator no longer uses this as of
// 2019-02-19. Nothing else should be calling this anyway, but we
// won't delete this immediately; it will be deleted in as early as 6
// months.
func SetGetBody(req *http.Request, f func() (io.ReadCloser, error)) {
req.GetBody = f
}
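
As a rough illustration of CombineBodyMedia (not part of the diff): the JSON request body and the media content become two parts of one multipart/related body, and the returned content type carries the generated boundary:

package gensupport

import (
	"fmt"
	"io/ioutil"
	"strings"
)

func exampleCombineBodyMedia() {
	body := strings.NewReader(`{"name":"report.txt"}`)
	media := strings.NewReader("hello world")
	combined, ctype := CombineBodyMedia(body, "application/json", media, "text/plain")
	defer combined.Close()
	data, _ := ioutil.ReadAll(combined)
	fmt.Println(ctype)         // multipart/related; boundary=<random>
	fmt.Println(len(data) > 0) // true: both parts were written to the pipe
}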


@@ -0,0 +1,51 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gensupport
import (
"net/url"
"google.golang.org/api/googleapi"
)
// URLParams is a simplified replacement for url.Values
// that safely builds up URL parameters for encoding.
type URLParams map[string][]string
// Get returns the first value for the given key, or "".
func (u URLParams) Get(key string) string {
vs := u[key]
if len(vs) == 0 {
return ""
}
return vs[0]
}
// Set sets the key to value.
// It replaces any existing values.
func (u URLParams) Set(key, value string) {
u[key] = []string{value}
}
// SetMulti sets the key to an array of values.
// It replaces any existing values.
// Note that values must not be modified after calling SetMulti
// so the caller is responsible for making a copy if necessary.
func (u URLParams) SetMulti(key string, values []string) {
u[key] = values
}
// Encode encodes the values into "URL encoded" form
// ("bar=baz&foo=quux") sorted by key.
func (u URLParams) Encode() string {
return url.Values(u).Encode()
}
// SetOptions sets the URL params and any additional call options.
func SetOptions(u URLParams, opts ...googleapi.CallOption) {
for _, o := range opts {
u.Set(o.Get())
}
}
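
A quick sketch (not in the diff) of URLParams in use; Encode delegates to url.Values, so output is sorted by key and repeated keys appear once per value:

package gensupport

import "fmt"

func exampleURLParams() {
	u := make(URLParams)
	u.Set("alt", "json")
	u.SetMulti("fields", []string{"id", "name"})
	fmt.Println(u.Encode()) // alt=json&fields=id&fields=name
}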


@@ -0,0 +1,258 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gensupport
import (
"context"
"errors"
"fmt"
"io"
"net/http"
"sync"
"time"
gax "github.com/googleapis/gax-go/v2"
)
// Backoff is an interface around gax.Backoff's Pause method, allowing tests to provide their
// own implementation.
type Backoff interface {
Pause() time.Duration
}
// These are declared as global variables so that tests can overwrite them.
var (
retryDeadline = 32 * time.Second
backoff = func() Backoff {
return &gax.Backoff{Initial: 100 * time.Millisecond}
}
// syscallRetryable is a platform-specific hook, specified in retryable_linux.go
syscallRetryable func(error) bool = func(err error) bool { return false }
)
const (
// statusTooManyRequests is returned by the storage API if the
// per-project limits have been temporarily exceeded. The request
// should be retried.
// https://cloud.google.com/storage/docs/json_api/v1/status-codes#standardcodes
statusTooManyRequests = 429
)
// ResumableUpload is used by the generated APIs to provide resumable uploads.
// It is not used by developers directly.
type ResumableUpload struct {
Client *http.Client
// URI is the resumable resource destination provided by the server after specifying "&uploadType=resumable".
URI string
UserAgent string // User-Agent for header of the request
// Media is the object being uploaded.
Media *MediaBuffer
// MediaType defines the media type, e.g. "image/jpeg".
MediaType string
mu sync.Mutex // guards progress
progress int64 // number of bytes uploaded so far
// Callback is an optional function that will be periodically called with the cumulative number of bytes uploaded.
Callback func(int64)
}
// Progress returns the number of bytes uploaded at this point.
func (rx *ResumableUpload) Progress() int64 {
rx.mu.Lock()
defer rx.mu.Unlock()
return rx.progress
}
// doUploadRequest performs a single HTTP request to upload data.
// off specifies the offset in rx.Media from which data is drawn.
// size is the number of bytes in data.
// final specifies whether data is the final chunk to be uploaded.
func (rx *ResumableUpload) doUploadRequest(ctx context.Context, data io.Reader, off, size int64, final bool) (*http.Response, error) {
req, err := http.NewRequest("POST", rx.URI, data)
if err != nil {
return nil, err
}
req.ContentLength = size
var contentRange string
if final {
if size == 0 {
contentRange = fmt.Sprintf("bytes */%v", off)
} else {
contentRange = fmt.Sprintf("bytes %v-%v/%v", off, off+size-1, off+size)
}
} else {
contentRange = fmt.Sprintf("bytes %v-%v/*", off, off+size-1)
}
req.Header.Set("Content-Range", contentRange)
req.Header.Set("Content-Type", rx.MediaType)
req.Header.Set("User-Agent", rx.UserAgent)
// Google's upload endpoint uses status code 308 for a
// different purpose than the "308 Permanent Redirect"
// since-standardized in RFC 7238. Because of the conflict in
// semantics, Google added this new request header which
// causes it to not use "308" and instead reply with 200 OK
// and sets the upload-specific "X-HTTP-Status-Code-Override:
// 308" response header.
req.Header.Set("X-GUploader-No-308", "yes")
return SendRequest(ctx, rx.Client, req)
}
func statusResumeIncomplete(resp *http.Response) bool {
// This is how the server signals "status resume incomplete"
// when X-GUploader-No-308 is set to "yes":
return resp != nil && resp.Header.Get("X-Http-Status-Code-Override") == "308"
}
// reportProgress calls a user-supplied callback to report upload progress.
// If old==updated, the callback is not called.
func (rx *ResumableUpload) reportProgress(old, updated int64) {
if updated-old == 0 {
return
}
rx.mu.Lock()
rx.progress = updated
rx.mu.Unlock()
if rx.Callback != nil {
rx.Callback(updated)
}
}
// transferChunk performs a single HTTP request to upload a single chunk from rx.Media.
func (rx *ResumableUpload) transferChunk(ctx context.Context) (*http.Response, error) {
chunk, off, size, err := rx.Media.Chunk()
done := err == io.EOF
if !done && err != nil {
return nil, err
}
res, err := rx.doUploadRequest(ctx, chunk, off, int64(size), done)
if err != nil {
return res, err
}
// We sent "X-GUploader-No-308: yes" (see comment elsewhere in
// this file), so we don't expect to get a 308.
if res.StatusCode == 308 {
return nil, errors.New("unexpected 308 response status code")
}
if res.StatusCode == http.StatusOK {
rx.reportProgress(off, off+int64(size))
}
if statusResumeIncomplete(res) {
rx.Media.Next()
}
return res, nil
}
// Upload starts the process of a resumable upload with a cancellable context.
// It retries using the provided backoff strategy until cancelled or the
// strategy indicates to stop retrying.
// It is called from the auto-generated API code and is not visible to the user.
// Before sending an HTTP request, Upload calls any registered hook functions,
// and calls the returned functions after the request returns (see send.go).
// rx is private to the auto-generated API code.
// Exactly one of resp or err will be nil. If resp is non-nil, the caller must call resp.Body.Close.
func (rx *ResumableUpload) Upload(ctx context.Context) (resp *http.Response, err error) {
// There are a couple of cases where it's possible for err and resp to both
// be non-nil. However, we expose a simpler contract to our callers: exactly
// one of resp and err will be non-nil. This means that any response body
// must be closed here before returning a non-nil error.
var prepareReturn = func(resp *http.Response, err error) (*http.Response, error) {
if err != nil {
if resp != nil && resp.Body != nil {
resp.Body.Close()
}
return nil, err
}
return resp, nil
}
// Send all chunks.
for {
var pause time.Duration
// Each chunk gets its own initialized-at-zero retry.
bo := backoff()
quitAfter := time.After(retryDeadline)
// Retry loop for a single chunk.
for {
select {
case <-ctx.Done():
if err == nil {
err = ctx.Err()
}
return prepareReturn(resp, err)
case <-time.After(pause):
case <-quitAfter:
return prepareReturn(resp, err)
}
resp, err = rx.transferChunk(ctx)
var status int
if resp != nil {
status = resp.StatusCode
}
// Check if we should retry the request.
if !shouldRetry(status, err) {
break
}
pause = bo.Pause()
if resp != nil && resp.Body != nil {
resp.Body.Close()
}
}
// If the chunk was uploaded successfully, but there's still
// more to go, upload the next chunk without any delay.
if statusResumeIncomplete(resp) {
resp.Body.Close()
continue
}
return prepareReturn(resp, err)
}
}
// shouldRetry indicates whether an error is retryable for the purposes of this
// package, following guidance from
// https://cloud.google.com/storage/docs/exponential-backoff .
func shouldRetry(status int, err error) bool {
if 500 <= status && status <= 599 {
return true
}
if status == statusTooManyRequests {
return true
}
if err == io.ErrUnexpectedEOF {
return true
}
// Transient network errors should be retried.
if syscallRetryable(err) {
return true
}
if err, ok := err.(interface{ Temporary() bool }); ok {
if err.Temporary() {
return true
}
}
// If Go 1.13 error unwrapping is available, use this to examine wrapped
// errors.
if err, ok := err.(interface{ Unwrap() error }); ok {
return shouldRetry(status, err.Unwrap())
}
return false
}
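
A hedged sketch (not in the diff) of the retry classification above: 5xx statuses, 429, and unexpected-EOF errors are retried, while an ordinary 404 is not:

package gensupport

import (
	"fmt"
	"io"
)

func exampleShouldRetry() {
	fmt.Println(shouldRetry(503, nil))                   // true: server error
	fmt.Println(shouldRetry(statusTooManyRequests, nil)) // true: rate limited
	fmt.Println(shouldRetry(0, io.ErrUnexpectedEOF))     // true: transient read failure
	fmt.Println(shouldRetry(404, nil))                   // false: not retryable
}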


@@ -0,0 +1,15 @@
// Copyright 2020 Google LLC.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build linux
package gensupport
import "syscall"
func init() {
// Initialize syscallRetryable to return true on transient socket-level
// errors. These errors are specific to Linux.
syscallRetryable = func(err error) bool { return err == syscall.ECONNRESET || err == syscall.ECONNREFUSED }
}


@@ -0,0 +1,172 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gensupport
import (
"context"
"encoding/json"
"errors"
"net/http"
"time"
)
// Hook is the type of a function that is called once before each HTTP request
// that is sent by a generated API. It returns a function that is called after
// the request returns.
// Hooks are not called if the context is nil.
type Hook func(ctx context.Context, req *http.Request) func(resp *http.Response)
var hooks []Hook
// RegisterHook registers a Hook to be called before each HTTP request by a
// generated API. Hooks are called in the order they are registered. Each
// hook can return a function; if it is non-nil, it is called after the HTTP
// request returns. These functions are called in the reverse order.
// RegisterHook should not be called concurrently with itself or SendRequest.
func RegisterHook(h Hook) {
hooks = append(hooks, h)
}
// SendRequest sends a single HTTP request using the given client.
// If ctx is non-nil, it calls all hooks, then sends the request with
// req.WithContext, then calls any functions returned by the hooks in
// reverse order.
func SendRequest(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error) {
// Disallow Accept-Encoding because it interferes with the automatic gzip handling
// done by the default http.Transport. See https://github.com/google/google-api-go-client/issues/219.
if _, ok := req.Header["Accept-Encoding"]; ok {
return nil, errors.New("google api: custom Accept-Encoding headers not allowed")
}
if ctx == nil {
return client.Do(req)
}
// Call hooks in order of registration, store returned funcs.
post := make([]func(resp *http.Response), len(hooks))
for i, h := range hooks {
fn := h(ctx, req)
post[i] = fn
}
// Send request.
resp, err := send(ctx, client, req)
// Call returned funcs in reverse order.
for i := len(post) - 1; i >= 0; i-- {
if fn := post[i]; fn != nil {
fn(resp)
}
}
return resp, err
}
func send(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error) {
if client == nil {
client = http.DefaultClient
}
resp, err := client.Do(req.WithContext(ctx))
// If we got an error, and the context has been canceled,
// the context's error is probably more useful.
if err != nil {
select {
case <-ctx.Done():
err = ctx.Err()
default:
}
}
return resp, err
}
// SendRequestWithRetry sends a single HTTP request using the given client,
// with retries if a retryable error is returned.
// If ctx is non-nil, it calls all hooks, then sends the request with
// req.WithContext, then calls any functions returned by the hooks in
// reverse order.
func SendRequestWithRetry(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error) {
// Disallow Accept-Encoding because it interferes with the automatic gzip handling
// done by the default http.Transport. See https://github.com/google/google-api-go-client/issues/219.
if _, ok := req.Header["Accept-Encoding"]; ok {
return nil, errors.New("google api: custom Accept-Encoding headers not allowed")
}
if ctx == nil {
return client.Do(req)
}
// Call hooks in order of registration, store returned funcs.
post := make([]func(resp *http.Response), len(hooks))
for i, h := range hooks {
fn := h(ctx, req)
post[i] = fn
}
// Send request with retry.
resp, err := sendAndRetry(ctx, client, req)
// Call returned funcs in reverse order.
for i := len(post) - 1; i >= 0; i-- {
if fn := post[i]; fn != nil {
fn(resp)
}
}
return resp, err
}
func sendAndRetry(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error) {
if client == nil {
client = http.DefaultClient
}
var resp *http.Response
var err error
// Loop to retry the request, up to the context deadline.
var pause time.Duration
bo := backoff()
for {
select {
case <-ctx.Done():
// If we got an error, and the context has been canceled,
// the context's error is probably more useful.
if err == nil {
err = ctx.Err()
}
return resp, err
case <-time.After(pause):
}
resp, err = client.Do(req.WithContext(ctx))
var status int
if resp != nil {
status = resp.StatusCode
}
// Check if we can retry the request. A retry can only be done if the error
// is retryable and the request body can be re-created using GetBody (this
// will not be possible if the body was unbuffered).
if req.GetBody == nil || !shouldRetry(status, err) {
break
}
var errBody error
req.Body, errBody = req.GetBody()
if errBody != nil {
break
}
pause = bo.Pause()
if resp != nil && resp.Body != nil {
resp.Body.Close()
}
}
return resp, err
}
// DecodeResponse decodes the body of res into target. If there is no body,
// target is unchanged.
func DecodeResponse(target interface{}, res *http.Response) error {
if res.StatusCode == http.StatusNoContent {
return nil
}
return json.NewDecoder(res.Body).Decode(target)
}
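
For illustration (not in the diff), a hook that logs every request and its response status could be registered like this; the logging itself is hypothetical:

package gensupport

import (
	"context"
	"log"
	"net/http"
)

func exampleRegisterHook() {
	RegisterHook(func(ctx context.Context, req *http.Request) func(*http.Response) {
		log.Printf("-> %s %s", req.Method, req.URL)
		return func(resp *http.Response) { // runs after the request returns
			if resp != nil {
				log.Printf("<- %d", resp.StatusCode)
			}
		}
	})
}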


@@ -0,0 +1,53 @@
// Copyright 2020 Google LLC. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gensupport
import (
"runtime"
"strings"
"unicode"
)
// GoVersion returns the Go runtime version. The returned string
// has no whitespace.
func GoVersion() string {
return goVersion
}
var goVersion = goVer(runtime.Version())
const develPrefix = "devel +"
func goVer(s string) string {
if strings.HasPrefix(s, develPrefix) {
s = s[len(develPrefix):]
if p := strings.IndexFunc(s, unicode.IsSpace); p >= 0 {
s = s[:p]
}
return s
}
if strings.HasPrefix(s, "go1") {
s = s[2:]
var prerelease string
if p := strings.IndexFunc(s, notSemverRune); p >= 0 {
s, prerelease = s[:p], s[p:]
}
if strings.HasSuffix(s, ".") {
s += "0"
} else if strings.Count(s, ".") < 2 {
s += ".0"
}
if prerelease != "" {
s += "-" + prerelease
}
return s
}
return ""
}
func notSemverRune(r rune) bool {
return !strings.ContainsRune("0123456789.", r)
}
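
A few concrete inputs (a sketch, not in the diff) showing goVer's normalization: a missing patch component becomes ".0" and prerelease text moves behind a hyphen:

package gensupport

import "fmt"

func exampleGoVer() {
	fmt.Println(goVer("go1.18"))      // 1.18.0
	fmt.Println(goVer("go1.17.8"))    // 1.17.8
	fmt.Println(goVer("go1.18beta2")) // 1.18.0-beta2
}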


@@ -0,0 +1,12 @@
{
"type": "service_account",
"project_id": "project_id",
"private_key_id": "private_key_id",
"private_key": "-----BEGIN PRIVATE KEY-----\nMIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQCzd9ZdbPLAR4/g\nj+Rodu15kEasMpxf/Mz+gKRb2fmgR2Y18Y/iRBYZ4SkmF2pBSfzvwE/aTCzSPBGl\njHhPzohXnSN029eWoItmxVONlqCbR29pD07aLzv08LGeIGdHIEdhVjhvRwTkYZIF\ndXmlHNDRUU/EbJN9D+3ahw22BNnC4PaDgfIWTs3xIlTCSf2rL39I4DSNLTS/LzxK\n/XrQfBMtfwMWwyQaemXbc7gRgzOy8L56wa1W1zyXx99th97j1bLnoAXBGplhB4Co\n25ohyDAuhxRm+XGMEaO0Mzo7u97kvhj48a569RH1QRhOf7EBf60jO4h5eOmfi5P5\nPV3l7041AgMBAAECggEAEZ0RTNoEeRqM5F067YW+iM/AH+ZXspP9Cn1VpC4gcbqQ\nLXsnw+0qvh97CmIB66Z3TJBzRdl0DK4YjUbcB/kdKHwjnrR01DOtesijCqJd4N+B\n762w73jzSXbV9872U+S3HLZ5k3JE6KUqz55X8fyCAgkY6w4862lEzs2yasrPFHEV\nRoQp3PM0Miif8R3hGDhOWcHxcobullthG6JHAQFfc1ctwEjZI4TK0iWqlzfWGyKN\nT9UgvjUDud5cGvS9el0AiLN6keAf77tcPn1zetUVhxN1KN4bVAm1Q+6O8esl63Rj\n7JXpHzxaRnit9S6/aH/twHsGGtLg5Puw6jey6xs4AQKBgQD2JNy1wzewCRkD+jug\n8CHbJ+LIJVRNIaWa/RK1QD8/UjmFPkIzRQSF3AKC5mRAWSa2FL3yVK3N/DD7hazW\n85XSBB7IDcnoJnA9SkUeWwqQGkDx3EntlU3gX8Kn/+ofF8O9jLXxAa901MAVXVuf\n5YDzrl4PNE3bFnPCdiNmSdRfhQKBgQC6p4DsCpwqbeTu9f5ak9VW/fQP47Fgt+Mf\nwGjBnKP5PbbNJpHCfamF7jqSRH83Xy0KNssH7jD/NZ2oT594sMmiQPUC5ni9VYY6\nsuYB0JbD5Mq+EjKIVhYtxaQJ76LzHreEI+G4z6k3H7/hRpr3/C48n9G/uVkT9DbJ\noplxxEx68QKBgQCdJ23vcwO0Firtmi/GEmtbVHz70rGfSXNFoHz4UlvPXv0wsE5u\nE4vOt2i3EMhDOWh46odYGG6bzH+tp2xyFTW70Dui+QLHgPs6dpfoyLHWzZxXj5F3\n6lK9hgZvYvqk/XRRKmzjwnK2wjsdqOyeC1covlR5mqh20D/6kZkKbur0TQKBgAwy\nCZBimRWEnKKoW/gbFKNccGfhXqONID/g2Hdd/rC4QYth68AjacIgcJ9B7nX1uAGk\n1tsryvPB0w0+NpMyKdp6GAgaeuUUA3MuYSzZLiCagEyu77JMvaI7+Z3UlHcCGMd/\neK4Uk1/QqT7U2Cc/yN2ZK6E1QQa2vCWshA4U31JhAoGAbtbSSSsul1c+PsJ13Cfk\n6qVnqYzPqt23QTyOZmGAvUHH/M4xRiQpOE0cDF4t/r5PwenAQPQzTvMmWRzj6uAY\n3eaU0eAK7ZfoweCoOIAPnpFbbRLrXfoY46H7MYh7euWGXOKEpxz5yzuEkd9ByNUE\n86vSEidqbMIiXVgEgnu/k08=\n-----END PRIVATE KEY-----\n",
"client_email": "xyz@developer.gserviceaccount.com",
"client_id": "123",
"auth_uri": "https://accounts.google.com/o/oauth2/auth",
"token_uri": "https://accounts.google.com/o/oauth2/token",
"auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs",
"client_x509_cert_url": "https://www.googleapis.com/robot/v1/metadata/x509/xyz%40developer.gserviceaccount.com"
}

vendor/google.golang.org/api/internal/settings.go (generated, vendored, new file, 110 lines added)

@@ -0,0 +1,110 @@
// Copyright 2017 Google LLC.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package internal supports the options and transport packages.
package internal
import (
"crypto/tls"
"errors"
"net/http"
"golang.org/x/oauth2"
"golang.org/x/oauth2/google"
"google.golang.org/grpc"
)
// DialSettings holds information needed to establish a connection with a
// Google API service.
type DialSettings struct {
Endpoint string
DefaultEndpoint string
DefaultMTLSEndpoint string
Scopes []string
TokenSource oauth2.TokenSource
Credentials *google.Credentials
CredentialsFile string // if set, TokenSource is ignored.
CredentialsJSON []byte
UserAgent string
APIKey string
Audiences []string
HTTPClient *http.Client
GRPCDialOpts []grpc.DialOption
GRPCConn *grpc.ClientConn
GRPCConnPool ConnPool
GRPCConnPoolSize int
NoAuth bool
TelemetryDisabled bool
ClientCertSource func(*tls.CertificateRequestInfo) (*tls.Certificate, error)
CustomClaims map[string]interface{}
SkipValidation bool
// Google API system parameters. For more information please read:
// https://cloud.google.com/apis/docs/system-parameters
QuotaProject string
RequestReason string
}
// Validate reports an error if ds is invalid.
func (ds *DialSettings) Validate() error {
if ds.SkipValidation {
return nil
}
hasCreds := ds.APIKey != "" || ds.TokenSource != nil || ds.CredentialsFile != "" || ds.Credentials != nil
if ds.NoAuth && hasCreds {
return errors.New("options.WithoutAuthentication is incompatible with any option that provides credentials")
}
// Credentials should not appear with other options.
// We currently allow TokenSource and CredentialsFile to coexist.
// TODO(jba): make TokenSource & CredentialsFile an error (breaking change).
nCreds := 0
if ds.Credentials != nil {
nCreds++
}
if ds.CredentialsJSON != nil {
nCreds++
}
if ds.CredentialsFile != "" {
nCreds++
}
if ds.APIKey != "" {
nCreds++
}
if ds.TokenSource != nil {
nCreds++
}
if len(ds.Scopes) > 0 && len(ds.Audiences) > 0 {
return errors.New("WithScopes is incompatible with WithAudience")
}
// Accept only one form of credentials, except we allow TokenSource and CredentialsFile for backwards compatibility.
if nCreds > 1 && !(nCreds == 2 && ds.TokenSource != nil && ds.CredentialsFile != "") {
return errors.New("multiple credential options provided")
}
if ds.GRPCConn != nil && ds.GRPCConnPool != nil {
return errors.New("WithGRPCConn is incompatible with WithConnPool")
}
if ds.HTTPClient != nil && ds.GRPCConnPool != nil {
return errors.New("WithHTTPClient is incompatible with WithConnPool")
}
if ds.HTTPClient != nil && ds.GRPCConn != nil {
return errors.New("WithHTTPClient is incompatible with WithGRPCConn")
}
if ds.HTTPClient != nil && ds.GRPCDialOpts != nil {
return errors.New("WithHTTPClient is incompatible with gRPC dial options")
}
if ds.HTTPClient != nil && ds.QuotaProject != "" {
return errors.New("WithHTTPClient is incompatible with QuotaProject")
}
if ds.HTTPClient != nil && ds.RequestReason != "" {
return errors.New("WithHTTPClient is incompatible with RequestReason")
}
if ds.HTTPClient != nil && ds.ClientCertSource != nil {
return errors.New("WithHTTPClient is incompatible with WithClientCertSource")
}
if ds.ClientCertSource != nil && (ds.GRPCConn != nil || ds.GRPCConnPool != nil || ds.GRPCConnPoolSize != 0 || ds.GRPCDialOpts != nil) {
return errors.New("WithClientCertSource is currently only supported for HTTP. gRPC settings are incompatible")
}
return nil
}
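
As a quick sketch of Validate (not in the diff): supplying both an API key and a credentials file counts as two credential sources and is rejected:

package internal

import "fmt"

func exampleValidate() {
	ds := &DialSettings{APIKey: "key", CredentialsFile: "creds.json"} // hypothetical values
	fmt.Println(ds.Validate()) // multiple credential options provided
}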


@@ -0,0 +1,27 @@
Copyright (c) 2013 Joshua Tacoma. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.


@@ -0,0 +1,14 @@
name: "uritemplates"
description:
"Package uritemplates is a level 4 implementation of RFC 6570 (URI "
"Template, http://tools.ietf.org/html/rfc6570)."
third_party {
url {
type: GIT
value: "https://github.com/jtacoma/uritemplates"
}
version: "0.1"
last_upgrade_date { year: 2014 month: 8 day: 18 }
license_type: NOTICE
}


@@ -0,0 +1,248 @@
// Copyright 2013 Joshua Tacoma. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package uritemplates is a level 3 implementation of RFC 6570 (URI
// Template, http://tools.ietf.org/html/rfc6570).
// uritemplates does not support composite values (in Go: slices or maps)
// and so does not qualify as a level 4 implementation.
package uritemplates
import (
"bytes"
"errors"
"regexp"
"strconv"
"strings"
)
var (
unreserved = regexp.MustCompile("[^A-Za-z0-9\\-._~]")
reserved = regexp.MustCompile("[^A-Za-z0-9\\-._~:/?#[\\]@!$&'()*+,;=]")
validname = regexp.MustCompile("^([A-Za-z0-9_\\.]|%[0-9A-Fa-f][0-9A-Fa-f])+$")
hex = []byte("0123456789ABCDEF")
)
func pctEncode(src []byte) []byte {
dst := make([]byte, len(src)*3)
for i, b := range src {
buf := dst[i*3 : i*3+3]
buf[0] = 0x25
buf[1] = hex[b/16]
buf[2] = hex[b%16]
}
return dst
}
// pairWriter is a convenience struct which allows escaped and unescaped
// versions of the template to be written in parallel.
type pairWriter struct {
escaped, unescaped bytes.Buffer
}
// Write writes the provided string directly without any escaping.
func (w *pairWriter) Write(s string) {
w.escaped.WriteString(s)
w.unescaped.WriteString(s)
}
// Escape writes the provided string, escaping the string for the
// escaped output.
func (w *pairWriter) Escape(s string, allowReserved bool) {
w.unescaped.WriteString(s)
if allowReserved {
w.escaped.Write(reserved.ReplaceAllFunc([]byte(s), pctEncode))
} else {
w.escaped.Write(unreserved.ReplaceAllFunc([]byte(s), pctEncode))
}
}
// Escaped returns the escaped string.
func (w *pairWriter) Escaped() string {
return w.escaped.String()
}
// Unescaped returns the unescaped string.
func (w *pairWriter) Unescaped() string {
return w.unescaped.String()
}
// A uriTemplate is a parsed representation of a URI template.
type uriTemplate struct {
raw string
parts []templatePart
}
// parse parses a URI template string into a uriTemplate object.
func parse(rawTemplate string) (*uriTemplate, error) {
split := strings.Split(rawTemplate, "{")
parts := make([]templatePart, len(split)*2-1)
for i, s := range split {
if i == 0 {
if strings.Contains(s, "}") {
return nil, errors.New("unexpected }")
}
parts[i].raw = s
continue
}
subsplit := strings.Split(s, "}")
if len(subsplit) != 2 {
return nil, errors.New("malformed template")
}
expression := subsplit[0]
var err error
parts[i*2-1], err = parseExpression(expression)
if err != nil {
return nil, err
}
parts[i*2].raw = subsplit[1]
}
return &uriTemplate{
raw: rawTemplate,
parts: parts,
}, nil
}
type templatePart struct {
raw string
terms []templateTerm
first string
sep string
named bool
ifemp string
allowReserved bool
}
type templateTerm struct {
name string
explode bool
truncate int
}
func parseExpression(expression string) (result templatePart, err error) {
switch expression[0] {
case '+':
result.sep = ","
result.allowReserved = true
expression = expression[1:]
case '.':
result.first = "."
result.sep = "."
expression = expression[1:]
case '/':
result.first = "/"
result.sep = "/"
expression = expression[1:]
case ';':
result.first = ";"
result.sep = ";"
result.named = true
expression = expression[1:]
case '?':
result.first = "?"
result.sep = "&"
result.named = true
result.ifemp = "="
expression = expression[1:]
case '&':
result.first = "&"
result.sep = "&"
result.named = true
result.ifemp = "="
expression = expression[1:]
case '#':
result.first = "#"
result.sep = ","
result.allowReserved = true
expression = expression[1:]
default:
result.sep = ","
}
rawterms := strings.Split(expression, ",")
result.terms = make([]templateTerm, len(rawterms))
for i, raw := range rawterms {
result.terms[i], err = parseTerm(raw)
if err != nil {
break
}
}
return result, err
}
func parseTerm(term string) (result templateTerm, err error) {
// TODO(djd): Remove "*" suffix parsing once we check that no APIs have
// mistakenly used that attribute.
if strings.HasSuffix(term, "*") {
result.explode = true
term = term[:len(term)-1]
}
split := strings.Split(term, ":")
if len(split) == 1 {
result.name = term
} else if len(split) == 2 {
result.name = split[0]
var parsed int64
parsed, err = strconv.ParseInt(split[1], 10, 0)
result.truncate = int(parsed)
} else {
err = errors.New("multiple colons in same term")
}
if !validname.MatchString(result.name) {
err = errors.New("not a valid name: " + result.name)
}
if result.explode && result.truncate > 0 {
err = errors.New("both explode and prefix modifiers on same term")
}
return result, err
}
// Expand expands a URI template with a set of values to produce the
// resultant URI. Two forms of the result are returned: one with all the
// elements escaped, and one with the elements unescaped.
func (t *uriTemplate) Expand(values map[string]string) (escaped, unescaped string) {
var w pairWriter
for _, p := range t.parts {
p.expand(&w, values)
}
return w.Escaped(), w.Unescaped()
}
func (tp *templatePart) expand(w *pairWriter, values map[string]string) {
if len(tp.raw) > 0 {
w.Write(tp.raw)
return
}
var first = true
for _, term := range tp.terms {
value, exists := values[term.name]
if !exists {
continue
}
if first {
w.Write(tp.first)
first = false
} else {
w.Write(tp.sep)
}
tp.expandString(w, term, value)
}
}
func (tp *templatePart) expandName(w *pairWriter, name string, empty bool) {
if tp.named {
w.Write(name)
if empty {
w.Write(tp.ifemp)
} else {
w.Write("=")
}
}
}
func (tp *templatePart) expandString(w *pairWriter, t templateTerm, s string) {
if len(s) > t.truncate && t.truncate > 0 {
s = s[:t.truncate]
}
tp.expandName(w, t.name, len(s) == 0)
w.Escape(s, tp.allowReserved)
}


@@ -0,0 +1,17 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package uritemplates
// Expand parses then expands a URI template with a set of values to produce
// the resultant URI. Two forms of the result are returned: one with all the
// elements escaped, and one with the elements unescaped.
func Expand(path string, values map[string]string) (escaped, unescaped string, err error) {
template, err := parse(path)
if err != nil {
return "", "", err
}
escaped, unescaped = template.Expand(values)
return escaped, unescaped, nil
}
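
A short sketch (not in the diff) of Expand with a simple two-variable template; the escaped form percent-encodes reserved characters while the unescaped form keeps them:

package uritemplates

import "fmt"

func exampleExpand() {
	escaped, unescaped, err := Expand("/b/{bucket}/o/{object}", map[string]string{
		"bucket": "my bucket",
		"object": "a/b.txt",
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(escaped)   // /b/my%20bucket/o/a%2Fb.txt
	fmt.Println(unescaped) // /b/my bucket/o/a/b.txt
}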