vendor: github.com/prometheus/client_golang v1.12.1

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>

parent 985711c1f4
commit ec47096efc

574 changed files with 101741 additions and 22828 deletions
vendor/google.golang.org/api/AUTHORS (generated, vendored) | 1
@@ -8,3 +8,4 @@
# Please keep the list sorted.
Google Inc.
LightStep Inc.

vendor/google.golang.org/api/CONTRIBUTORS (generated, vendored) | 6
@@ -44,7 +44,13 @@ Ivan Krasin <krasin@golang.org>
Jason Hall <jasonhall@google.com>
Johan Euphrosine <proppy@google.com>
Kostik Shtoyk <kostik@google.com>
Kunpei Sakai <namusyaka@gmail.com>
Matthew Dolan <dolan@lightstep.com>
Matthew Whisenhunt <matt.whisenhunt@gmail.com>
Michael McGreevy <mcgreevy@golang.org>
Nick Craig-Wood <nickcw@gmail.com>
Robbie Trencheny <me@robbiet.us>
Ross Light <light@google.com>
Sarah Adams <shadams@google.com>
Scott Van Woudenberg <scottvw@google.com>
Takashi Matsuo <tmatsuo@google.com>

vendor/google.golang.org/api/gensupport/backoff.go (generated, vendored, deleted) | 46
@@ -1,46 +0,0 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package gensupport

import (
    "math/rand"
    "time"
)

type BackoffStrategy interface {
    // Pause returns the duration of the next pause and true if the operation should be
    // retried, or false if no further retries should be attempted.
    Pause() (time.Duration, bool)

    // Reset restores the strategy to its initial state.
    Reset()
}

// ExponentialBackoff performs exponential backoff as per https://en.wikipedia.org/wiki/Exponential_backoff.
// The initial pause time is given by Base.
// Once the total pause time exceeds Max, Pause will indicate no further retries.
type ExponentialBackoff struct {
    Base  time.Duration
    Max   time.Duration
    total time.Duration
    n     uint
}

func (eb *ExponentialBackoff) Pause() (time.Duration, bool) {
    if eb.total > eb.Max {
        return 0, false
    }

    // The next pause is selected from randomly from [0, 2^n * Base).
    d := time.Duration(rand.Int63n((1 << eb.n) * int64(eb.Base)))
    eb.total += d
    eb.n++
    return d, true
}

func (eb *ExponentialBackoff) Reset() {
    eb.n = 0
    eb.total = 0
}

vendor/google.golang.org/api/gensupport/media.go (generated, vendored, deleted) | 200
@@ -1,200 +0,0 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package gensupport

import (
    "fmt"
    "io"
    "io/ioutil"
    "mime/multipart"
    "net/http"
    "net/textproto"

    "google.golang.org/api/googleapi"
)

const sniffBuffSize = 512

func newContentSniffer(r io.Reader) *contentSniffer {
    return &contentSniffer{r: r}
}

// contentSniffer wraps a Reader, and reports the content type determined by sniffing up to 512 bytes from the Reader.
type contentSniffer struct {
    r     io.Reader
    start []byte // buffer for the sniffed bytes.
    err   error  // set to any error encountered while reading bytes to be sniffed.

    ctype   string // set on first sniff.
    sniffed bool   // set to true on first sniff.
}

func (cs *contentSniffer) Read(p []byte) (n int, err error) {
    // Ensure that the content type is sniffed before any data is consumed from Reader.
    _, _ = cs.ContentType()

    if len(cs.start) > 0 {
        n := copy(p, cs.start)
        cs.start = cs.start[n:]
        return n, nil
    }

    // We may have read some bytes into start while sniffing, even if the read ended in an error.
    // We should first return those bytes, then the error.
    if cs.err != nil {
        return 0, cs.err
    }

    // Now we have handled all bytes that were buffered while sniffing. Now just delegate to the underlying reader.
    return cs.r.Read(p)
}

// ContentType returns the sniffed content type, and whether the content type was succesfully sniffed.
func (cs *contentSniffer) ContentType() (string, bool) {
    if cs.sniffed {
        return cs.ctype, cs.ctype != ""
    }
    cs.sniffed = true
    // If ReadAll hits EOF, it returns err==nil.
    cs.start, cs.err = ioutil.ReadAll(io.LimitReader(cs.r, sniffBuffSize))

    // Don't try to detect the content type based on possibly incomplete data.
    if cs.err != nil {
        return "", false
    }

    cs.ctype = http.DetectContentType(cs.start)
    return cs.ctype, true
}

// DetermineContentType determines the content type of the supplied reader.
// If the content type is already known, it can be specified via ctype.
// Otherwise, the content of media will be sniffed to determine the content type.
// If media implements googleapi.ContentTyper (deprecated), this will be used
// instead of sniffing the content.
// After calling DetectContentType the caller must not perform further reads on
// media, but rather read from the Reader that is returned.
func DetermineContentType(media io.Reader, ctype string) (io.Reader, string) {
    // Note: callers could avoid calling DetectContentType if ctype != "",
    // but doing the check inside this function reduces the amount of
    // generated code.
    if ctype != "" {
        return media, ctype
    }

    // For backwards compatability, allow clients to set content
    // type by providing a ContentTyper for media.
    if typer, ok := media.(googleapi.ContentTyper); ok {
        return media, typer.ContentType()
    }

    sniffer := newContentSniffer(media)
    if ctype, ok := sniffer.ContentType(); ok {
        return sniffer, ctype
    }
    // If content type could not be sniffed, reads from sniffer will eventually fail with an error.
    return sniffer, ""
}

type typeReader struct {
    io.Reader
    typ string
}

// multipartReader combines the contents of multiple readers to creat a multipart/related HTTP body.
// Close must be called if reads from the multipartReader are abandoned before reaching EOF.
type multipartReader struct {
    pr       *io.PipeReader
    pipeOpen bool
    ctype    string
}

func newMultipartReader(parts []typeReader) *multipartReader {
    mp := &multipartReader{pipeOpen: true}
    var pw *io.PipeWriter
    mp.pr, pw = io.Pipe()
    mpw := multipart.NewWriter(pw)
    mp.ctype = "multipart/related; boundary=" + mpw.Boundary()
    go func() {
        for _, part := range parts {
            w, err := mpw.CreatePart(typeHeader(part.typ))
            if err != nil {
                mpw.Close()
                pw.CloseWithError(fmt.Errorf("googleapi: CreatePart failed: %v", err))
                return
            }
            _, err = io.Copy(w, part.Reader)
            if err != nil {
                mpw.Close()
                pw.CloseWithError(fmt.Errorf("googleapi: Copy failed: %v", err))
                return
            }
        }

        mpw.Close()
        pw.Close()
    }()
    return mp
}

func (mp *multipartReader) Read(data []byte) (n int, err error) {
    return mp.pr.Read(data)
}

func (mp *multipartReader) Close() error {
    if !mp.pipeOpen {
        return nil
    }
    mp.pipeOpen = false
    return mp.pr.Close()
}

// CombineBodyMedia combines a json body with media content to create a multipart/related HTTP body.
// It returns a ReadCloser containing the combined body, and the overall "multipart/related" content type, with random boundary.
//
// The caller must call Close on the returned ReadCloser if reads are abandoned before reaching EOF.
func CombineBodyMedia(body io.Reader, bodyContentType string, media io.Reader, mediaContentType string) (io.ReadCloser, string) {
    mp := newMultipartReader([]typeReader{
        {body, bodyContentType},
        {media, mediaContentType},
    })
    return mp, mp.ctype
}

func typeHeader(contentType string) textproto.MIMEHeader {
    h := make(textproto.MIMEHeader)
    if contentType != "" {
        h.Set("Content-Type", contentType)
    }
    return h
}

// PrepareUpload determines whether the data in the supplied reader should be
// uploaded in a single request, or in sequential chunks.
// chunkSize is the size of the chunk that media should be split into.
// If chunkSize is non-zero and the contents of media do not fit in a single
// chunk (or there is an error reading media), then media will be returned as a
// ResumableBuffer. Otherwise, media will be returned as a Reader.
//
// After PrepareUpload has been called, media should no longer be used: the
// media content should be accessed via one of the return values.
func PrepareUpload(media io.Reader, chunkSize int) (io.Reader,
    *ResumableBuffer) {
    if chunkSize == 0 { // do not chunk
        return media, nil
    }

    rb := NewResumableBuffer(media, chunkSize)
    rdr, _, _, err := rb.Chunk()

    if err == io.EOF { // we can upload this in a single request
        return rdr, nil
    }
    // err might be a non-EOF error. If it is, the next call to rb.Chunk will
    // return the same error. Returning a ResumableBuffer ensures that this error
    // will be handled at some point.

    return nil, rb
}

vendor/google.golang.org/api/gensupport/retry.go (generated, vendored, deleted) | 77
@@ -1,77 +0,0 @@
package gensupport

import (
    "io"
    "net"
    "net/http"
    "time"

    "golang.org/x/net/context"
)

// Retry invokes the given function, retrying it multiple times if the connection failed or
// the HTTP status response indicates the request should be attempted again. ctx may be nil.
func Retry(ctx context.Context, f func() (*http.Response, error), backoff BackoffStrategy) (*http.Response, error) {
    for {
        resp, err := f()

        var status int
        if resp != nil {
            status = resp.StatusCode
        }

        // Return if we shouldn't retry.
        pause, retry := backoff.Pause()
        if !shouldRetry(status, err) || !retry {
            return resp, err
        }

        // Ensure the response body is closed, if any.
        if resp != nil && resp.Body != nil {
            resp.Body.Close()
        }

        // Pause, but still listen to ctx.Done if context is not nil.
        var done <-chan struct{}
        if ctx != nil {
            done = ctx.Done()
        }
        select {
        case <-done:
            return nil, ctx.Err()
        case <-time.After(pause):
        }
    }
}

// DefaultBackoffStrategy returns a default strategy to use for retrying failed upload requests.
func DefaultBackoffStrategy() BackoffStrategy {
    return &ExponentialBackoff{
        Base: 250 * time.Millisecond,
        Max:  16 * time.Second,
    }
}

// shouldRetry returns true if the HTTP response / error indicates that the
// request should be attempted again.
func shouldRetry(status int, err error) bool {
    // Retry for 5xx response codes.
    if 500 <= status && status < 600 {
        return true
    }

    // Retry on statusTooManyRequests{
    if status == statusTooManyRequests {
        return true
    }

    // Retry on unexpected EOFs and temporary network errors.
    if err == io.ErrUnexpectedEOF {
        return true
    }
    if err, ok := err.(net.Error); ok {
        return err.Temporary()
    }

    return false
}

vendor/google.golang.org/api/googleapi/googleapi.go (generated, vendored) | 136
@@ -1,4 +1,4 @@
// Copyright 2011 Google Inc. All rights reserved.
// Copyright 2011 Google LLC. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

@@ -16,7 +16,7 @@ import (
    "net/url"
    "strings"

    "google.golang.org/api/googleapi/internal/uritemplates"
    "google.golang.org/api/internal/third_party/uritemplates"
)

// ContentTyper is an interface for Readers which know (or would like

@@ -37,24 +37,28 @@ type SizeReaderAt interface {
// ServerResponse is embedded in each Do response and
// provides the HTTP status code and header sent by the server.
type ServerResponse struct {
    // HTTPStatusCode is the server's response status code.
    // When using a resource method's Do call, this will always be in the 2xx range.
    // HTTPStatusCode is the server's response status code. When using a
    // resource method's Do call, this will always be in the 2xx range.
    HTTPStatusCode int
    // Header contains the response header fields from the server.
    Header http.Header
}

const (
    // Version defines the gax version being used. This is typically sent
    // in an HTTP header to services.
    Version = "0.5"

    // UserAgent is the header string used to identify this package.
    UserAgent = "google-api-go-client/" + Version

    // The default chunk size to use for resumable uplods if not specified by the user.
    DefaultUploadChunkSize = 8 * 1024 * 1024
    // DefaultUploadChunkSize is the default chunk size to use for resumable
    // uploads if not specified by the user.
    DefaultUploadChunkSize = 16 * 1024 * 1024

    // The minimum chunk size that can be used for resumable uploads. All
    // user-specified chunk sizes must be multiple of this value.
    // MinUploadChunkSize is the minimum chunk size that can be used for
    // resumable uploads. All user-specified chunk sizes must be multiple of
    // this value.
    MinUploadChunkSize = 256 * 1024
)

@@ -65,6 +69,8 @@ type Error struct {
    // Message is the server response message and is only populated when
    // explicitly referenced by the JSON server response.
    Message string `json:"message"`
    // Details provide more context to an error.
    Details []interface{} `json:"details"`
    // Body is the raw response returned by the server.
    // It is often but not always JSON, depending on how the request fails.
    Body string

@@ -91,6 +97,16 @@ func (e *Error) Error() string {
    if e.Message != "" {
        fmt.Fprintf(&buf, "%s", e.Message)
    }
    if len(e.Details) > 0 {
        var detailBuf bytes.Buffer
        enc := json.NewEncoder(&detailBuf)
        enc.SetIndent("", " ")
        if err := enc.Encode(e.Details); err == nil {
            fmt.Fprint(&buf, "\nDetails:")
            fmt.Fprintf(&buf, "\n%s", detailBuf.String())

        }
    }
    if len(e.Errors) == 0 {
        return strings.TrimSpace(buf.String())
    }

@@ -149,21 +165,25 @@ func IsNotModified(err error) bool {
// CheckMediaResponse returns an error (of type *Error) if the response
// status code is not 2xx. Unlike CheckResponse it does not assume the
// body is a JSON error document.
// It is the caller's responsibility to close res.Body.
func CheckMediaResponse(res *http.Response) error {
    if res.StatusCode >= 200 && res.StatusCode <= 299 {
        return nil
    }
    slurp, _ := ioutil.ReadAll(io.LimitReader(res.Body, 1<<20))
    res.Body.Close()
    return &Error{
        Code: res.StatusCode,
        Body: string(slurp),
    }
}

// MarshalStyle defines whether to marshal JSON with a {"data": ...} wrapper.
type MarshalStyle bool

// WithDataWrapper marshals JSON with a {"data": ...} wrapper.
var WithDataWrapper = MarshalStyle(true)

// WithoutDataWrapper marshals JSON without a {"data": ...} wrapper.
var WithoutDataWrapper = MarshalStyle(false)

func (wrap MarshalStyle) JSONReader(v interface{}) (io.Reader, error) {

@@ -181,37 +201,12 @@ func (wrap MarshalStyle) JSONReader(v interface{}) (io.Reader, error) {
    return buf, nil
}

// endingWithErrorReader from r until it returns an error. If the
// final error from r is io.EOF and e is non-nil, e is used instead.
type endingWithErrorReader struct {
    r io.Reader
    e error
}

func (er endingWithErrorReader) Read(p []byte) (n int, err error) {
    n, err = er.r.Read(p)
    if err == io.EOF && er.e != nil {
        err = er.e
    }
    return
}

// countingWriter counts the number of bytes it receives to write, but
// discards them.
type countingWriter struct {
    n *int64
}

func (w countingWriter) Write(p []byte) (int, error) {
    *w.n += int64(len(p))
    return len(p), nil
}

// ProgressUpdater is a function that is called upon every progress update of a resumable upload.
// This is the only part of a resumable upload (from googleapi) that is usable by the developer.
// The remaining usable pieces of resumable uploads is exposed in each auto-generated API.
type ProgressUpdater func(current, total int64)

// MediaOption defines the interface for setting media options.
type MediaOption interface {
    setOptions(o *MediaOptions)
}

@@ -268,51 +263,47 @@ func ProcessMediaOptions(opts []MediaOption) *MediaOptions {
    return mo
}

// ResolveRelative resolves relatives such as "http://www.golang.org/" and
// "topics/myproject/mytopic" into a single string, such as
// "http://www.golang.org/topics/myproject/mytopic". It strips all parent
// references (e.g. ../..) as well as anything after the host
// (e.g. /bar/gaz gets stripped out of foo.com/bar/gaz).
//
// ResolveRelative panics if either basestr or relstr is not able to be parsed.
func ResolveRelative(basestr, relstr string) string {
    u, _ := url.Parse(basestr)
    rel, _ := url.Parse(relstr)
    u, err := url.Parse(basestr)
    if err != nil {
        panic(fmt.Sprintf("failed to parse %q", basestr))
    }
    afterColonPath := ""
    if i := strings.IndexRune(relstr, ':'); i > 0 {
        afterColonPath = relstr[i+1:]
        relstr = relstr[:i]
    }
    rel, err := url.Parse(relstr)
    if err != nil {
        panic(fmt.Sprintf("failed to parse %q", relstr))
    }
    u = u.ResolveReference(rel)
    us := u.String()
    if afterColonPath != "" {
        us = fmt.Sprintf("%s:%s", us, afterColonPath)
    }
    us = strings.Replace(us, "%7B", "{", -1)
    us = strings.Replace(us, "%7D", "}", -1)
    us = strings.Replace(us, "%2A", "*", -1)
    return us
}

// has4860Fix is whether this Go environment contains the fix for
// http://golang.org/issue/4860
var has4860Fix bool

// init initializes has4860Fix by checking the behavior of the net/http package.
func init() {
    r := http.Request{
        URL: &url.URL{
            Scheme: "http",
            Opaque: "//opaque",
        },
    }
    b := &bytes.Buffer{}
    r.Write(b)
    has4860Fix = bytes.HasPrefix(b.Bytes(), []byte("GET http"))
}

// SetOpaque sets u.Opaque from u.Path such that HTTP requests to it
// don't alter any hex-escaped characters in u.Path.
func SetOpaque(u *url.URL) {
    u.Opaque = "//" + u.Host + u.Path
    if !has4860Fix {
        u.Opaque = u.Scheme + ":" + u.Opaque
    }
}

// Expand subsitutes any {encoded} strings in the URL passed in using
// the map supplied.
//
// This calls SetOpaque to avoid encoding of the parameters in the URL path.
func Expand(u *url.URL, expansions map[string]string) {
    expanded, err := uritemplates.Expand(u.Path, expansions)
    escaped, unescaped, err := uritemplates.Expand(u.Path, expansions)
    if err == nil {
        u.Path = expanded
        SetOpaque(u)
        u.Path = unescaped
        u.RawPath = escaped
    }
}

@@ -360,7 +351,7 @@ func ConvertVariant(v map[string]interface{}, dst interface{}) bool {
}

// A Field names a field to be retrieved with a partial response.
// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// https://cloud.google.com/storage/docs/json_api/v1/how-tos/performance
//
// Partial responses can dramatically reduce the amount of data that must be sent to your application.
// In order to request partial responses, you can specify the full list of fields

@@ -377,9 +368,6 @@ func ConvertVariant(v map[string]interface{}, dst interface{}) bool {
//
// svc.Events.List().Fields("nextPageToken", "items(id,updated)").Do()
//
// More information about field formatting can be found here:
// https://developers.google.com/+/api/#fields-syntax
//
// Another way to find field names is through the Google API explorer:
// https://developers.google.com/apis-explorer/#p/
type Field string

@@ -421,4 +409,12 @@ type userIP string

func (i userIP) Get() (string, string) { return "userIp", string(i) }

// Trace returns a CallOption that enables diagnostic tracing for a call.
// traceToken is an ID supplied by Google support.
func Trace(traceToken string) CallOption { return traceTok(traceToken) }

type traceTok string

func (t traceTok) Get() (string, string) { return "trace", "token:" + string(t) }

// TODO: Fields too

vendor/google.golang.org/api/googleapi/internal/uritemplates/LICENSE (generated, vendored, deleted) | 18
@@ -1,18 +0,0 @@
Copyright (c) 2013 Joshua Tacoma

Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

vendor/google.golang.org/api/googleapi/internal/uritemplates/utils.go (generated, vendored, deleted) | 13
@@ -1,13 +0,0 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package uritemplates

func Expand(path string, values map[string]string) (string, error) {
    template, err := parse(path)
    if err != nil {
        return "", err
    }
    return template.Expand(values), nil
}

vendor/google.golang.org/api/googleapi/transport/apikey.go (generated, vendored, new file) | 44
@@ -0,0 +1,44 @@
// Copyright 2012 Google LLC. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package transport contains HTTP transports used to make
// authenticated API requests.
//
// This package is DEPRECATED. Users should instead use,
//
// service, err := NewService(..., option.WithAPIKey(...))
package transport

import (
    "errors"
    "net/http"
)

// APIKey is an HTTP Transport which wraps an underlying transport and
// appends an API Key "key" parameter to the URL of outgoing requests.
//
// Deprecated: please use NewService(..., option.WithAPIKey(...)) instead.
type APIKey struct {
    // Key is the API Key to set on requests.
    Key string

    // Transport is the underlying HTTP transport.
    // If nil, http.DefaultTransport is used.
    Transport http.RoundTripper
}

func (t *APIKey) RoundTrip(req *http.Request) (*http.Response, error) {
    rt := t.Transport
    if rt == nil {
        rt = http.DefaultTransport
        if rt == nil {
            return nil, errors.New("googleapi/transport: no Transport specified or available")
        }
    }
    newReq := *req
    args := newReq.URL.Query()
    args.Set("key", t.Key)
    newReq.URL.RawQuery = args.Encode()
    return rt.RoundTrip(&newReq)
}

vendor/google.golang.org/api/googleapi/types.go (generated, vendored) | 52
@@ -1,4 +1,4 @@
// Copyright 2013 Google Inc. All rights reserved.
// Copyright 2013 Google LLC. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

@@ -6,6 +6,7 @@ package googleapi

import (
    "encoding/json"
    "errors"
    "strconv"
)

@@ -119,36 +120,55 @@ func quotedList(n int, fn func(dst []byte, i int) []byte) ([]byte, error) {
    return dst, nil
}

func (s Int64s) MarshalJSON() ([]byte, error) {
    return quotedList(len(s), func(dst []byte, i int) []byte {
        return strconv.AppendInt(dst, s[i], 10)
func (q Int64s) MarshalJSON() ([]byte, error) {
    return quotedList(len(q), func(dst []byte, i int) []byte {
        return strconv.AppendInt(dst, q[i], 10)
    })
}

func (s Int32s) MarshalJSON() ([]byte, error) {
    return quotedList(len(s), func(dst []byte, i int) []byte {
        return strconv.AppendInt(dst, int64(s[i]), 10)
func (q Int32s) MarshalJSON() ([]byte, error) {
    return quotedList(len(q), func(dst []byte, i int) []byte {
        return strconv.AppendInt(dst, int64(q[i]), 10)
    })
}

func (s Uint64s) MarshalJSON() ([]byte, error) {
    return quotedList(len(s), func(dst []byte, i int) []byte {
        return strconv.AppendUint(dst, s[i], 10)
func (q Uint64s) MarshalJSON() ([]byte, error) {
    return quotedList(len(q), func(dst []byte, i int) []byte {
        return strconv.AppendUint(dst, q[i], 10)
    })
}

func (s Uint32s) MarshalJSON() ([]byte, error) {
    return quotedList(len(s), func(dst []byte, i int) []byte {
        return strconv.AppendUint(dst, uint64(s[i]), 10)
func (q Uint32s) MarshalJSON() ([]byte, error) {
    return quotedList(len(q), func(dst []byte, i int) []byte {
        return strconv.AppendUint(dst, uint64(q[i]), 10)
    })
}

func (s Float64s) MarshalJSON() ([]byte, error) {
    return quotedList(len(s), func(dst []byte, i int) []byte {
        return strconv.AppendFloat(dst, s[i], 'g', -1, 64)
func (q Float64s) MarshalJSON() ([]byte, error) {
    return quotedList(len(q), func(dst []byte, i int) []byte {
        return strconv.AppendFloat(dst, q[i], 'g', -1, 64)
    })
}

// RawMessage is a raw encoded JSON value.
// It is identical to json.RawMessage, except it does not suffer from
// https://golang.org/issue/14493.
type RawMessage []byte

// MarshalJSON returns m.
func (m RawMessage) MarshalJSON() ([]byte, error) {
    return m, nil
}

// UnmarshalJSON sets *m to a copy of data.
func (m *RawMessage) UnmarshalJSON(data []byte) error {
    if m == nil {
        return errors.New("googleapi.RawMessage: UnmarshalJSON on nil pointer")
    }
    *m = append((*m)[:0], data...)
    return nil
}

/*
 * Helper routines for simplifying the creation of optional fields of basic type.
 */

vendor/google.golang.org/api/internal/conn_pool.go (generated, vendored, new file) | 30
@@ -0,0 +1,30 @@
// Copyright 2020 Google LLC.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package internal

import (
    "google.golang.org/grpc"
)

// ConnPool is a pool of grpc.ClientConns.
type ConnPool interface {
    // Conn returns a ClientConn from the pool.
    //
    // Conns aren't returned to the pool.
    Conn() *grpc.ClientConn

    // Num returns the number of connections in the pool.
    //
    // It will always return the same value.
    Num() int

    // Close closes every ClientConn in the pool.
    //
    // The error returned by Close may be a single error or multiple errors.
    Close() error

    // ConnPool implements grpc.ClientConnInterface to enable it to be used directly with generated proto stubs.
    grpc.ClientConnInterface
}

vendor/google.golang.org/api/internal/creds.go (generated, vendored, new file) | 105
@@ -0,0 +1,105 @@
// Copyright 2017 Google LLC.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package internal

import (
    "context"
    "encoding/json"
    "fmt"
    "io/ioutil"

    "golang.org/x/oauth2"

    "golang.org/x/oauth2/google"
)

// Creds returns credential information obtained from DialSettings, or if none, then
// it returns default credential information.
func Creds(ctx context.Context, ds *DialSettings) (*google.Credentials, error) {
    if ds.Credentials != nil {
        return ds.Credentials, nil
    }
    if ds.CredentialsJSON != nil {
        return credentialsFromJSON(ctx, ds.CredentialsJSON, ds.Endpoint, ds.Scopes, ds.Audiences)
    }
    if ds.CredentialsFile != "" {
        data, err := ioutil.ReadFile(ds.CredentialsFile)
        if err != nil {
            return nil, fmt.Errorf("cannot read credentials file: %v", err)
        }
        return credentialsFromJSON(ctx, data, ds.Endpoint, ds.Scopes, ds.Audiences)
    }
    if ds.TokenSource != nil {
        return &google.Credentials{TokenSource: ds.TokenSource}, nil
    }
    cred, err := google.FindDefaultCredentials(ctx, ds.Scopes...)
    if err != nil {
        return nil, err
    }
    if len(cred.JSON) > 0 {
        return credentialsFromJSON(ctx, cred.JSON, ds.Endpoint, ds.Scopes, ds.Audiences)
    }
    // For GAE and GCE, the JSON is empty so return the default credentials directly.
    return cred, nil
}

// JSON key file type.
const (
    serviceAccountKey = "service_account"
)

// credentialsFromJSON returns a google.Credentials based on the input.
//
// - If the JSON is a service account and no scopes provided, returns self-signed JWT auth flow
// - Otherwise, returns OAuth 2.0 flow.
func credentialsFromJSON(ctx context.Context, data []byte, endpoint string, scopes []string, audiences []string) (*google.Credentials, error) {
    cred, err := google.CredentialsFromJSON(ctx, data, scopes...)
    if err != nil {
        return nil, err
    }
    if len(data) > 0 && len(scopes) == 0 {
        var f struct {
            Type string `json:"type"`
            // The rest JSON fields are omitted because they are not used.
        }
        if err := json.Unmarshal(cred.JSON, &f); err != nil {
            return nil, err
        }
        if f.Type == serviceAccountKey {
            ts, err := selfSignedJWTTokenSource(data, endpoint, audiences)
            if err != nil {
                return nil, err
            }
            cred.TokenSource = ts
        }
    }
    return cred, err
}

func selfSignedJWTTokenSource(data []byte, endpoint string, audiences []string) (oauth2.TokenSource, error) {
    // Use the API endpoint as the default audience
    audience := endpoint
    if len(audiences) > 0 {
        // TODO(shinfan): Update golang oauth to support multiple audiences.
        if len(audiences) > 1 {
            return nil, fmt.Errorf("multiple audiences support is not implemented")
        }
        audience = audiences[0]
    }
    return google.JWTAccessTokenSourceFromJSON(data, audience)
}

// QuotaProjectFromCreds returns the quota project from the JSON blob in the provided credentials.
//
// NOTE(cbro): consider promoting this to a field on google.Credentials.
func QuotaProjectFromCreds(cred *google.Credentials) string {
    var v struct {
        QuotaProject string `json:"quota_project_id"`
    }
    if err := json.Unmarshal(cred.JSON, &v); err != nil {
        return ""
    }
    return v.QuotaProject
}

@@ -11,8 +11,9 @@ import (
    "google.golang.org/api/googleapi"
)

// ResumableBuffer buffers data from an io.Reader to support uploading media in retryable chunks.
type ResumableBuffer struct {
// MediaBuffer buffers data from an io.Reader to support uploading media in
// retryable chunks. It should be created with NewMediaBuffer.
type MediaBuffer struct {
    media io.Reader

    chunk []byte // The current chunk which is pending upload. The capacity is the chunk size.

@@ -22,42 +23,43 @@ type ResumableBuffer struct {
    off int64
}

func NewResumableBuffer(media io.Reader, chunkSize int) *ResumableBuffer {
    return &ResumableBuffer{media: media, chunk: make([]byte, 0, chunkSize)}
// NewMediaBuffer initializes a MediaBuffer.
func NewMediaBuffer(media io.Reader, chunkSize int) *MediaBuffer {
    return &MediaBuffer{media: media, chunk: make([]byte, 0, chunkSize)}
}

// Chunk returns the current buffered chunk, the offset in the underlying media
// from which the chunk is drawn, and the size of the chunk.
// Successive calls to Chunk return the same chunk between calls to Next.
func (rb *ResumableBuffer) Chunk() (chunk io.Reader, off int64, size int, err error) {
func (mb *MediaBuffer) Chunk() (chunk io.Reader, off int64, size int, err error) {
    // There may already be data in chunk if Next has not been called since the previous call to Chunk.
    if rb.err == nil && len(rb.chunk) == 0 {
        rb.err = rb.loadChunk()
    if mb.err == nil && len(mb.chunk) == 0 {
        mb.err = mb.loadChunk()
    }
    return bytes.NewReader(rb.chunk), rb.off, len(rb.chunk), rb.err
    return bytes.NewReader(mb.chunk), mb.off, len(mb.chunk), mb.err
}

// loadChunk will read from media into chunk, up to the capacity of chunk.
func (rb *ResumableBuffer) loadChunk() error {
    bufSize := cap(rb.chunk)
    rb.chunk = rb.chunk[:bufSize]
func (mb *MediaBuffer) loadChunk() error {
    bufSize := cap(mb.chunk)
    mb.chunk = mb.chunk[:bufSize]

    read := 0
    var err error
    for err == nil && read < bufSize {
        var n int
        n, err = rb.media.Read(rb.chunk[read:])
        n, err = mb.media.Read(mb.chunk[read:])
        read += n
    }
    rb.chunk = rb.chunk[:read]
    mb.chunk = mb.chunk[:read]
    return err
}

// Next advances to the next chunk, which will be returned by the next call to Chunk.
// Calls to Next without a corresponding prior call to Chunk will have no effect.
func (rb *ResumableBuffer) Next() {
    rb.off += int64(len(rb.chunk))
    rb.chunk = rb.chunk[0:0]
func (mb *MediaBuffer) Next() {
    mb.off += int64(len(mb.chunk))
    mb.chunk = mb.chunk[0:0]
}

type readerTyper struct {

@@ -12,29 +12,43 @@ import (
)

// MarshalJSON returns a JSON encoding of schema containing only selected fields.
// A field is selected if:
// * it has a non-empty value, or
// * its field name is present in forceSendFields, and
// * it is not a nil pointer or nil interface.
// A field is selected if any of the following is true:
// * it has a non-empty value
// * its field name is present in forceSendFields and it is not a nil pointer or nil interface
// * its field name is present in nullFields.
// The JSON key for each selected field is taken from the field's json: struct tag.
func MarshalJSON(schema interface{}, forceSendFields []string) ([]byte, error) {
    if len(forceSendFields) == 0 {
func MarshalJSON(schema interface{}, forceSendFields, nullFields []string) ([]byte, error) {
    if len(forceSendFields) == 0 && len(nullFields) == 0 {
        return json.Marshal(schema)
    }

    mustInclude := make(map[string]struct{})
    mustInclude := make(map[string]bool)
    for _, f := range forceSendFields {
        mustInclude[f] = struct{}{}
        mustInclude[f] = true
    }
    useNull := make(map[string]bool)
    useNullMaps := make(map[string]map[string]bool)
    for _, nf := range nullFields {
        parts := strings.SplitN(nf, ".", 2)
        field := parts[0]
        if len(parts) == 1 {
            useNull[field] = true
        } else {
            if useNullMaps[field] == nil {
                useNullMaps[field] = map[string]bool{}
            }
            useNullMaps[field][parts[1]] = true
        }
    }

    dataMap, err := schemaToMap(schema, mustInclude)
    dataMap, err := schemaToMap(schema, mustInclude, useNull, useNullMaps)
    if err != nil {
        return nil, err
    }
    return json.Marshal(dataMap)
}

func schemaToMap(schema interface{}, mustInclude map[string]struct{}) (map[string]interface{}, error) {
func schemaToMap(schema interface{}, mustInclude, useNull map[string]bool, useNullMaps map[string]map[string]bool) (map[string]interface{}, error) {
    m := make(map[string]interface{})
    s := reflect.ValueOf(schema)
    st := s.Type()

@@ -54,10 +68,36 @@ func schemaToMap(schema interface{}, mustInclude map[strin
    v := s.Field(i)
    f := st.Field(i)

    if useNull[f.Name] {
        if !isEmptyValue(v) {
            return nil, fmt.Errorf("field %q in NullFields has non-empty value", f.Name)
        }
        m[tag.apiName] = nil
        continue
    }

    if !includeField(v, f, mustInclude) {
        continue
    }

    // If map fields are explicitly set to null, use a map[string]interface{}.
    if f.Type.Kind() == reflect.Map && useNullMaps[f.Name] != nil {
        ms, ok := v.Interface().(map[string]string)
        if !ok {
            return nil, fmt.Errorf("field %q has keys in NullFields but is not a map[string]string", f.Name)
        }
        mi := map[string]interface{}{}
        for k, v := range ms {
            mi[k] = v
        }
        for k := range useNullMaps[f.Name] {
            mi[k] = nil
        }
        m[tag.apiName] = mi
        continue
    }

    // nil maps are treated as empty maps.
    if f.Type.Kind() == reflect.Map && v.IsNil() {
        m[tag.apiName] = map[string]string{}

@@ -127,7 +167,7 @@ func parseJSONTag(val string) (jsonTag, error) {
}

// Reports whether the struct field "f" with value "v" should be included in JSON output.
func includeField(v reflect.Value, f reflect.StructField, mustInclude map[string]struct{}) bool {
func includeField(v reflect.Value, f reflect.StructField, mustInclude map[string]bool) bool {
    // The regular JSON encoding of a nil pointer is "null", which means "delete this field".
    // Therefore, we could enable field deletion by honoring pointer fields' presence in the mustInclude set.
    // However, many fields are not pointers, so there would be no way to delete these fields.

@@ -144,8 +184,7 @@ func includeField(v reflect.Value, f reflect.StructField, mustInclude map[string
        return false
    }

    _, ok := mustInclude[f.Name]
    return ok || !isEmptyValue(v)
    return mustInclude[f.Name] || !isEmptyValue(v)
}

// isEmptyValue reports whether v is the empty value for its type. This

vendor/google.golang.org/api/internal/gensupport/jsonfloat.go (generated, vendored, new file) | 47
@@ -0,0 +1,47 @@
// Copyright 2016 Google LLC.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package gensupport

import (
    "encoding/json"
    "errors"
    "fmt"
    "math"
)

// JSONFloat64 is a float64 that supports proper unmarshaling of special float
// values in JSON, according to
// https://developers.google.com/protocol-buffers/docs/proto3#json. Although
// that is a proto-to-JSON spec, it applies to all Google APIs.
//
// The jsonpb package
// (https://github.com/golang/protobuf/blob/master/jsonpb/jsonpb.go) has
// similar functionality, but only for direct translation from proto messages
// to JSON.
type JSONFloat64 float64

func (f *JSONFloat64) UnmarshalJSON(data []byte) error {
    var ff float64
    if err := json.Unmarshal(data, &ff); err == nil {
        *f = JSONFloat64(ff)
        return nil
    }
    var s string
    if err := json.Unmarshal(data, &s); err == nil {
        switch s {
        case "NaN":
            ff = math.NaN()
        case "Infinity":
            ff = math.Inf(1)
        case "-Infinity":
            ff = math.Inf(-1)
        default:
            return fmt.Errorf("google.golang.org/api/internal: bad float string %q", s)
        }
        *f = JSONFloat64(ff)
        return nil
    }
    return errors.New("google.golang.org/api/internal: data not float or string")
}

372
vendor/google.golang.org/api/internal/gensupport/media.go
generated
vendored
Normal file
372
vendor/google.golang.org/api/internal/gensupport/media.go
generated
vendored
Normal file
|
@ -0,0 +1,372 @@
|
|||
// Copyright 2016 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package gensupport
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"mime"
|
||||
"mime/multipart"
|
||||
"net/http"
|
||||
"net/textproto"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"google.golang.org/api/googleapi"
|
||||
)
|
||||
|
||||
const sniffBuffSize = 512
|
||||
|
||||
func newContentSniffer(r io.Reader) *contentSniffer {
|
||||
return &contentSniffer{r: r}
|
||||
}
|
||||
|
||||
// contentSniffer wraps a Reader, and reports the content type determined by sniffing up to 512 bytes from the Reader.
|
||||
type contentSniffer struct {
|
||||
r io.Reader
|
||||
start []byte // buffer for the sniffed bytes.
|
||||
err error // set to any error encountered while reading bytes to be sniffed.
|
||||
|
||||
ctype string // set on first sniff.
|
||||
sniffed bool // set to true on first sniff.
|
||||
}
|
||||
|
||||
func (cs *contentSniffer) Read(p []byte) (n int, err error) {
|
||||
// Ensure that the content type is sniffed before any data is consumed from Reader.
|
||||
_, _ = cs.ContentType()
|
||||
|
||||
if len(cs.start) > 0 {
|
||||
n := copy(p, cs.start)
|
||||
cs.start = cs.start[n:]
|
||||
return n, nil
|
||||
}
|
||||
|
||||
// We may have read some bytes into start while sniffing, even if the read ended in an error.
|
||||
// We should first return those bytes, then the error.
|
||||
if cs.err != nil {
|
||||
return 0, cs.err
|
||||
}
|
||||
|
||||
// Now we have handled all bytes that were buffered while sniffing. Now just delegate to the underlying reader.
|
||||
return cs.r.Read(p)
|
||||
}
|
||||
|
||||
// ContentType returns the sniffed content type, and whether the content type was succesfully sniffed.
|
||||
func (cs *contentSniffer) ContentType() (string, bool) {
|
||||
if cs.sniffed {
|
||||
return cs.ctype, cs.ctype != ""
|
||||
}
|
||||
cs.sniffed = true
|
||||
// If ReadAll hits EOF, it returns err==nil.
|
||||
cs.start, cs.err = ioutil.ReadAll(io.LimitReader(cs.r, sniffBuffSize))
|
||||
|
||||
// Don't try to detect the content type based on possibly incomplete data.
|
||||
if cs.err != nil {
|
||||
return "", false
|
||||
}
|
||||
|
||||
cs.ctype = http.DetectContentType(cs.start)
|
||||
return cs.ctype, true
|
||||
}
|
||||
|
||||
// DetermineContentType determines the content type of the supplied reader.
|
||||
// If the content type is already known, it can be specified via ctype.
|
||||
// Otherwise, the content of media will be sniffed to determine the content type.
|
||||
// If media implements googleapi.ContentTyper (deprecated), this will be used
|
||||
// instead of sniffing the content.
|
||||
// After calling DetectContentType the caller must not perform further reads on
|
||||
// media, but rather read from the Reader that is returned.
|
||||
func DetermineContentType(media io.Reader, ctype string) (io.Reader, string) {
|
||||
// Note: callers could avoid calling DetectContentType if ctype != "",
|
||||
// but doing the check inside this function reduces the amount of
|
||||
// generated code.
|
||||
if ctype != "" {
|
||||
return media, ctype
|
||||
}
|
||||
|
||||
// For backwards compatability, allow clients to set content
|
||||
// type by providing a ContentTyper for media.
|
||||
if typer, ok := media.(googleapi.ContentTyper); ok {
|
||||
return media, typer.ContentType()
|
||||
}
|
||||
|
||||
sniffer := newContentSniffer(media)
|
||||
if ctype, ok := sniffer.ContentType(); ok {
|
||||
return sniffer, ctype
|
||||
}
|
||||
// If content type could not be sniffed, reads from sniffer will eventually fail with an error.
|
||||
return sniffer, ""
|
||||
}
|
||||
|
||||
type typeReader struct {
|
||||
io.Reader
|
||||
typ string
|
||||
}
|
||||
|
||||
// multipartReader combines the contents of multiple readers to create a multipart/related HTTP body.
|
||||
// Close must be called if reads from the multipartReader are abandoned before reaching EOF.
|
||||
type multipartReader struct {
|
||||
pr *io.PipeReader
|
||||
ctype string
|
||||
mu sync.Mutex
|
||||
pipeOpen bool
|
||||
}
|
||||
|
||||
// boundary optionally specifies the MIME boundary
|
||||
func newMultipartReader(parts []typeReader, boundary string) *multipartReader {
|
||||
mp := &multipartReader{pipeOpen: true}
|
||||
var pw *io.PipeWriter
|
||||
mp.pr, pw = io.Pipe()
|
||||
mpw := multipart.NewWriter(pw)
|
||||
if boundary != "" {
|
||||
mpw.SetBoundary(boundary)
|
||||
}
|
||||
mp.ctype = "multipart/related; boundary=" + mpw.Boundary()
|
||||
go func() {
|
||||
for _, part := range parts {
|
||||
w, err := mpw.CreatePart(typeHeader(part.typ))
|
||||
if err != nil {
|
||||
mpw.Close()
|
||||
pw.CloseWithError(fmt.Errorf("googleapi: CreatePart failed: %v", err))
|
||||
return
|
||||
}
|
||||
_, err = io.Copy(w, part.Reader)
|
||||
if err != nil {
|
||||
mpw.Close()
|
||||
pw.CloseWithError(fmt.Errorf("googleapi: Copy failed: %v", err))
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
mpw.Close()
|
||||
pw.Close()
|
||||
}()
|
||||
return mp
|
||||
}
|
||||
|
||||
func (mp *multipartReader) Read(data []byte) (n int, err error) {
|
||||
return mp.pr.Read(data)
|
||||
}
|
||||
|
||||
func (mp *multipartReader) Close() error {
|
||||
mp.mu.Lock()
|
||||
if !mp.pipeOpen {
|
||||
mp.mu.Unlock()
|
||||
return nil
|
||||
}
|
||||
mp.pipeOpen = false
|
||||
mp.mu.Unlock()
|
||||
return mp.pr.Close()
|
||||
}
|
||||
|
||||
// CombineBodyMedia combines a json body with media content to create a multipart/related HTTP body.
|
||||
// It returns a ReadCloser containing the combined body, and the overall "multipart/related" content type, with random boundary.
|
||||
//
|
||||
// The caller must call Close on the returned ReadCloser if reads are abandoned before reaching EOF.
|
||||
func CombineBodyMedia(body io.Reader, bodyContentType string, media io.Reader, mediaContentType string) (io.ReadCloser, string) {
|
||||
return combineBodyMedia(body, bodyContentType, media, mediaContentType, "")
|
||||
}
|
||||
|
||||
// combineBodyMedia is CombineBodyMedia but with an optional mimeBoundary field.
|
||||
func combineBodyMedia(body io.Reader, bodyContentType string, media io.Reader, mediaContentType, mimeBoundary string) (io.ReadCloser, string) {
|
||||
mp := newMultipartReader([]typeReader{
|
||||
{body, bodyContentType},
|
||||
{media, mediaContentType},
|
||||
}, mimeBoundary)
|
||||
return mp, mp.ctype
|
||||
}
|
||||
|
||||
func typeHeader(contentType string) textproto.MIMEHeader {
|
||||
h := make(textproto.MIMEHeader)
|
||||
if contentType != "" {
|
||||
h.Set("Content-Type", contentType)
|
||||
}
|
||||
return h
|
||||
}
|
||||
|
||||
// PrepareUpload determines whether the data in the supplied reader should be
|
||||
// uploaded in a single request, or in sequential chunks.
|
||||
// chunkSize is the size of the chunk that media should be split into.
|
||||
//
|
||||
// If chunkSize is zero, media is returned as the first value, and the other
|
||||
// two return values are nil, true.
|
||||
//
|
||||
// Otherwise, a MediaBuffer is returned, along with a bool indicating whether the
|
||||
// contents of media fit in a single chunk.
|
||||
//
|
||||
// After PrepareUpload has been called, media should no longer be used: the
|
||||
// media content should be accessed via one of the return values.
|
||||
func PrepareUpload(media io.Reader, chunkSize int) (r io.Reader, mb *MediaBuffer, singleChunk bool) {
|
||||
if chunkSize == 0 { // do not chunk
|
||||
return media, nil, true
|
||||
}
|
||||
mb = NewMediaBuffer(media, chunkSize)
|
||||
_, _, _, err := mb.Chunk()
|
||||
// If err is io.EOF, we can upload this in a single request. Otherwise, err is
|
||||
// either nil or a non-EOF error. If it is the latter, then the next call to
|
||||
// mb.Chunk will return the same error. Returning a MediaBuffer ensures that this
|
||||
// error will be handled at some point.
|
||||
return nil, mb, err == io.EOF
|
||||
}
|
||||
|
||||
// MediaInfo holds information for media uploads. It is intended for use by generated
|
||||
// code only.
|
||||
type MediaInfo struct {
|
||||
// At most one of Media and MediaBuffer will be set.
|
||||
media io.Reader
|
||||
buffer *MediaBuffer
|
||||
singleChunk bool
|
||||
mType string
|
||||
size int64 // mediaSize, if known. Used only for calls to progressUpdater_.
|
||||
progressUpdater googleapi.ProgressUpdater
|
||||
}
|
||||
|
||||
// NewInfoFromMedia should be invoked from the Media method of a call. It returns a
|
||||
// MediaInfo populated with chunk size and content type, and a reader or MediaBuffer
|
||||
// if needed.
|
||||
func NewInfoFromMedia(r io.Reader, options []googleapi.MediaOption) *MediaInfo {
	mi := &MediaInfo{}
	opts := googleapi.ProcessMediaOptions(options)
	if !opts.ForceEmptyContentType {
		r, mi.mType = DetermineContentType(r, opts.ContentType)
	}
	mi.media, mi.buffer, mi.singleChunk = PrepareUpload(r, opts.ChunkSize)
	return mi
}

// NewInfoFromResumableMedia should be invoked from the ResumableMedia method of a
// call. It returns a MediaInfo using the given reader, size and media type.
func NewInfoFromResumableMedia(r io.ReaderAt, size int64, mediaType string) *MediaInfo {
	rdr := ReaderAtToReader(r, size)
	rdr, mType := DetermineContentType(rdr, mediaType)
	return &MediaInfo{
		size:        size,
		mType:       mType,
		buffer:      NewMediaBuffer(rdr, googleapi.DefaultUploadChunkSize),
		media:       nil,
		singleChunk: false,
	}
}

// SetProgressUpdater sets the progress updater for the media info.
func (mi *MediaInfo) SetProgressUpdater(pu googleapi.ProgressUpdater) {
	if mi != nil {
		mi.progressUpdater = pu
	}
}

// UploadType determines the type of upload: a single request, or a resumable
// series of requests.
func (mi *MediaInfo) UploadType() string {
	if mi.singleChunk {
		return "multipart"
	}
	return "resumable"
}

// UploadRequest sets up an HTTP request for media upload. It adds headers
// as necessary, and returns a replacement for the body and a function for http.Request.GetBody.
func (mi *MediaInfo) UploadRequest(reqHeaders http.Header, body io.Reader) (newBody io.Reader, getBody func() (io.ReadCloser, error), cleanup func()) {
	cleanup = func() {}
	if mi == nil {
		return body, nil, cleanup
	}
	var media io.Reader
	if mi.media != nil {
		// This only happens when the caller has turned off chunking. In that
		// case, we write all of media in a single non-retryable request.
		media = mi.media
	} else if mi.singleChunk {
		// The data fits in a single chunk, which has now been read into the MediaBuffer.
		// We obtain that chunk so we can write it in a single request. The request can
		// be retried because the data is stored in the MediaBuffer.
		media, _, _, _ = mi.buffer.Chunk()
	}
	if media != nil {
		fb := readerFunc(body)
		fm := readerFunc(media)
		combined, ctype := CombineBodyMedia(body, "application/json", media, mi.mType)
		toCleanup := []io.Closer{
			combined,
		}
		if fb != nil && fm != nil {
			getBody = func() (io.ReadCloser, error) {
				rb := ioutil.NopCloser(fb())
				rm := ioutil.NopCloser(fm())
				var mimeBoundary string
				if _, params, err := mime.ParseMediaType(ctype); err == nil {
					mimeBoundary = params["boundary"]
				}
				r, _ := combineBodyMedia(rb, "application/json", rm, mi.mType, mimeBoundary)
				toCleanup = append(toCleanup, r)
				return r, nil
			}
		}
		cleanup = func() {
			for _, closer := range toCleanup {
				_ = closer.Close()
			}
		}
		reqHeaders.Set("Content-Type", ctype)
		body = combined
	}
	if mi.buffer != nil && mi.mType != "" && !mi.singleChunk {
		reqHeaders.Set("X-Upload-Content-Type", mi.mType)
	}
	return body, getBody, cleanup
}

// readerFunc returns a function that always returns an io.Reader that has the same
// contents as r, provided that can be done without consuming r. Otherwise, it
// returns nil.
// See http.NewRequest (in net/http/request.go).
func readerFunc(r io.Reader) func() io.Reader {
	switch r := r.(type) {
	case *bytes.Buffer:
		buf := r.Bytes()
		return func() io.Reader { return bytes.NewReader(buf) }
	case *bytes.Reader:
		snapshot := *r
		return func() io.Reader { r := snapshot; return &r }
	case *strings.Reader:
		snapshot := *r
		return func() io.Reader { r := snapshot; return &r }
	default:
		return nil
	}
}

// ResumableUpload returns an appropriately configured ResumableUpload value if the
// upload is resumable, or nil otherwise.
func (mi *MediaInfo) ResumableUpload(locURI string) *ResumableUpload {
	if mi == nil || mi.singleChunk {
		return nil
	}
	return &ResumableUpload{
		URI:       locURI,
		Media:     mi.buffer,
		MediaType: mi.mType,
		Callback: func(curr int64) {
			if mi.progressUpdater != nil {
				mi.progressUpdater(curr, mi.size)
			}
		},
	}
}

// SetGetBody sets the GetBody field of req to f. This was once needed
// to gracefully support Go 1.7 and earlier which didn't have that
// field.
//
// Deprecated: the code generator no longer uses this as of
// 2019-02-19. Nothing else should be calling this anyway, but we
// won't delete this immediately; it will be deleted in as early as 6
// months.
func SetGetBody(req *http.Request, f func() (io.ReadCloser, error)) {
	req.GetBody = f
}
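For illustration only (not part of the vendored source), a minimal sketch of the replay guarantee that readerFunc and the GetBody closure above rely on, using the same *strings.Reader snapshot trick:

package main

import (
	"fmt"
	"io/ioutil"
	"strings"
)

func main() {
	// Mirror of the *strings.Reader case above: copying the reader's value
	// lets every call start from the same offset, no matter how much has
	// already been read from the original.
	r := strings.NewReader(`{"name":"object-1"}`)
	snapshot := *r
	replay := func() *strings.Reader { cp := snapshot; return &cp }

	first, _ := ioutil.ReadAll(replay())
	second, _ := ioutil.ReadAll(replay())
	fmt.Println(string(first) == string(second)) // true: both reads see the full body
}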
@ -43,6 +43,7 @@ func (u URLParams) Encode() string {
	return url.Values(u).Encode()
}

// SetOptions sets the URL params and any additional call options.
func SetOptions(u URLParams, opts ...googleapi.CallOption) {
	for _, o := range opts {
		u.Set(o.Get())
@ -5,21 +5,34 @@
package gensupport

import (
	"context"
	"errors"
	"fmt"
	"io"
	"net/http"
	"sync"
	"time"

	"golang.org/x/net/context"
	"golang.org/x/net/context/ctxhttp"
	gax "github.com/googleapis/gax-go/v2"
)

// Backoff is an interface around gax.Backoff's Pause method, allowing tests to provide their
// own implementation.
type Backoff interface {
	Pause() time.Duration
}

// These are declared as global variables so that tests can overwrite them.
var (
	retryDeadline = 32 * time.Second
	backoff       = func() Backoff {
		return &gax.Backoff{Initial: 100 * time.Millisecond}
	}
	// isRetryable is a platform-specific hook, specified in retryable_linux.go
	syscallRetryable func(error) bool = func(err error) bool { return false }
)

const (
	// statusResumeIncomplete is the code returned by the Google uploader
	// when the transfer is not yet complete.
	statusResumeIncomplete = 308

	// statusTooManyRequests is returned by the storage API if the
	// per-project limits have been temporarily exceeded. The request
	// should be retried.
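Because backoff and retryDeadline are package variables, a test can substitute a deterministic Backoff; a minimal sketch under that assumption (the noPause type and useNoPauseForTest helper are hypothetical, not part of the vendored file):

// noPause is a hypothetical test double satisfying the Backoff interface
// declared above: it never waits between retries.
type noPause struct{}

func (noPause) Pause() time.Duration { return 0 }

// useNoPauseForTest swaps the package-level backoff factory and returns a
// function that restores the original, for use with defer in a test.
func useNoPauseForTest() (restore func()) {
	old := backoff
	backoff = func() Backoff { return noPause{} }
	return func() { backoff = old }
}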
@ -35,7 +48,7 @@ type ResumableUpload struct {
	URI       string
	UserAgent string // User-Agent for header of the request
	// Media is the object being uploaded.
	Media *ResumableBuffer
	Media *MediaBuffer
	// MediaType defines the media type, e.g. "image/jpeg".
	MediaType string

@ -44,9 +57,6 @@ type ResumableUpload struct {

	// Callback is an optional function that will be periodically called with the cumulative number of bytes uploaded.
	Callback func(int64)

	// If not specified, a default exponential backoff strategy will be used.
	Backoff BackoffStrategy
}

// Progress returns the number of bytes uploaded at this point.
@ -80,8 +90,23 @@ func (rx *ResumableUpload) doUploadRequest(ctx context.Context, data io.Reader,
	req.Header.Set("Content-Range", contentRange)
	req.Header.Set("Content-Type", rx.MediaType)
	req.Header.Set("User-Agent", rx.UserAgent)
	return ctxhttp.Do(ctx, rx.Client, req)

	// Google's upload endpoint uses status code 308 for a
	// different purpose than the "308 Permanent Redirect"
	// since-standardized in RFC 7238. Because of the conflict in
	// semantics, Google added this new request header which
	// causes it to not use "308" and instead reply with 200 OK
	// and sets the upload-specific "X-HTTP-Status-Code-Override:
	// 308" response header.
	req.Header.Set("X-GUploader-No-308", "yes")

	return SendRequest(ctx, rx.Client, req)
}

func statusResumeIncomplete(resp *http.Response) bool {
	// This is how the server signals "status resume incomplete"
	// when X-GUploader-No-308 is set to "yes":
	return resp != nil && resp.Header.Get("X-Http-Status-Code-Override") == "308"
}

// reportProgress calls a user-supplied callback to report upload progress.
@ -112,87 +137,122 @@ func (rx *ResumableUpload) transferChunk(ctx context.Context) (*http.Response, e
		return res, err
	}

	if res.StatusCode == statusResumeIncomplete || res.StatusCode == http.StatusOK {
	// We sent "X-GUploader-No-308: yes" (see comment elsewhere in
	// this file), so we don't expect to get a 308.
	if res.StatusCode == 308 {
		return nil, errors.New("unexpected 308 response status code")
	}

	if res.StatusCode == http.StatusOK {
		rx.reportProgress(off, off+int64(size))
	}

	if res.StatusCode == statusResumeIncomplete {
	if statusResumeIncomplete(res) {
		rx.Media.Next()
	}
	return res, nil
}

func contextDone(ctx context.Context) bool {
	select {
	case <-ctx.Done():
		return true
	default:
		return false
	}
}

// Upload starts the process of a resumable upload with a cancellable context.
// It retries using the provided back off strategy until cancelled or the
// strategy indicates to stop retrying.
// It is called from the auto-generated API code and is not visible to the user.
// Before sending an HTTP request, Upload calls any registered hook functions,
// and calls the returned functions after the request returns (see send.go).
// rx is private to the auto-generated API code.
// Exactly one of resp or err will be nil. If resp is non-nil, the caller must call resp.Body.Close.
func (rx *ResumableUpload) Upload(ctx context.Context) (resp *http.Response, err error) {
	var pause time.Duration
	backoff := rx.Backoff
	if backoff == nil {
		backoff = DefaultBackoffStrategy()
	}

	for {
		// Ensure that we return in the case of cancelled context, even if pause is 0.
		if contextDone(ctx) {
			return nil, ctx.Err()
		}
		select {
		case <-ctx.Done():
			return nil, ctx.Err()
		case <-time.After(pause):
		}

		resp, err = rx.transferChunk(ctx)

		var status int
		if resp != nil {
			status = resp.StatusCode
		}

		// Check if we should retry the request.
		if shouldRetry(status, err) {
			var retry bool
			pause, retry = backoff.Pause()
			if retry {
				if resp != nil && resp.Body != nil {
					resp.Body.Close()
				}
				continue
			}
		}

		// If the chunk was uploaded successfully, but there's still
		// more to go, upload the next chunk without any delay.
		if status == statusResumeIncomplete {
			pause = 0
			backoff.Reset()
			resp.Body.Close()
			continue
		}

		// It's possible for err and resp to both be non-nil here, but we expose a simpler
		// contract to our callers: exactly one of resp and err will be non-nil. This means
		// that any response body must be closed here before returning a non-nil error.
	// There are a couple of cases where it's possible for err and resp to both
	// be non-nil. However, we expose a simpler contract to our callers: exactly
	// one of resp and err will be non-nil. This means that any response body
	// must be closed here before returning a non-nil error.
	var prepareReturn = func(resp *http.Response, err error) (*http.Response, error) {
		if err != nil {
			if resp != nil && resp.Body != nil {
				resp.Body.Close()
			}
			return nil, err
		}

		return resp, nil
	}

	// Send all chunks.
	for {
		var pause time.Duration

		// Each chunk gets its own initialized-at-zero retry.
		bo := backoff()
		quitAfter := time.After(retryDeadline)

		// Retry loop for a single chunk.
		for {
			select {
			case <-ctx.Done():
				if err == nil {
					err = ctx.Err()
				}
				return prepareReturn(resp, err)
			case <-time.After(pause):
			case <-quitAfter:
				return prepareReturn(resp, err)
			}

			resp, err = rx.transferChunk(ctx)

			var status int
			if resp != nil {
				status = resp.StatusCode
			}

			// Check if we should retry the request.
			if !shouldRetry(status, err) {
				break
			}

			pause = bo.Pause()
			if resp != nil && resp.Body != nil {
				resp.Body.Close()
			}
		}

		// If the chunk was uploaded successfully, but there's still
		// more to go, upload the next chunk without any delay.
		if statusResumeIncomplete(resp) {
			resp.Body.Close()
			continue
		}

		return prepareReturn(resp, err)
	}
}

// shouldRetry indicates whether an error is retryable for the purposes of this
// package, following guidance from
// https://cloud.google.com/storage/docs/exponential-backoff .
func shouldRetry(status int, err error) bool {
	if 500 <= status && status <= 599 {
		return true
	}
	if status == statusTooManyRequests {
		return true
	}
	if err == io.ErrUnexpectedEOF {
		return true
	}
	// Transient network errors should be retried.
	if syscallRetryable(err) {
		return true
	}
	if err, ok := err.(interface{ Temporary() bool }); ok {
		if err.Temporary() {
			return true
		}
	}
	// If Go 1.13 error unwrapping is available, use this to examine wrapped
	// errors.
	if err, ok := err.(interface{ Unwrap() error }); ok {
		return shouldRetry(status, err.Unwrap())
	}
	return false
}
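A small in-package sketch (hypothetical, not part of the vendored file) of how shouldRetry treats Go 1.13 wrapped errors versus a plain non-retryable status; it assumes "fmt" is imported:

func exampleShouldRetry() {
	// A wrapped io.ErrUnexpectedEOF stays retryable because shouldRetry
	// recurses through Unwrap() one level per call.
	wrapped := fmt.Errorf("copy chunk: %w", io.ErrUnexpectedEOF)
	_ = shouldRetry(0, wrapped) // true

	// An ordinary 404 with a nil error is not retried.
	_ = shouldRetry(http.StatusNotFound, nil) // false
}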
15
vendor/google.golang.org/api/internal/gensupport/retryable_linux.go
generated
vendored
Normal file
@ -0,0 +1,15 @@
// Copyright 2020 Google LLC.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build linux

package gensupport

import "syscall"

func init() {
	// Initialize syscallRetryable to return true on transient socket-level
	// errors. These errors are specific to Linux.
	syscallRetryable = func(err error) bool { return err == syscall.ECONNRESET || err == syscall.ECONNREFUSED }
}
172
vendor/google.golang.org/api/internal/gensupport/send.go
generated
vendored
Normal file
@ -0,0 +1,172 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package gensupport

import (
	"context"
	"encoding/json"
	"errors"
	"net/http"
	"time"
)

// Hook is the type of a function that is called once before each HTTP request
// that is sent by a generated API. It returns a function that is called after
// the request returns.
// Hooks are not called if the context is nil.
type Hook func(ctx context.Context, req *http.Request) func(resp *http.Response)

var hooks []Hook

// RegisterHook registers a Hook to be called before each HTTP request by a
// generated API. Hooks are called in the order they are registered. Each
// hook can return a function; if it is non-nil, it is called after the HTTP
// request returns. These functions are called in the reverse order.
// RegisterHook should not be called concurrently with itself or SendRequest.
func RegisterHook(h Hook) {
	hooks = append(hooks, h)
}

// SendRequest sends a single HTTP request using the given client.
// If ctx is non-nil, it calls all hooks, then sends the request with
// req.WithContext, then calls any functions returned by the hooks in
// reverse order.
func SendRequest(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error) {
	// Disallow Accept-Encoding because it interferes with the automatic gzip handling
	// done by the default http.Transport. See https://github.com/google/google-api-go-client/issues/219.
	if _, ok := req.Header["Accept-Encoding"]; ok {
		return nil, errors.New("google api: custom Accept-Encoding headers not allowed")
	}
	if ctx == nil {
		return client.Do(req)
	}
	// Call hooks in order of registration, store returned funcs.
	post := make([]func(resp *http.Response), len(hooks))
	for i, h := range hooks {
		fn := h(ctx, req)
		post[i] = fn
	}

	// Send request.
	resp, err := send(ctx, client, req)

	// Call returned funcs in reverse order.
	for i := len(post) - 1; i >= 0; i-- {
		if fn := post[i]; fn != nil {
			fn(resp)
		}
	}
	return resp, err
}

func send(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error) {
	if client == nil {
		client = http.DefaultClient
	}
	resp, err := client.Do(req.WithContext(ctx))
	// If we got an error, and the context has been canceled,
	// the context's error is probably more useful.
	if err != nil {
		select {
		case <-ctx.Done():
			err = ctx.Err()
		default:
		}
	}
	return resp, err
}

// SendRequestWithRetry sends a single HTTP request using the given client,
// with retries if a retryable error is returned.
// If ctx is non-nil, it calls all hooks, then sends the request with
// req.WithContext, then calls any functions returned by the hooks in
// reverse order.
func SendRequestWithRetry(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error) {
	// Disallow Accept-Encoding because it interferes with the automatic gzip handling
	// done by the default http.Transport. See https://github.com/google/google-api-go-client/issues/219.
	if _, ok := req.Header["Accept-Encoding"]; ok {
		return nil, errors.New("google api: custom Accept-Encoding headers not allowed")
	}
	if ctx == nil {
		return client.Do(req)
	}
	// Call hooks in order of registration, store returned funcs.
	post := make([]func(resp *http.Response), len(hooks))
	for i, h := range hooks {
		fn := h(ctx, req)
		post[i] = fn
	}

	// Send request with retry.
	resp, err := sendAndRetry(ctx, client, req)

	// Call returned funcs in reverse order.
	for i := len(post) - 1; i >= 0; i-- {
		if fn := post[i]; fn != nil {
			fn(resp)
		}
	}
	return resp, err
}

func sendAndRetry(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error) {
	if client == nil {
		client = http.DefaultClient
	}

	var resp *http.Response
	var err error

	// Loop to retry the request, up to the context deadline.
	var pause time.Duration
	bo := backoff()

	for {
		select {
		case <-ctx.Done():
			// If we got an error, and the context has been canceled,
			// the context's error is probably more useful.
			if err == nil {
				err = ctx.Err()
			}
			return resp, err
		case <-time.After(pause):
		}

		resp, err = client.Do(req.WithContext(ctx))

		var status int
		if resp != nil {
			status = resp.StatusCode
		}

		// Check if we can retry the request. A retry can only be done if the error
		// is retryable and the request body can be re-created using GetBody (this
		// will not be possible if the body was unbuffered).
		if req.GetBody == nil || !shouldRetry(status, err) {
			break
		}
		var errBody error
		req.Body, errBody = req.GetBody()
		if errBody != nil {
			break
		}

		pause = bo.Pause()
		if resp != nil && resp.Body != nil {
			resp.Body.Close()
		}
	}
	return resp, err
}

// DecodeResponse decodes the body of res into target. If there is no body,
// target is unchanged.
func DecodeResponse(target interface{}, res *http.Response) error {
	if res.StatusCode == http.StatusNoContent {
		return nil
	}
	return json.NewDecoder(res.Body).Decode(target)
}
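A hypothetical in-package sketch (not part of the vendored file) of the hook mechanism above: the outer function runs before the request is sent, and the closure it returns runs after the response comes back. It assumes the standard library "log" package is imported.

func registerLoggingHook() {
	RegisterHook(func(ctx context.Context, req *http.Request) func(*http.Response) {
		log.Printf("-> %s %s", req.Method, req.URL)
		return func(resp *http.Response) {
			if resp != nil {
				log.Printf("<- %d for %s", resp.StatusCode, req.URL)
			}
		}
	})
}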
53
vendor/google.golang.org/api/internal/gensupport/version.go
generated
vendored
Normal file
@ -0,0 +1,53 @@
// Copyright 2020 Google LLC. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package gensupport

import (
	"runtime"
	"strings"
	"unicode"
)

// GoVersion returns the Go runtime version. The returned string
// has no whitespace.
func GoVersion() string {
	return goVersion
}

var goVersion = goVer(runtime.Version())

const develPrefix = "devel +"

func goVer(s string) string {
	if strings.HasPrefix(s, develPrefix) {
		s = s[len(develPrefix):]
		if p := strings.IndexFunc(s, unicode.IsSpace); p >= 0 {
			s = s[:p]
		}
		return s
	}

	if strings.HasPrefix(s, "go1") {
		s = s[2:]
		var prerelease string
		if p := strings.IndexFunc(s, notSemverRune); p >= 0 {
			s, prerelease = s[:p], s[p:]
		}
		if strings.HasSuffix(s, ".") {
			s += "0"
		} else if strings.Count(s, ".") < 2 {
			s += ".0"
		}
		if prerelease != "" {
			s += "-" + prerelease
		}
		return s
	}
	return ""
}

func notSemverRune(r rune) bool {
	return !strings.ContainsRune("0123456789.", r)
}
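A few spot checks of goVer's normalization, derived from the branches above (illustrative only, not part of the vendored file; the inputs are hypothetical version strings):

// goVer("go1.16.3")                == "1.16.3"
// goVer("go1.16")                  == "1.16.0"        // ".0" appended for a two-part version
// goVer("go1.17beta1")             == "1.17.0-beta1"  // prerelease split off by notSemverRune
// goVer("devel +a1b2c3 Thu Jan 1") == "a1b2c3"        // devel builds keep only the token after "devel +"
// goVer("gccgo something")         == ""              // unrecognized prefixes map to the empty string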
12
vendor/google.golang.org/api/internal/service-account.json
generated
vendored
Normal file
12
vendor/google.golang.org/api/internal/service-account.json
generated
vendored
Normal file
|
@ -0,0 +1,12 @@
|
|||
{
|
||||
"type": "service_account",
|
||||
"project_id": "project_id",
|
||||
"private_key_id": "private_key_id",
|
||||
"private_key": "-----BEGIN PRIVATE KEY-----\nMIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQCzd9ZdbPLAR4/g\nj+Rodu15kEasMpxf/Mz+gKRb2fmgR2Y18Y/iRBYZ4SkmF2pBSfzvwE/aTCzSPBGl\njHhPzohXnSN029eWoItmxVONlqCbR29pD07aLzv08LGeIGdHIEdhVjhvRwTkYZIF\ndXmlHNDRUU/EbJN9D+3ahw22BNnC4PaDgfIWTs3xIlTCSf2rL39I4DSNLTS/LzxK\n/XrQfBMtfwMWwyQaemXbc7gRgzOy8L56wa1W1zyXx99th97j1bLnoAXBGplhB4Co\n25ohyDAuhxRm+XGMEaO0Mzo7u97kvhj48a569RH1QRhOf7EBf60jO4h5eOmfi5P5\nPV3l7041AgMBAAECggEAEZ0RTNoEeRqM5F067YW+iM/AH+ZXspP9Cn1VpC4gcbqQ\nLXsnw+0qvh97CmIB66Z3TJBzRdl0DK4YjUbcB/kdKHwjnrR01DOtesijCqJd4N+B\n762w73jzSXbV9872U+S3HLZ5k3JE6KUqz55X8fyCAgkY6w4862lEzs2yasrPFHEV\nRoQp3PM0Miif8R3hGDhOWcHxcobullthG6JHAQFfc1ctwEjZI4TK0iWqlzfWGyKN\nT9UgvjUDud5cGvS9el0AiLN6keAf77tcPn1zetUVhxN1KN4bVAm1Q+6O8esl63Rj\n7JXpHzxaRnit9S6/aH/twHsGGtLg5Puw6jey6xs4AQKBgQD2JNy1wzewCRkD+jug\n8CHbJ+LIJVRNIaWa/RK1QD8/UjmFPkIzRQSF3AKC5mRAWSa2FL3yVK3N/DD7hazW\n85XSBB7IDcnoJnA9SkUeWwqQGkDx3EntlU3gX8Kn/+ofF8O9jLXxAa901MAVXVuf\n5YDzrl4PNE3bFnPCdiNmSdRfhQKBgQC6p4DsCpwqbeTu9f5ak9VW/fQP47Fgt+Mf\nwGjBnKP5PbbNJpHCfamF7jqSRH83Xy0KNssH7jD/NZ2oT594sMmiQPUC5ni9VYY6\nsuYB0JbD5Mq+EjKIVhYtxaQJ76LzHreEI+G4z6k3H7/hRpr3/C48n9G/uVkT9DbJ\noplxxEx68QKBgQCdJ23vcwO0Firtmi/GEmtbVHz70rGfSXNFoHz4UlvPXv0wsE5u\nE4vOt2i3EMhDOWh46odYGG6bzH+tp2xyFTW70Dui+QLHgPs6dpfoyLHWzZxXj5F3\n6lK9hgZvYvqk/XRRKmzjwnK2wjsdqOyeC1covlR5mqh20D/6kZkKbur0TQKBgAwy\nCZBimRWEnKKoW/gbFKNccGfhXqONID/g2Hdd/rC4QYth68AjacIgcJ9B7nX1uAGk\n1tsryvPB0w0+NpMyKdp6GAgaeuUUA3MuYSzZLiCagEyu77JMvaI7+Z3UlHcCGMd/\neK4Uk1/QqT7U2Cc/yN2ZK6E1QQa2vCWshA4U31JhAoGAbtbSSSsul1c+PsJ13Cfk\n6qVnqYzPqt23QTyOZmGAvUHH/M4xRiQpOE0cDF4t/r5PwenAQPQzTvMmWRzj6uAY\n3eaU0eAK7ZfoweCoOIAPnpFbbRLrXfoY46H7MYh7euWGXOKEpxz5yzuEkd9ByNUE\n86vSEidqbMIiXVgEgnu/k08=\n-----END PRIVATE KEY-----\n",
|
||||
"client_email": "xyz@developer.gserviceaccount.com",
|
||||
"client_id": "123",
|
||||
"auth_uri": "https://accounts.google.com/o/oauth2/auth",
|
||||
"token_uri": "https://accounts.google.com/o/oauth2/token",
|
||||
"auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs",
|
||||
"client_x509_cert_url": "https://www.googleapis.com/robot/v1/metadata/x509/xyz%40developer.gserviceaccount.com"
|
||||
}
|
110
vendor/google.golang.org/api/internal/settings.go
generated
vendored
Normal file
110
vendor/google.golang.org/api/internal/settings.go
generated
vendored
Normal file
|
@ -0,0 +1,110 @@
|
|||
// Copyright 2017 Google LLC.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package internal supports the options and transport packages.
|
||||
package internal
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
"errors"
|
||||
"net/http"
|
||||
|
||||
"golang.org/x/oauth2"
|
||||
"golang.org/x/oauth2/google"
|
||||
"google.golang.org/grpc"
|
||||
)
|
||||
|
||||
// DialSettings holds information needed to establish a connection with a
|
||||
// Google API service.
|
||||
type DialSettings struct {
|
||||
Endpoint string
|
||||
DefaultEndpoint string
|
||||
DefaultMTLSEndpoint string
|
||||
Scopes []string
|
||||
TokenSource oauth2.TokenSource
|
||||
Credentials *google.Credentials
|
||||
CredentialsFile string // if set, Token Source is ignored.
|
||||
CredentialsJSON []byte
|
||||
UserAgent string
|
||||
APIKey string
|
||||
Audiences []string
|
||||
HTTPClient *http.Client
|
||||
GRPCDialOpts []grpc.DialOption
|
||||
GRPCConn *grpc.ClientConn
|
||||
GRPCConnPool ConnPool
|
||||
GRPCConnPoolSize int
|
||||
NoAuth bool
|
||||
TelemetryDisabled bool
|
||||
ClientCertSource func(*tls.CertificateRequestInfo) (*tls.Certificate, error)
|
||||
CustomClaims map[string]interface{}
|
||||
SkipValidation bool
|
||||
|
||||
// Google API system parameters. For more information please read:
|
||||
// https://cloud.google.com/apis/docs/system-parameters
|
||||
QuotaProject string
|
||||
RequestReason string
|
||||
}
|
||||
|
||||
// Validate reports an error if ds is invalid.
|
||||
func (ds *DialSettings) Validate() error {
|
||||
if ds.SkipValidation {
|
||||
return nil
|
||||
}
|
||||
hasCreds := ds.APIKey != "" || ds.TokenSource != nil || ds.CredentialsFile != "" || ds.Credentials != nil
|
||||
if ds.NoAuth && hasCreds {
|
||||
return errors.New("options.WithoutAuthentication is incompatible with any option that provides credentials")
|
||||
}
|
||||
// Credentials should not appear with other options.
|
||||
// We currently allow TokenSource and CredentialsFile to coexist.
|
||||
// TODO(jba): make TokenSource & CredentialsFile an error (breaking change).
|
||||
nCreds := 0
|
||||
if ds.Credentials != nil {
|
||||
nCreds++
|
||||
}
|
||||
if ds.CredentialsJSON != nil {
|
||||
nCreds++
|
||||
}
|
||||
if ds.CredentialsFile != "" {
|
||||
nCreds++
|
||||
}
|
||||
if ds.APIKey != "" {
|
||||
nCreds++
|
||||
}
|
||||
if ds.TokenSource != nil {
|
||||
nCreds++
|
||||
}
|
||||
if len(ds.Scopes) > 0 && len(ds.Audiences) > 0 {
|
||||
return errors.New("WithScopes is incompatible with WithAudience")
|
||||
}
|
||||
// Accept only one form of credentials, except we allow TokenSource and CredentialsFile for backwards compatibility.
|
||||
if nCreds > 1 && !(nCreds == 2 && ds.TokenSource != nil && ds.CredentialsFile != "") {
|
||||
return errors.New("multiple credential options provided")
|
||||
}
|
||||
if ds.GRPCConn != nil && ds.GRPCConnPool != nil {
|
||||
return errors.New("WithGRPCConn is incompatible with WithConnPool")
|
||||
}
|
||||
if ds.HTTPClient != nil && ds.GRPCConnPool != nil {
|
||||
return errors.New("WithHTTPClient is incompatible with WithConnPool")
|
||||
}
|
||||
if ds.HTTPClient != nil && ds.GRPCConn != nil {
|
||||
return errors.New("WithHTTPClient is incompatible with WithGRPCConn")
|
||||
}
|
||||
if ds.HTTPClient != nil && ds.GRPCDialOpts != nil {
|
||||
return errors.New("WithHTTPClient is incompatible with gRPC dial options")
|
||||
}
|
||||
if ds.HTTPClient != nil && ds.QuotaProject != "" {
|
||||
return errors.New("WithHTTPClient is incompatible with QuotaProject")
|
||||
}
|
||||
if ds.HTTPClient != nil && ds.RequestReason != "" {
|
||||
return errors.New("WithHTTPClient is incompatible with RequestReason")
|
||||
}
|
||||
if ds.HTTPClient != nil && ds.ClientCertSource != nil {
|
||||
return errors.New("WithHTTPClient is incompatible with WithClientCertSource")
|
||||
}
|
||||
if ds.ClientCertSource != nil && (ds.GRPCConn != nil || ds.GRPCConnPool != nil || ds.GRPCConnPoolSize != 0 || ds.GRPCDialOpts != nil) {
|
||||
return errors.New("WithClientCertSource is currently only supported for HTTP. gRPC settings are incompatible")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
27
vendor/google.golang.org/api/internal/third_party/uritemplates/LICENSE
generated
vendored
Normal file
27
vendor/google.golang.org/api/internal/third_party/uritemplates/LICENSE
generated
vendored
Normal file
|
@ -0,0 +1,27 @@
|
|||
Copyright (c) 2013 Joshua Tacoma. All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above
|
||||
copyright notice, this list of conditions and the following disclaimer
|
||||
in the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the name of Google Inc. nor the names of its
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
14
vendor/google.golang.org/api/internal/third_party/uritemplates/METADATA
generated
vendored
Normal file
14
vendor/google.golang.org/api/internal/third_party/uritemplates/METADATA
generated
vendored
Normal file
|
@ -0,0 +1,14 @@
|
|||
name: "uritemplates"
|
||||
description:
|
||||
"Package uritemplates is a level 4 implementation of RFC 6570 (URI "
|
||||
"Template, http://tools.ietf.org/html/rfc6570)."
|
||||
|
||||
third_party {
|
||||
url {
|
||||
type: GIT
|
||||
value: "https://github.com/jtacoma/uritemplates"
|
||||
}
|
||||
version: "0.1"
|
||||
last_upgrade_date { year: 2014 month: 8 day: 18 }
|
||||
license_type: NOTICE
|
||||
}
|
|
@ -34,11 +34,37 @@ func pctEncode(src []byte) []byte {
|
|||
return dst
|
||||
}
|
||||
|
||||
func escape(s string, allowReserved bool) string {
|
||||
// pairWriter is a convenience struct which allows escaped and unescaped
|
||||
// versions of the template to be written in parallel.
|
||||
type pairWriter struct {
|
||||
escaped, unescaped bytes.Buffer
|
||||
}
|
||||
|
||||
// Write writes the provided string directly without any escaping.
|
||||
func (w *pairWriter) Write(s string) {
|
||||
w.escaped.WriteString(s)
|
||||
w.unescaped.WriteString(s)
|
||||
}
|
||||
|
||||
// Escape writes the provided string, escaping the string for the
|
||||
// escaped output.
|
||||
func (w *pairWriter) Escape(s string, allowReserved bool) {
|
||||
w.unescaped.WriteString(s)
|
||||
if allowReserved {
|
||||
return string(reserved.ReplaceAllFunc([]byte(s), pctEncode))
|
||||
w.escaped.Write(reserved.ReplaceAllFunc([]byte(s), pctEncode))
|
||||
} else {
|
||||
w.escaped.Write(unreserved.ReplaceAllFunc([]byte(s), pctEncode))
|
||||
}
|
||||
return string(unreserved.ReplaceAllFunc([]byte(s), pctEncode))
|
||||
}
|
||||
|
||||
// Escaped returns the escaped string.
|
||||
func (w *pairWriter) Escaped() string {
|
||||
return w.escaped.String()
|
||||
}
|
||||
|
||||
// Unescaped returns the unescaped string.
|
||||
func (w *pairWriter) Unescaped() string {
|
||||
return w.unescaped.String()
|
||||
}
|
||||
|
||||
// A uriTemplate is a parsed representation of a URI template.
|
||||
|
@ -165,23 +191,25 @@ func parseTerm(term string) (result templateTerm, err error) {
|
|||
err = errors.New("not a valid name: " + result.name)
|
||||
}
|
||||
if result.explode && result.truncate > 0 {
|
||||
err = errors.New("both explode and prefix modifers on same term")
|
||||
err = errors.New("both explode and prefix modifiers on same term")
|
||||
}
|
||||
return result, err
|
||||
}
|
||||
|
||||
// Expand expands a URI template with a set of values to produce a string.
|
||||
func (t *uriTemplate) Expand(values map[string]string) string {
|
||||
var buf bytes.Buffer
|
||||
// Expand expands a URI template with a set of values to produce the
|
||||
// resultant URI. Two forms of the result are returned: one with all the
|
||||
// elements escaped, and one with the elements unescaped.
|
||||
func (t *uriTemplate) Expand(values map[string]string) (escaped, unescaped string) {
|
||||
var w pairWriter
|
||||
for _, p := range t.parts {
|
||||
p.expand(&buf, values)
|
||||
p.expand(&w, values)
|
||||
}
|
||||
return buf.String()
|
||||
return w.Escaped(), w.Unescaped()
|
||||
}
|
||||
|
||||
func (tp *templatePart) expand(buf *bytes.Buffer, values map[string]string) {
|
||||
func (tp *templatePart) expand(w *pairWriter, values map[string]string) {
|
||||
if len(tp.raw) > 0 {
|
||||
buf.WriteString(tp.raw)
|
||||
w.Write(tp.raw)
|
||||
return
|
||||
}
|
||||
var first = true
|
||||
|
@ -191,30 +219,30 @@ func (tp *templatePart) expand(buf *bytes.Buffer, values map[string]string) {
|
|||
continue
|
||||
}
|
||||
if first {
|
||||
buf.WriteString(tp.first)
|
||||
w.Write(tp.first)
|
||||
first = false
|
||||
} else {
|
||||
buf.WriteString(tp.sep)
|
||||
w.Write(tp.sep)
|
||||
}
|
||||
tp.expandString(buf, term, value)
|
||||
tp.expandString(w, term, value)
|
||||
}
|
||||
}
|
||||
|
||||
func (tp *templatePart) expandName(buf *bytes.Buffer, name string, empty bool) {
|
||||
func (tp *templatePart) expandName(w *pairWriter, name string, empty bool) {
|
||||
if tp.named {
|
||||
buf.WriteString(name)
|
||||
w.Write(name)
|
||||
if empty {
|
||||
buf.WriteString(tp.ifemp)
|
||||
w.Write(tp.ifemp)
|
||||
} else {
|
||||
buf.WriteString("=")
|
||||
w.Write("=")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (tp *templatePart) expandString(buf *bytes.Buffer, t templateTerm, s string) {
|
||||
func (tp *templatePart) expandString(w *pairWriter, t templateTerm, s string) {
|
||||
if len(s) > t.truncate && t.truncate > 0 {
|
||||
s = s[:t.truncate]
|
||||
}
|
||||
tp.expandName(buf, t.name, len(s) == 0)
|
||||
buf.WriteString(escape(s, tp.allowReserved))
|
||||
tp.expandName(w, t.name, len(s) == 0)
|
||||
w.Escape(s, tp.allowReserved)
|
||||
}
|
17
vendor/google.golang.org/api/internal/third_party/uritemplates/utils.go
generated
vendored
Normal file
@ -0,0 +1,17 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package uritemplates

// Expand parses then expands a URI template with a set of values to produce
// the resultant URI. Two forms of the result are returned: one with all the
// elements escaped, and one with the elements unescaped.
func Expand(path string, values map[string]string) (escaped, unescaped string, err error) {
	template, err := parse(path)
	if err != nil {
		return "", "", err
	}
	escaped, unescaped = template.Expand(values)
	return escaped, unescaped, nil
}
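A hypothetical illustration (not part of the vendored file) of the two result forms, assuming the RFC 6570 simple string expansion implemented above; the template and values are placeholders:

func exampleExpand() {
	escaped, unescaped, err := Expand("b/{bucket}/o/{object}", map[string]string{
		"bucket": "my-bucket",
		"object": "docs/readme.txt",
	})
	_ = err // nil for a well-formed template
	// escaped   == "b/my-bucket/o/docs%2Freadme.txt"  (reserved "/" in the value is percent-encoded)
	// unescaped == "b/my-bucket/o/docs/readme.txt"
	_, _ = escaped, unescaped
}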
|
23
vendor/google.golang.org/api/option/credentials_go19.go
generated
vendored
Normal file
23
vendor/google.golang.org/api/option/credentials_go19.go
generated
vendored
Normal file
|
@ -0,0 +1,23 @@
|
|||
// Copyright 2018 Google LLC.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build go1.9
|
||||
|
||||
package option
|
||||
|
||||
import (
|
||||
"golang.org/x/oauth2/google"
|
||||
"google.golang.org/api/internal"
|
||||
)
|
||||
|
||||
type withCreds google.Credentials
|
||||
|
||||
func (w *withCreds) Apply(o *internal.DialSettings) {
|
||||
o.Credentials = (*google.Credentials)(w)
|
||||
}
|
||||
|
||||
// WithCredentials returns a ClientOption that authenticates API calls.
|
||||
func WithCredentials(creds *google.Credentials) ClientOption {
|
||||
return (*withCreds)(creds)
|
||||
}
|
22
vendor/google.golang.org/api/option/credentials_notgo19.go
generated
vendored
Normal file
22
vendor/google.golang.org/api/option/credentials_notgo19.go
generated
vendored
Normal file
|
@ -0,0 +1,22 @@
|
|||
// Copyright 2018 Google LLC.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build !go1.9
|
||||
|
||||
package option
|
||||
|
||||
import (
|
||||
"golang.org/x/oauth2/google"
|
||||
"google.golang.org/api/internal"
|
||||
)
|
||||
|
||||
type withCreds google.DefaultCredentials
|
||||
|
||||
func (w *withCreds) Apply(o *internal.DialSettings) {
|
||||
o.Credentials = (*google.DefaultCredentials)(w)
|
||||
}
|
||||
|
||||
func WithCredentials(creds *google.DefaultCredentials) ClientOption {
|
||||
return (*withCreds)(creds)
|
||||
}
|
52
vendor/google.golang.org/api/option/internaloption/internaloption.go
generated
vendored
Normal file
52
vendor/google.golang.org/api/option/internaloption/internaloption.go
generated
vendored
Normal file
|
@ -0,0 +1,52 @@
|
|||
// Copyright 2020 Google LLC.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package internaloption contains options used internally by Google client code.
|
||||
package internaloption
|
||||
|
||||
import (
|
||||
"google.golang.org/api/internal"
|
||||
"google.golang.org/api/option"
|
||||
)
|
||||
|
||||
type defaultEndpointOption string
|
||||
|
||||
func (o defaultEndpointOption) Apply(settings *internal.DialSettings) {
|
||||
settings.DefaultEndpoint = string(o)
|
||||
}
|
||||
|
||||
// WithDefaultEndpoint is an option that indicates the default endpoint.
|
||||
//
|
||||
// It should only be used internally by generated clients.
|
||||
//
|
||||
// This is similar to WithEndpoint, but allows us to determine whether the user has overridden the default endpoint.
|
||||
func WithDefaultEndpoint(url string) option.ClientOption {
|
||||
return defaultEndpointOption(url)
|
||||
}
|
||||
|
||||
type defaultMTLSEndpointOption string
|
||||
|
||||
func (o defaultMTLSEndpointOption) Apply(settings *internal.DialSettings) {
|
||||
settings.DefaultMTLSEndpoint = string(o)
|
||||
}
|
||||
|
||||
// WithDefaultMTLSEndpoint is an option that indicates the default mTLS endpoint.
|
||||
//
|
||||
// It should only be used internally by generated clients.
|
||||
func WithDefaultMTLSEndpoint(url string) option.ClientOption {
|
||||
return defaultMTLSEndpointOption(url)
|
||||
}
|
||||
|
||||
// SkipDialSettingsValidation bypasses validation on ClientOptions.
|
||||
//
|
||||
// It should only be used internally.
|
||||
func SkipDialSettingsValidation() option.ClientOption {
|
||||
return skipDialSettingsValidation{}
|
||||
}
|
||||
|
||||
type skipDialSettingsValidation struct{}
|
||||
|
||||
func (s skipDialSettingsValidation) Apply(settings *internal.DialSettings) {
|
||||
settings.SkipValidation = true
|
||||
}
|
271
vendor/google.golang.org/api/option/option.go
generated
vendored
Normal file
271
vendor/google.golang.org/api/option/option.go
generated
vendored
Normal file
|
@ -0,0 +1,271 @@
|
|||
// Copyright 2017 Google LLC.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package option contains options for Google API clients.
|
||||
package option
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
"net/http"
|
||||
|
||||
"golang.org/x/oauth2"
|
||||
"google.golang.org/api/internal"
|
||||
"google.golang.org/grpc"
|
||||
)
|
||||
|
||||
// A ClientOption is an option for a Google API client.
|
||||
type ClientOption interface {
|
||||
Apply(*internal.DialSettings)
|
||||
}
|
||||
|
||||
// WithTokenSource returns a ClientOption that specifies an OAuth2 token
|
||||
// source to be used as the basis for authentication.
|
||||
func WithTokenSource(s oauth2.TokenSource) ClientOption {
|
||||
return withTokenSource{s}
|
||||
}
|
||||
|
||||
type withTokenSource struct{ ts oauth2.TokenSource }
|
||||
|
||||
func (w withTokenSource) Apply(o *internal.DialSettings) {
|
||||
o.TokenSource = w.ts
|
||||
}
|
||||
|
||||
type withCredFile string
|
||||
|
||||
func (w withCredFile) Apply(o *internal.DialSettings) {
|
||||
o.CredentialsFile = string(w)
|
||||
}
|
||||
|
||||
// WithCredentialsFile returns a ClientOption that authenticates
|
||||
// API calls with the given service account or refresh token JSON
|
||||
// credentials file.
|
||||
func WithCredentialsFile(filename string) ClientOption {
|
||||
return withCredFile(filename)
|
||||
}
|
||||
|
||||
// WithServiceAccountFile returns a ClientOption that uses a Google service
|
||||
// account credentials file to authenticate.
|
||||
//
|
||||
// Deprecated: Use WithCredentialsFile instead.
|
||||
func WithServiceAccountFile(filename string) ClientOption {
|
||||
return WithCredentialsFile(filename)
|
||||
}
|
||||
|
||||
// WithCredentialsJSON returns a ClientOption that authenticates
|
||||
// API calls with the given service account or refresh token JSON
|
||||
// credentials.
|
||||
func WithCredentialsJSON(p []byte) ClientOption {
|
||||
return withCredentialsJSON(p)
|
||||
}
|
||||
|
||||
type withCredentialsJSON []byte
|
||||
|
||||
func (w withCredentialsJSON) Apply(o *internal.DialSettings) {
|
||||
o.CredentialsJSON = make([]byte, len(w))
|
||||
copy(o.CredentialsJSON, w)
|
||||
}
|
||||
|
||||
// WithEndpoint returns a ClientOption that overrides the default endpoint
|
||||
// to be used for a service.
|
||||
func WithEndpoint(url string) ClientOption {
|
||||
return withEndpoint(url)
|
||||
}
|
||||
|
||||
type withEndpoint string
|
||||
|
||||
func (w withEndpoint) Apply(o *internal.DialSettings) {
|
||||
o.Endpoint = string(w)
|
||||
}
|
||||
|
||||
// WithScopes returns a ClientOption that overrides the default OAuth2 scopes
|
||||
// to be used for a service.
|
||||
func WithScopes(scope ...string) ClientOption {
|
||||
return withScopes(scope)
|
||||
}
|
||||
|
||||
type withScopes []string
|
||||
|
||||
func (w withScopes) Apply(o *internal.DialSettings) {
|
||||
o.Scopes = make([]string, len(w))
|
||||
copy(o.Scopes, w)
|
||||
}
|
||||
|
||||
// WithUserAgent returns a ClientOption that sets the User-Agent.
|
||||
func WithUserAgent(ua string) ClientOption {
|
||||
return withUA(ua)
|
||||
}
|
||||
|
||||
type withUA string
|
||||
|
||||
func (w withUA) Apply(o *internal.DialSettings) { o.UserAgent = string(w) }
|
||||
|
||||
// WithHTTPClient returns a ClientOption that specifies the HTTP client to use
|
||||
// as the basis of communications. This option may only be used with services
|
||||
// that support HTTP as their communication transport. When used, the
|
||||
// WithHTTPClient option takes precedence over all other supplied options.
|
||||
func WithHTTPClient(client *http.Client) ClientOption {
|
||||
return withHTTPClient{client}
|
||||
}
|
||||
|
||||
type withHTTPClient struct{ client *http.Client }
|
||||
|
||||
func (w withHTTPClient) Apply(o *internal.DialSettings) {
|
||||
o.HTTPClient = w.client
|
||||
}
|
||||
|
||||
// WithGRPCConn returns a ClientOption that specifies the gRPC client
|
||||
// connection to use as the basis of communications. This option may only be
|
||||
// used with services that support gRPC as their communication transport. When
|
||||
// used, the WithGRPCConn option takes precedence over all other supplied
|
||||
// options.
|
||||
func WithGRPCConn(conn *grpc.ClientConn) ClientOption {
|
||||
return withGRPCConn{conn}
|
||||
}
|
||||
|
||||
type withGRPCConn struct{ conn *grpc.ClientConn }
|
||||
|
||||
func (w withGRPCConn) Apply(o *internal.DialSettings) {
|
||||
o.GRPCConn = w.conn
|
||||
}
|
||||
|
||||
// WithGRPCDialOption returns a ClientOption that appends a new grpc.DialOption
|
||||
// to an underlying gRPC dial. It does not work with WithGRPCConn.
|
||||
func WithGRPCDialOption(opt grpc.DialOption) ClientOption {
|
||||
return withGRPCDialOption{opt}
|
||||
}
|
||||
|
||||
type withGRPCDialOption struct{ opt grpc.DialOption }
|
||||
|
||||
func (w withGRPCDialOption) Apply(o *internal.DialSettings) {
|
||||
o.GRPCDialOpts = append(o.GRPCDialOpts, w.opt)
|
||||
}
|
||||
|
||||
// WithGRPCConnectionPool returns a ClientOption that creates a pool of gRPC
|
||||
// connections that requests will be balanced between.
|
||||
//
|
||||
// This is an EXPERIMENTAL API and may be changed or removed in the future.
|
||||
func WithGRPCConnectionPool(size int) ClientOption {
|
||||
return withGRPCConnectionPool(size)
|
||||
}
|
||||
|
||||
type withGRPCConnectionPool int
|
||||
|
||||
func (w withGRPCConnectionPool) Apply(o *internal.DialSettings) {
|
||||
o.GRPCConnPoolSize = int(w)
|
||||
}
|
||||
|
||||
// WithAPIKey returns a ClientOption that specifies an API key to be used
|
||||
// as the basis for authentication.
|
||||
//
|
||||
// API Keys can only be used for JSON-over-HTTP APIs, including those under
|
||||
// the import path google.golang.org/api/....
|
||||
func WithAPIKey(apiKey string) ClientOption {
|
||||
return withAPIKey(apiKey)
|
||||
}
|
||||
|
||||
type withAPIKey string
|
||||
|
||||
func (w withAPIKey) Apply(o *internal.DialSettings) { o.APIKey = string(w) }
|
||||
|
||||
// WithAudiences returns a ClientOption that specifies an audience to be used
|
||||
// as the audience field ("aud") for the JWT token authentication.
|
||||
func WithAudiences(audience ...string) ClientOption {
|
||||
return withAudiences(audience)
|
||||
}
|
||||
|
||||
type withAudiences []string
|
||||
|
||||
func (w withAudiences) Apply(o *internal.DialSettings) {
|
||||
o.Audiences = make([]string, len(w))
|
||||
copy(o.Audiences, w)
|
||||
}
|
||||
|
||||
// WithoutAuthentication returns a ClientOption that specifies that no
|
||||
// authentication should be used. It is suitable only for testing and for
|
||||
// accessing public resources, like public Google Cloud Storage buckets.
|
||||
// It is an error to provide both WithoutAuthentication and any of WithAPIKey,
|
||||
// WithTokenSource, WithCredentialsFile or WithServiceAccountFile.
|
||||
func WithoutAuthentication() ClientOption {
|
||||
return withoutAuthentication{}
|
||||
}
|
||||
|
||||
type withoutAuthentication struct{}
|
||||
|
||||
func (w withoutAuthentication) Apply(o *internal.DialSettings) { o.NoAuth = true }
|
||||
|
||||
// WithQuotaProject returns a ClientOption that specifies the project used
|
||||
// for quota and billing purposes.
|
||||
//
|
||||
// For more information please read:
|
||||
// https://cloud.google.com/apis/docs/system-parameters
|
||||
func WithQuotaProject(quotaProject string) ClientOption {
|
||||
return withQuotaProject(quotaProject)
|
||||
}
|
||||
|
||||
type withQuotaProject string
|
||||
|
||||
func (w withQuotaProject) Apply(o *internal.DialSettings) {
|
||||
o.QuotaProject = string(w)
|
||||
}
|
||||
|
||||
// WithRequestReason returns a ClientOption that specifies a reason for
|
||||
// making the request, which is intended to be recorded in audit logging.
|
||||
// An example reason would be a support-case ticket number.
|
||||
//
|
||||
// For more information please read:
|
||||
// https://cloud.google.com/apis/docs/system-parameters
|
||||
func WithRequestReason(requestReason string) ClientOption {
|
||||
return withRequestReason(requestReason)
|
||||
}
|
||||
|
||||
type withRequestReason string
|
||||
|
||||
func (w withRequestReason) Apply(o *internal.DialSettings) {
|
||||
o.RequestReason = string(w)
|
||||
}
|
||||
|
||||
// WithTelemetryDisabled returns a ClientOption that disables default telemetry (OpenCensus)
|
||||
// settings on gRPC and HTTP clients.
|
||||
// An example reason would be to bind custom telemetry that overrides the defaults.
|
||||
func WithTelemetryDisabled() ClientOption {
|
||||
return withTelemetryDisabled{}
|
||||
}
|
||||
|
||||
type withTelemetryDisabled struct{}
|
||||
|
||||
func (w withTelemetryDisabled) Apply(o *internal.DialSettings) {
|
||||
o.TelemetryDisabled = true
|
||||
}
|
||||
|
||||
// ClientCertSource is a function that returns a TLS client certificate to be used
|
||||
// when opening TLS connections.
|
||||
//
|
||||
// It follows the same semantics as crypto/tls.Config.GetClientCertificate.
|
||||
//
|
||||
// This is an EXPERIMENTAL API and may be changed or removed in the future.
|
||||
type ClientCertSource = func(*tls.CertificateRequestInfo) (*tls.Certificate, error)
|
||||
|
||||
// WithClientCertSource returns a ClientOption that specifies a
|
||||
// callback function for obtaining a TLS client certificate.
|
||||
//
|
||||
// This option is used for supporting mTLS authentication, where the
|
||||
// server validates the client certificate when establishing a connection.
|
||||
//
|
||||
// The callback function will be invoked whenever the server requests a
|
||||
// certificate from the client. Implementations of the callback function
|
||||
// should try to ensure that a valid certificate can be repeatedly returned
|
||||
// on demand for the entire life cycle of the transport client. If a nil
|
||||
// Certificate is returned (i.e. no Certificate can be obtained), an error
|
||||
// should be returned.
|
||||
//
|
||||
// This is an EXPERIMENTAL API and may be changed or removed in the future.
|
||||
func WithClientCertSource(s ClientCertSource) ClientOption {
|
||||
return withClientCertSource{s}
|
||||
}
|
||||
|
||||
type withClientCertSource struct{ s ClientCertSource }
|
||||
|
||||
func (w withClientCertSource) Apply(o *internal.DialSettings) {
|
||||
o.ClientCertSource = w.s
|
||||
}
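A hypothetical usage sketch (not part of the vendored files) showing how ClientOptions from this package compose and flow into the HTTP transport constructor that appears later in this diff; the credentials path, scope, and user agent are placeholders:

package main

import (
	"context"
	"log"

	"google.golang.org/api/option"
	htransport "google.golang.org/api/transport/http"
)

func main() {
	ctx := context.Background()
	// Each option fills one field of internal.DialSettings via Apply.
	client, endpoint, err := htransport.NewClient(ctx,
		option.WithCredentialsFile("/path/to/service-account.json"),
		option.WithScopes("https://www.googleapis.com/auth/devstorage.read_only"),
		option.WithUserAgent("my-app/0.1"),
	)
	if err != nil {
		log.Fatal(err)
	}
	_ = client   // *http.Client with auth and instrumentation applied
	_ = endpoint // endpoint resolved from the options
}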
|
7298
vendor/google.golang.org/api/storage/v1/storage-api.json
generated
vendored
File diff suppressed because it is too large
7017
vendor/google.golang.org/api/storage/v1/storage-gen.go
generated
vendored
File diff suppressed because it is too large
110
vendor/google.golang.org/api/transport/cert/default_cert.go
generated
vendored
Normal file
110
vendor/google.golang.org/api/transport/cert/default_cert.go
generated
vendored
Normal file
|
@ -0,0 +1,110 @@
|
|||
// Copyright 2020 Google LLC.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package cert contains certificate tools for Google API clients.
|
||||
// This package is intended to be used with crypto/tls.Config.GetClientCertificate.
|
||||
//
|
||||
// The certificates can be used to satisfy Google's Endpoint Validation.
|
||||
// See https://cloud.google.com/endpoint-verification/docs/overview
|
||||
//
|
||||
// This package is not intended for use by end developers. Use the
|
||||
// google.golang.org/api/option package to configure API clients.
|
||||
package cert
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"os/exec"
|
||||
"os/user"
|
||||
"path/filepath"
|
||||
"sync"
|
||||
)
|
||||
|
||||
const (
|
||||
metadataPath = ".secureConnect"
|
||||
metadataFile = "context_aware_metadata.json"
|
||||
)
|
||||
|
||||
var (
|
||||
defaultSourceOnce sync.Once
|
||||
defaultSource Source
|
||||
defaultSourceErr error
|
||||
)
|
||||
|
||||
// Source is a function that can be passed into crypto/tls.Config.GetClientCertificate.
|
||||
type Source func(*tls.CertificateRequestInfo) (*tls.Certificate, error)
|
||||
|
||||
// DefaultSource returns a certificate source that execs the command specified
|
||||
// in the file at ~/.secureConnect/context_aware_metadata.json
|
||||
//
|
||||
// If that file does not exist, a nil source is returned.
|
||||
func DefaultSource() (Source, error) {
|
||||
defaultSourceOnce.Do(func() {
|
||||
defaultSource, defaultSourceErr = newSecureConnectSource()
|
||||
})
|
||||
return defaultSource, defaultSourceErr
|
||||
}
|
||||
|
||||
type secureConnectSource struct {
|
||||
metadata secureConnectMetadata
|
||||
}
|
||||
|
||||
type secureConnectMetadata struct {
|
||||
Cmd []string `json:"cert_provider_command"`
|
||||
}
|
||||
|
||||
// newSecureConnectSource creates a secureConnectSource by reading the well-known file.
|
||||
func newSecureConnectSource() (Source, error) {
|
||||
user, err := user.Current()
|
||||
if err != nil {
|
||||
// Ignore.
|
||||
return nil, nil
|
||||
}
|
||||
filename := filepath.Join(user.HomeDir, metadataPath, metadataFile)
|
||||
file, err := ioutil.ReadFile(filename)
|
||||
if os.IsNotExist(err) {
|
||||
// Ignore.
|
||||
return nil, nil
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var metadata secureConnectMetadata
|
||||
if err := json.Unmarshal(file, &metadata); err != nil {
|
||||
return nil, fmt.Errorf("cert: could not parse JSON in %q: %v", filename, err)
|
||||
}
|
||||
if err := validateMetadata(metadata); err != nil {
|
||||
return nil, fmt.Errorf("cert: invalid config in %q: %v", filename, err)
|
||||
}
|
||||
return (&secureConnectSource{
|
||||
metadata: metadata,
|
||||
}).getClientCertificate, nil
|
||||
}
|
||||
|
||||
func validateMetadata(metadata secureConnectMetadata) error {
|
||||
if len(metadata.Cmd) == 0 {
|
||||
return errors.New("empty cert_provider_command")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *secureConnectSource) getClientCertificate(info *tls.CertificateRequestInfo) (*tls.Certificate, error) {
|
||||
// TODO(cbro): consider caching valid certificates rather than exec'ing every time.
|
||||
command := s.metadata.Cmd
|
||||
data, err := exec.Command(command[0], command[1:]...).Output()
|
||||
if err != nil {
|
||||
// TODO(cbro): read stderr for error message? Might contain sensitive info.
|
||||
return nil, err
|
||||
}
|
||||
cert, err := tls.X509KeyPair(data, data)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &cert, nil
|
||||
}
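For reference, a hypothetical example (not part of the vendored file) of the well-known ~/.secureConnect/context_aware_metadata.json consumed above; the helper binary name and flag are placeholders:

// {
//   "cert_provider_command": ["/usr/local/bin/enterprise_cert_helper", "--print_certificate"]
// }
//
// The command is exec'd as-is and must print a PEM certificate and key on
// stdout, which getClientCertificate then parses with tls.X509KeyPair.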
|
20
vendor/google.golang.org/api/transport/http/default_transport_go113.go
generated
vendored
Normal file
20
vendor/google.golang.org/api/transport/http/default_transport_go113.go
generated
vendored
Normal file
|
@ -0,0 +1,20 @@
|
|||
// Copyright 2020 Google LLC.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build go1.13
|
||||
|
||||
package http
|
||||
|
||||
import "net/http"
|
||||
|
||||
// clonedTransport returns the given RoundTripper as a cloned *http.Transport.
|
||||
// It returns nil if the RoundTripper can't be cloned or coerced to
|
||||
// *http.Transport.
|
||||
func clonedTransport(rt http.RoundTripper) *http.Transport {
|
||||
t, ok := rt.(*http.Transport)
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
return t.Clone()
|
||||
}
|
15
vendor/google.golang.org/api/transport/http/default_transport_not_go113.go
generated
vendored
Normal file
15
vendor/google.golang.org/api/transport/http/default_transport_not_go113.go
generated
vendored
Normal file
|
@ -0,0 +1,15 @@
|
|||
// Copyright 2020 Google LLC.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build !go1.13
|
||||
|
||||
package http
|
||||
|
||||
import "net/http"
|
||||
|
||||
// clonedTransport returns the given RoundTripper as a cloned *http.Transport.
|
||||
// For versions of Go <1.13, this is not supported, so return nil.
|
||||
func clonedTransport(rt http.RoundTripper) *http.Transport {
|
||||
return nil
|
||||
}
|
304
vendor/google.golang.org/api/transport/http/dial.go
generated
vendored
Normal file
304
vendor/google.golang.org/api/transport/http/dial.go
generated
vendored
Normal file
|
@ -0,0 +1,304 @@
|
|||
// Copyright 2015 Google LLC.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package http supports network connections to HTTP servers.
|
||||
// This package is not intended for use by end developers. Use the
|
||||
// google.golang.org/api/option package to configure API clients.
|
||||
package http
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"errors"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"os"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"go.opencensus.io/plugin/ochttp"
|
||||
"golang.org/x/oauth2"
|
||||
"google.golang.org/api/googleapi/transport"
|
||||
"google.golang.org/api/internal"
|
||||
"google.golang.org/api/option"
|
||||
"google.golang.org/api/transport/cert"
|
||||
"google.golang.org/api/transport/http/internal/propagation"
|
||||
)
|
||||
|
||||
const (
|
||||
mTLSModeAlways = "always"
|
||||
mTLSModeNever = "never"
|
||||
mTLSModeAuto = "auto"
|
||||
)
|
||||
|
||||
// NewClient returns an HTTP client for use communicating with a Google cloud
|
||||
// service, configured with the given ClientOptions. It also returns the endpoint
|
||||
// for the service as specified in the options.
|
||||
func NewClient(ctx context.Context, opts ...option.ClientOption) (*http.Client, string, error) {
|
||||
settings, err := newSettings(opts)
|
||||
if err != nil {
|
||||
return nil, "", err
|
||||
}
|
||||
clientCertSource, err := getClientCertificateSource(settings)
|
||||
if err != nil {
|
||||
return nil, "", err
|
||||
}
|
||||
endpoint, err := getEndpoint(settings, clientCertSource)
|
||||
if err != nil {
|
||||
return nil, "", err
|
||||
}
|
||||
// TODO(cbro): consider injecting the User-Agent even if an explicit HTTP client is provided?
|
||||
if settings.HTTPClient != nil {
|
||||
return settings.HTTPClient, endpoint, nil
|
||||
}
|
||||
trans, err := newTransport(ctx, defaultBaseTransport(ctx, clientCertSource), settings)
|
||||
if err != nil {
|
||||
return nil, "", err
|
||||
}
|
||||
return &http.Client{Transport: trans}, endpoint, nil
|
||||
}
|
||||
|
||||
// NewTransport creates an http.RoundTripper for use communicating with a Google
|
||||
// cloud service, configured with the given ClientOptions. Its RoundTrip method delegates to base.
|
||||
func NewTransport(ctx context.Context, base http.RoundTripper, opts ...option.ClientOption) (http.RoundTripper, error) {
|
||||
settings, err := newSettings(opts)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if settings.HTTPClient != nil {
|
||||
return nil, errors.New("transport/http: WithHTTPClient passed to NewTransport")
|
||||
}
|
||||
return newTransport(ctx, base, settings)
|
||||
}
|
||||
|
||||
func newTransport(ctx context.Context, base http.RoundTripper, settings *internal.DialSettings) (http.RoundTripper, error) {
|
||||
paramTransport := ¶meterTransport{
|
||||
base: base,
|
||||
userAgent: settings.UserAgent,
|
||||
quotaProject: settings.QuotaProject,
|
||||
requestReason: settings.RequestReason,
|
||||
}
|
||||
var trans http.RoundTripper = paramTransport
|
||||
trans = addOCTransport(trans, settings)
|
||||
switch {
|
||||
case settings.NoAuth:
|
||||
// Do nothing.
|
||||
case settings.APIKey != "":
|
||||
trans = &transport.APIKey{
|
||||
Transport: trans,
|
||||
Key: settings.APIKey,
|
||||
}
|
||||
default:
|
||||
creds, err := internal.Creds(ctx, settings)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if paramTransport.quotaProject == "" {
|
||||
paramTransport.quotaProject = internal.QuotaProjectFromCreds(creds)
|
||||
}
|
||||
|
||||
ts := creds.TokenSource
|
||||
if settings.TokenSource != nil {
|
||||
ts = settings.TokenSource
|
||||
}
|
||||
trans = &oauth2.Transport{
|
||||
Base: trans,
|
||||
Source: ts,
|
||||
}
|
||||
}
|
||||
return trans, nil
|
||||
}
|
||||
|
||||
func newSettings(opts []option.ClientOption) (*internal.DialSettings, error) {
|
||||
var o internal.DialSettings
|
||||
for _, opt := range opts {
|
||||
opt.Apply(&o)
|
||||
}
|
||||
if err := o.Validate(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if o.GRPCConn != nil {
|
||||
return nil, errors.New("unsupported gRPC connection specified")
|
||||
}
|
||||
return &o, nil
|
||||
}
|
||||
|
||||
type parameterTransport struct {
|
||||
userAgent string
|
||||
quotaProject string
|
||||
requestReason string
|
||||
|
||||
base http.RoundTripper
|
||||
}
|
||||
|
||||
func (t *parameterTransport) RoundTrip(req *http.Request) (*http.Response, error) {
|
||||
rt := t.base
|
||||
if rt == nil {
|
||||
return nil, errors.New("transport: no Transport specified")
|
||||
}
|
||||
newReq := *req
|
||||
newReq.Header = make(http.Header)
|
||||
for k, vv := range req.Header {
|
||||
newReq.Header[k] = vv
|
||||
}
|
||||
if t.userAgent != "" {
|
||||
// TODO(cbro): append to existing User-Agent header?
|
||||
newReq.Header.Set("User-Agent", t.userAgent)
|
||||
}
|
||||
|
||||
// Attach system parameters into the header
|
||||
if t.quotaProject != "" {
|
||||
newReq.Header.Set("X-Goog-User-Project", t.quotaProject)
|
||||
}
|
||||
if t.requestReason != "" {
|
||||
newReq.Header.Set("X-Goog-Request-Reason", t.requestReason)
|
||||
}
|
||||
|
||||
return rt.RoundTrip(&newReq)
|
||||
}
|
||||
|
||||
// Set at init time by dial_appengine.go. If nil, we're not on App Engine.
|
||||
var appengineUrlfetchHook func(context.Context) http.RoundTripper
|
||||
|
||||
// defaultBaseTransport returns the base HTTP transport.
|
||||
// On App Engine, this is urlfetch.Transport.
|
||||
// Otherwise, use a default transport, taking most defaults from
|
||||
// http.DefaultTransport.
|
||||
// If TLSCertificate is available, set TLSClientConfig as well.
|
||||
func defaultBaseTransport(ctx context.Context, clientCertSource cert.Source) http.RoundTripper {
|
||||
if appengineUrlfetchHook != nil {
|
||||
return appengineUrlfetchHook(ctx)
|
||||
}
|
||||
|
||||
// Copy http.DefaultTransport except for MaxIdleConnsPerHost setting,
|
||||
// which is increased due to reported performance issues under load in the GCS
|
||||
// client. Transport.Clone is only available in Go 1.13 and up.
|
||||
trans := clonedTransport(http.DefaultTransport)
|
||||
if trans == nil {
|
||||
trans = fallbackBaseTransport()
|
||||
}
|
||||
trans.MaxIdleConnsPerHost = 100
|
||||
|
||||
if clientCertSource != nil {
|
||||
trans.TLSClientConfig = &tls.Config{
|
||||
GetClientCertificate: clientCertSource,
|
||||
}
|
||||
}
|
||||
|
||||
return trans
|
||||
}
|
||||
|
||||
// fallbackBaseTransport is used in <go1.13 as well as in the rare case if
|
||||
// http.DefaultTransport has been reassigned something that's not a
|
||||
// *http.Transport.
|
||||
func fallbackBaseTransport() *http.Transport {
|
||||
return &http.Transport{
|
||||
Proxy: http.ProxyFromEnvironment,
|
||||
DialContext: (&net.Dialer{
|
||||
Timeout: 30 * time.Second,
|
||||
KeepAlive: 30 * time.Second,
|
||||
DualStack: true,
|
||||
}).DialContext,
|
||||
MaxIdleConns: 100,
|
||||
MaxIdleConnsPerHost: 100,
|
||||
IdleConnTimeout: 90 * time.Second,
|
||||
TLSHandshakeTimeout: 10 * time.Second,
|
||||
ExpectContinueTimeout: 1 * time.Second,
|
||||
}
|
||||
}
|
||||
|
||||
func addOCTransport(trans http.RoundTripper, settings *internal.DialSettings) http.RoundTripper {
|
||||
if settings.TelemetryDisabled {
|
||||
return trans
|
||||
}
|
||||
return &ochttp.Transport{
|
||||
Base: trans,
|
||||
Propagation: &propagation.HTTPFormat{},
|
||||
}
|
||||
}
|
||||
|
||||
// getClientCertificateSource returns a default client certificate source, if
|
||||
// not provided by the user.
|
||||
//
|
||||
// A nil default source can be returned if the source does not exist. Any exceptions
|
||||
// encountered while initializing the default source will be reported as client
|
||||
// error (ex. corrupt metadata file).
|
||||
//
|
||||
// The overall logic is as follows:
|
||||
// 1. If both endpoint override and client certificate are specified, use them as is.
|
||||
// 2. If user does not specify client certificate, we will attempt to use default
|
||||
// client certificate.
|
||||
// 3. If user does not specify endpoint override, we will use defaultMtlsEndpoint if
|
||||
// client certificate is available and defaultEndpoint otherwise.
|
||||
//
|
||||
// Implications of the above logic:
|
||||
// 1. If the user specifies a non-mTLS endpoint override but client certificate is
|
||||
// available, we will pass along the cert anyway and let the server decide what to do.
|
||||
// 2. If the user specifies an mTLS endpoint override but client certificate is not
|
||||
// available, we will not fail-fast, but let backend throw error when connecting.
|
||||
//
|
||||
// We would like to avoid introducing client-side logic that parses whether the
|
||||
// endpoint override is an mTLS url, since the url pattern may change at anytime.
|
||||
func getClientCertificateSource(settings *internal.DialSettings) (cert.Source, error) {
|
||||
if settings.HTTPClient != nil {
|
||||
return nil, nil // HTTPClient is incompatible with ClientCertificateSource
|
||||
} else if settings.ClientCertSource != nil {
|
||||
return settings.ClientCertSource, nil
|
||||
} else {
|
||||
return cert.DefaultSource()
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
// getEndpoint returns the endpoint for the service, taking into account the
|
||||
// user-provided endpoint override "settings.Endpoint"
|
||||
//
|
||||
// If no endpoint override is specified, we will either return the default endpoint or
|
||||
// the default mTLS endpoint if a client certificate is available.
|
||||
//
|
||||
// You can override the default endpoint (mtls vs. regular) by setting the
|
||||
// GOOGLE_API_USE_MTLS environment variable.
|
||||
//
|
||||
// If the endpoint override is an address (host:port) rather than full base
|
||||
// URL (ex. https://...), then the user-provided address will be merged into
|
||||
// the default endpoint. For example, WithEndpoint("myhost:8000") and
|
||||
// WithDefaultEndpoint("https://foo.com/bar/baz") will return "https://myhost:8080/bar/baz"
|
||||
func getEndpoint(settings *internal.DialSettings, clientCertSource cert.Source) (string, error) {
|
||||
if settings.Endpoint == "" {
|
||||
mtlsMode := getMTLSMode()
|
||||
if mtlsMode == mTLSModeAlways || (clientCertSource != nil && mtlsMode == mTLSModeAuto) {
|
||||
return settings.DefaultMTLSEndpoint, nil
|
||||
}
|
||||
return settings.DefaultEndpoint, nil
|
||||
}
|
||||
if strings.Contains(settings.Endpoint, "://") {
|
||||
// User passed in a full URL path, use it verbatim.
|
||||
return settings.Endpoint, nil
|
||||
}
|
||||
if settings.DefaultEndpoint == "" {
|
||||
return "", errors.New("WithEndpoint requires a full URL path")
|
||||
}
|
||||
|
||||
// Assume user-provided endpoint is host[:port], merge it with the default endpoint.
|
||||
return mergeEndpoints(settings.DefaultEndpoint, settings.Endpoint)
|
||||
}
|
||||
|
||||
func getMTLSMode() string {
|
||||
mode := os.Getenv("GOOGLE_API_USE_MTLS")
|
||||
if mode == "" {
|
||||
// TODO(shinfan): Update this to "auto" when the mTLS feature is fully released.
|
||||
return mTLSModeNever
|
||||
}
|
||||
return strings.ToLower(mode)
|
||||
}
|
||||
|
||||
func mergeEndpoints(base, newHost string) (string, error) {
|
||||
u, err := url.Parse(base)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
u.Host = newHost
|
||||
return u.String(), nil
|
||||
}
|
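The `getEndpoint` comment above describes how a bare `host:port` override is merged into the default base URL. A minimal standalone sketch of that merge using `net/url` (the function and values here are illustrative; `mergeEndpoints` itself is unexported):

```
package main

import (
	"fmt"
	"net/url"
)

// merge swaps the host:port of base for newHost, keeping scheme and path,
// mirroring the behaviour described in the getEndpoint comment.
func merge(base, newHost string) (string, error) {
	u, err := url.Parse(base)
	if err != nil {
		return "", err
	}
	u.Host = newHost
	return u.String(), nil
}

func main() {
	out, err := merge("https://foo.com/bar/baz", "myhost:8000")
	if err != nil {
		panic(err)
	}
	fmt.Println(out) // https://myhost:8000/bar/baz
}
```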
20
vendor/google.golang.org/api/transport/http/dial_appengine.go
generated
vendored
Normal file
@@ -0,0 +1,20 @@
// Copyright 2016 Google LLC.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build appengine

package http

import (
	"context"
	"net/http"

	"google.golang.org/appengine/urlfetch"
)

func init() {
	appengineUrlfetchHook = func(ctx context.Context) http.RoundTripper {
		return &urlfetch.Transport{Context: ctx}
	}
}
86
vendor/google.golang.org/api/transport/http/internal/propagation/http.go
generated
vendored
Normal file
@@ -0,0 +1,86 @@
// Copyright 2018 Google LLC.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build go1.8

// Package propagation implements X-Cloud-Trace-Context header propagation used
// by Google Cloud products.
package propagation

import (
	"encoding/binary"
	"encoding/hex"
	"fmt"
	"net/http"
	"strconv"
	"strings"

	"go.opencensus.io/trace"
	"go.opencensus.io/trace/propagation"
)

const (
	httpHeaderMaxSize = 200
	httpHeader        = `X-Cloud-Trace-Context`
)

var _ propagation.HTTPFormat = (*HTTPFormat)(nil)

// HTTPFormat implements propagation.HTTPFormat to propagate
// traces in HTTP headers for Google Cloud Platform and Stackdriver Trace.
type HTTPFormat struct{}

// SpanContextFromRequest extracts a Stackdriver Trace span context from incoming requests.
func (f *HTTPFormat) SpanContextFromRequest(req *http.Request) (sc trace.SpanContext, ok bool) {
	h := req.Header.Get(httpHeader)
	// See https://cloud.google.com/trace/docs/faq for the header HTTPFormat.
	// Return if the header is empty or missing, or if the header is unreasonably
	// large, to avoid making unnecessary copies of a large string.
	if h == "" || len(h) > httpHeaderMaxSize {
		return trace.SpanContext{}, false
	}

	// Parse the trace id field.
	slash := strings.Index(h, `/`)
	if slash == -1 {
		return trace.SpanContext{}, false
	}
	tid, h := h[:slash], h[slash+1:]

	buf, err := hex.DecodeString(tid)
	if err != nil {
		return trace.SpanContext{}, false
	}
	copy(sc.TraceID[:], buf)

	// Parse the span id field.
	spanstr := h
	semicolon := strings.Index(h, `;`)
	if semicolon != -1 {
		spanstr, h = h[:semicolon], h[semicolon+1:]
	}
	sid, err := strconv.ParseUint(spanstr, 10, 64)
	if err != nil {
		return trace.SpanContext{}, false
	}
	binary.BigEndian.PutUint64(sc.SpanID[:], sid)

	// Parse the options field, options field is optional.
	if !strings.HasPrefix(h, "o=") {
		return sc, true
	}
	o, err := strconv.ParseUint(h[2:], 10, 64)
	if err != nil {
		return trace.SpanContext{}, false
	}
	sc.TraceOptions = trace.TraceOptions(o)
	return sc, true
}

// SpanContextToRequest modifies the given request to include a Stackdriver Trace header.
func (f *HTTPFormat) SpanContextToRequest(sc trace.SpanContext, req *http.Request) {
	sid := binary.BigEndian.Uint64(sc.SpanID[:])
	header := fmt.Sprintf("%s/%d;o=%d", hex.EncodeToString(sc.TraceID[:]), sid, int64(sc.TraceOptions))
	req.Header.Set(httpHeader, header)
}
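The header this package reads and writes has the shape `TRACE_ID/SPAN_ID;o=TRACE_TRUE`: a hex-encoded trace id, a decimal span id, and an optional options field. A standalone sketch of that wire format (example values are made up; the vendored package is internal and cannot be imported directly):

```
package main

import (
	"fmt"
	"net/http"
)

func main() {
	traceID := "105445aa7843bc8bf206b12000100000" // 32 hex characters (16 bytes)
	spanID := uint64(1)                           // decimal on the wire
	sampled := 1                                  // o=1 means "trace this request"

	// Build the header exactly as SpanContextToRequest does.
	header := fmt.Sprintf("%s/%d;o=%d", traceID, spanID, sampled)

	req, err := http.NewRequest(http.MethodGet, "https://example.com", nil)
	if err != nil {
		panic(err)
	}
	req.Header.Set("X-Cloud-Trace-Context", header)

	fmt.Println(req.Header.Get("X-Cloud-Trace-Context"))
	// Output: 105445aa7843bc8bf206b12000100000/1;o=1
}
```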
2
vendor/google.golang.org/appengine/.travis.yml
generated
vendored
|
@ -10,8 +10,6 @@ script:
|
|||
|
||||
matrix:
|
||||
include:
|
||||
- go: 1.8.x
|
||||
env: GOAPP=true
|
||||
- go: 1.9.x
|
||||
env: GOAPP=true
|
||||
- go: 1.10.x
|
||||
|
|
27
vendor/google.golang.org/appengine/README.md
generated
vendored
|
@ -71,3 +71,30 @@ A few APIs were cleaned up, and there are some differences:
|
|||
[blobstore package](https://google.golang.org/appengine/blobstore).
|
||||
* `appengine/socket` is not required on App Engine flexible environment / Managed VMs.
|
||||
Use the standard `net` package instead.
|
||||
|
||||
## Key Encode/Decode compatibility to help with datastore library migrations
|
||||
|
||||
Key compatibility updates have been added to help customers transition from google.golang.org/appengine/datastore to cloud.google.com/go/datastore.
|
||||
`EnableKeyConversion` enables automatic conversion of keys encoded with cloud.google.com/go/datastore to the google.golang.org/appengine/datastore key type.
|
||||
|
||||
### Enabling key conversion
|
||||
|
||||
Enable key conversion by calling `EnableKeyConversion(ctx)` in the `/_ah/start` handler for basic and manual scaling or any handler in automatic scaling.
|
||||
|
||||
#### 1. Basic or manual scaling
|
||||
|
||||
This start handler will enable key conversion for all handlers in the service.
|
||||
|
||||
```
|
||||
http.HandleFunc("/_ah/start", func(w http.ResponseWriter, r *http.Request) {
|
||||
datastore.EnableKeyConversion(appengine.NewContext(r))
|
||||
})
|
||||
```
|
||||
|
||||
#### 2. Automatic scaling
|
||||
|
||||
`/_ah/start` is not supported for automatic scaling and `/_ah/warmup` is not guaranteed to run, so you must call `datastore.EnableKeyConversion(appengine.NewContext(r))`
|
||||
before you use code that needs key conversion.
|
||||
|
||||
You may want to add this to each of your handlers, or introduce middleware where it's called.
|
||||
`EnableKeyConversion` is safe for concurrent use. Any call to it after the first is ignored.
|
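For the automatic-scaling case, one way to introduce such middleware is a small wrapper applied to each handler. A sketch assuming a plain `net/http` app served through the default mux (handler names here are illustrative):

```
package main

import (
	"net/http"

	"google.golang.org/appengine"
	"google.golang.org/appengine/datastore"
)

// withKeyConversion turns on key conversion before the wrapped handler runs.
// EnableKeyConversion is safe for concurrent use; calls after the first are no-ops.
func withKeyConversion(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		datastore.EnableKeyConversion(appengine.NewContext(r))
		next.ServeHTTP(w, r)
	})
}

func main() {
	http.Handle("/", withKeyConversion(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("ok"))
	})))
	appengine.Main() // serves http.DefaultServeMux on App Engine
}
```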
2
vendor/google.golang.org/appengine/appengine.go
generated
vendored
|
@ -97,8 +97,6 @@ func WithContext(parent context.Context, req *http.Request) context.Context {
|
|||
return internal.WithContext(parent, req)
|
||||
}
|
||||
|
||||
// TODO(dsymonds): Add a Call function here? Otherwise other packages can't access internal.Call.
|
||||
|
||||
// BlobKey is a key for a blobstore blob.
|
||||
//
|
||||
// Conceptually, this type belongs in the blobstore package, but it lives in
|
||||
|
|
11
vendor/google.golang.org/appengine/internal/api.go
generated
vendored
|
@ -44,6 +44,7 @@ var (
|
|||
curNamespaceHeader = http.CanonicalHeaderKey("X-AppEngine-Current-Namespace")
|
||||
userIPHeader = http.CanonicalHeaderKey("X-AppEngine-User-IP")
|
||||
remoteAddrHeader = http.CanonicalHeaderKey("X-AppEngine-Remote-Addr")
|
||||
devRequestIdHeader = http.CanonicalHeaderKey("X-Appengine-Dev-Request-Id")
|
||||
|
||||
// Outgoing headers.
|
||||
apiEndpointHeader = http.CanonicalHeaderKey("X-Google-RPC-Service-Endpoint")
|
||||
|
@ -57,8 +58,11 @@ var (
|
|||
|
||||
apiHTTPClient = &http.Client{
|
||||
Transport: &http.Transport{
|
||||
Proxy: http.ProxyFromEnvironment,
|
||||
Dial: limitDial,
|
||||
Proxy: http.ProxyFromEnvironment,
|
||||
Dial: limitDial,
|
||||
MaxIdleConns: 1000,
|
||||
MaxIdleConnsPerHost: 10000,
|
||||
IdleConnTimeout: 90 * time.Second,
|
||||
},
|
||||
}
|
||||
|
||||
|
@ -494,6 +498,9 @@ func Call(ctx netcontext.Context, service, method string, in, out proto.Message)
|
|||
if ticket == "" {
|
||||
ticket = DefaultTicket()
|
||||
}
|
||||
if dri := c.req.Header.Get(devRequestIdHeader); IsDevAppServer() && dri != "" {
|
||||
ticket = dri
|
||||
}
|
||||
req := &remotepb.Request{
|
||||
ServiceName: &service,
|
||||
Method: &method,
|
||||
|
|
2
vendor/google.golang.org/appengine/internal/net.go
generated
vendored
|
@ -32,7 +32,7 @@ func limitDial(network, addr string) (net.Conn, error) {
|
|||
|
||||
// Dial with a timeout in case the API host is MIA.
|
||||
// The connection should normally be very fast.
|
||||
conn, err := net.DialTimeout(network, addr, 500*time.Millisecond)
|
||||
conn, err := net.DialTimeout(network, addr, 10*time.Second)
|
||||
if err != nil {
|
||||
limitRelease()
|
||||
return nil, err
|
||||
|
|
202
vendor/google.golang.org/genproto/LICENSE
generated
vendored
Normal file
|
@ -0,0 +1,202 @@
|
|||
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright [yyyy] [name of copyright owner]
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
206
vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go
generated
vendored
Normal file
|
@ -0,0 +1,206 @@
|
|||
// Copyright 2020 Google LLC
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// versions:
|
||||
// protoc-gen-go v1.25.0
|
||||
// protoc v3.12.3
|
||||
// source: google/rpc/status.proto
|
||||
|
||||
package status
|
||||
|
||||
import (
|
||||
reflect "reflect"
|
||||
sync "sync"
|
||||
|
||||
proto "github.com/golang/protobuf/proto"
|
||||
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
|
||||
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
|
||||
anypb "google.golang.org/protobuf/types/known/anypb"
|
||||
)
|
||||
|
||||
const (
|
||||
// Verify that this generated code is sufficiently up-to-date.
|
||||
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
|
||||
// Verify that runtime/protoimpl is sufficiently up-to-date.
|
||||
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
|
||||
)
|
||||
|
||||
// This is a compile-time assertion that a sufficiently up-to-date version
|
||||
// of the legacy proto package is being used.
|
||||
const _ = proto.ProtoPackageIsVersion4
|
||||
|
||||
// The `Status` type defines a logical error model that is suitable for
|
||||
// different programming environments, including REST APIs and RPC APIs. It is
|
||||
// used by [gRPC](https://github.com/grpc). Each `Status` message contains
|
||||
// three pieces of data: error code, error message, and error details.
|
||||
//
|
||||
// You can find out more about this error model and how to work with it in the
|
||||
// [API Design Guide](https://cloud.google.com/apis/design/errors).
|
||||
type Status struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
// The status code, which should be an enum value of [google.rpc.Code][google.rpc.Code].
|
||||
Code int32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"`
|
||||
// A developer-facing error message, which should be in English. Any
|
||||
// user-facing error message should be localized and sent in the
|
||||
// [google.rpc.Status.details][google.rpc.Status.details] field, or localized by the client.
|
||||
Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"`
|
||||
// A list of messages that carry the error details. There is a common set of
|
||||
// message types for APIs to use.
|
||||
Details []*anypb.Any `protobuf:"bytes,3,rep,name=details,proto3" json:"details,omitempty"`
|
||||
}
|
||||
|
||||
func (x *Status) Reset() {
|
||||
*x = Status{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_google_rpc_status_proto_msgTypes[0]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *Status) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*Status) ProtoMessage() {}
|
||||
|
||||
func (x *Status) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_google_rpc_status_proto_msgTypes[0]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use Status.ProtoReflect.Descriptor instead.
|
||||
func (*Status) Descriptor() ([]byte, []int) {
|
||||
return file_google_rpc_status_proto_rawDescGZIP(), []int{0}
|
||||
}
|
||||
|
||||
func (x *Status) GetCode() int32 {
|
||||
if x != nil {
|
||||
return x.Code
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (x *Status) GetMessage() string {
|
||||
if x != nil {
|
||||
return x.Message
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *Status) GetDetails() []*anypb.Any {
|
||||
if x != nil {
|
||||
return x.Details
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
var File_google_rpc_status_proto protoreflect.FileDescriptor
|
||||
|
||||
var file_google_rpc_status_proto_rawDesc = []byte{
|
||||
0x0a, 0x17, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x72, 0x70, 0x63, 0x2f, 0x73, 0x74, 0x61,
|
||||
0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0a, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
|
||||
0x65, 0x2e, 0x72, 0x70, 0x63, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72,
|
||||
0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
|
||||
0x22, 0x66, 0x0a, 0x06, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x6f,
|
||||
0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x12, 0x18,
|
||||
0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52,
|
||||
0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x2e, 0x0a, 0x07, 0x64, 0x65, 0x74, 0x61,
|
||||
0x69, 0x6c, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
|
||||
0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52,
|
||||
0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x42, 0x61, 0x0a, 0x0e, 0x63, 0x6f, 0x6d, 0x2e,
|
||||
0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x42, 0x0b, 0x53, 0x74, 0x61, 0x74,
|
||||
0x75, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x37, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
|
||||
0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e,
|
||||
0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73,
|
||||
0x2f, 0x72, 0x70, 0x63, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x3b, 0x73, 0x74, 0x61, 0x74,
|
||||
0x75, 0x73, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x52, 0x50, 0x43, 0x62, 0x06, 0x70, 0x72, 0x6f,
|
||||
0x74, 0x6f, 0x33,
|
||||
}
|
||||
|
||||
var (
|
||||
file_google_rpc_status_proto_rawDescOnce sync.Once
|
||||
file_google_rpc_status_proto_rawDescData = file_google_rpc_status_proto_rawDesc
|
||||
)
|
||||
|
||||
func file_google_rpc_status_proto_rawDescGZIP() []byte {
|
||||
file_google_rpc_status_proto_rawDescOnce.Do(func() {
|
||||
file_google_rpc_status_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_rpc_status_proto_rawDescData)
|
||||
})
|
||||
return file_google_rpc_status_proto_rawDescData
|
||||
}
|
||||
|
||||
var file_google_rpc_status_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
|
||||
var file_google_rpc_status_proto_goTypes = []interface{}{
|
||||
(*Status)(nil), // 0: google.rpc.Status
|
||||
(*anypb.Any)(nil), // 1: google.protobuf.Any
|
||||
}
|
||||
var file_google_rpc_status_proto_depIdxs = []int32{
|
||||
1, // 0: google.rpc.Status.details:type_name -> google.protobuf.Any
|
||||
1, // [1:1] is the sub-list for method output_type
|
||||
1, // [1:1] is the sub-list for method input_type
|
||||
1, // [1:1] is the sub-list for extension type_name
|
||||
1, // [1:1] is the sub-list for extension extendee
|
||||
0, // [0:1] is the sub-list for field type_name
|
||||
}
|
||||
|
||||
func init() { file_google_rpc_status_proto_init() }
|
||||
func file_google_rpc_status_proto_init() {
|
||||
if File_google_rpc_status_proto != nil {
|
||||
return
|
||||
}
|
||||
if !protoimpl.UnsafeEnabled {
|
||||
file_google_rpc_status_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*Status); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
type x struct{}
|
||||
out := protoimpl.TypeBuilder{
|
||||
File: protoimpl.DescBuilder{
|
||||
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
|
||||
RawDescriptor: file_google_rpc_status_proto_rawDesc,
|
||||
NumEnums: 0,
|
||||
NumMessages: 1,
|
||||
NumExtensions: 0,
|
||||
NumServices: 0,
|
||||
},
|
||||
GoTypes: file_google_rpc_status_proto_goTypes,
|
||||
DependencyIndexes: file_google_rpc_status_proto_depIdxs,
|
||||
MessageInfos: file_google_rpc_status_proto_msgTypes,
|
||||
}.Build()
|
||||
File_google_rpc_status_proto = out.File
|
||||
file_google_rpc_status_proto_rawDesc = nil
|
||||
file_google_rpc_status_proto_goTypes = nil
|
||||
file_google_rpc_status_proto_depIdxs = nil
|
||||
}
|
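The `Status` message above is the wire form of the code/message/details error model shared by gRPC and Google REST APIs. A small sketch of populating it directly (illustrative values; application code normally goes through the google.golang.org/grpc/status helpers):

```
package main

import (
	"fmt"

	statuspb "google.golang.org/genproto/googleapis/rpc/status"
)

func main() {
	st := &statuspb.Status{
		Code:    5, // google.rpc.Code value NOT_FOUND
		Message: "requested resource was not found",
		// Details can carry typed payloads wrapped in google.protobuf.Any.
	}
	fmt.Printf("code=%d message=%q details=%d\n", st.GetCode(), st.GetMessage(), len(st.GetDetails()))
}
```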
41
vendor/google.golang.org/grpc/.travis.yml
generated
vendored
|
@ -1,13 +1,42 @@
|
|||
language: go
|
||||
|
||||
matrix:
|
||||
include:
|
||||
- go: 1.14.x
|
||||
env: VET=1 GO111MODULE=on
|
||||
- go: 1.14.x
|
||||
env: RACE=1 GO111MODULE=on
|
||||
- go: 1.14.x
|
||||
env: RUN386=1
|
||||
- go: 1.14.x
|
||||
env: GRPC_GO_RETRY=on
|
||||
- go: 1.14.x
|
||||
env: TESTEXTRAS=1
|
||||
- go: 1.13.x
|
||||
env: GO111MODULE=on
|
||||
- go: 1.12.x
|
||||
env: GO111MODULE=on
|
||||
- go: 1.9.x
|
||||
env: GAE=1
|
||||
|
||||
go_import_path: google.golang.org/grpc
|
||||
|
||||
before_install:
|
||||
- go get github.com/axw/gocov/gocov
|
||||
- go get github.com/mattn/goveralls
|
||||
- go get golang.org/x/tools/cmd/cover
|
||||
- if [[ "${GO111MODULE}" = "on" ]]; then mkdir "${HOME}/go"; export GOPATH="${HOME}/go"; fi
|
||||
- if [[ -n "${RUN386}" ]]; then export GOARCH=386; fi
|
||||
- if [[ "${TRAVIS_EVENT_TYPE}" = "cron" && -z "${RUN386}" ]]; then RACE=1; fi
|
||||
- if [[ "${TRAVIS_EVENT_TYPE}" != "cron" ]]; then export VET_SKIP_PROTO=1; fi
|
||||
|
||||
install:
|
||||
- mkdir -p "$GOPATH/src/google.golang.org"
|
||||
- mv "$TRAVIS_BUILD_DIR" "$GOPATH/src/google.golang.org/grpc"
|
||||
- try3() { eval "$*" || eval "$*" || eval "$*"; }
|
||||
- try3 'if [[ "${GO111MODULE}" = "on" ]]; then go mod download; else make testdeps; fi'
|
||||
- if [[ -n "${GAE}" ]]; then source ./install_gae.sh; make testappenginedeps; fi
|
||||
- if [[ -n "${VET}" ]]; then ./vet.sh -install; fi
|
||||
|
||||
script:
|
||||
- make test testrace
|
||||
- set -e
|
||||
- if [[ -n "${TESTEXTRAS}" ]]; then examples/examples_test.sh; interop/interop_test.sh; make testsubmodule; exit 0; fi
|
||||
- if [[ -n "${VET}" ]]; then ./vet.sh; fi
|
||||
- if [[ -n "${GAE}" ]]; then make testappengine; exit 0; fi
|
||||
- if [[ -n "${RACE}" ]]; then make testrace; exit 0; fi
|
||||
- make test
|
||||
|
|
1
vendor/google.golang.org/grpc/AUTHORS
generated
vendored
Normal file
|
@ -0,0 +1 @@
|
|||
Google Inc.
|
3
vendor/google.golang.org/grpc/CODE-OF-CONDUCT.md
generated
vendored
Normal file
|
@ -0,0 +1,3 @@
|
|||
## Community Code of Conduct
|
||||
|
||||
gRPC follows the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/master/code-of-conduct.md).
|
67
vendor/google.golang.org/grpc/CONTRIBUTING.md
generated
vendored
|
@ -1,23 +1,62 @@
|
|||
# How to contribute
|
||||
|
||||
We definitely welcome patches and contribution to grpc! Here is some guideline
|
||||
and information about how to do so.
|
||||
We definitely welcome your patches and contributions to gRPC! Please read the gRPC
|
||||
organization's [governance rules](https://github.com/grpc/grpc-community/blob/master/governance.md)
|
||||
and [contribution guidelines](https://github.com/grpc/grpc-community/blob/master/CONTRIBUTING.md) before proceeding.
|
||||
|
||||
## Getting started
|
||||
If you are new to github, please start by reading [Pull Request howto](https://help.github.com/articles/about-pull-requests/)
|
||||
|
||||
### Legal requirements
|
||||
## Legal requirements
|
||||
|
||||
In order to protect both you and ourselves, you will need to sign the
|
||||
[Contributor License Agreement](https://cla.developers.google.com/clas).
|
||||
[Contributor License Agreement](https://identity.linuxfoundation.org/projects/cncf).
|
||||
|
||||
### Filing Issues
|
||||
When filing an issue, make sure to answer these five questions:
|
||||
## Guidelines for Pull Requests
|
||||
How to get your contributions merged smoothly and quickly.
|
||||
|
||||
1. What version of Go are you using (`go version`)?
|
||||
2. What operating system and processor architecture are you using?
|
||||
3. What did you do?
|
||||
4. What did you expect to see?
|
||||
5. What did you see instead?
|
||||
- Create **small PRs** that are narrowly focused on **addressing a single
  concern**. We often receive PRs that try to fix several things at a time; if
  only one fix is considered acceptable, nothing gets merged, and both the
  author's and the reviewers' time is wasted. Create more PRs to address
  different concerns and everyone will be happy.
|
||||
|
||||
### Contributing code
|
||||
Unless otherwise noted, the Go source files are distributed under the BSD-style license found in the LICENSE file.
|
||||
- The grpc package should only depend on standard Go packages and a small number
|
||||
of exceptions. If your contribution introduces new dependencies which are NOT
|
||||
in the [list](https://godoc.org/google.golang.org/grpc?imports), you need a
|
||||
discussion with gRPC-Go authors and consultants.
|
||||
|
||||
- For speculative changes, consider opening an issue and discussing it first. If
|
||||
you are suggesting a behavioral or API change, consider starting with a [gRFC
|
||||
proposal](https://github.com/grpc/proposal).
|
||||
|
||||
- Provide a good **PR description** as a record of **what** change is being made
|
||||
and **why** it was made. Link to a github issue if it exists.
|
||||
|
||||
- Don't fix code style and formatting unless you are already changing that line
|
||||
to address an issue. PRs with irrelevant changes won't be merged. If you do
|
||||
want to fix formatting or style, do that in a separate PR.
|
||||
|
||||
- Unless your PR is trivial, you should expect there will be reviewer comments
|
||||
that you'll need to address before merging. We expect you to be reasonably
|
||||
responsive to those comments, otherwise the PR will be closed after 2-3 weeks
|
||||
of inactivity.
|
||||
|
||||
- Maintain **clean commit history** and use **meaningful commit messages**. PRs
|
||||
with messy commit history are difficult to review and won't be merged. Use
|
||||
`rebase -i upstream/master` to curate your commit history and/or to bring in
|
||||
latest changes from master (but avoid rebasing in the middle of a code
|
||||
review).
|
||||
|
||||
- Keep your PR up to date with upstream/master (if there are merge conflicts, we
|
||||
can't really merge your change).
|
||||
|
||||
- **All tests need to be passing** before your change can be merged. We
|
||||
recommend you **run tests locally** before creating your PR to catch breakages
|
||||
early on.
|
||||
- `make all` to test everything, OR
|
||||
- `make vet` to catch vet errors
|
||||
- `make test` to run the tests
|
||||
- `make testrace` to run tests in race mode
|
||||
- optional `make testappengine` to run tests with appengine
|
||||
|
||||
- Exceptions to the rules can be made if there's a compelling reason for doing so.
|
||||
|
|
1
vendor/google.golang.org/grpc/GOVERNANCE.md
generated
vendored
Normal file
|
@ -0,0 +1 @@
|
|||
This repository is governed by the gRPC organization's [governance rules](https://github.com/grpc/grpc-community/blob/master/governance.md).
|
224
vendor/google.golang.org/grpc/LICENSE
generated
vendored
|
@ -1,28 +1,202 @@
|
|||
Copyright 2014, Google Inc.
|
||||
All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above
|
||||
copyright notice, this list of conditions and the following disclaimer
|
||||
in the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the name of Google Inc. nor the names of its
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
      modifications, and in Source or Object form, provided that You
      meet the following conditions:

      (a) You must give any other recipients of the Work or
          Derivative Works a copy of this License; and

      (b) You must cause any modified files to carry prominent notices
          stating that You changed the files; and

      (c) You must retain, in the Source form of any Derivative Works
          that You distribute, all copyright, patent, trademark, and
          attribution notices from the Source form of the Work,
          excluding those notices that do not pertain to any part of
          the Derivative Works; and

      (d) If the Work includes a "NOTICE" text file as part of its
          distribution, then any Derivative Works that You distribute must
          include a readable copy of the attribution notices contained
          within such NOTICE file, excluding those notices that do not
          pertain to any part of the Derivative Works, in at least one
          of the following places: within a NOTICE text file distributed
          as part of the Derivative Works; within the Source form or
          documentation, if provided along with the Derivative Works; or,
          within a display generated by the Derivative Works, if and
          wherever such third-party notices normally appear. The contents
          of the NOTICE file are for informational purposes only and
          do not modify the License. You may add Your own attribution
          notices within Derivative Works that You distribute, alongside
          or as an addendum to the NOTICE text from the Work, provided
          that such additional attribution notices cannot be construed
          as modifying the License.

      You may add Your own copyright statement to Your modifications and
      may provide additional or different license terms and conditions
      for use, reproduction, or distribution of Your modifications, or
      for any such Derivative Works as a whole, provided Your use,
      reproduction, and distribution of the Work otherwise complies with
      the conditions stated in this License.

   5. Submission of Contributions. Unless You explicitly state otherwise,
      any Contribution intentionally submitted for inclusion in the Work
      by You to the Licensor shall be under the terms and conditions of
      this License, without any additional terms or conditions.
      Notwithstanding the above, nothing herein shall supersede or modify
      the terms of any separate license agreement you may have executed
      with Licensor regarding such Contributions.

   6. Trademarks. This License does not grant permission to use the trade
      names, trademarks, service marks, or product names of the Licensor,
      except as required for reasonable and customary use in describing the
      origin of the Work and reproducing the content of the NOTICE file.

   7. Disclaimer of Warranty. Unless required by applicable law or
      agreed to in writing, Licensor provides the Work (and each
      Contributor provides its Contributions) on an "AS IS" BASIS,
      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
      implied, including, without limitation, any warranties or conditions
      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
      PARTICULAR PURPOSE. You are solely responsible for determining the
      appropriateness of using or redistributing the Work and assume any
      risks associated with Your exercise of permissions under this License.

   8. Limitation of Liability. In no event and under no legal theory,
      whether in tort (including negligence), contract, or otherwise,
      unless required by applicable law (such as deliberate and grossly
      negligent acts) or agreed to in writing, shall any Contributor be
      liable to You for damages, including any direct, indirect, special,
      incidental, or consequential damages of any character arising as a
      result of this License or out of the use or inability to use the
      Work (including but not limited to damages for loss of goodwill,
      work stoppage, computer failure or malfunction, or any and all
      other commercial damages or losses), even if such Contributor
      has been advised of the possibility of such damages.

   9. Accepting Warranty or Additional Liability. While redistributing
      the Work or Derivative Works thereof, You may choose to offer,
      and charge a fee for, acceptance of support, warranty, indemnity,
      or other liability obligations and/or rights consistent with this
      License. However, in accepting such obligations, You may act only
      on Your own behalf and on Your sole responsibility, not on behalf
      of any other Contributor, and only if You agree to indemnify,
      defend, and hold each Contributor harmless for any liability
      incurred by, or claims asserted against, such Contributor by reason
      of your accepting any such warranty or additional liability.

   END OF TERMS AND CONDITIONS

   APPENDIX: How to apply the Apache License to your work.

      To apply the Apache License to your work, attach the following
      boilerplate notice, with the fields enclosed by brackets "[]"
      replaced with your own identifying information. (Don't include
      the brackets!) The text should be enclosed in the appropriate
      comment syntax for the file format. We also recommend that a
      file or class name and description of purpose be included on the
      same "printed page" as the copyright notice for easier
      identification within third-party archives.

   Copyright [yyyy] [name of copyright owner]

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
27 vendor/google.golang.org/grpc/MAINTAINERS.md generated vendored Normal file
@@ -0,0 +1,27 @@
This page lists all active maintainers of this repository. If you were a
maintainer and would like to add your name to the Emeritus list, please send us a
PR.

See [GOVERNANCE.md](https://github.com/grpc/grpc-community/blob/master/governance.md)
for governance guidelines and how to become a maintainer.
See [CONTRIBUTING.md](https://github.com/grpc/grpc-community/blob/master/CONTRIBUTING.md)
for general contribution guidelines.

## Maintainers (in alphabetical order)
- [canguler](https://github.com/canguler), Google LLC
- [cesarghali](https://github.com/cesarghali), Google LLC
- [dfawley](https://github.com/dfawley), Google LLC
- [easwars](https://github.com/easwars), Google LLC
- [jadekler](https://github.com/jadekler), Google LLC
- [menghanl](https://github.com/menghanl), Google LLC
- [srini100](https://github.com/srini100), Google LLC

## Emeritus Maintainers (in alphabetical order)
- [adelez](https://github.com/adelez), Google LLC
- [iamqizhao](https://github.com/iamqizhao), Google LLC
- [jtattermusch](https://github.com/jtattermusch), Google LLC
- [lyuxuan](https://github.com/lyuxuan), Google LLC
- [makmukhi](https://github.com/makmukhi), Google LLC
- [matt-kwong](https://github.com/matt-kwong), Google LLC
- [nicolasnoble](https://github.com/nicolasnoble), Google LLC
- [yongni](https://github.com/yongni), Google LLC
83 vendor/google.golang.org/grpc/Makefile generated vendored
@@ -1,50 +1,63 @@
.PHONY: \
|
||||
all \
|
||||
deps \
|
||||
updatedeps \
|
||||
testdeps \
|
||||
updatetestdeps \
|
||||
build \
|
||||
proto \
|
||||
test \
|
||||
testrace \
|
||||
clean \
|
||||
|
||||
all: test testrace
|
||||
|
||||
deps:
|
||||
go get -d -v google.golang.org/grpc/...
|
||||
|
||||
updatedeps:
|
||||
go get -d -v -u -f google.golang.org/grpc/...
|
||||
|
||||
testdeps:
|
||||
go get -d -v -t google.golang.org/grpc/...
|
||||
|
||||
updatetestdeps:
|
||||
go get -d -v -t -u -f google.golang.org/grpc/...
|
||||
all: vet test testrace
|
||||
|
||||
build: deps
|
||||
go build google.golang.org/grpc/...
|
||||
|
||||
clean:
|
||||
go clean -i google.golang.org/grpc/...
|
||||
|
||||
deps:
|
||||
go get -d -v google.golang.org/grpc/...
|
||||
|
||||
proto:
|
||||
@ if ! which protoc > /dev/null; then \
|
||||
echo "error: protoc not installed" >&2; \
|
||||
exit 1; \
|
||||
fi
|
||||
go get -v github.com/golang/protobuf/protoc-gen-go
|
||||
for file in $$(git ls-files '*.proto'); do \
|
||||
protoc -I $$(dirname $$file) --go_out=plugins=grpc:$$(dirname $$file) $$file; \
|
||||
done
|
||||
go generate google.golang.org/grpc/...
|
||||
|
||||
test: testdeps
|
||||
go test -v -cpu 1,4 google.golang.org/grpc/...
|
||||
go test -cpu 1,4 -timeout 7m google.golang.org/grpc/...
|
||||
|
||||
testsubmodule: testdeps
|
||||
cd security/advancedtls && go test -cpu 1,4 -timeout 7m google.golang.org/grpc/security/advancedtls/...
|
||||
|
||||
testappengine: testappenginedeps
|
||||
goapp test -cpu 1,4 -timeout 7m google.golang.org/grpc/...
|
||||
|
||||
testappenginedeps:
|
||||
goapp get -d -v -t -tags 'appengine appenginevm' google.golang.org/grpc/...
|
||||
|
||||
testdeps:
|
||||
go get -d -v -t google.golang.org/grpc/...
|
||||
|
||||
testrace: testdeps
|
||||
go test -v -race -cpu 1,4 google.golang.org/grpc/...
|
||||
go test -race -cpu 1,4 -timeout 7m google.golang.org/grpc/...
|
||||
|
||||
clean:
|
||||
go clean google.golang.org/grpc/...
|
||||
updatedeps:
|
||||
go get -d -v -u -f google.golang.org/grpc/...
|
||||
|
||||
coverage: testdeps
|
||||
./coverage.sh --coveralls
|
||||
updatetestdeps:
|
||||
go get -d -v -t -u -f google.golang.org/grpc/...
|
||||
|
||||
vet: vetdeps
|
||||
./vet.sh
|
||||
|
||||
vetdeps:
|
||||
./vet.sh -install
|
||||
|
||||
.PHONY: \
|
||||
all \
|
||||
build \
|
||||
clean \
|
||||
deps \
|
||||
proto \
|
||||
test \
|
||||
testappengine \
|
||||
testappenginedeps \
|
||||
testdeps \
|
||||
testrace \
|
||||
updatedeps \
|
||||
updatetestdeps \
|
||||
vet \
|
||||
vetdeps
|
||||
|
149 vendor/google.golang.org/grpc/README.md generated vendored
@@ -1,32 +1,141 @@
#gRPC-Go
|
||||
# gRPC-Go
|
||||
|
||||
[](https://travis-ci.org/grpc/grpc-go) [](https://godoc.org/google.golang.org/grpc)
|
||||
[](https://travis-ci.org/grpc/grpc-go)
|
||||
[][API]
|
||||
[](https://goreportcard.com/report/github.com/grpc/grpc-go)
|
||||
|
||||
The Go implementation of [gRPC](http://www.grpc.io/): A high performance, open source, general RPC framework that puts mobile and HTTP/2 first. For more information see the [gRPC Quick Start](http://www.grpc.io/docs/) guide.
|
||||
The [Go][] implementation of [gRPC][]: A high performance, open source, general
|
||||
RPC framework that puts mobile and HTTP/2 first. For more information see the
|
||||
[Go gRPC docs][], or jump directly into the [quick start][].
|
||||
|
||||
Installation
|
||||
------------
|
||||
## Prerequisites
|
||||
|
||||
To install this package, you need to install Go 1.4 or above and setup your Go workspace on your computer. The simplest way to install the library is to run:
|
||||
- **[Go][]**: any one of the **three latest major** [releases][go-releases].
|
||||
|
||||
```
|
||||
$ go get google.golang.org/grpc
|
||||
## Installation
|
||||
|
||||
With [Go module][] support (Go 1.11+), simply add the following import
|
||||
|
||||
```go
|
||||
import "google.golang.org/grpc"
|
||||
```
|
||||
|
||||
Prerequisites
|
||||
-------------
|
||||
to your code, and then `go [build|run|test]` will automatically fetch the
|
||||
necessary dependencies.
|
||||
|
||||
This requires Go 1.4 or above.
|
||||
Otherwise, to install the `grpc-go` package, run the following command:
|
||||
|
||||
Constraints
|
||||
-----------
|
||||
The grpc package should only depend on standard Go packages and a small number of exceptions. If your contribution introduces new dependencies which are NOT in the [list](http://godoc.org/google.golang.org/grpc?imports), you need a discussion with gRPC-Go authors and consultants.
|
||||
```console
|
||||
$ go get -u google.golang.org/grpc
|
||||
```
|
||||
|
||||
Documentation
|
||||
-------------
|
||||
See [API documentation](https://godoc.org/google.golang.org/grpc) for package and API descriptions and find examples in the [examples directory](examples/).
|
||||
> **Note:** If you are trying to access `grpc-go` from **China**, see the
|
||||
> [FAQ](#FAQ) below.
|
||||
|
||||
Status
|
||||
------
|
||||
Beta release
|
||||
## Learn more
|
||||
|
||||
- [Go gRPC docs][], which include a [quick start][] and [API
|
||||
reference][API] among other resources
|
||||
- [Low-level technical docs](Documentation) from this repository
|
||||
- [Performance benchmark][]
|
||||
- [Examples](examples)
|
||||
|
||||
## FAQ
|
||||
|
||||
### I/O Timeout Errors
|
||||
|
||||
The `golang.org` domain may be blocked from some countries. `go get` usually
|
||||
produces an error like the following when this happens:
|
||||
|
||||
```console
|
||||
$ go get -u google.golang.org/grpc
|
||||
package google.golang.org/grpc: unrecognized import path "google.golang.org/grpc" (https fetch: Get https://google.golang.org/grpc?go-get=1: dial tcp 216.239.37.1:443: i/o timeout)
|
||||
```
|
||||
|
||||
To build Go code, there are several options:
|
||||
|
||||
- Set up a VPN and access google.golang.org through that.
|
||||
|
||||
- Without Go module support: `git clone` the repo manually:
|
||||
|
||||
```sh
|
||||
git clone https://github.com/grpc/grpc-go.git $GOPATH/src/google.golang.org/grpc
|
||||
```
|
||||
|
||||
You will need to do the same for all of grpc's dependencies in `golang.org`,
|
||||
e.g. `golang.org/x/net`.
|
||||
|
||||
- With Go module support: it is possible to use the `replace` feature of `go
|
||||
mod` to create aliases for golang.org packages. In your project's directory:
|
||||
|
||||
```sh
|
||||
go mod edit -replace=google.golang.org/grpc=github.com/grpc/grpc-go@latest
|
||||
go mod tidy
|
||||
go mod vendor
|
||||
go build -mod=vendor
|
||||
```
|
||||
|
||||
Again, this will need to be done for all transitive dependencies hosted on
|
||||
golang.org as well. For details, refer to [golang/go issue #28652](https://github.com/golang/go/issues/28652).
|
||||
|
||||
### Compiling error, undefined: grpc.SupportPackageIsVersion
|
||||
|
||||
#### If you are using Go modules:
|
||||
|
||||
Ensure your gRPC-Go version is `require`d at the appropriate version in
|
||||
the same module containing the generated `.pb.go` files. For example,
|
||||
`SupportPackageIsVersion6` needs `v1.27.0`, so in your `go.mod` file:
|
||||
|
||||
```go
|
||||
module <your module name>
|
||||
|
||||
require (
|
||||
google.golang.org/grpc v1.27.0
|
||||
)
|
||||
```
|
||||
|
||||
#### If you are *not* using Go modules:
|
||||
|
||||
Update the `proto` package, gRPC package, and rebuild the `.proto` files:
|
||||
|
||||
```sh
|
||||
go get -u github.com/golang/protobuf/{proto,protoc-gen-go}
|
||||
go get -u google.golang.org/grpc
|
||||
protoc --go_out=plugins=grpc:. *.proto
|
||||
```
|
||||
|
||||
### How to turn on logging
|
||||
|
||||
The default logger is controlled by environment variables. Turn everything on
|
||||
like this:
|
||||
|
||||
```console
|
||||
$ export GRPC_GO_LOG_VERBOSITY_LEVEL=99
|
||||
$ export GRPC_GO_LOG_SEVERITY_LEVEL=info
|
||||
```
|
||||
|
||||
### The RPC failed with error `"code = Unavailable desc = transport is closing"`
|
||||
|
||||
This error means the connection the RPC is using was closed, and there are many
|
||||
possible reasons, including:
|
||||
1. mis-configured transport credentials, connection failed on handshaking
|
||||
1. bytes disrupted, possibly by a proxy in between
|
||||
1. server shutdown
|
||||
1. Keepalive parameters caused connection shutdown, for example if you have configured
|
||||
your server to terminate connections regularly to [trigger DNS lookups](https://github.com/grpc/grpc-go/issues/3170#issuecomment-552517779).
|
||||
If this is the case, you may want to increase your [MaxConnectionAgeGrace](https://pkg.go.dev/google.golang.org/grpc/keepalive?tab=doc#ServerParameters),
|
||||
to allow longer RPC calls to finish.
|
||||
|
||||
It can be tricky to debug this because the error happens on the client side but
|
||||
the root cause of the connection being closed is on the server side. Turn on
|
||||
logging on __both client and server__, and see if there are any transport
|
||||
errors.
|
||||
|
||||
[API]: https://grpc.io/docs/languages/go/api
|
||||
[Go]: https://golang.org
|
||||
[Go module]: https://github.com/golang/go/wiki/Modules
|
||||
[gRPC]: https://grpc.io
|
||||
[Go gRPC docs]: https://grpc.io/docs/languages/go
|
||||
[Performance benchmark]: https://performance-dot-grpc-testing.appspot.com/explore?dashboard=5652536396611584&widget=490377658&container=1286539696
|
||||
[quick start]: https://grpc.io/docs/languages/go/quickstart
|
||||
[go-releases]: https://golang.org/doc/devel/release.html
|
||||
76 vendor/google.golang.org/grpc/attributes/attributes.go generated vendored Normal file
@@ -0,0 +1,76 @@
/*
 *
 * Copyright 2019 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

// Package attributes defines a generic key/value store used in various gRPC
// components.
//
// All APIs in this package are EXPERIMENTAL.
package attributes

import "fmt"

// Attributes is an immutable struct for storing and retrieving generic
// key/value pairs. Keys must be hashable, and users should define their own
// types for keys.
type Attributes struct {
    m map[interface{}]interface{}
}

// New returns a new Attributes containing all key/value pairs in kvs. If the
// same key appears multiple times, the last value overwrites all previous
// values for that key. Panics if len(kvs) is not even.
func New(kvs ...interface{}) *Attributes {
    if len(kvs)%2 != 0 {
        panic(fmt.Sprintf("attributes.New called with unexpected input: len(kvs) = %v", len(kvs)))
    }
    a := &Attributes{m: make(map[interface{}]interface{}, len(kvs)/2)}
    for i := 0; i < len(kvs)/2; i++ {
        a.m[kvs[i*2]] = kvs[i*2+1]
    }
    return a
}

// WithValues returns a new Attributes containing all key/value pairs in a and
// kvs. Panics if len(kvs) is not even. If the same key appears multiple
// times, the last value overwrites all previous values for that key. To
// remove an existing key, use a nil value.
func (a *Attributes) WithValues(kvs ...interface{}) *Attributes {
    if a == nil {
        return New(kvs...)
    }
    if len(kvs)%2 != 0 {
        panic(fmt.Sprintf("attributes.New called with unexpected input: len(kvs) = %v", len(kvs)))
    }
    n := &Attributes{m: make(map[interface{}]interface{}, len(a.m)+len(kvs)/2)}
    for k, v := range a.m {
        n.m[k] = v
    }
    for i := 0; i < len(kvs)/2; i++ {
        n.m[kvs[i*2]] = kvs[i*2+1]
    }
    return n
}

// Value returns the value associated with these attributes for key, or nil if
// no value is associated with key.
func (a *Attributes) Value(key interface{}) interface{} {
    if a == nil {
        return nil
    }
    return a.m[key]
}
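[Editor's note — not part of the diff: a minimal usage sketch of the attributes package added above, using only the API shown (New, WithValues, Value). The key type and values are illustrative.]

package main

import (
    "fmt"

    "google.golang.org/grpc/attributes"
)

// addrKey is a caller-defined key type, as the package doc recommends
// (keys must be hashable and should use user-defined types).
type addrKey struct{}

func main() {
    a := attributes.New(addrKey{}, "10.0.0.1")
    // WithValues does not mutate a; it returns a new *Attributes.
    b := a.WithValues(addrKey{}, "10.0.0.2")

    fmt.Println(a.Value(addrKey{})) // 10.0.0.1
    fmt.Println(b.Value(addrKey{})) // 10.0.0.2
}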
58 vendor/google.golang.org/grpc/backoff.go generated vendored Normal file
@@ -0,0 +1,58 @@
/*
 *
 * Copyright 2017 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

// See internal/backoff package for the backoff implementation. This file is
// kept for the exported types and API backward compatibility.

package grpc

import (
    "time"

    "google.golang.org/grpc/backoff"
)

// DefaultBackoffConfig uses values specified for backoff in
// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md.
//
// Deprecated: use ConnectParams instead. Will be supported throughout 1.x.
var DefaultBackoffConfig = BackoffConfig{
    MaxDelay: 120 * time.Second,
}

// BackoffConfig defines the parameters for the default gRPC backoff strategy.
//
// Deprecated: use ConnectParams instead. Will be supported throughout 1.x.
type BackoffConfig struct {
    // MaxDelay is the upper bound of backoff delay.
    MaxDelay time.Duration
}

// ConnectParams defines the parameters for connecting and retrying. Users are
// encouraged to use this instead of the BackoffConfig type defined above. See
// here for more details:
// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md.
//
// This API is EXPERIMENTAL.
type ConnectParams struct {
    // Backoff specifies the configuration options for connection backoff.
    Backoff backoff.Config
    // MinConnectTimeout is the minimum amount of time we are willing to give a
    // connection to complete.
    MinConnectTimeout time.Duration
}
52 vendor/google.golang.org/grpc/backoff/backoff.go generated vendored Normal file
@@ -0,0 +1,52 @@
/*
 *
 * Copyright 2019 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

// Package backoff provides configuration options for backoff.
//
// More details can be found at:
// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md.
//
// All APIs in this package are experimental.
package backoff

import "time"

// Config defines the configuration options for backoff.
type Config struct {
    // BaseDelay is the amount of time to backoff after the first failure.
    BaseDelay time.Duration
    // Multiplier is the factor with which to multiply backoffs after a
    // failed retry. Should ideally be greater than 1.
    Multiplier float64
    // Jitter is the factor with which backoffs are randomized.
    Jitter float64
    // MaxDelay is the upper bound of backoff delay.
    MaxDelay time.Duration
}

// DefaultConfig is a backoff configuration with the default values specfied
// at https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md.
//
// This should be useful for callers who want to configure backoff with
// non-default values only for a subset of the options.
var DefaultConfig = Config{
    BaseDelay:  1.0 * time.Second,
    Multiplier: 1.6,
    Jitter:     0.2,
    MaxDelay:   120 * time.Second,
}
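[Editor's note — not part of the diff: a sketch of how a client could use the backoff types above, replacing the deprecated DefaultBackoffConfig with ConnectParams. grpc.WithConnectParams and grpc.WithInsecure are assumed to be available in this grpc-go version; the target address is illustrative.]

package main

import (
    "log"
    "time"

    "google.golang.org/grpc"
    "google.golang.org/grpc/backoff"
)

func main() {
    // Start from DefaultConfig and override only MaxDelay, as the
    // DefaultConfig doc comment suggests.
    bc := backoff.DefaultConfig
    bc.MaxDelay = 30 * time.Second

    conn, err := grpc.Dial("localhost:50051",
        grpc.WithInsecure(),
        grpc.WithConnectParams(grpc.ConnectParams{
            Backoff:           bc,
            MinConnectTimeout: 10 * time.Second,
        }),
    )
    if err != nil {
        log.Fatalf("dial failed: %v", err)
    }
    defer conn.Close()
}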
374 vendor/google.golang.org/grpc/balancer/balancer.go generated vendored Normal file
@@ -0,0 +1,374 @@
/*
|
||||
*
|
||||
* Copyright 2017 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
// Package balancer defines APIs for load balancing in gRPC.
|
||||
// All APIs in this package are experimental.
|
||||
package balancer
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"net"
|
||||
"strings"
|
||||
|
||||
"google.golang.org/grpc/connectivity"
|
||||
"google.golang.org/grpc/credentials"
|
||||
"google.golang.org/grpc/internal"
|
||||
"google.golang.org/grpc/metadata"
|
||||
"google.golang.org/grpc/resolver"
|
||||
"google.golang.org/grpc/serviceconfig"
|
||||
)
|
||||
|
||||
var (
|
||||
// m is a map from name to balancer builder.
|
||||
m = make(map[string]Builder)
|
||||
)
|
||||
|
||||
// Register registers the balancer builder to the balancer map. b.Name
|
||||
// (lowercased) will be used as the name registered with this builder. If the
|
||||
// Builder implements ConfigParser, ParseConfig will be called when new service
|
||||
// configs are received by the resolver, and the result will be provided to the
|
||||
// Balancer in UpdateClientConnState.
|
||||
//
|
||||
// NOTE: this function must only be called during initialization time (i.e. in
|
||||
// an init() function), and is not thread-safe. If multiple Balancers are
|
||||
// registered with the same name, the one registered last will take effect.
|
||||
func Register(b Builder) {
|
||||
m[strings.ToLower(b.Name())] = b
|
||||
}
|
||||
|
||||
// unregisterForTesting deletes the balancer with the given name from the
|
||||
// balancer map.
|
||||
//
|
||||
// This function is not thread-safe.
|
||||
func unregisterForTesting(name string) {
|
||||
delete(m, name)
|
||||
}
|
||||
|
||||
func init() {
|
||||
internal.BalancerUnregister = unregisterForTesting
|
||||
}
|
||||
|
||||
// Get returns the resolver builder registered with the given name.
|
||||
// Note that the compare is done in a case-insensitive fashion.
|
||||
// If no builder is register with the name, nil will be returned.
|
||||
func Get(name string) Builder {
|
||||
if b, ok := m[strings.ToLower(name)]; ok {
|
||||
return b
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// SubConn represents a gRPC sub connection.
|
||||
// Each sub connection contains a list of addresses. gRPC will
|
||||
// try to connect to them (in sequence), and stop trying the
|
||||
// remainder once one connection is successful.
|
||||
//
|
||||
// The reconnect backoff will be applied on the list, not a single address.
|
||||
// For example, try_on_all_addresses -> backoff -> try_on_all_addresses.
|
||||
//
|
||||
// All SubConns start in IDLE, and will not try to connect. To trigger
|
||||
// the connecting, Balancers must call Connect.
|
||||
// When the connection encounters an error, it will reconnect immediately.
|
||||
// When the connection becomes IDLE, it will not reconnect unless Connect is
|
||||
// called.
|
||||
//
|
||||
// This interface is to be implemented by gRPC. Users should not need a
|
||||
// brand new implementation of this interface. For the situations like
|
||||
// testing, the new implementation should embed this interface. This allows
|
||||
// gRPC to add new methods to this interface.
|
||||
type SubConn interface {
|
||||
// UpdateAddresses updates the addresses used in this SubConn.
|
||||
// gRPC checks if currently-connected address is still in the new list.
|
||||
// If it's in the list, the connection will be kept.
|
||||
// If it's not in the list, the connection will gracefully closed, and
|
||||
// a new connection will be created.
|
||||
//
|
||||
// This will trigger a state transition for the SubConn.
|
||||
UpdateAddresses([]resolver.Address)
|
||||
// Connect starts the connecting for this SubConn.
|
||||
Connect()
|
||||
}
|
||||
|
||||
// NewSubConnOptions contains options to create new SubConn.
|
||||
type NewSubConnOptions struct {
|
||||
// CredsBundle is the credentials bundle that will be used in the created
|
||||
// SubConn. If it's nil, the original creds from grpc DialOptions will be
|
||||
// used.
|
||||
//
|
||||
// Deprecated: Use the Attributes field in resolver.Address to pass
|
||||
// arbitrary data to the credential handshaker.
|
||||
CredsBundle credentials.Bundle
|
||||
// HealthCheckEnabled indicates whether health check service should be
|
||||
// enabled on this SubConn
|
||||
HealthCheckEnabled bool
|
||||
}
|
||||
|
||||
// State contains the balancer's state relevant to the gRPC ClientConn.
|
||||
type State struct {
|
||||
// State contains the connectivity state of the balancer, which is used to
|
||||
// determine the state of the ClientConn.
|
||||
ConnectivityState connectivity.State
|
||||
// Picker is used to choose connections (SubConns) for RPCs.
|
||||
Picker Picker
|
||||
}
|
||||
|
||||
// ClientConn represents a gRPC ClientConn.
|
||||
//
|
||||
// This interface is to be implemented by gRPC. Users should not need a
|
||||
// brand new implementation of this interface. For the situations like
|
||||
// testing, the new implementation should embed this interface. This allows
|
||||
// gRPC to add new methods to this interface.
|
||||
type ClientConn interface {
|
||||
// NewSubConn is called by balancer to create a new SubConn.
|
||||
// It doesn't block and wait for the connections to be established.
|
||||
// Behaviors of the SubConn can be controlled by options.
|
||||
NewSubConn([]resolver.Address, NewSubConnOptions) (SubConn, error)
|
||||
// RemoveSubConn removes the SubConn from ClientConn.
|
||||
// The SubConn will be shutdown.
|
||||
RemoveSubConn(SubConn)
|
||||
|
||||
// UpdateState notifies gRPC that the balancer's internal state has
|
||||
// changed.
|
||||
//
|
||||
// gRPC will update the connectivity state of the ClientConn, and will call
|
||||
// Pick on the new Picker to pick new SubConns.
|
||||
UpdateState(State)
|
||||
|
||||
// ResolveNow is called by balancer to notify gRPC to do a name resolving.
|
||||
ResolveNow(resolver.ResolveNowOptions)
|
||||
|
||||
// Target returns the dial target for this ClientConn.
|
||||
//
|
||||
// Deprecated: Use the Target field in the BuildOptions instead.
|
||||
Target() string
|
||||
}
|
||||
|
||||
// BuildOptions contains additional information for Build.
|
||||
type BuildOptions struct {
|
||||
// DialCreds is the transport credential the Balancer implementation can
|
||||
// use to dial to a remote load balancer server. The Balancer implementations
|
||||
// can ignore this if it does not need to talk to another party securely.
|
||||
DialCreds credentials.TransportCredentials
|
||||
// CredsBundle is the credentials bundle that the Balancer can use.
|
||||
CredsBundle credentials.Bundle
|
||||
// Dialer is the custom dialer the Balancer implementation can use to dial
|
||||
// to a remote load balancer server. The Balancer implementations
|
||||
// can ignore this if it doesn't need to talk to remote balancer.
|
||||
Dialer func(context.Context, string) (net.Conn, error)
|
||||
// ChannelzParentID is the entity parent's channelz unique identification number.
|
||||
ChannelzParentID int64
|
||||
// Target contains the parsed address info of the dial target. It is the same resolver.Target as
|
||||
// passed to the resolver.
|
||||
// See the documentation for the resolver.Target type for details about what it contains.
|
||||
Target resolver.Target
|
||||
}
|
||||
|
||||
// Builder creates a balancer.
|
||||
type Builder interface {
|
||||
// Build creates a new balancer with the ClientConn.
|
||||
Build(cc ClientConn, opts BuildOptions) Balancer
|
||||
// Name returns the name of balancers built by this builder.
|
||||
// It will be used to pick balancers (for example in service config).
|
||||
Name() string
|
||||
}
|
||||
|
||||
// ConfigParser parses load balancer configs.
|
||||
type ConfigParser interface {
|
||||
// ParseConfig parses the JSON load balancer config provided into an
|
||||
// internal form or returns an error if the config is invalid. For future
|
||||
// compatibility reasons, unknown fields in the config should be ignored.
|
||||
ParseConfig(LoadBalancingConfigJSON json.RawMessage) (serviceconfig.LoadBalancingConfig, error)
|
||||
}
|
||||
|
||||
// PickInfo contains additional information for the Pick operation.
|
||||
type PickInfo struct {
|
||||
// FullMethodName is the method name that NewClientStream() is called
|
||||
// with. The canonical format is /service/Method.
|
||||
FullMethodName string
|
||||
// Ctx is the RPC's context, and may contain relevant RPC-level information
|
||||
// like the outgoing header metadata.
|
||||
Ctx context.Context
|
||||
}
|
||||
|
||||
// DoneInfo contains additional information for done.
|
||||
type DoneInfo struct {
|
||||
// Err is the rpc error the RPC finished with. It could be nil.
|
||||
Err error
|
||||
// Trailer contains the metadata from the RPC's trailer, if present.
|
||||
Trailer metadata.MD
|
||||
// BytesSent indicates if any bytes have been sent to the server.
|
||||
BytesSent bool
|
||||
// BytesReceived indicates if any byte has been received from the server.
|
||||
BytesReceived bool
|
||||
// ServerLoad is the load received from server. It's usually sent as part of
|
||||
// trailing metadata.
|
||||
//
|
||||
// The only supported type now is *orca_v1.LoadReport.
|
||||
ServerLoad interface{}
|
||||
}
|
||||
|
||||
var (
|
||||
// ErrNoSubConnAvailable indicates no SubConn is available for pick().
|
||||
// gRPC will block the RPC until a new picker is available via UpdateState().
|
||||
ErrNoSubConnAvailable = errors.New("no SubConn is available")
|
||||
// ErrTransientFailure indicates all SubConns are in TransientFailure.
|
||||
// WaitForReady RPCs will block, non-WaitForReady RPCs will fail.
|
||||
//
|
||||
// Deprecated: return an appropriate error based on the last resolution or
|
||||
// connection attempt instead. The behavior is the same for any non-gRPC
|
||||
// status error.
|
||||
ErrTransientFailure = errors.New("all SubConns are in TransientFailure")
|
||||
)
|
||||
|
||||
// PickResult contains information related to a connection chosen for an RPC.
|
||||
type PickResult struct {
|
||||
// SubConn is the connection to use for this pick, if its state is Ready.
|
||||
// If the state is not Ready, gRPC will block the RPC until a new Picker is
|
||||
// provided by the balancer (using ClientConn.UpdateState). The SubConn
|
||||
// must be one returned by ClientConn.NewSubConn.
|
||||
SubConn SubConn
|
||||
|
||||
// Done is called when the RPC is completed. If the SubConn is not ready,
|
||||
// this will be called with a nil parameter. If the SubConn is not a valid
|
||||
// type, Done may not be called. May be nil if the balancer does not wish
|
||||
// to be notified when the RPC completes.
|
||||
Done func(DoneInfo)
|
||||
}
|
||||
|
||||
// TransientFailureError returns e. It exists for backward compatibility and
|
||||
// will be deleted soon.
|
||||
//
|
||||
// Deprecated: no longer necessary, picker errors are treated this way by
|
||||
// default.
|
||||
func TransientFailureError(e error) error { return e }
|
||||
|
||||
// Picker is used by gRPC to pick a SubConn to send an RPC.
|
||||
// Balancer is expected to generate a new picker from its snapshot every time its
|
||||
// internal state has changed.
|
||||
//
|
||||
// The pickers used by gRPC can be updated by ClientConn.UpdateState().
|
||||
type Picker interface {
|
||||
// Pick returns the connection to use for this RPC and related information.
|
||||
//
|
||||
// Pick should not block. If the balancer needs to do I/O or any blocking
|
||||
// or time-consuming work to service this call, it should return
|
||||
// ErrNoSubConnAvailable, and the Pick call will be repeated by gRPC when
|
||||
// the Picker is updated (using ClientConn.UpdateState).
|
||||
//
|
||||
// If an error is returned:
|
||||
//
|
||||
// - If the error is ErrNoSubConnAvailable, gRPC will block until a new
|
||||
// Picker is provided by the balancer (using ClientConn.UpdateState).
|
||||
//
|
||||
// - If the error is a status error (implemented by the grpc/status
|
||||
// package), gRPC will terminate the RPC with the code and message
|
||||
// provided.
|
||||
//
|
||||
// - For all other errors, wait for ready RPCs will wait, but non-wait for
|
||||
// ready RPCs will be terminated with this error's Error() string and
|
||||
// status code Unavailable.
|
||||
Pick(info PickInfo) (PickResult, error)
|
||||
}
|
||||
|
||||
// Balancer takes input from gRPC, manages SubConns, and collects and aggregates
|
||||
// the connectivity states.
|
||||
//
|
||||
// It also generates and updates the Picker used by gRPC to pick SubConns for RPCs.
|
||||
//
|
||||
// UpdateClientConnState, ResolverError, UpdateSubConnState, and Close are
|
||||
// guaranteed to be called synchronously from the same goroutine. There's no
|
||||
// guarantee on picker.Pick, it may be called anytime.
|
||||
type Balancer interface {
|
||||
// UpdateClientConnState is called by gRPC when the state of the ClientConn
|
||||
// changes. If the error returned is ErrBadResolverState, the ClientConn
|
||||
// will begin calling ResolveNow on the active name resolver with
|
||||
// exponential backoff until a subsequent call to UpdateClientConnState
|
||||
// returns a nil error. Any other errors are currently ignored.
|
||||
UpdateClientConnState(ClientConnState) error
|
||||
// ResolverError is called by gRPC when the name resolver reports an error.
|
||||
ResolverError(error)
|
||||
// UpdateSubConnState is called by gRPC when the state of a SubConn
|
||||
// changes.
|
||||
UpdateSubConnState(SubConn, SubConnState)
|
||||
// Close closes the balancer. The balancer is not required to call
|
||||
// ClientConn.RemoveSubConn for its existing SubConns.
|
||||
Close()
|
||||
}
|
||||
|
||||
// SubConnState describes the state of a SubConn.
|
||||
type SubConnState struct {
|
||||
// ConnectivityState is the connectivity state of the SubConn.
|
||||
ConnectivityState connectivity.State
|
||||
// ConnectionError is set if the ConnectivityState is TransientFailure,
|
||||
// describing the reason the SubConn failed. Otherwise, it is nil.
|
||||
ConnectionError error
|
||||
}
|
||||
|
||||
// ClientConnState describes the state of a ClientConn relevant to the
|
||||
// balancer.
|
||||
type ClientConnState struct {
|
||||
ResolverState resolver.State
|
||||
// The parsed load balancing configuration returned by the builder's
|
||||
// ParseConfig method, if implemented.
|
||||
BalancerConfig serviceconfig.LoadBalancingConfig
|
||||
}
|
||||
|
||||
// ErrBadResolverState may be returned by UpdateClientConnState to indicate a
|
||||
// problem with the provided name resolver data.
|
||||
var ErrBadResolverState = errors.New("bad resolver state")
|
||||
|
||||
// ConnectivityStateEvaluator takes the connectivity states of multiple SubConns
|
||||
// and returns one aggregated connectivity state.
|
||||
//
|
||||
// It's not thread safe.
|
||||
type ConnectivityStateEvaluator struct {
|
||||
numReady uint64 // Number of addrConns in ready state.
|
||||
numConnecting uint64 // Number of addrConns in connecting state.
|
||||
}
|
||||
|
||||
// RecordTransition records state change happening in subConn and based on that
|
||||
// it evaluates what aggregated state should be.
|
||||
//
|
||||
// - If at least one SubConn in Ready, the aggregated state is Ready;
|
||||
// - Else if at least one SubConn in Connecting, the aggregated state is Connecting;
|
||||
// - Else the aggregated state is TransientFailure.
|
||||
//
|
||||
// Idle and Shutdown are not considered.
|
||||
func (cse *ConnectivityStateEvaluator) RecordTransition(oldState, newState connectivity.State) connectivity.State {
|
||||
// Update counters.
|
||||
for idx, state := range []connectivity.State{oldState, newState} {
|
||||
updateVal := 2*uint64(idx) - 1 // -1 for oldState and +1 for new.
|
||||
switch state {
|
||||
case connectivity.Ready:
|
||||
cse.numReady += updateVal
|
||||
case connectivity.Connecting:
|
||||
cse.numConnecting += updateVal
|
||||
}
|
||||
}
|
||||
|
||||
// Evaluate.
|
||||
if cse.numReady > 0 {
|
||||
return connectivity.Ready
|
||||
}
|
||||
if cse.numConnecting > 0 {
|
||||
return connectivity.Connecting
|
||||
}
|
||||
return connectivity.TransientFailure
|
||||
}
|
237 vendor/google.golang.org/grpc/balancer/base/balancer.go generated vendored Normal file
@@ -0,0 +1,237 @@
/*
|
||||
*
|
||||
* Copyright 2017 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
package base
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
"google.golang.org/grpc/balancer"
|
||||
"google.golang.org/grpc/connectivity"
|
||||
"google.golang.org/grpc/grpclog"
|
||||
"google.golang.org/grpc/resolver"
|
||||
)
|
||||
|
||||
var logger = grpclog.Component("balancer")
|
||||
|
||||
type baseBuilder struct {
|
||||
name string
|
||||
pickerBuilder PickerBuilder
|
||||
config Config
|
||||
}
|
||||
|
||||
func (bb *baseBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) balancer.Balancer {
|
||||
bal := &baseBalancer{
|
||||
cc: cc,
|
||||
pickerBuilder: bb.pickerBuilder,
|
||||
|
||||
subConns: make(map[resolver.Address]balancer.SubConn),
|
||||
scStates: make(map[balancer.SubConn]connectivity.State),
|
||||
csEvltr: &balancer.ConnectivityStateEvaluator{},
|
||||
config: bb.config,
|
||||
}
|
||||
// Initialize picker to a picker that always returns
|
||||
// ErrNoSubConnAvailable, because when state of a SubConn changes, we
|
||||
// may call UpdateState with this picker.
|
||||
bal.picker = NewErrPicker(balancer.ErrNoSubConnAvailable)
|
||||
return bal
|
||||
}
|
||||
|
||||
func (bb *baseBuilder) Name() string {
|
||||
return bb.name
|
||||
}
|
||||
|
||||
type baseBalancer struct {
|
||||
cc balancer.ClientConn
|
||||
pickerBuilder PickerBuilder
|
||||
|
||||
csEvltr *balancer.ConnectivityStateEvaluator
|
||||
state connectivity.State
|
||||
|
||||
subConns map[resolver.Address]balancer.SubConn
|
||||
scStates map[balancer.SubConn]connectivity.State
|
||||
picker balancer.Picker
|
||||
config Config
|
||||
|
||||
resolverErr error // the last error reported by the resolver; cleared on successful resolution
|
||||
connErr error // the last connection error; cleared upon leaving TransientFailure
|
||||
}
|
||||
|
||||
func (b *baseBalancer) ResolverError(err error) {
|
||||
b.resolverErr = err
|
||||
if len(b.subConns) == 0 {
|
||||
b.state = connectivity.TransientFailure
|
||||
}
|
||||
|
||||
if b.state != connectivity.TransientFailure {
|
||||
// The picker will not change since the balancer does not currently
|
||||
// report an error.
|
||||
return
|
||||
}
|
||||
b.regeneratePicker()
|
||||
b.cc.UpdateState(balancer.State{
|
||||
ConnectivityState: b.state,
|
||||
Picker: b.picker,
|
||||
})
|
||||
}
|
||||
|
||||
func (b *baseBalancer) UpdateClientConnState(s balancer.ClientConnState) error {
|
||||
// TODO: handle s.ResolverState.ServiceConfig?
|
||||
if logger.V(2) {
|
||||
logger.Info("base.baseBalancer: got new ClientConn state: ", s)
|
||||
}
|
||||
// Successful resolution; clear resolver error and ensure we return nil.
|
||||
b.resolverErr = nil
|
||||
// addrsSet is the set converted from addrs, it's used for quick lookup of an address.
|
||||
addrsSet := make(map[resolver.Address]struct{})
|
||||
for _, a := range s.ResolverState.Addresses {
|
||||
addrsSet[a] = struct{}{}
|
||||
if _, ok := b.subConns[a]; !ok {
|
||||
// a is a new address (not existing in b.subConns).
|
||||
sc, err := b.cc.NewSubConn([]resolver.Address{a}, balancer.NewSubConnOptions{HealthCheckEnabled: b.config.HealthCheck})
|
||||
if err != nil {
|
||||
logger.Warningf("base.baseBalancer: failed to create new SubConn: %v", err)
|
||||
continue
|
||||
}
|
||||
b.subConns[a] = sc
|
||||
b.scStates[sc] = connectivity.Idle
|
||||
sc.Connect()
|
||||
}
|
||||
}
|
||||
for a, sc := range b.subConns {
|
||||
// a was removed by resolver.
|
||||
if _, ok := addrsSet[a]; !ok {
|
||||
b.cc.RemoveSubConn(sc)
|
||||
delete(b.subConns, a)
|
||||
// Keep the state of this sc in b.scStates until sc's state becomes Shutdown.
|
||||
// The entry will be deleted in UpdateSubConnState.
|
||||
}
|
||||
}
|
||||
// If resolver state contains no addresses, return an error so ClientConn
|
||||
// will trigger re-resolve. Also records this as an resolver error, so when
|
||||
// the overall state turns transient failure, the error message will have
|
||||
// the zero address information.
|
||||
if len(s.ResolverState.Addresses) == 0 {
|
||||
b.ResolverError(errors.New("produced zero addresses"))
|
||||
return balancer.ErrBadResolverState
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// mergeErrors builds an error from the last connection error and the last
|
||||
// resolver error. Must only be called if b.state is TransientFailure.
|
||||
func (b *baseBalancer) mergeErrors() error {
|
||||
// connErr must always be non-nil unless there are no SubConns, in which
|
||||
// case resolverErr must be non-nil.
|
||||
if b.connErr == nil {
|
||||
return fmt.Errorf("last resolver error: %v", b.resolverErr)
|
||||
}
|
||||
if b.resolverErr == nil {
|
||||
return fmt.Errorf("last connection error: %v", b.connErr)
|
||||
}
|
||||
return fmt.Errorf("last connection error: %v; last resolver error: %v", b.connErr, b.resolverErr)
|
||||
}
|
||||
|
||||
// regeneratePicker takes a snapshot of the balancer, and generates a picker
|
||||
// from it. The picker is
|
||||
// - errPicker if the balancer is in TransientFailure,
|
||||
// - built by the pickerBuilder with all READY SubConns otherwise.
|
||||
func (b *baseBalancer) regeneratePicker() {
|
||||
if b.state == connectivity.TransientFailure {
|
||||
b.picker = NewErrPicker(b.mergeErrors())
|
||||
return
|
||||
}
|
||||
readySCs := make(map[balancer.SubConn]SubConnInfo)
|
||||
|
||||
// Filter out all ready SCs from full subConn map.
|
||||
for addr, sc := range b.subConns {
|
||||
if st, ok := b.scStates[sc]; ok && st == connectivity.Ready {
|
||||
readySCs[sc] = SubConnInfo{Address: addr}
|
||||
}
|
||||
}
|
||||
b.picker = b.pickerBuilder.Build(PickerBuildInfo{ReadySCs: readySCs})
|
||||
}
|
||||
|
||||
func (b *baseBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) {
|
||||
s := state.ConnectivityState
|
||||
if logger.V(2) {
|
||||
logger.Infof("base.baseBalancer: handle SubConn state change: %p, %v", sc, s)
|
||||
}
|
||||
oldS, ok := b.scStates[sc]
|
||||
if !ok {
|
||||
if logger.V(2) {
|
||||
logger.Infof("base.baseBalancer: got state changes for an unknown SubConn: %p, %v", sc, s)
|
||||
}
|
||||
return
|
||||
}
|
||||
if oldS == connectivity.TransientFailure && s == connectivity.Connecting {
|
||||
// Once a subconn enters TRANSIENT_FAILURE, ignore subsequent
|
||||
// CONNECTING transitions to prevent the aggregated state from being
|
||||
// always CONNECTING when many backends exist but are all down.
|
||||
return
|
||||
}
|
||||
b.scStates[sc] = s
|
||||
switch s {
|
||||
case connectivity.Idle:
|
||||
sc.Connect()
|
||||
case connectivity.Shutdown:
|
||||
// When an address was removed by resolver, b called RemoveSubConn but
|
||||
// kept the sc's state in scStates. Remove state for this sc here.
|
||||
delete(b.scStates, sc)
|
||||
case connectivity.TransientFailure:
|
||||
// Save error to be reported via picker.
|
||||
b.connErr = state.ConnectionError
|
||||
}
|
||||
|
||||
b.state = b.csEvltr.RecordTransition(oldS, s)
|
||||
|
||||
// Regenerate picker when one of the following happens:
|
||||
// - this sc entered or left ready
|
||||
// - the aggregated state of balancer is TransientFailure
|
||||
// (may need to update error message)
|
||||
if (s == connectivity.Ready) != (oldS == connectivity.Ready) ||
|
||||
b.state == connectivity.TransientFailure {
|
||||
b.regeneratePicker()
|
||||
}
|
||||
|
||||
b.cc.UpdateState(balancer.State{ConnectivityState: b.state, Picker: b.picker})
|
||||
}
|
||||
|
||||
// Close is a nop because base balancer doesn't have internal state to clean up,
|
||||
// and it doesn't need to call RemoveSubConn for the SubConns.
|
||||
func (b *baseBalancer) Close() {
|
||||
}
|
||||
|
||||
// NewErrPicker returns a Picker that always returns err on Pick().
|
||||
func NewErrPicker(err error) balancer.Picker {
|
||||
return &errPicker{err: err}
|
||||
}
|
||||
|
||||
// NewErrPickerV2 is temporarily defined for backward compatibility reasons.
|
||||
//
|
||||
// Deprecated: use NewErrPicker instead.
|
||||
var NewErrPickerV2 = NewErrPicker
|
||||
|
||||
type errPicker struct {
|
||||
err error // Pick() always returns this err.
|
||||
}
|
||||
|
||||
func (p *errPicker) Pick(info balancer.PickInfo) (balancer.PickResult, error) {
|
||||
return balancer.PickResult{}, p.err
|
||||
}
71 vendor/google.golang.org/grpc/balancer/base/base.go generated vendored Normal file
@@ -0,0 +1,71 @@
/*
 *
 * Copyright 2017 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

// Package base defines a balancer base that can be used to build balancers with
// different picking algorithms.
//
// The base balancer creates a new SubConn for each resolved address. The
// provided picker will only be notified about READY SubConns.
//
// This package is the base of round_robin balancer, its purpose is to be used
// to build round_robin like balancers with complex picking algorithms.
// Balancers with more complicated logic should try to implement a balancer
// builder from scratch.
//
// All APIs in this package are experimental.
package base

import (
    "google.golang.org/grpc/balancer"
    "google.golang.org/grpc/resolver"
)

// PickerBuilder creates balancer.Picker.
type PickerBuilder interface {
    // Build returns a picker that will be used by gRPC to pick a SubConn.
    Build(info PickerBuildInfo) balancer.Picker
}

// PickerBuildInfo contains information needed by the picker builder to
// construct a picker.
type PickerBuildInfo struct {
    // ReadySCs is a map from all ready SubConns to the Addresses used to
    // create them.
    ReadySCs map[balancer.SubConn]SubConnInfo
}

// SubConnInfo contains information about a SubConn created by the base
// balancer.
type SubConnInfo struct {
    Address resolver.Address // the address used to create this SubConn
}

// Config contains the config info about the base balancer builder.
type Config struct {
    // HealthCheck indicates whether health checking should be enabled for this specific balancer.
    HealthCheck bool
}

// NewBalancerBuilder returns a base balancer builder configured by the provided config.
func NewBalancerBuilder(name string, pb PickerBuilder, config Config) balancer.Builder {
    return &baseBuilder{
        name:          name,
        pickerBuilder: pb,
        config:        config,
    }
}
51
vendor/google.golang.org/grpc/balancer/grpclb/state/state.go
generated
vendored
Normal file
51
vendor/google.golang.org/grpc/balancer/grpclb/state/state.go
generated
vendored
Normal file
|
@ -0,0 +1,51 @@
|
|||
/*
|
||||
*
|
||||
* Copyright 2020 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
// Package state declares grpclb types to be set by resolvers wishing to pass
|
||||
// information to grpclb via resolver.State Attributes.
|
||||
package state
|
||||
|
||||
import (
|
||||
"google.golang.org/grpc/resolver"
|
||||
)
|
||||
|
||||
// keyType is the key to use for storing State in Attributes.
|
||||
type keyType string
|
||||
|
||||
const key = keyType("grpc.grpclb.state")
|
||||
|
||||
// State contains gRPCLB-relevant data passed from the name resolver.
|
||||
type State struct {
|
||||
// BalancerAddresses contains the remote load balancer address(es). If
|
||||
// set, overrides any resolver-provided addresses with Type of GRPCLB.
|
||||
BalancerAddresses []resolver.Address
|
||||
}
|
||||
|
||||
// Set returns a copy of the provided state with attributes containing s. s's
|
||||
// data should not be mutated after calling Set.
|
||||
func Set(state resolver.State, s *State) resolver.State {
|
||||
state.Attributes = state.Attributes.WithValues(key, s)
|
||||
return state
|
||||
}
|
||||
|
||||
// Get returns the grpclb State in the resolver.State, or nil if not present.
|
||||
// The returned data should not be mutated.
|
||||
func Get(state resolver.State) *State {
|
||||
s, _ := state.Attributes.Value(key).(*State)
|
||||
return s
|
||||
}
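[Editor's note — not part of the diff: a sketch of how a name resolver might use the state package above to hand balancer addresses to grpclb. The helper name and address slices are hypothetical.]

package example

import (
    "google.golang.org/grpc/balancer/grpclb/state"
    "google.golang.org/grpc/resolver"
)

// reportAddrs is a hypothetical resolver helper: backends become regular
// addresses, while balancer addresses ride along in the grpclb state attribute.
func reportAddrs(cc resolver.ClientConn, backends, balancers []resolver.Address) {
    s := resolver.State{Addresses: backends}
    s = state.Set(s, &state.State{BalancerAddresses: balancers})
    cc.UpdateState(s)
}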
83 vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go generated vendored Normal file
@@ -0,0 +1,83 @@
/*
 *
 * Copyright 2017 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

// Package roundrobin defines a roundrobin balancer. Roundrobin balancer is
// installed as one of the default balancers in gRPC, users don't need to
// explicitly install this balancer.
package roundrobin

import (
    "sync"

    "google.golang.org/grpc/balancer"
    "google.golang.org/grpc/balancer/base"
    "google.golang.org/grpc/grpclog"
    "google.golang.org/grpc/internal/grpcrand"
)

// Name is the name of round_robin balancer.
const Name = "round_robin"

var logger = grpclog.Component("roundrobin")

// newBuilder creates a new roundrobin balancer builder.
func newBuilder() balancer.Builder {
    return base.NewBalancerBuilder(Name, &rrPickerBuilder{}, base.Config{HealthCheck: true})
}

func init() {
    balancer.Register(newBuilder())
}

type rrPickerBuilder struct{}

func (*rrPickerBuilder) Build(info base.PickerBuildInfo) balancer.Picker {
    logger.Infof("roundrobinPicker: newPicker called with info: %v", info)
    if len(info.ReadySCs) == 0 {
        return base.NewErrPicker(balancer.ErrNoSubConnAvailable)
    }
    var scs []balancer.SubConn
    for sc := range info.ReadySCs {
        scs = append(scs, sc)
    }
    return &rrPicker{
        subConns: scs,
        // Start at a random index, as the same RR balancer rebuilds a new
        // picker when SubConn states change, and we don't want to apply excess
        // load to the first server in the list.
        next: grpcrand.Intn(len(scs)),
    }
}

type rrPicker struct {
    // subConns is the snapshot of the roundrobin balancer when this picker was
    // created. The slice is immutable. Each Get() will do a round robin
    // selection from it and return the selected SubConn.
    subConns []balancer.SubConn

    mu   sync.Mutex
    next int
}

func (p *rrPicker) Pick(balancer.PickInfo) (balancer.PickResult, error) {
    p.mu.Lock()
    sc := p.subConns[p.next]
    p.next = (p.next + 1) % len(p.subConns)
    p.mu.Unlock()
    return balancer.PickResult{SubConn: sc}, nil
}
246 vendor/google.golang.org/grpc/balancer_conn_wrappers.go generated vendored Normal file
@@ -0,0 +1,246 @@
/*
 *
 * Copyright 2017 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

package grpc

import (
	"fmt"
	"sync"

	"google.golang.org/grpc/balancer"
	"google.golang.org/grpc/connectivity"
	"google.golang.org/grpc/internal/buffer"
	"google.golang.org/grpc/internal/channelz"
	"google.golang.org/grpc/internal/grpcsync"
	"google.golang.org/grpc/resolver"
)

// scStateUpdate contains the subConn and the new state it changed to.
type scStateUpdate struct {
	sc    balancer.SubConn
	state connectivity.State
	err   error
}

// ccBalancerWrapper is a wrapper on top of cc for balancers.
// It implements balancer.ClientConn interface.
type ccBalancerWrapper struct {
	cc         *ClientConn
	balancerMu sync.Mutex // synchronizes calls to the balancer
	balancer   balancer.Balancer
	scBuffer   *buffer.Unbounded
	done       *grpcsync.Event

	mu       sync.Mutex
	subConns map[*acBalancerWrapper]struct{}
}

func newCCBalancerWrapper(cc *ClientConn, b balancer.Builder, bopts balancer.BuildOptions) *ccBalancerWrapper {
	ccb := &ccBalancerWrapper{
		cc:       cc,
		scBuffer: buffer.NewUnbounded(),
		done:     grpcsync.NewEvent(),
		subConns: make(map[*acBalancerWrapper]struct{}),
	}
	go ccb.watcher()
	ccb.balancer = b.Build(ccb, bopts)
	return ccb
}

// watcher balancer functions sequentially, so the balancer can be implemented
// lock-free.
func (ccb *ccBalancerWrapper) watcher() {
	for {
		select {
		case t := <-ccb.scBuffer.Get():
			ccb.scBuffer.Load()
			if ccb.done.HasFired() {
				break
			}
			ccb.balancerMu.Lock()
			su := t.(*scStateUpdate)
			ccb.balancer.UpdateSubConnState(su.sc, balancer.SubConnState{ConnectivityState: su.state, ConnectionError: su.err})
			ccb.balancerMu.Unlock()
		case <-ccb.done.Done():
		}

		if ccb.done.HasFired() {
			ccb.balancer.Close()
			ccb.mu.Lock()
			scs := ccb.subConns
			ccb.subConns = nil
			ccb.mu.Unlock()
			for acbw := range scs {
				ccb.cc.removeAddrConn(acbw.getAddrConn(), errConnDrain)
			}
			ccb.UpdateState(balancer.State{ConnectivityState: connectivity.Connecting, Picker: nil})
			return
		}
	}
}

func (ccb *ccBalancerWrapper) close() {
	ccb.done.Fire()
}

func (ccb *ccBalancerWrapper) handleSubConnStateChange(sc balancer.SubConn, s connectivity.State, err error) {
	// When updating addresses for a SubConn, if the address in use is not in
	// the new addresses, the old ac will be tearDown() and a new ac will be
	// created. tearDown() generates a state change with Shutdown state, we
	// don't want the balancer to receive this state change. So before
	// tearDown() on the old ac, ac.acbw (acWrapper) will be set to nil, and
	// this function will be called with (nil, Shutdown). We don't need to call
	// balancer method in this case.
	if sc == nil {
		return
	}
	ccb.scBuffer.Put(&scStateUpdate{
		sc:    sc,
		state: s,
		err:   err,
	})
}

func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnState) error {
	ccb.balancerMu.Lock()
	defer ccb.balancerMu.Unlock()
	return ccb.balancer.UpdateClientConnState(*ccs)
}

func (ccb *ccBalancerWrapper) resolverError(err error) {
	ccb.balancerMu.Lock()
	ccb.balancer.ResolverError(err)
	ccb.balancerMu.Unlock()
}

func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) {
	if len(addrs) <= 0 {
		return nil, fmt.Errorf("grpc: cannot create SubConn with empty address list")
	}
	ccb.mu.Lock()
	defer ccb.mu.Unlock()
	if ccb.subConns == nil {
		return nil, fmt.Errorf("grpc: ClientConn balancer wrapper was closed")
	}
	ac, err := ccb.cc.newAddrConn(addrs, opts)
	if err != nil {
		return nil, err
	}
	acbw := &acBalancerWrapper{ac: ac}
	acbw.ac.mu.Lock()
	ac.acbw = acbw
	acbw.ac.mu.Unlock()
	ccb.subConns[acbw] = struct{}{}
	return acbw, nil
}

func (ccb *ccBalancerWrapper) RemoveSubConn(sc balancer.SubConn) {
	acbw, ok := sc.(*acBalancerWrapper)
	if !ok {
		return
	}
	ccb.mu.Lock()
	defer ccb.mu.Unlock()
	if ccb.subConns == nil {
		return
	}
	delete(ccb.subConns, acbw)
	ccb.cc.removeAddrConn(acbw.getAddrConn(), errConnDrain)
}

func (ccb *ccBalancerWrapper) UpdateState(s balancer.State) {
	ccb.mu.Lock()
	defer ccb.mu.Unlock()
	if ccb.subConns == nil {
		return
	}
	// Update picker before updating state. Even though the ordering here does
	// not matter, it can lead to multiple calls of Pick in the common start-up
	// case where we wait for ready and then perform an RPC. If the picker is
	// updated later, we could call the "connecting" picker when the state is
	// updated, and then call the "ready" picker after the picker gets updated.
	ccb.cc.blockingpicker.updatePicker(s.Picker)
	ccb.cc.csMgr.updateState(s.ConnectivityState)
}

func (ccb *ccBalancerWrapper) ResolveNow(o resolver.ResolveNowOptions) {
	ccb.cc.resolveNow(o)
}

func (ccb *ccBalancerWrapper) Target() string {
	return ccb.cc.target
}

// acBalancerWrapper is a wrapper on top of ac for balancers.
// It implements balancer.SubConn interface.
type acBalancerWrapper struct {
	mu sync.Mutex
	ac *addrConn
}

func (acbw *acBalancerWrapper) UpdateAddresses(addrs []resolver.Address) {
	acbw.mu.Lock()
	defer acbw.mu.Unlock()
	if len(addrs) <= 0 {
		acbw.ac.tearDown(errConnDrain)
		return
	}
	if !acbw.ac.tryUpdateAddrs(addrs) {
		cc := acbw.ac.cc
		opts := acbw.ac.scopts
		acbw.ac.mu.Lock()
		// Set old ac.acbw to nil so the Shutdown state update will be ignored
		// by balancer.
		//
		// TODO(bar) the state transition could be wrong when tearDown() old ac
		// and creating new ac, fix the transition.
		acbw.ac.acbw = nil
		acbw.ac.mu.Unlock()
		acState := acbw.ac.getState()
		acbw.ac.tearDown(errConnDrain)

		if acState == connectivity.Shutdown {
			return
		}

		ac, err := cc.newAddrConn(addrs, opts)
		if err != nil {
			channelz.Warningf(logger, acbw.ac.channelzID, "acBalancerWrapper: UpdateAddresses: failed to newAddrConn: %v", err)
			return
		}
		acbw.ac = ac
		ac.mu.Lock()
		ac.acbw = acbw
		ac.mu.Unlock()
		if acState != connectivity.Idle {
			ac.connect()
		}
	}
}

func (acbw *acBalancerWrapper) Connect() {
	acbw.mu.Lock()
	defer acbw.mu.Unlock()
	acbw.ac.connect()
}

func (acbw *acBalancerWrapper) getAddrConn() *addrConn {
	acbw.mu.Lock()
	defer acbw.mu.Unlock()
	return acbw.ac
}
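ccBalancerWrapper.watcher above drains scStateUpdate values from an unbounded buffer on a single goroutine, so the wrapped balancer never needs locking of its own for state updates. A standalone sketch of that serialization idea using a plain bounded channel instead of the internal buffer.Unbounded package; the update and serializer names are illustrative, and the real wrapper's Put never blocks:

package main

import (
	"fmt"
	"sync"
)

// update stands in for scStateUpdate: an event the consumer must see in order.
type update struct {
	id    int
	state string
}

// serializer mirrors the watcher idea: many producers send, a single goroutine
// applies the updates, so the apply callback needs no locking of its own.
type serializer struct {
	ch   chan update
	done chan struct{}
	wg   sync.WaitGroup
}

func newSerializer(apply func(update)) *serializer {
	s := &serializer{ch: make(chan update, 16), done: make(chan struct{})}
	s.wg.Add(1)
	go func() {
		defer s.wg.Done()
		for {
			select {
			case u := <-s.ch:
				apply(u) // runs on exactly one goroutine, like Balancer.UpdateSubConnState
			case <-s.done:
				return
			}
		}
	}()
	return s
}

func (s *serializer) put(u update) { s.ch <- u }

func (s *serializer) stop() { close(s.done); s.wg.Wait() }

func main() {
	s := newSerializer(func(u update) { fmt.Println("subconn", u.id, "->", u.state) })
	s.put(update{id: 1, state: "CONNECTING"})
	s.put(update{id: 1, state: "READY"})
	s.stop()
}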
812  vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go  generated  vendored  Normal file
@@ -0,0 +1,812 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// source: grpc/binlog/v1/binarylog.proto
|
||||
|
||||
package grpc_binarylog_v1
|
||||
|
||||
import (
|
||||
fmt "fmt"
|
||||
proto "github.com/golang/protobuf/proto"
|
||||
duration "github.com/golang/protobuf/ptypes/duration"
|
||||
timestamp "github.com/golang/protobuf/ptypes/timestamp"
|
||||
math "math"
|
||||
)
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ = proto.Marshal
|
||||
var _ = fmt.Errorf
|
||||
var _ = math.Inf
|
||||
|
||||
// This is a compile-time assertion to ensure that this generated file
|
||||
// is compatible with the proto package it is being compiled against.
|
||||
// A compilation error at this line likely means your copy of the
|
||||
// proto package needs to be updated.
|
||||
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
|
||||
|
||||
// Enumerates the type of event
|
||||
// Note the terminology is different from the RPC semantics
|
||||
// definition, but the same meaning is expressed here.
|
||||
type GrpcLogEntry_EventType int32
|
||||
|
||||
const (
|
||||
GrpcLogEntry_EVENT_TYPE_UNKNOWN GrpcLogEntry_EventType = 0
|
||||
// Header sent from client to server
|
||||
GrpcLogEntry_EVENT_TYPE_CLIENT_HEADER GrpcLogEntry_EventType = 1
|
||||
// Header sent from server to client
|
||||
GrpcLogEntry_EVENT_TYPE_SERVER_HEADER GrpcLogEntry_EventType = 2
|
||||
// Message sent from client to server
|
||||
GrpcLogEntry_EVENT_TYPE_CLIENT_MESSAGE GrpcLogEntry_EventType = 3
|
||||
// Message sent from server to client
|
||||
GrpcLogEntry_EVENT_TYPE_SERVER_MESSAGE GrpcLogEntry_EventType = 4
|
||||
// A signal that client is done sending
|
||||
GrpcLogEntry_EVENT_TYPE_CLIENT_HALF_CLOSE GrpcLogEntry_EventType = 5
|
||||
// Trailer indicates the end of the RPC.
|
||||
// On client side, this event means a trailer was either received
|
||||
// from the network or the gRPC library locally generated a status
|
||||
// to inform the application about a failure.
|
||||
// On server side, this event means the server application requested
|
||||
// to send a trailer. Note: EVENT_TYPE_CANCEL may still arrive after
|
||||
// this due to races on server side.
|
||||
GrpcLogEntry_EVENT_TYPE_SERVER_TRAILER GrpcLogEntry_EventType = 6
|
||||
// A signal that the RPC is cancelled. On client side, this
|
||||
// indicates the client application requests a cancellation.
|
||||
// On server side, this indicates that cancellation was detected.
|
||||
// Note: This marks the end of the RPC. Events may arrive after
|
||||
// this due to races. For example, on client side a trailer
|
||||
// may arrive even though the application requested to cancel the RPC.
|
||||
GrpcLogEntry_EVENT_TYPE_CANCEL GrpcLogEntry_EventType = 7
|
||||
)
|
||||
|
||||
var GrpcLogEntry_EventType_name = map[int32]string{
|
||||
0: "EVENT_TYPE_UNKNOWN",
|
||||
1: "EVENT_TYPE_CLIENT_HEADER",
|
||||
2: "EVENT_TYPE_SERVER_HEADER",
|
||||
3: "EVENT_TYPE_CLIENT_MESSAGE",
|
||||
4: "EVENT_TYPE_SERVER_MESSAGE",
|
||||
5: "EVENT_TYPE_CLIENT_HALF_CLOSE",
|
||||
6: "EVENT_TYPE_SERVER_TRAILER",
|
||||
7: "EVENT_TYPE_CANCEL",
|
||||
}
|
||||
|
||||
var GrpcLogEntry_EventType_value = map[string]int32{
|
||||
"EVENT_TYPE_UNKNOWN": 0,
|
||||
"EVENT_TYPE_CLIENT_HEADER": 1,
|
||||
"EVENT_TYPE_SERVER_HEADER": 2,
|
||||
"EVENT_TYPE_CLIENT_MESSAGE": 3,
|
||||
"EVENT_TYPE_SERVER_MESSAGE": 4,
|
||||
"EVENT_TYPE_CLIENT_HALF_CLOSE": 5,
|
||||
"EVENT_TYPE_SERVER_TRAILER": 6,
|
||||
"EVENT_TYPE_CANCEL": 7,
|
||||
}
|
||||
|
||||
func (x GrpcLogEntry_EventType) String() string {
|
||||
return proto.EnumName(GrpcLogEntry_EventType_name, int32(x))
|
||||
}
|
||||
|
||||
func (GrpcLogEntry_EventType) EnumDescriptor() ([]byte, []int) {
|
||||
return fileDescriptor_b7972e58de45083a, []int{0, 0}
|
||||
}
|
||||
|
||||
// Enumerates the entity that generates the log entry
|
||||
type GrpcLogEntry_Logger int32
|
||||
|
||||
const (
|
||||
GrpcLogEntry_LOGGER_UNKNOWN GrpcLogEntry_Logger = 0
|
||||
GrpcLogEntry_LOGGER_CLIENT GrpcLogEntry_Logger = 1
|
||||
GrpcLogEntry_LOGGER_SERVER GrpcLogEntry_Logger = 2
|
||||
)
|
||||
|
||||
var GrpcLogEntry_Logger_name = map[int32]string{
|
||||
0: "LOGGER_UNKNOWN",
|
||||
1: "LOGGER_CLIENT",
|
||||
2: "LOGGER_SERVER",
|
||||
}
|
||||
|
||||
var GrpcLogEntry_Logger_value = map[string]int32{
|
||||
"LOGGER_UNKNOWN": 0,
|
||||
"LOGGER_CLIENT": 1,
|
||||
"LOGGER_SERVER": 2,
|
||||
}
|
||||
|
||||
func (x GrpcLogEntry_Logger) String() string {
|
||||
return proto.EnumName(GrpcLogEntry_Logger_name, int32(x))
|
||||
}
|
||||
|
||||
func (GrpcLogEntry_Logger) EnumDescriptor() ([]byte, []int) {
|
||||
return fileDescriptor_b7972e58de45083a, []int{0, 1}
|
||||
}
|
||||
|
||||
type Address_Type int32
|
||||
|
||||
const (
|
||||
Address_TYPE_UNKNOWN Address_Type = 0
|
||||
// address is in 1.2.3.4 form
|
||||
Address_TYPE_IPV4 Address_Type = 1
|
||||
// address is in IPv6 canonical form (RFC5952 section 4)
|
||||
// The scope is NOT included in the address string.
|
||||
Address_TYPE_IPV6 Address_Type = 2
|
||||
// address is UDS string
|
||||
Address_TYPE_UNIX Address_Type = 3
|
||||
)
|
||||
|
||||
var Address_Type_name = map[int32]string{
|
||||
0: "TYPE_UNKNOWN",
|
||||
1: "TYPE_IPV4",
|
||||
2: "TYPE_IPV6",
|
||||
3: "TYPE_UNIX",
|
||||
}
|
||||
|
||||
var Address_Type_value = map[string]int32{
|
||||
"TYPE_UNKNOWN": 0,
|
||||
"TYPE_IPV4": 1,
|
||||
"TYPE_IPV6": 2,
|
||||
"TYPE_UNIX": 3,
|
||||
}
|
||||
|
||||
func (x Address_Type) String() string {
|
||||
return proto.EnumName(Address_Type_name, int32(x))
|
||||
}
|
||||
|
||||
func (Address_Type) EnumDescriptor() ([]byte, []int) {
|
||||
return fileDescriptor_b7972e58de45083a, []int{7, 0}
|
||||
}
|
||||
|
||||
// Log entry we store in binary logs
|
||||
type GrpcLogEntry struct {
|
||||
// The timestamp of the binary log message
|
||||
Timestamp *timestamp.Timestamp `protobuf:"bytes,1,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
|
||||
// Uniquely identifies a call. The value must not be 0 in order to disambiguate
|
||||
// from an unset value.
|
||||
// Each call may have several log entries, they will all have the same call_id.
|
||||
// Nothing is guaranteed about their value other than they are unique across
|
||||
// different RPCs in the same gRPC process.
|
||||
CallId uint64 `protobuf:"varint,2,opt,name=call_id,json=callId,proto3" json:"call_id,omitempty"`
|
||||
// The entry sequence id for this call. The first GrpcLogEntry has a
|
||||
// value of 1, to disambiguate from an unset value. The purpose of
|
||||
// this field is to detect missing entries in environments where
|
||||
// durability or ordering is not guaranteed.
|
||||
SequenceIdWithinCall uint64 `protobuf:"varint,3,opt,name=sequence_id_within_call,json=sequenceIdWithinCall,proto3" json:"sequence_id_within_call,omitempty"`
|
||||
Type GrpcLogEntry_EventType `protobuf:"varint,4,opt,name=type,proto3,enum=grpc.binarylog.v1.GrpcLogEntry_EventType" json:"type,omitempty"`
|
||||
Logger GrpcLogEntry_Logger `protobuf:"varint,5,opt,name=logger,proto3,enum=grpc.binarylog.v1.GrpcLogEntry_Logger" json:"logger,omitempty"`
|
||||
// The logger uses one of the following fields to record the payload,
|
||||
// according to the type of the log entry.
|
||||
//
|
||||
// Types that are valid to be assigned to Payload:
|
||||
// *GrpcLogEntry_ClientHeader
|
||||
// *GrpcLogEntry_ServerHeader
|
||||
// *GrpcLogEntry_Message
|
||||
// *GrpcLogEntry_Trailer
|
||||
Payload isGrpcLogEntry_Payload `protobuf_oneof:"payload"`
|
||||
// true if payload does not represent the full message or metadata.
|
||||
PayloadTruncated bool `protobuf:"varint,10,opt,name=payload_truncated,json=payloadTruncated,proto3" json:"payload_truncated,omitempty"`
|
||||
// Peer address information, will only be recorded on the first
|
||||
// incoming event. On client side, peer is logged on
|
||||
// EVENT_TYPE_SERVER_HEADER normally or EVENT_TYPE_SERVER_TRAILER in
|
||||
// the case of trailers-only. On server side, peer is always
|
||||
// logged on EVENT_TYPE_CLIENT_HEADER.
|
||||
Peer *Address `protobuf:"bytes,11,opt,name=peer,proto3" json:"peer,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *GrpcLogEntry) Reset() { *m = GrpcLogEntry{} }
|
||||
func (m *GrpcLogEntry) String() string { return proto.CompactTextString(m) }
|
||||
func (*GrpcLogEntry) ProtoMessage() {}
|
||||
func (*GrpcLogEntry) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_b7972e58de45083a, []int{0}
|
||||
}
|
||||
|
||||
func (m *GrpcLogEntry) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_GrpcLogEntry.Unmarshal(m, b)
|
||||
}
|
||||
func (m *GrpcLogEntry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_GrpcLogEntry.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *GrpcLogEntry) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_GrpcLogEntry.Merge(m, src)
|
||||
}
|
||||
func (m *GrpcLogEntry) XXX_Size() int {
|
||||
return xxx_messageInfo_GrpcLogEntry.Size(m)
|
||||
}
|
||||
func (m *GrpcLogEntry) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_GrpcLogEntry.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_GrpcLogEntry proto.InternalMessageInfo
|
||||
|
||||
func (m *GrpcLogEntry) GetTimestamp() *timestamp.Timestamp {
|
||||
if m != nil {
|
||||
return m.Timestamp
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *GrpcLogEntry) GetCallId() uint64 {
|
||||
if m != nil {
|
||||
return m.CallId
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (m *GrpcLogEntry) GetSequenceIdWithinCall() uint64 {
|
||||
if m != nil {
|
||||
return m.SequenceIdWithinCall
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (m *GrpcLogEntry) GetType() GrpcLogEntry_EventType {
|
||||
if m != nil {
|
||||
return m.Type
|
||||
}
|
||||
return GrpcLogEntry_EVENT_TYPE_UNKNOWN
|
||||
}
|
||||
|
||||
func (m *GrpcLogEntry) GetLogger() GrpcLogEntry_Logger {
|
||||
if m != nil {
|
||||
return m.Logger
|
||||
}
|
||||
return GrpcLogEntry_LOGGER_UNKNOWN
|
||||
}
|
||||
|
||||
type isGrpcLogEntry_Payload interface {
|
||||
isGrpcLogEntry_Payload()
|
||||
}
|
||||
|
||||
type GrpcLogEntry_ClientHeader struct {
|
||||
ClientHeader *ClientHeader `protobuf:"bytes,6,opt,name=client_header,json=clientHeader,proto3,oneof"`
|
||||
}
|
||||
|
||||
type GrpcLogEntry_ServerHeader struct {
|
||||
ServerHeader *ServerHeader `protobuf:"bytes,7,opt,name=server_header,json=serverHeader,proto3,oneof"`
|
||||
}
|
||||
|
||||
type GrpcLogEntry_Message struct {
|
||||
Message *Message `protobuf:"bytes,8,opt,name=message,proto3,oneof"`
|
||||
}
|
||||
|
||||
type GrpcLogEntry_Trailer struct {
|
||||
Trailer *Trailer `protobuf:"bytes,9,opt,name=trailer,proto3,oneof"`
|
||||
}
|
||||
|
||||
func (*GrpcLogEntry_ClientHeader) isGrpcLogEntry_Payload() {}
|
||||
|
||||
func (*GrpcLogEntry_ServerHeader) isGrpcLogEntry_Payload() {}
|
||||
|
||||
func (*GrpcLogEntry_Message) isGrpcLogEntry_Payload() {}
|
||||
|
||||
func (*GrpcLogEntry_Trailer) isGrpcLogEntry_Payload() {}
|
||||
|
||||
func (m *GrpcLogEntry) GetPayload() isGrpcLogEntry_Payload {
|
||||
if m != nil {
|
||||
return m.Payload
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *GrpcLogEntry) GetClientHeader() *ClientHeader {
|
||||
if x, ok := m.GetPayload().(*GrpcLogEntry_ClientHeader); ok {
|
||||
return x.ClientHeader
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *GrpcLogEntry) GetServerHeader() *ServerHeader {
|
||||
if x, ok := m.GetPayload().(*GrpcLogEntry_ServerHeader); ok {
|
||||
return x.ServerHeader
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *GrpcLogEntry) GetMessage() *Message {
|
||||
if x, ok := m.GetPayload().(*GrpcLogEntry_Message); ok {
|
||||
return x.Message
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *GrpcLogEntry) GetTrailer() *Trailer {
|
||||
if x, ok := m.GetPayload().(*GrpcLogEntry_Trailer); ok {
|
||||
return x.Trailer
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *GrpcLogEntry) GetPayloadTruncated() bool {
|
||||
if m != nil {
|
||||
return m.PayloadTruncated
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (m *GrpcLogEntry) GetPeer() *Address {
|
||||
if m != nil {
|
||||
return m.Peer
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// XXX_OneofWrappers is for the internal use of the proto package.
|
||||
func (*GrpcLogEntry) XXX_OneofWrappers() []interface{} {
|
||||
return []interface{}{
|
||||
(*GrpcLogEntry_ClientHeader)(nil),
|
||||
(*GrpcLogEntry_ServerHeader)(nil),
|
||||
(*GrpcLogEntry_Message)(nil),
|
||||
(*GrpcLogEntry_Trailer)(nil),
|
||||
}
|
||||
}
|
||||
|
||||
type ClientHeader struct {
|
||||
// This contains only the metadata from the application.
|
||||
Metadata *Metadata `protobuf:"bytes,1,opt,name=metadata,proto3" json:"metadata,omitempty"`
|
||||
// The name of the RPC method, which looks something like:
|
||||
// /<service>/<method>
|
||||
// Note the leading "/" character.
|
||||
MethodName string `protobuf:"bytes,2,opt,name=method_name,json=methodName,proto3" json:"method_name,omitempty"`
|
||||
// A single process may be used to run multiple virtual
|
||||
// servers with different identities.
|
||||
// The authority is the name of such a server identitiy.
|
||||
// It is typically a portion of the URI in the form of
|
||||
// <host> or <host>:<port> .
|
||||
Authority string `protobuf:"bytes,3,opt,name=authority,proto3" json:"authority,omitempty"`
|
||||
// the RPC timeout
|
||||
Timeout *duration.Duration `protobuf:"bytes,4,opt,name=timeout,proto3" json:"timeout,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *ClientHeader) Reset() { *m = ClientHeader{} }
|
||||
func (m *ClientHeader) String() string { return proto.CompactTextString(m) }
|
||||
func (*ClientHeader) ProtoMessage() {}
|
||||
func (*ClientHeader) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_b7972e58de45083a, []int{1}
|
||||
}
|
||||
|
||||
func (m *ClientHeader) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_ClientHeader.Unmarshal(m, b)
|
||||
}
|
||||
func (m *ClientHeader) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_ClientHeader.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *ClientHeader) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_ClientHeader.Merge(m, src)
|
||||
}
|
||||
func (m *ClientHeader) XXX_Size() int {
|
||||
return xxx_messageInfo_ClientHeader.Size(m)
|
||||
}
|
||||
func (m *ClientHeader) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_ClientHeader.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_ClientHeader proto.InternalMessageInfo
|
||||
|
||||
func (m *ClientHeader) GetMetadata() *Metadata {
|
||||
if m != nil {
|
||||
return m.Metadata
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *ClientHeader) GetMethodName() string {
|
||||
if m != nil {
|
||||
return m.MethodName
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *ClientHeader) GetAuthority() string {
|
||||
if m != nil {
|
||||
return m.Authority
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *ClientHeader) GetTimeout() *duration.Duration {
|
||||
if m != nil {
|
||||
return m.Timeout
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type ServerHeader struct {
|
||||
// This contains only the metadata from the application.
|
||||
Metadata *Metadata `protobuf:"bytes,1,opt,name=metadata,proto3" json:"metadata,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *ServerHeader) Reset() { *m = ServerHeader{} }
|
||||
func (m *ServerHeader) String() string { return proto.CompactTextString(m) }
|
||||
func (*ServerHeader) ProtoMessage() {}
|
||||
func (*ServerHeader) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_b7972e58de45083a, []int{2}
|
||||
}
|
||||
|
||||
func (m *ServerHeader) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_ServerHeader.Unmarshal(m, b)
|
||||
}
|
||||
func (m *ServerHeader) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_ServerHeader.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *ServerHeader) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_ServerHeader.Merge(m, src)
|
||||
}
|
||||
func (m *ServerHeader) XXX_Size() int {
|
||||
return xxx_messageInfo_ServerHeader.Size(m)
|
||||
}
|
||||
func (m *ServerHeader) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_ServerHeader.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_ServerHeader proto.InternalMessageInfo
|
||||
|
||||
func (m *ServerHeader) GetMetadata() *Metadata {
|
||||
if m != nil {
|
||||
return m.Metadata
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type Trailer struct {
|
||||
// This contains only the metadata from the application.
|
||||
Metadata *Metadata `protobuf:"bytes,1,opt,name=metadata,proto3" json:"metadata,omitempty"`
|
||||
// The gRPC status code.
|
||||
StatusCode uint32 `protobuf:"varint,2,opt,name=status_code,json=statusCode,proto3" json:"status_code,omitempty"`
|
||||
// An original status message before any transport specific
|
||||
// encoding.
|
||||
StatusMessage string `protobuf:"bytes,3,opt,name=status_message,json=statusMessage,proto3" json:"status_message,omitempty"`
|
||||
// The value of the 'grpc-status-details-bin' metadata key. If
|
||||
// present, this is always an encoded 'google.rpc.Status' message.
|
||||
StatusDetails []byte `protobuf:"bytes,4,opt,name=status_details,json=statusDetails,proto3" json:"status_details,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *Trailer) Reset() { *m = Trailer{} }
|
||||
func (m *Trailer) String() string { return proto.CompactTextString(m) }
|
||||
func (*Trailer) ProtoMessage() {}
|
||||
func (*Trailer) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_b7972e58de45083a, []int{3}
|
||||
}
|
||||
|
||||
func (m *Trailer) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_Trailer.Unmarshal(m, b)
|
||||
}
|
||||
func (m *Trailer) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_Trailer.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *Trailer) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_Trailer.Merge(m, src)
|
||||
}
|
||||
func (m *Trailer) XXX_Size() int {
|
||||
return xxx_messageInfo_Trailer.Size(m)
|
||||
}
|
||||
func (m *Trailer) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_Trailer.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_Trailer proto.InternalMessageInfo
|
||||
|
||||
func (m *Trailer) GetMetadata() *Metadata {
|
||||
if m != nil {
|
||||
return m.Metadata
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *Trailer) GetStatusCode() uint32 {
|
||||
if m != nil {
|
||||
return m.StatusCode
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (m *Trailer) GetStatusMessage() string {
|
||||
if m != nil {
|
||||
return m.StatusMessage
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *Trailer) GetStatusDetails() []byte {
|
||||
if m != nil {
|
||||
return m.StatusDetails
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Message payload, used by CLIENT_MESSAGE and SERVER_MESSAGE
|
||||
type Message struct {
|
||||
// Length of the message. It may not be the same as the length of the
|
||||
// data field, as the logging payload can be truncated or omitted.
|
||||
Length uint32 `protobuf:"varint,1,opt,name=length,proto3" json:"length,omitempty"`
|
||||
// May be truncated or omitted.
|
||||
Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *Message) Reset() { *m = Message{} }
|
||||
func (m *Message) String() string { return proto.CompactTextString(m) }
|
||||
func (*Message) ProtoMessage() {}
|
||||
func (*Message) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_b7972e58de45083a, []int{4}
|
||||
}
|
||||
|
||||
func (m *Message) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_Message.Unmarshal(m, b)
|
||||
}
|
||||
func (m *Message) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_Message.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *Message) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_Message.Merge(m, src)
|
||||
}
|
||||
func (m *Message) XXX_Size() int {
|
||||
return xxx_messageInfo_Message.Size(m)
|
||||
}
|
||||
func (m *Message) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_Message.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_Message proto.InternalMessageInfo
|
||||
|
||||
func (m *Message) GetLength() uint32 {
|
||||
if m != nil {
|
||||
return m.Length
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (m *Message) GetData() []byte {
|
||||
if m != nil {
|
||||
return m.Data
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// A list of metadata pairs, used in the payload of client header,
|
||||
// server header, and server trailer.
|
||||
// Implementations may omit some entries to honor the header limits
|
||||
// of GRPC_BINARY_LOG_CONFIG.
|
||||
//
|
||||
// Header keys added by gRPC are omitted. To be more specific,
|
||||
// implementations will not log the following entries, and this is
|
||||
// not to be treated as a truncation:
|
||||
// - entries handled by grpc that are not user visible, such as those
|
||||
// that begin with 'grpc-' (with exception of grpc-trace-bin)
|
||||
// or keys like 'lb-token'
|
||||
// - transport specific entries, including but not limited to:
|
||||
// ':path', ':authority', 'content-encoding', 'user-agent', 'te', etc
|
||||
// - entries added for call credentials
|
||||
//
|
||||
// Implementations must always log grpc-trace-bin if it is present.
|
||||
// Practically speaking it will only be visible on server side because
|
||||
// grpc-trace-bin is managed by low level client side mechanisms
|
||||
// inaccessible from the application level. On server side, the
|
||||
// header is just a normal metadata key.
|
||||
// The pair will not count towards the size limit.
|
||||
type Metadata struct {
|
||||
Entry []*MetadataEntry `protobuf:"bytes,1,rep,name=entry,proto3" json:"entry,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *Metadata) Reset() { *m = Metadata{} }
|
||||
func (m *Metadata) String() string { return proto.CompactTextString(m) }
|
||||
func (*Metadata) ProtoMessage() {}
|
||||
func (*Metadata) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_b7972e58de45083a, []int{5}
|
||||
}
|
||||
|
||||
func (m *Metadata) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_Metadata.Unmarshal(m, b)
|
||||
}
|
||||
func (m *Metadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_Metadata.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *Metadata) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_Metadata.Merge(m, src)
|
||||
}
|
||||
func (m *Metadata) XXX_Size() int {
|
||||
return xxx_messageInfo_Metadata.Size(m)
|
||||
}
|
||||
func (m *Metadata) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_Metadata.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_Metadata proto.InternalMessageInfo
|
||||
|
||||
func (m *Metadata) GetEntry() []*MetadataEntry {
|
||||
if m != nil {
|
||||
return m.Entry
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// A metadata key value pair
|
||||
type MetadataEntry struct {
|
||||
Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
|
||||
Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *MetadataEntry) Reset() { *m = MetadataEntry{} }
|
||||
func (m *MetadataEntry) String() string { return proto.CompactTextString(m) }
|
||||
func (*MetadataEntry) ProtoMessage() {}
|
||||
func (*MetadataEntry) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_b7972e58de45083a, []int{6}
|
||||
}
|
||||
|
||||
func (m *MetadataEntry) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_MetadataEntry.Unmarshal(m, b)
|
||||
}
|
||||
func (m *MetadataEntry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_MetadataEntry.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *MetadataEntry) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_MetadataEntry.Merge(m, src)
|
||||
}
|
||||
func (m *MetadataEntry) XXX_Size() int {
|
||||
return xxx_messageInfo_MetadataEntry.Size(m)
|
||||
}
|
||||
func (m *MetadataEntry) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_MetadataEntry.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_MetadataEntry proto.InternalMessageInfo
|
||||
|
||||
func (m *MetadataEntry) GetKey() string {
|
||||
if m != nil {
|
||||
return m.Key
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *MetadataEntry) GetValue() []byte {
|
||||
if m != nil {
|
||||
return m.Value
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Address information
|
||||
type Address struct {
|
||||
Type Address_Type `protobuf:"varint,1,opt,name=type,proto3,enum=grpc.binarylog.v1.Address_Type" json:"type,omitempty"`
|
||||
Address string `protobuf:"bytes,2,opt,name=address,proto3" json:"address,omitempty"`
|
||||
// only for TYPE_IPV4 and TYPE_IPV6
|
||||
IpPort uint32 `protobuf:"varint,3,opt,name=ip_port,json=ipPort,proto3" json:"ip_port,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *Address) Reset() { *m = Address{} }
|
||||
func (m *Address) String() string { return proto.CompactTextString(m) }
|
||||
func (*Address) ProtoMessage() {}
|
||||
func (*Address) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_b7972e58de45083a, []int{7}
|
||||
}
|
||||
|
||||
func (m *Address) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_Address.Unmarshal(m, b)
|
||||
}
|
||||
func (m *Address) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_Address.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *Address) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_Address.Merge(m, src)
|
||||
}
|
||||
func (m *Address) XXX_Size() int {
|
||||
return xxx_messageInfo_Address.Size(m)
|
||||
}
|
||||
func (m *Address) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_Address.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_Address proto.InternalMessageInfo
|
||||
|
||||
func (m *Address) GetType() Address_Type {
|
||||
if m != nil {
|
||||
return m.Type
|
||||
}
|
||||
return Address_TYPE_UNKNOWN
|
||||
}
|
||||
|
||||
func (m *Address) GetAddress() string {
|
||||
if m != nil {
|
||||
return m.Address
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *Address) GetIpPort() uint32 {
|
||||
if m != nil {
|
||||
return m.IpPort
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func init() {
|
||||
proto.RegisterEnum("grpc.binarylog.v1.GrpcLogEntry_EventType", GrpcLogEntry_EventType_name, GrpcLogEntry_EventType_value)
|
||||
proto.RegisterEnum("grpc.binarylog.v1.GrpcLogEntry_Logger", GrpcLogEntry_Logger_name, GrpcLogEntry_Logger_value)
|
||||
proto.RegisterEnum("grpc.binarylog.v1.Address_Type", Address_Type_name, Address_Type_value)
|
||||
proto.RegisterType((*GrpcLogEntry)(nil), "grpc.binarylog.v1.GrpcLogEntry")
|
||||
proto.RegisterType((*ClientHeader)(nil), "grpc.binarylog.v1.ClientHeader")
|
||||
proto.RegisterType((*ServerHeader)(nil), "grpc.binarylog.v1.ServerHeader")
|
||||
proto.RegisterType((*Trailer)(nil), "grpc.binarylog.v1.Trailer")
|
||||
proto.RegisterType((*Message)(nil), "grpc.binarylog.v1.Message")
|
||||
proto.RegisterType((*Metadata)(nil), "grpc.binarylog.v1.Metadata")
|
||||
proto.RegisterType((*MetadataEntry)(nil), "grpc.binarylog.v1.MetadataEntry")
|
||||
proto.RegisterType((*Address)(nil), "grpc.binarylog.v1.Address")
|
||||
}
|
||||
|
||||
func init() { proto.RegisterFile("grpc/binlog/v1/binarylog.proto", fileDescriptor_b7972e58de45083a) }
|
||||
|
||||
var fileDescriptor_b7972e58de45083a = []byte{
|
||||
// 904 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x55, 0x51, 0x6f, 0xe3, 0x44,
|
||||
0x10, 0xae, 0xdb, 0x34, 0x6e, 0x26, 0x49, 0xe5, 0xae, 0xca, 0x9d, 0xaf, 0x94, 0x6b, 0x64, 0x09,
|
||||
0x14, 0x84, 0xe4, 0xa8, 0x29, 0xd7, 0xe3, 0x05, 0xa4, 0x24, 0xf5, 0xa5, 0x11, 0xb9, 0x34, 0xda,
|
||||
0xe4, 0x7a, 0x80, 0x90, 0xac, 0x6d, 0xbc, 0x38, 0x16, 0x8e, 0xd7, 0xac, 0x37, 0x41, 0xf9, 0x59,
|
||||
0xbc, 0x21, 0xdd, 0xef, 0xe2, 0x1d, 0x79, 0xd7, 0x4e, 0x4d, 0xd3, 0x82, 0xc4, 0xbd, 0xed, 0x7c,
|
||||
0xf3, 0xcd, 0x37, 0xbb, 0xe3, 0x99, 0x31, 0xbc, 0xf4, 0x79, 0x3c, 0x6b, 0xdd, 0x05, 0x51, 0xc8,
|
||||
0xfc, 0xd6, 0xea, 0x3c, 0x3d, 0x11, 0xbe, 0x0e, 0x99, 0x6f, 0xc7, 0x9c, 0x09, 0x86, 0x8e, 0x52,
|
||||
0xbf, 0x7d, 0x8f, 0xae, 0xce, 0x4f, 0x5e, 0xfa, 0x8c, 0xf9, 0x21, 0x6d, 0x49, 0xc2, 0xdd, 0xf2,
|
||||
0x97, 0x96, 0xb7, 0xe4, 0x44, 0x04, 0x2c, 0x52, 0x21, 0x27, 0x67, 0x0f, 0xfd, 0x22, 0x58, 0xd0,
|
||||
0x44, 0x90, 0x45, 0xac, 0x08, 0xd6, 0x07, 0x1d, 0x6a, 0x7d, 0x1e, 0xcf, 0x86, 0xcc, 0x77, 0x22,
|
||||
0xc1, 0xd7, 0xe8, 0x1b, 0xa8, 0x6c, 0x38, 0xa6, 0xd6, 0xd0, 0x9a, 0xd5, 0xf6, 0x89, 0xad, 0x54,
|
||||
0xec, 0x5c, 0xc5, 0x9e, 0xe6, 0x0c, 0x7c, 0x4f, 0x46, 0xcf, 0x41, 0x9f, 0x91, 0x30, 0x74, 0x03,
|
||||
0xcf, 0xdc, 0x6d, 0x68, 0xcd, 0x12, 0x2e, 0xa7, 0xe6, 0xc0, 0x43, 0xaf, 0xe0, 0x79, 0x42, 0x7f,
|
||||
0x5b, 0xd2, 0x68, 0x46, 0xdd, 0xc0, 0x73, 0x7f, 0x0f, 0xc4, 0x3c, 0x88, 0xdc, 0xd4, 0x69, 0xee,
|
||||
0x49, 0xe2, 0x71, 0xee, 0x1e, 0x78, 0xef, 0xa5, 0xb3, 0x47, 0xc2, 0x10, 0x7d, 0x0b, 0x25, 0xb1,
|
||||
0x8e, 0xa9, 0x59, 0x6a, 0x68, 0xcd, 0xc3, 0xf6, 0x97, 0xf6, 0xd6, 0xeb, 0xed, 0xe2, 0xc5, 0x6d,
|
||||
0x67, 0x45, 0x23, 0x31, 0x5d, 0xc7, 0x14, 0xcb, 0x30, 0xf4, 0x1d, 0x94, 0x43, 0xe6, 0xfb, 0x94,
|
||||
0x9b, 0xfb, 0x52, 0xe0, 0x8b, 0xff, 0x12, 0x18, 0x4a, 0x36, 0xce, 0xa2, 0xd0, 0x1b, 0xa8, 0xcf,
|
||||
0xc2, 0x80, 0x46, 0xc2, 0x9d, 0x53, 0xe2, 0x51, 0x6e, 0x96, 0x65, 0x31, 0xce, 0x1e, 0x91, 0xe9,
|
||||
0x49, 0xde, 0xb5, 0xa4, 0x5d, 0xef, 0xe0, 0xda, 0xac, 0x60, 0xa7, 0x3a, 0x09, 0xe5, 0x2b, 0xca,
|
||||
0x73, 0x1d, 0xfd, 0x49, 0x9d, 0x89, 0xe4, 0xdd, 0xeb, 0x24, 0x05, 0x1b, 0x5d, 0x82, 0xbe, 0xa0,
|
||||
0x49, 0x42, 0x7c, 0x6a, 0x1e, 0xe4, 0x9f, 0x65, 0x4b, 0xe1, 0xad, 0x62, 0x5c, 0xef, 0xe0, 0x9c,
|
||||
0x9c, 0xc6, 0x09, 0x4e, 0x82, 0x90, 0x72, 0xb3, 0xf2, 0x64, 0xdc, 0x54, 0x31, 0xd2, 0xb8, 0x8c,
|
||||
0x8c, 0xbe, 0x82, 0xa3, 0x98, 0xac, 0x43, 0x46, 0x3c, 0x57, 0xf0, 0x65, 0x34, 0x23, 0x82, 0x7a,
|
||||
0x26, 0x34, 0xb4, 0xe6, 0x01, 0x36, 0x32, 0xc7, 0x34, 0xc7, 0x91, 0x0d, 0xa5, 0x98, 0x52, 0x6e,
|
||||
0x56, 0x9f, 0xcc, 0xd0, 0xf1, 0x3c, 0x4e, 0x93, 0x04, 0x4b, 0x9e, 0xf5, 0x97, 0x06, 0x95, 0xcd,
|
||||
0x07, 0x43, 0xcf, 0x00, 0x39, 0xb7, 0xce, 0x68, 0xea, 0x4e, 0x7f, 0x1c, 0x3b, 0xee, 0xbb, 0xd1,
|
||||
0xf7, 0xa3, 0x9b, 0xf7, 0x23, 0x63, 0x07, 0x9d, 0x82, 0x59, 0xc0, 0x7b, 0xc3, 0x41, 0x7a, 0xbe,
|
||||
0x76, 0x3a, 0x57, 0x0e, 0x36, 0xb4, 0x07, 0xde, 0x89, 0x83, 0x6f, 0x1d, 0x9c, 0x7b, 0x77, 0xd1,
|
||||
0x67, 0xf0, 0x62, 0x3b, 0xf6, 0xad, 0x33, 0x99, 0x74, 0xfa, 0x8e, 0xb1, 0xf7, 0xc0, 0x9d, 0x05,
|
||||
0xe7, 0xee, 0x12, 0x6a, 0xc0, 0xe9, 0x23, 0x99, 0x3b, 0xc3, 0x37, 0x6e, 0x6f, 0x78, 0x33, 0x71,
|
||||
0x8c, 0xfd, 0xc7, 0x05, 0xa6, 0xb8, 0x33, 0x18, 0x3a, 0xd8, 0x28, 0xa3, 0x4f, 0xe0, 0xa8, 0x28,
|
||||
0xd0, 0x19, 0xf5, 0x9c, 0xa1, 0xa1, 0x5b, 0x5d, 0x28, 0xab, 0x36, 0x43, 0x08, 0x0e, 0x87, 0x37,
|
||||
0xfd, 0xbe, 0x83, 0x0b, 0xef, 0x3d, 0x82, 0x7a, 0x86, 0xa9, 0x8c, 0x86, 0x56, 0x80, 0x54, 0x0a,
|
||||
0x63, 0xb7, 0x5b, 0x01, 0x3d, 0xab, 0xbf, 0xf5, 0x41, 0x83, 0x5a, 0xb1, 0xf9, 0xd0, 0x6b, 0x38,
|
||||
0x58, 0x50, 0x41, 0x3c, 0x22, 0x48, 0x36, 0xbc, 0x9f, 0x3e, 0xda, 0x25, 0x8a, 0x82, 0x37, 0x64,
|
||||
0x74, 0x06, 0xd5, 0x05, 0x15, 0x73, 0xe6, 0xb9, 0x11, 0x59, 0x50, 0x39, 0xc0, 0x15, 0x0c, 0x0a,
|
||||
0x1a, 0x91, 0x05, 0x45, 0xa7, 0x50, 0x21, 0x4b, 0x31, 0x67, 0x3c, 0x10, 0x6b, 0x39, 0xb6, 0x15,
|
||||
0x7c, 0x0f, 0xa0, 0x0b, 0xd0, 0xd3, 0x45, 0xc0, 0x96, 0x42, 0x8e, 0x6b, 0xb5, 0xfd, 0x62, 0x6b,
|
||||
0x67, 0x5c, 0x65, 0x9b, 0x09, 0xe7, 0x4c, 0xab, 0x0f, 0xb5, 0x62, 0xc7, 0xff, 0xef, 0xcb, 0x5b,
|
||||
0x7f, 0x68, 0xa0, 0x67, 0x1d, 0xfc, 0x51, 0x15, 0x48, 0x04, 0x11, 0xcb, 0xc4, 0x9d, 0x31, 0x4f,
|
||||
0x55, 0xa0, 0x8e, 0x41, 0x41, 0x3d, 0xe6, 0x51, 0xf4, 0x39, 0x1c, 0x66, 0x84, 0x7c, 0x0e, 0x55,
|
||||
0x19, 0xea, 0x0a, 0xcd, 0x46, 0xaf, 0x40, 0xf3, 0xa8, 0x20, 0x41, 0x98, 0xc8, 0x8a, 0xd4, 0x72,
|
||||
0xda, 0x95, 0x02, 0xad, 0x57, 0xa0, 0xe7, 0x11, 0xcf, 0xa0, 0x1c, 0xd2, 0xc8, 0x17, 0x73, 0x79,
|
||||
0xe1, 0x3a, 0xce, 0x2c, 0x84, 0xa0, 0x24, 0x9f, 0xb1, 0x2b, 0xe3, 0xe5, 0xd9, 0xea, 0xc2, 0x41,
|
||||
0x7e, 0x77, 0x74, 0x09, 0xfb, 0x34, 0xdd, 0x5c, 0xa6, 0xd6, 0xd8, 0x6b, 0x56, 0xdb, 0x8d, 0x7f,
|
||||
0x79, 0xa7, 0xdc, 0x70, 0x58, 0xd1, 0xad, 0xd7, 0x50, 0xff, 0x07, 0x8e, 0x0c, 0xd8, 0xfb, 0x95,
|
||||
0xae, 0x65, 0xf6, 0x0a, 0x4e, 0x8f, 0xe8, 0x18, 0xf6, 0x57, 0x24, 0x5c, 0xd2, 0x2c, 0xb7, 0x32,
|
||||
0xac, 0x3f, 0x35, 0xd0, 0xb3, 0x39, 0x46, 0x17, 0xd9, 0x76, 0xd6, 0xe4, 0x72, 0x3d, 0x7b, 0x7a,
|
||||
0xe2, 0xed, 0xc2, 0x4e, 0x36, 0x41, 0x27, 0x0a, 0xcd, 0x3a, 0x2c, 0x37, 0xd3, 0x9f, 0x47, 0x10,
|
||||
0xbb, 0x31, 0xe3, 0x42, 0x56, 0xb5, 0x8e, 0xcb, 0x41, 0x3c, 0x66, 0x5c, 0x58, 0x0e, 0x94, 0xe4,
|
||||
0x8e, 0x30, 0xa0, 0xf6, 0x60, 0x3b, 0xd4, 0xa1, 0x22, 0x91, 0xc1, 0xf8, 0xf6, 0x6b, 0x43, 0x2b,
|
||||
0x9a, 0x97, 0xc6, 0xee, 0xc6, 0x7c, 0x37, 0x1a, 0xfc, 0x60, 0xec, 0x75, 0x7f, 0x86, 0xe3, 0x80,
|
||||
0x6d, 0x5f, 0xb2, 0x7b, 0xd8, 0x95, 0xd6, 0x90, 0xf9, 0xe3, 0xb4, 0x51, 0xc7, 0xda, 0x4f, 0xed,
|
||||
0xac, 0x71, 0x7d, 0x16, 0x92, 0xc8, 0xb7, 0x19, 0xf7, 0x5b, 0xf9, 0x7f, 0x59, 0x85, 0x49, 0xd3,
|
||||
0xdd, 0x98, 0xee, 0xea, 0xfc, 0xae, 0x2c, 0xbb, 0xfc, 0xe2, 0xef, 0x00, 0x00, 0x00, 0xff, 0xff,
|
||||
0x10, 0x93, 0x68, 0x41, 0xc2, 0x07, 0x00, 0x00,
|
||||
}
|
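The generated types above can be constructed directly. A minimal sketch that builds a CLIENT_HEADER entry and reads the oneof payload back, assuming the vendored import path and an illustrative method name (normally the gRPC binary logger fills these fields itself):

package main

import (
	"fmt"

	"github.com/golang/protobuf/ptypes"
	binlogpb "google.golang.org/grpc/binarylog/grpc_binarylog_v1"
)

func main() {
	// Build a CLIENT_HEADER entry by hand to show the generated API surface.
	entry := &binlogpb.GrpcLogEntry{
		Timestamp: ptypes.TimestampNow(),
		CallId:    1,
		Type:      binlogpb.GrpcLogEntry_EVENT_TYPE_CLIENT_HEADER,
		Logger:    binlogpb.GrpcLogEntry_LOGGER_CLIENT,
		Payload: &binlogpb.GrpcLogEntry_ClientHeader{
			ClientHeader: &binlogpb.ClientHeader{MethodName: "/helloworld.Greeter/SayHello"},
		},
	}

	// The oneof accessor returns nil unless the matching payload is set.
	if h := entry.GetClientHeader(); h != nil {
		fmt.Println(entry.GetType(), h.GetMethodName())
	}
}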
226  vendor/google.golang.org/grpc/call.go  generated  vendored
@@ -1,190 +1,74 @@
/*
|
||||
*
|
||||
* Copyright 2014, Google Inc.
|
||||
* All rights reserved.
|
||||
* Copyright 2014 gRPC authors.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions are
|
||||
* met:
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* * Redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer.
|
||||
* * Redistributions in binary form must reproduce the above
|
||||
* copyright notice, this list of conditions and the following disclaimer
|
||||
* in the documentation and/or other materials provided with the
|
||||
* distribution.
|
||||
* * Neither the name of Google Inc. nor the names of its
|
||||
* contributors may be used to endorse or promote products derived from
|
||||
* this software without specific prior written permission.
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
package grpc
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"io"
|
||||
"time"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
"golang.org/x/net/trace"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/transport"
|
||||
"context"
|
||||
)
|
||||
|
||||
// recvResponse receives and parses an RPC response.
|
||||
// On error, it returns the error and indicates whether the call should be retried.
|
||||
// Invoke sends the RPC request on the wire and returns after response is
|
||||
// received. This is typically called by generated code.
|
||||
//
|
||||
// TODO(zhaoq): Check whether the received message sequence is valid.
|
||||
func recvResponse(dopts dialOptions, t transport.ClientTransport, c *callInfo, stream *transport.Stream, reply interface{}) error {
|
||||
// Try to acquire header metadata from the server if there is any.
|
||||
var err error
|
||||
c.headerMD, err = stream.Header()
|
||||
// All errors returned by Invoke are compatible with the status package.
|
||||
func (cc *ClientConn) Invoke(ctx context.Context, method string, args, reply interface{}, opts ...CallOption) error {
|
||||
// allow interceptor to see all applicable call options, which means those
|
||||
// configured as defaults from dial option as well as per-call options
|
||||
opts = combine(cc.dopts.callOptions, opts)
|
||||
|
||||
if cc.dopts.unaryInt != nil {
|
||||
return cc.dopts.unaryInt(ctx, method, args, reply, cc, invoke, opts...)
|
||||
}
|
||||
return invoke(ctx, method, args, reply, cc, opts...)
|
||||
}
|
||||
|
||||
func combine(o1 []CallOption, o2 []CallOption) []CallOption {
|
||||
// we don't use append because o1 could have extra capacity whose
|
||||
// elements would be overwritten, which could cause inadvertent
|
||||
// sharing (and race conditions) between concurrent calls
|
||||
if len(o1) == 0 {
|
||||
return o2
|
||||
} else if len(o2) == 0 {
|
||||
return o1
|
||||
}
|
||||
ret := make([]CallOption, len(o1)+len(o2))
|
||||
copy(ret, o1)
|
||||
copy(ret[len(o1):], o2)
|
||||
return ret
|
||||
}
|
||||
|
||||
// Invoke sends the RPC request on the wire and returns after response is
|
||||
// received. This is typically called by generated code.
|
||||
//
|
||||
// DEPRECATED: Use ClientConn.Invoke instead.
|
||||
func Invoke(ctx context.Context, method string, args, reply interface{}, cc *ClientConn, opts ...CallOption) error {
|
||||
return cc.Invoke(ctx, method, args, reply, opts...)
|
||||
}
|
||||
|
||||
var unaryStreamDesc = &StreamDesc{ServerStreams: false, ClientStreams: false}
|
||||
|
||||
func invoke(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, opts ...CallOption) error {
|
||||
cs, err := newClientStream(ctx, unaryStreamDesc, cc, method, opts...)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
p := &parser{r: stream}
|
||||
for {
|
||||
if err = recv(p, dopts.codec, stream, dopts.dc, reply); err != nil {
|
||||
if err == io.EOF {
|
||||
break
|
||||
}
|
||||
return err
|
||||
}
|
||||
}
|
||||
c.trailerMD = stream.Trailer()
|
||||
return nil
|
||||
}
|
||||
|
||||
// sendRequest writes out various information of an RPC such as Context and Message.
|
||||
func sendRequest(ctx context.Context, codec Codec, compressor Compressor, callHdr *transport.CallHdr, t transport.ClientTransport, args interface{}, opts *transport.Options) (_ *transport.Stream, err error) {
|
||||
stream, err := t.NewStream(ctx, callHdr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer func() {
|
||||
if err != nil {
|
||||
if _, ok := err.(transport.ConnectionError); !ok {
|
||||
t.CloseStream(stream, err)
|
||||
}
|
||||
}
|
||||
}()
|
||||
var cbuf *bytes.Buffer
|
||||
if compressor != nil {
|
||||
cbuf = new(bytes.Buffer)
|
||||
}
|
||||
outBuf, err := encode(codec, args, compressor, cbuf)
|
||||
if err != nil {
|
||||
return nil, transport.StreamErrorf(codes.Internal, "grpc: %v", err)
|
||||
}
|
||||
err = t.Write(stream, outBuf, opts)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// Sent successfully.
|
||||
return stream, nil
|
||||
}
|
||||
|
||||
// Invoke is called by the generated code. It sends the RPC request on the
|
||||
// wire and returns after response is received.
|
||||
func Invoke(ctx context.Context, method string, args, reply interface{}, cc *ClientConn, opts ...CallOption) (err error) {
|
||||
var c callInfo
|
||||
for _, o := range opts {
|
||||
if err := o.before(&c); err != nil {
|
||||
return toRPCErr(err)
|
||||
}
|
||||
}
|
||||
defer func() {
|
||||
for _, o := range opts {
|
||||
o.after(&c)
|
||||
}
|
||||
}()
|
||||
if EnableTracing {
|
||||
c.traceInfo.tr = trace.New("grpc.Sent."+methodFamily(method), method)
|
||||
defer c.traceInfo.tr.Finish()
|
||||
c.traceInfo.firstLine.client = true
|
||||
if deadline, ok := ctx.Deadline(); ok {
|
||||
c.traceInfo.firstLine.deadline = deadline.Sub(time.Now())
|
||||
}
|
||||
c.traceInfo.tr.LazyLog(&c.traceInfo.firstLine, false)
|
||||
// TODO(dsymonds): Arrange for c.traceInfo.firstLine.remoteAddr to be set.
|
||||
defer func() {
|
||||
if err != nil {
|
||||
c.traceInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true)
|
||||
c.traceInfo.tr.SetError()
|
||||
}
|
||||
}()
|
||||
}
|
||||
topts := &transport.Options{
|
||||
Last: true,
|
||||
Delay: false,
|
||||
}
|
||||
var (
|
||||
lastErr error // record the error that happened
|
||||
)
|
||||
for {
|
||||
var (
|
||||
err error
|
||||
t transport.ClientTransport
|
||||
stream *transport.Stream
|
||||
)
|
||||
// TODO(zhaoq): Need a formal spec of retry strategy for non-failfast rpcs.
|
||||
if lastErr != nil && c.failFast {
|
||||
return toRPCErr(lastErr)
|
||||
}
|
||||
callHdr := &transport.CallHdr{
|
||||
Host: cc.authority,
|
||||
Method: method,
|
||||
}
|
||||
if cc.dopts.cp != nil {
|
||||
callHdr.SendCompress = cc.dopts.cp.Type()
|
||||
}
|
||||
t, err = cc.dopts.picker.Pick(ctx)
|
||||
if err != nil {
|
||||
if lastErr != nil {
|
||||
// This was a retry; return the error from the last attempt.
|
||||
return toRPCErr(lastErr)
|
||||
}
|
||||
return toRPCErr(err)
|
||||
}
|
||||
if c.traceInfo.tr != nil {
|
||||
c.traceInfo.tr.LazyLog(&payload{sent: true, msg: args}, true)
|
||||
}
|
||||
stream, err = sendRequest(ctx, cc.dopts.codec, cc.dopts.cp, callHdr, t, args, topts)
|
||||
if err != nil {
|
||||
if _, ok := err.(transport.ConnectionError); ok {
|
||||
lastErr = err
|
||||
continue
|
||||
}
|
||||
if lastErr != nil {
|
||||
return toRPCErr(lastErr)
|
||||
}
|
||||
return toRPCErr(err)
|
||||
}
|
||||
// Receive the response
|
||||
lastErr = recvResponse(cc.dopts, t, &c, stream, reply)
|
||||
if _, ok := lastErr.(transport.ConnectionError); ok {
|
||||
continue
|
||||
}
|
||||
if c.traceInfo.tr != nil {
|
||||
c.traceInfo.tr.LazyLog(&payload{sent: false, msg: reply}, true)
|
||||
}
|
||||
t.CloseStream(stream, lastErr)
|
||||
if lastErr != nil {
|
||||
return toRPCErr(lastErr)
|
||||
}
|
||||
return Errorf(stream.StatusCode(), stream.StatusDesc())
|
||||
if err := cs.SendMsg(req); err != nil {
|
||||
return err
|
||||
}
|
||||
return cs.RecvMsg(reply)
|
||||
}
|
||||
|
|
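The rewritten Invoke above consults cc.dopts.unaryInt before falling back to the plain invoke path, which is exactly what grpc.WithUnaryInterceptor plugs into. A minimal sketch of a timing interceptor, assuming a placeholder backend address:

package main

import (
	"context"
	"log"
	"time"

	"google.golang.org/grpc"
)

// loggingInterceptor is invoked by ClientConn.Invoke for every unary RPC
// before the plain invoke path runs.
func loggingInterceptor(ctx context.Context, method string, req, reply interface{},
	cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
	start := time.Now()
	err := invoker(ctx, method, req, reply, cc, opts...)
	log.Printf("%s took %v, err=%v", method, time.Since(start), err)
	return err
}

func main() {
	// "localhost:50051" is a placeholder backend.
	conn, err := grpc.Dial("localhost:50051",
		grpc.WithInsecure(),
		grpc.WithUnaryInterceptor(loggingInterceptor),
	)
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()
}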
1956  vendor/google.golang.org/grpc/clientconn.go  generated  vendored
File diff suppressed because it is too large
50  vendor/google.golang.org/grpc/codec.go  generated  vendored  Normal file
@@ -0,0 +1,50 @@
/*
 *
 * Copyright 2014 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

package grpc

import (
	"google.golang.org/grpc/encoding"
	_ "google.golang.org/grpc/encoding/proto" // to register the Codec for "proto"
)

// baseCodec contains the functionality of both Codec and encoding.Codec, but
// omits the name/string, which vary between the two and are not needed for
// anything besides the registry in the encoding package.
type baseCodec interface {
	Marshal(v interface{}) ([]byte, error)
	Unmarshal(data []byte, v interface{}) error
}

var _ baseCodec = Codec(nil)
var _ baseCodec = encoding.Codec(nil)

// Codec defines the interface gRPC uses to encode and decode messages.
// Note that implementations of this interface must be thread safe;
// a Codec's methods can be called from concurrent goroutines.
//
// Deprecated: use encoding.Codec instead.
type Codec interface {
	// Marshal returns the wire format of v.
	Marshal(v interface{}) ([]byte, error)
	// Unmarshal parses the wire format into v.
	Unmarshal(data []byte, v interface{}) error
	// String returns the name of the Codec implementation. This is unused by
	// gRPC.
	String() string
}
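The deprecated Codec interface above only needs Marshal, Unmarshal, and String. A minimal sketch that satisfies it by delegating to github.com/golang/protobuf/proto; the package and type names are illustrative, and new code should implement encoding.Codec instead:

// Package customcodec sketches an implementation of the deprecated grpc.Codec interface.
package customcodec

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

// protoCodec delegates to the protobuf runtime and names itself "proto".
type protoCodec struct{}

func (protoCodec) Marshal(v interface{}) ([]byte, error) {
	m, ok := v.(proto.Message)
	if !ok {
		return nil, fmt.Errorf("protoCodec: %T is not a proto.Message", v)
	}
	return proto.Marshal(m)
}

func (protoCodec) Unmarshal(data []byte, v interface{}) error {
	m, ok := v.(proto.Message)
	if !ok {
		return fmt.Errorf("protoCodec: %T is not a proto.Message", v)
	}
	return proto.Unmarshal(data, m)
}

func (protoCodec) String() string { return "proto" }

In this version of the library such a codec could still be selected per call with grpc.CallCustomCodec(protoCodec{}), though encoding.Codec is the supported path going forward.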
2  vendor/google.golang.org/grpc/codegen.sh  generated  vendored
@@ -1,4 +1,4 @@
#!/bin/bash
#!/usr/bin/env bash

# This script serves as an example to demonstrate how to generate the gRPC-Go
# interface and the related messages from .proto file.
66  vendor/google.golang.org/grpc/codes/code_string.go  generated  vendored
@@ -1,16 +1,62 @@
// generated by stringer -type=Code; DO NOT EDIT
|
||||
/*
|
||||
*
|
||||
* Copyright 2017 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
package codes
|
||||
|
||||
import "fmt"
|
||||
import "strconv"
|
||||
|
||||
const _Code_name = "OKCanceledUnknownInvalidArgumentDeadlineExceededNotFoundAlreadyExistsPermissionDeniedResourceExhaustedFailedPreconditionAbortedOutOfRangeUnimplementedInternalUnavailableDataLossUnauthenticated"
|
||||
|
||||
var _Code_index = [...]uint8{0, 2, 10, 17, 32, 48, 56, 69, 85, 102, 120, 127, 137, 150, 158, 169, 177, 192}
|
||||
|
||||
func (i Code) String() string {
|
||||
if i+1 >= Code(len(_Code_index)) {
|
||||
return fmt.Sprintf("Code(%d)", i)
|
||||
func (c Code) String() string {
|
||||
switch c {
|
||||
case OK:
|
||||
return "OK"
|
||||
case Canceled:
|
||||
return "Canceled"
|
||||
case Unknown:
|
||||
return "Unknown"
|
||||
case InvalidArgument:
|
||||
return "InvalidArgument"
|
||||
case DeadlineExceeded:
|
||||
return "DeadlineExceeded"
|
||||
case NotFound:
|
||||
return "NotFound"
|
||||
case AlreadyExists:
|
||||
return "AlreadyExists"
|
||||
case PermissionDenied:
|
||||
return "PermissionDenied"
|
||||
case ResourceExhausted:
|
||||
return "ResourceExhausted"
|
||||
case FailedPrecondition:
|
||||
return "FailedPrecondition"
|
||||
case Aborted:
|
||||
return "Aborted"
|
||||
case OutOfRange:
|
||||
return "OutOfRange"
|
||||
case Unimplemented:
|
||||
return "Unimplemented"
|
||||
case Internal:
|
||||
return "Internal"
|
||||
case Unavailable:
|
||||
return "Unavailable"
|
||||
case DataLoss:
|
||||
return "DataLoss"
|
||||
case Unauthenticated:
|
||||
return "Unauthenticated"
|
||||
default:
|
||||
return "Code(" + strconv.FormatInt(int64(c), 10) + ")"
|
||||
}
|
||||
return _Code_name[_Code_index[i]:_Code_index[i+1]]
|
||||
}
|
||||
|
|
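The rewritten String method above replaces the stringer-generated table lookup with an explicit switch and a strconv-based fallback. A brief usage sketch of the resulting behaviour (the printed values follow directly from the switch):

package main

import (
	"fmt"

	"google.golang.org/grpc/codes"
)

func main() {
	fmt.Println(codes.NotFound) // prints "NotFound" via the switch case
	fmt.Println(codes.Code(42)) // prints "Code(42)" via the default branch
}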
165 vendor/google.golang.org/grpc/codes/codes.go generated vendored
@@ -1,33 +1,18 @@
|
|||
/*
|
||||
*
|
||||
* Copyright 2014, Google Inc.
|
||||
* All rights reserved.
|
||||
* Copyright 2014 gRPC authors.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions are
|
||||
* met:
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* * Redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer.
|
||||
* * Redistributions in binary form must reproduce the above
|
||||
* copyright notice, this list of conditions and the following disclaimer
|
||||
* in the documentation and/or other materials provided with the
|
||||
* distribution.
|
||||
* * Neither the name of Google Inc. nor the names of its
|
||||
* contributors may be used to endorse or promote products derived from
|
||||
* this software without specific prior written permission.
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
|
@@ -35,29 +20,40 @@
|
|||
// consistent across various languages.
|
||||
package codes // import "google.golang.org/grpc/codes"
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
// A Code is an unsigned 32-bit error code as defined in the gRPC spec.
|
||||
type Code uint32
|
||||
|
||||
//go:generate stringer -type=Code
|
||||
|
||||
const (
|
||||
// OK is returned on success.
|
||||
OK Code = 0
|
||||
|
||||
// Canceled indicates the operation was cancelled (typically by the caller).
|
||||
// Canceled indicates the operation was canceled (typically by the caller).
|
||||
//
|
||||
// The gRPC framework will generate this error code when cancellation
|
||||
// is requested.
|
||||
Canceled Code = 1
|
||||
|
||||
// Unknown error. An example of where this error may be returned is
|
||||
// Unknown error. An example of where this error may be returned is
|
||||
// if a Status value received from another address space belongs to
|
||||
// an error-space that is not known in this address space. Also
|
||||
// an error-space that is not known in this address space. Also
|
||||
// errors raised by APIs that do not return enough error information
|
||||
// may be converted to this error.
|
||||
//
|
||||
// The gRPC framework will generate this error code in the above two
|
||||
// mentioned cases.
|
||||
Unknown Code = 2
|
||||
|
||||
// InvalidArgument indicates client specified an invalid argument.
|
||||
// Note that this differs from FailedPrecondition. It indicates arguments
|
||||
// that are problematic regardless of the state of the system
|
||||
// (e.g., a malformed file name).
|
||||
//
|
||||
// This error code will not be generated by the gRPC framework.
|
||||
InvalidArgument Code = 3
|
||||
|
||||
// DeadlineExceeded means operation expired before completion.
|
||||
|
@@ -65,30 +61,40 @@ const (
|
|||
// returned even if the operation has completed successfully. For
|
||||
// example, a successful response from a server could have been delayed
|
||||
// long enough for the deadline to expire.
|
||||
//
|
||||
// The gRPC framework will generate this error code when the deadline is
|
||||
// exceeded.
|
||||
DeadlineExceeded Code = 4
|
||||
|
||||
// NotFound means some requested entity (e.g., file or directory) was
|
||||
// not found.
|
||||
//
|
||||
// This error code will not be generated by the gRPC framework.
|
||||
NotFound Code = 5
|
||||
|
||||
// AlreadyExists means an attempt to create an entity failed because one
|
||||
// already exists.
|
||||
//
|
||||
// This error code will not be generated by the gRPC framework.
|
||||
AlreadyExists Code = 6
|
||||
|
||||
// PermissionDenied indicates the caller does not have permission to
|
||||
// execute the specified operation. It must not be used for rejections
|
||||
// caused by exhausting some resource (use ResourceExhausted
|
||||
// instead for those errors). It must not be
|
||||
// instead for those errors). It must not be
|
||||
// used if the caller cannot be identified (use Unauthenticated
|
||||
// instead for those errors).
|
||||
//
|
||||
// This error code will not be generated by the gRPC core framework,
|
||||
// but expect authentication middleware to use it.
|
||||
PermissionDenied Code = 7
|
||||
|
||||
// Unauthenticated indicates the request does not have valid
|
||||
// authentication credentials for the operation.
|
||||
Unauthenticated Code = 16
|
||||
|
||||
// ResourceExhausted indicates some resource has been exhausted, perhaps
|
||||
// a per-user quota, or perhaps the entire file system is out of space.
|
||||
//
|
||||
// This error code will be generated by the gRPC framework in
|
||||
// out-of-memory and server overload situations, or when a message is
|
||||
// larger than the configured maximum size.
|
||||
ResourceExhausted Code = 8
|
||||
|
||||
// FailedPrecondition indicates operation was rejected because the
|
||||
|
@@ -102,7 +108,7 @@ const (
|
|||
// (b) Use Aborted if the client should retry at a higher-level
|
||||
// (e.g., restarting a read-modify-write sequence).
|
||||
// (c) Use FailedPrecondition if the client should not retry until
|
||||
// the system state has been explicitly fixed. E.g., if an "rmdir"
|
||||
// the system state has been explicitly fixed. E.g., if an "rmdir"
|
||||
// fails because the directory is non-empty, FailedPrecondition
|
||||
// should be returned since the client should not retry unless
|
||||
// they have first fixed up the directory by deleting files from it.
|
||||
|
@@ -110,6 +116,8 @@ const (
|
|||
// REST Get/Update/Delete on a resource and the resource on the
|
||||
// server does not match the condition. E.g., conflicting
|
||||
// read-modify-write on the same resource.
|
||||
//
|
||||
// This error code will not be generated by the gRPC framework.
|
||||
FailedPrecondition Code = 9
|
||||
|
||||
// Aborted indicates the operation was aborted, typically due to a
|
||||
|
@@ -118,6 +126,8 @@ const (
|
|||
//
|
||||
// See litmus test above for deciding between FailedPrecondition,
|
||||
// Aborted, and Unavailable.
|
||||
//
|
||||
// This error code will not be generated by the gRPC framework.
|
||||
Aborted Code = 10
|
||||
|
||||
// OutOfRange means operation was attempted past the valid range.
|
||||
|
@@ -131,29 +141,104 @@ const (
|
|||
// file size.
|
||||
//
|
||||
// There is a fair bit of overlap between FailedPrecondition and
|
||||
// OutOfRange. We recommend using OutOfRange (the more specific
|
||||
// OutOfRange. We recommend using OutOfRange (the more specific
|
||||
// error) when it applies so that callers who are iterating through
|
||||
// a space can easily look for an OutOfRange error to detect when
|
||||
// they are done.
|
||||
//
|
||||
// This error code will not be generated by the gRPC framework.
|
||||
OutOfRange Code = 11
|
||||
|
||||
// Unimplemented indicates operation is not implemented or not
|
||||
// supported/enabled in this service.
|
||||
//
|
||||
// This error code will be generated by the gRPC framework. Most
|
||||
// commonly, you will see this error code when a method implementation
|
||||
// is missing on the server. It can also be generated for unknown
|
||||
// compression algorithms or a disagreement as to whether an RPC should
|
||||
// be streaming.
|
||||
Unimplemented Code = 12
|
||||
|
||||
// Internal errors. Means some invariants expected by underlying
|
||||
// system has been broken. If you see one of these errors,
|
||||
// Internal errors. Means some invariants expected by underlying
|
||||
// system has been broken. If you see one of these errors,
|
||||
// something is very broken.
|
||||
//
|
||||
// This error code will be generated by the gRPC framework in several
|
||||
// internal error conditions.
|
||||
Internal Code = 13
|
||||
|
||||
// Unavailable indicates the service is currently unavailable.
|
||||
// This is a most likely a transient condition and may be corrected
|
||||
// by retrying with a backoff.
|
||||
// by retrying with a backoff. Note that it is not always safe to retry
|
||||
// non-idempotent operations.
|
||||
//
|
||||
// See litmus test above for deciding between FailedPrecondition,
|
||||
// Aborted, and Unavailable.
|
||||
//
|
||||
// This error code will be generated by the gRPC framework during
|
||||
// abrupt shutdown of a server process or network connection.
|
||||
Unavailable Code = 14
|
||||
|
||||
// DataLoss indicates unrecoverable data loss or corruption.
|
||||
//
|
||||
// This error code will not be generated by the gRPC framework.
|
||||
DataLoss Code = 15
|
||||
|
||||
// Unauthenticated indicates the request does not have valid
|
||||
// authentication credentials for the operation.
|
||||
//
|
||||
// The gRPC framework will generate this error code when the
|
||||
// authentication metadata is invalid or a Credentials callback fails,
|
||||
// but also expect authentication middleware to generate it.
|
||||
Unauthenticated Code = 16
|
||||
|
||||
_maxCode = 17
|
||||
)
|
||||
|
||||
var strToCode = map[string]Code{
|
||||
`"OK"`: OK,
|
||||
`"CANCELLED"`:/* [sic] */ Canceled,
|
||||
`"UNKNOWN"`: Unknown,
|
||||
`"INVALID_ARGUMENT"`: InvalidArgument,
|
||||
`"DEADLINE_EXCEEDED"`: DeadlineExceeded,
|
||||
`"NOT_FOUND"`: NotFound,
|
||||
`"ALREADY_EXISTS"`: AlreadyExists,
|
||||
`"PERMISSION_DENIED"`: PermissionDenied,
|
||||
`"RESOURCE_EXHAUSTED"`: ResourceExhausted,
|
||||
`"FAILED_PRECONDITION"`: FailedPrecondition,
|
||||
`"ABORTED"`: Aborted,
|
||||
`"OUT_OF_RANGE"`: OutOfRange,
|
||||
`"UNIMPLEMENTED"`: Unimplemented,
|
||||
`"INTERNAL"`: Internal,
|
||||
`"UNAVAILABLE"`: Unavailable,
|
||||
`"DATA_LOSS"`: DataLoss,
|
||||
`"UNAUTHENTICATED"`: Unauthenticated,
|
||||
}
|
||||
|
||||
// UnmarshalJSON unmarshals b into the Code.
|
||||
func (c *Code) UnmarshalJSON(b []byte) error {
|
||||
// From json.Unmarshaler: By convention, to approximate the behavior of
|
||||
// Unmarshal itself, Unmarshalers implement UnmarshalJSON([]byte("null")) as
|
||||
// a no-op.
|
||||
if string(b) == "null" {
|
||||
return nil
|
||||
}
|
||||
if c == nil {
|
||||
return fmt.Errorf("nil receiver passed to UnmarshalJSON")
|
||||
}
|
||||
|
||||
if ci, err := strconv.ParseUint(string(b), 10, 32); err == nil {
|
||||
if ci >= _maxCode {
|
||||
return fmt.Errorf("invalid code: %q", ci)
|
||||
}
|
||||
|
||||
*c = Code(ci)
|
||||
return nil
|
||||
}
|
||||
|
||||
if jc, ok := strToCode[string(b)]; ok {
|
||||
*c = jc
|
||||
return nil
|
||||
}
|
||||
return fmt.Errorf("invalid code: %q", string(b))
|
||||
}
|
||||
|
|
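With the UnmarshalJSON method above, codes.Code values can be decoded from either the canonical upper-case names in strToCode or their numeric values. A minimal sketch, using a hypothetical retryPolicy struct purely for illustration:

package main

import (
	"encoding/json"
	"fmt"

	"google.golang.org/grpc/codes"
)

type retryPolicy struct {
	RetryableStatusCodes []codes.Code `json:"retryableStatusCodes"`
}

func main() {
	var p retryPolicy
	// Both the string form ("UNAVAILABLE") and the numeric form (4) are accepted.
	raw := `{"retryableStatusCodes": ["UNAVAILABLE", 4]}`
	if err := json.Unmarshal([]byte(raw), &p); err != nil {
		panic(err)
	}
	fmt.Println(p.RetryableStatusCodes) // [Unavailable DeadlineExceeded]
}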
73 vendor/google.golang.org/grpc/connectivity/connectivity.go generated vendored Normal file
@@ -0,0 +1,73 @@
|
|||
/*
|
||||
*
|
||||
* Copyright 2017 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
// Package connectivity defines connectivity semantics.
|
||||
// For details, see https://github.com/grpc/grpc/blob/master/doc/connectivity-semantics-and-api.md.
|
||||
// All APIs in this package are experimental.
|
||||
package connectivity
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"google.golang.org/grpc/grpclog"
|
||||
)
|
||||
|
||||
// State indicates the state of connectivity.
|
||||
// It can be the state of a ClientConn or SubConn.
|
||||
type State int
|
||||
|
||||
func (s State) String() string {
|
||||
switch s {
|
||||
case Idle:
|
||||
return "IDLE"
|
||||
case Connecting:
|
||||
return "CONNECTING"
|
||||
case Ready:
|
||||
return "READY"
|
||||
case TransientFailure:
|
||||
return "TRANSIENT_FAILURE"
|
||||
case Shutdown:
|
||||
return "SHUTDOWN"
|
||||
default:
|
||||
grpclog.Errorf("unknown connectivity state: %d", s)
|
||||
return "Invalid-State"
|
||||
}
|
||||
}
|
||||
|
||||
const (
|
||||
// Idle indicates the ClientConn is idle.
|
||||
Idle State = iota
|
||||
// Connecting indicates the ClientConn is connecting.
|
||||
Connecting
|
||||
// Ready indicates the ClientConn is ready for work.
|
||||
Ready
|
||||
// TransientFailure indicates the ClientConn has seen a failure but expects to recover.
|
||||
TransientFailure
|
||||
// Shutdown indicates the ClientConn has started shutting down.
|
||||
Shutdown
|
||||
)
|
||||
|
||||
// Reporter reports the connectivity states.
|
||||
type Reporter interface {
|
||||
// CurrentState returns the current state of the reporter.
|
||||
CurrentState() State
|
||||
// WaitForStateChange blocks until the reporter's state is different from the given state,
|
||||
// and returns true.
|
||||
// It returns false if <-ctx.Done() can proceed (ctx got timeout or got canceled).
|
||||
WaitForStateChange(context.Context, State) bool
|
||||
}
|
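The connectivity.State and Reporter definitions above are surfaced on grpc.ClientConn through its GetState and WaitForStateChange methods. A hedged sketch of waiting for a connection to become Ready (the target address is a placeholder):

package main

import (
	"context"
	"log"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/connectivity"
)

// waitUntilReady polls the connectivity state until Ready or until ctx expires.
func waitUntilReady(ctx context.Context, cc *grpc.ClientConn) bool {
	for {
		s := cc.GetState()
		if s == connectivity.Ready {
			return true
		}
		// Block until the state changes or the context is done.
		if !cc.WaitForStateChange(ctx, s) {
			return false
		}
	}
}

func main() {
	cc, err := grpc.Dial("localhost:50051", grpc.WithInsecure())
	if err != nil {
		log.Fatal(err)
	}
	defer cc.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	cc.Connect() // leave the Idle state eagerly
	log.Println("ready:", waitUntilReady(ctx, cc))
}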
47 vendor/google.golang.org/grpc/coverage.sh generated vendored
@@ -1,47 +0,0 @@
|
|||
#!/bin/bash
|
||||
|
||||
set -e
|
||||
|
||||
workdir=.cover
|
||||
profile="$workdir/cover.out"
|
||||
mode=set
|
||||
end2endtest="google.golang.org/grpc/test"
|
||||
|
||||
generate_cover_data() {
|
||||
rm -rf "$workdir"
|
||||
mkdir "$workdir"
|
||||
|
||||
for pkg in "$@"; do
|
||||
if [ $pkg == "google.golang.org/grpc" -o $pkg == "google.golang.org/grpc/transport" -o $pkg == "google.golang.org/grpc/metadata" -o $pkg == "google.golang.org/grpc/credentials" ]
|
||||
then
|
||||
f="$workdir/$(echo $pkg | tr / -)"
|
||||
go test -covermode="$mode" -coverprofile="$f.cover" "$pkg"
|
||||
go test -covermode="$mode" -coverpkg "$pkg" -coverprofile="$f.e2e.cover" "$end2endtest"
|
||||
fi
|
||||
done
|
||||
|
||||
echo "mode: $mode" >"$profile"
|
||||
grep -h -v "^mode:" "$workdir"/*.cover >>"$profile"
|
||||
}
|
||||
|
||||
show_cover_report() {
|
||||
go tool cover -${1}="$profile"
|
||||
}
|
||||
|
||||
push_to_coveralls() {
|
||||
goveralls -coverprofile="$profile"
|
||||
}
|
||||
|
||||
generate_cover_data $(go list ./...)
|
||||
show_cover_report func
|
||||
case "$1" in
|
||||
"")
|
||||
;;
|
||||
--html)
|
||||
show_cover_report html ;;
|
||||
--coveralls)
|
||||
push_to_coveralls ;;
|
||||
*)
|
||||
echo >&2 "error: invalid option: $1" ;;
|
||||
esac
|
||||
rm -rf "$workdir"
|
377 vendor/google.golang.org/grpc/credentials/credentials.go generated vendored
@@ -1,33 +1,18 @@
|
|||
/*
|
||||
*
|
||||
* Copyright 2014, Google Inc.
|
||||
* All rights reserved.
|
||||
* Copyright 2014 gRPC authors.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions are
|
||||
* met:
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* * Redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer.
|
||||
* * Redistributions in binary form must reproduce the above
|
||||
* copyright notice, this list of conditions and the following disclaimer
|
||||
* in the documentation and/or other materials provided with the
|
||||
* distribution.
|
||||
* * Neither the name of Google Inc. nor the names of its
|
||||
* contributors may be used to endorse or promote products derived from
|
||||
* this software without specific prior written permission.
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
|
@@ -38,189 +23,267 @@
|
|||
package credentials // import "google.golang.org/grpc/credentials"
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
"crypto/x509"
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
"github.com/golang/protobuf/proto"
|
||||
"google.golang.org/grpc/attributes"
|
||||
"google.golang.org/grpc/internal"
|
||||
)
|
||||
|
||||
var (
|
||||
// alpnProtoStr are the specified application level protocols for gRPC.
|
||||
alpnProtoStr = []string{"h2"}
|
||||
)
|
||||
|
||||
// Credentials defines the common interface all supported credentials must
|
||||
// implement.
|
||||
type Credentials interface {
|
||||
// PerRPCCredentials defines the common interface for the credentials which need to
|
||||
// attach security information to every RPC (e.g., oauth2).
|
||||
type PerRPCCredentials interface {
|
||||
// GetRequestMetadata gets the current request metadata, refreshing
|
||||
// tokens if required. This should be called by the transport layer on
|
||||
// each request, and the data should be populated in headers or other
|
||||
// context. uri is the URI of the entry point for the request. When
|
||||
// supported by the underlying implementation, ctx can be used for
|
||||
// timeout and cancellation.
|
||||
// context. If a status code is returned, it will be used as the status
|
||||
// for the RPC. uri is the URI of the entry point for the request.
|
||||
// When supported by the underlying implementation, ctx can be used for
|
||||
// timeout and cancellation. Additionally, RequestInfo data will be
|
||||
// available via ctx to this call.
|
||||
// TODO(zhaoq): Define the set of the qualified keys instead of leaving
|
||||
// it as an arbitrary string.
|
||||
GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error)
|
||||
// RequireTransportSecurity indicates whether the credentails requires
|
||||
// RequireTransportSecurity indicates whether the credentials requires
|
||||
// transport security.
|
||||
RequireTransportSecurity() bool
|
||||
}
|
||||
|
||||
// SecurityLevel defines the protection level on an established connection.
|
||||
//
|
||||
// This API is experimental.
|
||||
type SecurityLevel int
|
||||
|
||||
const (
|
||||
// Invalid indicates an invalid security level.
|
||||
// The zero SecurityLevel value is invalid for backward compatibility.
|
||||
Invalid SecurityLevel = iota
|
||||
// NoSecurity indicates a connection is insecure.
|
||||
NoSecurity
|
||||
// IntegrityOnly indicates a connection only provides integrity protection.
|
||||
IntegrityOnly
|
||||
// PrivacyAndIntegrity indicates a connection provides both privacy and integrity protection.
|
||||
PrivacyAndIntegrity
|
||||
)
|
||||
|
||||
// String returns SecurityLevel in a string format.
|
||||
func (s SecurityLevel) String() string {
|
||||
switch s {
|
||||
case NoSecurity:
|
||||
return "NoSecurity"
|
||||
case IntegrityOnly:
|
||||
return "IntegrityOnly"
|
||||
case PrivacyAndIntegrity:
|
||||
return "PrivacyAndIntegrity"
|
||||
}
|
||||
return fmt.Sprintf("invalid SecurityLevel: %v", int(s))
|
||||
}
|
||||
|
||||
// CommonAuthInfo contains authenticated information common to AuthInfo implementations.
|
||||
// It should be embedded in a struct implementing AuthInfo to provide additional information
|
||||
// about the credentials.
|
||||
//
|
||||
// This API is experimental.
|
||||
type CommonAuthInfo struct {
|
||||
SecurityLevel SecurityLevel
|
||||
}
|
||||
|
||||
// GetCommonAuthInfo returns the pointer to CommonAuthInfo struct.
|
||||
func (c *CommonAuthInfo) GetCommonAuthInfo() *CommonAuthInfo {
|
||||
return c
|
||||
}
|
||||
|
||||
// ProtocolInfo provides information regarding the gRPC wire protocol version,
|
||||
// security protocol, security protocol version in use, etc.
|
||||
// security protocol, security protocol version in use, server name, etc.
|
||||
type ProtocolInfo struct {
|
||||
// ProtocolVersion is the gRPC wire protocol version.
|
||||
ProtocolVersion string
|
||||
// SecurityProtocol is the security protocol in use.
|
||||
SecurityProtocol string
|
||||
// SecurityVersion is the security protocol version.
|
||||
// SecurityVersion is the security protocol version. It is a static version string from the
|
||||
// credentials, not a value that reflects per-connection protocol negotiation. To retrieve
|
||||
// details about the credentials used for a connection, use the Peer's AuthInfo field instead.
|
||||
//
|
||||
// Deprecated: please use Peer.AuthInfo.
|
||||
SecurityVersion string
|
||||
// ServerName is the user-configured server name.
|
||||
ServerName string
|
||||
}
|
||||
|
||||
// AuthInfo defines the common interface for the auth information the users are interested in.
|
||||
// A struct that implements AuthInfo should embed CommonAuthInfo by including additional
|
||||
// information about the credentials in it.
|
||||
type AuthInfo interface {
|
||||
AuthType() string
|
||||
}
|
||||
|
||||
// TransportAuthenticator defines the common interface for all the live gRPC wire
|
||||
// ErrConnDispatched indicates that rawConn has been dispatched out of gRPC
|
||||
// and the caller should not close rawConn.
|
||||
var ErrConnDispatched = errors.New("credentials: rawConn is dispatched out of gRPC")
|
||||
|
||||
// TransportCredentials defines the common interface for all the live gRPC wire
|
||||
// protocols and supported transport security protocols (e.g., TLS, SSL).
|
||||
type TransportAuthenticator interface {
|
||||
// ClientHandshake does the authentication handshake specified by the corresponding
|
||||
// authentication protocol on rawConn for clients. It returns the authenticated
|
||||
// connection and the corresponding auth information about the connection.
|
||||
ClientHandshake(addr string, rawConn net.Conn, timeout time.Duration) (net.Conn, AuthInfo, error)
|
||||
type TransportCredentials interface {
|
||||
// ClientHandshake does the authentication handshake specified by the
|
||||
// corresponding authentication protocol on rawConn for clients. It returns
|
||||
// the authenticated connection and the corresponding auth information
|
||||
// about the connection. The auth information should embed CommonAuthInfo
|
||||
// to return additional information about the credentials. Implementations
|
||||
// must use the provided context to implement timely cancellation. gRPC
|
||||
// will try to reconnect if the error returned is a temporary error
|
||||
// (io.EOF, context.DeadlineExceeded or err.Temporary() == true). If the
|
||||
// returned error is a wrapper error, implementations should make sure that
|
||||
// the error implements Temporary() to have the correct retry behaviors.
|
||||
// Additionally, ClientHandshakeInfo data will be available via the context
|
||||
// passed to this call.
|
||||
//
|
||||
// If the returned net.Conn is closed, it MUST close the net.Conn provided.
|
||||
ClientHandshake(context.Context, string, net.Conn) (net.Conn, AuthInfo, error)
|
||||
// ServerHandshake does the authentication handshake for servers. It returns
|
||||
// the authenticated connection and the corresponding auth information about
|
||||
// the connection.
|
||||
ServerHandshake(rawConn net.Conn) (net.Conn, AuthInfo, error)
|
||||
// Info provides the ProtocolInfo of this TransportAuthenticator.
|
||||
// the connection. The auth information should embed CommonAuthInfo to return additional information
|
||||
// about the credentials.
|
||||
//
|
||||
// If the returned net.Conn is closed, it MUST close the net.Conn provided.
|
||||
ServerHandshake(net.Conn) (net.Conn, AuthInfo, error)
|
||||
// Info provides the ProtocolInfo of this TransportCredentials.
|
||||
Info() ProtocolInfo
|
||||
Credentials
|
||||
// Clone makes a copy of this TransportCredentials.
|
||||
Clone() TransportCredentials
|
||||
// OverrideServerName overrides the server name used to verify the hostname on the returned certificates from the server.
|
||||
// gRPC internals also use it to override the virtual hosting name if it is set.
|
||||
// It must be called before dialing. Currently, this is only used by grpclb.
|
||||
OverrideServerName(string) error
|
||||
}
|
||||
|
||||
// TLSInfo contains the auth information for a TLS authenticated connection.
|
||||
// It implements the AuthInfo interface.
|
||||
type TLSInfo struct {
|
||||
State tls.ConnectionState
|
||||
// Bundle is a combination of TransportCredentials and PerRPCCredentials.
|
||||
//
|
||||
// It also contains a mode switching method, so it can be used as a combination
|
||||
// of different credential policies.
|
||||
//
|
||||
// Bundle cannot be used together with individual TransportCredentials.
|
||||
// PerRPCCredentials from Bundle will be appended to other PerRPCCredentials.
|
||||
//
|
||||
// This API is experimental.
|
||||
type Bundle interface {
|
||||
TransportCredentials() TransportCredentials
|
||||
PerRPCCredentials() PerRPCCredentials
|
||||
// NewWithMode should make a copy of Bundle, and switch mode. Modifying the
|
||||
// existing Bundle may cause races.
|
||||
//
|
||||
// NewWithMode returns nil if the requested mode is not supported.
|
||||
NewWithMode(mode string) (Bundle, error)
|
||||
}
|
||||
|
||||
func (t TLSInfo) AuthType() string {
|
||||
return "tls"
|
||||
// RequestInfo contains request data attached to the context passed to GetRequestMetadata calls.
|
||||
//
|
||||
// This API is experimental.
|
||||
type RequestInfo struct {
|
||||
// The method passed to Invoke or NewStream for this RPC. (For proto methods, this has the format "/some.Service/Method")
|
||||
Method string
|
||||
// AuthInfo contains the information from a security handshake (TransportCredentials.ClientHandshake, TransportCredentials.ServerHandshake)
|
||||
AuthInfo AuthInfo
|
||||
}
|
||||
|
||||
// tlsCreds is the credentials required for authenticating a connection using TLS.
|
||||
type tlsCreds struct {
|
||||
// TLS configuration
|
||||
config tls.Config
|
||||
// requestInfoKey is a struct to be used as the key when attaching a RequestInfo to a context object.
|
||||
type requestInfoKey struct{}
|
||||
|
||||
// RequestInfoFromContext extracts the RequestInfo from the context if it exists.
|
||||
//
|
||||
// This API is experimental.
|
||||
func RequestInfoFromContext(ctx context.Context) (ri RequestInfo, ok bool) {
|
||||
ri, ok = ctx.Value(requestInfoKey{}).(RequestInfo)
|
||||
return
|
||||
}
|
||||
|
||||
func (c tlsCreds) Info() ProtocolInfo {
|
||||
return ProtocolInfo{
|
||||
SecurityProtocol: "tls",
|
||||
SecurityVersion: "1.2",
|
||||
// ClientHandshakeInfo holds data to be passed to ClientHandshake. This makes
|
||||
// it possible to pass arbitrary data to the handshaker from gRPC, resolver,
|
||||
// balancer etc. Individual credential implementations control the actual
|
||||
// format of the data that they are willing to receive.
|
||||
//
|
||||
// This API is experimental.
|
||||
type ClientHandshakeInfo struct {
|
||||
// Attributes contains the attributes for the address. It could be provided
|
||||
// by the gRPC, resolver, balancer etc.
|
||||
Attributes *attributes.Attributes
|
||||
}
|
||||
|
||||
// clientHandshakeInfoKey is a struct used as the key to store
|
||||
// ClientHandshakeInfo in a context.
|
||||
type clientHandshakeInfoKey struct{}
|
||||
|
||||
// ClientHandshakeInfoFromContext returns the ClientHandshakeInfo struct stored
|
||||
// in ctx.
|
||||
//
|
||||
// This API is experimental.
|
||||
func ClientHandshakeInfoFromContext(ctx context.Context) ClientHandshakeInfo {
|
||||
chi, _ := ctx.Value(clientHandshakeInfoKey{}).(ClientHandshakeInfo)
|
||||
return chi
|
||||
}
|
||||
|
||||
// CheckSecurityLevel checks if a connection's security level is greater than or equal to the specified one.
|
||||
// It returns success if 1) the condition is satisified or 2) AuthInfo struct does not implement GetCommonAuthInfo() method
|
||||
// or 3) CommonAuthInfo.SecurityLevel has an invalid zero value. For 2) and 3), it is for the purpose of backward-compatibility.
|
||||
//
|
||||
// This API is experimental.
|
||||
func CheckSecurityLevel(ctx context.Context, level SecurityLevel) error {
|
||||
type internalInfo interface {
|
||||
GetCommonAuthInfo() *CommonAuthInfo
|
||||
}
|
||||
}
|
||||
|
||||
// GetRequestMetadata returns nil, nil since TLS credentials does not have
|
||||
// metadata.
|
||||
func (c *tlsCreds) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (c *tlsCreds) RequireTransportSecurity() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
type timeoutError struct{}
|
||||
|
||||
func (timeoutError) Error() string { return "credentials: Dial timed out" }
|
||||
func (timeoutError) Timeout() bool { return true }
|
||||
func (timeoutError) Temporary() bool { return true }
|
||||
|
||||
func (c *tlsCreds) ClientHandshake(addr string, rawConn net.Conn, timeout time.Duration) (_ net.Conn, _ AuthInfo, err error) {
|
||||
// borrow some code from tls.DialWithDialer
|
||||
var errChannel chan error
|
||||
if timeout != 0 {
|
||||
errChannel = make(chan error, 2)
|
||||
time.AfterFunc(timeout, func() {
|
||||
errChannel <- timeoutError{}
|
||||
})
|
||||
ri, _ := RequestInfoFromContext(ctx)
|
||||
if ri.AuthInfo == nil {
|
||||
return errors.New("unable to obtain SecurityLevel from context")
|
||||
}
|
||||
if c.config.ServerName == "" {
|
||||
colonPos := strings.LastIndex(addr, ":")
|
||||
if colonPos == -1 {
|
||||
colonPos = len(addr)
|
||||
if ci, ok := ri.AuthInfo.(internalInfo); ok {
|
||||
// CommonAuthInfo.SecurityLevel has an invalid value.
|
||||
if ci.GetCommonAuthInfo().SecurityLevel == Invalid {
|
||||
return nil
|
||||
}
|
||||
if ci.GetCommonAuthInfo().SecurityLevel < level {
|
||||
return fmt.Errorf("requires SecurityLevel %v; connection has %v", level, ci.GetCommonAuthInfo().SecurityLevel)
|
||||
}
|
||||
c.config.ServerName = addr[:colonPos]
|
||||
}
|
||||
conn := tls.Client(rawConn, &c.config)
|
||||
if timeout == 0 {
|
||||
err = conn.Handshake()
|
||||
} else {
|
||||
go func() {
|
||||
errChannel <- conn.Handshake()
|
||||
}()
|
||||
err = <-errChannel
|
||||
}
|
||||
if err != nil {
|
||||
rawConn.Close()
|
||||
return nil, nil, err
|
||||
}
|
||||
// TODO(zhaoq): Omit the auth info for client now. It is more for
|
||||
// information than anything else.
|
||||
return conn, nil, nil
|
||||
// The condition is satisfied or AuthInfo struct does not implement GetCommonAuthInfo() method.
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *tlsCreds) ServerHandshake(rawConn net.Conn) (net.Conn, AuthInfo, error) {
|
||||
conn := tls.Server(rawConn, &c.config)
|
||||
if err := conn.Handshake(); err != nil {
|
||||
rawConn.Close()
|
||||
return nil, nil, err
|
||||
func init() {
|
||||
internal.NewRequestInfoContext = func(ctx context.Context, ri RequestInfo) context.Context {
|
||||
return context.WithValue(ctx, requestInfoKey{}, ri)
|
||||
}
|
||||
return conn, TLSInfo{conn.ConnectionState()}, nil
|
||||
}
|
||||
|
||||
// NewTLS uses c to construct a TransportAuthenticator based on TLS.
|
||||
func NewTLS(c *tls.Config) TransportAuthenticator {
|
||||
tc := &tlsCreds{*c}
|
||||
tc.config.NextProtos = alpnProtoStr
|
||||
return tc
|
||||
}
|
||||
|
||||
// NewClientTLSFromCert constructs a TLS from the input certificate for client.
|
||||
func NewClientTLSFromCert(cp *x509.CertPool, serverName string) TransportAuthenticator {
|
||||
return NewTLS(&tls.Config{ServerName: serverName, RootCAs: cp})
|
||||
}
|
||||
|
||||
// NewClientTLSFromFile constructs a TLS from the input certificate file for client.
|
||||
func NewClientTLSFromFile(certFile, serverName string) (TransportAuthenticator, error) {
|
||||
b, err := ioutil.ReadFile(certFile)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
internal.NewClientHandshakeInfoContext = func(ctx context.Context, chi ClientHandshakeInfo) context.Context {
|
||||
return context.WithValue(ctx, clientHandshakeInfoKey{}, chi)
|
||||
}
|
||||
cp := x509.NewCertPool()
|
||||
if !cp.AppendCertsFromPEM(b) {
|
||||
return nil, fmt.Errorf("credentials: failed to append certificates")
|
||||
}
|
||||
return NewTLS(&tls.Config{ServerName: serverName, RootCAs: cp}), nil
|
||||
}
|
||||
|
||||
// NewServerTLSFromCert constructs a TLS from the input certificate for server.
|
||||
func NewServerTLSFromCert(cert *tls.Certificate) TransportAuthenticator {
|
||||
return NewTLS(&tls.Config{Certificates: []tls.Certificate{*cert}})
|
||||
// ChannelzSecurityInfo defines the interface that security protocols should implement
|
||||
// in order to provide security info to channelz.
|
||||
//
|
||||
// This API is experimental.
|
||||
type ChannelzSecurityInfo interface {
|
||||
GetSecurityValue() ChannelzSecurityValue
|
||||
}
|
||||
|
||||
// NewServerTLSFromFile constructs a TLS from the input certificate file and key
|
||||
// file for server.
|
||||
func NewServerTLSFromFile(certFile, keyFile string) (TransportAuthenticator, error) {
|
||||
cert, err := tls.LoadX509KeyPair(certFile, keyFile)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return NewTLS(&tls.Config{Certificates: []tls.Certificate{cert}}), nil
|
||||
// ChannelzSecurityValue defines the interface that GetSecurityValue() return value
|
||||
// should satisfy. This interface should only be satisfied by *TLSChannelzSecurityValue
|
||||
// and *OtherChannelzSecurityValue.
|
||||
//
|
||||
// This API is experimental.
|
||||
type ChannelzSecurityValue interface {
|
||||
isChannelzSecurityValue()
|
||||
}
|
||||
|
||||
// OtherChannelzSecurityValue defines the struct that non-TLS protocol should return
|
||||
// from GetSecurityValue(), which contains protocol specific security info. Note
|
||||
// the Value field will be sent to users of channelz requesting channel info, and
|
||||
// thus sensitive info should better be avoided.
|
||||
//
|
||||
// This API is experimental.
|
||||
type OtherChannelzSecurityValue struct {
|
||||
ChannelzSecurityValue
|
||||
Name string
|
||||
Value proto.Message
|
||||
}
|
||||
|
|
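To illustrate the PerRPCCredentials interface defined in credentials.go above, here is a minimal sketch of token-based call credentials. The "authorization" header name and the static token are assumptions for the example; they are not prescribed by this diff:

package example

import (
	"context"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials"
)

// tokenCreds attaches a bearer token to every RPC.
type tokenCreds struct {
	token string
}

func (c tokenCreds) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) {
	return map[string]string{"authorization": "Bearer " + c.token}, nil
}

// RequireTransportSecurity forces these credentials onto a secure transport.
func (c tokenCreds) RequireTransportSecurity() bool { return true }

var _ credentials.PerRPCCredentials = tokenCreds{}

// dial wires the per-RPC credentials together with transport credentials.
func dial(target string, tc credentials.TransportCredentials) (*grpc.ClientConn, error) {
	return grpc.Dial(target,
		grpc.WithTransportCredentials(tc),
		grpc.WithPerRPCCredentials(tokenCreds{token: "example-token"}),
	)
}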
30 vendor/google.golang.org/grpc/credentials/go12.go generated vendored Normal file
@@ -0,0 +1,30 @@
|
|||
// +build go1.12
|
||||
|
||||
/*
|
||||
*
|
||||
* Copyright 2019 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
package credentials
|
||||
|
||||
import "crypto/tls"
|
||||
|
||||
// This init function adds cipher suite constants only defined in Go 1.12.
|
||||
func init() {
|
||||
cipherSuiteLookup[tls.TLS_AES_128_GCM_SHA256] = "TLS_AES_128_GCM_SHA256"
|
||||
cipherSuiteLookup[tls.TLS_AES_256_GCM_SHA384] = "TLS_AES_256_GCM_SHA384"
|
||||
cipherSuiteLookup[tls.TLS_CHACHA20_POLY1305_SHA256] = "TLS_CHACHA20_POLY1305_SHA256"
|
||||
}
|
61 vendor/google.golang.org/grpc/credentials/internal/syscallconn.go generated vendored Normal file
@@ -0,0 +1,61 @@
|
|||
// +build !appengine
|
||||
|
||||
/*
|
||||
*
|
||||
* Copyright 2018 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
// Package internal contains credentials-internal code.
|
||||
package internal
|
||||
|
||||
import (
|
||||
"net"
|
||||
"syscall"
|
||||
)
|
||||
|
||||
type sysConn = syscall.Conn
|
||||
|
||||
// syscallConn keeps reference of rawConn to support syscall.Conn for channelz.
|
||||
// SyscallConn() (the method in interface syscall.Conn) is explicitly
|
||||
// implemented on this type,
|
||||
//
|
||||
// Interface syscall.Conn is implemented by most net.Conn implementations (e.g.
|
||||
// TCPConn, UnixConn), but is not part of net.Conn interface. So wrapper conns
|
||||
// that embed net.Conn don't implement syscall.Conn. (Side note: tls.Conn
|
||||
// doesn't embed net.Conn, so even if syscall.Conn is part of net.Conn, it won't
|
||||
// help here).
|
||||
type syscallConn struct {
|
||||
net.Conn
|
||||
// sysConn is a type alias of syscall.Conn. It's necessary because the name
|
||||
// `Conn` collides with `net.Conn`.
|
||||
sysConn
|
||||
}
|
||||
|
||||
// WrapSyscallConn tries to wrap rawConn and newConn into a net.Conn that
|
||||
// implements syscall.Conn. rawConn will be used to support syscall, and newConn
|
||||
// will be used for read/write.
|
||||
//
|
||||
// This function returns newConn if rawConn doesn't implement syscall.Conn.
|
||||
func WrapSyscallConn(rawConn, newConn net.Conn) net.Conn {
|
||||
sysConn, ok := rawConn.(syscall.Conn)
|
||||
if !ok {
|
||||
return newConn
|
||||
}
|
||||
return &syscallConn{
|
||||
Conn: newConn,
|
||||
sysConn: sysConn,
|
||||
}
|
||||
}
|
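The syscallConn wrapper above exists because wrappers such as tls.Conn hide the syscall.Conn interface that channelz relies on for socket information. A small illustrative sketch of the underlying type assertion (the internal package itself is not importable from outside the grpc module):

package example

import (
	"crypto/tls"
	"net"
	"syscall"
)

// supportsSyscall reports whether conn can expose its raw file descriptor.
func supportsSyscall(conn net.Conn) bool {
	_, ok := conn.(syscall.Conn)
	return ok
}

func demo(raw *net.TCPConn, cfg *tls.Config) (bool, bool) {
	tlsConn := tls.Client(raw, cfg)
	// *net.TCPConn implements syscall.Conn; *tls.Conn does not,
	// which is exactly the gap WrapSyscallConn papers over.
	return supportsSyscall(raw), supportsSyscall(tlsConn)
}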
30 vendor/google.golang.org/grpc/credentials/internal/syscallconn_appengine.go generated vendored Normal file
@@ -0,0 +1,30 @@
|
|||
// +build appengine
|
||||
|
||||
/*
|
||||
*
|
||||
* Copyright 2018 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
package internal
|
||||
|
||||
import (
|
||||
"net"
|
||||
)
|
||||
|
||||
// WrapSyscallConn returns newConn on appengine.
|
||||
func WrapSyscallConn(rawConn, newConn net.Conn) net.Conn {
|
||||
return newConn
|
||||
}
|
259 vendor/google.golang.org/grpc/credentials/tls.go generated vendored Normal file
@@ -0,0 +1,259 @@
|
|||
/*
|
||||
*
|
||||
* Copyright 2014 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
package credentials
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"crypto/x509"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net"
|
||||
"net/url"
|
||||
|
||||
"google.golang.org/grpc/credentials/internal"
|
||||
credinternal "google.golang.org/grpc/internal/credentials"
|
||||
)
|
||||
|
||||
// TLSInfo contains the auth information for a TLS authenticated connection.
|
||||
// It implements the AuthInfo interface.
|
||||
type TLSInfo struct {
|
||||
State tls.ConnectionState
|
||||
CommonAuthInfo
|
||||
// This API is experimental.
|
||||
SPIFFEID *url.URL
|
||||
}
|
||||
|
||||
// AuthType returns the type of TLSInfo as a string.
|
||||
func (t TLSInfo) AuthType() string {
|
||||
return "tls"
|
||||
}
|
||||
|
||||
// GetSecurityValue returns security info requested by channelz.
|
||||
func (t TLSInfo) GetSecurityValue() ChannelzSecurityValue {
|
||||
v := &TLSChannelzSecurityValue{
|
||||
StandardName: cipherSuiteLookup[t.State.CipherSuite],
|
||||
}
|
||||
// Currently there's no way to get LocalCertificate info from tls package.
|
||||
if len(t.State.PeerCertificates) > 0 {
|
||||
v.RemoteCertificate = t.State.PeerCertificates[0].Raw
|
||||
}
|
||||
return v
|
||||
}
|
||||
|
||||
// tlsCreds is the credentials required for authenticating a connection using TLS.
|
||||
type tlsCreds struct {
|
||||
// TLS configuration
|
||||
config *tls.Config
|
||||
}
|
||||
|
||||
func (c tlsCreds) Info() ProtocolInfo {
|
||||
return ProtocolInfo{
|
||||
SecurityProtocol: "tls",
|
||||
SecurityVersion: "1.2",
|
||||
ServerName: c.config.ServerName,
|
||||
}
|
||||
}
|
||||
|
||||
func (c *tlsCreds) ClientHandshake(ctx context.Context, authority string, rawConn net.Conn) (_ net.Conn, _ AuthInfo, err error) {
|
||||
// use local cfg to avoid clobbering ServerName if using multiple endpoints
|
||||
cfg := cloneTLSConfig(c.config)
|
||||
if cfg.ServerName == "" {
|
||||
serverName, _, err := net.SplitHostPort(authority)
|
||||
if err != nil {
|
||||
// If the authority had no host port or if the authority cannot be parsed, use it as-is.
|
||||
serverName = authority
|
||||
}
|
||||
cfg.ServerName = serverName
|
||||
}
|
||||
conn := tls.Client(rawConn, cfg)
|
||||
errChannel := make(chan error, 1)
|
||||
go func() {
|
||||
errChannel <- conn.Handshake()
|
||||
close(errChannel)
|
||||
}()
|
||||
select {
|
||||
case err := <-errChannel:
|
||||
if err != nil {
|
||||
conn.Close()
|
||||
return nil, nil, err
|
||||
}
|
||||
case <-ctx.Done():
|
||||
conn.Close()
|
||||
return nil, nil, ctx.Err()
|
||||
}
|
||||
tlsInfo := TLSInfo{
|
||||
State: conn.ConnectionState(),
|
||||
CommonAuthInfo: CommonAuthInfo{
|
||||
SecurityLevel: PrivacyAndIntegrity,
|
||||
},
|
||||
}
|
||||
id := credinternal.SPIFFEIDFromState(conn.ConnectionState())
|
||||
if id != nil {
|
||||
tlsInfo.SPIFFEID = id
|
||||
}
|
||||
return internal.WrapSyscallConn(rawConn, conn), tlsInfo, nil
|
||||
}
|
||||
|
||||
func (c *tlsCreds) ServerHandshake(rawConn net.Conn) (net.Conn, AuthInfo, error) {
|
||||
conn := tls.Server(rawConn, c.config)
|
||||
if err := conn.Handshake(); err != nil {
|
||||
conn.Close()
|
||||
return nil, nil, err
|
||||
}
|
||||
tlsInfo := TLSInfo{
|
||||
State: conn.ConnectionState(),
|
||||
CommonAuthInfo: CommonAuthInfo{
|
||||
SecurityLevel: PrivacyAndIntegrity,
|
||||
},
|
||||
}
|
||||
id := credinternal.SPIFFEIDFromState(conn.ConnectionState())
|
||||
if id != nil {
|
||||
tlsInfo.SPIFFEID = id
|
||||
}
|
||||
return internal.WrapSyscallConn(rawConn, conn), tlsInfo, nil
|
||||
}
|
||||
|
||||
func (c *tlsCreds) Clone() TransportCredentials {
|
||||
return NewTLS(c.config)
|
||||
}
|
||||
|
||||
func (c *tlsCreds) OverrideServerName(serverNameOverride string) error {
|
||||
c.config.ServerName = serverNameOverride
|
||||
return nil
|
||||
}
|
||||
|
||||
const alpnProtoStrH2 = "h2"
|
||||
|
||||
func appendH2ToNextProtos(ps []string) []string {
|
||||
for _, p := range ps {
|
||||
if p == alpnProtoStrH2 {
|
||||
return ps
|
||||
}
|
||||
}
|
||||
ret := make([]string, 0, len(ps)+1)
|
||||
ret = append(ret, ps...)
|
||||
return append(ret, alpnProtoStrH2)
|
||||
}
|
||||
|
||||
// NewTLS uses c to construct a TransportCredentials based on TLS.
|
||||
func NewTLS(c *tls.Config) TransportCredentials {
|
||||
tc := &tlsCreds{cloneTLSConfig(c)}
|
||||
tc.config.NextProtos = appendH2ToNextProtos(tc.config.NextProtos)
|
||||
return tc
|
||||
}
|
||||
|
||||
// NewClientTLSFromCert constructs TLS credentials from the provided root
|
||||
// certificate authority certificate(s) to validate server connections. If
|
||||
// certificates to establish the identity of the client need to be included in
|
||||
// the credentials (eg: for mTLS), use NewTLS instead, where a complete
|
||||
// tls.Config can be specified.
|
||||
// serverNameOverride is for testing only. If set to a non empty string,
|
||||
// it will override the virtual host name of authority (e.g. :authority header
|
||||
// field) in requests.
|
||||
func NewClientTLSFromCert(cp *x509.CertPool, serverNameOverride string) TransportCredentials {
|
||||
return NewTLS(&tls.Config{ServerName: serverNameOverride, RootCAs: cp})
|
||||
}
|
||||
|
||||
// NewClientTLSFromFile constructs TLS credentials from the provided root
|
||||
// certificate authority certificate file(s) to validate server connections. If
|
||||
// certificates to establish the identity of the client need to be included in
|
||||
// the credentials (eg: for mTLS), use NewTLS instead, where a complete
|
||||
// tls.Config can be specified.
|
||||
// serverNameOverride is for testing only. If set to a non empty string,
|
||||
// it will override the virtual host name of authority (e.g. :authority header
|
||||
// field) in requests.
|
||||
func NewClientTLSFromFile(certFile, serverNameOverride string) (TransportCredentials, error) {
|
||||
b, err := ioutil.ReadFile(certFile)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
cp := x509.NewCertPool()
|
||||
if !cp.AppendCertsFromPEM(b) {
|
||||
return nil, fmt.Errorf("credentials: failed to append certificates")
|
||||
}
|
||||
return NewTLS(&tls.Config{ServerName: serverNameOverride, RootCAs: cp}), nil
|
||||
}
|
||||
|
||||
// NewServerTLSFromCert constructs TLS credentials from the input certificate for server.
|
||||
func NewServerTLSFromCert(cert *tls.Certificate) TransportCredentials {
|
||||
return NewTLS(&tls.Config{Certificates: []tls.Certificate{*cert}})
|
||||
}
|
||||
|
||||
// NewServerTLSFromFile constructs TLS credentials from the input certificate file and key
|
||||
// file for server.
|
||||
func NewServerTLSFromFile(certFile, keyFile string) (TransportCredentials, error) {
|
||||
cert, err := tls.LoadX509KeyPair(certFile, keyFile)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return NewTLS(&tls.Config{Certificates: []tls.Certificate{cert}}), nil
|
||||
}
|
||||
|
||||
// TLSChannelzSecurityValue defines the struct that TLS protocol should return
|
||||
// from GetSecurityValue(), containing security info like cipher and certificate used.
|
||||
//
|
||||
// This API is EXPERIMENTAL.
|
||||
type TLSChannelzSecurityValue struct {
|
||||
ChannelzSecurityValue
|
||||
StandardName string
|
||||
LocalCertificate []byte
|
||||
RemoteCertificate []byte
|
||||
}
|
||||
|
||||
var cipherSuiteLookup = map[uint16]string{
|
||||
tls.TLS_RSA_WITH_RC4_128_SHA: "TLS_RSA_WITH_RC4_128_SHA",
|
||||
tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA: "TLS_RSA_WITH_3DES_EDE_CBC_SHA",
|
||||
tls.TLS_RSA_WITH_AES_128_CBC_SHA: "TLS_RSA_WITH_AES_128_CBC_SHA",
|
||||
tls.TLS_RSA_WITH_AES_256_CBC_SHA: "TLS_RSA_WITH_AES_256_CBC_SHA",
|
||||
tls.TLS_RSA_WITH_AES_128_GCM_SHA256: "TLS_RSA_WITH_AES_128_GCM_SHA256",
|
||||
tls.TLS_RSA_WITH_AES_256_GCM_SHA384: "TLS_RSA_WITH_AES_256_GCM_SHA384",
|
||||
tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA: "TLS_ECDHE_ECDSA_WITH_RC4_128_SHA",
|
||||
tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA: "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA",
|
||||
tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA: "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA",
|
||||
tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA: "TLS_ECDHE_RSA_WITH_RC4_128_SHA",
|
||||
tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA: "TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA",
|
||||
tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA: "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA",
|
||||
tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA: "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA",
|
||||
tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256: "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256",
|
||||
tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256",
|
||||
tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384: "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384",
|
||||
tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384: "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384",
|
||||
tls.TLS_FALLBACK_SCSV: "TLS_FALLBACK_SCSV",
|
||||
tls.TLS_RSA_WITH_AES_128_CBC_SHA256: "TLS_RSA_WITH_AES_128_CBC_SHA256",
|
||||
tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256: "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256",
|
||||
tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256: "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256",
|
||||
tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305: "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305",
|
||||
tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305: "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305",
|
||||
}
|
||||
|
||||
// cloneTLSConfig returns a shallow clone of the exported
|
||||
// fields of cfg, ignoring the unexported sync.Once, which
|
||||
// contains a mutex and must not be copied.
|
||||
//
|
||||
// If cfg is nil, a new zero tls.Config is returned.
|
||||
//
|
||||
// TODO: inline this function if possible.
|
||||
func cloneTLSConfig(cfg *tls.Config) *tls.Config {
|
||||
if cfg == nil {
|
||||
return &tls.Config{}
|
||||
}
|
||||
|
||||
return cfg.Clone()
|
||||
}
|
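Putting the tls.go additions together on the server side, a hedged sketch using NewServerTLSFromFile exactly as declared above; the certificate and key paths and the listen address are placeholders:

package main

import (
	"log"
	"net"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials"
)

func main() {
	// Server-side TLS credentials from a certificate/key pair on disk.
	creds, err := credentials.NewServerTLSFromFile("server.pem", "server.key")
	if err != nil {
		log.Fatalf("load credentials: %v", err)
	}

	lis, err := net.Listen("tcp", ":50051")
	if err != nil {
		log.Fatalf("listen: %v", err)
	}

	srv := grpc.NewServer(grpc.Creds(creds))
	// Service registration would go here before serving.
	if err := srv.Serve(lis); err != nil {
		log.Fatalf("serve: %v", err)
	}
}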
605 vendor/google.golang.org/grpc/dialoptions.go generated vendored Normal file
@@ -0,0 +1,605 @@
|
|||
/*
|
||||
*
|
||||
* Copyright 2018 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
package grpc
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net"
|
||||
"time"
|
||||
|
||||
"google.golang.org/grpc/backoff"
|
||||
"google.golang.org/grpc/balancer"
|
||||
"google.golang.org/grpc/credentials"
|
||||
"google.golang.org/grpc/internal"
|
||||
internalbackoff "google.golang.org/grpc/internal/backoff"
|
||||
"google.golang.org/grpc/internal/envconfig"
|
||||
"google.golang.org/grpc/internal/transport"
|
||||
"google.golang.org/grpc/keepalive"
|
||||
"google.golang.org/grpc/resolver"
|
||||
"google.golang.org/grpc/stats"
|
||||
)
|
||||
|
||||
// dialOptions configure a Dial call. dialOptions are set by the DialOption
|
||||
// values passed to Dial.
|
||||
type dialOptions struct {
|
||||
unaryInt UnaryClientInterceptor
|
||||
streamInt StreamClientInterceptor
|
||||
|
||||
chainUnaryInts []UnaryClientInterceptor
|
||||
chainStreamInts []StreamClientInterceptor
|
||||
|
||||
cp Compressor
|
||||
dc Decompressor
|
||||
bs internalbackoff.Strategy
|
||||
block bool
|
||||
returnLastError bool
|
||||
insecure bool
|
||||
timeout time.Duration
|
||||
scChan <-chan ServiceConfig
|
||||
authority string
|
||||
copts transport.ConnectOptions
|
||||
callOptions []CallOption
|
||||
// This is used by WithBalancerName dial option.
|
||||
balancerBuilder balancer.Builder
|
||||
channelzParentID int64
|
||||
disableServiceConfig bool
|
||||
disableRetry bool
|
||||
disableHealthCheck bool
|
||||
healthCheckFunc internal.HealthChecker
|
||||
minConnectTimeout func() time.Duration
|
||||
defaultServiceConfig *ServiceConfig // defaultServiceConfig is parsed from defaultServiceConfigRawJSON.
|
||||
defaultServiceConfigRawJSON *string
|
||||
// This is used by ccResolverWrapper to backoff between successive calls to
|
||||
// resolver.ResolveNow(). The user will have no need to configure this, but
|
||||
// we need to be able to configure this in tests.
|
||||
resolveNowBackoff func(int) time.Duration
|
||||
resolvers []resolver.Builder
|
||||
withProxy bool
|
||||
}
|
||||
|
||||
// DialOption configures how we set up the connection.
|
||||
type DialOption interface {
|
||||
apply(*dialOptions)
|
||||
}
|
||||
|
||||
// EmptyDialOption does not alter the dial configuration. It can be embedded in
|
||||
// another structure to build custom dial options.
|
||||
//
|
||||
// This API is EXPERIMENTAL.
|
||||
type EmptyDialOption struct{}
|
||||
|
||||
func (EmptyDialOption) apply(*dialOptions) {}
|
||||
|
||||
// funcDialOption wraps a function that modifies dialOptions into an
|
||||
// implementation of the DialOption interface.
|
||||
type funcDialOption struct {
|
||||
f func(*dialOptions)
|
||||
}
|
||||
|
||||
func (fdo *funcDialOption) apply(do *dialOptions) {
|
||||
fdo.f(do)
|
||||
}
|
||||
|
||||
func newFuncDialOption(f func(*dialOptions)) *funcDialOption {
|
||||
return &funcDialOption{
|
||||
f: f,
|
||||
}
|
||||
}
|
||||
|
||||
// WithWriteBufferSize determines how much data can be batched before doing a
|
||||
// write on the wire. The corresponding memory allocation for this buffer will
|
||||
// be twice the size to keep syscalls low. The default value for this buffer is
|
||||
// 32KB.
|
||||
//
|
||||
// Zero will disable the write buffer such that each write will be on the underlying
// connection. Note: A Send call may not directly translate to a write.
|
||||
func WithWriteBufferSize(s int) DialOption {
|
||||
return newFuncDialOption(func(o *dialOptions) {
|
||||
o.copts.WriteBufferSize = s
|
||||
})
|
||||
}
|
||||
|
||||
// WithReadBufferSize lets you set the size of the read buffer; this determines how
// much data can be read at most for each read syscall.
|
||||
//
|
||||
// The default value for this buffer is 32KB. Zero will disable the read buffer for
// a connection so the data framer can access the underlying conn directly.
|
||||
func WithReadBufferSize(s int) DialOption {
|
||||
return newFuncDialOption(func(o *dialOptions) {
|
||||
o.copts.ReadBufferSize = s
|
||||
})
|
||||
}
|
||||
|
||||
// WithInitialWindowSize returns a DialOption which sets the value for initial
|
||||
// window size on a stream. The lower bound for window size is 64K and any value
|
||||
// smaller than that will be ignored.
|
||||
func WithInitialWindowSize(s int32) DialOption {
|
||||
return newFuncDialOption(func(o *dialOptions) {
|
||||
o.copts.InitialWindowSize = s
|
||||
})
|
||||
}
|
||||
|
||||
// WithInitialConnWindowSize returns a DialOption which sets the value for
|
||||
// initial window size on a connection. The lower bound for window size is 64K
|
||||
// and any value smaller than that will be ignored.
|
||||
func WithInitialConnWindowSize(s int32) DialOption {
|
||||
return newFuncDialOption(func(o *dialOptions) {
|
||||
o.copts.InitialConnWindowSize = s
|
||||
})
|
||||
}
|
||||
|
||||
// WithMaxMsgSize returns a DialOption which sets the maximum message size the
|
||||
// client can receive.
|
||||
//
|
||||
// Deprecated: use WithDefaultCallOptions(MaxCallRecvMsgSize(s)) instead. Will
|
||||
// be supported throughout 1.x.
|
||||
func WithMaxMsgSize(s int) DialOption {
|
||||
return WithDefaultCallOptions(MaxCallRecvMsgSize(s))
|
||||
}
|
||||
|
||||
// WithDefaultCallOptions returns a DialOption which sets the default
|
||||
// CallOptions for calls over the connection.
|
||||
func WithDefaultCallOptions(cos ...CallOption) DialOption {
|
||||
return newFuncDialOption(func(o *dialOptions) {
|
||||
o.callOptions = append(o.callOptions, cos...)
|
||||
})
|
||||
}
|
||||
|
||||
// WithCodec returns a DialOption which sets a codec for message marshaling and
|
||||
// unmarshaling.
|
||||
//
|
||||
// Deprecated: use WithDefaultCallOptions(ForceCodec(_)) instead. Will be
|
||||
// supported throughout 1.x.
|
||||
func WithCodec(c Codec) DialOption {
|
||||
return WithDefaultCallOptions(CallCustomCodec(c))
|
||||
}
|
||||
|
||||
// WithCompressor returns a DialOption which sets a Compressor to use for
|
||||
// message compression. It has lower priority than the compressor set by the
|
||||
// UseCompressor CallOption.
|
||||
//
|
||||
// Deprecated: use UseCompressor instead. Will be supported throughout 1.x.
|
||||
func WithCompressor(cp Compressor) DialOption {
|
||||
return newFuncDialOption(func(o *dialOptions) {
|
||||
o.cp = cp
|
||||
})
|
||||
}
|
||||
|
||||
// WithDecompressor returns a DialOption which sets a Decompressor to use for
|
||||
// incoming message decompression. If incoming response messages are encoded
|
||||
// using the decompressor's Type(), it will be used. Otherwise, the message
|
||||
// encoding will be used to look up the compressor registered via
|
||||
// encoding.RegisterCompressor, which will then be used to decompress the
|
||||
// message. If no compressor is registered for the encoding, an Unimplemented
|
||||
// status error will be returned.
|
||||
//
|
||||
// Deprecated: use encoding.RegisterCompressor instead. Will be supported
|
||||
// throughout 1.x.
|
||||
func WithDecompressor(dc Decompressor) DialOption {
|
||||
return newFuncDialOption(func(o *dialOptions) {
|
||||
o.dc = dc
|
||||
})
|
||||
}
|
||||
|
||||
// WithBalancerName sets the balancer that the ClientConn will be initialized
|
||||
// with. Balancer registered with balancerName will be used. This function
|
||||
// panics if no balancer was registered by balancerName.
|
||||
//
|
||||
// The balancer cannot be overridden by balancer option specified by service
|
||||
// config.
|
||||
//
|
||||
// Deprecated: use WithDefaultServiceConfig and WithDisableServiceConfig
|
||||
// instead. Will be removed in a future 1.x release.
|
||||
func WithBalancerName(balancerName string) DialOption {
|
||||
builder := balancer.Get(balancerName)
|
||||
if builder == nil {
|
||||
panic(fmt.Sprintf("grpc.WithBalancerName: no balancer is registered for name %v", balancerName))
|
||||
}
|
||||
return newFuncDialOption(func(o *dialOptions) {
|
||||
o.balancerBuilder = builder
|
||||
})
|
||||
}
|
||||
|
||||
// WithServiceConfig returns a DialOption which has a channel to read the
|
||||
// service configuration.
|
||||
//
|
||||
// Deprecated: service config should be received through name resolver or via
|
||||
// WithDefaultServiceConfig, as specified at
|
||||
// https://github.com/grpc/grpc/blob/master/doc/service_config.md. Will be
|
||||
// removed in a future 1.x release.
|
||||
func WithServiceConfig(c <-chan ServiceConfig) DialOption {
|
||||
return newFuncDialOption(func(o *dialOptions) {
|
||||
o.scChan = c
|
||||
})
|
||||
}
|
||||
|
||||
// WithConnectParams configures the dialer to use the provided ConnectParams.
|
||||
//
|
||||
// The backoff configuration specified as part of the ConnectParams overrides
|
||||
// all defaults specified in
|
||||
// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md. Consider
|
||||
// using the backoff.DefaultConfig as a base, in cases where you want to
|
||||
// override only a subset of the backoff configuration.
|
||||
//
|
||||
// This API is EXPERIMENTAL.
|
||||
func WithConnectParams(p ConnectParams) DialOption {
|
||||
return newFuncDialOption(func(o *dialOptions) {
|
||||
o.bs = internalbackoff.Exponential{Config: p.Backoff}
|
||||
o.minConnectTimeout = func() time.Duration {
|
||||
return p.MinConnectTimeout
|
||||
}
|
||||
})
|
||||
}
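As a point of reference, a minimal sketch of how WithConnectParams might be used, starting from backoff.DefaultConfig and overriding only MaxDelay; the target address is a placeholder and WithInsecure is used purely to keep the example short.

package main

import (
	"log"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/backoff"
)

func main() {
	// Start from the library defaults and override only the maximum delay.
	bc := backoff.DefaultConfig
	bc.MaxDelay = 30 * time.Second

	conn, err := grpc.Dial("localhost:50051", // placeholder address
		grpc.WithInsecure(),
		grpc.WithConnectParams(grpc.ConnectParams{
			Backoff:           bc,
			MinConnectTimeout: 10 * time.Second,
		}),
	)
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()
}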
|
||||
|
||||
// WithBackoffMaxDelay configures the dialer to use the provided maximum delay
|
||||
// when backing off after failed connection attempts.
|
||||
//
|
||||
// Deprecated: use WithConnectParams instead. Will be supported throughout 1.x.
|
||||
func WithBackoffMaxDelay(md time.Duration) DialOption {
|
||||
return WithBackoffConfig(BackoffConfig{MaxDelay: md})
|
||||
}
|
||||
|
||||
// WithBackoffConfig configures the dialer to use the provided backoff
|
||||
// parameters after connection failures.
|
||||
//
|
||||
// Deprecated: use WithConnectParams instead. Will be supported throughout 1.x.
|
||||
func WithBackoffConfig(b BackoffConfig) DialOption {
|
||||
bc := backoff.DefaultConfig
|
||||
bc.MaxDelay = b.MaxDelay
|
||||
return withBackoff(internalbackoff.Exponential{Config: bc})
|
||||
}
|
||||
|
||||
// withBackoff sets the backoff strategy used between retries after a failed
// connection attempt.
|
||||
//
|
||||
// This can be exported if arbitrary backoff strategies are allowed by gRPC.
|
||||
func withBackoff(bs internalbackoff.Strategy) DialOption {
|
||||
return newFuncDialOption(func(o *dialOptions) {
|
||||
o.bs = bs
|
||||
})
|
||||
}
|
||||
|
||||
// WithBlock returns a DialOption which makes callers of Dial block until the
// underlying connection is up. Without this, Dial returns immediately and
// connecting to the server happens in the background.
|
||||
func WithBlock() DialOption {
|
||||
return newFuncDialOption(func(o *dialOptions) {
|
||||
o.block = true
|
||||
})
|
||||
}
|
||||
|
||||
// WithReturnConnectionError returns a DialOption which makes the client connection
|
||||
// return a string containing both the last connection error that occurred and
|
||||
// the context.DeadlineExceeded error.
|
||||
// Implies WithBlock()
|
||||
//
|
||||
// This API is EXPERIMENTAL.
|
||||
func WithReturnConnectionError() DialOption {
|
||||
return newFuncDialOption(func(o *dialOptions) {
|
||||
o.block = true
|
||||
o.returnLastError = true
|
||||
})
|
||||
}
|
||||
|
||||
// WithInsecure returns a DialOption which disables transport security for this
|
||||
// ClientConn. Note that transport security is required unless WithInsecure is
|
||||
// set.
|
||||
func WithInsecure() DialOption {
|
||||
return newFuncDialOption(func(o *dialOptions) {
|
||||
o.insecure = true
|
||||
})
|
||||
}
|
||||
|
||||
// WithNoProxy returns a DialOption which disables the use of proxies for this
|
||||
// ClientConn. This is ignored if WithDialer or WithContextDialer are used.
|
||||
//
|
||||
// This API is EXPERIMENTAL.
|
||||
func WithNoProxy() DialOption {
|
||||
return newFuncDialOption(func(o *dialOptions) {
|
||||
o.withProxy = false
|
||||
})
|
||||
}
|
||||
|
||||
// WithTransportCredentials returns a DialOption which configures a connection
|
||||
// level security credentials (e.g., TLS/SSL). This should not be used together
|
||||
// with WithCredentialsBundle.
|
||||
func WithTransportCredentials(creds credentials.TransportCredentials) DialOption {
|
||||
return newFuncDialOption(func(o *dialOptions) {
|
||||
o.copts.TransportCredentials = creds
|
||||
})
|
||||
}
|
||||
|
||||
// WithPerRPCCredentials returns a DialOption which sets credentials and places
|
||||
// auth state on each outbound RPC.
|
||||
func WithPerRPCCredentials(creds credentials.PerRPCCredentials) DialOption {
|
||||
return newFuncDialOption(func(o *dialOptions) {
|
||||
o.copts.PerRPCCredentials = append(o.copts.PerRPCCredentials, creds)
|
||||
})
|
||||
}
|
||||
|
||||
// WithCredentialsBundle returns a DialOption to set a credentials bundle for
|
||||
// the ClientConn. This should not be used together with
|
||||
// WithTransportCredentials.
|
||||
//
|
||||
// This API is experimental.
|
||||
func WithCredentialsBundle(b credentials.Bundle) DialOption {
|
||||
return newFuncDialOption(func(o *dialOptions) {
|
||||
o.copts.CredsBundle = b
|
||||
})
|
||||
}
|
||||
|
||||
// WithTimeout returns a DialOption that configures a timeout for dialing a
|
||||
// ClientConn initially. This is valid if and only if WithBlock() is present.
|
||||
//
|
||||
// Deprecated: use DialContext instead of Dial and context.WithTimeout
|
||||
// instead. Will be supported throughout 1.x.
|
||||
func WithTimeout(d time.Duration) DialOption {
|
||||
return newFuncDialOption(func(o *dialOptions) {
|
||||
o.timeout = d
|
||||
})
|
||||
}
|
||||
|
||||
// WithContextDialer returns a DialOption that sets a dialer to create
|
||||
// connections. If FailOnNonTempDialError() is set to true, and an error is
|
||||
// returned by f, gRPC checks the error's Temporary() method to decide if it
|
||||
// should try to reconnect to the network address.
|
||||
func WithContextDialer(f func(context.Context, string) (net.Conn, error)) DialOption {
|
||||
return newFuncDialOption(func(o *dialOptions) {
|
||||
o.copts.Dialer = f
|
||||
})
|
||||
}
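For illustration, a small sketch of wiring a custom dialer through WithContextDialer; here it just delegates to net.Dialer, and the address is a placeholder.

package main

import (
	"context"
	"log"
	"net"

	"google.golang.org/grpc"
)

func main() {
	// A context-aware dialer; this one simply delegates to net.Dialer, but it
	// could just as well dial a Unix socket or an in-memory listener.
	dialer := func(ctx context.Context, addr string) (net.Conn, error) {
		var d net.Dialer
		return d.DialContext(ctx, "tcp", addr)
	}

	conn, err := grpc.Dial("localhost:50051", // placeholder address
		grpc.WithInsecure(),
		grpc.WithContextDialer(dialer),
	)
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()
}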
|
||||
|
||||
func init() {
|
||||
internal.WithHealthCheckFunc = withHealthCheckFunc
|
||||
}
|
||||
|
||||
// WithDialer returns a DialOption that specifies a function to use for dialing
|
||||
// network addresses. If FailOnNonTempDialError() is set to true, and an error
|
||||
// is returned by f, gRPC checks the error's Temporary() method to decide if it
|
||||
// should try to reconnect to the network address.
|
||||
//
|
||||
// Deprecated: use WithContextDialer instead. Will be supported throughout
|
||||
// 1.x.
|
||||
func WithDialer(f func(string, time.Duration) (net.Conn, error)) DialOption {
|
||||
return WithContextDialer(
|
||||
func(ctx context.Context, addr string) (net.Conn, error) {
|
||||
if deadline, ok := ctx.Deadline(); ok {
|
||||
return f(addr, time.Until(deadline))
|
||||
}
|
||||
return f(addr, 0)
|
||||
})
|
||||
}
|
||||
|
||||
// WithStatsHandler returns a DialOption that specifies the stats handler for
|
||||
// all the RPCs and underlying network connections in this ClientConn.
|
||||
func WithStatsHandler(h stats.Handler) DialOption {
|
||||
return newFuncDialOption(func(o *dialOptions) {
|
||||
o.copts.StatsHandler = h
|
||||
})
|
||||
}
|
||||
|
||||
// FailOnNonTempDialError returns a DialOption that specifies if gRPC fails on
|
||||
// non-temporary dial errors. If f is true, and dialer returns a non-temporary
|
||||
// error, gRPC will fail the connection to the network address and won't try to
|
||||
// reconnect. The default value of FailOnNonTempDialError is false.
|
||||
//
|
||||
// FailOnNonTempDialError only affects the initial dial, and does not do
|
||||
// anything useful unless you are also using WithBlock().
|
||||
//
|
||||
// This is an EXPERIMENTAL API.
|
||||
func FailOnNonTempDialError(f bool) DialOption {
|
||||
return newFuncDialOption(func(o *dialOptions) {
|
||||
o.copts.FailOnNonTempDialError = f
|
||||
})
|
||||
}
|
||||
|
||||
// WithUserAgent returns a DialOption that specifies a user agent string for all
|
||||
// the RPCs.
|
||||
func WithUserAgent(s string) DialOption {
|
||||
return newFuncDialOption(func(o *dialOptions) {
|
||||
o.copts.UserAgent = s
|
||||
})
|
||||
}
|
||||
|
||||
// WithKeepaliveParams returns a DialOption that specifies keepalive parameters
|
||||
// for the client transport.
|
||||
func WithKeepaliveParams(kp keepalive.ClientParameters) DialOption {
|
||||
if kp.Time < internal.KeepaliveMinPingTime {
|
||||
logger.Warningf("Adjusting keepalive ping interval to minimum period of %v", internal.KeepaliveMinPingTime)
|
||||
kp.Time = internal.KeepaliveMinPingTime
|
||||
}
|
||||
return newFuncDialOption(func(o *dialOptions) {
|
||||
o.copts.KeepaliveParams = kp
|
||||
})
|
||||
}
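A short sketch of how WithKeepaliveParams might be combined with keepalive.ClientParameters; the interval values are arbitrary and the address is a placeholder.

package main

import (
	"log"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/keepalive"
)

func main() {
	kp := keepalive.ClientParameters{
		Time:                30 * time.Second, // ping the server when the connection has been idle this long
		Timeout:             10 * time.Second, // wait this long for a ping ack before closing the connection
		PermitWithoutStream: true,             // send pings even when there are no active RPCs
	}

	conn, err := grpc.Dial("localhost:50051", // placeholder address
		grpc.WithInsecure(),
		grpc.WithKeepaliveParams(kp),
	)
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()
}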
|
||||
|
||||
// WithUnaryInterceptor returns a DialOption that specifies the interceptor for
|
||||
// unary RPCs.
|
||||
func WithUnaryInterceptor(f UnaryClientInterceptor) DialOption {
|
||||
return newFuncDialOption(func(o *dialOptions) {
|
||||
o.unaryInt = f
|
||||
})
|
||||
}
|
||||
|
||||
// WithChainUnaryInterceptor returns a DialOption that specifies the chained
|
||||
// interceptor for unary RPCs. The first interceptor will be the outermost,
// while the last interceptor will be the innermost wrapper around the real call.
|
||||
// All interceptors added by this method will be chained, and the interceptor
|
||||
// defined by WithUnaryInterceptor will always be prepended to the chain.
|
||||
func WithChainUnaryInterceptor(interceptors ...UnaryClientInterceptor) DialOption {
|
||||
return newFuncDialOption(func(o *dialOptions) {
|
||||
o.chainUnaryInts = append(o.chainUnaryInts, interceptors...)
|
||||
})
|
||||
}
|
||||
|
||||
// WithStreamInterceptor returns a DialOption that specifies the interceptor for
|
||||
// streaming RPCs.
|
||||
func WithStreamInterceptor(f StreamClientInterceptor) DialOption {
|
||||
return newFuncDialOption(func(o *dialOptions) {
|
||||
o.streamInt = f
|
||||
})
|
||||
}
|
||||
|
||||
// WithChainStreamInterceptor returns a DialOption that specifies the chained
|
||||
// interceptor for streaming RPCs. The first interceptor will be the outermost,
// while the last interceptor will be the innermost wrapper around the real call.
|
||||
// All interceptors added by this method will be chained, and the interceptor
|
||||
// defined by WithStreamInterceptor will always be prepended to the chain.
|
||||
func WithChainStreamInterceptor(interceptors ...StreamClientInterceptor) DialOption {
|
||||
return newFuncDialOption(func(o *dialOptions) {
|
||||
o.chainStreamInts = append(o.chainStreamInts, interceptors...)
|
||||
})
|
||||
}
|
||||
|
||||
// WithAuthority returns a DialOption that specifies the value to be used as the
|
||||
// :authority pseudo-header. This value only works with WithInsecure and has no
|
||||
// effect if TransportCredentials are present.
|
||||
func WithAuthority(a string) DialOption {
|
||||
return newFuncDialOption(func(o *dialOptions) {
|
||||
o.authority = a
|
||||
})
|
||||
}
|
||||
|
||||
// WithChannelzParentID returns a DialOption that specifies the channelz ID of
|
||||
// current ClientConn's parent. This function is used in nested channel creation
|
||||
// (e.g. grpclb dial).
|
||||
//
|
||||
// This API is EXPERIMENTAL.
|
||||
func WithChannelzParentID(id int64) DialOption {
|
||||
return newFuncDialOption(func(o *dialOptions) {
|
||||
o.channelzParentID = id
|
||||
})
|
||||
}
|
||||
|
||||
// WithDisableServiceConfig returns a DialOption that causes gRPC to ignore any
|
||||
// service config provided by the resolver and provides a hint to the resolver
|
||||
// to not fetch service configs.
|
||||
//
|
||||
// Note that this dial option only disables service config from resolver. If
|
||||
// default service config is provided, gRPC will use the default service config.
|
||||
func WithDisableServiceConfig() DialOption {
|
||||
return newFuncDialOption(func(o *dialOptions) {
|
||||
o.disableServiceConfig = true
|
||||
})
|
||||
}
|
||||
|
||||
// WithDefaultServiceConfig returns a DialOption that configures the default
|
||||
// service config, which will be used in cases where:
|
||||
//
|
||||
// 1. WithDisableServiceConfig is also used.
|
||||
// 2. Resolver does not return a service config or if the resolver returns an
|
||||
// invalid service config.
|
||||
//
|
||||
// This API is EXPERIMENTAL.
|
||||
func WithDefaultServiceConfig(s string) DialOption {
|
||||
return newFuncDialOption(func(o *dialOptions) {
|
||||
o.defaultServiceConfigRawJSON = &s
|
||||
})
|
||||
}
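A hedged sketch of passing a default service config; the JSON below only selects the round_robin load-balancing policy and is meant as an illustration, not a recommended configuration. The address is a placeholder.

package main

import (
	"log"

	"google.golang.org/grpc"
)

func main() {
	// A service config is a JSON document; this one only selects the
	// round_robin load-balancing policy.
	const sc = `{"loadBalancingConfig": [{"round_robin":{}}]}`

	conn, err := grpc.Dial("localhost:50051", // placeholder address
		grpc.WithInsecure(),
		grpc.WithDefaultServiceConfig(sc),
	)
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()
}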
|
||||
|
||||
// WithDisableRetry returns a DialOption that disables retries, even if the
|
||||
// service config enables them. This does not impact transparent retries, which
|
||||
// will happen automatically if no data is written to the wire or if the RPC is
|
||||
// unprocessed by the remote server.
|
||||
//
|
||||
// Retry support is currently disabled by default, but will be enabled by
|
||||
// default in the future. Until then, it may be enabled by setting the
|
||||
// environment variable "GRPC_GO_RETRY" to "on".
|
||||
//
|
||||
// This API is EXPERIMENTAL.
|
||||
func WithDisableRetry() DialOption {
|
||||
return newFuncDialOption(func(o *dialOptions) {
|
||||
o.disableRetry = true
|
||||
})
|
||||
}
|
||||
|
||||
// WithMaxHeaderListSize returns a DialOption that specifies the maximum
|
||||
// (uncompressed) size of header list that the client is prepared to accept.
|
||||
func WithMaxHeaderListSize(s uint32) DialOption {
|
||||
return newFuncDialOption(func(o *dialOptions) {
|
||||
o.copts.MaxHeaderListSize = &s
|
||||
})
|
||||
}
|
||||
|
||||
// WithDisableHealthCheck disables the LB channel health checking for all
|
||||
// SubConns of this ClientConn.
|
||||
//
|
||||
// This API is EXPERIMENTAL.
|
||||
func WithDisableHealthCheck() DialOption {
|
||||
return newFuncDialOption(func(o *dialOptions) {
|
||||
o.disableHealthCheck = true
|
||||
})
|
||||
}
|
||||
|
||||
// withHealthCheckFunc replaces the default health check function with the
|
||||
// provided one. It makes tests easier to change the health check function.
|
||||
//
|
||||
// For testing purposes only.
|
||||
func withHealthCheckFunc(f internal.HealthChecker) DialOption {
|
||||
return newFuncDialOption(func(o *dialOptions) {
|
||||
o.healthCheckFunc = f
|
||||
})
|
||||
}
|
||||
|
||||
func defaultDialOptions() dialOptions {
|
||||
return dialOptions{
|
||||
disableRetry: !envconfig.Retry,
|
||||
healthCheckFunc: internal.HealthCheckFunc,
|
||||
copts: transport.ConnectOptions{
|
||||
WriteBufferSize: defaultWriteBufSize,
|
||||
ReadBufferSize: defaultReadBufSize,
|
||||
},
|
||||
resolveNowBackoff: internalbackoff.DefaultExponential.Backoff,
|
||||
withProxy: true,
|
||||
}
|
||||
}
|
||||
|
||||
// withMinConnectDeadline specifies the function that clientconn uses to
// get minConnectDeadline. This can be used to make connection attempts happen
// faster/slower.
|
||||
//
|
||||
// For testing purposes only.
|
||||
func withMinConnectDeadline(f func() time.Duration) DialOption {
|
||||
return newFuncDialOption(func(o *dialOptions) {
|
||||
o.minConnectTimeout = f
|
||||
})
|
||||
}
|
||||
|
||||
// withResolveNowBackoff specifies the function that clientconn uses to backoff
|
||||
// between successive calls to resolver.ResolveNow().
|
||||
//
|
||||
// For testing purposes only.
|
||||
func withResolveNowBackoff(f func(int) time.Duration) DialOption {
|
||||
return newFuncDialOption(func(o *dialOptions) {
|
||||
o.resolveNowBackoff = f
|
||||
})
|
||||
}
|
||||
|
||||
// WithResolvers allows a list of resolver implementations to be registered
|
||||
// locally with the ClientConn without needing to be globally registered via
|
||||
// resolver.Register. They will be matched against the scheme used for the
|
||||
// current Dial only, and will take precedence over the global registry.
|
||||
//
|
||||
// This API is EXPERIMENTAL.
|
||||
func WithResolvers(rs ...resolver.Builder) DialOption {
|
||||
return newFuncDialOption(func(o *dialOptions) {
|
||||
o.resolvers = append(o.resolvers, rs...)
|
||||
})
|
||||
}
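Putting a few of the options above together, a minimal sketch of a blocking dial bounded by a context deadline, as the WithTimeout deprecation note suggests; the address is a placeholder.

package main

import (
	"context"
	"log"
	"time"

	"google.golang.org/grpc"
)

func main() {
	// Bound the blocking dial with a context deadline instead of the
	// deprecated WithTimeout option.
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	conn, err := grpc.DialContext(ctx, "localhost:50051", // placeholder address
		grpc.WithInsecure(),
		grpc.WithBlock(),
	)
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()
}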
|
22
vendor/google.golang.org/grpc/doc.go
generated
vendored
22
vendor/google.golang.org/grpc/doc.go
generated
vendored
|
@@ -1,6 +1,26 @@
|
|||
/*
|
||||
*
|
||||
* Copyright 2015 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
//go:generate ./regenerate.sh
|
||||
|
||||
/*
|
||||
Package grpc implements an RPC system called gRPC.
|
||||
|
||||
See www.grpc.io for more information about gRPC.
|
||||
See grpc.io for more information about gRPC.
|
||||
*/
|
||||
package grpc // import "google.golang.org/grpc"
|
||||
|
|
122
vendor/google.golang.org/grpc/encoding/encoding.go
generated
vendored
Normal file
122
vendor/google.golang.org/grpc/encoding/encoding.go
generated
vendored
Normal file
|
@@ -0,0 +1,122 @@
|
|||
/*
|
||||
*
|
||||
* Copyright 2017 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
// Package encoding defines the interface for the compressor and codec, and
|
||||
// functions to register and retrieve compressors and codecs.
|
||||
//
|
||||
// This package is EXPERIMENTAL.
|
||||
package encoding
|
||||
|
||||
import (
|
||||
"io"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// Identity specifies the optional encoding for uncompressed streams.
|
||||
// It is intended for grpc internal use only.
|
||||
const Identity = "identity"
|
||||
|
||||
// Compressor is used for compressing and decompressing when sending or
|
||||
// receiving messages.
|
||||
type Compressor interface {
|
||||
// Compress writes the data written to wc to w after compressing it. If an
|
||||
// error occurs while initializing the compressor, that error is returned
|
||||
// instead.
|
||||
Compress(w io.Writer) (io.WriteCloser, error)
|
||||
// Decompress reads data from r, decompresses it, and provides the
|
||||
// uncompressed data via the returned io.Reader. If an error occurs while
|
||||
// initializing the decompressor, that error is returned instead.
|
||||
Decompress(r io.Reader) (io.Reader, error)
|
||||
// Name is the name of the compression codec and is used to set the content
|
||||
// coding header. The result must be static; the result cannot change
|
||||
// between calls.
|
||||
Name() string
|
||||
// EXPERIMENTAL: if a Compressor implements
|
||||
// DecompressedSize(compressedBytes []byte) int, gRPC will call it
|
||||
// to determine the size of the buffer allocated for the result of decompression.
|
||||
// Return -1 to indicate unknown size.
|
||||
}
|
||||
|
||||
var registeredCompressor = make(map[string]Compressor)
|
||||
|
||||
// RegisterCompressor registers the compressor with gRPC by its name. It can
|
||||
// be activated when sending an RPC via grpc.UseCompressor(). It will be
|
||||
// automatically accessed when receiving a message based on the content coding
|
||||
// header. Servers also use it to send a response with the same encoding as
|
||||
// the request.
|
||||
//
|
||||
// NOTE: this function must only be called during initialization time (i.e. in
|
||||
// an init() function), and is not thread-safe. If multiple Compressors are
|
||||
// registered with the same name, the one registered last will take effect.
|
||||
func RegisterCompressor(c Compressor) {
|
||||
registeredCompressor[c.Name()] = c
|
||||
}
|
||||
|
||||
// GetCompressor returns Compressor for the given compressor name.
|
||||
func GetCompressor(name string) Compressor {
|
||||
return registeredCompressor[name]
|
||||
}
|
||||
|
||||
// Codec defines the interface gRPC uses to encode and decode messages. Note
|
||||
// that implementations of this interface must be thread safe; a Codec's
|
||||
// methods can be called from concurrent goroutines.
|
||||
type Codec interface {
|
||||
// Marshal returns the wire format of v.
|
||||
Marshal(v interface{}) ([]byte, error)
|
||||
// Unmarshal parses the wire format into v.
|
||||
Unmarshal(data []byte, v interface{}) error
|
||||
// Name returns the name of the Codec implementation. The returned string
|
||||
// will be used as part of content type in transmission. The result must be
|
||||
// static; the result cannot change between calls.
|
||||
Name() string
|
||||
}
|
||||
|
||||
var registeredCodecs = make(map[string]Codec)
|
||||
|
||||
// RegisterCodec registers the provided Codec for use with all gRPC clients and
|
||||
// servers.
|
||||
//
|
||||
// The Codec will be stored and looked up by result of its Name() method, which
|
||||
// should match the content-subtype of the encoding handled by the Codec. This
|
||||
// is case-insensitive, and is stored and looked up as lowercase. If the
|
||||
// result of calling Name() is an empty string, RegisterCodec will panic. See
|
||||
// Content-Type on
|
||||
// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for
|
||||
// more details.
|
||||
//
|
||||
// NOTE: this function must only be called during initialization time (i.e. in
|
||||
// an init() function), and is not thread-safe. If multiple Compressors are
|
||||
// registered with the same name, the one registered last will take effect.
|
||||
func RegisterCodec(codec Codec) {
|
||||
if codec == nil {
|
||||
panic("cannot register a nil Codec")
|
||||
}
|
||||
if codec.Name() == "" {
|
||||
panic("cannot register Codec with empty string result for Name()")
|
||||
}
|
||||
contentSubtype := strings.ToLower(codec.Name())
|
||||
registeredCodecs[contentSubtype] = codec
|
||||
}
|
||||
|
||||
// GetCodec gets a registered Codec by content-subtype, or nil if no Codec is
|
||||
// registered for the content-subtype.
|
||||
//
|
||||
// The content-subtype is expected to be lowercase.
|
||||
func GetCodec(contentSubtype string) Codec {
|
||||
return registeredCodecs[contentSubtype]
|
||||
}
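To make the registration contract concrete, a toy Codec backed by encoding/json; the name "json" is arbitrary and this is not a codec gRPC ships with.

package main

import (
	"encoding/json"

	"google.golang.org/grpc/encoding"
)

// jsonCodec is a toy Codec that uses encoding/json as the wire format.
type jsonCodec struct{}

func (jsonCodec) Marshal(v interface{}) ([]byte, error)      { return json.Marshal(v) }
func (jsonCodec) Unmarshal(data []byte, v interface{}) error { return json.Unmarshal(data, v) }
func (jsonCodec) Name() string                               { return "json" }

func init() {
	// Per the note above, registration must happen during initialization.
	encoding.RegisterCodec(jsonCodec{})
}

func main() {
	if encoding.GetCodec("json") == nil {
		panic("codec not registered")
	}
}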
|
110
vendor/google.golang.org/grpc/encoding/proto/proto.go
generated
vendored
Normal file
110
vendor/google.golang.org/grpc/encoding/proto/proto.go
generated
vendored
Normal file
|
@@ -0,0 +1,110 @@
|
|||
/*
|
||||
*
|
||||
* Copyright 2018 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
// Package proto defines the protobuf codec. Importing this package will
|
||||
// register the codec.
|
||||
package proto
|
||||
|
||||
import (
|
||||
"math"
|
||||
"sync"
|
||||
|
||||
"github.com/golang/protobuf/proto"
|
||||
"google.golang.org/grpc/encoding"
|
||||
)
|
||||
|
||||
// Name is the name registered for the proto codec.
|
||||
const Name = "proto"
|
||||
|
||||
func init() {
|
||||
encoding.RegisterCodec(codec{})
|
||||
}
|
||||
|
||||
// codec is a Codec implementation with protobuf. It is the default codec for gRPC.
|
||||
type codec struct{}
|
||||
|
||||
type cachedProtoBuffer struct {
|
||||
lastMarshaledSize uint32
|
||||
proto.Buffer
|
||||
}
|
||||
|
||||
func capToMaxInt32(val int) uint32 {
|
||||
if val > math.MaxInt32 {
|
||||
return uint32(math.MaxInt32)
|
||||
}
|
||||
return uint32(val)
|
||||
}
|
||||
|
||||
func marshal(v interface{}, cb *cachedProtoBuffer) ([]byte, error) {
|
||||
protoMsg := v.(proto.Message)
|
||||
newSlice := make([]byte, 0, cb.lastMarshaledSize)
|
||||
|
||||
cb.SetBuf(newSlice)
|
||||
cb.Reset()
|
||||
if err := cb.Marshal(protoMsg); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
out := cb.Bytes()
|
||||
cb.lastMarshaledSize = capToMaxInt32(len(out))
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (codec) Marshal(v interface{}) ([]byte, error) {
|
||||
if pm, ok := v.(proto.Marshaler); ok {
|
||||
// object can marshal itself, no need for buffer
|
||||
return pm.Marshal()
|
||||
}
|
||||
|
||||
cb := protoBufferPool.Get().(*cachedProtoBuffer)
|
||||
out, err := marshal(v, cb)
|
||||
|
||||
// put back buffer and lose the ref to the slice
|
||||
cb.SetBuf(nil)
|
||||
protoBufferPool.Put(cb)
|
||||
return out, err
|
||||
}
|
||||
|
||||
func (codec) Unmarshal(data []byte, v interface{}) error {
|
||||
protoMsg := v.(proto.Message)
|
||||
protoMsg.Reset()
|
||||
|
||||
if pu, ok := protoMsg.(proto.Unmarshaler); ok {
|
||||
// object can unmarshal itself, no need for buffer
|
||||
return pu.Unmarshal(data)
|
||||
}
|
||||
|
||||
cb := protoBufferPool.Get().(*cachedProtoBuffer)
|
||||
cb.SetBuf(data)
|
||||
err := cb.Unmarshal(protoMsg)
|
||||
cb.SetBuf(nil)
|
||||
protoBufferPool.Put(cb)
|
||||
return err
|
||||
}
|
||||
|
||||
func (codec) Name() string {
|
||||
return Name
|
||||
}
|
||||
|
||||
var protoBufferPool = &sync.Pool{
|
||||
New: func() interface{} {
|
||||
return &cachedProtoBuffer{
|
||||
Buffer: proto.Buffer{},
|
||||
lastMarshaledSize: 16,
|
||||
}
|
||||
},
|
||||
}
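A small sketch of retrieving the registered proto codec and round-tripping a message; it assumes the github.com/golang/protobuf wrappers package is available in the module.

package main

import (
	"fmt"
	"log"

	"github.com/golang/protobuf/ptypes/wrappers"
	"google.golang.org/grpc/encoding"
	_ "google.golang.org/grpc/encoding/proto" // registers the "proto" codec in init()
)

func main() {
	codec := encoding.GetCodec("proto")

	data, err := codec.Marshal(&wrappers.StringValue{Value: "hello"})
	if err != nil {
		log.Fatalf("marshal: %v", err)
	}

	var out wrappers.StringValue
	if err := codec.Unmarshal(data, &out); err != nil {
		log.Fatalf("unmarshal: %v", err)
	}
	fmt.Println(out.Value) // prints "hello"
}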
|
117
vendor/google.golang.org/grpc/grpclog/component.go
generated
vendored
Normal file
117
vendor/google.golang.org/grpc/grpclog/component.go
generated
vendored
Normal file
|
@@ -0,0 +1,117 @@
|
|||
/*
|
||||
*
|
||||
* Copyright 2020 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
package grpclog
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"google.golang.org/grpc/internal/grpclog"
|
||||
)
|
||||
|
||||
// componentData records the settings for a component.
|
||||
type componentData struct {
|
||||
name string
|
||||
}
|
||||
|
||||
var cache = map[string]*componentData{}
|
||||
|
||||
func (c *componentData) InfoDepth(depth int, args ...interface{}) {
|
||||
args = append([]interface{}{"[" + string(c.name) + "]"}, args...)
|
||||
grpclog.InfoDepth(depth+1, args...)
|
||||
}
|
||||
|
||||
func (c *componentData) WarningDepth(depth int, args ...interface{}) {
|
||||
args = append([]interface{}{"[" + string(c.name) + "]"}, args...)
|
||||
grpclog.WarningDepth(depth+1, args...)
|
||||
}
|
||||
|
||||
func (c *componentData) ErrorDepth(depth int, args ...interface{}) {
|
||||
args = append([]interface{}{"[" + string(c.name) + "]"}, args...)
|
||||
grpclog.ErrorDepth(depth+1, args...)
|
||||
}
|
||||
|
||||
func (c *componentData) FatalDepth(depth int, args ...interface{}) {
|
||||
args = append([]interface{}{"[" + string(c.name) + "]"}, args...)
|
||||
grpclog.FatalDepth(depth+1, args...)
|
||||
}
|
||||
|
||||
func (c *componentData) Info(args ...interface{}) {
|
||||
c.InfoDepth(1, args...)
|
||||
}
|
||||
|
||||
func (c *componentData) Warning(args ...interface{}) {
|
||||
c.WarningDepth(1, args...)
|
||||
}
|
||||
|
||||
func (c *componentData) Error(args ...interface{}) {
|
||||
c.ErrorDepth(1, args...)
|
||||
}
|
||||
|
||||
func (c *componentData) Fatal(args ...interface{}) {
|
||||
c.FatalDepth(1, args...)
|
||||
}
|
||||
|
||||
func (c *componentData) Infof(format string, args ...interface{}) {
|
||||
c.InfoDepth(1, fmt.Sprintf(format, args...))
|
||||
}
|
||||
|
||||
func (c *componentData) Warningf(format string, args ...interface{}) {
|
||||
c.WarningDepth(1, fmt.Sprintf(format, args...))
|
||||
}
|
||||
|
||||
func (c *componentData) Errorf(format string, args ...interface{}) {
|
||||
c.ErrorDepth(1, fmt.Sprintf(format, args...))
|
||||
}
|
||||
|
||||
func (c *componentData) Fatalf(format string, args ...interface{}) {
|
||||
c.FatalDepth(1, fmt.Sprintf(format, args...))
|
||||
}
|
||||
|
||||
func (c *componentData) Infoln(args ...interface{}) {
|
||||
c.InfoDepth(1, args...)
|
||||
}
|
||||
|
||||
func (c *componentData) Warningln(args ...interface{}) {
|
||||
c.WarningDepth(1, args...)
|
||||
}
|
||||
|
||||
func (c *componentData) Errorln(args ...interface{}) {
|
||||
c.ErrorDepth(1, args...)
|
||||
}
|
||||
|
||||
func (c *componentData) Fatalln(args ...interface{}) {
|
||||
c.FatalDepth(1, args...)
|
||||
}
|
||||
|
||||
func (c *componentData) V(l int) bool {
|
||||
return grpclog.Logger.V(l)
|
||||
}
|
||||
|
||||
// Component creates a new component and returns it for logging. If a component
|
||||
// with the name already exists, nothing will be created and it will be
|
||||
// returned. SetLoggerV2 will panic if it is called with a logger created by
|
||||
// Component.
|
||||
func Component(componentName string) DepthLoggerV2 {
|
||||
if cData, ok := cache[componentName]; ok {
|
||||
return cData
|
||||
}
|
||||
c := &componentData{componentName}
|
||||
cache[componentName] = c
|
||||
return c
|
||||
}
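A brief sketch of how a component logger might be obtained and used; the component name "routing" is an arbitrary example.

package main

import "google.golang.org/grpc/grpclog"

// logger is scoped to a component; its messages are prefixed with "[routing]".
var logger = grpclog.Component("routing")

func main() {
	logger.Infof("picked backend %q", "10.0.0.1:443")
	if logger.V(2) {
		logger.Info("verbose details, shown only at verbosity level 2 or higher")
	}
}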
|
132
vendor/google.golang.org/grpc/grpclog/grpclog.go
generated
vendored
Normal file
132
vendor/google.golang.org/grpc/grpclog/grpclog.go
generated
vendored
Normal file
|
@@ -0,0 +1,132 @@
|
|||
/*
|
||||
*
|
||||
* Copyright 2017 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
// Package grpclog defines logging for grpc.
|
||||
//
|
||||
// All logs in transport and grpclb packages only go to verbose level 2.
|
||||
// All logs in other packages in grpc are logged regardless of the verbosity level.
|
||||
//
|
||||
// In the default logger,
|
||||
// severity level can be set by environment variable GRPC_GO_LOG_SEVERITY_LEVEL,
|
||||
// verbosity level can be set by GRPC_GO_LOG_VERBOSITY_LEVEL.
|
||||
package grpclog // import "google.golang.org/grpc/grpclog"
|
||||
|
||||
import (
|
||||
"os"
|
||||
|
||||
"google.golang.org/grpc/internal/grpclog"
|
||||
)
|
||||
|
||||
func init() {
|
||||
SetLoggerV2(newLoggerV2())
|
||||
}
|
||||
|
||||
// V reports whether verbosity level l is at least the requested verbose level.
|
||||
func V(l int) bool {
|
||||
return grpclog.Logger.V(l)
|
||||
}
|
||||
|
||||
// Info logs to the INFO log.
|
||||
func Info(args ...interface{}) {
|
||||
grpclog.Logger.Info(args...)
|
||||
}
|
||||
|
||||
// Infof logs to the INFO log. Arguments are handled in the manner of fmt.Printf.
|
||||
func Infof(format string, args ...interface{}) {
|
||||
grpclog.Logger.Infof(format, args...)
|
||||
}
|
||||
|
||||
// Infoln logs to the INFO log. Arguments are handled in the manner of fmt.Println.
|
||||
func Infoln(args ...interface{}) {
|
||||
grpclog.Logger.Infoln(args...)
|
||||
}
|
||||
|
||||
// Warning logs to the WARNING log.
|
||||
func Warning(args ...interface{}) {
|
||||
grpclog.Logger.Warning(args...)
|
||||
}
|
||||
|
||||
// Warningf logs to the WARNING log. Arguments are handled in the manner of fmt.Printf.
|
||||
func Warningf(format string, args ...interface{}) {
|
||||
grpclog.Logger.Warningf(format, args...)
|
||||
}
|
||||
|
||||
// Warningln logs to the WARNING log. Arguments are handled in the manner of fmt.Println.
|
||||
func Warningln(args ...interface{}) {
|
||||
grpclog.Logger.Warningln(args...)
|
||||
}
|
||||
|
||||
// Error logs to the ERROR log.
|
||||
func Error(args ...interface{}) {
|
||||
grpclog.Logger.Error(args...)
|
||||
}
|
||||
|
||||
// Errorf logs to the ERROR log. Arguments are handled in the manner of fmt.Printf.
|
||||
func Errorf(format string, args ...interface{}) {
|
||||
grpclog.Logger.Errorf(format, args...)
|
||||
}
|
||||
|
||||
// Errorln logs to the ERROR log. Arguments are handled in the manner of fmt.Println.
|
||||
func Errorln(args ...interface{}) {
|
||||
grpclog.Logger.Errorln(args...)
|
||||
}
|
||||
|
||||
// Fatal logs to the FATAL log. Arguments are handled in the manner of fmt.Print.
|
||||
// It calls os.Exit() with exit code 1.
|
||||
func Fatal(args ...interface{}) {
|
||||
grpclog.Logger.Fatal(args...)
|
||||
// Make sure fatal logs will exit.
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
// Fatalf logs to the FATAL log. Arguments are handled in the manner of fmt.Printf.
|
||||
// It calls os.Exit() with exit code 1.
|
||||
func Fatalf(format string, args ...interface{}) {
|
||||
grpclog.Logger.Fatalf(format, args...)
|
||||
// Make sure fatal logs will exit.
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
// Fatalln logs to the FATAL log. Arguments are handled in the manner of fmt.Println.
|
||||
// It calls os.Exit() with exit code 1.
|
||||
func Fatalln(args ...interface{}) {
|
||||
grpclog.Logger.Fatalln(args...)
|
||||
// Make sure fatal logs will exit.
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
// Print prints to the logger. Arguments are handled in the manner of fmt.Print.
|
||||
//
|
||||
// Deprecated: use Info.
|
||||
func Print(args ...interface{}) {
|
||||
grpclog.Logger.Info(args...)
|
||||
}
|
||||
|
||||
// Printf prints to the logger. Arguments are handled in the manner of fmt.Printf.
|
||||
//
|
||||
// Deprecated: use Infof.
|
||||
func Printf(format string, args ...interface{}) {
|
||||
grpclog.Logger.Infof(format, args...)
|
||||
}
|
||||
|
||||
// Println prints to the logger. Arguments are handled in the manner of fmt.Println.
|
||||
//
|
||||
// Deprecated: use Infoln.
|
||||
func Println(args ...interface{}) {
|
||||
grpclog.Logger.Infoln(args...)
|
||||
}
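A minimal sketch of installing a custom LoggerV2 before any other gRPC calls; the writer and verbosity choices here are arbitrary.

package main

import (
	"os"

	"google.golang.org/grpc/grpclog"
)

func init() {
	// Install a logger that writes all severities to stderr at verbosity 2.
	// As noted above, this must happen before any other gRPC calls.
	grpclog.SetLoggerV2(grpclog.NewLoggerV2WithVerbosity(os.Stderr, os.Stderr, os.Stderr, 2))
}

func main() {
	grpclog.Info("logger installed")
	if grpclog.V(2) {
		grpclog.Infoln("verbose logging enabled")
	}
}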
|
108
vendor/google.golang.org/grpc/grpclog/logger.go
generated
vendored
108
vendor/google.golang.org/grpc/grpclog/logger.go
generated
vendored
|
@@ -1,52 +1,28 @@
|
|||
/*
|
||||
*
|
||||
* Copyright 2015, Google Inc.
|
||||
* All rights reserved.
|
||||
* Copyright 2015 gRPC authors.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions are
|
||||
* met:
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* * Redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer.
|
||||
* * Redistributions in binary form must reproduce the above
|
||||
* copyright notice, this list of conditions and the following disclaimer
|
||||
* in the documentation and/or other materials provided with the
|
||||
* distribution.
|
||||
* * Neither the name of Google Inc. nor the names of its
|
||||
* contributors may be used to endorse or promote products derived from
|
||||
* this software without specific prior written permission.
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
/*
|
||||
Package grpclog defines logging for grpc.
|
||||
*/
|
||||
package grpclog // import "google.golang.org/grpc/grpclog"
|
||||
package grpclog
|
||||
|
||||
import (
|
||||
"log"
|
||||
"os"
|
||||
)
|
||||
|
||||
// Use golang's standard logger by default.
|
||||
// Access is not mutex-protected: do not modify except in init()
|
||||
// functions.
|
||||
var logger Logger = log.New(os.Stderr, "", log.LstdFlags)
|
||||
import "google.golang.org/grpc/internal/grpclog"
|
||||
|
||||
// Logger mimics golang's standard Logger as an interface.
|
||||
//
|
||||
// Deprecated: use LoggerV2.
|
||||
type Logger interface {
|
||||
Fatal(args ...interface{})
|
||||
Fatalf(format string, args ...interface{})
|
||||
|
@@ -58,36 +34,54 @@ type Logger interface {
|
|||
|
||||
// SetLogger sets the logger that is used in grpc. Call only from
|
||||
// init() functions.
|
||||
//
|
||||
// Deprecated: use SetLoggerV2.
|
||||
func SetLogger(l Logger) {
|
||||
logger = l
|
||||
grpclog.Logger = &loggerWrapper{Logger: l}
|
||||
}
|
||||
|
||||
// Fatal is equivalent to Print() followed by a call to os.Exit() with a non-zero exit code.
|
||||
func Fatal(args ...interface{}) {
|
||||
logger.Fatal(args...)
|
||||
// loggerWrapper wraps Logger into a LoggerV2.
|
||||
type loggerWrapper struct {
|
||||
Logger
|
||||
}
|
||||
|
||||
// Fatalf is equivalent to Printf() followed by a call to os.Exit() with a non-zero exit code.
|
||||
func Fatalf(format string, args ...interface{}) {
|
||||
logger.Fatalf(format, args...)
|
||||
func (g *loggerWrapper) Info(args ...interface{}) {
|
||||
g.Logger.Print(args...)
|
||||
}
|
||||
|
||||
// Fatalln is equivalent to Println() followed by a call to os.Exit()) with a non-zero exit code.
|
||||
func Fatalln(args ...interface{}) {
|
||||
logger.Fatalln(args...)
|
||||
func (g *loggerWrapper) Infoln(args ...interface{}) {
|
||||
g.Logger.Println(args...)
|
||||
}
|
||||
|
||||
// Print prints to the logger. Arguments are handled in the manner of fmt.Print.
|
||||
func Print(args ...interface{}) {
|
||||
logger.Print(args...)
|
||||
func (g *loggerWrapper) Infof(format string, args ...interface{}) {
|
||||
g.Logger.Printf(format, args...)
|
||||
}
|
||||
|
||||
// Printf prints to the logger. Arguments are handled in the manner of fmt.Printf.
|
||||
func Printf(format string, args ...interface{}) {
|
||||
logger.Printf(format, args...)
|
||||
func (g *loggerWrapper) Warning(args ...interface{}) {
|
||||
g.Logger.Print(args...)
|
||||
}
|
||||
|
||||
// Println prints to the logger. Arguments are handled in the manner of fmt.Println.
|
||||
func Println(args ...interface{}) {
|
||||
logger.Println(args...)
|
||||
func (g *loggerWrapper) Warningln(args ...interface{}) {
|
||||
g.Logger.Println(args...)
|
||||
}
|
||||
|
||||
func (g *loggerWrapper) Warningf(format string, args ...interface{}) {
|
||||
g.Logger.Printf(format, args...)
|
||||
}
|
||||
|
||||
func (g *loggerWrapper) Error(args ...interface{}) {
|
||||
g.Logger.Print(args...)
|
||||
}
|
||||
|
||||
func (g *loggerWrapper) Errorln(args ...interface{}) {
|
||||
g.Logger.Println(args...)
|
||||
}
|
||||
|
||||
func (g *loggerWrapper) Errorf(format string, args ...interface{}) {
|
||||
g.Logger.Printf(format, args...)
|
||||
}
|
||||
|
||||
func (g *loggerWrapper) V(l int) bool {
|
||||
// Returns true for all verbose levels.
|
||||
return true
|
||||
}
|
||||
|
|
218
vendor/google.golang.org/grpc/grpclog/loggerv2.go
generated
vendored
Normal file
218
vendor/google.golang.org/grpc/grpclog/loggerv2.go
generated
vendored
Normal file
|
@@ -0,0 +1,218 @@
|
|||
/*
|
||||
*
|
||||
* Copyright 2017 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
package grpclog
|
||||
|
||||
import (
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"os"
|
||||
"strconv"
|
||||
|
||||
"google.golang.org/grpc/internal/grpclog"
|
||||
)
|
||||
|
||||
// LoggerV2 does underlying logging work for grpclog.
|
||||
type LoggerV2 interface {
|
||||
// Info logs to INFO log. Arguments are handled in the manner of fmt.Print.
|
||||
Info(args ...interface{})
|
||||
// Infoln logs to INFO log. Arguments are handled in the manner of fmt.Println.
|
||||
Infoln(args ...interface{})
|
||||
// Infof logs to INFO log. Arguments are handled in the manner of fmt.Printf.
|
||||
Infof(format string, args ...interface{})
|
||||
// Warning logs to WARNING log. Arguments are handled in the manner of fmt.Print.
|
||||
Warning(args ...interface{})
|
||||
// Warningln logs to WARNING log. Arguments are handled in the manner of fmt.Println.
|
||||
Warningln(args ...interface{})
|
||||
// Warningf logs to WARNING log. Arguments are handled in the manner of fmt.Printf.
|
||||
Warningf(format string, args ...interface{})
|
||||
// Error logs to ERROR log. Arguments are handled in the manner of fmt.Print.
|
||||
Error(args ...interface{})
|
||||
// Errorln logs to ERROR log. Arguments are handled in the manner of fmt.Println.
|
||||
Errorln(args ...interface{})
|
||||
// Errorf logs to ERROR log. Arguments are handled in the manner of fmt.Printf.
|
||||
Errorf(format string, args ...interface{})
|
||||
// Fatal logs to ERROR log. Arguments are handled in the manner of fmt.Print.
|
||||
// gRPC ensures that all Fatal logs will exit with os.Exit(1).
|
||||
// Implementations may also call os.Exit() with a non-zero exit code.
|
||||
Fatal(args ...interface{})
|
||||
// Fatalln logs to ERROR log. Arguments are handled in the manner of fmt.Println.
|
||||
// gRPC ensures that all Fatal logs will exit with os.Exit(1).
|
||||
// Implementations may also call os.Exit() with a non-zero exit code.
|
||||
Fatalln(args ...interface{})
|
||||
// Fatalf logs to ERROR log. Arguments are handled in the manner of fmt.Printf.
|
||||
// gRPC ensures that all Fatal logs will exit with os.Exit(1).
|
||||
// Implementations may also call os.Exit() with a non-zero exit code.
|
||||
Fatalf(format string, args ...interface{})
|
||||
// V reports whether verbosity level l is at least the requested verbose level.
|
||||
V(l int) bool
|
||||
}
|
||||
|
||||
// SetLoggerV2 sets the logger that is used in grpc to a V2 logger.
// Not mutex-protected; it should be called before any gRPC functions.
|
||||
func SetLoggerV2(l LoggerV2) {
|
||||
if _, ok := l.(*componentData); ok {
|
||||
panic("cannot use component logger as grpclog logger")
|
||||
}
|
||||
grpclog.Logger = l
|
||||
grpclog.DepthLogger, _ = l.(grpclog.DepthLoggerV2)
|
||||
}
|
||||
|
||||
const (
|
||||
// infoLog indicates Info severity.
|
||||
infoLog int = iota
|
||||
// warningLog indicates Warning severity.
|
||||
warningLog
|
||||
// errorLog indicates Error severity.
|
||||
errorLog
|
||||
// fatalLog indicates Fatal severity.
|
||||
fatalLog
|
||||
)
|
||||
|
||||
// severityName contains the string representation of each severity.
|
||||
var severityName = []string{
|
||||
infoLog: "INFO",
|
||||
warningLog: "WARNING",
|
||||
errorLog: "ERROR",
|
||||
fatalLog: "FATAL",
|
||||
}
|
||||
|
||||
// loggerT is the default logger used by grpclog.
|
||||
type loggerT struct {
|
||||
m []*log.Logger
|
||||
v int
|
||||
}
|
||||
|
||||
// NewLoggerV2 creates a loggerV2 with the provided writers.
|
||||
// Fatal logs will be written to errorW, warningW, infoW, followed by exit(1).
|
||||
// Error logs will be written to errorW, warningW and infoW.
|
||||
// Warning logs will be written to warningW and infoW.
|
||||
// Info logs will be written to infoW.
|
||||
func NewLoggerV2(infoW, warningW, errorW io.Writer) LoggerV2 {
|
||||
return NewLoggerV2WithVerbosity(infoW, warningW, errorW, 0)
|
||||
}
|
||||
|
||||
// NewLoggerV2WithVerbosity creates a loggerV2 with the provided writers and
|
||||
// verbosity level.
|
||||
func NewLoggerV2WithVerbosity(infoW, warningW, errorW io.Writer, v int) LoggerV2 {
|
||||
var m []*log.Logger
|
||||
m = append(m, log.New(infoW, severityName[infoLog]+": ", log.LstdFlags))
|
||||
m = append(m, log.New(io.MultiWriter(infoW, warningW), severityName[warningLog]+": ", log.LstdFlags))
|
||||
ew := io.MultiWriter(infoW, warningW, errorW) // ew will be used for error and fatal.
|
||||
m = append(m, log.New(ew, severityName[errorLog]+": ", log.LstdFlags))
|
||||
m = append(m, log.New(ew, severityName[fatalLog]+": ", log.LstdFlags))
|
||||
return &loggerT{m: m, v: v}
|
||||
}
|
||||
|
||||
// newLoggerV2 creates a loggerV2 to be used as default logger.
|
||||
// All logs are written to stderr.
|
||||
func newLoggerV2() LoggerV2 {
|
||||
errorW := ioutil.Discard
|
||||
warningW := ioutil.Discard
|
||||
infoW := ioutil.Discard
|
||||
|
||||
logLevel := os.Getenv("GRPC_GO_LOG_SEVERITY_LEVEL")
|
||||
switch logLevel {
|
||||
case "", "ERROR", "error": // If env is unset, set level to ERROR.
|
||||
errorW = os.Stderr
|
||||
case "WARNING", "warning":
|
||||
warningW = os.Stderr
|
||||
case "INFO", "info":
|
||||
infoW = os.Stderr
|
||||
}
|
||||
|
||||
var v int
|
||||
vLevel := os.Getenv("GRPC_GO_LOG_VERBOSITY_LEVEL")
|
||||
if vl, err := strconv.Atoi(vLevel); err == nil {
|
||||
v = vl
|
||||
}
|
||||
return NewLoggerV2WithVerbosity(infoW, warningW, errorW, v)
|
||||
}
|
||||
|
||||
func (g *loggerT) Info(args ...interface{}) {
|
||||
g.m[infoLog].Print(args...)
|
||||
}
|
||||
|
||||
func (g *loggerT) Infoln(args ...interface{}) {
|
||||
g.m[infoLog].Println(args...)
|
||||
}
|
||||
|
||||
func (g *loggerT) Infof(format string, args ...interface{}) {
|
||||
g.m[infoLog].Printf(format, args...)
|
||||
}
|
||||
|
||||
func (g *loggerT) Warning(args ...interface{}) {
|
||||
g.m[warningLog].Print(args...)
|
||||
}
|
||||
|
||||
func (g *loggerT) Warningln(args ...interface{}) {
|
||||
g.m[warningLog].Println(args...)
|
||||
}
|
||||
|
||||
func (g *loggerT) Warningf(format string, args ...interface{}) {
|
||||
g.m[warningLog].Printf(format, args...)
|
||||
}
|
||||
|
||||
func (g *loggerT) Error(args ...interface{}) {
|
||||
g.m[errorLog].Print(args...)
|
||||
}
|
||||
|
||||
func (g *loggerT) Errorln(args ...interface{}) {
|
||||
g.m[errorLog].Println(args...)
|
||||
}
|
||||
|
||||
func (g *loggerT) Errorf(format string, args ...interface{}) {
|
||||
g.m[errorLog].Printf(format, args...)
|
||||
}
|
||||
|
||||
func (g *loggerT) Fatal(args ...interface{}) {
|
||||
g.m[fatalLog].Fatal(args...)
|
||||
// No need to call os.Exit() again because log.Logger.Fatal() calls os.Exit().
|
||||
}
|
||||
|
||||
func (g *loggerT) Fatalln(args ...interface{}) {
|
||||
g.m[fatalLog].Fatalln(args...)
|
||||
// No need to call os.Exit() again because log.Logger.Fatal() calls os.Exit().
|
||||
}
|
||||
|
||||
func (g *loggerT) Fatalf(format string, args ...interface{}) {
|
||||
g.m[fatalLog].Fatalf(format, args...)
|
||||
// No need to call os.Exit() again because log.Logger.Fatal() calls os.Exit().
|
||||
}
|
||||
|
||||
func (g *loggerT) V(l int) bool {
|
||||
return l <= g.v
|
||||
}
|
||||
|
||||
// DepthLoggerV2 logs at a specified call frame. If a LoggerV2 also implements
|
||||
// DepthLoggerV2, the below functions will be called with the appropriate stack
|
||||
// depth set for trivial functions the logger may ignore.
|
||||
//
|
||||
// This API is EXPERIMENTAL.
|
||||
type DepthLoggerV2 interface {
|
||||
LoggerV2
|
||||
// InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Print.
|
||||
InfoDepth(depth int, args ...interface{})
|
||||
// WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Print.
|
||||
WarningDepth(depth int, args ...interface{})
|
||||
// ErrorDepth logs to ERROR log at the specified depth. Arguments are handled in the manner of fmt.Print.
|
||||
ErrorDepth(depth int, args ...interface{})
|
||||
// FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Print.
|
||||
FatalDepth(depth int, args ...interface{})
|
||||
}
|
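The logger above is configured purely from GRPC_GO_LOG_SEVERITY_LEVEL and GRPC_GO_LOG_VERBOSITY_LEVEL at init time. As a hedged sketch (not part of this vendored change), the same writers and verbosity can also be installed explicitly through the public grpclog API; NewLoggerV2WithVerbosity and SetLoggerV2 are the exported counterparts of the constructor above:

package main

import (
	"io/ioutil"
	"os"

	"google.golang.org/grpc/grpclog"
)

func main() {
	// Equivalent to GRPC_GO_LOG_SEVERITY_LEVEL=info and
	// GRPC_GO_LOG_VERBOSITY_LEVEL=2, but wired up in code: info, warning
	// and error all go to stderr, and V(2) checks pass. Only call this at
	// init time, before any other gRPC functions.
	grpclog.SetLoggerV2(grpclog.NewLoggerV2WithVerbosity(os.Stderr, os.Stderr, os.Stderr, 2))

	grpclog.Info("gRPC-internal info logs are now visible")
	if grpclog.V(2) {
		grpclog.Info("verbose logging enabled")
	}

	// Silence everything again by discarding all three streams.
	grpclog.SetLoggerV2(grpclog.NewLoggerV2(ioutil.Discard, ioutil.Discard, ioutil.Discard))
}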
6
vendor/google.golang.org/grpc/install_gae.sh
generated
vendored
Normal file
|
@ -0,0 +1,6 @@
|
|||
#!/bin/bash
|
||||
|
||||
TMP=$(mktemp -d /tmp/sdk.XXX) \
|
||||
&& curl -o $TMP.zip "https://storage.googleapis.com/appengine-sdks/featured/go_appengine_sdk_linux_amd64-1.9.68.zip" \
|
||||
&& unzip -q $TMP.zip -d $TMP \
|
||||
&& export PATH="$PATH:$TMP/go_appengine"
|
77
vendor/google.golang.org/grpc/interceptor.go
generated
vendored
Normal file
|
@ -0,0 +1,77 @@
|
|||
/*
|
||||
*
|
||||
* Copyright 2016 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
package grpc
|
||||
|
||||
import (
|
||||
"context"
|
||||
)
|
||||
|
||||
// UnaryInvoker is called by UnaryClientInterceptor to complete RPCs.
|
||||
type UnaryInvoker func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, opts ...CallOption) error
|
||||
|
||||
// UnaryClientInterceptor intercepts the execution of a unary RPC on the client. invoker is the handler to complete the RPC
|
||||
// and it is the responsibility of the interceptor to call it.
|
||||
// This is an EXPERIMENTAL API.
|
||||
type UnaryClientInterceptor func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, invoker UnaryInvoker, opts ...CallOption) error
|
||||
|
||||
// Streamer is called by StreamClientInterceptor to create a ClientStream.
|
||||
type Streamer func(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (ClientStream, error)
|
||||
|
||||
// StreamClientInterceptor intercepts the creation of ClientStream. It may return a custom ClientStream to intercept all I/O
|
||||
// operations. streamer is the handler to create a ClientStream and it is the responsibility of the interceptor to call it.
|
||||
// This is an EXPERIMENTAL API.
|
||||
type StreamClientInterceptor func(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, streamer Streamer, opts ...CallOption) (ClientStream, error)
|
||||
|
||||
// UnaryServerInfo consists of various information about a unary RPC on
|
||||
// server side. All per-rpc information may be mutated by the interceptor.
|
||||
type UnaryServerInfo struct {
|
||||
// Server is the service implementation the user provides. This is read-only.
|
||||
Server interface{}
|
||||
// FullMethod is the full RPC method string, i.e., /package.service/method.
|
||||
FullMethod string
|
||||
}
|
||||
|
||||
// UnaryHandler defines the handler invoked by UnaryServerInterceptor to complete the normal
|
||||
// execution of a unary RPC. If a UnaryHandler returns an error, it should be produced by the
|
||||
// status package, or else gRPC will use codes.Unknown as the status code and err.Error() as
|
||||
// the status message of the RPC.
|
||||
type UnaryHandler func(ctx context.Context, req interface{}) (interface{}, error)
|
||||
|
||||
// UnaryServerInterceptor provides a hook to intercept the execution of a unary RPC on the server. info
|
||||
// contains all the information of this RPC the interceptor can operate on. And handler is the wrapper
|
||||
// of the service method implementation. It is the responsibility of the interceptor to invoke handler
|
||||
// to complete the RPC.
|
||||
type UnaryServerInterceptor func(ctx context.Context, req interface{}, info *UnaryServerInfo, handler UnaryHandler) (resp interface{}, err error)
|
||||
|
||||
// StreamServerInfo consists of various information about a streaming RPC on
|
||||
// server side. All per-rpc information may be mutated by the interceptor.
|
||||
type StreamServerInfo struct {
|
||||
// FullMethod is the full RPC method string, i.e., /package.service/method.
|
||||
FullMethod string
|
||||
// IsClientStream indicates whether the RPC is a client streaming RPC.
|
||||
IsClientStream bool
|
||||
// IsServerStream indicates whether the RPC is a server streaming RPC.
|
||||
IsServerStream bool
|
||||
}
|
||||
|
||||
// StreamServerInterceptor provides a hook to intercept the execution of a streaming RPC on the server.
|
||||
// info contains all the information of this RPC the interceptor can operate on. And handler is the
|
||||
// service method implementation. It is the responsibility of the interceptor to invoke handler to
|
||||
// complete the RPC.
|
||||
type StreamServerInterceptor func(srv interface{}, ss ServerStream, info *StreamServerInfo, handler StreamHandler) error
|
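The interceptor types above are plain function values; installing one is done through the usual server or dial options. A minimal hedged sketch of a unary server interceptor that logs the full method name and then delegates to the handler (grpc.UnaryInterceptor is the standard server option used here):

package main

import (
	"context"
	"log"

	"google.golang.org/grpc"
)

// loggingUnaryInterceptor matches the UnaryServerInterceptor signature: it
// receives the request, per-RPC info and the wrapped handler, and must call
// the handler itself to complete the RPC.
func loggingUnaryInterceptor(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
	log.Printf("unary call: %s", info.FullMethod)
	resp, err := handler(ctx, req)
	if err != nil {
		log.Printf("unary call %s failed: %v", info.FullMethod, err)
	}
	return resp, err
}

func main() {
	// The interceptor is installed once per server via a ServerOption.
	s := grpc.NewServer(grpc.UnaryInterceptor(loggingUnaryInterceptor))
	_ = s // register services and call s.Serve(lis) as usual
}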
73
vendor/google.golang.org/grpc/internal/backoff/backoff.go
generated
vendored
Normal file
|
@ -0,0 +1,73 @@
|
|||
/*
|
||||
*
|
||||
* Copyright 2017 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
// Package backoff implements the backoff strategy for gRPC.
|
||||
//
|
||||
// This is kept in internal until the gRPC project decides whether or not to
|
||||
// allow alternative backoff strategies.
|
||||
package backoff
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
grpcbackoff "google.golang.org/grpc/backoff"
|
||||
"google.golang.org/grpc/internal/grpcrand"
|
||||
)
|
||||
|
||||
// Strategy defines the methodology for backing off after a grpc connection
|
||||
// failure.
|
||||
type Strategy interface {
|
||||
// Backoff returns the amount of time to wait before the next retry given
|
||||
// the number of consecutive failures.
|
||||
Backoff(retries int) time.Duration
|
||||
}
|
||||
|
||||
// DefaultExponential is an exponential backoff implementation using the
|
||||
// default values for all the configurable knobs defined in
|
||||
// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md.
|
||||
var DefaultExponential = Exponential{Config: grpcbackoff.DefaultConfig}
|
||||
|
||||
// Exponential implements exponential backoff algorithm as defined in
|
||||
// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md.
|
||||
type Exponential struct {
|
||||
// Config contains all options to configure the backoff algorithm.
|
||||
Config grpcbackoff.Config
|
||||
}
|
||||
|
||||
// Backoff returns the amount of time to wait before the next retry given the
|
||||
// number of retries.
|
||||
func (bc Exponential) Backoff(retries int) time.Duration {
|
||||
if retries == 0 {
|
||||
return bc.Config.BaseDelay
|
||||
}
|
||||
backoff, max := float64(bc.Config.BaseDelay), float64(bc.Config.MaxDelay)
|
||||
for backoff < max && retries > 0 {
|
||||
backoff *= bc.Config.Multiplier
|
||||
retries--
|
||||
}
|
||||
if backoff > max {
|
||||
backoff = max
|
||||
}
|
||||
// Randomize backoff delays so that if a cluster of requests start at
|
||||
// the same time, they won't operate in lockstep.
|
||||
backoff *= 1 + bc.Config.Jitter*(grpcrand.Float64()*2-1)
|
||||
if backoff < 0 {
|
||||
return 0
|
||||
}
|
||||
return time.Duration(backoff)
|
||||
}
|
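This internal Strategy consumes the public google.golang.org/grpc/backoff.Config. As a hedged sketch of how those knobs are reached from application code (illustrative, not part of this vendored change), connection backoff can be tuned when dialing via grpc.WithConnectParams:

package main

import (
	"log"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/backoff"
)

func main() {
	// Same fields the Exponential strategy above consumes: BaseDelay is the
	// first retry delay, each further failure multiplies it by Multiplier,
	// Jitter randomizes it by +/-Jitter, and MaxDelay caps it.
	cfg := backoff.Config{
		BaseDelay:  1 * time.Second,
		Multiplier: 1.6,
		Jitter:     0.2,
		MaxDelay:   30 * time.Second,
	}
	conn, err := grpc.Dial("example.com:443",
		grpc.WithInsecure(), // plaintext only for the sake of the sketch
		grpc.WithConnectParams(grpc.ConnectParams{Backoff: cfg}),
	)
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()
}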
46
vendor/google.golang.org/grpc/internal/balancerload/load.go
generated
vendored
Normal file
|
@ -0,0 +1,46 @@
|
|||
/*
|
||||
* Copyright 2019 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
// Package balancerload defines APIs to parse server loads in trailers. The
|
||||
// parsed loads are sent to balancers in DoneInfo.
|
||||
package balancerload
|
||||
|
||||
import (
|
||||
"google.golang.org/grpc/metadata"
|
||||
)
|
||||
|
||||
// Parser converts loads from metadata into a concrete type.
|
||||
type Parser interface {
|
||||
// Parse parses loads from metadata.
|
||||
Parse(md metadata.MD) interface{}
|
||||
}
|
||||
|
||||
var parser Parser
|
||||
|
||||
// SetParser sets the load parser.
|
||||
//
|
||||
// Not mutex-protected, should be called before any gRPC functions.
|
||||
func SetParser(lr Parser) {
|
||||
parser = lr
|
||||
}
|
||||
|
||||
// Parse calls parser.Parse().
|
||||
func Parse(md metadata.MD) interface{} {
|
||||
if parser == nil {
|
||||
return nil
|
||||
}
|
||||
return parser.Parse(md)
|
||||
}
|
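Parser is a one-method interface, and SetParser is not mutex-protected, so it has to run before any RPCs. A hedged sketch of a trivial parser follows; the package is internal, so this only compiles from inside the gRPC module, and the "x-server-load" trailer key is purely illustrative:

package balancerload_test // illustrative; balancerload is internal to the gRPC module

import (
	"google.golang.org/grpc/internal/balancerload"
	"google.golang.org/grpc/metadata"
)

// rawLoadParser returns the raw values of a hypothetical "x-server-load"
// trailer key, or nil when the key is absent.
type rawLoadParser struct{}

func (rawLoadParser) Parse(md metadata.MD) interface{} {
	if vals := md.Get("x-server-load"); len(vals) > 0 {
		return vals
	}
	return nil
}

func init() {
	// Must happen before any RPCs, as the package comment warns.
	balancerload.SetParser(rawLoadParser{})
}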
170
vendor/google.golang.org/grpc/internal/binarylog/binarylog.go
generated
vendored
Normal file
|
@ -0,0 +1,170 @@
|
|||
/*
|
||||
*
|
||||
* Copyright 2018 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
// Package binarylog implements binary logging as defined in
|
||||
// https://github.com/grpc/proposal/blob/master/A16-binary-logging.md.
|
||||
package binarylog
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
"google.golang.org/grpc/grpclog"
|
||||
"google.golang.org/grpc/internal/grpcutil"
|
||||
)
|
||||
|
||||
// Logger is the global binary logger. It can be used to get binary logger for
|
||||
// each method.
|
||||
type Logger interface {
|
||||
getMethodLogger(methodName string) *MethodLogger
|
||||
}
|
||||
|
||||
// binLogger is the global binary logger for the binary. One of these should be
|
||||
// built at init time from the configuration (environment variable or flags).
|
||||
//
|
||||
// It is used to get a methodLogger for each individual method.
|
||||
var binLogger Logger
|
||||
|
||||
var grpclogLogger = grpclog.Component("binarylog")
|
||||
|
||||
// SetLogger sets the binary logger.
|
||||
//
|
||||
// Only call this at init time.
|
||||
func SetLogger(l Logger) {
|
||||
binLogger = l
|
||||
}
|
||||
|
||||
// GetMethodLogger returns the methodLogger for the given methodName.
|
||||
//
|
||||
// methodName should be in the format of "/service/method".
|
||||
//
|
||||
// Each methodLogger returned by this method is a new instance. This is to
|
||||
// generate sequence id within the call.
|
||||
func GetMethodLogger(methodName string) *MethodLogger {
|
||||
if binLogger == nil {
|
||||
return nil
|
||||
}
|
||||
return binLogger.getMethodLogger(methodName)
|
||||
}
|
||||
|
||||
func init() {
|
||||
const envStr = "GRPC_BINARY_LOG_FILTER"
|
||||
configStr := os.Getenv(envStr)
|
||||
binLogger = NewLoggerFromConfigString(configStr)
|
||||
}
|
||||
|
||||
type methodLoggerConfig struct {
|
||||
// Max length of header and message.
|
||||
hdr, msg uint64
|
||||
}
|
||||
|
||||
type logger struct {
|
||||
all *methodLoggerConfig
|
||||
services map[string]*methodLoggerConfig
|
||||
methods map[string]*methodLoggerConfig
|
||||
|
||||
blacklist map[string]struct{}
|
||||
}
|
||||
|
||||
// newEmptyLogger creates an empty logger. The map fields need to be filled in
|
||||
// using the set* functions.
|
||||
func newEmptyLogger() *logger {
|
||||
return &logger{}
|
||||
}
|
||||
|
||||
// Set method logger for "*".
|
||||
func (l *logger) setDefaultMethodLogger(ml *methodLoggerConfig) error {
|
||||
if l.all != nil {
|
||||
return fmt.Errorf("conflicting global rules found")
|
||||
}
|
||||
l.all = ml
|
||||
return nil
|
||||
}
|
||||
|
||||
// Set method logger for "service/*".
|
||||
//
|
||||
// New methodLogger with same service overrides the old one.
|
||||
func (l *logger) setServiceMethodLogger(service string, ml *methodLoggerConfig) error {
|
||||
if _, ok := l.services[service]; ok {
|
||||
return fmt.Errorf("conflicting service rules for service %v found", service)
|
||||
}
|
||||
if l.services == nil {
|
||||
l.services = make(map[string]*methodLoggerConfig)
|
||||
}
|
||||
l.services[service] = ml
|
||||
return nil
|
||||
}
|
||||
|
||||
// Set method logger for "service/method".
|
||||
//
|
||||
// New methodLogger with same method overrides the old one.
|
||||
func (l *logger) setMethodMethodLogger(method string, ml *methodLoggerConfig) error {
|
||||
if _, ok := l.blacklist[method]; ok {
|
||||
return fmt.Errorf("conflicting blacklist rules for method %v found", method)
|
||||
}
|
||||
if _, ok := l.methods[method]; ok {
|
||||
return fmt.Errorf("conflicting method rules for method %v found", method)
|
||||
}
|
||||
if l.methods == nil {
|
||||
l.methods = make(map[string]*methodLoggerConfig)
|
||||
}
|
||||
l.methods[method] = ml
|
||||
return nil
|
||||
}
|
||||
|
||||
// Set blacklist method for "-service/method".
|
||||
func (l *logger) setBlacklist(method string) error {
|
||||
if _, ok := l.blacklist[method]; ok {
|
||||
return fmt.Errorf("conflicting blacklist rules for method %v found", method)
|
||||
}
|
||||
if _, ok := l.methods[method]; ok {
|
||||
return fmt.Errorf("conflicting method rules for method %v found", method)
|
||||
}
|
||||
if l.blacklist == nil {
|
||||
l.blacklist = make(map[string]struct{})
|
||||
}
|
||||
l.blacklist[method] = struct{}{}
|
||||
return nil
|
||||
}
|
||||
|
||||
// getMethodLogger returns the methodLogger for the given methodName.
|
||||
//
|
||||
// methodName should be in the format of "/service/method".
|
||||
//
|
||||
// Each methodLogger returned by this method is a new instance. This is to
|
||||
// generate sequence id within the call.
|
||||
func (l *logger) getMethodLogger(methodName string) *MethodLogger {
|
||||
s, m, err := grpcutil.ParseMethod(methodName)
|
||||
if err != nil {
|
||||
grpclogLogger.Infof("binarylogging: failed to parse %q: %v", methodName, err)
|
||||
return nil
|
||||
}
|
||||
if ml, ok := l.methods[s+"/"+m]; ok {
|
||||
return newMethodLogger(ml.hdr, ml.msg)
|
||||
}
|
||||
if _, ok := l.blacklist[s+"/"+m]; ok {
|
||||
return nil
|
||||
}
|
||||
if ml, ok := l.services[s]; ok {
|
||||
return newMethodLogger(ml.hdr, ml.msg)
|
||||
}
|
||||
if l.all == nil {
|
||||
return nil
|
||||
}
|
||||
return newMethodLogger(l.all.hdr, l.all.msg)
|
||||
}
|
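Because init() reads GRPC_BINARY_LOG_FILTER exactly once, the filter must be in the environment before the grpc packages initialize; the programmatic equivalent is NewLoggerFromConfigString plus SetLogger. A hedged sketch (the package is internal, so this only compiles from inside the gRPC module):

package binarylog_test // illustrative; binarylog is internal to the gRPC module

import (
	"google.golang.org/grpc/internal/binarylog"
)

func Example_setLogger() {
	// Programmatic equivalent of GRPC_BINARY_LOG_FILTER="Foo/*,-Foo/Bar":
	// log everything in service Foo except method Foo/Bar.
	l := binarylog.NewLoggerFromConfigString("Foo/*,-Foo/Bar")
	binarylog.SetLogger(l) // only safe at init time, per the doc comment

	if ml := binarylog.GetMethodLogger("/Foo/Baz"); ml != nil {
		// ml would be handed each log entry for this call via ml.Log(...).
		_ = ml
	}
}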
42
vendor/google.golang.org/grpc/internal/binarylog/binarylog_testutil.go
generated
vendored
Normal file
|
@ -0,0 +1,42 @@
|
|||
/*
|
||||
*
|
||||
* Copyright 2018 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
// This file contains variables/functions that are exported for testing
|
||||
// only.
|
||||
//
|
||||
// An ideal way for this would be to put those in a *_test.go but in binarylog
|
||||
// package. But this doesn't work with staticcheck with go module. Error was:
|
||||
// "MdToMetadataProto not declared by package binarylog". This could be caused
|
||||
// by the way staticcheck looks for files for a certain package, which doesn't
|
||||
// support *_test.go files.
|
||||
//
|
||||
// Move those to binary_test.go when staticcheck is fixed.
|
||||
|
||||
package binarylog
|
||||
|
||||
var (
|
||||
// AllLogger is a logger that logs all headers/messages for all RPCs. It's
|
||||
// for testing only.
|
||||
AllLogger = NewLoggerFromConfigString("*")
|
||||
// MdToMetadataProto converts metadata to a binary logging proto message.
|
||||
// It's for testing only.
|
||||
MdToMetadataProto = mdToMetadataProto
|
||||
// AddrToProto converts an address to a binary logging proto message. It's
|
||||
// for testing only.
|
||||
AddrToProto = addrToProto
|
||||
)
|
208
vendor/google.golang.org/grpc/internal/binarylog/env_config.go
generated
vendored
Normal file
|
@ -0,0 +1,208 @@
|
|||
/*
|
||||
*
|
||||
* Copyright 2018 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
package binarylog
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// NewLoggerFromConfigString reads the string and builds a logger. It can be used
|
||||
// to build a new logger and assign it to binarylog.Logger.
|
||||
//
|
||||
// Example filter config strings:
|
||||
// - "" Nothing will be logged
|
||||
// - "*" All headers and messages will be fully logged.
|
||||
// - "*{h}" Only headers will be logged.
|
||||
// - "*{m:256}" Only the first 256 bytes of each message will be logged.
|
||||
// - "Foo/*" Logs every method in service Foo
|
||||
// - "Foo/*,-Foo/Bar" Logs every method in service Foo except method /Foo/Bar
|
||||
// - "Foo/*,Foo/Bar{m:256}" Logs the first 256 bytes of each message in method
|
||||
// /Foo/Bar, logs all headers and messages in every other method in service
|
||||
// Foo.
|
||||
//
|
||||
// If two configs exist for one certain method or service, the one specified
|
||||
// later overrides the previous config.
|
||||
func NewLoggerFromConfigString(s string) Logger {
|
||||
if s == "" {
|
||||
return nil
|
||||
}
|
||||
l := newEmptyLogger()
|
||||
methods := strings.Split(s, ",")
|
||||
for _, method := range methods {
|
||||
if err := l.fillMethodLoggerWithConfigString(method); err != nil {
|
||||
grpclogLogger.Warningf("failed to parse binary log config: %v", err)
|
||||
return nil
|
||||
}
|
||||
}
|
||||
return l
|
||||
}
|
||||
|
||||
// fillMethodLoggerWithConfigString parses config, creates methodLogger and adds
|
||||
// it to the right map in the logger.
|
||||
func (l *logger) fillMethodLoggerWithConfigString(config string) error {
|
||||
// "" is invalid.
|
||||
if config == "" {
|
||||
return errors.New("empty string is not a valid method binary logging config")
|
||||
}
|
||||
|
||||
// "-service/method", blacklist, no * or {} allowed.
|
||||
if config[0] == '-' {
|
||||
s, m, suffix, err := parseMethodConfigAndSuffix(config[1:])
|
||||
if err != nil {
|
||||
return fmt.Errorf("invalid config: %q, %v", config, err)
|
||||
}
|
||||
if m == "*" {
|
||||
return fmt.Errorf("invalid config: %q, %v", config, "* not allowed in blacklist config")
|
||||
}
|
||||
if suffix != "" {
|
||||
return fmt.Errorf("invalid config: %q, %v", config, "header/message limit not allowed in blacklist config")
|
||||
}
|
||||
if err := l.setBlacklist(s + "/" + m); err != nil {
|
||||
return fmt.Errorf("invalid config: %v", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// "*{h:256;m:256}"
|
||||
if config[0] == '*' {
|
||||
hdr, msg, err := parseHeaderMessageLengthConfig(config[1:])
|
||||
if err != nil {
|
||||
return fmt.Errorf("invalid config: %q, %v", config, err)
|
||||
}
|
||||
if err := l.setDefaultMethodLogger(&methodLoggerConfig{hdr: hdr, msg: msg}); err != nil {
|
||||
return fmt.Errorf("invalid config: %v", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
s, m, suffix, err := parseMethodConfigAndSuffix(config)
|
||||
if err != nil {
|
||||
return fmt.Errorf("invalid config: %q, %v", config, err)
|
||||
}
|
||||
hdr, msg, err := parseHeaderMessageLengthConfig(suffix)
|
||||
if err != nil {
|
||||
return fmt.Errorf("invalid header/message length config: %q, %v", suffix, err)
|
||||
}
|
||||
if m == "*" {
|
||||
if err := l.setServiceMethodLogger(s, &methodLoggerConfig{hdr: hdr, msg: msg}); err != nil {
|
||||
return fmt.Errorf("invalid config: %v", err)
|
||||
}
|
||||
} else {
|
||||
if err := l.setMethodMethodLogger(s+"/"+m, &methodLoggerConfig{hdr: hdr, msg: msg}); err != nil {
|
||||
return fmt.Errorf("invalid config: %v", err)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
const (
|
||||
// TODO: this const is only used by env_config now. But could be useful for
|
||||
// other config. Move to binarylog.go if necessary.
|
||||
maxUInt = ^uint64(0)
|
||||
|
||||
// For "p.s/m" plus any suffix. Suffix will be parsed again. See test for
|
||||
// expected output.
|
||||
longMethodConfigRegexpStr = `^([\w./]+)/((?:\w+)|[*])(.+)?$`
|
||||
|
||||
// For suffix from above, "{h:123,m:123}". See test for expected output.
|
||||
optionalLengthRegexpStr = `(?::(\d+))?` // Optional ":123".
|
||||
headerConfigRegexpStr = `^{h` + optionalLengthRegexpStr + `}$`
|
||||
messageConfigRegexpStr = `^{m` + optionalLengthRegexpStr + `}$`
|
||||
headerMessageConfigRegexpStr = `^{h` + optionalLengthRegexpStr + `;m` + optionalLengthRegexpStr + `}$`
|
||||
)
|
||||
|
||||
var (
|
||||
longMethodConfigRegexp = regexp.MustCompile(longMethodConfigRegexpStr)
|
||||
headerConfigRegexp = regexp.MustCompile(headerConfigRegexpStr)
|
||||
messageConfigRegexp = regexp.MustCompile(messageConfigRegexpStr)
|
||||
headerMessageConfigRegexp = regexp.MustCompile(headerMessageConfigRegexpStr)
|
||||
)
|
||||
|
||||
// Turn "service/method{h;m}" into "service", "method", "{h;m}".
|
||||
func parseMethodConfigAndSuffix(c string) (service, method, suffix string, _ error) {
|
||||
// Regexp result:
|
||||
//
|
||||
// in: "p.s/m{h:123,m:123}",
|
||||
// out: []string{"p.s/m{h:123,m:123}", "p.s", "m", "{h:123,m:123}"},
|
||||
match := longMethodConfigRegexp.FindStringSubmatch(c)
|
||||
if match == nil {
|
||||
return "", "", "", fmt.Errorf("%q contains invalid substring", c)
|
||||
}
|
||||
service = match[1]
|
||||
method = match[2]
|
||||
suffix = match[3]
|
||||
return
|
||||
}
|
||||
|
||||
// Turn "{h:123;m:345}" into 123, 345.
|
||||
//
|
||||
// Return maxUInt if length is unspecified.
|
||||
func parseHeaderMessageLengthConfig(c string) (hdrLenStr, msgLenStr uint64, err error) {
|
||||
if c == "" {
|
||||
return maxUInt, maxUInt, nil
|
||||
}
|
||||
// Header config only.
|
||||
if match := headerConfigRegexp.FindStringSubmatch(c); match != nil {
|
||||
if s := match[1]; s != "" {
|
||||
hdrLenStr, err = strconv.ParseUint(s, 10, 64)
|
||||
if err != nil {
|
||||
return 0, 0, fmt.Errorf("failed to convert %q to uint", s)
|
||||
}
|
||||
return hdrLenStr, 0, nil
|
||||
}
|
||||
return maxUInt, 0, nil
|
||||
}
|
||||
|
||||
// Message config only.
|
||||
if match := messageConfigRegexp.FindStringSubmatch(c); match != nil {
|
||||
if s := match[1]; s != "" {
|
||||
msgLenStr, err = strconv.ParseUint(s, 10, 64)
|
||||
if err != nil {
|
||||
return 0, 0, fmt.Errorf("failed to convert %q to uint", s)
|
||||
}
|
||||
return 0, msgLenStr, nil
|
||||
}
|
||||
return 0, maxUInt, nil
|
||||
}
|
||||
|
||||
// Header and message config both.
|
||||
if match := headerMessageConfigRegexp.FindStringSubmatch(c); match != nil {
|
||||
// Both hdr and msg are specified, but one or two of them might be empty.
|
||||
hdrLenStr = maxUInt
|
||||
msgLenStr = maxUInt
|
||||
if s := match[1]; s != "" {
|
||||
hdrLenStr, err = strconv.ParseUint(s, 10, 64)
|
||||
if err != nil {
|
||||
return 0, 0, fmt.Errorf("failed to convert %q to uint", s)
|
||||
}
|
||||
}
|
||||
if s := match[2]; s != "" {
|
||||
msgLenStr, err = strconv.ParseUint(s, 10, 64)
|
||||
if err != nil {
|
||||
return 0, 0, fmt.Errorf("failed to convert %q to uint", s)
|
||||
}
|
||||
}
|
||||
return hdrLenStr, msgLenStr, nil
|
||||
}
|
||||
return 0, 0, fmt.Errorf("%q contains invalid substring", c)
|
||||
}
|
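As a worked example of the two parse helpers above (values follow directly from the regexps and doc comments in this file), the config entry "Foo/Bar{h:64;m:1024}" is processed as:

    parseMethodConfigAndSuffix("Foo/Bar{h:64;m:1024}") -> service "Foo", method "Bar", suffix "{h:64;m:1024}"
    parseHeaderMessageLengthConfig("{h:64;m:1024}")    -> hdr 64, msg 1024
    parseHeaderMessageLengthConfig("")                 -> maxUInt, maxUInt (no truncation)
    parseHeaderMessageLengthConfig("{h}")              -> maxUInt, 0 (headers only, unlimited)

fillMethodLoggerWithConfigString then stores methodLoggerConfig{hdr: 64, msg: 1024} under the "Foo/Bar" key via setMethodMethodLogger.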
422
vendor/google.golang.org/grpc/internal/binarylog/method_logger.go
generated
vendored
Normal file
|
@ -0,0 +1,422 @@
|
|||
/*
|
||||
*
|
||||
* Copyright 2018 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
package binarylog
|
||||
|
||||
import (
|
||||
"net"
|
||||
"strings"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/golang/protobuf/proto"
|
||||
"github.com/golang/protobuf/ptypes"
|
||||
pb "google.golang.org/grpc/binarylog/grpc_binarylog_v1"
|
||||
"google.golang.org/grpc/metadata"
|
||||
"google.golang.org/grpc/status"
|
||||
)
|
||||
|
||||
type callIDGenerator struct {
|
||||
id uint64
|
||||
}
|
||||
|
||||
func (g *callIDGenerator) next() uint64 {
|
||||
id := atomic.AddUint64(&g.id, 1)
|
||||
return id
|
||||
}
|
||||
|
||||
// reset is for testing only, and doesn't need to be thread safe.
|
||||
func (g *callIDGenerator) reset() {
|
||||
g.id = 0
|
||||
}
|
||||
|
||||
var idGen callIDGenerator
|
||||
|
||||
// MethodLogger is the sub-logger for each method.
|
||||
type MethodLogger struct {
|
||||
headerMaxLen, messageMaxLen uint64
|
||||
|
||||
callID uint64
|
||||
idWithinCallGen *callIDGenerator
|
||||
|
||||
sink Sink // TODO(blog): make this pluggable.
|
||||
}
|
||||
|
||||
func newMethodLogger(h, m uint64) *MethodLogger {
|
||||
return &MethodLogger{
|
||||
headerMaxLen: h,
|
||||
messageMaxLen: m,
|
||||
|
||||
callID: idGen.next(),
|
||||
idWithinCallGen: &callIDGenerator{},
|
||||
|
||||
sink: defaultSink, // TODO(blog): make it pluggable.
|
||||
}
|
||||
}
|
||||
|
||||
// Log creates a proto binary log entry, and logs it to the sink.
|
||||
func (ml *MethodLogger) Log(c LogEntryConfig) {
|
||||
m := c.toProto()
|
||||
timestamp, _ := ptypes.TimestampProto(time.Now())
|
||||
m.Timestamp = timestamp
|
||||
m.CallId = ml.callID
|
||||
m.SequenceIdWithinCall = ml.idWithinCallGen.next()
|
||||
|
||||
switch pay := m.Payload.(type) {
|
||||
case *pb.GrpcLogEntry_ClientHeader:
|
||||
m.PayloadTruncated = ml.truncateMetadata(pay.ClientHeader.GetMetadata())
|
||||
case *pb.GrpcLogEntry_ServerHeader:
|
||||
m.PayloadTruncated = ml.truncateMetadata(pay.ServerHeader.GetMetadata())
|
||||
case *pb.GrpcLogEntry_Message:
|
||||
m.PayloadTruncated = ml.truncateMessage(pay.Message)
|
||||
}
|
||||
|
||||
ml.sink.Write(m)
|
||||
}
|
||||
|
||||
func (ml *MethodLogger) truncateMetadata(mdPb *pb.Metadata) (truncated bool) {
|
||||
if ml.headerMaxLen == maxUInt {
|
||||
return false
|
||||
}
|
||||
var (
|
||||
bytesLimit = ml.headerMaxLen
|
||||
index int
|
||||
)
|
||||
// At the end of the loop, index will be the first entry where the total
|
||||
// size is greater than the limit:
|
||||
//
|
||||
// len(entry[:index]) <= ml.hdr && len(entry[:index+1]) > ml.hdr.
|
||||
for ; index < len(mdPb.Entry); index++ {
|
||||
entry := mdPb.Entry[index]
|
||||
if entry.Key == "grpc-trace-bin" {
|
||||
// "grpc-trace-bin" is a special key. It's kept in the log entry,
|
||||
// but not counted towards the size limit.
|
||||
continue
|
||||
}
|
||||
currentEntryLen := uint64(len(entry.Value))
|
||||
if currentEntryLen > bytesLimit {
|
||||
break
|
||||
}
|
||||
bytesLimit -= currentEntryLen
|
||||
}
|
||||
truncated = index < len(mdPb.Entry)
|
||||
mdPb.Entry = mdPb.Entry[:index]
|
||||
return truncated
|
||||
}
|
||||
|
||||
func (ml *MethodLogger) truncateMessage(msgPb *pb.Message) (truncated bool) {
|
||||
if ml.messageMaxLen == maxUInt {
|
||||
return false
|
||||
}
|
||||
if ml.messageMaxLen >= uint64(len(msgPb.Data)) {
|
||||
return false
|
||||
}
|
||||
msgPb.Data = msgPb.Data[:ml.messageMaxLen]
|
||||
return true
|
||||
}
|
||||
|
||||
// LogEntryConfig represents the configuration for binary log entry.
|
||||
type LogEntryConfig interface {
|
||||
toProto() *pb.GrpcLogEntry
|
||||
}
|
||||
|
||||
// ClientHeader configs the binary log entry to be a ClientHeader entry.
|
||||
type ClientHeader struct {
|
||||
OnClientSide bool
|
||||
Header metadata.MD
|
||||
MethodName string
|
||||
Authority string
|
||||
Timeout time.Duration
|
||||
// PeerAddr is required only when it's on server side.
|
||||
PeerAddr net.Addr
|
||||
}
|
||||
|
||||
func (c *ClientHeader) toProto() *pb.GrpcLogEntry {
|
||||
// This function doesn't need to set all the fields (e.g. seq ID). The Log
|
||||
// function will set the fields when necessary.
|
||||
clientHeader := &pb.ClientHeader{
|
||||
Metadata: mdToMetadataProto(c.Header),
|
||||
MethodName: c.MethodName,
|
||||
Authority: c.Authority,
|
||||
}
|
||||
if c.Timeout > 0 {
|
||||
clientHeader.Timeout = ptypes.DurationProto(c.Timeout)
|
||||
}
|
||||
ret := &pb.GrpcLogEntry{
|
||||
Type: pb.GrpcLogEntry_EVENT_TYPE_CLIENT_HEADER,
|
||||
Payload: &pb.GrpcLogEntry_ClientHeader{
|
||||
ClientHeader: clientHeader,
|
||||
},
|
||||
}
|
||||
if c.OnClientSide {
|
||||
ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT
|
||||
} else {
|
||||
ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER
|
||||
}
|
||||
if c.PeerAddr != nil {
|
||||
ret.Peer = addrToProto(c.PeerAddr)
|
||||
}
|
||||
return ret
|
||||
}
|
||||
|
||||
// ServerHeader configs the binary log entry to be a ServerHeader entry.
|
||||
type ServerHeader struct {
|
||||
OnClientSide bool
|
||||
Header metadata.MD
|
||||
// PeerAddr is required only when it's on client side.
|
||||
PeerAddr net.Addr
|
||||
}
|
||||
|
||||
func (c *ServerHeader) toProto() *pb.GrpcLogEntry {
|
||||
ret := &pb.GrpcLogEntry{
|
||||
Type: pb.GrpcLogEntry_EVENT_TYPE_SERVER_HEADER,
|
||||
Payload: &pb.GrpcLogEntry_ServerHeader{
|
||||
ServerHeader: &pb.ServerHeader{
|
||||
Metadata: mdToMetadataProto(c.Header),
|
||||
},
|
||||
},
|
||||
}
|
||||
if c.OnClientSide {
|
||||
ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT
|
||||
} else {
|
||||
ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER
|
||||
}
|
||||
if c.PeerAddr != nil {
|
||||
ret.Peer = addrToProto(c.PeerAddr)
|
||||
}
|
||||
return ret
|
||||
}
|
||||
|
||||
// ClientMessage configs the binary log entry to be a ClientMessage entry.
|
||||
type ClientMessage struct {
|
||||
OnClientSide bool
|
||||
// Message can be a proto.Message or []byte. Other message formats are not
|
||||
// supported.
|
||||
Message interface{}
|
||||
}
|
||||
|
||||
func (c *ClientMessage) toProto() *pb.GrpcLogEntry {
|
||||
var (
|
||||
data []byte
|
||||
err error
|
||||
)
|
||||
if m, ok := c.Message.(proto.Message); ok {
|
||||
data, err = proto.Marshal(m)
|
||||
if err != nil {
|
||||
grpclogLogger.Infof("binarylogging: failed to marshal proto message: %v", err)
|
||||
}
|
||||
} else if b, ok := c.Message.([]byte); ok {
|
||||
data = b
|
||||
} else {
|
||||
grpclogLogger.Infof("binarylogging: message to log is neither proto.message nor []byte")
|
||||
}
|
||||
ret := &pb.GrpcLogEntry{
|
||||
Type: pb.GrpcLogEntry_EVENT_TYPE_CLIENT_MESSAGE,
|
||||
Payload: &pb.GrpcLogEntry_Message{
|
||||
Message: &pb.Message{
|
||||
Length: uint32(len(data)),
|
||||
Data: data,
|
||||
},
|
||||
},
|
||||
}
|
||||
if c.OnClientSide {
|
||||
ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT
|
||||
} else {
|
||||
ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER
|
||||
}
|
||||
return ret
|
||||
}
|
||||
|
||||
// ServerMessage configs the binary log entry to be a ServerMessage entry.
|
||||
type ServerMessage struct {
|
||||
OnClientSide bool
|
||||
// Message can be a proto.Message or []byte. Other message formats are not
|
||||
// supported.
|
||||
Message interface{}
|
||||
}
|
||||
|
||||
func (c *ServerMessage) toProto() *pb.GrpcLogEntry {
|
||||
var (
|
||||
data []byte
|
||||
err error
|
||||
)
|
||||
if m, ok := c.Message.(proto.Message); ok {
|
||||
data, err = proto.Marshal(m)
|
||||
if err != nil {
|
||||
grpclogLogger.Infof("binarylogging: failed to marshal proto message: %v", err)
|
||||
}
|
||||
} else if b, ok := c.Message.([]byte); ok {
|
||||
data = b
|
||||
} else {
|
||||
grpclogLogger.Infof("binarylogging: message to log is neither proto.message nor []byte")
|
||||
}
|
||||
ret := &pb.GrpcLogEntry{
|
||||
Type: pb.GrpcLogEntry_EVENT_TYPE_SERVER_MESSAGE,
|
||||
Payload: &pb.GrpcLogEntry_Message{
|
||||
Message: &pb.Message{
|
||||
Length: uint32(len(data)),
|
||||
Data: data,
|
||||
},
|
||||
},
|
||||
}
|
||||
if c.OnClientSide {
|
||||
ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT
|
||||
} else {
|
||||
ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER
|
||||
}
|
||||
return ret
|
||||
}
|
||||
|
||||
// ClientHalfClose configs the binary log entry to be a ClientHalfClose entry.
|
||||
type ClientHalfClose struct {
|
||||
OnClientSide bool
|
||||
}
|
||||
|
||||
func (c *ClientHalfClose) toProto() *pb.GrpcLogEntry {
|
||||
ret := &pb.GrpcLogEntry{
|
||||
Type: pb.GrpcLogEntry_EVENT_TYPE_CLIENT_HALF_CLOSE,
|
||||
Payload: nil, // No payload here.
|
||||
}
|
||||
if c.OnClientSide {
|
||||
ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT
|
||||
} else {
|
||||
ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER
|
||||
}
|
||||
return ret
|
||||
}
|
||||
|
||||
// ServerTrailer configs the binary log entry to be a ServerTrailer entry.
|
||||
type ServerTrailer struct {
|
||||
OnClientSide bool
|
||||
Trailer metadata.MD
|
||||
// Err is the status error.
|
||||
Err error
|
||||
// PeerAddr is required only when it's on client side and the RPC is trailer
|
||||
// only.
|
||||
PeerAddr net.Addr
|
||||
}
|
||||
|
||||
func (c *ServerTrailer) toProto() *pb.GrpcLogEntry {
|
||||
st, ok := status.FromError(c.Err)
|
||||
if !ok {
|
||||
grpclogLogger.Info("binarylogging: error in trailer is not a status error")
|
||||
}
|
||||
var (
|
||||
detailsBytes []byte
|
||||
err error
|
||||
)
|
||||
stProto := st.Proto()
|
||||
if stProto != nil && len(stProto.Details) != 0 {
|
||||
detailsBytes, err = proto.Marshal(stProto)
|
||||
if err != nil {
|
||||
grpclogLogger.Infof("binarylogging: failed to marshal status proto: %v", err)
|
||||
}
|
||||
}
|
||||
ret := &pb.GrpcLogEntry{
|
||||
Type: pb.GrpcLogEntry_EVENT_TYPE_SERVER_TRAILER,
|
||||
Payload: &pb.GrpcLogEntry_Trailer{
|
||||
Trailer: &pb.Trailer{
|
||||
Metadata: mdToMetadataProto(c.Trailer),
|
||||
StatusCode: uint32(st.Code()),
|
||||
StatusMessage: st.Message(),
|
||||
StatusDetails: detailsBytes,
|
||||
},
|
||||
},
|
||||
}
|
||||
if c.OnClientSide {
|
||||
ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT
|
||||
} else {
|
||||
ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER
|
||||
}
|
||||
if c.PeerAddr != nil {
|
||||
ret.Peer = addrToProto(c.PeerAddr)
|
||||
}
|
||||
return ret
|
||||
}
|
||||
|
||||
// Cancel configs the binary log entry to be a Cancel entry.
|
||||
type Cancel struct {
|
||||
OnClientSide bool
|
||||
}
|
||||
|
||||
func (c *Cancel) toProto() *pb.GrpcLogEntry {
|
||||
ret := &pb.GrpcLogEntry{
|
||||
Type: pb.GrpcLogEntry_EVENT_TYPE_CANCEL,
|
||||
Payload: nil,
|
||||
}
|
||||
if c.OnClientSide {
|
||||
ret.Logger = pb.GrpcLogEntry_LOGGER_CLIENT
|
||||
} else {
|
||||
ret.Logger = pb.GrpcLogEntry_LOGGER_SERVER
|
||||
}
|
||||
return ret
|
||||
}
|
||||
|
||||
// metadataKeyOmit returns whether the metadata entry with this key should be
|
||||
// omitted.
|
||||
func metadataKeyOmit(key string) bool {
|
||||
switch key {
|
||||
case "lb-token", ":path", ":authority", "content-encoding", "content-type", "user-agent", "te":
|
||||
return true
|
||||
case "grpc-trace-bin": // grpc-trace-bin is special because it's visiable to users.
|
||||
return false
|
||||
}
|
||||
return strings.HasPrefix(key, "grpc-")
|
||||
}
|
||||
|
||||
func mdToMetadataProto(md metadata.MD) *pb.Metadata {
|
||||
ret := &pb.Metadata{}
|
||||
for k, vv := range md {
|
||||
if metadataKeyOmit(k) {
|
||||
continue
|
||||
}
|
||||
for _, v := range vv {
|
||||
ret.Entry = append(ret.Entry,
|
||||
&pb.MetadataEntry{
|
||||
Key: k,
|
||||
Value: []byte(v),
|
||||
},
|
||||
)
|
||||
}
|
||||
}
|
||||
return ret
|
||||
}
|
||||
|
||||
func addrToProto(addr net.Addr) *pb.Address {
|
||||
ret := &pb.Address{}
|
||||
switch a := addr.(type) {
|
||||
case *net.TCPAddr:
|
||||
if a.IP.To4() != nil {
|
||||
ret.Type = pb.Address_TYPE_IPV4
|
||||
} else if a.IP.To16() != nil {
|
||||
ret.Type = pb.Address_TYPE_IPV6
|
||||
} else {
|
||||
ret.Type = pb.Address_TYPE_UNKNOWN
|
||||
// Do not set address and port fields.
|
||||
break
|
||||
}
|
||||
ret.Address = a.IP.String()
|
||||
ret.IpPort = uint32(a.Port)
|
||||
case *net.UnixAddr:
|
||||
ret.Type = pb.Address_TYPE_UNIX
|
||||
ret.Address = a.String()
|
||||
default:
|
||||
ret.Type = pb.Address_TYPE_UNKNOWN
|
||||
}
|
||||
return ret
|
||||
}
|
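The LogEntryConfig implementations above are what the client and server stream code passes to MethodLogger.Log. A hedged sketch of logging a client header for an outgoing call (the package is internal, so this only compiles from inside the gRPC module; the method name and metadata are illustrative):

package binarylog_test // illustrative; binarylog is internal to the gRPC module

import (
	"time"

	"google.golang.org/grpc/internal/binarylog"
	"google.golang.org/grpc/metadata"
)

func logClientHeader(ml *binarylog.MethodLogger) {
	if ml == nil {
		return // binary logging is disabled for this method
	}
	// Log assigns the timestamp, call id and sequence id, truncates the
	// metadata according to the configured header limit, and writes the
	// entry to the configured Sink.
	ml.Log(&binarylog.ClientHeader{
		OnClientSide: true,
		Header:       metadata.Pairs("authorization", "Bearer ..."),
		MethodName:   "/Foo/Bar",
		Authority:    "example.com",
		Timeout:      5 * time.Second,
	})
}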
161
vendor/google.golang.org/grpc/internal/binarylog/sink.go
generated
vendored
Normal file
|
@ -0,0 +1,161 @@
|
|||
/*
|
||||
*
|
||||
* Copyright 2018 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
package binarylog
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/golang/protobuf/proto"
|
||||
pb "google.golang.org/grpc/binarylog/grpc_binarylog_v1"
|
||||
)
|
||||
|
||||
var (
|
||||
defaultSink Sink = &noopSink{} // TODO(blog): change this default (file in /tmp).
|
||||
)
|
||||
|
||||
// SetDefaultSink sets the sink where binary logs will be written to.
|
||||
//
|
||||
// Not thread safe. Only set during initialization.
|
||||
func SetDefaultSink(s Sink) {
|
||||
if defaultSink != nil {
|
||||
defaultSink.Close()
|
||||
}
|
||||
defaultSink = s
|
||||
}
|
||||
|
||||
// Sink writes log entry into the binary log sink.
|
||||
type Sink interface {
|
||||
// Write will be called to write the log entry into the sink.
|
||||
//
|
||||
// It should be thread-safe so it can be called in parallel.
|
||||
Write(*pb.GrpcLogEntry) error
|
||||
// Close will be called when the Sink is replaced by a new Sink.
|
||||
Close() error
|
||||
}
|
||||
|
||||
type noopSink struct{}
|
||||
|
||||
func (ns *noopSink) Write(*pb.GrpcLogEntry) error { return nil }
|
||||
func (ns *noopSink) Close() error { return nil }
|
||||
|
||||
// newWriterSink creates a binary log sink with the given writer.
|
||||
//
|
||||
// Write() marshals the proto message and writes it to the given writer. Each
|
||||
// message is prefixed with a 4 byte big endian unsigned integer as the length.
|
||||
//
|
||||
// No buffering is done; Close() doesn't try to close the writer.
|
||||
func newWriterSink(w io.Writer) *writerSink {
|
||||
return &writerSink{out: w}
|
||||
}
|
||||
|
||||
type writerSink struct {
|
||||
out io.Writer
|
||||
}
|
||||
|
||||
func (ws *writerSink) Write(e *pb.GrpcLogEntry) error {
|
||||
b, err := proto.Marshal(e)
|
||||
if err != nil {
|
||||
grpclogLogger.Infof("binary logging: failed to marshal proto message: %v", err)
|
||||
}
|
||||
hdr := make([]byte, 4)
|
||||
binary.BigEndian.PutUint32(hdr, uint32(len(b)))
|
||||
if _, err := ws.out.Write(hdr); err != nil {
|
||||
return err
|
||||
}
|
||||
if _, err := ws.out.Write(b); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ws *writerSink) Close() error { return nil }
|
||||
|
||||
type bufWriteCloserSink struct {
|
||||
mu sync.Mutex
|
||||
closer io.Closer
|
||||
out *writerSink // out is built on buf.
|
||||
buf *bufio.Writer // buf is kept for flush.
|
||||
|
||||
writeStartOnce sync.Once
|
||||
writeTicker *time.Ticker
|
||||
}
|
||||
|
||||
func (fs *bufWriteCloserSink) Write(e *pb.GrpcLogEntry) error {
|
||||
// Start the write loop when Write is called.
|
||||
fs.writeStartOnce.Do(fs.startFlushGoroutine)
|
||||
fs.mu.Lock()
|
||||
if err := fs.out.Write(e); err != nil {
|
||||
fs.mu.Unlock()
|
||||
return err
|
||||
}
|
||||
fs.mu.Unlock()
|
||||
return nil
|
||||
}
|
||||
|
||||
const (
|
||||
bufFlushDuration = 60 * time.Second
|
||||
)
|
||||
|
||||
func (fs *bufWriteCloserSink) startFlushGoroutine() {
|
||||
fs.writeTicker = time.NewTicker(bufFlushDuration)
|
||||
go func() {
|
||||
for range fs.writeTicker.C {
|
||||
fs.mu.Lock()
|
||||
fs.buf.Flush()
|
||||
fs.mu.Unlock()
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
func (fs *bufWriteCloserSink) Close() error {
|
||||
if fs.writeTicker != nil {
|
||||
fs.writeTicker.Stop()
|
||||
}
|
||||
fs.mu.Lock()
|
||||
fs.buf.Flush()
|
||||
fs.closer.Close()
|
||||
fs.out.Close()
|
||||
fs.mu.Unlock()
|
||||
return nil
|
||||
}
|
||||
|
||||
func newBufWriteCloserSink(o io.WriteCloser) Sink {
|
||||
bufW := bufio.NewWriter(o)
|
||||
return &bufWriteCloserSink{
|
||||
closer: o,
|
||||
out: newWriterSink(bufW),
|
||||
buf: bufW,
|
||||
}
|
||||
}
|
||||
|
||||
// NewTempFileSink creates a temp file and returns a Sink that writes to this
|
||||
// file.
|
||||
func NewTempFileSink() (Sink, error) {
|
||||
tempFile, err := ioutil.TempFile("/tmp", "grpcgo_binarylog_*.txt")
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create temp file: %v", err)
|
||||
}
|
||||
return newBufWriteCloserSink(tempFile), nil
|
||||
}
|
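writerSink frames each entry as a 4-byte big-endian length followed by the marshaled GrpcLogEntry, so files produced by NewTempFileSink can be decoded with a matching reader. A hedged sketch of such a reader; the file path is illustrative:

package main

import (
	"encoding/binary"
	"fmt"
	"io"
	"log"
	"os"

	"github.com/golang/protobuf/proto"
	pb "google.golang.org/grpc/binarylog/grpc_binarylog_v1"
)

func main() {
	// Path is illustrative; NewTempFileSink writes /tmp/grpcgo_binarylog_*.txt.
	f, err := os.Open("/tmp/grpcgo_binarylog_example.txt")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	hdr := make([]byte, 4)
	for {
		// Each record: 4-byte big-endian length, then the proto bytes.
		if _, err := io.ReadFull(f, hdr); err != nil {
			break // io.EOF once the file is exhausted
		}
		buf := make([]byte, binary.BigEndian.Uint32(hdr))
		if _, err := io.ReadFull(f, buf); err != nil {
			log.Fatal(err)
		}
		entry := &pb.GrpcLogEntry{}
		if err := proto.Unmarshal(buf, entry); err != nil {
			log.Fatal(err)
		}
		fmt.Println(entry.GetType(), entry.GetCallId(), entry.GetSequenceIdWithinCall())
	}
}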
85
vendor/google.golang.org/grpc/internal/buffer/unbounded.go
generated
vendored
Normal file
|
@ -0,0 +1,85 @@
|
|||
/*
|
||||
* Copyright 2019 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
// Package buffer provides an implementation of an unbounded buffer.
|
||||
package buffer
|
||||
|
||||
import "sync"
|
||||
|
||||
// Unbounded is an implementation of an unbounded buffer which does not use
|
||||
// extra goroutines. This is typically used for passing updates from one entity
|
||||
// to another within gRPC.
|
||||
//
|
||||
// All methods on this type are thread-safe and don't block on anything except
|
||||
// the underlying mutex used for synchronization.
|
||||
//
|
||||
// Unbounded supports values of any type to be stored in it by using a channel
|
||||
// of `interface{}`. This means that a call to Put() incurs an extra memory
|
||||
// allocation, and also that users need a type assertion while reading. For
|
||||
// performance critical code paths, using Unbounded is strongly discouraged and
|
||||
// defining a new type specific implementation of this buffer is preferred. See
|
||||
// internal/transport/transport.go for an example of this.
|
||||
type Unbounded struct {
|
||||
c chan interface{}
|
||||
mu sync.Mutex
|
||||
backlog []interface{}
|
||||
}
|
||||
|
||||
// NewUnbounded returns a new instance of Unbounded.
|
||||
func NewUnbounded() *Unbounded {
|
||||
return &Unbounded{c: make(chan interface{}, 1)}
|
||||
}
|
||||
|
||||
// Put adds t to the unbounded buffer.
|
||||
func (b *Unbounded) Put(t interface{}) {
|
||||
b.mu.Lock()
|
||||
if len(b.backlog) == 0 {
|
||||
select {
|
||||
case b.c <- t:
|
||||
b.mu.Unlock()
|
||||
return
|
||||
default:
|
||||
}
|
||||
}
|
||||
b.backlog = append(b.backlog, t)
|
||||
b.mu.Unlock()
|
||||
}
|
||||
|
||||
// Load sends the earliest buffered data, if any, onto the read channel
|
||||
// returned by Get(). Users are expected to call this every time they read a
|
||||
// value from the read channel.
|
||||
func (b *Unbounded) Load() {
|
||||
b.mu.Lock()
|
||||
if len(b.backlog) > 0 {
|
||||
select {
|
||||
case b.c <- b.backlog[0]:
|
||||
b.backlog[0] = nil
|
||||
b.backlog = b.backlog[1:]
|
||||
default:
|
||||
}
|
||||
}
|
||||
b.mu.Unlock()
|
||||
}
|
||||
|
||||
// Get returns a read channel on which values added to the buffer, via Put(),
|
||||
// are sent.
|
||||
//
|
||||
// Upon reading a value from this channel, users are expected to call Load() to
|
||||
// send the next buffered value onto the channel if there is any.
|
||||
func (b *Unbounded) Get() <-chan interface{} {
|
||||
return b.c
|
||||
}
|
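The doc comments above spell out the read protocol: receive from Get(), then call Load() to promote the next backlogged value onto the channel. A hedged sketch of a producer and consumer following that protocol (the package is internal, so this only compiles from inside the gRPC module):

package buffer_test // illustrative; buffer is internal to the gRPC module

import (
	"fmt"

	"google.golang.org/grpc/internal/buffer"
)

func Example_unbounded() {
	b := buffer.NewUnbounded()

	// Producer: Put never blocks; overflow goes to the backlog slice.
	for i := 0; i < 3; i++ {
		b.Put(i)
	}

	// Consumer: read one value, then call Load() so the next backlogged
	// value (if any) is pushed onto the channel returned by Get().
	for i := 0; i < 3; i++ {
		v := <-b.Get()
		b.Load()
		fmt.Println(v.(int))
	}
	// Output:
	// 0
	// 1
	// 2
}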
739
vendor/google.golang.org/grpc/internal/channelz/funcs.go
generated
vendored
Normal file
|
@ -0,0 +1,739 @@
|
|||
/*
|
||||
*
|
||||
* Copyright 2018 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
// Package channelz defines APIs for enabling channelz service, entry
|
||||
// registration/deletion, and accessing channelz data. It also defines channelz
|
||||
// metric struct formats.
|
||||
//
|
||||
// All APIs in this package are experimental.
|
||||
package channelz
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sort"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"google.golang.org/grpc/grpclog"
|
||||
)
|
||||
|
||||
const (
|
||||
defaultMaxTraceEntry int32 = 30
|
||||
)
|
||||
|
||||
var (
|
||||
db dbWrapper
|
||||
idGen idGenerator
|
||||
// EntryPerPage defines the number of channelz entries to be shown on a web page.
|
||||
EntryPerPage = int64(50)
|
||||
curState int32
|
||||
maxTraceEntry = defaultMaxTraceEntry
|
||||
)
|
||||
|
||||
// TurnOn turns on channelz data collection.
|
||||
func TurnOn() {
|
||||
if !IsOn() {
|
||||
NewChannelzStorage()
|
||||
atomic.StoreInt32(&curState, 1)
|
||||
}
|
||||
}
|
||||
|
||||
// IsOn returns whether channelz data collection is on.
|
||||
func IsOn() bool {
|
||||
return atomic.CompareAndSwapInt32(&curState, 1, 1)
|
||||
}
|
||||
|
||||
// SetMaxTraceEntry sets the maximum number of trace entries per entity (i.e. channel/subchannel).
|
||||
// Setting it to 0 will disable channel tracing.
|
||||
func SetMaxTraceEntry(i int32) {
|
||||
atomic.StoreInt32(&maxTraceEntry, i)
|
||||
}
|
||||
|
||||
// ResetMaxTraceEntryToDefault resets the maximum number of trace entries per entity to the default.
|
||||
func ResetMaxTraceEntryToDefault() {
|
||||
atomic.StoreInt32(&maxTraceEntry, defaultMaxTraceEntry)
|
||||
}
|
||||
|
||||
func getMaxTraceEntry() int {
|
||||
i := atomic.LoadInt32(&maxTraceEntry)
|
||||
return int(i)
|
||||
}
|
||||
|
||||
// dbWrapper wraps around a reference to internal channelz data storage, and
|
||||
// provides synchronized functionality to set and get the reference.
|
||||
type dbWrapper struct {
|
||||
mu sync.RWMutex
|
||||
DB *channelMap
|
||||
}
|
||||
|
||||
func (d *dbWrapper) set(db *channelMap) {
|
||||
d.mu.Lock()
|
||||
d.DB = db
|
||||
d.mu.Unlock()
|
||||
}
|
||||
|
||||
func (d *dbWrapper) get() *channelMap {
|
||||
d.mu.RLock()
|
||||
defer d.mu.RUnlock()
|
||||
return d.DB
|
||||
}
|
||||
|
||||
// NewChannelzStorage initializes channelz data storage and id generator.
|
||||
//
|
||||
// This function returns a cleanup function to wait for all channelz state to be reset by the
|
||||
// grpc goroutines when those entities get closed. By using this cleanup function, we make sure tests
|
||||
// don't mess up each other, i.e. a lingering goroutine from a previous test doing entity removal does not
|
||||
// happen to remove an entity just registered by the new test, since the id space is the same.
|
||||
//
|
||||
// Note: This function is exported for testing purposes only. Users should not call
|
||||
// it in most cases.
|
||||
func NewChannelzStorage() (cleanup func() error) {
|
||||
db.set(&channelMap{
|
||||
topLevelChannels: make(map[int64]struct{}),
|
||||
channels: make(map[int64]*channel),
|
||||
listenSockets: make(map[int64]*listenSocket),
|
||||
normalSockets: make(map[int64]*normalSocket),
|
||||
servers: make(map[int64]*server),
|
||||
subChannels: make(map[int64]*subChannel),
|
||||
})
|
||||
idGen.reset()
|
||||
return func() error {
|
||||
var err error
|
||||
cm := db.get()
|
||||
if cm == nil {
|
||||
return nil
|
||||
}
|
||||
for i := 0; i < 1000; i++ {
|
||||
cm.mu.Lock()
|
||||
if len(cm.topLevelChannels) == 0 && len(cm.servers) == 0 && len(cm.channels) == 0 && len(cm.subChannels) == 0 && len(cm.listenSockets) == 0 && len(cm.normalSockets) == 0 {
|
||||
cm.mu.Unlock()
|
||||
// all things stored in the channelz map have been cleared.
|
||||
return nil
|
||||
}
|
||||
cm.mu.Unlock()
|
||||
time.Sleep(10 * time.Millisecond)
|
||||
}
|
||||
|
||||
cm.mu.Lock()
|
||||
err = fmt.Errorf("after 10s the channelz map has not been cleaned up yet, topchannels: %d, servers: %d, channels: %d, subchannels: %d, listen sockets: %d, normal sockets: %d", len(cm.topLevelChannels), len(cm.servers), len(cm.channels), len(cm.subChannels), len(cm.listenSockets), len(cm.normalSockets))
|
||||
cm.mu.Unlock()
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// GetTopChannels returns a slice of top channel's ChannelMetric, along with a
|
||||
// boolean indicating whether there's more top channels to be queried for.
|
||||
//
|
||||
// The arg id specifies that only top channel with id at or above it will be included
|
||||
// in the result. The returned slice is up to a length of the arg maxResults or
|
||||
// EntryPerPage if maxResults is zero, and is sorted in ascending id order.
|
||||
func GetTopChannels(id int64, maxResults int64) ([]*ChannelMetric, bool) {
|
||||
return db.get().GetTopChannels(id, maxResults)
|
||||
}
|
||||
|
||||
// GetServers returns a slice of server's ServerMetric, along with a
|
||||
// boolean indicating whether there's more servers to be queried for.
|
||||
//
|
||||
// The arg id specifies that only server with id at or above it will be included
|
||||
// in the result. The returned slice is up to a length of the arg maxResults or
|
||||
// EntryPerPage if maxResults is zero, and is sorted in ascending id order.
|
||||
func GetServers(id int64, maxResults int64) ([]*ServerMetric, bool) {
|
||||
return db.get().GetServers(id, maxResults)
|
||||
}
|
||||
|
||||
// GetServerSockets returns a slice of server's (identified by id) normal socket's
|
||||
// SocketMetric, along with a boolean indicating whether there's more sockets to
|
||||
// be queried for.
|
||||
//
|
||||
// The arg startID specifies that only sockets with id at or above it will be
|
||||
// included in the result. The returned slice is up to a length of the arg maxResults
|
||||
// or EntryPerPage if maxResults is zero, and is sorted in ascending id order.
|
||||
func GetServerSockets(id int64, startID int64, maxResults int64) ([]*SocketMetric, bool) {
|
||||
return db.get().GetServerSockets(id, startID, maxResults)
|
||||
}
|
||||
|
||||
// GetChannel returns the ChannelMetric for the channel (identified by id).
|
||||
func GetChannel(id int64) *ChannelMetric {
|
||||
return db.get().GetChannel(id)
|
||||
}
|
||||
|
||||
// GetSubChannel returns the SubChannelMetric for the subchannel (identified by id).
|
||||
func GetSubChannel(id int64) *SubChannelMetric {
|
||||
return db.get().GetSubChannel(id)
|
||||
}
|
||||
|
||||
// GetSocket returns the SocketInternalMetric for the socket (identified by id).
|
||||
func GetSocket(id int64) *SocketMetric {
|
||||
return db.get().GetSocket(id)
|
||||
}
|
||||
|
||||
// GetServer returns the ServerMetric for the server (identified by id).
|
||||
func GetServer(id int64) *ServerMetric {
|
||||
return db.get().GetServer(id)
|
||||
}
|
||||
|
||||
// RegisterChannel registers the given channel c in channelz database with ref
|
||||
// as its reference name, and add it to the child list of its parent (identified
|
||||
// by pid). pid = 0 means no parent. It returns the unique channelz tracking id
|
||||
// assigned to this channel.
|
||||
func RegisterChannel(c Channel, pid int64, ref string) int64 {
|
||||
id := idGen.genID()
|
||||
cn := &channel{
|
||||
refName: ref,
|
||||
c: c,
|
||||
subChans: make(map[int64]string),
|
||||
nestedChans: make(map[int64]string),
|
||||
id: id,
|
||||
pid: pid,
|
||||
trace: &channelTrace{createdTime: time.Now(), events: make([]*TraceEvent, 0, getMaxTraceEntry())},
|
||||
}
|
||||
if pid == 0 {
|
||||
db.get().addChannel(id, cn, true, pid, ref)
|
||||
} else {
|
||||
db.get().addChannel(id, cn, false, pid, ref)
|
||||
}
|
||||
return id
|
||||
}
|
||||
|
||||
// RegisterSubChannel registers the given channel c in channelz database with ref
|
||||
// as its reference name, and add it to the child list of its parent (identified
|
||||
// by pid). It returns the unique channelz tracking id assigned to this subchannel.
|
||||
func RegisterSubChannel(c Channel, pid int64, ref string) int64 {
|
||||
if pid == 0 {
|
||||
logger.Error("a SubChannel's parent id cannot be 0")
|
||||
return 0
|
||||
}
|
||||
id := idGen.genID()
|
||||
sc := &subChannel{
|
||||
refName: ref,
|
||||
c: c,
|
||||
sockets: make(map[int64]string),
|
||||
id: id,
|
||||
pid: pid,
|
||||
trace: &channelTrace{createdTime: time.Now(), events: make([]*TraceEvent, 0, getMaxTraceEntry())},
|
||||
}
|
||||
db.get().addSubChannel(id, sc, pid, ref)
|
||||
return id
|
||||
}
|
||||
|
||||
// RegisterServer registers the given server s in the channelz database. It returns
|
||||
// the unique channelz tracking id assigned to this server.
|
||||
func RegisterServer(s Server, ref string) int64 {
|
||||
id := idGen.genID()
|
||||
svr := &server{
|
||||
refName: ref,
|
||||
s: s,
|
||||
sockets: make(map[int64]string),
|
||||
listenSockets: make(map[int64]string),
|
||||
id: id,
|
||||
}
|
||||
db.get().addServer(id, svr)
|
||||
return id
|
||||
}
|
||||
|
||||
// RegisterListenSocket registers the given listen socket s in the channelz database
|
||||
// with ref as its reference name, and adds it to the child list of its parent
|
||||
// (identified by pid). It returns the unique channelz tracking id assigned to
|
||||
// this listen socket.
|
||||
func RegisterListenSocket(s Socket, pid int64, ref string) int64 {
|
||||
if pid == 0 {
|
||||
logger.Error("a ListenSocket's parent id cannot be 0")
|
||||
return 0
|
||||
}
|
||||
id := idGen.genID()
|
||||
ls := &listenSocket{refName: ref, s: s, id: id, pid: pid}
|
||||
db.get().addListenSocket(id, ls, pid, ref)
|
||||
return id
|
||||
}
|
||||
|
||||
// RegisterNormalSocket registers the given normal socket s in the channelz database
|
||||
// with ref as its reference name, and adds it to the child list of its parent
|
||||
// (identified by pid). It returns the unique channelz tracking id assigned to
|
||||
// this normal socket.
|
||||
func RegisterNormalSocket(s Socket, pid int64, ref string) int64 {
|
||||
if pid == 0 {
|
||||
logger.Error("a NormalSocket's parent id cannot be 0")
|
||||
return 0
|
||||
}
|
||||
id := idGen.genID()
|
||||
ns := &normalSocket{refName: ref, s: s, id: id, pid: pid}
|
||||
db.get().addNormalSocket(id, ns, pid, ref)
|
||||
return id
|
||||
}
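
// Illustrative sketch, not part of the upstream file: the hypothetical exampleServer and
// exampleSocket types below only return empty metrics; they exist to show the
// registration hierarchy. A server is registered first, and its listen socket and
// accepted (normal) sockets are then registered with the server's id as pid, since a pid
// of 0 is rejected by the functions above.
type exampleServer struct{}

func (exampleServer) ChannelzMetric() *ServerInternalMetric { return &ServerInternalMetric{} }

type exampleSocket struct{}

func (exampleSocket) ChannelzMetric() *SocketInternalMetric { return &SocketInternalMetric{} }

func exampleRegisterSockets() {
	if !IsOn() {
		return
	}
	svrID := RegisterServer(exampleServer{}, "example-server")
	lsID := RegisterListenSocket(exampleSocket{}, svrID, "example-listen-socket")
	nsID := RegisterNormalSocket(exampleSocket{}, svrID, "example-normal-socket")
	// Tear down children before the parent so the server's child lists empty out.
	RemoveEntry(nsID)
	RemoveEntry(lsID)
	RemoveEntry(svrID)
}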
|
||||
|
||||
// RemoveEntry removes the entry with the given unique channelz tracking id from the
|
||||
// channelz database.
|
||||
func RemoveEntry(id int64) {
|
||||
db.get().removeEntry(id)
|
||||
}
|
||||
|
||||
// TraceEventDesc is what the caller of AddTraceEvent should provide to describe the event to be added
|
||||
// to the channel trace.
|
||||
// The Parent field is optional. It is used for an event that will also be recorded in the entity's parent
|
||||
// trace.
|
||||
type TraceEventDesc struct {
|
||||
Desc string
|
||||
Severity Severity
|
||||
Parent *TraceEventDesc
|
||||
}
|
||||
|
||||
// AddTraceEvent adds a trace event related to the entity with the specified id, using the provided TraceEventDesc.
|
||||
func AddTraceEvent(l grpclog.DepthLoggerV2, id int64, depth int, desc *TraceEventDesc) {
|
||||
for d := desc; d != nil; d = d.Parent {
|
||||
switch d.Severity {
|
||||
case CtUNKNOWN:
|
||||
l.InfoDepth(depth+1, d.Desc)
|
||||
case CtINFO:
|
||||
l.InfoDepth(depth+1, d.Desc)
|
||||
case CtWarning:
|
||||
l.WarningDepth(depth+1, d.Desc)
|
||||
case CtError:
|
||||
l.ErrorDepth(depth+1, d.Desc)
|
||||
}
|
||||
}
|
||||
if getMaxTraceEntry() == 0 {
|
||||
return
|
||||
}
|
||||
db.get().traceEvent(id, desc)
|
||||
}
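
// Illustrative sketch, not part of the upstream file: a TraceEventDesc with a Parent
// records one event on the entity itself and a second, linked event on its parent's
// trace; the loop above logs every level of that chain. The subChanID parameter is a
// hypothetical channelz id of an existing subchannel.
func exampleAddNestedTraceEvent(l grpclog.DepthLoggerV2, subChanID int64) {
	AddTraceEvent(l, subChanID, 0, &TraceEventDesc{
		Desc:     "Subchannel picked a new address",
		Severity: CtINFO,
		Parent: &TraceEventDesc{
			Desc:     "Child subchannel updated",
			Severity: CtINFO,
		},
	})
}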
|
||||
|
||||
// channelMap is the storage data structure for channelz.
|
||||
// Methods of channelMap can be divided into two categories with respect to locking.
|
||||
// 1. Methods that acquire the global lock.
|
||||
// 2. Methods that can only be called when the global lock is held.
|
||||
// Methods of the second type must always be called from within a method of the first type.
|
||||
type channelMap struct {
|
||||
mu sync.RWMutex
|
||||
topLevelChannels map[int64]struct{}
|
||||
servers map[int64]*server
|
||||
channels map[int64]*channel
|
||||
subChannels map[int64]*subChannel
|
||||
listenSockets map[int64]*listenSocket
|
||||
normalSockets map[int64]*normalSocket
|
||||
}
|
||||
|
||||
func (c *channelMap) addServer(id int64, s *server) {
|
||||
c.mu.Lock()
|
||||
s.cm = c
|
||||
c.servers[id] = s
|
||||
c.mu.Unlock()
|
||||
}
|
||||
|
||||
func (c *channelMap) addChannel(id int64, cn *channel, isTopChannel bool, pid int64, ref string) {
|
||||
c.mu.Lock()
|
||||
cn.cm = c
|
||||
cn.trace.cm = c
|
||||
c.channels[id] = cn
|
||||
if isTopChannel {
|
||||
c.topLevelChannels[id] = struct{}{}
|
||||
} else {
|
||||
c.findEntry(pid).addChild(id, cn)
|
||||
}
|
||||
c.mu.Unlock()
|
||||
}
|
||||
|
||||
func (c *channelMap) addSubChannel(id int64, sc *subChannel, pid int64, ref string) {
|
||||
c.mu.Lock()
|
||||
sc.cm = c
|
||||
sc.trace.cm = c
|
||||
c.subChannels[id] = sc
|
||||
c.findEntry(pid).addChild(id, sc)
|
||||
c.mu.Unlock()
|
||||
}
|
||||
|
||||
func (c *channelMap) addListenSocket(id int64, ls *listenSocket, pid int64, ref string) {
|
||||
c.mu.Lock()
|
||||
ls.cm = c
|
||||
c.listenSockets[id] = ls
|
||||
c.findEntry(pid).addChild(id, ls)
|
||||
c.mu.Unlock()
|
||||
}
|
||||
|
||||
func (c *channelMap) addNormalSocket(id int64, ns *normalSocket, pid int64, ref string) {
|
||||
c.mu.Lock()
|
||||
ns.cm = c
|
||||
c.normalSockets[id] = ns
|
||||
c.findEntry(pid).addChild(id, ns)
|
||||
c.mu.Unlock()
|
||||
}
|
||||
|
||||
// removeEntry triggers the removal of an entry, which may not actually delete the entry right away if it has to
|
||||
// wait on the deletion of its children and until no other entity's channel trace references it.
|
||||
// It may lead to a chain of entry deletions. For example, deleting the last socket of a gracefully
|
||||
// shutting down server will also lead to the server itself being deleted.
|
||||
func (c *channelMap) removeEntry(id int64) {
|
||||
c.mu.Lock()
|
||||
c.findEntry(id).triggerDelete()
|
||||
c.mu.Unlock()
|
||||
}
|
||||
|
||||
// c.mu must be held by the caller
|
||||
func (c *channelMap) decrTraceRefCount(id int64) {
|
||||
e := c.findEntry(id)
|
||||
if v, ok := e.(tracedChannel); ok {
|
||||
v.decrTraceRefCount()
|
||||
e.deleteSelfIfReady()
|
||||
}
|
||||
}
|
||||
|
||||
// c.mu must be held by the caller.
|
||||
func (c *channelMap) findEntry(id int64) entry {
|
||||
var v entry
|
||||
var ok bool
|
||||
if v, ok = c.channels[id]; ok {
|
||||
return v
|
||||
}
|
||||
if v, ok = c.subChannels[id]; ok {
|
||||
return v
|
||||
}
|
||||
if v, ok = c.servers[id]; ok {
|
||||
return v
|
||||
}
|
||||
if v, ok = c.listenSockets[id]; ok {
|
||||
return v
|
||||
}
|
||||
if v, ok = c.normalSockets[id]; ok {
|
||||
return v
|
||||
}
|
||||
return &dummyEntry{idNotFound: id}
|
||||
}
|
||||
|
||||
// c.mu must be held by the caller
|
||||
// deleteEntry simply deletes an entry from the channelMap. Before calling this
|
||||
// method, the caller must check that this entry is ready to be deleted, i.e. removeEntry()
|
||||
// has been called on it, and no children still exist.
|
||||
// Conditionals are ordered by the expected frequency of deletion of each entity
|
||||
// type, in order to optimize performance.
|
||||
func (c *channelMap) deleteEntry(id int64) {
|
||||
var ok bool
|
||||
if _, ok = c.normalSockets[id]; ok {
|
||||
delete(c.normalSockets, id)
|
||||
return
|
||||
}
|
||||
if _, ok = c.subChannels[id]; ok {
|
||||
delete(c.subChannels, id)
|
||||
return
|
||||
}
|
||||
if _, ok = c.channels[id]; ok {
|
||||
delete(c.channels, id)
|
||||
delete(c.topLevelChannels, id)
|
||||
return
|
||||
}
|
||||
if _, ok = c.listenSockets[id]; ok {
|
||||
delete(c.listenSockets, id)
|
||||
return
|
||||
}
|
||||
if _, ok = c.servers[id]; ok {
|
||||
delete(c.servers, id)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
func (c *channelMap) traceEvent(id int64, desc *TraceEventDesc) {
|
||||
c.mu.Lock()
|
||||
child := c.findEntry(id)
|
||||
childTC, ok := child.(tracedChannel)
|
||||
if !ok {
|
||||
c.mu.Unlock()
|
||||
return
|
||||
}
|
||||
childTC.getChannelTrace().append(&TraceEvent{Desc: desc.Desc, Severity: desc.Severity, Timestamp: time.Now()})
|
||||
if desc.Parent != nil {
|
||||
parent := c.findEntry(child.getParentID())
|
||||
var chanType RefChannelType
|
||||
switch child.(type) {
|
||||
case *channel:
|
||||
chanType = RefChannel
|
||||
case *subChannel:
|
||||
chanType = RefSubChannel
|
||||
}
|
||||
if parentTC, ok := parent.(tracedChannel); ok {
|
||||
parentTC.getChannelTrace().append(&TraceEvent{
|
||||
Desc: desc.Parent.Desc,
|
||||
Severity: desc.Parent.Severity,
|
||||
Timestamp: time.Now(),
|
||||
RefID: id,
|
||||
RefName: childTC.getRefName(),
|
||||
RefType: chanType,
|
||||
})
|
||||
childTC.incrTraceRefCount()
|
||||
}
|
||||
}
|
||||
c.mu.Unlock()
|
||||
}
|
||||
|
||||
type int64Slice []int64
|
||||
|
||||
func (s int64Slice) Len() int { return len(s) }
|
||||
func (s int64Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
|
||||
func (s int64Slice) Less(i, j int) bool { return s[i] < s[j] }
|
||||
|
||||
func copyMap(m map[int64]string) map[int64]string {
|
||||
n := make(map[int64]string)
|
||||
for k, v := range m {
|
||||
n[k] = v
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
func min(a, b int64) int64 {
|
||||
if a < b {
|
||||
return a
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
||||
func (c *channelMap) GetTopChannels(id int64, maxResults int64) ([]*ChannelMetric, bool) {
|
||||
if maxResults <= 0 {
|
||||
maxResults = EntryPerPage
|
||||
}
|
||||
c.mu.RLock()
|
||||
l := int64(len(c.topLevelChannels))
|
||||
ids := make([]int64, 0, l)
|
||||
cns := make([]*channel, 0, min(l, maxResults))
|
||||
|
||||
for k := range c.topLevelChannels {
|
||||
ids = append(ids, k)
|
||||
}
|
||||
sort.Sort(int64Slice(ids))
|
||||
idx := sort.Search(len(ids), func(i int) bool { return ids[i] >= id })
|
||||
count := int64(0)
|
||||
var end bool
|
||||
var t []*ChannelMetric
|
||||
for i, v := range ids[idx:] {
|
||||
if count == maxResults {
|
||||
break
|
||||
}
|
||||
if cn, ok := c.channels[v]; ok {
|
||||
cns = append(cns, cn)
|
||||
t = append(t, &ChannelMetric{
|
||||
NestedChans: copyMap(cn.nestedChans),
|
||||
SubChans: copyMap(cn.subChans),
|
||||
})
|
||||
count++
|
||||
}
|
||||
if i == len(ids[idx:])-1 {
|
||||
end = true
|
||||
break
|
||||
}
|
||||
}
|
||||
c.mu.RUnlock()
|
||||
if count == 0 {
|
||||
end = true
|
||||
}
|
||||
|
||||
for i, cn := range cns {
|
||||
t[i].ChannelData = cn.c.ChannelzMetric()
|
||||
t[i].ID = cn.id
|
||||
t[i].RefName = cn.refName
|
||||
t[i].Trace = cn.trace.dumpData()
|
||||
}
|
||||
return t, end
|
||||
}
|
||||
|
||||
func (c *channelMap) GetServers(id, maxResults int64) ([]*ServerMetric, bool) {
|
||||
if maxResults <= 0 {
|
||||
maxResults = EntryPerPage
|
||||
}
|
||||
c.mu.RLock()
|
||||
l := int64(len(c.servers))
|
||||
ids := make([]int64, 0, l)
|
||||
ss := make([]*server, 0, min(l, maxResults))
|
||||
for k := range c.servers {
|
||||
ids = append(ids, k)
|
||||
}
|
||||
sort.Sort(int64Slice(ids))
|
||||
idx := sort.Search(len(ids), func(i int) bool { return ids[i] >= id })
|
||||
count := int64(0)
|
||||
var end bool
|
||||
var s []*ServerMetric
|
||||
for i, v := range ids[idx:] {
|
||||
if count == maxResults {
|
||||
break
|
||||
}
|
||||
if svr, ok := c.servers[v]; ok {
|
||||
ss = append(ss, svr)
|
||||
s = append(s, &ServerMetric{
|
||||
ListenSockets: copyMap(svr.listenSockets),
|
||||
})
|
||||
count++
|
||||
}
|
||||
if i == len(ids[idx:])-1 {
|
||||
end = true
|
||||
break
|
||||
}
|
||||
}
|
||||
c.mu.RUnlock()
|
||||
if count == 0 {
|
||||
end = true
|
||||
}
|
||||
|
||||
for i, svr := range ss {
|
||||
s[i].ServerData = svr.s.ChannelzMetric()
|
||||
s[i].ID = svr.id
|
||||
s[i].RefName = svr.refName
|
||||
}
|
||||
return s, end
|
||||
}
|
||||
|
||||
func (c *channelMap) GetServerSockets(id int64, startID int64, maxResults int64) ([]*SocketMetric, bool) {
|
||||
if maxResults <= 0 {
|
||||
maxResults = EntryPerPage
|
||||
}
|
||||
var svr *server
|
||||
var ok bool
|
||||
c.mu.RLock()
|
||||
if svr, ok = c.servers[id]; !ok {
|
||||
// server with id doesn't exist.
|
||||
c.mu.RUnlock()
|
||||
return nil, true
|
||||
}
|
||||
svrskts := svr.sockets
|
||||
l := int64(len(svrskts))
|
||||
ids := make([]int64, 0, l)
|
||||
sks := make([]*normalSocket, 0, min(l, maxResults))
|
||||
for k := range svrskts {
|
||||
ids = append(ids, k)
|
||||
}
|
||||
sort.Sort(int64Slice(ids))
|
||||
idx := sort.Search(len(ids), func(i int) bool { return ids[i] >= startID })
|
||||
count := int64(0)
|
||||
var end bool
|
||||
for i, v := range ids[idx:] {
|
||||
if count == maxResults {
|
||||
break
|
||||
}
|
||||
if ns, ok := c.normalSockets[v]; ok {
|
||||
sks = append(sks, ns)
|
||||
count++
|
||||
}
|
||||
if i == len(ids[idx:])-1 {
|
||||
end = true
|
||||
break
|
||||
}
|
||||
}
|
||||
c.mu.RUnlock()
|
||||
if count == 0 {
|
||||
end = true
|
||||
}
|
||||
var s []*SocketMetric
|
||||
for _, ns := range sks {
|
||||
sm := &SocketMetric{}
|
||||
sm.SocketData = ns.s.ChannelzMetric()
|
||||
sm.ID = ns.id
|
||||
sm.RefName = ns.refName
|
||||
s = append(s, sm)
|
||||
}
|
||||
return s, end
|
||||
}
|
||||
|
||||
func (c *channelMap) GetChannel(id int64) *ChannelMetric {
|
||||
cm := &ChannelMetric{}
|
||||
var cn *channel
|
||||
var ok bool
|
||||
c.mu.RLock()
|
||||
if cn, ok = c.channels[id]; !ok {
|
||||
// channel with id doesn't exist.
|
||||
c.mu.RUnlock()
|
||||
return nil
|
||||
}
|
||||
cm.NestedChans = copyMap(cn.nestedChans)
|
||||
cm.SubChans = copyMap(cn.subChans)
|
||||
// cn.c can be set to &dummyChannel{} when deleteSelfFromMap is called. Save a copy of cn.c when
|
||||
// holding the lock to prevent potential data race.
|
||||
chanCopy := cn.c
|
||||
c.mu.RUnlock()
|
||||
cm.ChannelData = chanCopy.ChannelzMetric()
|
||||
cm.ID = cn.id
|
||||
cm.RefName = cn.refName
|
||||
cm.Trace = cn.trace.dumpData()
|
||||
return cm
|
||||
}
|
||||
|
||||
func (c *channelMap) GetSubChannel(id int64) *SubChannelMetric {
|
||||
cm := &SubChannelMetric{}
|
||||
var sc *subChannel
|
||||
var ok bool
|
||||
c.mu.RLock()
|
||||
if sc, ok = c.subChannels[id]; !ok {
|
||||
// subchannel with id doesn't exist.
|
||||
c.mu.RUnlock()
|
||||
return nil
|
||||
}
|
||||
cm.Sockets = copyMap(sc.sockets)
|
||||
// sc.c can be set to &dummyChannel{} when deleteSelfFromMap is called. Save a copy of sc.c when
|
||||
// holding the lock to prevent potential data race.
|
||||
chanCopy := sc.c
|
||||
c.mu.RUnlock()
|
||||
cm.ChannelData = chanCopy.ChannelzMetric()
|
||||
cm.ID = sc.id
|
||||
cm.RefName = sc.refName
|
||||
cm.Trace = sc.trace.dumpData()
|
||||
return cm
|
||||
}
|
||||
|
||||
func (c *channelMap) GetSocket(id int64) *SocketMetric {
|
||||
sm := &SocketMetric{}
|
||||
c.mu.RLock()
|
||||
if ls, ok := c.listenSockets[id]; ok {
|
||||
c.mu.RUnlock()
|
||||
sm.SocketData = ls.s.ChannelzMetric()
|
||||
sm.ID = ls.id
|
||||
sm.RefName = ls.refName
|
||||
return sm
|
||||
}
|
||||
if ns, ok := c.normalSockets[id]; ok {
|
||||
c.mu.RUnlock()
|
||||
sm.SocketData = ns.s.ChannelzMetric()
|
||||
sm.ID = ns.id
|
||||
sm.RefName = ns.refName
|
||||
return sm
|
||||
}
|
||||
c.mu.RUnlock()
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *channelMap) GetServer(id int64) *ServerMetric {
|
||||
sm := &ServerMetric{}
|
||||
var svr *server
|
||||
var ok bool
|
||||
c.mu.RLock()
|
||||
if svr, ok = c.servers[id]; !ok {
|
||||
c.mu.RUnlock()
|
||||
return nil
|
||||
}
|
||||
sm.ListenSockets = copyMap(svr.listenSockets)
|
||||
c.mu.RUnlock()
|
||||
sm.ID = svr.id
|
||||
sm.RefName = svr.refName
|
||||
sm.ServerData = svr.s.ChannelzMetric()
|
||||
return sm
|
||||
}
|
||||
|
||||
type idGenerator struct {
|
||||
id int64
|
||||
}
|
||||
|
||||
func (i *idGenerator) reset() {
|
||||
atomic.StoreInt64(&i.id, 0)
|
||||
}
|
||||
|
||||
func (i *idGenerator) genID() int64 {
|
||||
return atomic.AddInt64(&i.id, 1)
|
||||
}
|
102
vendor/google.golang.org/grpc/internal/channelz/logging.go
generated
vendored
Normal file
102
vendor/google.golang.org/grpc/internal/channelz/logging.go
generated
vendored
Normal file
|
@@ -0,0 +1,102 @@
|
|||
/*
|
||||
*
|
||||
* Copyright 2020 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
package channelz
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"google.golang.org/grpc/grpclog"
|
||||
)
|
||||
|
||||
var logger = grpclog.Component("channelz")
|
||||
|
||||
// Info logs and adds a trace event if channelz is on.
|
||||
func Info(l grpclog.DepthLoggerV2, id int64, args ...interface{}) {
|
||||
if IsOn() {
|
||||
AddTraceEvent(l, id, 1, &TraceEventDesc{
|
||||
Desc: fmt.Sprint(args...),
|
||||
Severity: CtINFO,
|
||||
})
|
||||
} else {
|
||||
l.InfoDepth(1, args...)
|
||||
}
|
||||
}
|
||||
|
||||
// Infof logs and adds a trace event if channelz is on.
|
||||
func Infof(l grpclog.DepthLoggerV2, id int64, format string, args ...interface{}) {
|
||||
msg := fmt.Sprintf(format, args...)
|
||||
if IsOn() {
|
||||
AddTraceEvent(l, id, 1, &TraceEventDesc{
|
||||
Desc: msg,
|
||||
Severity: CtINFO,
|
||||
})
|
||||
} else {
|
||||
l.InfoDepth(1, msg)
|
||||
}
|
||||
}
|
||||
|
||||
// Warning logs and adds a trace event if channelz is on.
|
||||
func Warning(l grpclog.DepthLoggerV2, id int64, args ...interface{}) {
|
||||
if IsOn() {
|
||||
AddTraceEvent(l, id, 1, &TraceEventDesc{
|
||||
Desc: fmt.Sprint(args...),
|
||||
Severity: CtWarning,
|
||||
})
|
||||
} else {
|
||||
l.WarningDepth(1, args...)
|
||||
}
|
||||
}
|
||||
|
||||
// Warningf logs and adds a trace event if channelz is on.
|
||||
func Warningf(l grpclog.DepthLoggerV2, id int64, format string, args ...interface{}) {
|
||||
msg := fmt.Sprintf(format, args...)
|
||||
if IsOn() {
|
||||
AddTraceEvent(l, id, 1, &TraceEventDesc{
|
||||
Desc: msg,
|
||||
Severity: CtWarning,
|
||||
})
|
||||
} else {
|
||||
l.WarningDepth(1, msg)
|
||||
}
|
||||
}
|
||||
|
||||
// Error logs and adds a trace event if channelz is on.
|
||||
func Error(l grpclog.DepthLoggerV2, id int64, args ...interface{}) {
|
||||
if IsOn() {
|
||||
AddTraceEvent(l, id, 1, &TraceEventDesc{
|
||||
Desc: fmt.Sprint(args...),
|
||||
Severity: CtError,
|
||||
})
|
||||
} else {
|
||||
l.ErrorDepth(1, args...)
|
||||
}
|
||||
}
|
||||
|
||||
// Errorf logs and adds a trace event if channelz is on.
|
||||
func Errorf(l grpclog.DepthLoggerV2, id int64, format string, args ...interface{}) {
|
||||
msg := fmt.Sprintf(format, args...)
|
||||
if IsOn() {
|
||||
AddTraceEvent(l, id, 1, &TraceEventDesc{
|
||||
Desc: msg,
|
||||
Severity: CtError,
|
||||
})
|
||||
} else {
|
||||
l.ErrorDepth(1, msg)
|
||||
}
|
||||
}
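
// Illustrative sketch, not part of the upstream file: callers pass a depth-aware logger
// together with the channelz id of the entity being logged about. When channelz is on,
// the message also becomes a trace event on that entity; otherwise it is only written to
// the log. The channelID value is a hypothetical id obtained from an earlier Register* call.
func exampleLogging(l grpclog.DepthLoggerV2, channelID int64) {
	Infof(l, channelID, "resolver returned %d addresses", 3)
	Warning(l, channelID, "no addresses available, entering TRANSIENT_FAILURE")
}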
|
701
vendor/google.golang.org/grpc/internal/channelz/types.go
generated
vendored
Normal file
701
vendor/google.golang.org/grpc/internal/channelz/types.go
generated
vendored
Normal file
|
@@ -0,0 +1,701 @@
|
|||
/*
|
||||
*
|
||||
* Copyright 2018 gRPC authors.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*
|
||||
*/
|
||||
|
||||
package channelz
|
||||
|
||||
import (
|
||||
"net"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"google.golang.org/grpc/connectivity"
|
||||
"google.golang.org/grpc/credentials"
|
||||
)
|
||||
|
||||
// entry represents a node in the channelz database.
|
||||
type entry interface {
|
||||
// addChild adds a child e, whose channelz id is id, to the child list.
|
||||
addChild(id int64, e entry)
|
||||
// deleteChild deletes the child with the given channelz id from the child list.
|
||||
deleteChild(id int64)
|
||||
// triggerDelete tries to delete self from the channelz database. However, if the child
|
||||
// list is not empty, then deletion from the database is on hold until the last
|
||||
// child is deleted from the database.
|
||||
triggerDelete()
|
||||
// deleteSelfIfReady checks whether triggerDelete() has been called before, and whether the child
|
||||
// list is now empty. If both conditions are met, then it deletes self from the database.
|
||||
deleteSelfIfReady()
|
||||
// getParentID returns the parent ID of the entry. A parent ID of 0 means no parent.
|
||||
getParentID() int64
|
||||
}
|
||||
|
||||
// dummyEntry is a fake entry to handle entry not found case.
|
||||
type dummyEntry struct {
|
||||
idNotFound int64
|
||||
}
|
||||
|
||||
func (d *dummyEntry) addChild(id int64, e entry) {
|
||||
// Note: It is possible for a normal program to reach here under race condition.
|
||||
// For example, there could be a race between ClientConn.Close() info being propagated
|
||||
// to addrConn and http2Client. ClientConn.Close() cancels the context and results
|
||||
// in http2Client erroring out. The error info is then caught by the transport monitor
|
||||
// before addrConn.tearDown() is called inside ClientConn.Close(). Therefore,
|
||||
// the addrConn will create a new transport. And when registering the new transport in
|
||||
// channelz, its parent addrConn could have already been torn down and deleted
|
||||
// from channelz tracking, and thus the code here is reached.
|
||||
logger.Infof("attempt to add child of type %T with id %d to a parent (id=%d) that doesn't currently exist", e, id, d.idNotFound)
|
||||
}
|
||||
|
||||
func (d *dummyEntry) deleteChild(id int64) {
|
||||
// It is possible for a normal program to reach here under race condition.
|
||||
// Refer to the example described in addChild().
|
||||
logger.Infof("attempt to delete child with id %d from a parent (id=%d) that doesn't currently exist", id, d.idNotFound)
|
||||
}
|
||||
|
||||
func (d *dummyEntry) triggerDelete() {
|
||||
logger.Warningf("attempt to delete an entry (id=%d) that doesn't currently exist", d.idNotFound)
|
||||
}
|
||||
|
||||
func (*dummyEntry) deleteSelfIfReady() {
|
||||
// code should not reach here. deleteSelfIfReady is always called on an existing entry.
|
||||
}
|
||||
|
||||
func (*dummyEntry) getParentID() int64 {
|
||||
return 0
|
||||
}
|
||||
|
||||
// ChannelMetric defines the info channelz provides for a specific Channel, which
|
||||
// includes ChannelInternalMetric and channelz-specific data, such as channelz id,
|
||||
// child list, etc.
|
||||
type ChannelMetric struct {
|
||||
// ID is the channelz id of this channel.
|
||||
ID int64
|
||||
// RefName is the human readable reference string of this channel.
|
||||
RefName string
|
||||
// ChannelData contains channel internal metric reported by the channel through
|
||||
// ChannelzMetric().
|
||||
ChannelData *ChannelInternalMetric
|
||||
// NestedChans tracks the nested channel type children of this channel in the format of
|
||||
// a map from nested channel channelz id to corresponding reference string.
|
||||
NestedChans map[int64]string
|
||||
// SubChans tracks the subchannel type children of this channel in the format of a
|
||||
// map from subchannel channelz id to corresponding reference string.
|
||||
SubChans map[int64]string
|
||||
// Sockets tracks the socket type children of this channel in the format of a map
|
||||
// from socket channelz id to corresponding reference string.
|
||||
// Note that the current grpc implementation doesn't allow a channel to have sockets directly;
|
||||
// therefore, this field is unused.
|
||||
Sockets map[int64]string
|
||||
// Trace contains the most recent traced events.
|
||||
Trace *ChannelTrace
|
||||
}
|
||||
|
||||
// SubChannelMetric defines the info channelz provides for a specific SubChannel,
|
||||
// which includes ChannelInternalMetric and channelz-specific data, such as
|
||||
// channelz id, child list, etc.
|
||||
type SubChannelMetric struct {
|
||||
// ID is the channelz id of this subchannel.
|
||||
ID int64
|
||||
// RefName is the human readable reference string of this subchannel.
|
||||
RefName string
|
||||
// ChannelData contains subchannel internal metric reported by the subchannel
|
||||
// through ChannelzMetric().
|
||||
ChannelData *ChannelInternalMetric
|
||||
// NestedChans tracks the nested channel type children of this subchannel in the format of
|
||||
// a map from nested channel channelz id to corresponding reference string.
|
||||
// Note that the current grpc implementation doesn't allow a subchannel to have nested channels
|
||||
// as children; therefore, this field is unused.
|
||||
NestedChans map[int64]string
|
||||
// SubChans tracks the subchannel type children of this subchannel in the format of a
|
||||
// map from subchannel channelz id to corresponding reference string.
|
||||
// Note that the current grpc implementation doesn't allow a subchannel to have subchannels
|
||||
// as children; therefore, this field is unused.
|
||||
SubChans map[int64]string
|
||||
// Sockets tracks the socket type children of this subchannel in the format of a map
|
||||
// from socket channelz id to corresponding reference string.
|
||||
Sockets map[int64]string
|
||||
// Trace contains the most recent traced events.
|
||||
Trace *ChannelTrace
|
||||
}
|
||||
|
||||
// ChannelInternalMetric defines the struct that the implementor of Channel interface
|
||||
// should return from ChannelzMetric().
|
||||
type ChannelInternalMetric struct {
|
||||
// current connectivity state of the channel.
|
||||
State connectivity.State
|
||||
// The target this channel originally tried to connect to. May be absent.
|
||||
Target string
|
||||
// The number of calls started on the channel.
|
||||
CallsStarted int64
|
||||
// The number of calls that have completed with an OK status.
|
||||
CallsSucceeded int64
|
||||
// The number of calls that have completed with a non-OK status.
|
||||
CallsFailed int64
|
||||
// The last time a call was started on the channel.
|
||||
LastCallStartedTimestamp time.Time
|
||||
}
|
||||
|
||||
// ChannelTrace stores traced events on a channel/subchannel and related info.
|
||||
type ChannelTrace struct {
|
||||
// EventNum is the number of events that ever got traced (i.e. including those that have been deleted)
|
||||
EventNum int64
|
||||
// CreationTime is the creation time of the trace.
|
||||
CreationTime time.Time
|
||||
// Events stores the most recent trace events (up to $maxTraceEntry; newer events will overwrite the
|
||||
// oldest ones).
|
||||
Events []*TraceEvent
|
||||
}
|
||||
|
||||
// TraceEvent represents a single trace event.
|
||||
type TraceEvent struct {
|
||||
// Desc is a simple description of the trace event.
|
||||
Desc string
|
||||
// Severity states the severity of this trace event.
|
||||
Severity Severity
|
||||
// Timestamp is the event time.
|
||||
Timestamp time.Time
|
||||
// RefID is the id of the entity that gets referenced in the event. RefID is 0 if no other entity is
|
||||
// involved in this event.
|
||||
// e.g. SubChannel (id: 4[]) Created. --> RefID = 4, RefName = "" (inside [])
|
||||
RefID int64
|
||||
// RefName is the reference name for the entity that gets referenced in the event.
|
||||
RefName string
|
||||
// RefType indicates the referenced entity type, i.e. Channel or SubChannel.
|
||||
RefType RefChannelType
|
||||
}
|
||||
|
||||
// Channel is the interface that should be satisfied in order to be tracked by
|
||||
// channelz as Channel or SubChannel.
|
||||
type Channel interface {
|
||||
ChannelzMetric() *ChannelInternalMetric
|
||||
}
|
||||
|
||||
type dummyChannel struct{}
|
||||
|
||||
func (d *dummyChannel) ChannelzMetric() *ChannelInternalMetric {
|
||||
return &ChannelInternalMetric{}
|
||||
}
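
// Illustrative sketch, not part of the upstream file: a real Channel implementation
// usually keeps its own counters and turns them into a ChannelInternalMetric snapshot on
// demand. exampleChannelImpl is a hypothetical type showing the expected shape of
// ChannelzMetric().
type exampleChannelImpl struct {
	target         string
	callsStarted   int64
	callsSucceeded int64
	callsFailed    int64
	lastCallTime   time.Time
}

func (e *exampleChannelImpl) ChannelzMetric() *ChannelInternalMetric {
	return &ChannelInternalMetric{
		State:                    connectivity.Ready,
		Target:                   e.target,
		CallsStarted:             atomic.LoadInt64(&e.callsStarted),
		CallsSucceeded:           atomic.LoadInt64(&e.callsSucceeded),
		CallsFailed:              atomic.LoadInt64(&e.callsFailed),
		LastCallStartedTimestamp: e.lastCallTime,
	}
}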
|
||||
|
||||
type channel struct {
|
||||
refName string
|
||||
c Channel
|
||||
closeCalled bool
|
||||
nestedChans map[int64]string
|
||||
subChans map[int64]string
|
||||
id int64
|
||||
pid int64
|
||||
cm *channelMap
|
||||
trace *channelTrace
|
||||
// traceRefCount is the number of trace events that reference this channel.
|
||||
// Non-zero traceRefCount means the trace of this channel cannot be deleted.
|
||||
traceRefCount int32
|
||||
}
|
||||
|
||||
func (c *channel) addChild(id int64, e entry) {
|
||||
switch v := e.(type) {
|
||||
case *subChannel:
|
||||
c.subChans[id] = v.refName
|
||||
case *channel:
|
||||
c.nestedChans[id] = v.refName
|
||||
default:
|
||||
logger.Errorf("cannot add a child (id = %d) of type %T to a channel", id, e)
|
||||
}
|
||||
}
|
||||
|
||||
func (c *channel) deleteChild(id int64) {
|
||||
delete(c.subChans, id)
|
||||
delete(c.nestedChans, id)
|
||||
c.deleteSelfIfReady()
|
||||
}
|
||||
|
||||
func (c *channel) triggerDelete() {
|
||||
c.closeCalled = true
|
||||
c.deleteSelfIfReady()
|
||||
}
|
||||
|
||||
func (c *channel) getParentID() int64 {
|
||||
return c.pid
|
||||
}
|
||||
|
||||
// deleteSelfFromTree tries to delete the channel from the channelz entry relation tree, which means
|
||||
// deleting the channel reference from its parent's child list.
|
||||
//
|
||||
// In order for a channel to be deleted from the tree, it must meet the criteria that removal of the
|
||||
// corresponding grpc object has been invoked, and the channel does not have any children left.
|
||||
//
|
||||
// The returned boolean value indicates whether the channel has been successfully deleted from the tree.
|
||||
func (c *channel) deleteSelfFromTree() (deleted bool) {
|
||||
if !c.closeCalled || len(c.subChans)+len(c.nestedChans) != 0 {
|
||||
return false
|
||||
}
|
||||
// not top channel
|
||||
if c.pid != 0 {
|
||||
c.cm.findEntry(c.pid).deleteChild(c.id)
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// deleteSelfFromMap checks whether it is valid to delete the channel from the map, which means
|
||||
// deleting the channel from channelz's tracking entirely. Users can no longer use id to query the
|
||||
// channel, and its memory will be garbage collected.
|
||||
//
|
||||
// The trace reference count of the channel must be 0 in order to be deleted from the map. This is
|
||||
// specified in the channel tracing gRFC: as long as some other trace has a reference to an entity,
|
||||
// the trace of the referenced entity must not be deleted. In order to release the resource allocated
|
||||
// by grpc, the reference to the grpc object is reset to a dummy object.
|
||||
//
|
||||
// deleteSelfFromMap must be called after deleteSelfFromTree returns true.
|
||||
//
|
||||
// It returns a bool to indicate whether the channel can be safely deleted from the map.
|
||||
func (c *channel) deleteSelfFromMap() (delete bool) {
|
||||
if c.getTraceRefCount() != 0 {
|
||||
c.c = &dummyChannel{}
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// deleteSelfIfReady tries to delete the channel itself from the channelz database.
|
||||
// The delete process includes two steps:
|
||||
// 1. delete the channel from the entry relation tree, i.e. delete the channel reference from its
|
||||
// parent's child list.
|
||||
// 2. delete the channel from the map, i.e. delete the channel entirely from channelz. Lookup by id
|
||||
// will return an entry-not-found error.
|
||||
func (c *channel) deleteSelfIfReady() {
|
||||
if !c.deleteSelfFromTree() {
|
||||
return
|
||||
}
|
||||
if !c.deleteSelfFromMap() {
|
||||
return
|
||||
}
|
||||
c.cm.deleteEntry(c.id)
|
||||
c.trace.clear()
|
||||
}
|
||||
|
||||
func (c *channel) getChannelTrace() *channelTrace {
|
||||
return c.trace
|
||||
}
|
||||
|
||||
func (c *channel) incrTraceRefCount() {
|
||||
atomic.AddInt32(&c.traceRefCount, 1)
|
||||
}
|
||||
|
||||
func (c *channel) decrTraceRefCount() {
|
||||
atomic.AddInt32(&c.traceRefCount, -1)
|
||||
}
|
||||
|
||||
func (c *channel) getTraceRefCount() int {
|
||||
i := atomic.LoadInt32(&c.traceRefCount)
|
||||
return int(i)
|
||||
}
|
||||
|
||||
func (c *channel) getRefName() string {
|
||||
return c.refName
|
||||
}
|
||||
|
||||
type subChannel struct {
|
||||
refName string
|
||||
c Channel
|
||||
closeCalled bool
|
||||
sockets map[int64]string
|
||||
id int64
|
||||
pid int64
|
||||
cm *channelMap
|
||||
trace *channelTrace
|
||||
traceRefCount int32
|
||||
}
|
||||
|
||||
func (sc *subChannel) addChild(id int64, e entry) {
|
||||
if v, ok := e.(*normalSocket); ok {
|
||||
sc.sockets[id] = v.refName
|
||||
} else {
|
||||
logger.Errorf("cannot add a child (id = %d) of type %T to a subChannel", id, e)
|
||||
}
|
||||
}
|
||||
|
||||
func (sc *subChannel) deleteChild(id int64) {
|
||||
delete(sc.sockets, id)
|
||||
sc.deleteSelfIfReady()
|
||||
}
|
||||
|
||||
func (sc *subChannel) triggerDelete() {
|
||||
sc.closeCalled = true
|
||||
sc.deleteSelfIfReady()
|
||||
}
|
||||
|
||||
func (sc *subChannel) getParentID() int64 {
|
||||
return sc.pid
|
||||
}
|
||||
|
||||
// deleteSelfFromTree tries to delete the subchannel from the channelz entry relation tree, which
|
||||
// means deleting the subchannel reference from its parent's child list.
|
||||
//
|
||||
// In order for a subchannel to be deleted from the tree, it must meet the criteria that removal of
|
||||
// the corresponding grpc object has been invoked, and the subchannel does not have any children left.
|
||||
//
|
||||
// The returned boolean value indicates whether the subchannel has been successfully deleted from the tree.
|
||||
func (sc *subChannel) deleteSelfFromTree() (deleted bool) {
|
||||
if !sc.closeCalled || len(sc.sockets) != 0 {
|
||||
return false
|
||||
}
|
||||
sc.cm.findEntry(sc.pid).deleteChild(sc.id)
|
||||
return true
|
||||
}
|
||||
|
||||
// deleteSelfFromMap checks whether it is valid to delete the subchannel from the map, which means
|
||||
// deleting the subchannel from channelz's tracking entirely. Users can no longer use id to query
|
||||
// the subchannel, and its memory will be garbage collected.
|
||||
//
|
||||
// The trace reference count of the subchannel must be 0 in order to be deleted from the map. This is
|
||||
// specified in the channel tracing gRFC that as long as some other trace has reference to an entity,
|
||||
// the trace of the referenced entity must not be deleted. In order to release the resource allocated
|
||||
// by grpc, the reference to the grpc object is reset to a dummy object.
|
||||
//
|
||||
// deleteSelfFromMap must be called after deleteSelfFromTree returns true.
|
||||
//
|
||||
// It returns a bool to indicate whether the subchannel can be safely deleted from the map.
|
||||
func (sc *subChannel) deleteSelfFromMap() (delete bool) {
|
||||
if sc.getTraceRefCount() != 0 {
|
||||
// free the grpc struct (i.e. addrConn)
|
||||
sc.c = &dummyChannel{}
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// deleteSelfIfReady tries to delete the subchannel itself from the channelz database.
|
||||
// The delete process includes two steps:
|
||||
// 1. delete the subchannel from the entry relation tree, i.e. delete the subchannel reference from
|
||||
// its parent's child list.
|
||||
// 2. delete the subchannel from the map, i.e. delete the subchannel entirely from channelz. Lookup
|
||||
// by id will return an entry-not-found error.
|
||||
func (sc *subChannel) deleteSelfIfReady() {
|
||||
if !sc.deleteSelfFromTree() {
|
||||
return
|
||||
}
|
||||
if !sc.deleteSelfFromMap() {
|
||||
return
|
||||
}
|
||||
sc.cm.deleteEntry(sc.id)
|
||||
sc.trace.clear()
|
||||
}
|
||||
|
||||
func (sc *subChannel) getChannelTrace() *channelTrace {
|
||||
return sc.trace
|
||||
}
|
||||
|
||||
func (sc *subChannel) incrTraceRefCount() {
|
||||
atomic.AddInt32(&sc.traceRefCount, 1)
|
||||
}
|
||||
|
||||
func (sc *subChannel) decrTraceRefCount() {
|
||||
atomic.AddInt32(&sc.traceRefCount, -1)
|
||||
}
|
||||
|
||||
func (sc *subChannel) getTraceRefCount() int {
|
||||
i := atomic.LoadInt32(&sc.traceRefCount)
|
||||
return int(i)
|
||||
}
|
||||
|
||||
func (sc *subChannel) getRefName() string {
|
||||
return sc.refName
|
||||
}
|
||||
|
||||
// SocketMetric defines the info channelz provides for a specific Socket, which
|
||||
// includes SocketInternalMetric and channelz-specific data, such as channelz id, etc.
|
||||
type SocketMetric struct {
|
||||
// ID is the channelz id of this socket.
|
||||
ID int64
|
||||
// RefName is the human readable reference string of this socket.
|
||||
RefName string
|
||||
// SocketData contains socket internal metric reported by the socket through
|
||||
// ChannelzMetric().
|
||||
SocketData *SocketInternalMetric
|
||||
}
|
||||
|
||||
// SocketInternalMetric defines the struct that the implementor of Socket interface
|
||||
// should return from ChannelzMetric().
|
||||
type SocketInternalMetric struct {
|
||||
// The number of streams that have been started.
|
||||
StreamsStarted int64
|
||||
// The number of streams that have ended successfully:
|
||||
// On client side, receiving frame with eos bit set.
|
||||
// On server side, sending frame with eos bit set.
|
||||
StreamsSucceeded int64
|
||||
// The number of streams that have ended unsuccessfully:
|
||||
// On client side, termination without receiving frame with eos bit set.
|
||||
// On server side, termination without sending frame with eos bit set.
|
||||
StreamsFailed int64
|
||||
// The number of messages successfully sent on this socket.
|
||||
MessagesSent int64
|
||||
MessagesReceived int64
|
||||
// The number of keep alives sent. This is typically implemented with HTTP/2
|
||||
// ping messages.
|
||||
KeepAlivesSent int64
|
||||
// The last time a stream was created by this endpoint. Usually unset for
|
||||
// servers.
|
||||
LastLocalStreamCreatedTimestamp time.Time
|
||||
// The last time a stream was created by the remote endpoint. Usually unset
|
||||
// for clients.
|
||||
LastRemoteStreamCreatedTimestamp time.Time
|
||||
// The last time a message was sent by this endpoint.
|
||||
LastMessageSentTimestamp time.Time
|
||||
// The last time a message was received by this endpoint.
|
||||
LastMessageReceivedTimestamp time.Time
|
||||
// The amount of window granted to the local endpoint by the remote endpoint.
|
||||
// This may be slightly out of date due to network latency. This does NOT
|
||||
// include stream level or TCP level flow control info.
|
||||
LocalFlowControlWindow int64
|
||||
// The amount of window granted to the remote endpoint by the local endpoint.
|
||||
// This may be slightly out of date due to network latency. This does NOT
|
||||
// include stream level or TCP level flow control info.
|
||||
RemoteFlowControlWindow int64
|
||||
// The locally bound address.
|
||||
LocalAddr net.Addr
|
||||
// The remote bound address. May be absent.
|
||||
RemoteAddr net.Addr
|
||||
// Optional, represents the name of the remote endpoint, if different than
|
||||
// the original target name.
|
||||
RemoteName string
|
||||
SocketOptions *SocketOptionData
|
||||
Security credentials.ChannelzSecurityValue
|
||||
}
|
||||
|
||||
// Socket is the interface that should be satisfied in order to be tracked by
|
||||
// channelz as Socket.
|
||||
type Socket interface {
|
||||
ChannelzMetric() *SocketInternalMetric
|
||||
}
|
||||
|
||||
type listenSocket struct {
|
||||
refName string
|
||||
s Socket
|
||||
id int64
|
||||
pid int64
|
||||
cm *channelMap
|
||||
}
|
||||
|
||||
func (ls *listenSocket) addChild(id int64, e entry) {
|
||||
logger.Errorf("cannot add a child (id = %d) of type %T to a listen socket", id, e)
|
||||
}
|
||||
|
||||
func (ls *listenSocket) deleteChild(id int64) {
|
||||
logger.Errorf("cannot delete a child (id = %d) from a listen socket", id)
|
||||
}
|
||||
|
||||
func (ls *listenSocket) triggerDelete() {
|
||||
ls.cm.deleteEntry(ls.id)
|
||||
ls.cm.findEntry(ls.pid).deleteChild(ls.id)
|
||||
}
|
||||
|
||||
func (ls *listenSocket) deleteSelfIfReady() {
|
||||
logger.Errorf("cannot call deleteSelfIfReady on a listen socket")
|
||||
}
|
||||
|
||||
func (ls *listenSocket) getParentID() int64 {
|
||||
return ls.pid
|
||||
}
|
||||
|
||||
type normalSocket struct {
|
||||
refName string
|
||||
s Socket
|
||||
id int64
|
||||
pid int64
|
||||
cm *channelMap
|
||||
}
|
||||
|
||||
func (ns *normalSocket) addChild(id int64, e entry) {
|
||||
logger.Errorf("cannot add a child (id = %d) of type %T to a normal socket", id, e)
|
||||
}
|
||||
|
||||
func (ns *normalSocket) deleteChild(id int64) {
|
||||
logger.Errorf("cannot delete a child (id = %d) from a normal socket", id)
|
||||
}
|
||||
|
||||
func (ns *normalSocket) triggerDelete() {
|
||||
ns.cm.deleteEntry(ns.id)
|
||||
ns.cm.findEntry(ns.pid).deleteChild(ns.id)
|
||||
}
|
||||
|
||||
func (ns *normalSocket) deleteSelfIfReady() {
|
||||
logger.Errorf("cannot call deleteSelfIfReady on a normal socket")
|
||||
}
|
||||
|
||||
func (ns *normalSocket) getParentID() int64 {
|
||||
return ns.pid
|
||||
}
|
||||
|
||||
// ServerMetric defines the info channelz provides for a specific Server, which
|
||||
// includes ServerInternalMetric and channelz-specific data, such as channelz id,
|
||||
// child list, etc.
|
||||
type ServerMetric struct {
|
||||
// ID is the channelz id of this server.
|
||||
ID int64
|
||||
// RefName is the human readable reference string of this server.
|
||||
RefName string
|
||||
// ServerData contains server internal metric reported by the server through
|
||||
// ChannelzMetric().
|
||||
ServerData *ServerInternalMetric
|
||||
// ListenSockets tracks the listener socket type children of this server in the
|
||||
// format of a map from socket channelz id to corresponding reference string.
|
||||
ListenSockets map[int64]string
|
||||
}
|
||||
|
||||
// ServerInternalMetric defines the struct that the implementor of Server interface
|
||||
// should return from ChannelzMetric().
|
||||
type ServerInternalMetric struct {
|
||||
// The number of incoming calls started on the server.
|
||||
CallsStarted int64
|
||||
// The number of incoming calls that have completed with an OK status.
|
||||
CallsSucceeded int64
|
||||
// The number of incoming calls that have completed with a non-OK status.
|
||||
CallsFailed int64
|
||||
// The last time a call was started on the server.
|
||||
LastCallStartedTimestamp time.Time
|
||||
}
|
||||
|
||||
// Server is the interface to be satisfied in order to be tracked by channelz as
|
||||
// Server.
|
||||
type Server interface {
|
||||
ChannelzMetric() *ServerInternalMetric
|
||||
}
|
||||
|
||||
type server struct {
|
||||
refName string
|
||||
s Server
|
||||
closeCalled bool
|
||||
sockets map[int64]string
|
||||
listenSockets map[int64]string
|
||||
id int64
|
||||
cm *channelMap
|
||||
}
|
||||
|
||||
func (s *server) addChild(id int64, e entry) {
|
||||
switch v := e.(type) {
|
||||
case *normalSocket:
|
||||
s.sockets[id] = v.refName
|
||||
case *listenSocket:
|
||||
s.listenSockets[id] = v.refName
|
||||
default:
|
||||
logger.Errorf("cannot add a child (id = %d) of type %T to a server", id, e)
|
||||
}
|
||||
}
|
||||
|
||||
func (s *server) deleteChild(id int64) {
|
||||
delete(s.sockets, id)
|
||||
delete(s.listenSockets, id)
|
||||
s.deleteSelfIfReady()
|
||||
}
|
||||
|
||||
func (s *server) triggerDelete() {
|
||||
s.closeCalled = true
|
||||
s.deleteSelfIfReady()
|
||||
}
|
||||
|
||||
func (s *server) deleteSelfIfReady() {
|
||||
if !s.closeCalled || len(s.sockets)+len(s.listenSockets) != 0 {
|
||||
return
|
||||
}
|
||||
s.cm.deleteEntry(s.id)
|
||||
}
|
||||
|
||||
func (s *server) getParentID() int64 {
|
||||
return 0
|
||||
}
|
||||
|
||||
type tracedChannel interface {
|
||||
getChannelTrace() *channelTrace
|
||||
incrTraceRefCount()
|
||||
decrTraceRefCount()
|
||||
getRefName() string
|
||||
}
|
||||
|
||||
type channelTrace struct {
|
||||
cm *channelMap
|
||||
createdTime time.Time
|
||||
eventCount int64
|
||||
mu sync.Mutex
|
||||
events []*TraceEvent
|
||||
}
|
||||
|
||||
func (c *channelTrace) append(e *TraceEvent) {
|
||||
c.mu.Lock()
|
||||
if len(c.events) == getMaxTraceEntry() {
|
||||
del := c.events[0]
|
||||
c.events = c.events[1:]
|
||||
if del.RefID != 0 {
|
||||
// start recursive cleanup in a goroutine so as not to block the call originating from grpc.
|
||||
go func() {
|
||||
// need to acquire the c.cm.mu lock to call decrTraceRefCount, which requires the lock to be held.
|
||||
c.cm.mu.Lock()
|
||||
c.cm.decrTraceRefCount(del.RefID)
|
||||
c.cm.mu.Unlock()
|
||||
}()
|
||||
}
|
||||
}
|
||||
e.Timestamp = time.Now()
|
||||
c.events = append(c.events, e)
|
||||
c.eventCount++
|
||||
c.mu.Unlock()
|
||||
}
|
||||
|
||||
func (c *channelTrace) clear() {
|
||||
c.mu.Lock()
|
||||
for _, e := range c.events {
|
||||
if e.RefID != 0 {
|
||||
// the caller should already hold the c.cm.mu lock.
|
||||
c.cm.decrTraceRefCount(e.RefID)
|
||||
}
|
||||
}
|
||||
c.mu.Unlock()
|
||||
}
|
||||
|
||||
// Severity is the severity level of a trace event.
|
||||
// The canonical enumeration of all valid values is here:
|
||||
// https://github.com/grpc/grpc-proto/blob/9b13d199cc0d4703c7ea26c9c330ba695866eb23/grpc/channelz/v1/channelz.proto#L126.
|
||||
type Severity int
|
||||
|
||||
const (
|
||||
// CtUNKNOWN indicates unknown severity of a trace event.
|
||||
CtUNKNOWN Severity = iota
|
||||
// CtINFO indicates info level severity of a trace event.
|
||||
CtINFO
|
||||
// CtWarning indicates warning level severity of a trace event.
|
||||
CtWarning
|
||||
// CtError indicates error level severity of a trace event.
|
||||
CtError
|
||||
)
|
||||
|
||||
// RefChannelType is the type of the entity being referenced in a trace event.
|
||||
type RefChannelType int
|
||||
|
||||
const (
|
||||
// RefChannel indicates the referenced entity is a Channel.
|
||||
RefChannel RefChannelType = iota
|
||||
// RefSubChannel indicates the referenced entity is a SubChannel.
|
||||
RefSubChannel
|
||||
)
|
||||
|
||||
func (c *channelTrace) dumpData() *ChannelTrace {
|
||||
c.mu.Lock()
|
||||
ct := &ChannelTrace{EventNum: c.eventCount, CreationTime: c.createdTime}
|
||||
ct.Events = c.events[:len(c.events)]
|
||||
c.mu.Unlock()
|
||||
return ct
|
||||
}
|