Fix GCS
Signed-off-by: Olivier Gambier <olivier@docker.com>
This commit is contained in:
parent
59401e277b
commit
d1444b56e9
141 changed files with 19483 additions and 4205 deletions
46
vendor/google.golang.org/api/gensupport/backoff.go
generated
vendored
Normal file
46
vendor/google.golang.org/api/gensupport/backoff.go
generated
vendored
Normal file
|
@ -0,0 +1,46 @@
|
|||
// Copyright 2016 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package gensupport
|
||||
|
||||
import (
|
||||
"math/rand"
|
||||
"time"
|
||||
)
|
||||
|
||||
type BackoffStrategy interface {
|
||||
// Pause returns the duration of the next pause and true if the operation should be
|
||||
// retried, or false if no further retries should be attempted.
|
||||
Pause() (time.Duration, bool)
|
||||
|
||||
// Reset restores the strategy to its initial state.
|
||||
Reset()
|
||||
}
|
||||
|
||||
// ExponentialBackoff performs exponential backoff as per https://en.wikipedia.org/wiki/Exponential_backoff.
|
||||
// The initial pause time is given by Base.
|
||||
// Once the total pause time exceeds Max, Pause will indicate no further retries.
|
||||
type ExponentialBackoff struct {
|
||||
Base time.Duration
|
||||
Max time.Duration
|
||||
total time.Duration
|
||||
n uint
|
||||
}
|
||||
|
||||
func (eb *ExponentialBackoff) Pause() (time.Duration, bool) {
|
||||
if eb.total > eb.Max {
|
||||
return 0, false
|
||||
}
|
||||
|
||||
// The next pause is selected from randomly from [0, 2^n * Base).
|
||||
d := time.Duration(rand.Int63n((1 << eb.n) * int64(eb.Base)))
|
||||
eb.total += d
|
||||
eb.n++
|
||||
return d, true
|
||||
}
|
||||
|
||||
func (eb *ExponentialBackoff) Reset() {
|
||||
eb.n = 0
|
||||
eb.total = 0
|
||||
}
|
77
vendor/google.golang.org/api/gensupport/buffer.go
generated
vendored
Normal file
77
vendor/google.golang.org/api/gensupport/buffer.go
generated
vendored
Normal file
|
@ -0,0 +1,77 @@
|
|||
// Copyright 2016 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package gensupport
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"io"
|
||||
|
||||
"google.golang.org/api/googleapi"
|
||||
)
|
||||
|
||||
// ResumableBuffer buffers data from an io.Reader to support uploading media in retryable chunks.
|
||||
type ResumableBuffer struct {
|
||||
media io.Reader
|
||||
|
||||
chunk []byte // The current chunk which is pending upload. The capacity is the chunk size.
|
||||
err error // Any error generated when populating chunk by reading media.
|
||||
|
||||
// The absolute position of chunk in the underlying media.
|
||||
off int64
|
||||
}
|
||||
|
||||
func NewResumableBuffer(media io.Reader, chunkSize int) *ResumableBuffer {
|
||||
return &ResumableBuffer{media: media, chunk: make([]byte, 0, chunkSize)}
|
||||
}
|
||||
|
||||
// Chunk returns the current buffered chunk, the offset in the underlying media
|
||||
// from which the chunk is drawn, and the size of the chunk.
|
||||
// Successive calls to Chunk return the same chunk between calls to Next.
|
||||
func (rb *ResumableBuffer) Chunk() (chunk io.Reader, off int64, size int, err error) {
|
||||
// There may already be data in chunk if Next has not been called since the previous call to Chunk.
|
||||
if rb.err == nil && len(rb.chunk) == 0 {
|
||||
rb.err = rb.loadChunk()
|
||||
}
|
||||
return bytes.NewReader(rb.chunk), rb.off, len(rb.chunk), rb.err
|
||||
}
|
||||
|
||||
// loadChunk will read from media into chunk, up to the capacity of chunk.
|
||||
func (rb *ResumableBuffer) loadChunk() error {
|
||||
bufSize := cap(rb.chunk)
|
||||
rb.chunk = rb.chunk[:bufSize]
|
||||
|
||||
read := 0
|
||||
var err error
|
||||
for err == nil && read < bufSize {
|
||||
var n int
|
||||
n, err = rb.media.Read(rb.chunk[read:])
|
||||
read += n
|
||||
}
|
||||
rb.chunk = rb.chunk[:read]
|
||||
return err
|
||||
}
|
||||
|
||||
// Next advances to the next chunk, which will be returned by the next call to Chunk.
|
||||
// Calls to Next without a corresponding prior call to Chunk will have no effect.
|
||||
func (rb *ResumableBuffer) Next() {
|
||||
rb.off += int64(len(rb.chunk))
|
||||
rb.chunk = rb.chunk[0:0]
|
||||
}
|
||||
|
||||
type readerTyper struct {
|
||||
io.Reader
|
||||
googleapi.ContentTyper
|
||||
}
|
||||
|
||||
// ReaderAtToReader adapts a ReaderAt to be used as a Reader.
|
||||
// If ra implements googleapi.ContentTyper, then the returned reader
|
||||
// will also implement googleapi.ContentTyper, delegating to ra.
|
||||
func ReaderAtToReader(ra io.ReaderAt, size int64) io.Reader {
|
||||
r := io.NewSectionReader(ra, 0, size)
|
||||
if typer, ok := ra.(googleapi.ContentTyper); ok {
|
||||
return readerTyper{r, typer}
|
||||
}
|
||||
return r
|
||||
}
|
10
vendor/google.golang.org/api/gensupport/doc.go
generated
vendored
Normal file
10
vendor/google.golang.org/api/gensupport/doc.go
generated
vendored
Normal file
|
@ -0,0 +1,10 @@
|
|||
// Copyright 2016 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package gensupport is an internal implementation detail used by code
|
||||
// generated by the google-api-go-generator tool.
|
||||
//
|
||||
// This package may be modified at any time without regard for backwards
|
||||
// compatibility. It should not be used directly by API users.
|
||||
package gensupport
|
172
vendor/google.golang.org/api/gensupport/json.go
generated
vendored
Normal file
172
vendor/google.golang.org/api/gensupport/json.go
generated
vendored
Normal file
|
@ -0,0 +1,172 @@
|
|||
// Copyright 2015 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package gensupport
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// MarshalJSON returns a JSON encoding of schema containing only selected fields.
|
||||
// A field is selected if:
|
||||
// * it has a non-empty value, or
|
||||
// * its field name is present in forceSendFields, and
|
||||
// * it is not a nil pointer or nil interface.
|
||||
// The JSON key for each selected field is taken from the field's json: struct tag.
|
||||
func MarshalJSON(schema interface{}, forceSendFields []string) ([]byte, error) {
|
||||
if len(forceSendFields) == 0 {
|
||||
return json.Marshal(schema)
|
||||
}
|
||||
|
||||
mustInclude := make(map[string]struct{})
|
||||
for _, f := range forceSendFields {
|
||||
mustInclude[f] = struct{}{}
|
||||
}
|
||||
|
||||
dataMap, err := schemaToMap(schema, mustInclude)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return json.Marshal(dataMap)
|
||||
}
|
||||
|
||||
func schemaToMap(schema interface{}, mustInclude map[string]struct{}) (map[string]interface{}, error) {
|
||||
m := make(map[string]interface{})
|
||||
s := reflect.ValueOf(schema)
|
||||
st := s.Type()
|
||||
|
||||
for i := 0; i < s.NumField(); i++ {
|
||||
jsonTag := st.Field(i).Tag.Get("json")
|
||||
if jsonTag == "" {
|
||||
continue
|
||||
}
|
||||
tag, err := parseJSONTag(jsonTag)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if tag.ignore {
|
||||
continue
|
||||
}
|
||||
|
||||
v := s.Field(i)
|
||||
f := st.Field(i)
|
||||
if !includeField(v, f, mustInclude) {
|
||||
continue
|
||||
}
|
||||
|
||||
// nil maps are treated as empty maps.
|
||||
if f.Type.Kind() == reflect.Map && v.IsNil() {
|
||||
m[tag.apiName] = map[string]string{}
|
||||
continue
|
||||
}
|
||||
|
||||
// nil slices are treated as empty slices.
|
||||
if f.Type.Kind() == reflect.Slice && v.IsNil() {
|
||||
m[tag.apiName] = []bool{}
|
||||
continue
|
||||
}
|
||||
|
||||
if tag.stringFormat {
|
||||
m[tag.apiName] = formatAsString(v, f.Type.Kind())
|
||||
} else {
|
||||
m[tag.apiName] = v.Interface()
|
||||
}
|
||||
}
|
||||
return m, nil
|
||||
}
|
||||
|
||||
// formatAsString returns a string representation of v, dereferencing it first if possible.
|
||||
func formatAsString(v reflect.Value, kind reflect.Kind) string {
|
||||
if kind == reflect.Ptr && !v.IsNil() {
|
||||
v = v.Elem()
|
||||
}
|
||||
|
||||
return fmt.Sprintf("%v", v.Interface())
|
||||
}
|
||||
|
||||
// jsonTag represents a restricted version of the struct tag format used by encoding/json.
|
||||
// It is used to describe the JSON encoding of fields in a Schema struct.
|
||||
type jsonTag struct {
|
||||
apiName string
|
||||
stringFormat bool
|
||||
ignore bool
|
||||
}
|
||||
|
||||
// parseJSONTag parses a restricted version of the struct tag format used by encoding/json.
|
||||
// The format of the tag must match that generated by the Schema.writeSchemaStruct method
|
||||
// in the api generator.
|
||||
func parseJSONTag(val string) (jsonTag, error) {
|
||||
if val == "-" {
|
||||
return jsonTag{ignore: true}, nil
|
||||
}
|
||||
|
||||
var tag jsonTag
|
||||
|
||||
i := strings.Index(val, ",")
|
||||
if i == -1 || val[:i] == "" {
|
||||
return tag, fmt.Errorf("malformed json tag: %s", val)
|
||||
}
|
||||
|
||||
tag = jsonTag{
|
||||
apiName: val[:i],
|
||||
}
|
||||
|
||||
switch val[i+1:] {
|
||||
case "omitempty":
|
||||
case "omitempty,string":
|
||||
tag.stringFormat = true
|
||||
default:
|
||||
return tag, fmt.Errorf("malformed json tag: %s", val)
|
||||
}
|
||||
|
||||
return tag, nil
|
||||
}
|
||||
|
||||
// Reports whether the struct field "f" with value "v" should be included in JSON output.
|
||||
func includeField(v reflect.Value, f reflect.StructField, mustInclude map[string]struct{}) bool {
|
||||
// The regular JSON encoding of a nil pointer is "null", which means "delete this field".
|
||||
// Therefore, we could enable field deletion by honoring pointer fields' presence in the mustInclude set.
|
||||
// However, many fields are not pointers, so there would be no way to delete these fields.
|
||||
// Rather than partially supporting field deletion, we ignore mustInclude for nil pointer fields.
|
||||
// Deletion will be handled by a separate mechanism.
|
||||
if f.Type.Kind() == reflect.Ptr && v.IsNil() {
|
||||
return false
|
||||
}
|
||||
|
||||
// The "any" type is represented as an interface{}. If this interface
|
||||
// is nil, there is no reasonable representation to send. We ignore
|
||||
// these fields, for the same reasons as given above for pointers.
|
||||
if f.Type.Kind() == reflect.Interface && v.IsNil() {
|
||||
return false
|
||||
}
|
||||
|
||||
_, ok := mustInclude[f.Name]
|
||||
return ok || !isEmptyValue(v)
|
||||
}
|
||||
|
||||
// isEmptyValue reports whether v is the empty value for its type. This
|
||||
// implementation is based on that of the encoding/json package, but its
|
||||
// correctness does not depend on it being identical. What's important is that
|
||||
// this function return false in situations where v should not be sent as part
|
||||
// of a PATCH operation.
|
||||
func isEmptyValue(v reflect.Value) bool {
|
||||
switch v.Kind() {
|
||||
case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
|
||||
return v.Len() == 0
|
||||
case reflect.Bool:
|
||||
return !v.Bool()
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||
return v.Int() == 0
|
||||
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
|
||||
return v.Uint() == 0
|
||||
case reflect.Float32, reflect.Float64:
|
||||
return v.Float() == 0
|
||||
case reflect.Interface, reflect.Ptr:
|
||||
return v.IsNil()
|
||||
}
|
||||
return false
|
||||
}
|
200
vendor/google.golang.org/api/gensupport/media.go
generated
vendored
Normal file
200
vendor/google.golang.org/api/gensupport/media.go
generated
vendored
Normal file
|
@ -0,0 +1,200 @@
|
|||
// Copyright 2016 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package gensupport
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"mime/multipart"
|
||||
"net/http"
|
||||
"net/textproto"
|
||||
|
||||
"google.golang.org/api/googleapi"
|
||||
)
|
||||
|
||||
const sniffBuffSize = 512
|
||||
|
||||
func newContentSniffer(r io.Reader) *contentSniffer {
|
||||
return &contentSniffer{r: r}
|
||||
}
|
||||
|
||||
// contentSniffer wraps a Reader, and reports the content type determined by sniffing up to 512 bytes from the Reader.
|
||||
type contentSniffer struct {
|
||||
r io.Reader
|
||||
start []byte // buffer for the sniffed bytes.
|
||||
err error // set to any error encountered while reading bytes to be sniffed.
|
||||
|
||||
ctype string // set on first sniff.
|
||||
sniffed bool // set to true on first sniff.
|
||||
}
|
||||
|
||||
func (cs *contentSniffer) Read(p []byte) (n int, err error) {
|
||||
// Ensure that the content type is sniffed before any data is consumed from Reader.
|
||||
_, _ = cs.ContentType()
|
||||
|
||||
if len(cs.start) > 0 {
|
||||
n := copy(p, cs.start)
|
||||
cs.start = cs.start[n:]
|
||||
return n, nil
|
||||
}
|
||||
|
||||
// We may have read some bytes into start while sniffing, even if the read ended in an error.
|
||||
// We should first return those bytes, then the error.
|
||||
if cs.err != nil {
|
||||
return 0, cs.err
|
||||
}
|
||||
|
||||
// Now we have handled all bytes that were buffered while sniffing. Now just delegate to the underlying reader.
|
||||
return cs.r.Read(p)
|
||||
}
|
||||
|
||||
// ContentType returns the sniffed content type, and whether the content type was succesfully sniffed.
|
||||
func (cs *contentSniffer) ContentType() (string, bool) {
|
||||
if cs.sniffed {
|
||||
return cs.ctype, cs.ctype != ""
|
||||
}
|
||||
cs.sniffed = true
|
||||
// If ReadAll hits EOF, it returns err==nil.
|
||||
cs.start, cs.err = ioutil.ReadAll(io.LimitReader(cs.r, sniffBuffSize))
|
||||
|
||||
// Don't try to detect the content type based on possibly incomplete data.
|
||||
if cs.err != nil {
|
||||
return "", false
|
||||
}
|
||||
|
||||
cs.ctype = http.DetectContentType(cs.start)
|
||||
return cs.ctype, true
|
||||
}
|
||||
|
||||
// DetermineContentType determines the content type of the supplied reader.
|
||||
// If the content type is already known, it can be specified via ctype.
|
||||
// Otherwise, the content of media will be sniffed to determine the content type.
|
||||
// If media implements googleapi.ContentTyper (deprecated), this will be used
|
||||
// instead of sniffing the content.
|
||||
// After calling DetectContentType the caller must not perform further reads on
|
||||
// media, but rather read from the Reader that is returned.
|
||||
func DetermineContentType(media io.Reader, ctype string) (io.Reader, string) {
|
||||
// Note: callers could avoid calling DetectContentType if ctype != "",
|
||||
// but doing the check inside this function reduces the amount of
|
||||
// generated code.
|
||||
if ctype != "" {
|
||||
return media, ctype
|
||||
}
|
||||
|
||||
// For backwards compatability, allow clients to set content
|
||||
// type by providing a ContentTyper for media.
|
||||
if typer, ok := media.(googleapi.ContentTyper); ok {
|
||||
return media, typer.ContentType()
|
||||
}
|
||||
|
||||
sniffer := newContentSniffer(media)
|
||||
if ctype, ok := sniffer.ContentType(); ok {
|
||||
return sniffer, ctype
|
||||
}
|
||||
// If content type could not be sniffed, reads from sniffer will eventually fail with an error.
|
||||
return sniffer, ""
|
||||
}
|
||||
|
||||
type typeReader struct {
|
||||
io.Reader
|
||||
typ string
|
||||
}
|
||||
|
||||
// multipartReader combines the contents of multiple readers to creat a multipart/related HTTP body.
|
||||
// Close must be called if reads from the multipartReader are abandoned before reaching EOF.
|
||||
type multipartReader struct {
|
||||
pr *io.PipeReader
|
||||
pipeOpen bool
|
||||
ctype string
|
||||
}
|
||||
|
||||
func newMultipartReader(parts []typeReader) *multipartReader {
|
||||
mp := &multipartReader{pipeOpen: true}
|
||||
var pw *io.PipeWriter
|
||||
mp.pr, pw = io.Pipe()
|
||||
mpw := multipart.NewWriter(pw)
|
||||
mp.ctype = "multipart/related; boundary=" + mpw.Boundary()
|
||||
go func() {
|
||||
for _, part := range parts {
|
||||
w, err := mpw.CreatePart(typeHeader(part.typ))
|
||||
if err != nil {
|
||||
mpw.Close()
|
||||
pw.CloseWithError(fmt.Errorf("googleapi: CreatePart failed: %v", err))
|
||||
return
|
||||
}
|
||||
_, err = io.Copy(w, part.Reader)
|
||||
if err != nil {
|
||||
mpw.Close()
|
||||
pw.CloseWithError(fmt.Errorf("googleapi: Copy failed: %v", err))
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
mpw.Close()
|
||||
pw.Close()
|
||||
}()
|
||||
return mp
|
||||
}
|
||||
|
||||
func (mp *multipartReader) Read(data []byte) (n int, err error) {
|
||||
return mp.pr.Read(data)
|
||||
}
|
||||
|
||||
func (mp *multipartReader) Close() error {
|
||||
if !mp.pipeOpen {
|
||||
return nil
|
||||
}
|
||||
mp.pipeOpen = false
|
||||
return mp.pr.Close()
|
||||
}
|
||||
|
||||
// CombineBodyMedia combines a json body with media content to create a multipart/related HTTP body.
|
||||
// It returns a ReadCloser containing the combined body, and the overall "multipart/related" content type, with random boundary.
|
||||
//
|
||||
// The caller must call Close on the returned ReadCloser if reads are abandoned before reaching EOF.
|
||||
func CombineBodyMedia(body io.Reader, bodyContentType string, media io.Reader, mediaContentType string) (io.ReadCloser, string) {
|
||||
mp := newMultipartReader([]typeReader{
|
||||
{body, bodyContentType},
|
||||
{media, mediaContentType},
|
||||
})
|
||||
return mp, mp.ctype
|
||||
}
|
||||
|
||||
func typeHeader(contentType string) textproto.MIMEHeader {
|
||||
h := make(textproto.MIMEHeader)
|
||||
if contentType != "" {
|
||||
h.Set("Content-Type", contentType)
|
||||
}
|
||||
return h
|
||||
}
|
||||
|
||||
// PrepareUpload determines whether the data in the supplied reader should be
|
||||
// uploaded in a single request, or in sequential chunks.
|
||||
// chunkSize is the size of the chunk that media should be split into.
|
||||
// If chunkSize is non-zero and the contents of media do not fit in a single
|
||||
// chunk (or there is an error reading media), then media will be returned as a
|
||||
// ResumableBuffer. Otherwise, media will be returned as a Reader.
|
||||
//
|
||||
// After PrepareUpload has been called, media should no longer be used: the
|
||||
// media content should be accessed via one of the return values.
|
||||
func PrepareUpload(media io.Reader, chunkSize int) (io.Reader,
|
||||
*ResumableBuffer) {
|
||||
if chunkSize == 0 { // do not chunk
|
||||
return media, nil
|
||||
}
|
||||
|
||||
rb := NewResumableBuffer(media, chunkSize)
|
||||
rdr, _, _, err := rb.Chunk()
|
||||
|
||||
if err == io.EOF { // we can upload this in a single request
|
||||
return rdr, nil
|
||||
}
|
||||
// err might be a non-EOF error. If it is, the next call to rb.Chunk will
|
||||
// return the same error. Returning a ResumableBuffer ensures that this error
|
||||
// will be handled at some point.
|
||||
|
||||
return nil, rb
|
||||
}
|
50
vendor/google.golang.org/api/gensupport/params.go
generated
vendored
Normal file
50
vendor/google.golang.org/api/gensupport/params.go
generated
vendored
Normal file
|
@ -0,0 +1,50 @@
|
|||
// Copyright 2015 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package gensupport
|
||||
|
||||
import (
|
||||
"net/url"
|
||||
|
||||
"google.golang.org/api/googleapi"
|
||||
)
|
||||
|
||||
// URLParams is a simplified replacement for url.Values
|
||||
// that safely builds up URL parameters for encoding.
|
||||
type URLParams map[string][]string
|
||||
|
||||
// Get returns the first value for the given key, or "".
|
||||
func (u URLParams) Get(key string) string {
|
||||
vs := u[key]
|
||||
if len(vs) == 0 {
|
||||
return ""
|
||||
}
|
||||
return vs[0]
|
||||
}
|
||||
|
||||
// Set sets the key to value.
|
||||
// It replaces any existing values.
|
||||
func (u URLParams) Set(key, value string) {
|
||||
u[key] = []string{value}
|
||||
}
|
||||
|
||||
// SetMulti sets the key to an array of values.
|
||||
// It replaces any existing values.
|
||||
// Note that values must not be modified after calling SetMulti
|
||||
// so the caller is responsible for making a copy if necessary.
|
||||
func (u URLParams) SetMulti(key string, values []string) {
|
||||
u[key] = values
|
||||
}
|
||||
|
||||
// Encode encodes the values into ``URL encoded'' form
|
||||
// ("bar=baz&foo=quux") sorted by key.
|
||||
func (u URLParams) Encode() string {
|
||||
return url.Values(u).Encode()
|
||||
}
|
||||
|
||||
func SetOptions(u URLParams, opts ...googleapi.CallOption) {
|
||||
for _, o := range opts {
|
||||
u.Set(o.Get())
|
||||
}
|
||||
}
|
198
vendor/google.golang.org/api/gensupport/resumable.go
generated
vendored
Normal file
198
vendor/google.golang.org/api/gensupport/resumable.go
generated
vendored
Normal file
|
@ -0,0 +1,198 @@
|
|||
// Copyright 2016 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package gensupport
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
"golang.org/x/net/context/ctxhttp"
|
||||
)
|
||||
|
||||
const (
|
||||
// statusResumeIncomplete is the code returned by the Google uploader
|
||||
// when the transfer is not yet complete.
|
||||
statusResumeIncomplete = 308
|
||||
|
||||
// statusTooManyRequests is returned by the storage API if the
|
||||
// per-project limits have been temporarily exceeded. The request
|
||||
// should be retried.
|
||||
// https://cloud.google.com/storage/docs/json_api/v1/status-codes#standardcodes
|
||||
statusTooManyRequests = 429
|
||||
)
|
||||
|
||||
// ResumableUpload is used by the generated APIs to provide resumable uploads.
|
||||
// It is not used by developers directly.
|
||||
type ResumableUpload struct {
|
||||
Client *http.Client
|
||||
// URI is the resumable resource destination provided by the server after specifying "&uploadType=resumable".
|
||||
URI string
|
||||
UserAgent string // User-Agent for header of the request
|
||||
// Media is the object being uploaded.
|
||||
Media *ResumableBuffer
|
||||
// MediaType defines the media type, e.g. "image/jpeg".
|
||||
MediaType string
|
||||
|
||||
mu sync.Mutex // guards progress
|
||||
progress int64 // number of bytes uploaded so far
|
||||
|
||||
// Callback is an optional function that will be periodically called with the cumulative number of bytes uploaded.
|
||||
Callback func(int64)
|
||||
|
||||
// If not specified, a default exponential backoff strategy will be used.
|
||||
Backoff BackoffStrategy
|
||||
}
|
||||
|
||||
// Progress returns the number of bytes uploaded at this point.
|
||||
func (rx *ResumableUpload) Progress() int64 {
|
||||
rx.mu.Lock()
|
||||
defer rx.mu.Unlock()
|
||||
return rx.progress
|
||||
}
|
||||
|
||||
// doUploadRequest performs a single HTTP request to upload data.
|
||||
// off specifies the offset in rx.Media from which data is drawn.
|
||||
// size is the number of bytes in data.
|
||||
// final specifies whether data is the final chunk to be uploaded.
|
||||
func (rx *ResumableUpload) doUploadRequest(ctx context.Context, data io.Reader, off, size int64, final bool) (*http.Response, error) {
|
||||
req, err := http.NewRequest("POST", rx.URI, data)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
req.ContentLength = size
|
||||
var contentRange string
|
||||
if final {
|
||||
if size == 0 {
|
||||
contentRange = fmt.Sprintf("bytes */%v", off)
|
||||
} else {
|
||||
contentRange = fmt.Sprintf("bytes %v-%v/%v", off, off+size-1, off+size)
|
||||
}
|
||||
} else {
|
||||
contentRange = fmt.Sprintf("bytes %v-%v/*", off, off+size-1)
|
||||
}
|
||||
req.Header.Set("Content-Range", contentRange)
|
||||
req.Header.Set("Content-Type", rx.MediaType)
|
||||
req.Header.Set("User-Agent", rx.UserAgent)
|
||||
return ctxhttp.Do(ctx, rx.Client, req)
|
||||
|
||||
}
|
||||
|
||||
// reportProgress calls a user-supplied callback to report upload progress.
|
||||
// If old==updated, the callback is not called.
|
||||
func (rx *ResumableUpload) reportProgress(old, updated int64) {
|
||||
if updated-old == 0 {
|
||||
return
|
||||
}
|
||||
rx.mu.Lock()
|
||||
rx.progress = updated
|
||||
rx.mu.Unlock()
|
||||
if rx.Callback != nil {
|
||||
rx.Callback(updated)
|
||||
}
|
||||
}
|
||||
|
||||
// transferChunk performs a single HTTP request to upload a single chunk from rx.Media.
|
||||
func (rx *ResumableUpload) transferChunk(ctx context.Context) (*http.Response, error) {
|
||||
chunk, off, size, err := rx.Media.Chunk()
|
||||
|
||||
done := err == io.EOF
|
||||
if !done && err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
res, err := rx.doUploadRequest(ctx, chunk, off, int64(size), done)
|
||||
if err != nil {
|
||||
return res, err
|
||||
}
|
||||
|
||||
if res.StatusCode == statusResumeIncomplete || res.StatusCode == http.StatusOK {
|
||||
rx.reportProgress(off, off+int64(size))
|
||||
}
|
||||
|
||||
if res.StatusCode == statusResumeIncomplete {
|
||||
rx.Media.Next()
|
||||
}
|
||||
return res, nil
|
||||
}
|
||||
|
||||
func contextDone(ctx context.Context) bool {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return true
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
// Upload starts the process of a resumable upload with a cancellable context.
|
||||
// It retries using the provided back off strategy until cancelled or the
|
||||
// strategy indicates to stop retrying.
|
||||
// It is called from the auto-generated API code and is not visible to the user.
|
||||
// rx is private to the auto-generated API code.
|
||||
// Exactly one of resp or err will be nil. If resp is non-nil, the caller must call resp.Body.Close.
|
||||
func (rx *ResumableUpload) Upload(ctx context.Context) (resp *http.Response, err error) {
|
||||
var pause time.Duration
|
||||
backoff := rx.Backoff
|
||||
if backoff == nil {
|
||||
backoff = DefaultBackoffStrategy()
|
||||
}
|
||||
|
||||
for {
|
||||
// Ensure that we return in the case of cancelled context, even if pause is 0.
|
||||
if contextDone(ctx) {
|
||||
return nil, ctx.Err()
|
||||
}
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return nil, ctx.Err()
|
||||
case <-time.After(pause):
|
||||
}
|
||||
|
||||
resp, err = rx.transferChunk(ctx)
|
||||
|
||||
var status int
|
||||
if resp != nil {
|
||||
status = resp.StatusCode
|
||||
}
|
||||
|
||||
// Check if we should retry the request.
|
||||
if shouldRetry(status, err) {
|
||||
var retry bool
|
||||
pause, retry = backoff.Pause()
|
||||
if retry {
|
||||
if resp != nil && resp.Body != nil {
|
||||
resp.Body.Close()
|
||||
}
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
// If the chunk was uploaded successfully, but there's still
|
||||
// more to go, upload the next chunk without any delay.
|
||||
if status == statusResumeIncomplete {
|
||||
pause = 0
|
||||
backoff.Reset()
|
||||
resp.Body.Close()
|
||||
continue
|
||||
}
|
||||
|
||||
// It's possible for err and resp to both be non-nil here, but we expose a simpler
|
||||
// contract to our callers: exactly one of resp and err will be non-nil. This means
|
||||
// that any response body must be closed here before returning a non-nil error.
|
||||
if err != nil {
|
||||
if resp != nil && resp.Body != nil {
|
||||
resp.Body.Close()
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return resp, nil
|
||||
}
|
||||
}
|
77
vendor/google.golang.org/api/gensupport/retry.go
generated
vendored
Normal file
77
vendor/google.golang.org/api/gensupport/retry.go
generated
vendored
Normal file
|
@ -0,0 +1,77 @@
|
|||
package gensupport
|
||||
|
||||
import (
|
||||
"io"
|
||||
"net"
|
||||
"net/http"
|
||||
"time"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
)
|
||||
|
||||
// Retry invokes the given function, retrying it multiple times if the connection failed or
|
||||
// the HTTP status response indicates the request should be attempted again. ctx may be nil.
|
||||
func Retry(ctx context.Context, f func() (*http.Response, error), backoff BackoffStrategy) (*http.Response, error) {
|
||||
for {
|
||||
resp, err := f()
|
||||
|
||||
var status int
|
||||
if resp != nil {
|
||||
status = resp.StatusCode
|
||||
}
|
||||
|
||||
// Return if we shouldn't retry.
|
||||
pause, retry := backoff.Pause()
|
||||
if !shouldRetry(status, err) || !retry {
|
||||
return resp, err
|
||||
}
|
||||
|
||||
// Ensure the response body is closed, if any.
|
||||
if resp != nil && resp.Body != nil {
|
||||
resp.Body.Close()
|
||||
}
|
||||
|
||||
// Pause, but still listen to ctx.Done if context is not nil.
|
||||
var done <-chan struct{}
|
||||
if ctx != nil {
|
||||
done = ctx.Done()
|
||||
}
|
||||
select {
|
||||
case <-done:
|
||||
return nil, ctx.Err()
|
||||
case <-time.After(pause):
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// DefaultBackoffStrategy returns a default strategy to use for retrying failed upload requests.
|
||||
func DefaultBackoffStrategy() BackoffStrategy {
|
||||
return &ExponentialBackoff{
|
||||
Base: 250 * time.Millisecond,
|
||||
Max: 16 * time.Second,
|
||||
}
|
||||
}
|
||||
|
||||
// shouldRetry returns true if the HTTP response / error indicates that the
|
||||
// request should be attempted again.
|
||||
func shouldRetry(status int, err error) bool {
|
||||
// Retry for 5xx response codes.
|
||||
if 500 <= status && status < 600 {
|
||||
return true
|
||||
}
|
||||
|
||||
// Retry on statusTooManyRequests{
|
||||
if status == statusTooManyRequests {
|
||||
return true
|
||||
}
|
||||
|
||||
// Retry on unexpected EOFs and temporary network errors.
|
||||
if err == io.ErrUnexpectedEOF {
|
||||
return true
|
||||
}
|
||||
if err, ok := err.(net.Error); ok {
|
||||
return err.Temporary()
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
383
vendor/google.golang.org/api/googleapi/googleapi.go
generated
vendored
383
vendor/google.golang.org/api/googleapi/googleapi.go
generated
vendored
|
@ -9,21 +9,13 @@ package googleapi
|
|||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"mime/multipart"
|
||||
"net/http"
|
||||
"net/textproto"
|
||||
"net/url"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
"google.golang.org/api/googleapi/internal/uritemplates"
|
||||
)
|
||||
|
||||
|
@ -42,17 +34,28 @@ type SizeReaderAt interface {
|
|||
Size() int64
|
||||
}
|
||||
|
||||
// ServerResponse is embedded in each Do response and
|
||||
// provides the HTTP status code and header sent by the server.
|
||||
type ServerResponse struct {
|
||||
// HTTPStatusCode is the server's response status code.
|
||||
// When using a resource method's Do call, this will always be in the 2xx range.
|
||||
HTTPStatusCode int
|
||||
// Header contains the response header fields from the server.
|
||||
Header http.Header
|
||||
}
|
||||
|
||||
const (
|
||||
Version = "0.5"
|
||||
|
||||
// statusResumeIncomplete is the code returned by the Google uploader when the transfer is not yet complete.
|
||||
statusResumeIncomplete = 308
|
||||
|
||||
// UserAgent is the header string used to identify this package.
|
||||
UserAgent = "google-api-go-client/" + Version
|
||||
|
||||
// uploadPause determines the delay between failed upload attempts
|
||||
uploadPause = 1 * time.Second
|
||||
// The default chunk size to use for resumable uplods if not specified by the user.
|
||||
DefaultUploadChunkSize = 8 * 1024 * 1024
|
||||
|
||||
// The minimum chunk size that can be used for resumable uploads. All
|
||||
// user-specified chunk sizes must be multiple of this value.
|
||||
MinUploadChunkSize = 256 * 1024
|
||||
)
|
||||
|
||||
// Error contains an error response from the server.
|
||||
|
@ -65,6 +68,8 @@ type Error struct {
|
|||
// Body is the raw response returned by the server.
|
||||
// It is often but not always JSON, depending on how the request fails.
|
||||
Body string
|
||||
// Header contains the response header fields from the server.
|
||||
Header http.Header
|
||||
|
||||
Errors []ErrorItem
|
||||
}
|
||||
|
@ -122,6 +127,34 @@ func CheckResponse(res *http.Response) error {
|
|||
return jerr.Error
|
||||
}
|
||||
}
|
||||
return &Error{
|
||||
Code: res.StatusCode,
|
||||
Body: string(slurp),
|
||||
Header: res.Header,
|
||||
}
|
||||
}
|
||||
|
||||
// IsNotModified reports whether err is the result of the
|
||||
// server replying with http.StatusNotModified.
|
||||
// Such error values are sometimes returned by "Do" methods
|
||||
// on calls when If-None-Match is used.
|
||||
func IsNotModified(err error) bool {
|
||||
if err == nil {
|
||||
return false
|
||||
}
|
||||
ae, ok := err.(*Error)
|
||||
return ok && ae.Code == http.StatusNotModified
|
||||
}
|
||||
|
||||
// CheckMediaResponse returns an error (of type *Error) if the response
|
||||
// status code is not 2xx. Unlike CheckResponse it does not assume the
|
||||
// body is a JSON error document.
|
||||
func CheckMediaResponse(res *http.Response) error {
|
||||
if res.StatusCode >= 200 && res.StatusCode <= 299 {
|
||||
return nil
|
||||
}
|
||||
slurp, _ := ioutil.ReadAll(io.LimitReader(res.Body, 1<<20))
|
||||
res.Body.Close()
|
||||
return &Error{
|
||||
Code: res.StatusCode,
|
||||
Body: string(slurp),
|
||||
|
@ -148,52 +181,6 @@ func (wrap MarshalStyle) JSONReader(v interface{}) (io.Reader, error) {
|
|||
return buf, nil
|
||||
}
|
||||
|
||||
func getMediaType(media io.Reader) (io.Reader, string) {
|
||||
if typer, ok := media.(ContentTyper); ok {
|
||||
return media, typer.ContentType()
|
||||
}
|
||||
|
||||
pr, pw := io.Pipe()
|
||||
typ := "application/octet-stream"
|
||||
buf, err := ioutil.ReadAll(io.LimitReader(media, 512))
|
||||
if err != nil {
|
||||
pw.CloseWithError(fmt.Errorf("error reading media: %v", err))
|
||||
return pr, typ
|
||||
}
|
||||
typ = http.DetectContentType(buf)
|
||||
mr := io.MultiReader(bytes.NewReader(buf), media)
|
||||
go func() {
|
||||
_, err = io.Copy(pw, mr)
|
||||
if err != nil {
|
||||
pw.CloseWithError(fmt.Errorf("error reading media: %v", err))
|
||||
return
|
||||
}
|
||||
pw.Close()
|
||||
}()
|
||||
return pr, typ
|
||||
}
|
||||
|
||||
// DetectMediaType detects and returns the content type of the provided media.
|
||||
// If the type can not be determined, "application/octet-stream" is returned.
|
||||
func DetectMediaType(media io.ReaderAt) string {
|
||||
if typer, ok := media.(ContentTyper); ok {
|
||||
return typer.ContentType()
|
||||
}
|
||||
|
||||
typ := "application/octet-stream"
|
||||
buf := make([]byte, 1024)
|
||||
n, err := media.ReadAt(buf, 0)
|
||||
buf = buf[:n]
|
||||
if err == nil || err == io.EOF {
|
||||
typ = http.DetectContentType(buf)
|
||||
}
|
||||
return typ
|
||||
}
|
||||
|
||||
type Lengther interface {
|
||||
Len() int
|
||||
}
|
||||
|
||||
// endingWithErrorReader from r until it returns an error. If the
|
||||
// final error from r is io.EOF and e is non-nil, e is used instead.
|
||||
type endingWithErrorReader struct {
|
||||
|
@ -209,12 +196,6 @@ func (er endingWithErrorReader) Read(p []byte) (n int, err error) {
|
|||
return
|
||||
}
|
||||
|
||||
func typeHeader(contentType string) textproto.MIMEHeader {
|
||||
h := make(textproto.MIMEHeader)
|
||||
h.Set("Content-Type", contentType)
|
||||
return h
|
||||
}
|
||||
|
||||
// countingWriter counts the number of bytes it receives to write, but
|
||||
// discards them.
|
||||
type countingWriter struct {
|
||||
|
@ -226,203 +207,65 @@ func (w countingWriter) Write(p []byte) (int, error) {
|
|||
return len(p), nil
|
||||
}
|
||||
|
||||
// ConditionallyIncludeMedia does nothing if media is nil.
|
||||
//
|
||||
// bodyp is an in/out parameter. It should initially point to the
|
||||
// reader of the application/json (or whatever) payload to send in the
|
||||
// API request. It's updated to point to the multipart body reader.
|
||||
//
|
||||
// ctypep is an in/out parameter. It should initially point to the
|
||||
// content type of the bodyp, usually "application/json". It's updated
|
||||
// to the "multipart/related" content type, with random boundary.
|
||||
//
|
||||
// The return value is the content-length of the entire multpart body.
|
||||
func ConditionallyIncludeMedia(media io.Reader, bodyp *io.Reader, ctypep *string) (cancel func(), ok bool) {
|
||||
if media == nil {
|
||||
return
|
||||
}
|
||||
// Get the media type, which might return a different reader instance.
|
||||
var mediaType string
|
||||
media, mediaType = getMediaType(media)
|
||||
|
||||
body, bodyType := *bodyp, *ctypep
|
||||
|
||||
pr, pw := io.Pipe()
|
||||
mpw := multipart.NewWriter(pw)
|
||||
*bodyp = pr
|
||||
*ctypep = "multipart/related; boundary=" + mpw.Boundary()
|
||||
go func() {
|
||||
w, err := mpw.CreatePart(typeHeader(bodyType))
|
||||
if err != nil {
|
||||
mpw.Close()
|
||||
pw.CloseWithError(fmt.Errorf("googleapi: body CreatePart failed: %v", err))
|
||||
return
|
||||
}
|
||||
_, err = io.Copy(w, body)
|
||||
if err != nil {
|
||||
mpw.Close()
|
||||
pw.CloseWithError(fmt.Errorf("googleapi: body Copy failed: %v", err))
|
||||
return
|
||||
}
|
||||
|
||||
w, err = mpw.CreatePart(typeHeader(mediaType))
|
||||
if err != nil {
|
||||
mpw.Close()
|
||||
pw.CloseWithError(fmt.Errorf("googleapi: media CreatePart failed: %v", err))
|
||||
return
|
||||
}
|
||||
_, err = io.Copy(w, media)
|
||||
if err != nil {
|
||||
mpw.Close()
|
||||
pw.CloseWithError(fmt.Errorf("googleapi: media Copy failed: %v", err))
|
||||
return
|
||||
}
|
||||
mpw.Close()
|
||||
pw.Close()
|
||||
}()
|
||||
cancel = func() { pw.CloseWithError(errAborted) }
|
||||
return cancel, true
|
||||
}
|
||||
|
||||
var errAborted = errors.New("googleapi: upload aborted")
|
||||
|
||||
// ProgressUpdater is a function that is called upon every progress update of a resumable upload.
|
||||
// This is the only part of a resumable upload (from googleapi) that is usable by the developer.
|
||||
// The remaining usable pieces of resumable uploads is exposed in each auto-generated API.
|
||||
type ProgressUpdater func(current, total int64)
|
||||
|
||||
// ResumableUpload is used by the generated APIs to provide resumable uploads.
|
||||
type MediaOption interface {
|
||||
setOptions(o *MediaOptions)
|
||||
}
|
||||
|
||||
type contentTypeOption string
|
||||
|
||||
func (ct contentTypeOption) setOptions(o *MediaOptions) {
|
||||
o.ContentType = string(ct)
|
||||
if o.ContentType == "" {
|
||||
o.ForceEmptyContentType = true
|
||||
}
|
||||
}
|
||||
|
||||
// ContentType returns a MediaOption which sets the Content-Type header for media uploads.
|
||||
// If ctype is empty, the Content-Type header will be omitted.
|
||||
func ContentType(ctype string) MediaOption {
|
||||
return contentTypeOption(ctype)
|
||||
}
|
||||
|
||||
type chunkSizeOption int
|
||||
|
||||
func (cs chunkSizeOption) setOptions(o *MediaOptions) {
|
||||
size := int(cs)
|
||||
if size%MinUploadChunkSize != 0 {
|
||||
size += MinUploadChunkSize - (size % MinUploadChunkSize)
|
||||
}
|
||||
o.ChunkSize = size
|
||||
}
|
||||
|
||||
// ChunkSize returns a MediaOption which sets the chunk size for media uploads.
|
||||
// size will be rounded up to the nearest multiple of 256K.
|
||||
// Media which contains fewer than size bytes will be uploaded in a single request.
|
||||
// Media which contains size bytes or more will be uploaded in separate chunks.
|
||||
// If size is zero, media will be uploaded in a single request.
|
||||
func ChunkSize(size int) MediaOption {
|
||||
return chunkSizeOption(size)
|
||||
}
|
||||
|
||||
// MediaOptions stores options for customizing media upload. It is not used by developers directly.
|
||||
type MediaOptions struct {
|
||||
ContentType string
|
||||
ForceEmptyContentType bool
|
||||
|
||||
ChunkSize int
|
||||
}
|
||||
|
||||
// ProcessMediaOptions stores options from opts in a MediaOptions.
|
||||
// It is not used by developers directly.
|
||||
type ResumableUpload struct {
|
||||
Client *http.Client
|
||||
// URI is the resumable resource destination provided by the server after specifying "&uploadType=resumable".
|
||||
URI string
|
||||
UserAgent string // User-Agent for header of the request
|
||||
// Media is the object being uploaded.
|
||||
Media io.ReaderAt
|
||||
// MediaType defines the media type, e.g. "image/jpeg".
|
||||
MediaType string
|
||||
// ContentLength is the full size of the object being uploaded.
|
||||
ContentLength int64
|
||||
|
||||
mu sync.Mutex // guards progress
|
||||
progress int64 // number of bytes uploaded so far
|
||||
started bool // whether the upload has been started
|
||||
|
||||
// Callback is an optional function that will be called upon every progress update.
|
||||
Callback ProgressUpdater
|
||||
}
|
||||
|
||||
var (
|
||||
// rangeRE matches the transfer status response from the server. $1 is the last byte index uploaded.
|
||||
rangeRE = regexp.MustCompile(`^bytes=0\-(\d+)$`)
|
||||
// chunkSize is the size of the chunks created during a resumable upload and should be a power of two.
|
||||
// 1<<18 is the minimum size supported by the Google uploader, and there is no maximum.
|
||||
chunkSize int64 = 1 << 18
|
||||
)
|
||||
|
||||
// Progress returns the number of bytes uploaded at this point.
|
||||
func (rx *ResumableUpload) Progress() int64 {
|
||||
rx.mu.Lock()
|
||||
defer rx.mu.Unlock()
|
||||
return rx.progress
|
||||
}
|
||||
|
||||
func (rx *ResumableUpload) transferStatus() (int64, *http.Response, error) {
|
||||
req, _ := http.NewRequest("POST", rx.URI, nil)
|
||||
req.ContentLength = 0
|
||||
req.Header.Set("User-Agent", rx.UserAgent)
|
||||
req.Header.Set("Content-Range", fmt.Sprintf("bytes */%v", rx.ContentLength))
|
||||
res, err := rx.Client.Do(req)
|
||||
if err != nil || res.StatusCode != statusResumeIncomplete {
|
||||
return 0, res, err
|
||||
func ProcessMediaOptions(opts []MediaOption) *MediaOptions {
|
||||
mo := &MediaOptions{ChunkSize: DefaultUploadChunkSize}
|
||||
for _, o := range opts {
|
||||
o.setOptions(mo)
|
||||
}
|
||||
var start int64
|
||||
if m := rangeRE.FindStringSubmatch(res.Header.Get("Range")); len(m) == 2 {
|
||||
start, err = strconv.ParseInt(m[1], 10, 64)
|
||||
if err != nil {
|
||||
return 0, nil, fmt.Errorf("unable to parse range size %v", m[1])
|
||||
}
|
||||
start += 1 // Start at the next byte
|
||||
}
|
||||
return start, res, nil
|
||||
}
|
||||
|
||||
type chunk struct {
|
||||
body io.Reader
|
||||
size int64
|
||||
err error
|
||||
}
|
||||
|
||||
func (rx *ResumableUpload) transferChunks(ctx context.Context) (*http.Response, error) {
|
||||
var start int64
|
||||
var err error
|
||||
res := &http.Response{}
|
||||
if rx.started {
|
||||
start, res, err = rx.transferStatus()
|
||||
if err != nil || res.StatusCode != statusResumeIncomplete {
|
||||
return res, err
|
||||
}
|
||||
}
|
||||
rx.started = true
|
||||
|
||||
for {
|
||||
select { // Check for cancellation
|
||||
case <-ctx.Done():
|
||||
res.StatusCode = http.StatusRequestTimeout
|
||||
return res, ctx.Err()
|
||||
default:
|
||||
}
|
||||
reqSize := rx.ContentLength - start
|
||||
if reqSize > chunkSize {
|
||||
reqSize = chunkSize
|
||||
}
|
||||
r := io.NewSectionReader(rx.Media, start, reqSize)
|
||||
req, _ := http.NewRequest("POST", rx.URI, r)
|
||||
req.ContentLength = reqSize
|
||||
req.Header.Set("Content-Range", fmt.Sprintf("bytes %v-%v/%v", start, start+reqSize-1, rx.ContentLength))
|
||||
req.Header.Set("Content-Type", rx.MediaType)
|
||||
req.Header.Set("User-Agent", rx.UserAgent)
|
||||
res, err = rx.Client.Do(req)
|
||||
start += reqSize
|
||||
if err == nil && (res.StatusCode == statusResumeIncomplete || res.StatusCode == http.StatusOK) {
|
||||
rx.mu.Lock()
|
||||
rx.progress = start // keep track of number of bytes sent so far
|
||||
rx.mu.Unlock()
|
||||
if rx.Callback != nil {
|
||||
rx.Callback(start, rx.ContentLength)
|
||||
}
|
||||
}
|
||||
if err != nil || res.StatusCode != statusResumeIncomplete {
|
||||
break
|
||||
}
|
||||
}
|
||||
return res, err
|
||||
}
|
||||
|
||||
var sleep = time.Sleep // override in unit tests
|
||||
|
||||
// Upload starts the process of a resumable upload with a cancellable context.
|
||||
// It retries indefinitely (with a pause of uploadPause between attempts) until cancelled.
|
||||
// It is called from the auto-generated API code and is not visible to the user.
|
||||
// rx is private to the auto-generated API code.
|
||||
func (rx *ResumableUpload) Upload(ctx context.Context) (*http.Response, error) {
|
||||
var res *http.Response
|
||||
var err error
|
||||
for {
|
||||
res, err = rx.transferChunks(ctx)
|
||||
if err != nil || res.StatusCode == http.StatusCreated || res.StatusCode == http.StatusOK {
|
||||
return res, err
|
||||
}
|
||||
select { // Check for cancellation
|
||||
case <-ctx.Done():
|
||||
res.StatusCode = http.StatusRequestTimeout
|
||||
return res, ctx.Err()
|
||||
default:
|
||||
}
|
||||
sleep(uploadPause)
|
||||
}
|
||||
return res, err
|
||||
return mo
|
||||
}
|
||||
|
||||
func ResolveRelative(basestr, relstr string) string {
|
||||
|
@ -549,3 +392,33 @@ func CombineFields(s []Field) string {
|
|||
}
|
||||
return strings.Join(r, ",")
|
||||
}
|
||||
|
||||
// A CallOption is an optional argument to an API call.
|
||||
// It should be treated as an opaque value by users of Google APIs.
|
||||
//
|
||||
// A CallOption is something that configures an API call in a way that is
|
||||
// not specific to that API; for instance, controlling the quota user for
|
||||
// an API call is common across many APIs, and is thus a CallOption.
|
||||
type CallOption interface {
|
||||
Get() (key, value string)
|
||||
}
|
||||
|
||||
// QuotaUser returns a CallOption that will set the quota user for a call.
|
||||
// The quota user can be used by server-side applications to control accounting.
|
||||
// It can be an arbitrary string up to 40 characters, and will override UserIP
|
||||
// if both are provided.
|
||||
func QuotaUser(u string) CallOption { return quotaUser(u) }
|
||||
|
||||
type quotaUser string
|
||||
|
||||
func (q quotaUser) Get() (string, string) { return "quotaUser", string(q) }
|
||||
|
||||
// UserIP returns a CallOption that will set the "userIp" parameter of a call.
|
||||
// This should be the IP address of the originating request.
|
||||
func UserIP(ip string) CallOption { return userIP(ip) }
|
||||
|
||||
type userIP string
|
||||
|
||||
func (i userIP) Get() (string, string) { return "userIp", string(i) }
|
||||
|
||||
// TODO: Fields too
|
||||
|
|
247
vendor/google.golang.org/api/googleapi/internal/uritemplates/uritemplates.go
generated
vendored
247
vendor/google.golang.org/api/googleapi/internal/uritemplates/uritemplates.go
generated
vendored
|
@ -2,26 +2,15 @@
|
|||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package uritemplates is a level 4 implementation of RFC 6570 (URI
|
||||
// Package uritemplates is a level 3 implementation of RFC 6570 (URI
|
||||
// Template, http://tools.ietf.org/html/rfc6570).
|
||||
//
|
||||
// To use uritemplates, parse a template string and expand it with a value
|
||||
// map:
|
||||
//
|
||||
// template, _ := uritemplates.Parse("https://api.github.com/repos{/user,repo}")
|
||||
// values := make(map[string]interface{})
|
||||
// values["user"] = "jtacoma"
|
||||
// values["repo"] = "uritemplates"
|
||||
// expanded, _ := template.ExpandString(values)
|
||||
// fmt.Printf(expanded)
|
||||
//
|
||||
// uritemplates does not support composite values (in Go: slices or maps)
|
||||
// and so does not qualify as a level 4 implementation.
|
||||
package uritemplates
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
@ -45,52 +34,47 @@ func pctEncode(src []byte) []byte {
|
|||
return dst
|
||||
}
|
||||
|
||||
func escape(s string, allowReserved bool) (escaped string) {
|
||||
func escape(s string, allowReserved bool) string {
|
||||
if allowReserved {
|
||||
escaped = string(reserved.ReplaceAllFunc([]byte(s), pctEncode))
|
||||
} else {
|
||||
escaped = string(unreserved.ReplaceAllFunc([]byte(s), pctEncode))
|
||||
return string(reserved.ReplaceAllFunc([]byte(s), pctEncode))
|
||||
}
|
||||
return escaped
|
||||
return string(unreserved.ReplaceAllFunc([]byte(s), pctEncode))
|
||||
}
|
||||
|
||||
// A UriTemplate is a parsed representation of a URI template.
|
||||
type UriTemplate struct {
|
||||
// A uriTemplate is a parsed representation of a URI template.
|
||||
type uriTemplate struct {
|
||||
raw string
|
||||
parts []templatePart
|
||||
}
|
||||
|
||||
// Parse parses a URI template string into a UriTemplate object.
|
||||
func Parse(rawtemplate string) (template *UriTemplate, err error) {
|
||||
template = new(UriTemplate)
|
||||
template.raw = rawtemplate
|
||||
split := strings.Split(rawtemplate, "{")
|
||||
template.parts = make([]templatePart, len(split)*2-1)
|
||||
// parse parses a URI template string into a uriTemplate object.
|
||||
func parse(rawTemplate string) (*uriTemplate, error) {
|
||||
split := strings.Split(rawTemplate, "{")
|
||||
parts := make([]templatePart, len(split)*2-1)
|
||||
for i, s := range split {
|
||||
if i == 0 {
|
||||
if strings.Contains(s, "}") {
|
||||
err = errors.New("unexpected }")
|
||||
break
|
||||
return nil, errors.New("unexpected }")
|
||||
}
|
||||
template.parts[i].raw = s
|
||||
} else {
|
||||
subsplit := strings.Split(s, "}")
|
||||
if len(subsplit) != 2 {
|
||||
err = errors.New("malformed template")
|
||||
break
|
||||
}
|
||||
expression := subsplit[0]
|
||||
template.parts[i*2-1], err = parseExpression(expression)
|
||||
if err != nil {
|
||||
break
|
||||
}
|
||||
template.parts[i*2].raw = subsplit[1]
|
||||
parts[i].raw = s
|
||||
continue
|
||||
}
|
||||
subsplit := strings.Split(s, "}")
|
||||
if len(subsplit) != 2 {
|
||||
return nil, errors.New("malformed template")
|
||||
}
|
||||
expression := subsplit[0]
|
||||
var err error
|
||||
parts[i*2-1], err = parseExpression(expression)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
parts[i*2].raw = subsplit[1]
|
||||
}
|
||||
if err != nil {
|
||||
template = nil
|
||||
}
|
||||
return template, err
|
||||
return &uriTemplate{
|
||||
raw: rawTemplate,
|
||||
parts: parts,
|
||||
}, nil
|
||||
}
|
||||
|
||||
type templatePart struct {
|
||||
|
@ -160,6 +144,8 @@ func parseExpression(expression string) (result templatePart, err error) {
|
|||
}
|
||||
|
||||
func parseTerm(term string) (result templateTerm, err error) {
|
||||
// TODO(djd): Remove "*" suffix parsing once we check that no APIs have
|
||||
// mistakenly used that attribute.
|
||||
if strings.HasSuffix(term, "*") {
|
||||
result.explode = true
|
||||
term = term[:len(term)-1]
|
||||
|
@ -185,175 +171,50 @@ func parseTerm(term string) (result templateTerm, err error) {
|
|||
}
|
||||
|
||||
// Expand expands a URI template with a set of values to produce a string.
|
||||
func (self *UriTemplate) Expand(value interface{}) (string, error) {
|
||||
values, ismap := value.(map[string]interface{})
|
||||
if !ismap {
|
||||
if m, ismap := struct2map(value); !ismap {
|
||||
return "", errors.New("expected map[string]interface{}, struct, or pointer to struct.")
|
||||
} else {
|
||||
return self.Expand(m)
|
||||
}
|
||||
}
|
||||
func (t *uriTemplate) Expand(values map[string]string) string {
|
||||
var buf bytes.Buffer
|
||||
for _, p := range self.parts {
|
||||
err := p.expand(&buf, values)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
for _, p := range t.parts {
|
||||
p.expand(&buf, values)
|
||||
}
|
||||
return buf.String(), nil
|
||||
return buf.String()
|
||||
}
|
||||
|
||||
func (self *templatePart) expand(buf *bytes.Buffer, values map[string]interface{}) error {
|
||||
if len(self.raw) > 0 {
|
||||
buf.WriteString(self.raw)
|
||||
return nil
|
||||
func (tp *templatePart) expand(buf *bytes.Buffer, values map[string]string) {
|
||||
if len(tp.raw) > 0 {
|
||||
buf.WriteString(tp.raw)
|
||||
return
|
||||
}
|
||||
var zeroLen = buf.Len()
|
||||
buf.WriteString(self.first)
|
||||
var firstLen = buf.Len()
|
||||
for _, term := range self.terms {
|
||||
var first = true
|
||||
for _, term := range tp.terms {
|
||||
value, exists := values[term.name]
|
||||
if !exists {
|
||||
continue
|
||||
}
|
||||
if buf.Len() != firstLen {
|
||||
buf.WriteString(self.sep)
|
||||
}
|
||||
switch v := value.(type) {
|
||||
case string:
|
||||
self.expandString(buf, term, v)
|
||||
case []interface{}:
|
||||
self.expandArray(buf, term, v)
|
||||
case map[string]interface{}:
|
||||
if term.truncate > 0 {
|
||||
return errors.New("cannot truncate a map expansion")
|
||||
}
|
||||
self.expandMap(buf, term, v)
|
||||
default:
|
||||
if m, ismap := struct2map(value); ismap {
|
||||
if term.truncate > 0 {
|
||||
return errors.New("cannot truncate a map expansion")
|
||||
}
|
||||
self.expandMap(buf, term, m)
|
||||
} else {
|
||||
str := fmt.Sprintf("%v", value)
|
||||
self.expandString(buf, term, str)
|
||||
}
|
||||
if first {
|
||||
buf.WriteString(tp.first)
|
||||
first = false
|
||||
} else {
|
||||
buf.WriteString(tp.sep)
|
||||
}
|
||||
tp.expandString(buf, term, value)
|
||||
}
|
||||
if buf.Len() == firstLen {
|
||||
original := buf.Bytes()[:zeroLen]
|
||||
buf.Reset()
|
||||
buf.Write(original)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (self *templatePart) expandName(buf *bytes.Buffer, name string, empty bool) {
|
||||
if self.named {
|
||||
func (tp *templatePart) expandName(buf *bytes.Buffer, name string, empty bool) {
|
||||
if tp.named {
|
||||
buf.WriteString(name)
|
||||
if empty {
|
||||
buf.WriteString(self.ifemp)
|
||||
buf.WriteString(tp.ifemp)
|
||||
} else {
|
||||
buf.WriteString("=")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (self *templatePart) expandString(buf *bytes.Buffer, t templateTerm, s string) {
|
||||
func (tp *templatePart) expandString(buf *bytes.Buffer, t templateTerm, s string) {
|
||||
if len(s) > t.truncate && t.truncate > 0 {
|
||||
s = s[:t.truncate]
|
||||
}
|
||||
self.expandName(buf, t.name, len(s) == 0)
|
||||
buf.WriteString(escape(s, self.allowReserved))
|
||||
}
|
||||
|
||||
func (self *templatePart) expandArray(buf *bytes.Buffer, t templateTerm, a []interface{}) {
|
||||
if len(a) == 0 {
|
||||
return
|
||||
} else if !t.explode {
|
||||
self.expandName(buf, t.name, false)
|
||||
}
|
||||
for i, value := range a {
|
||||
if t.explode && i > 0 {
|
||||
buf.WriteString(self.sep)
|
||||
} else if i > 0 {
|
||||
buf.WriteString(",")
|
||||
}
|
||||
var s string
|
||||
switch v := value.(type) {
|
||||
case string:
|
||||
s = v
|
||||
default:
|
||||
s = fmt.Sprintf("%v", v)
|
||||
}
|
||||
if len(s) > t.truncate && t.truncate > 0 {
|
||||
s = s[:t.truncate]
|
||||
}
|
||||
if self.named && t.explode {
|
||||
self.expandName(buf, t.name, len(s) == 0)
|
||||
}
|
||||
buf.WriteString(escape(s, self.allowReserved))
|
||||
}
|
||||
}
|
||||
|
||||
func (self *templatePart) expandMap(buf *bytes.Buffer, t templateTerm, m map[string]interface{}) {
|
||||
if len(m) == 0 {
|
||||
return
|
||||
}
|
||||
if !t.explode {
|
||||
self.expandName(buf, t.name, len(m) == 0)
|
||||
}
|
||||
var firstLen = buf.Len()
|
||||
for k, value := range m {
|
||||
if firstLen != buf.Len() {
|
||||
if t.explode {
|
||||
buf.WriteString(self.sep)
|
||||
} else {
|
||||
buf.WriteString(",")
|
||||
}
|
||||
}
|
||||
var s string
|
||||
switch v := value.(type) {
|
||||
case string:
|
||||
s = v
|
||||
default:
|
||||
s = fmt.Sprintf("%v", v)
|
||||
}
|
||||
if t.explode {
|
||||
buf.WriteString(escape(k, self.allowReserved))
|
||||
buf.WriteRune('=')
|
||||
buf.WriteString(escape(s, self.allowReserved))
|
||||
} else {
|
||||
buf.WriteString(escape(k, self.allowReserved))
|
||||
buf.WriteRune(',')
|
||||
buf.WriteString(escape(s, self.allowReserved))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func struct2map(v interface{}) (map[string]interface{}, bool) {
|
||||
value := reflect.ValueOf(v)
|
||||
switch value.Type().Kind() {
|
||||
case reflect.Ptr:
|
||||
return struct2map(value.Elem().Interface())
|
||||
case reflect.Struct:
|
||||
m := make(map[string]interface{})
|
||||
for i := 0; i < value.NumField(); i++ {
|
||||
tag := value.Type().Field(i).Tag
|
||||
var name string
|
||||
if strings.Contains(string(tag), ":") {
|
||||
name = tag.Get("uri")
|
||||
} else {
|
||||
name = strings.TrimSpace(string(tag))
|
||||
}
|
||||
if len(name) == 0 {
|
||||
name = value.Type().Field(i).Name
|
||||
}
|
||||
m[name] = value.Field(i).Interface()
|
||||
}
|
||||
return m, true
|
||||
}
|
||||
return nil, false
|
||||
tp.expandName(buf, t.name, len(s) == 0)
|
||||
buf.WriteString(escape(s, tp.allowReserved))
|
||||
}
|
||||
|
|
14
vendor/google.golang.org/api/googleapi/internal/uritemplates/utils.go
generated
vendored
|
@ -1,13 +1,13 @@
|
|||
// Copyright 2016 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package uritemplates
|
||||
|
||||
func Expand(path string, expansions map[string]string) (string, error) {
|
||||
template, err := Parse(path)
|
||||
func Expand(path string, values map[string]string) (string, error) {
|
||||
template, err := parse(path)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
values := make(map[string]interface{})
|
||||
for k, v := range expansions {
|
||||
values[k] = v
|
||||
}
|
||||
return template.Expand(values)
|
||||
return template.Expand(values), nil
|
||||
}
|
||||
|
|
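For orientation, a minimal sketch of calling the package-level Expand shown in the utils.go diff above. It assumes the snippet lives inside the uritemplates package itself (the internal import path is not visible to other modules); the template and values are illustrative only.

package uritemplates

// exampleExpand shows a simple RFC 6570 expansion. Reserved characters in
// the value (such as "/") are percent-escaped, so the object name ends up
// as a single path segment.
func exampleExpand() (string, error) {
	return Expand("b/{bucket}/o/{object}", map[string]string{
		"bucket": "my-bucket",
		"object": "docs/readme.txt", // expands to docs%2Freadme.txt
	})
}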
32
vendor/google.golang.org/api/googleapi/types.go
generated
vendored
|
@ -148,3 +148,35 @@ func (s Float64s) MarshalJSON() ([]byte, error) {
|
|||
return strconv.AppendFloat(dst, s[i], 'g', -1, 64)
|
||||
})
|
||||
}
|
||||
|
||||
/*
|
||||
* Helper routines for simplifying the creation of optional fields of basic type.
|
||||
*/
|
||||
|
||||
// Bool is a helper routine that allocates a new bool value
|
||||
// to store v and returns a pointer to it.
|
||||
func Bool(v bool) *bool { return &v }
|
||||
|
||||
// Int32 is a helper routine that allocates a new int32 value
|
||||
// to store v and returns a pointer to it.
|
||||
func Int32(v int32) *int32 { return &v }
|
||||
|
||||
// Int64 is a helper routine that allocates a new int64 value
|
||||
// to store v and returns a pointer to it.
|
||||
func Int64(v int64) *int64 { return &v }
|
||||
|
||||
// Float64 is a helper routine that allocates a new float64 value
|
||||
// to store v and returns a pointer to it.
|
||||
func Float64(v float64) *float64 { return &v }
|
||||
|
||||
// Uint32 is a helper routine that allocates a new uint32 value
|
||||
// to store v and returns a pointer to it.
|
||||
func Uint32(v uint32) *uint32 { return &v }
|
||||
|
||||
// Uint64 is a helper routine that allocates a new uint64 value
|
||||
// to store v and returns a pointer to it.
|
||||
func Uint64(v uint64) *uint64 { return &v }
|
||||
|
||||
// String is a helper routine that allocates a new string value
|
||||
// to store v and returns a pointer to it.
|
||||
func String(v string) *string { return &v }
|
||||
|
|
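A minimal sketch of how the new pointer helpers are used; the settings struct below is hypothetical, standing in for generated API types that use pointer fields so an unset field can be distinguished from a zero value.

package main

import (
	"fmt"

	"google.golang.org/api/googleapi"
)

// settings is a hypothetical type with optional fields.
type settings struct {
	Versioning *bool
	MaxResults *int64
}

func main() {
	s := settings{
		Versioning: googleapi.Bool(true), // explicitly set, not just defaulted
		MaxResults: googleapi.Int64(100),
	}
	if s.Versioning != nil {
		fmt.Println("versioning:", *s.Versioning)
	}
	if s.MaxResults != nil {
		fmt.Println("max results:", *s.MaxResults)
	}
}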
116
vendor/google.golang.org/api/storage/v1/storage-api.json
generated
vendored
|
@ -1,13 +1,13 @@
|
|||
{
|
||||
"kind": "discovery#restDescription",
|
||||
"etag": "\"ye6orv2F-1npMW3u9suM3a7C5Bo/ALzt_o9hBNBIakQJmeXCNhSU8II\"",
|
||||
"etag": "\"bRFOOrZKfO9LweMbPqu0kcu6De8/KVPQfwGxQTBtH0g1kuij0C9i4uc\"",
|
||||
"discoveryVersion": "v1",
|
||||
"id": "storage:v1",
|
||||
"name": "storage",
|
||||
"version": "v1",
|
||||
"revision": "20150630",
|
||||
"title": "Cloud Storage API",
|
||||
"description": "Lets you store and retrieve potentially-large, immutable data objects.",
|
||||
"revision": "20160304",
|
||||
"title": "Cloud Storage JSON API",
|
||||
"description": "Stores and retrieves potentially large, immutable data objects.",
|
||||
"ownerDomain": "google.com",
|
||||
"ownerName": "Google",
|
||||
"icons": {
|
||||
|
@ -75,6 +75,9 @@
|
|||
"https://www.googleapis.com/auth/cloud-platform": {
|
||||
"description": "View and manage your data across Google Cloud Platform services"
|
||||
},
|
||||
"https://www.googleapis.com/auth/cloud-platform.read-only": {
|
||||
"description": "View your data across Google Cloud Platform services"
|
||||
},
|
||||
"https://www.googleapis.com/auth/devstorage.full_control": {
|
||||
"description": "Manage your data and permissions in Google Cloud Storage"
|
||||
},
|
||||
|
@ -271,7 +274,12 @@
|
|||
},
|
||||
"timeCreated": {
|
||||
"type": "string",
|
||||
"description": "Creation time of the bucket in RFC 3339 format.",
|
||||
"description": "The creation time of the bucket in RFC 3339 format.",
|
||||
"format": "date-time"
|
||||
},
|
||||
"updated": {
|
||||
"type": "string",
|
||||
"description": "The modification time of the bucket in RFC 3339 format.",
|
||||
"format": "date-time"
|
||||
},
|
||||
"versioning": {
|
||||
|
@ -566,18 +574,26 @@
|
|||
},
|
||||
"contentType": {
|
||||
"type": "string",
|
||||
"description": "Content-Type of the object data.",
|
||||
"annotations": {
|
||||
"required": [
|
||||
"storage.objects.insert",
|
||||
"storage.objects.update"
|
||||
]
|
||||
}
|
||||
"description": "Content-Type of the object data."
|
||||
},
|
||||
"crc32c": {
|
||||
"type": "string",
|
||||
"description": "CRC32c checksum, as described in RFC 4960, Appendix B; encoded using base64 in big-endian byte order. For more information about using the CRC32c checksum, see Hashes and ETags: Best Practices."
|
||||
},
|
||||
"customerEncryption": {
|
||||
"type": "object",
|
||||
"description": "Metadata of customer-supplied encryption key, if the object is encrypted by such a key.",
|
||||
"properties": {
|
||||
"encryptionAlgorithm": {
|
||||
"type": "string",
|
||||
"description": "The encryption algorithm."
|
||||
},
|
||||
"keySha256": {
|
||||
"type": "string",
|
||||
"description": "SHA256 hash value of the encryption key."
|
||||
}
|
||||
}
|
||||
},
|
||||
"etag": {
|
||||
"type": "string",
|
||||
"description": "HTTP 1.1 Entity tag for the object."
|
||||
|
@ -648,6 +664,11 @@
|
|||
"type": "string",
|
||||
"description": "Storage class of the object."
|
||||
},
|
||||
"timeCreated": {
|
||||
"type": "string",
|
||||
"description": "The creation time of the object in RFC 3339 format.",
|
||||
"format": "date-time"
|
||||
},
|
||||
"timeDeleted": {
|
||||
"type": "string",
|
||||
"description": "The deletion time of the object in RFC 3339 format. Will be returned if and only if this version of the object has been deleted.",
|
||||
|
@ -655,7 +676,7 @@
|
|||
},
|
||||
"updated": {
|
||||
"type": "string",
|
||||
"description": "The creation or modification time of the object in RFC 3339 format. For buckets with versioning enabled, changing an object's metadata does not change this property.",
|
||||
"description": "The modification time of the object metadata in RFC 3339 format.",
|
||||
"format": "date-time"
|
||||
}
|
||||
}
|
||||
|
@ -842,6 +863,7 @@
|
|||
"entity"
|
||||
],
|
||||
"scopes": [
|
||||
"https://www.googleapis.com/auth/cloud-platform",
|
||||
"https://www.googleapis.com/auth/devstorage.full_control"
|
||||
]
|
||||
},
|
||||
|
@ -872,6 +894,7 @@
|
|||
"$ref": "BucketAccessControl"
|
||||
},
|
||||
"scopes": [
|
||||
"https://www.googleapis.com/auth/cloud-platform",
|
||||
"https://www.googleapis.com/auth/devstorage.full_control"
|
||||
]
|
||||
},
|
||||
|
@ -898,6 +921,7 @@
|
|||
"$ref": "BucketAccessControl"
|
||||
},
|
||||
"scopes": [
|
||||
"https://www.googleapis.com/auth/cloud-platform",
|
||||
"https://www.googleapis.com/auth/devstorage.full_control"
|
||||
]
|
||||
},
|
||||
|
@ -921,6 +945,7 @@
|
|||
"$ref": "BucketAccessControls"
|
||||
},
|
||||
"scopes": [
|
||||
"https://www.googleapis.com/auth/cloud-platform",
|
||||
"https://www.googleapis.com/auth/devstorage.full_control"
|
||||
]
|
||||
},
|
||||
|
@ -954,6 +979,7 @@
|
|||
"$ref": "BucketAccessControl"
|
||||
},
|
||||
"scopes": [
|
||||
"https://www.googleapis.com/auth/cloud-platform",
|
||||
"https://www.googleapis.com/auth/devstorage.full_control"
|
||||
]
|
||||
},
|
||||
|
@ -987,6 +1013,7 @@
|
|||
"$ref": "BucketAccessControl"
|
||||
},
|
||||
"scopes": [
|
||||
"https://www.googleapis.com/auth/cloud-platform",
|
||||
"https://www.googleapis.com/auth/devstorage.full_control"
|
||||
]
|
||||
}
|
||||
|
@ -1074,6 +1101,7 @@
|
|||
},
|
||||
"scopes": [
|
||||
"https://www.googleapis.com/auth/cloud-platform",
|
||||
"https://www.googleapis.com/auth/cloud-platform.read-only",
|
||||
"https://www.googleapis.com/auth/devstorage.full_control",
|
||||
"https://www.googleapis.com/auth/devstorage.read_only",
|
||||
"https://www.googleapis.com/auth/devstorage.read_write"
|
||||
|
@ -1211,6 +1239,7 @@
|
|||
},
|
||||
"scopes": [
|
||||
"https://www.googleapis.com/auth/cloud-platform",
|
||||
"https://www.googleapis.com/auth/cloud-platform.read-only",
|
||||
"https://www.googleapis.com/auth/devstorage.full_control",
|
||||
"https://www.googleapis.com/auth/devstorage.read_only",
|
||||
"https://www.googleapis.com/auth/devstorage.read_write"
|
||||
|
@ -1417,6 +1446,7 @@
|
|||
},
|
||||
"scopes": [
|
||||
"https://www.googleapis.com/auth/cloud-platform",
|
||||
"https://www.googleapis.com/auth/cloud-platform.read-only",
|
||||
"https://www.googleapis.com/auth/devstorage.full_control",
|
||||
"https://www.googleapis.com/auth/devstorage.read_only",
|
||||
"https://www.googleapis.com/auth/devstorage.read_write"
|
||||
|
@ -1450,6 +1480,7 @@
|
|||
"entity"
|
||||
],
|
||||
"scopes": [
|
||||
"https://www.googleapis.com/auth/cloud-platform",
|
||||
"https://www.googleapis.com/auth/devstorage.full_control"
|
||||
]
|
||||
},
|
||||
|
@ -1480,6 +1511,7 @@
|
|||
"$ref": "ObjectAccessControl"
|
||||
},
|
||||
"scopes": [
|
||||
"https://www.googleapis.com/auth/cloud-platform",
|
||||
"https://www.googleapis.com/auth/devstorage.full_control"
|
||||
]
|
||||
},
|
||||
|
@ -1506,6 +1538,7 @@
|
|||
"$ref": "ObjectAccessControl"
|
||||
},
|
||||
"scopes": [
|
||||
"https://www.googleapis.com/auth/cloud-platform",
|
||||
"https://www.googleapis.com/auth/devstorage.full_control"
|
||||
]
|
||||
},
|
||||
|
@ -1541,6 +1574,7 @@
|
|||
"$ref": "ObjectAccessControls"
|
||||
},
|
||||
"scopes": [
|
||||
"https://www.googleapis.com/auth/cloud-platform",
|
||||
"https://www.googleapis.com/auth/devstorage.full_control"
|
||||
]
|
||||
},
|
||||
|
@ -1574,6 +1608,7 @@
|
|||
"$ref": "ObjectAccessControl"
|
||||
},
|
||||
"scopes": [
|
||||
"https://www.googleapis.com/auth/cloud-platform",
|
||||
"https://www.googleapis.com/auth/devstorage.full_control"
|
||||
]
|
||||
},
|
||||
|
@ -1607,6 +1642,7 @@
|
|||
"$ref": "ObjectAccessControl"
|
||||
},
|
||||
"scopes": [
|
||||
"https://www.googleapis.com/auth/cloud-platform",
|
||||
"https://www.googleapis.com/auth/devstorage.full_control"
|
||||
]
|
||||
}
|
||||
|
@ -1640,7 +1676,7 @@
|
|||
},
|
||||
"object": {
|
||||
"type": "string",
|
||||
"description": "Name of the object.",
|
||||
"description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
|
||||
"required": true,
|
||||
"location": "path"
|
||||
}
|
||||
|
@ -1651,6 +1687,7 @@
|
|||
"entity"
|
||||
],
|
||||
"scopes": [
|
||||
"https://www.googleapis.com/auth/cloud-platform",
|
||||
"https://www.googleapis.com/auth/devstorage.full_control"
|
||||
]
|
||||
},
|
||||
|
@ -1680,7 +1717,7 @@
|
|||
},
|
||||
"object": {
|
||||
"type": "string",
|
||||
"description": "Name of the object.",
|
||||
"description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
|
||||
"required": true,
|
||||
"location": "path"
|
||||
}
|
||||
|
@ -1694,6 +1731,7 @@
|
|||
"$ref": "ObjectAccessControl"
|
||||
},
|
||||
"scopes": [
|
||||
"https://www.googleapis.com/auth/cloud-platform",
|
||||
"https://www.googleapis.com/auth/devstorage.full_control"
|
||||
]
|
||||
},
|
||||
|
@ -1717,7 +1755,7 @@
|
|||
},
|
||||
"object": {
|
||||
"type": "string",
|
||||
"description": "Name of the object.",
|
||||
"description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
|
||||
"required": true,
|
||||
"location": "path"
|
||||
}
|
||||
|
@ -1733,6 +1771,7 @@
|
|||
"$ref": "ObjectAccessControl"
|
||||
},
|
||||
"scopes": [
|
||||
"https://www.googleapis.com/auth/cloud-platform",
|
||||
"https://www.googleapis.com/auth/devstorage.full_control"
|
||||
]
|
||||
},
|
||||
|
@ -1756,7 +1795,7 @@
|
|||
},
|
||||
"object": {
|
||||
"type": "string",
|
||||
"description": "Name of the object.",
|
||||
"description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
|
||||
"required": true,
|
||||
"location": "path"
|
||||
}
|
||||
|
@ -1769,6 +1808,7 @@
|
|||
"$ref": "ObjectAccessControls"
|
||||
},
|
||||
"scopes": [
|
||||
"https://www.googleapis.com/auth/cloud-platform",
|
||||
"https://www.googleapis.com/auth/devstorage.full_control"
|
||||
]
|
||||
},
|
||||
|
@ -1798,7 +1838,7 @@
|
|||
},
|
||||
"object": {
|
||||
"type": "string",
|
||||
"description": "Name of the object.",
|
||||
"description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
|
||||
"required": true,
|
||||
"location": "path"
|
||||
}
|
||||
|
@ -1815,6 +1855,7 @@
|
|||
"$ref": "ObjectAccessControl"
|
||||
},
|
||||
"scopes": [
|
||||
"https://www.googleapis.com/auth/cloud-platform",
|
||||
"https://www.googleapis.com/auth/devstorage.full_control"
|
||||
]
|
||||
},
|
||||
|
@ -1844,7 +1885,7 @@
|
|||
},
|
||||
"object": {
|
||||
"type": "string",
|
||||
"description": "Name of the object.",
|
||||
"description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
|
||||
"required": true,
|
||||
"location": "path"
|
||||
}
|
||||
|
@ -1861,6 +1902,7 @@
|
|||
"$ref": "ObjectAccessControl"
|
||||
},
|
||||
"scopes": [
|
||||
"https://www.googleapis.com/auth/cloud-platform",
|
||||
"https://www.googleapis.com/auth/devstorage.full_control"
|
||||
]
|
||||
}
|
||||
|
@ -1882,7 +1924,7 @@
|
|||
},
|
||||
"destinationObject": {
|
||||
"type": "string",
|
||||
"description": "Name of the new object.",
|
||||
"description": "Name of the new object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
|
||||
"required": true,
|
||||
"location": "path"
|
||||
},
|
||||
|
@ -1935,7 +1977,8 @@
|
|||
"https://www.googleapis.com/auth/devstorage.full_control",
|
||||
"https://www.googleapis.com/auth/devstorage.read_write"
|
||||
],
|
||||
"supportsMediaDownload": true
|
||||
"supportsMediaDownload": true,
|
||||
"useMediaDownloadService": true
|
||||
},
|
||||
"copy": {
|
||||
"id": "storage.objects.copy",
|
||||
|
@ -1945,7 +1988,7 @@
|
|||
"parameters": {
|
||||
"destinationBucket": {
|
||||
"type": "string",
|
||||
"description": "Name of the bucket in which to store the new object. Overrides the provided object metadata's bucket value, if any.",
|
||||
"description": "Name of the bucket in which to store the new object. Overrides the provided object metadata's bucket value, if any.For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
|
||||
"required": true,
|
||||
"location": "path"
|
||||
},
|
||||
|
@ -2051,7 +2094,7 @@
|
|||
},
|
||||
"sourceObject": {
|
||||
"type": "string",
|
||||
"description": "Name of the source object.",
|
||||
"description": "Name of the source object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
|
||||
"required": true,
|
||||
"location": "path"
|
||||
}
|
||||
|
@ -2073,7 +2116,8 @@
|
|||
"https://www.googleapis.com/auth/devstorage.full_control",
|
||||
"https://www.googleapis.com/auth/devstorage.read_write"
|
||||
],
|
||||
"supportsMediaDownload": true
|
||||
"supportsMediaDownload": true,
|
||||
"useMediaDownloadService": true
|
||||
},
|
||||
"delete": {
|
||||
"id": "storage.objects.delete",
|
||||
|
@ -2119,7 +2163,7 @@
|
|||
},
|
||||
"object": {
|
||||
"type": "string",
|
||||
"description": "Name of the object.",
|
||||
"description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
|
||||
"required": true,
|
||||
"location": "path"
|
||||
}
|
||||
|
@ -2178,7 +2222,7 @@
|
|||
},
|
||||
"object": {
|
||||
"type": "string",
|
||||
"description": "Name of the object.",
|
||||
"description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
|
||||
"required": true,
|
||||
"location": "path"
|
||||
},
|
||||
|
@ -2205,11 +2249,13 @@
|
|||
},
|
||||
"scopes": [
|
||||
"https://www.googleapis.com/auth/cloud-platform",
|
||||
"https://www.googleapis.com/auth/cloud-platform.read-only",
|
||||
"https://www.googleapis.com/auth/devstorage.full_control",
|
||||
"https://www.googleapis.com/auth/devstorage.read_only",
|
||||
"https://www.googleapis.com/auth/devstorage.read_write"
|
||||
],
|
||||
"supportsMediaDownload": true
|
||||
"supportsMediaDownload": true,
|
||||
"useMediaDownloadService": true
|
||||
},
|
||||
"insert": {
|
||||
"id": "storage.objects.insert",
|
||||
|
@ -2254,7 +2300,7 @@
|
|||
},
|
||||
"name": {
|
||||
"type": "string",
|
||||
"description": "Name of the object. Required when the object metadata is not otherwise provided. Overrides the object metadata's name value, if any.",
|
||||
"description": "Name of the object. Required when the object metadata is not otherwise provided. Overrides the object metadata's name value, if any. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
|
||||
"location": "query"
|
||||
},
|
||||
"predefinedAcl": {
|
||||
|
@ -2307,6 +2353,7 @@
|
|||
"https://www.googleapis.com/auth/devstorage.read_write"
|
||||
],
|
||||
"supportsMediaDownload": true,
|
||||
"useMediaDownloadService": true,
|
||||
"supportsMediaUpload": true,
|
||||
"mediaUpload": {
|
||||
"accept": [
|
||||
|
@ -2385,6 +2432,7 @@
|
|||
},
|
||||
"scopes": [
|
||||
"https://www.googleapis.com/auth/cloud-platform",
|
||||
"https://www.googleapis.com/auth/cloud-platform.read-only",
|
||||
"https://www.googleapis.com/auth/devstorage.full_control",
|
||||
"https://www.googleapis.com/auth/devstorage.read_only",
|
||||
"https://www.googleapis.com/auth/devstorage.read_write"
|
||||
|
@ -2435,7 +2483,7 @@
|
|||
},
|
||||
"object": {
|
||||
"type": "string",
|
||||
"description": "Name of the object.",
|
||||
"description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
|
||||
"required": true,
|
||||
"location": "path"
|
||||
},
|
||||
|
@ -2504,7 +2552,7 @@
|
|||
},
|
||||
"destinationObject": {
|
||||
"type": "string",
|
||||
"description": "Name of the new object. Required when the object metadata is not otherwise provided. Overrides the object metadata's name value, if any.",
|
||||
"description": "Name of the new object. Required when the object metadata is not otherwise provided. Overrides the object metadata's name value, if any. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
|
||||
"required": true,
|
||||
"location": "path"
|
||||
},
|
||||
|
@ -2615,7 +2663,7 @@
|
|||
},
|
||||
"sourceObject": {
|
||||
"type": "string",
|
||||
"description": "Name of the source object.",
|
||||
"description": "Name of the source object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
|
||||
"required": true,
|
||||
"location": "path"
|
||||
}
|
||||
|
@ -2682,7 +2730,7 @@
|
|||
},
|
||||
"object": {
|
||||
"type": "string",
|
||||
"description": "Name of the object.",
|
||||
"description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
|
||||
"required": true,
|
||||
"location": "path"
|
||||
},
|
||||
|
@ -2736,7 +2784,8 @@
|
|||
"https://www.googleapis.com/auth/devstorage.full_control",
|
||||
"https://www.googleapis.com/auth/devstorage.read_write"
|
||||
],
|
||||
"supportsMediaDownload": true
|
||||
"supportsMediaDownload": true,
|
||||
"useMediaDownloadService": true
|
||||
},
|
||||
"watchAll": {
|
||||
"id": "storage.objects.watchAll",
|
||||
|
@ -2803,6 +2852,7 @@
|
|||
},
|
||||
"scopes": [
|
||||
"https://www.googleapis.com/auth/cloud-platform",
|
||||
"https://www.googleapis.com/auth/cloud-platform.read-only",
|
||||
"https://www.googleapis.com/auth/devstorage.full_control",
|
||||
"https://www.googleapis.com/auth/devstorage.read_only",
|
||||
"https://www.googleapis.com/auth/devstorage.read_write"
|
||||
|
|
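The "crc32c" property in the storage-api.json schema above calls for the CRC32c checksum of the object data, base64-encoded in big-endian byte order. A minimal client-side sketch (not part of the vendored sources) producing such a value with the standard library:

package main

import (
	"encoding/base64"
	"encoding/binary"
	"fmt"
	"hash/crc32"
)

// crc32cBase64 computes the value expected in an object resource's
// "crc32c" field: the CRC32c (Castagnoli) checksum of the data,
// serialized big-endian and then base64-encoded.
func crc32cBase64(data []byte) string {
	sum := crc32.Checksum(data, crc32.MakeTable(crc32.Castagnoli))
	var buf [4]byte
	binary.BigEndian.PutUint32(buf[:], sum)
	return base64.StdEncoding.EncodeToString(buf[:])
}

func main() {
	fmt.Println(crc32cBase64([]byte("hello world")))
}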
3609
vendor/google.golang.org/api/storage/v1/storage-gen.go
generated
vendored
File diff suppressed because it is too large
18
vendor/google.golang.org/appengine/.travis.yml
generated
vendored
Normal file
|
@ -0,0 +1,18 @@
|
|||
language: go
|
||||
sudo: false
|
||||
|
||||
go:
|
||||
- 1.4
|
||||
|
||||
install:
|
||||
- go get -v -t -d google.golang.org/appengine/...
|
||||
- mkdir sdk
|
||||
- curl -o sdk.zip "https://storage.googleapis.com/appengine-sdks/featured/go_appengine_sdk_linux_amd64-1.9.24.zip"
|
||||
- unzip sdk.zip -d sdk
|
||||
- export APPENGINE_DEV_APPSERVER=$(pwd)/sdk/go_appengine/dev_appserver.py
|
||||
|
||||
script:
|
||||
- go version
|
||||
- go test -v google.golang.org/appengine/...
|
||||
- go test -v -race google.golang.org/appengine/...
|
||||
- sdk/go_appengine/goapp test -v google.golang.org/appengine/...
|
202
vendor/google.golang.org/appengine/LICENSE
generated
vendored
Normal file
202
vendor/google.golang.org/appengine/LICENSE
generated
vendored
Normal file
|
@ -0,0 +1,202 @@
|
|||
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright [yyyy] [name of copyright owner]
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
73
vendor/google.golang.org/appengine/README.md
generated
vendored
Normal file
73
vendor/google.golang.org/appengine/README.md
generated
vendored
Normal file
|
@ -0,0 +1,73 @@
|
|||
# Go App Engine packages
|
||||
|
||||
[](https://travis-ci.org/golang/appengine)
|
||||
|
||||
This repository supports the Go runtime on App Engine,
|
||||
including both classic App Engine and Managed VMs.
|
||||
It provides APIs for interacting with App Engine services.
|
||||
Its canonical import path is `google.golang.org/appengine`.
|
||||
|
||||
See https://cloud.google.com/appengine/docs/go/
|
||||
for more information.
|
||||
|
||||
File issue reports and feature requests on the [Google App Engine issue
|
||||
tracker](https://code.google.com/p/googleappengine/issues/entry?template=Go%20defect).
|
||||
|
||||
## Directory structure
|
||||
The top level directory of this repository is the `appengine` package. It
|
||||
contains the
|
||||
basic APIs (e.g. `appengine.NewContext`) that apply across APIs. Specific API
|
||||
packages are in subdirectories (e.g. `datastore`).
|
||||
|
||||
There is an `internal` subdirectory that contains service protocol buffers,
|
||||
plus packages required for connectivity to make API calls. App Engine apps
|
||||
should not directly import any package under `internal`.
|
||||
|
||||
## Updating a Go App Engine app
|
||||
|
||||
This section describes how to update a traditional Go App Engine app to use
|
||||
these packages.
|
||||
|
||||
### 1. Update YAML files (Managed VMs only)
|
||||
|
||||
The `app.yaml` file (and YAML files for modules) should have these new lines added:
|
||||
```
|
||||
vm: true
|
||||
```
|
||||
See https://cloud.google.com/appengine/docs/go/modules/#Go_Instance_scaling_and_class for details.
|
||||
|
||||
### 2. Update import paths
|
||||
|
||||
The import paths for App Engine packages are now fully qualified, based at `google.golang.org/appengine`.
|
||||
You will need to update your code to use import paths starting with that; for instance,
|
||||
code importing `appengine/datastore` will now need to import `google.golang.org/appengine/datastore`.
|
||||
You can do that manually, or by running this command to recursively update all Go source files in the current directory:
|
||||
(may require GNU sed)
|
||||
```
|
||||
sed -i '/"appengine/{s,"appengine,"google.golang.org/appengine,;s,appengine_,appengine/,}' \
|
||||
$(find . -name '*.go')
|
||||
```
|
||||
|
||||
### 3. Update code using deprecated, removed or modified APIs
|
||||
|
||||
Most App Engine services are available with exactly the same API.
|
||||
A few APIs were cleaned up, and some are not available yet.
|
||||
This list summarises the differences:
|
||||
|
||||
* `appengine.Context` has been replaced with the `Context` type from `golang.org/x/net/context`.
|
||||
* Logging methods that were on `appengine.Context` are now functions in `google.golang.org/appengine/log`.
|
||||
* `appengine.Timeout` has been removed. Use `context.WithTimeout` instead.
|
||||
* `appengine.Datacenter` now takes a `context.Context` argument.
|
||||
* `datastore.PropertyLoadSaver` has been simplified to use slices in place of channels.
|
||||
* `delay.Call` now returns an error.
|
||||
* `search.FieldLoadSaver` now handles document metadata.
|
||||
* `urlfetch.Transport` no longer has a Deadline field; set a deadline on the
|
||||
`context.Context` instead.
|
||||
* `aetest` no longer declares its own Context type, and uses the standard one instead.
|
||||
* `taskqueue.QueueStats` no longer takes a maxTasks argument. That argument has been
|
||||
deprecated and unused for a long time.
|
||||
* `appengine.BackendHostname` and `appengine.BackendInstance` were for the deprecated backends feature.
|
||||
Use `appengine.ModuleHostname` and `appengine.ModuleName` instead.
|
||||
* Most of `appengine/file` and parts of `appengine/blobstore` are deprecated.
|
||||
Use [Google Cloud Storage](https://godoc.org/google.golang.org/cloud/storage) instead.
|
||||
* `appengine/socket` is not required on Managed VMs. Use the standard `net` package instead.
|
76
vendor/google.golang.org/appengine/appengine.go
generated
vendored
Normal file
76
vendor/google.golang.org/appengine/appengine.go
generated
vendored
Normal file
|
@ -0,0 +1,76 @@
|
|||
// Copyright 2011 Google Inc. All rights reserved.
|
||||
// Use of this source code is governed by the Apache 2.0
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package appengine provides basic functionality for Google App Engine.
|
||||
//
|
||||
// For more information on how to write Go apps for Google App Engine, see:
|
||||
// https://cloud.google.com/appengine/docs/go/
|
||||
package appengine
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
|
||||
"github.com/golang/protobuf/proto"
|
||||
"golang.org/x/net/context"
|
||||
|
||||
"google.golang.org/appengine/internal"
|
||||
)
|
||||
|
||||
// IsDevAppServer reports whether the App Engine app is running in the
|
||||
// development App Server.
|
||||
func IsDevAppServer() bool {
|
||||
return internal.IsDevAppServer()
|
||||
}
|
||||
|
||||
// NewContext returns a context for an in-flight HTTP request.
|
||||
// This function is cheap.
|
||||
func NewContext(req *http.Request) context.Context {
|
||||
return WithContext(context.Background(), req)
|
||||
}
|
||||
|
||||
// WithContext returns a copy of the parent context
|
||||
// and associates it with an in-flight HTTP request.
|
||||
// This function is cheap.
|
||||
func WithContext(parent context.Context, req *http.Request) context.Context {
|
||||
return internal.WithContext(parent, req)
|
||||
}
|
||||
|
||||
// TODO(dsymonds): Add a Call function here? Otherwise other packages can't access internal.Call.
|
||||
|
||||
// BlobKey is a key for a blobstore blob.
|
||||
//
|
||||
// Conceptually, this type belongs in the blobstore package, but it lives in
|
||||
// the appengine package to avoid a circular dependency: blobstore depends on
|
||||
// datastore, and datastore needs to refer to the BlobKey type.
|
||||
type BlobKey string
|
||||
|
||||
// GeoPoint represents a location as latitude/longitude in degrees.
|
||||
type GeoPoint struct {
|
||||
Lat, Lng float64
|
||||
}
|
||||
|
||||
// Valid returns whether a GeoPoint is within [-90, 90] latitude and [-180, 180] longitude.
|
||||
func (g GeoPoint) Valid() bool {
|
||||
return -90 <= g.Lat && g.Lat <= 90 && -180 <= g.Lng && g.Lng <= 180
|
||||
}
|
||||
|
||||
// APICallFunc defines a function type for handling an API call.
|
||||
// See WithCallOverride.
|
||||
type APICallFunc func(ctx context.Context, service, method string, in, out proto.Message) error
|
||||
|
||||
// WithAPICallFunc returns a copy of the parent context
|
||||
// that will cause API calls to invoke f instead of their normal operation.
|
||||
//
|
||||
// This is intended for advanced users only.
|
||||
func WithAPICallFunc(ctx context.Context, f APICallFunc) context.Context {
|
||||
return internal.WithCallOverride(ctx, internal.CallOverrideFunc(f))
|
||||
}
|
||||
|
||||
// APICall performs an API call.
|
||||
//
|
||||
// This is not intended for general use; it is exported for use in conjunction
|
||||
// with WithAPICallFunc.
|
||||
func APICall(ctx context.Context, service, method string, in, out proto.Message) error {
|
||||
return internal.Call(ctx, service, method, in, out)
|
||||
}
|
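A minimal sketch of the GeoPoint type added in appengine.go above; the coordinates are illustrative.

package main

import (
	"fmt"

	"google.golang.org/appengine"
)

func main() {
	// Valid reports whether the point lies within [-90, 90] latitude
	// and [-180, 180] longitude.
	p := appengine.GeoPoint{Lat: 48.858, Lng: 2.294}
	fmt.Println(p.Valid()) // true

	bad := appengine.GeoPoint{Lat: 123, Lng: 0}
	fmt.Println(bad.Valid()) // false
}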
56
vendor/google.golang.org/appengine/appengine_vm.go
generated
vendored
Normal file
|
@ -0,0 +1,56 @@
|
|||
// Copyright 2015 Google Inc. All rights reserved.
|
||||
// Use of this source code is governed by the Apache 2.0
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build !appengine
|
||||
|
||||
package appengine
|
||||
|
||||
import (
|
||||
"golang.org/x/net/context"
|
||||
|
||||
"google.golang.org/appengine/internal"
|
||||
)
|
||||
|
||||
// The comment below must not be changed.
|
||||
// It is used by go-app-builder to recognise that this package has
|
||||
// the Main function to use in the synthetic main.
|
||||
// The gophers party all night; the rabbits provide the beats.
|
||||
|
||||
// Main is the principal entry point for a Managed VMs app.
|
||||
// It installs a trivial health checker if one isn't already registered,
|
||||
// and starts listening on port 8080 (overridden by the $PORT environment
|
||||
// variable).
|
||||
//
|
||||
// See https://cloud.google.com/appengine/docs/managed-vms/custom-runtimes#health_check_requests
|
||||
// for details on how to do your own health checking.
|
||||
//
|
||||
// Main never returns.
|
||||
//
|
||||
// Main is designed so that the app's main package looks like this:
|
||||
//
|
||||
// package main
|
||||
//
|
||||
// import (
|
||||
// "google.golang.org/appengine"
|
||||
//
|
||||
// _ "myapp/package0"
|
||||
// _ "myapp/package1"
|
||||
// )
|
||||
//
|
||||
// func main() {
|
||||
// appengine.Main()
|
||||
// }
|
||||
//
|
||||
// The "myapp/packageX" packages are expected to register HTTP handlers
|
||||
// in their init functions.
|
||||
func Main() {
|
||||
internal.Main()
|
||||
}
|
||||
|
||||
// BackgroundContext returns a context not associated with a request.
|
||||
// This should only be used when not servicing a request.
|
||||
// This only works on Managed VMs.
|
||||
func BackgroundContext() context.Context {
|
||||
return internal.BackgroundContext()
|
||||
}
|
46
vendor/google.golang.org/appengine/errors.go
generated
vendored
Normal file
|
@ -0,0 +1,46 @@
|
|||
// Copyright 2011 Google Inc. All rights reserved.
|
||||
// Use of this source code is governed by the Apache 2.0
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// This file provides error functions for common API failure modes.
|
||||
|
||||
package appengine
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"google.golang.org/appengine/internal"
|
||||
)
|
||||
|
||||
// IsOverQuota reports whether err represents an API call failure
|
||||
// due to insufficient available quota.
|
||||
func IsOverQuota(err error) bool {
|
||||
callErr, ok := err.(*internal.CallError)
|
||||
return ok && callErr.Code == 4
|
||||
}
|
||||
|
||||
// MultiError is returned by batch operations when there are errors with
|
||||
// particular elements. Errors will be in a one-to-one correspondence with
|
||||
// the input elements; successful elements will have a nil entry.
|
||||
type MultiError []error
|
||||
|
||||
func (m MultiError) Error() string {
|
||||
s, n := "", 0
|
||||
for _, e := range m {
|
||||
if e != nil {
|
||||
if n == 0 {
|
||||
s = e.Error()
|
||||
}
|
||||
n++
|
||||
}
|
||||
}
|
||||
switch n {
|
||||
case 0:
|
||||
return "(0 errors)"
|
||||
case 1:
|
||||
return s
|
||||
case 2:
|
||||
return s + " (and 1 other error)"
|
||||
}
|
||||
return fmt.Sprintf("%s (and %d other errors)", s, n-1)
|
||||
}
|
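A minimal sketch of consuming the MultiError type defined above; datastore.PutMulti is used as a typical producer here, on the assumption that a batch call reports per-element failures this way (types and names are illustrative).

package sample

import (
	"golang.org/x/net/context"

	"google.golang.org/appengine"
	"google.golang.org/appengine/datastore"
	"google.golang.org/appengine/log"
)

type row struct{ N int }

// putAll stores all rows and logs which individual puts failed.
func putAll(ctx context.Context, keys []*datastore.Key, rows []row) error {
	_, err := datastore.PutMulti(ctx, keys, rows)
	if me, ok := err.(appengine.MultiError); ok {
		// One entry per input element; nil entries succeeded.
		for i, e := range me {
			if e != nil {
				log.Errorf(ctx, "put %d failed: %v", i, e)
			}
		}
	}
	return err
}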
142
vendor/google.golang.org/appengine/identity.go
generated
vendored
Normal file
|
@ -0,0 +1,142 @@
|
|||
// Copyright 2011 Google Inc. All rights reserved.
|
||||
// Use of this source code is governed by the Apache 2.0
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package appengine
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
|
||||
"google.golang.org/appengine/internal"
|
||||
pb "google.golang.org/appengine/internal/app_identity"
|
||||
modpb "google.golang.org/appengine/internal/modules"
|
||||
)
|
||||
|
||||
// AppID returns the application ID for the current application.
|
||||
// The string will be a plain application ID (e.g. "appid"), with a
|
||||
// domain prefix for custom domain deployments (e.g. "example.com:appid").
|
||||
func AppID(c context.Context) string { return internal.AppID(c) }
|
||||
|
||||
// DefaultVersionHostname returns the standard hostname of the default version
|
||||
// of the current application (e.g. "my-app.appspot.com"). This is suitable for
|
||||
// use in constructing URLs.
|
||||
func DefaultVersionHostname(c context.Context) string {
|
||||
return internal.DefaultVersionHostname(c)
|
||||
}
|
||||
|
||||
// ModuleName returns the module name of the current instance.
|
||||
func ModuleName(c context.Context) string {
|
||||
return internal.ModuleName(c)
|
||||
}
|
||||
|
||||
// ModuleHostname returns a hostname of a module instance.
|
||||
// If module is the empty string, it refers to the module of the current instance.
|
||||
// If version is empty, it refers to the version of the current instance if valid,
|
||||
// or the default version of the module of the current instance.
|
||||
// If instance is empty, ModuleHostname returns the load-balancing hostname.
|
||||
func ModuleHostname(c context.Context, module, version, instance string) (string, error) {
|
||||
req := &modpb.GetHostnameRequest{}
|
||||
if module != "" {
|
||||
req.Module = &module
|
||||
}
|
||||
if version != "" {
|
||||
req.Version = &version
|
||||
}
|
||||
if instance != "" {
|
||||
req.Instance = &instance
|
||||
}
|
||||
res := &modpb.GetHostnameResponse{}
|
||||
if err := internal.Call(c, "modules", "GetHostname", req, res); err != nil {
|
||||
return "", err
|
||||
}
|
||||
return *res.Hostname, nil
|
||||
}
|
||||
|
||||
// VersionID returns the version ID for the current application.
|
||||
// It will be of the form "X.Y", where X is specified in app.yaml,
|
||||
// and Y is a number generated when each version of the app is uploaded.
|
||||
// It does not include a module name.
|
||||
func VersionID(c context.Context) string { return internal.VersionID(c) }
|
||||
|
||||
// InstanceID returns a mostly-unique identifier for this instance.
|
||||
func InstanceID() string { return internal.InstanceID() }
|
||||
|
||||
// Datacenter returns an identifier for the datacenter that the instance is running in.
|
||||
func Datacenter(c context.Context) string { return internal.Datacenter(c) }
|
||||
|
||||
// ServerSoftware returns the App Engine release version.
|
||||
// In production, it looks like "Google App Engine/X.Y.Z".
|
||||
// In the development appserver, it looks like "Development/X.Y".
|
||||
func ServerSoftware() string { return internal.ServerSoftware() }
|
||||
|
||||
// RequestID returns a string that uniquely identifies the request.
|
||||
func RequestID(c context.Context) string { return internal.RequestID(c) }
|
||||
|
||||
// AccessToken generates an OAuth2 access token for the specified scopes on
|
||||
// behalf of service account of this application. This token will expire after
|
||||
// the returned time.
|
||||
func AccessToken(c context.Context, scopes ...string) (token string, expiry time.Time, err error) {
|
||||
req := &pb.GetAccessTokenRequest{Scope: scopes}
|
||||
res := &pb.GetAccessTokenResponse{}
|
||||
|
||||
err = internal.Call(c, "app_identity_service", "GetAccessToken", req, res)
|
||||
if err != nil {
|
||||
return "", time.Time{}, err
|
||||
}
|
||||
return res.GetAccessToken(), time.Unix(res.GetExpirationTime(), 0), nil
|
||||
}
|
||||
|
||||
// Certificate represents a public certificate for the app.
|
||||
type Certificate struct {
|
||||
KeyName string
|
||||
Data []byte // PEM-encoded X.509 certificate
|
||||
}
|
||||
|
||||
// PublicCertificates retrieves the public certificates for the app.
|
||||
// They can be used to verify a signature returned by SignBytes.
|
||||
func PublicCertificates(c context.Context) ([]Certificate, error) {
|
||||
req := &pb.GetPublicCertificateForAppRequest{}
|
||||
res := &pb.GetPublicCertificateForAppResponse{}
|
||||
if err := internal.Call(c, "app_identity_service", "GetPublicCertificatesForApp", req, res); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var cs []Certificate
|
||||
for _, pc := range res.PublicCertificateList {
|
||||
cs = append(cs, Certificate{
|
||||
KeyName: pc.GetKeyName(),
|
||||
Data: []byte(pc.GetX509CertificatePem()),
|
||||
})
|
||||
}
|
||||
return cs, nil
|
||||
}
|
||||
|
||||
// ServiceAccount returns a string representing the service account name, in
|
||||
// the form of an email address (typically app_id@appspot.gserviceaccount.com).
|
||||
func ServiceAccount(c context.Context) (string, error) {
|
||||
req := &pb.GetServiceAccountNameRequest{}
|
||||
res := &pb.GetServiceAccountNameResponse{}
|
||||
|
||||
err := internal.Call(c, "app_identity_service", "GetServiceAccountName", req, res)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return res.GetServiceAccountName(), err
|
||||
}
|
||||
|
||||
// SignBytes signs bytes using a private key unique to your application.
|
||||
func SignBytes(c context.Context, bytes []byte) (keyName string, signature []byte, err error) {
|
||||
req := &pb.SignForAppRequest{BytesToSign: bytes}
|
||||
res := &pb.SignForAppResponse{}
|
||||
|
||||
if err := internal.Call(c, "app_identity_service", "SignForApp", req, res); err != nil {
|
||||
return "", nil, err
|
||||
}
|
||||
return res.GetKeyName(), res.GetSignatureBytes(), nil
|
||||
}
|
||||
|
||||
func init() {
|
||||
internal.RegisterErrorCodeMap("app_identity_service", pb.AppIdentityServiceError_ErrorCode_name)
|
||||
internal.RegisterErrorCodeMap("modules", modpb.ModulesServiceError_ErrorCode_name)
|
||||
}
|
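A minimal sketch of using AccessToken from identity.go above to authorize an outgoing request with the app's own service account; the scope and the use of urlfetch are illustrative.

package sample

import (
	"net/http"

	"golang.org/x/net/context"

	"google.golang.org/appengine"
	"google.golang.org/appengine/urlfetch"
)

// authedGet performs a GET with an OAuth2 token minted for the app's
// service account; the expiry returned by AccessToken is ignored here.
func authedGet(ctx context.Context, url string) (*http.Response, error) {
	tok, _, err := appengine.AccessToken(ctx, "https://www.googleapis.com/auth/devstorage.read_only")
	if err != nil {
		return nil, err
	}
	req, err := http.NewRequest("GET", url, nil)
	if err != nil {
		return nil, err
	}
	req.Header.Set("Authorization", "Bearer "+tok)
	return urlfetch.Client(ctx).Do(req)
}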
640
vendor/google.golang.org/appengine/internal/api.go
generated
vendored
Normal file
|
@ -0,0 +1,640 @@
|
|||
// Copyright 2011 Google Inc. All rights reserved.
|
||||
// Use of this source code is governed by the Apache 2.0
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build !appengine
|
||||
|
||||
package internal
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"os"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/golang/protobuf/proto"
|
||||
netcontext "golang.org/x/net/context"
|
||||
|
||||
basepb "google.golang.org/appengine/internal/base"
|
||||
logpb "google.golang.org/appengine/internal/log"
|
||||
remotepb "google.golang.org/appengine/internal/remote_api"
|
||||
)
|
||||
|
||||
const (
|
||||
apiPath = "/rpc_http"
|
||||
)
|
||||
|
||||
var (
|
||||
// Incoming headers.
|
||||
ticketHeader = http.CanonicalHeaderKey("X-AppEngine-API-Ticket")
|
||||
dapperHeader = http.CanonicalHeaderKey("X-Google-DapperTraceInfo")
|
||||
traceHeader = http.CanonicalHeaderKey("X-Cloud-Trace-Context")
|
||||
curNamespaceHeader = http.CanonicalHeaderKey("X-AppEngine-Current-Namespace")
|
||||
userIPHeader = http.CanonicalHeaderKey("X-AppEngine-User-IP")
|
||||
remoteAddrHeader = http.CanonicalHeaderKey("X-AppEngine-Remote-Addr")
|
||||
|
||||
// Outgoing headers.
|
||||
apiEndpointHeader = http.CanonicalHeaderKey("X-Google-RPC-Service-Endpoint")
|
||||
apiEndpointHeaderValue = []string{"app-engine-apis"}
|
||||
apiMethodHeader = http.CanonicalHeaderKey("X-Google-RPC-Service-Method")
|
||||
apiMethodHeaderValue = []string{"/VMRemoteAPI.CallRemoteAPI"}
|
||||
apiDeadlineHeader = http.CanonicalHeaderKey("X-Google-RPC-Service-Deadline")
|
||||
apiContentType = http.CanonicalHeaderKey("Content-Type")
|
||||
apiContentTypeValue = []string{"application/octet-stream"}
|
||||
logFlushHeader = http.CanonicalHeaderKey("X-AppEngine-Log-Flush-Count")
|
||||
|
||||
apiHTTPClient = &http.Client{
|
||||
Transport: &http.Transport{
|
||||
Proxy: http.ProxyFromEnvironment,
|
||||
Dial: limitDial,
|
||||
},
|
||||
}
|
||||
)
|
||||
|
||||
func apiURL() *url.URL {
|
||||
host, port := "appengine.googleapis.internal", "10001"
|
||||
if h := os.Getenv("API_HOST"); h != "" {
|
||||
host = h
|
||||
}
|
||||
if p := os.Getenv("API_PORT"); p != "" {
|
||||
port = p
|
||||
}
|
||||
return &url.URL{
|
||||
Scheme: "http",
|
||||
Host: host + ":" + port,
|
||||
Path: apiPath,
|
||||
}
|
||||
}
|
||||
|
||||
func handleHTTP(w http.ResponseWriter, r *http.Request) {
|
||||
c := &context{
|
||||
req: r,
|
||||
outHeader: w.Header(),
|
||||
apiURL: apiURL(),
|
||||
}
|
||||
stopFlushing := make(chan int)
|
||||
|
||||
ctxs.Lock()
|
||||
ctxs.m[r] = c
|
||||
ctxs.Unlock()
|
||||
defer func() {
|
||||
ctxs.Lock()
|
||||
delete(ctxs.m, r)
|
||||
ctxs.Unlock()
|
||||
}()
|
||||
|
||||
// Patch up RemoteAddr so it looks reasonable.
|
||||
if addr := r.Header.Get(userIPHeader); addr != "" {
|
||||
r.RemoteAddr = addr
|
||||
} else if addr = r.Header.Get(remoteAddrHeader); addr != "" {
|
||||
r.RemoteAddr = addr
|
||||
} else {
|
||||
// Should not normally reach here, but pick a sensible default anyway.
|
||||
r.RemoteAddr = "127.0.0.1"
|
||||
}
|
||||
// The address in the headers will most likely be of these forms:
|
||||
// 123.123.123.123
|
||||
// 2001:db8::1
|
||||
// net/http.Request.RemoteAddr is specified to be in "IP:port" form.
|
||||
if _, _, err := net.SplitHostPort(r.RemoteAddr); err != nil {
|
||||
// Assume the remote address is only a host; add a default port.
|
||||
r.RemoteAddr = net.JoinHostPort(r.RemoteAddr, "80")
|
||||
}
|
||||
|
||||
// Start goroutine responsible for flushing app logs.
|
||||
// This is done after adding c to ctx.m (and stopped before removing it)
|
||||
// because flushing logs requires making an API call.
|
||||
go c.logFlusher(stopFlushing)
|
||||
|
||||
executeRequestSafely(c, r)
|
||||
c.outHeader = nil // make sure header changes aren't respected any more
|
||||
|
||||
stopFlushing <- 1 // any logging beyond this point will be dropped
|
||||
|
||||
// Flush any pending logs asynchronously.
|
||||
c.pendingLogs.Lock()
|
||||
flushes := c.pendingLogs.flushes
|
||||
if len(c.pendingLogs.lines) > 0 {
|
||||
flushes++
|
||||
}
|
||||
c.pendingLogs.Unlock()
|
||||
go c.flushLog(false)
|
||||
w.Header().Set(logFlushHeader, strconv.Itoa(flushes))
|
||||
|
||||
// Avoid nil Write call if c.Write is never called.
|
||||
if c.outCode != 0 {
|
||||
w.WriteHeader(c.outCode)
|
||||
}
|
||||
if c.outBody != nil {
|
||||
w.Write(c.outBody)
|
||||
}
|
||||
}
|
||||
|
||||
func executeRequestSafely(c *context, r *http.Request) {
|
||||
defer func() {
|
||||
if x := recover(); x != nil {
|
||||
logf(c, 4, "%s", renderPanic(x)) // 4 == critical
|
||||
c.outCode = 500
|
||||
}
|
||||
}()
|
||||
|
||||
http.DefaultServeMux.ServeHTTP(c, r)
|
||||
}
|
||||
|
||||
func renderPanic(x interface{}) string {
|
||||
buf := make([]byte, 16<<10) // 16 KB should be plenty
|
||||
buf = buf[:runtime.Stack(buf, false)]
|
||||
|
||||
// Remove the first few stack frames:
|
||||
// this func
|
||||
// the recover closure in the caller
|
||||
// That will root the stack trace at the site of the panic.
|
||||
const (
|
||||
skipStart = "internal.renderPanic"
|
||||
skipFrames = 2
|
||||
)
|
||||
start := bytes.Index(buf, []byte(skipStart))
|
||||
p := start
|
||||
for i := 0; i < skipFrames*2 && p+1 < len(buf); i++ {
|
||||
p = bytes.IndexByte(buf[p+1:], '\n') + p + 1
|
||||
if p < 0 {
|
||||
break
|
||||
}
|
||||
}
|
||||
if p >= 0 {
|
||||
// buf[start:p+1] is the block to remove.
|
||||
// Copy buf[p+1:] over buf[start:] and shrink buf.
|
||||
copy(buf[start:], buf[p+1:])
|
||||
buf = buf[:len(buf)-(p+1-start)]
|
||||
}
|
||||
|
||||
// Add panic heading.
|
||||
head := fmt.Sprintf("panic: %v\n\n", x)
|
||||
if len(head) > len(buf) {
|
||||
// Extremely unlikely to happen.
|
||||
return head
|
||||
}
|
||||
copy(buf[len(head):], buf)
|
||||
copy(buf, head)
|
||||
|
||||
return string(buf)
|
||||
}
|
||||
|
||||
var ctxs = struct {
|
||||
sync.Mutex
|
||||
m map[*http.Request]*context
|
||||
bg *context // background context, lazily initialized
|
||||
// dec is used by tests to decorate the netcontext.Context returned
|
||||
// for a given request. This allows tests to add overrides (such as
|
||||
// WithAppIDOverride) to the context. The map is nil outside tests.
|
||||
dec map[*http.Request]func(netcontext.Context) netcontext.Context
|
||||
}{
|
||||
m: make(map[*http.Request]*context),
|
||||
}
|
||||
|
||||
// context represents the context of an in-flight HTTP request.
|
||||
// It implements the appengine.Context and http.ResponseWriter interfaces.
|
||||
type context struct {
|
||||
req *http.Request
|
||||
|
||||
outCode int
|
||||
outHeader http.Header
|
||||
outBody []byte
|
||||
|
||||
pendingLogs struct {
|
||||
sync.Mutex
|
||||
lines []*logpb.UserAppLogLine
|
||||
flushes int
|
||||
}
|
||||
|
||||
apiURL *url.URL
|
||||
}
|
||||
|
||||
var contextKey = "holds a *context"
|
||||
|
||||
func fromContext(ctx netcontext.Context) *context {
|
||||
c, _ := ctx.Value(&contextKey).(*context)
|
||||
return c
|
||||
}
|
||||
|
||||
func withContext(parent netcontext.Context, c *context) netcontext.Context {
|
||||
ctx := netcontext.WithValue(parent, &contextKey, c)
|
||||
if ns := c.req.Header.Get(curNamespaceHeader); ns != "" {
|
||||
ctx = withNamespace(ctx, ns)
|
||||
}
|
||||
return ctx
|
||||
}
|
||||
|
||||
func toContext(c *context) netcontext.Context {
|
||||
return withContext(netcontext.Background(), c)
|
||||
}
|
||||
|
||||
func IncomingHeaders(ctx netcontext.Context) http.Header {
|
||||
if c := fromContext(ctx); c != nil {
|
||||
return c.req.Header
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func WithContext(parent netcontext.Context, req *http.Request) netcontext.Context {
|
||||
ctxs.Lock()
|
||||
c := ctxs.m[req]
|
||||
d := ctxs.dec[req]
|
||||
ctxs.Unlock()
|
||||
|
||||
if d != nil {
|
||||
parent = d(parent)
|
||||
}
|
||||
|
||||
if c == nil {
|
||||
// Someone passed in an http.Request that is not in-flight.
|
||||
// We panic here rather than panicking at a later point
|
||||
// so that stack traces will be more sensible.
|
||||
log.Panic("appengine: NewContext passed an unknown http.Request")
|
||||
}
|
||||
return withContext(parent, c)
|
||||
}
|
||||
|
||||
func BackgroundContext() netcontext.Context {
|
||||
ctxs.Lock()
|
||||
defer ctxs.Unlock()
|
||||
|
||||
if ctxs.bg != nil {
|
||||
return toContext(ctxs.bg)
|
||||
}
|
||||
|
||||
// Compute background security ticket.
|
||||
appID := partitionlessAppID()
|
||||
escAppID := strings.Replace(strings.Replace(appID, ":", "_", -1), ".", "_", -1)
|
||||
majVersion := VersionID(nil)
|
||||
if i := strings.Index(majVersion, "."); i > 0 {
|
||||
majVersion = majVersion[:i]
|
||||
}
|
||||
ticket := fmt.Sprintf("%s/%s.%s.%s", escAppID, ModuleName(nil), majVersion, InstanceID())
|
||||
|
||||
ctxs.bg = &context{
|
||||
req: &http.Request{
|
||||
Header: http.Header{
|
||||
ticketHeader: []string{ticket},
|
||||
},
|
||||
},
|
||||
apiURL: apiURL(),
|
||||
}
|
||||
|
||||
// TODO(dsymonds): Wire up the shutdown handler to do a final flush.
|
||||
go ctxs.bg.logFlusher(make(chan int))
|
||||
|
||||
return toContext(ctxs.bg)
|
||||
}
|
||||
|
||||
// RegisterTestRequest registers the HTTP request req for testing, such that
// any API calls are sent to the provided URL. It returns a closure to delete
// the registration.
// It should only be used by the aetest package.
func RegisterTestRequest(req *http.Request, apiURL *url.URL, decorate func(netcontext.Context) netcontext.Context) func() {
	c := &context{
		req:    req,
		apiURL: apiURL,
	}
	ctxs.Lock()
	defer ctxs.Unlock()
	if _, ok := ctxs.m[req]; ok {
		log.Panic("req already associated with context")
	}
	if _, ok := ctxs.dec[req]; ok {
		log.Panic("req already associated with context")
	}
	if ctxs.dec == nil {
		ctxs.dec = make(map[*http.Request]func(netcontext.Context) netcontext.Context)
	}
	ctxs.m[req] = c
	ctxs.dec[req] = decorate

	return func() {
		ctxs.Lock()
		delete(ctxs.m, req)
		delete(ctxs.dec, req)
		ctxs.Unlock()
	}
}
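
// exampleRegisterTestRequest is a hedged usage sketch, not part of the
// upstream file: a test registers a request against a fake API endpoint and
// later invokes the returned closure to undo the registration. The endpoint
// URL and the app-ID override value are assumptions.
func exampleRegisterTestRequest(req *http.Request) (netcontext.Context, func()) {
	fakeAPI := &url.URL{Scheme: "http", Host: "localhost:12345", Path: "/"} // stub API server
	release := RegisterTestRequest(req, fakeAPI, func(ctx netcontext.Context) netcontext.Context {
		return WithAppIDOverride(ctx, "dev~testapp") // test-only decoration
	})
	// The caller uses the derived context for API calls and invokes release
	// when finished, mirroring what the aetest package does.
	return WithContext(netcontext.Background(), req), release
}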
|
||||
|
||||
var errTimeout = &CallError{
|
||||
Detail: "Deadline exceeded",
|
||||
Code: int32(remotepb.RpcError_CANCELLED),
|
||||
Timeout: true,
|
||||
}
|
||||
|
||||
func (c *context) Header() http.Header { return c.outHeader }

// Copied from $GOROOT/src/pkg/net/http/transfer.go. Some response status
// codes do not permit a response body (nor response entity headers such as
// Content-Length, Content-Type, etc).
func bodyAllowedForStatus(status int) bool {
	switch {
	case status >= 100 && status <= 199:
		return false
	case status == 204:
		return false
	case status == 304:
		return false
	}
	return true
}
|
||||
|
||||
func (c *context) Write(b []byte) (int, error) {
|
||||
if c.outCode == 0 {
|
||||
c.WriteHeader(http.StatusOK)
|
||||
}
|
||||
if len(b) > 0 && !bodyAllowedForStatus(c.outCode) {
|
||||
return 0, http.ErrBodyNotAllowed
|
||||
}
|
||||
c.outBody = append(c.outBody, b...)
|
||||
return len(b), nil
|
||||
}
|
||||
|
||||
func (c *context) WriteHeader(code int) {
|
||||
if c.outCode != 0 {
|
||||
logf(c, 3, "WriteHeader called multiple times on request.") // error level
|
||||
return
|
||||
}
|
||||
c.outCode = code
|
||||
}
|
||||
|
||||
func (c *context) post(body []byte, timeout time.Duration) (b []byte, err error) {
|
||||
hreq := &http.Request{
|
||||
Method: "POST",
|
||||
URL: c.apiURL,
|
||||
Header: http.Header{
|
||||
apiEndpointHeader: apiEndpointHeaderValue,
|
||||
apiMethodHeader: apiMethodHeaderValue,
|
||||
apiContentType: apiContentTypeValue,
|
||||
apiDeadlineHeader: []string{strconv.FormatFloat(timeout.Seconds(), 'f', -1, 64)},
|
||||
},
|
||||
Body: ioutil.NopCloser(bytes.NewReader(body)),
|
||||
ContentLength: int64(len(body)),
|
||||
Host: c.apiURL.Host,
|
||||
}
|
||||
if info := c.req.Header.Get(dapperHeader); info != "" {
|
||||
hreq.Header.Set(dapperHeader, info)
|
||||
}
|
||||
if info := c.req.Header.Get(traceHeader); info != "" {
|
||||
hreq.Header.Set(traceHeader, info)
|
||||
}
|
||||
|
||||
tr := apiHTTPClient.Transport.(*http.Transport)
|
||||
|
||||
var timedOut int32 // atomic; set to 1 if timed out
|
||||
t := time.AfterFunc(timeout, func() {
|
||||
atomic.StoreInt32(&timedOut, 1)
|
||||
tr.CancelRequest(hreq)
|
||||
})
|
||||
defer t.Stop()
|
||||
defer func() {
|
||||
// Check if timeout was exceeded.
|
||||
if atomic.LoadInt32(&timedOut) != 0 {
|
||||
err = errTimeout
|
||||
}
|
||||
}()
|
||||
|
||||
hresp, err := apiHTTPClient.Do(hreq)
|
||||
if err != nil {
|
||||
return nil, &CallError{
|
||||
Detail: fmt.Sprintf("service bridge HTTP failed: %v", err),
|
||||
Code: int32(remotepb.RpcError_UNKNOWN),
|
||||
}
|
||||
}
|
||||
defer hresp.Body.Close()
|
||||
hrespBody, err := ioutil.ReadAll(hresp.Body)
|
||||
if hresp.StatusCode != 200 {
|
||||
return nil, &CallError{
|
||||
Detail: fmt.Sprintf("service bridge returned HTTP %d (%q)", hresp.StatusCode, hrespBody),
|
||||
Code: int32(remotepb.RpcError_UNKNOWN),
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
return nil, &CallError{
|
||||
Detail: fmt.Sprintf("service bridge response bad: %v", err),
|
||||
Code: int32(remotepb.RpcError_UNKNOWN),
|
||||
}
|
||||
}
|
||||
return hrespBody, nil
|
||||
}
|
||||
|
||||
func Call(ctx netcontext.Context, service, method string, in, out proto.Message) error {
|
||||
if f, ctx, ok := callOverrideFromContext(ctx); ok {
|
||||
return f(ctx, service, method, in, out)
|
||||
}
|
||||
|
||||
// Handle already-done contexts quickly.
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return ctx.Err()
|
||||
default:
|
||||
}
|
||||
|
||||
c := fromContext(ctx)
|
||||
if c == nil {
|
||||
// Give a good error message rather than a panic lower down.
|
||||
return errors.New("not an App Engine context")
|
||||
}
|
||||
|
||||
// Apply transaction modifications if we're in a transaction.
|
||||
if t := transactionFromContext(ctx); t != nil {
|
||||
if t.finished {
|
||||
return errors.New("transaction context has expired")
|
||||
}
|
||||
applyTransaction(in, &t.transaction)
|
||||
}
|
||||
|
||||
// Default RPC timeout is 60s.
|
||||
timeout := 60 * time.Second
|
||||
if deadline, ok := ctx.Deadline(); ok {
|
||||
timeout = deadline.Sub(time.Now())
|
||||
}
|
||||
|
||||
data, err := proto.Marshal(in)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
ticket := c.req.Header.Get(ticketHeader)
|
||||
req := &remotepb.Request{
|
||||
ServiceName: &service,
|
||||
Method: &method,
|
||||
Request: data,
|
||||
RequestId: &ticket,
|
||||
}
|
||||
hreqBody, err := proto.Marshal(req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
hrespBody, err := c.post(hreqBody, timeout)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
res := &remotepb.Response{}
|
||||
if err := proto.Unmarshal(hrespBody, res); err != nil {
|
||||
return err
|
||||
}
|
||||
if res.RpcError != nil {
|
||||
ce := &CallError{
|
||||
Detail: res.RpcError.GetDetail(),
|
||||
Code: *res.RpcError.Code,
|
||||
}
|
||||
switch remotepb.RpcError_ErrorCode(ce.Code) {
|
||||
case remotepb.RpcError_CANCELLED, remotepb.RpcError_DEADLINE_EXCEEDED:
|
||||
ce.Timeout = true
|
||||
}
|
||||
return ce
|
||||
}
|
||||
if res.ApplicationError != nil {
|
||||
return &APIError{
|
||||
Service: *req.ServiceName,
|
||||
Detail: res.ApplicationError.GetDetail(),
|
||||
Code: *res.ApplicationError.Code,
|
||||
}
|
||||
}
|
||||
if res.Exception != nil || res.JavaException != nil {
|
||||
// This shouldn't happen, but let's be defensive.
|
||||
return &CallError{
|
||||
Detail: "service bridge returned exception",
|
||||
Code: int32(remotepb.RpcError_UNKNOWN),
|
||||
}
|
||||
}
|
||||
return proto.Unmarshal(res.Response, out)
|
||||
}
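
// exampleCall is an illustrative sketch, not part of the upstream file,
// showing how service wrappers in this package drive Call: marshal-able
// request/response protos plus a context tied to an in-flight request. The
// "example"/"Echo" service and method names are assumptions, not real APIs.
func exampleCall(ctx netcontext.Context) (string, error) {
	in := &basepb.StringProto{Value: proto.String("ping")}
	out := &basepb.StringProto{}
	if err := Call(ctx, "example", "Echo", in, out); err != nil {
		return "", err
	}
	return out.GetValue(), nil
}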
|
||||
|
||||
func (c *context) Request() *http.Request {
|
||||
return c.req
|
||||
}
|
||||
|
||||
func (c *context) addLogLine(ll *logpb.UserAppLogLine) {
|
||||
// Truncate long log lines.
|
||||
// TODO(dsymonds): Check if this is still necessary.
|
||||
const lim = 8 << 10
|
||||
if len(*ll.Message) > lim {
|
||||
suffix := fmt.Sprintf("...(length %d)", len(*ll.Message))
|
||||
ll.Message = proto.String((*ll.Message)[:lim-len(suffix)] + suffix)
|
||||
}
|
||||
|
||||
c.pendingLogs.Lock()
|
||||
c.pendingLogs.lines = append(c.pendingLogs.lines, ll)
|
||||
c.pendingLogs.Unlock()
|
||||
}
|
||||
|
||||
var logLevelName = map[int64]string{
|
||||
0: "DEBUG",
|
||||
1: "INFO",
|
||||
2: "WARNING",
|
||||
3: "ERROR",
|
||||
4: "CRITICAL",
|
||||
}
|
||||
|
||||
func logf(c *context, level int64, format string, args ...interface{}) {
|
||||
s := fmt.Sprintf(format, args...)
|
||||
s = strings.TrimRight(s, "\n") // Remove any trailing newline characters.
|
||||
c.addLogLine(&logpb.UserAppLogLine{
|
||||
TimestampUsec: proto.Int64(time.Now().UnixNano() / 1e3),
|
||||
Level: &level,
|
||||
Message: &s,
|
||||
})
|
||||
log.Print(logLevelName[level] + ": " + s)
|
||||
}
|
||||
|
||||
// flushLog attempts to flush any pending logs to the appserver.
|
||||
// It should not be called concurrently.
|
||||
func (c *context) flushLog(force bool) (flushed bool) {
|
||||
c.pendingLogs.Lock()
|
||||
// Grab up to 30 MB. We can get away with up to 32 MB, but let's be cautious.
|
||||
n, rem := 0, 30<<20
|
||||
for ; n < len(c.pendingLogs.lines); n++ {
|
||||
ll := c.pendingLogs.lines[n]
|
||||
// Each log line will require about 3 bytes of overhead.
|
||||
nb := proto.Size(ll) + 3
|
||||
if nb > rem {
|
||||
break
|
||||
}
|
||||
rem -= nb
|
||||
}
|
||||
lines := c.pendingLogs.lines[:n]
|
||||
c.pendingLogs.lines = c.pendingLogs.lines[n:]
|
||||
c.pendingLogs.Unlock()
|
||||
|
||||
if len(lines) == 0 && !force {
|
||||
// Nothing to flush.
|
||||
return false
|
||||
}
|
||||
|
||||
rescueLogs := false
|
||||
defer func() {
|
||||
if rescueLogs {
|
||||
c.pendingLogs.Lock()
|
||||
c.pendingLogs.lines = append(lines, c.pendingLogs.lines...)
|
||||
c.pendingLogs.Unlock()
|
||||
}
|
||||
}()
|
||||
|
||||
buf, err := proto.Marshal(&logpb.UserAppLogGroup{
|
||||
LogLine: lines,
|
||||
})
|
||||
if err != nil {
|
||||
log.Printf("internal.flushLog: marshaling UserAppLogGroup: %v", err)
|
||||
rescueLogs = true
|
||||
return false
|
||||
}
|
||||
|
||||
req := &logpb.FlushRequest{
|
||||
Logs: buf,
|
||||
}
|
||||
res := &basepb.VoidProto{}
|
||||
c.pendingLogs.Lock()
|
||||
c.pendingLogs.flushes++
|
||||
c.pendingLogs.Unlock()
|
||||
if err := Call(toContext(c), "logservice", "Flush", req, res); err != nil {
|
||||
log.Printf("internal.flushLog: Flush RPC: %v", err)
|
||||
rescueLogs = true
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
const (
|
||||
// Log flushing parameters.
|
||||
flushInterval = 1 * time.Second
|
||||
forceFlushInterval = 60 * time.Second
|
||||
)
|
||||
|
||||
func (c *context) logFlusher(stop <-chan int) {
|
||||
lastFlush := time.Now()
|
||||
tick := time.NewTicker(flushInterval)
|
||||
for {
|
||||
select {
|
||||
case <-stop:
|
||||
// Request finished.
|
||||
tick.Stop()
|
||||
return
|
||||
case <-tick.C:
|
||||
force := time.Now().Sub(lastFlush) > forceFlushInterval
|
||||
if c.flushLog(force) {
|
||||
lastFlush = time.Now()
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func ContextForTesting(req *http.Request) netcontext.Context {
|
||||
return toContext(&context{req: req})
|
||||
}
|
133
vendor/google.golang.org/appengine/internal/api_classic.go
generated
vendored
Normal file
133
vendor/google.golang.org/appengine/internal/api_classic.go
generated
vendored
Normal file
|
@ -0,0 +1,133 @@
|
|||
// Copyright 2015 Google Inc. All rights reserved.
|
||||
// Use of this source code is governed by the Apache 2.0
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build appengine
|
||||
|
||||
package internal
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"net/http"
|
||||
"time"
|
||||
|
||||
"appengine"
|
||||
"appengine_internal"
|
||||
basepb "appengine_internal/base"
|
||||
|
||||
"github.com/golang/protobuf/proto"
|
||||
netcontext "golang.org/x/net/context"
|
||||
)
|
||||
|
||||
var contextKey = "holds an appengine.Context"
|
||||
|
||||
func fromContext(ctx netcontext.Context) appengine.Context {
|
||||
c, _ := ctx.Value(&contextKey).(appengine.Context)
|
||||
return c
|
||||
}
|
||||
|
||||
// This is only for classic App Engine adapters.
|
||||
func ClassicContextFromContext(ctx netcontext.Context) appengine.Context {
|
||||
return fromContext(ctx)
|
||||
}
|
||||
|
||||
func withContext(parent netcontext.Context, c appengine.Context) netcontext.Context {
|
||||
ctx := netcontext.WithValue(parent, &contextKey, c)
|
||||
|
||||
s := &basepb.StringProto{}
|
||||
c.Call("__go__", "GetNamespace", &basepb.VoidProto{}, s, nil)
|
||||
if ns := s.GetValue(); ns != "" {
|
||||
ctx = NamespacedContext(ctx, ns)
|
||||
}
|
||||
|
||||
return ctx
|
||||
}
|
||||
|
||||
func IncomingHeaders(ctx netcontext.Context) http.Header {
|
||||
if c := fromContext(ctx); c != nil {
|
||||
if req, ok := c.Request().(*http.Request); ok {
|
||||
return req.Header
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func WithContext(parent netcontext.Context, req *http.Request) netcontext.Context {
|
||||
c := appengine.NewContext(req)
|
||||
return withContext(parent, c)
|
||||
}
|
||||
|
||||
func Call(ctx netcontext.Context, service, method string, in, out proto.Message) error {
|
||||
if f, ctx, ok := callOverrideFromContext(ctx); ok {
|
||||
return f(ctx, service, method, in, out)
|
||||
}
|
||||
|
||||
// Handle already-done contexts quickly.
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return ctx.Err()
|
||||
default:
|
||||
}
|
||||
|
||||
c := fromContext(ctx)
|
||||
if c == nil {
|
||||
// Give a good error message rather than a panic lower down.
|
||||
return errors.New("not an App Engine context")
|
||||
}
|
||||
|
||||
// Apply transaction modifications if we're in a transaction.
|
||||
if t := transactionFromContext(ctx); t != nil {
|
||||
if t.finished {
|
||||
return errors.New("transaction context has expired")
|
||||
}
|
||||
applyTransaction(in, &t.transaction)
|
||||
}
|
||||
|
||||
var opts *appengine_internal.CallOptions
|
||||
if d, ok := ctx.Deadline(); ok {
|
||||
opts = &appengine_internal.CallOptions{
|
||||
Timeout: d.Sub(time.Now()),
|
||||
}
|
||||
}
|
||||
|
||||
err := c.Call(service, method, in, out, opts)
|
||||
switch v := err.(type) {
|
||||
case *appengine_internal.APIError:
|
||||
return &APIError{
|
||||
Service: v.Service,
|
||||
Detail: v.Detail,
|
||||
Code: v.Code,
|
||||
}
|
||||
case *appengine_internal.CallError:
|
||||
return &CallError{
|
||||
Detail: v.Detail,
|
||||
Code: v.Code,
|
||||
Timeout: v.Timeout,
|
||||
}
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func handleHTTP(w http.ResponseWriter, r *http.Request) {
|
||||
panic("handleHTTP called; this should be impossible")
|
||||
}
|
||||
|
||||
func logf(c appengine.Context, level int64, format string, args ...interface{}) {
|
||||
var fn func(format string, args ...interface{})
|
||||
switch level {
|
||||
case 0:
|
||||
fn = c.Debugf
|
||||
case 1:
|
||||
fn = c.Infof
|
||||
case 2:
|
||||
fn = c.Warningf
|
||||
case 3:
|
||||
fn = c.Errorf
|
||||
case 4:
|
||||
fn = c.Criticalf
|
||||
default:
|
||||
// This shouldn't happen.
|
||||
fn = c.Criticalf
|
||||
}
|
||||
fn(format, args...)
|
||||
}
|
101
vendor/google.golang.org/appengine/internal/api_common.go
generated
vendored
Normal file
101
vendor/google.golang.org/appengine/internal/api_common.go
generated
vendored
Normal file
|
@ -0,0 +1,101 @@
|
|||
// Copyright 2015 Google Inc. All rights reserved.
|
||||
// Use of this source code is governed by the Apache 2.0
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package internal
|
||||
|
||||
import (
|
||||
"github.com/golang/protobuf/proto"
|
||||
netcontext "golang.org/x/net/context"
|
||||
)
|
||||
|
||||
type CallOverrideFunc func(ctx netcontext.Context, service, method string, in, out proto.Message) error

var callOverrideKey = "holds []CallOverrideFunc"

func WithCallOverride(ctx netcontext.Context, f CallOverrideFunc) netcontext.Context {
	// We avoid appending to any existing call override
	// so we don't risk overwriting a popped stack below.
	var cofs []CallOverrideFunc
	if uf, ok := ctx.Value(&callOverrideKey).([]CallOverrideFunc); ok {
		cofs = append(cofs, uf...)
	}
	cofs = append(cofs, f)
	return netcontext.WithValue(ctx, &callOverrideKey, cofs)
}

func callOverrideFromContext(ctx netcontext.Context) (CallOverrideFunc, netcontext.Context, bool) {
	cofs, _ := ctx.Value(&callOverrideKey).([]CallOverrideFunc)
	if len(cofs) == 0 {
		return nil, nil, false
	}
	// We found a list of overrides; grab the last, and reconstitute a
	// context that will hide it.
	f := cofs[len(cofs)-1]
	ctx = netcontext.WithValue(ctx, &callOverrideKey, cofs[:len(cofs)-1])
	return f, ctx, true
}
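
// exampleStubCalls is a hedged sketch, not part of the upstream file: it uses
// WithCallOverride to push an override that intercepts every subsequent API
// call, which is how tests can stub out the service bridge. The stub body is
// an assumption.
func exampleStubCalls(ctx netcontext.Context) netcontext.Context {
	return WithCallOverride(ctx, func(ctx netcontext.Context, service, method string, in, out proto.Message) error {
		// A real override could fabricate a response proto or record the
		// call; this sketch simply treats every call as a no-op success.
		return nil
	})
}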
|
||||
|
||||
type logOverrideFunc func(level int64, format string, args ...interface{})
|
||||
|
||||
var logOverrideKey = "holds a logOverrideFunc"
|
||||
|
||||
func WithLogOverride(ctx netcontext.Context, f logOverrideFunc) netcontext.Context {
|
||||
return netcontext.WithValue(ctx, &logOverrideKey, f)
|
||||
}
|
||||
|
||||
var appIDOverrideKey = "holds a string, being the full app ID"
|
||||
|
||||
func WithAppIDOverride(ctx netcontext.Context, appID string) netcontext.Context {
|
||||
return netcontext.WithValue(ctx, &appIDOverrideKey, appID)
|
||||
}
|
||||
|
||||
var namespaceKey = "holds the namespace string"
|
||||
|
||||
func withNamespace(ctx netcontext.Context, ns string) netcontext.Context {
|
||||
return netcontext.WithValue(ctx, &namespaceKey, ns)
|
||||
}
|
||||
|
||||
func NamespaceFromContext(ctx netcontext.Context) string {
|
||||
// If there's no namespace, return the empty string.
|
||||
ns, _ := ctx.Value(&namespaceKey).(string)
|
||||
return ns
|
||||
}
|
||||
|
||||
// FullyQualifiedAppID returns the fully-qualified application ID.
|
||||
// This may contain a partition prefix (e.g. "s~" for High Replication apps),
|
||||
// or a domain prefix (e.g. "example.com:").
|
||||
func FullyQualifiedAppID(ctx netcontext.Context) string {
|
||||
if id, ok := ctx.Value(&appIDOverrideKey).(string); ok {
|
||||
return id
|
||||
}
|
||||
return fullyQualifiedAppID(ctx)
|
||||
}
|
||||
|
||||
func Logf(ctx netcontext.Context, level int64, format string, args ...interface{}) {
|
||||
if f, ok := ctx.Value(&logOverrideKey).(logOverrideFunc); ok {
|
||||
f(level, format, args...)
|
||||
return
|
||||
}
|
||||
logf(fromContext(ctx), level, format, args...)
|
||||
}
|
||||
|
||||
// NamespacedContext wraps a Context to support namespaces.
func NamespacedContext(ctx netcontext.Context, namespace string) netcontext.Context {
	n := &namespacedContext{
		namespace: namespace,
	}
	return withNamespace(WithCallOverride(ctx, n.call), namespace)
}

type namespacedContext struct {
	namespace string
}

func (n *namespacedContext) call(ctx netcontext.Context, service, method string, in, out proto.Message) error {
	// Apply any namespace mods.
	if mod, ok := NamespaceMods[service]; ok {
		mod(in, n.namespace)
	}
	return Call(ctx, service, method, in, out)
}
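
// exampleNamespaced is an illustrative sketch, not part of the upstream file:
// wrapping a context with NamespacedContext makes later Call invocations apply
// the namespace through the registered NamespaceMods. The namespace value is
// an assumption.
func exampleNamespaced(ctx netcontext.Context) netcontext.Context {
	nsCtx := NamespacedContext(ctx, "tenant-a")
	_ = NamespaceFromContext(nsCtx) // "tenant-a"
	return nsCtx
}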
|
28
vendor/google.golang.org/appengine/internal/app_id.go
generated
vendored
Normal file
28
vendor/google.golang.org/appengine/internal/app_id.go
generated
vendored
Normal file
|
@ -0,0 +1,28 @@
|
|||
// Copyright 2011 Google Inc. All rights reserved.
|
||||
// Use of this source code is governed by the Apache 2.0
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package internal
|
||||
|
||||
import (
|
||||
"strings"
|
||||
)
|
||||
|
||||
func parseFullAppID(appid string) (partition, domain, displayID string) {
	if i := strings.Index(appid, "~"); i != -1 {
		partition, appid = appid[:i], appid[i+1:]
	}
	if i := strings.Index(appid, ":"); i != -1 {
		domain, appid = appid[:i], appid[i+1:]
	}
	return partition, domain, appid
}

// appID returns "appid" or "domain.com:appid".
func appID(fullAppID string) string {
	_, dom, dis := parseFullAppID(fullAppID)
	if dom != "" {
		return dom + ":" + dis
	}
	return dis
}
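
// exampleParseFullAppID is a worked sketch, not part of the upstream file,
// using a hypothetical fully-qualified ID to show how the prefixes split:
// partition "s", domain "example.com", display ID "demo-app"; appID would
// then return "example.com:demo-app".
func exampleParseFullAppID() (partition, domain, displayID string) {
	return parseFullAppID("s~example.com:demo-app")
}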
|
296
vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.pb.go
generated
vendored
Normal file
296
vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.pb.go
generated
vendored
Normal file
|
@ -0,0 +1,296 @@
|
|||
// Code generated by protoc-gen-go.
|
||||
// source: google.golang.org/appengine/internal/app_identity/app_identity_service.proto
|
||||
// DO NOT EDIT!
|
||||
|
||||
/*
|
||||
Package app_identity is a generated protocol buffer package.
|
||||
|
||||
It is generated from these files:
|
||||
google.golang.org/appengine/internal/app_identity/app_identity_service.proto
|
||||
|
||||
It has these top-level messages:
|
||||
AppIdentityServiceError
|
||||
SignForAppRequest
|
||||
SignForAppResponse
|
||||
GetPublicCertificateForAppRequest
|
||||
PublicCertificate
|
||||
GetPublicCertificateForAppResponse
|
||||
GetServiceAccountNameRequest
|
||||
GetServiceAccountNameResponse
|
||||
GetAccessTokenRequest
|
||||
GetAccessTokenResponse
|
||||
GetDefaultGcsBucketNameRequest
|
||||
GetDefaultGcsBucketNameResponse
|
||||
*/
|
||||
package app_identity
|
||||
|
||||
import proto "github.com/golang/protobuf/proto"
|
||||
import fmt "fmt"
|
||||
import math "math"
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ = proto.Marshal
|
||||
var _ = fmt.Errorf
|
||||
var _ = math.Inf
|
||||
|
||||
type AppIdentityServiceError_ErrorCode int32
|
||||
|
||||
const (
|
||||
AppIdentityServiceError_SUCCESS AppIdentityServiceError_ErrorCode = 0
|
||||
AppIdentityServiceError_UNKNOWN_SCOPE AppIdentityServiceError_ErrorCode = 9
|
||||
AppIdentityServiceError_BLOB_TOO_LARGE AppIdentityServiceError_ErrorCode = 1000
|
||||
AppIdentityServiceError_DEADLINE_EXCEEDED AppIdentityServiceError_ErrorCode = 1001
|
||||
AppIdentityServiceError_NOT_A_VALID_APP AppIdentityServiceError_ErrorCode = 1002
|
||||
AppIdentityServiceError_UNKNOWN_ERROR AppIdentityServiceError_ErrorCode = 1003
|
||||
AppIdentityServiceError_NOT_ALLOWED AppIdentityServiceError_ErrorCode = 1005
|
||||
AppIdentityServiceError_NOT_IMPLEMENTED AppIdentityServiceError_ErrorCode = 1006
|
||||
)
|
||||
|
||||
var AppIdentityServiceError_ErrorCode_name = map[int32]string{
|
||||
0: "SUCCESS",
|
||||
9: "UNKNOWN_SCOPE",
|
||||
1000: "BLOB_TOO_LARGE",
|
||||
1001: "DEADLINE_EXCEEDED",
|
||||
1002: "NOT_A_VALID_APP",
|
||||
1003: "UNKNOWN_ERROR",
|
||||
1005: "NOT_ALLOWED",
|
||||
1006: "NOT_IMPLEMENTED",
|
||||
}
|
||||
var AppIdentityServiceError_ErrorCode_value = map[string]int32{
|
||||
"SUCCESS": 0,
|
||||
"UNKNOWN_SCOPE": 9,
|
||||
"BLOB_TOO_LARGE": 1000,
|
||||
"DEADLINE_EXCEEDED": 1001,
|
||||
"NOT_A_VALID_APP": 1002,
|
||||
"UNKNOWN_ERROR": 1003,
|
||||
"NOT_ALLOWED": 1005,
|
||||
"NOT_IMPLEMENTED": 1006,
|
||||
}
|
||||
|
||||
func (x AppIdentityServiceError_ErrorCode) Enum() *AppIdentityServiceError_ErrorCode {
|
||||
p := new(AppIdentityServiceError_ErrorCode)
|
||||
*p = x
|
||||
return p
|
||||
}
|
||||
func (x AppIdentityServiceError_ErrorCode) String() string {
|
||||
return proto.EnumName(AppIdentityServiceError_ErrorCode_name, int32(x))
|
||||
}
|
||||
func (x *AppIdentityServiceError_ErrorCode) UnmarshalJSON(data []byte) error {
|
||||
value, err := proto.UnmarshalJSONEnum(AppIdentityServiceError_ErrorCode_value, data, "AppIdentityServiceError_ErrorCode")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
*x = AppIdentityServiceError_ErrorCode(value)
|
||||
return nil
|
||||
}
|
||||
|
||||
type AppIdentityServiceError struct {
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
}
|
||||
|
||||
func (m *AppIdentityServiceError) Reset() { *m = AppIdentityServiceError{} }
|
||||
func (m *AppIdentityServiceError) String() string { return proto.CompactTextString(m) }
|
||||
func (*AppIdentityServiceError) ProtoMessage() {}
|
||||
|
||||
type SignForAppRequest struct {
|
||||
BytesToSign []byte `protobuf:"bytes,1,opt,name=bytes_to_sign" json:"bytes_to_sign,omitempty"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
}
|
||||
|
||||
func (m *SignForAppRequest) Reset() { *m = SignForAppRequest{} }
|
||||
func (m *SignForAppRequest) String() string { return proto.CompactTextString(m) }
|
||||
func (*SignForAppRequest) ProtoMessage() {}
|
||||
|
||||
func (m *SignForAppRequest) GetBytesToSign() []byte {
|
||||
if m != nil {
|
||||
return m.BytesToSign
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type SignForAppResponse struct {
|
||||
KeyName *string `protobuf:"bytes,1,opt,name=key_name" json:"key_name,omitempty"`
|
||||
SignatureBytes []byte `protobuf:"bytes,2,opt,name=signature_bytes" json:"signature_bytes,omitempty"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
}
|
||||
|
||||
func (m *SignForAppResponse) Reset() { *m = SignForAppResponse{} }
|
||||
func (m *SignForAppResponse) String() string { return proto.CompactTextString(m) }
|
||||
func (*SignForAppResponse) ProtoMessage() {}
|
||||
|
||||
func (m *SignForAppResponse) GetKeyName() string {
|
||||
if m != nil && m.KeyName != nil {
|
||||
return *m.KeyName
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *SignForAppResponse) GetSignatureBytes() []byte {
|
||||
if m != nil {
|
||||
return m.SignatureBytes
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type GetPublicCertificateForAppRequest struct {
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
}
|
||||
|
||||
func (m *GetPublicCertificateForAppRequest) Reset() { *m = GetPublicCertificateForAppRequest{} }
|
||||
func (m *GetPublicCertificateForAppRequest) String() string { return proto.CompactTextString(m) }
|
||||
func (*GetPublicCertificateForAppRequest) ProtoMessage() {}
|
||||
|
||||
type PublicCertificate struct {
|
||||
KeyName *string `protobuf:"bytes,1,opt,name=key_name" json:"key_name,omitempty"`
|
||||
X509CertificatePem *string `protobuf:"bytes,2,opt,name=x509_certificate_pem" json:"x509_certificate_pem,omitempty"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
}
|
||||
|
||||
func (m *PublicCertificate) Reset() { *m = PublicCertificate{} }
|
||||
func (m *PublicCertificate) String() string { return proto.CompactTextString(m) }
|
||||
func (*PublicCertificate) ProtoMessage() {}
|
||||
|
||||
func (m *PublicCertificate) GetKeyName() string {
|
||||
if m != nil && m.KeyName != nil {
|
||||
return *m.KeyName
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *PublicCertificate) GetX509CertificatePem() string {
|
||||
if m != nil && m.X509CertificatePem != nil {
|
||||
return *m.X509CertificatePem
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
type GetPublicCertificateForAppResponse struct {
|
||||
PublicCertificateList []*PublicCertificate `protobuf:"bytes,1,rep,name=public_certificate_list" json:"public_certificate_list,omitempty"`
|
||||
MaxClientCacheTimeInSecond *int64 `protobuf:"varint,2,opt,name=max_client_cache_time_in_second" json:"max_client_cache_time_in_second,omitempty"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
}
|
||||
|
||||
func (m *GetPublicCertificateForAppResponse) Reset() { *m = GetPublicCertificateForAppResponse{} }
|
||||
func (m *GetPublicCertificateForAppResponse) String() string { return proto.CompactTextString(m) }
|
||||
func (*GetPublicCertificateForAppResponse) ProtoMessage() {}
|
||||
|
||||
func (m *GetPublicCertificateForAppResponse) GetPublicCertificateList() []*PublicCertificate {
|
||||
if m != nil {
|
||||
return m.PublicCertificateList
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *GetPublicCertificateForAppResponse) GetMaxClientCacheTimeInSecond() int64 {
|
||||
if m != nil && m.MaxClientCacheTimeInSecond != nil {
|
||||
return *m.MaxClientCacheTimeInSecond
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
type GetServiceAccountNameRequest struct {
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
}
|
||||
|
||||
func (m *GetServiceAccountNameRequest) Reset() { *m = GetServiceAccountNameRequest{} }
|
||||
func (m *GetServiceAccountNameRequest) String() string { return proto.CompactTextString(m) }
|
||||
func (*GetServiceAccountNameRequest) ProtoMessage() {}
|
||||
|
||||
type GetServiceAccountNameResponse struct {
|
||||
ServiceAccountName *string `protobuf:"bytes,1,opt,name=service_account_name" json:"service_account_name,omitempty"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
}
|
||||
|
||||
func (m *GetServiceAccountNameResponse) Reset() { *m = GetServiceAccountNameResponse{} }
|
||||
func (m *GetServiceAccountNameResponse) String() string { return proto.CompactTextString(m) }
|
||||
func (*GetServiceAccountNameResponse) ProtoMessage() {}
|
||||
|
||||
func (m *GetServiceAccountNameResponse) GetServiceAccountName() string {
|
||||
if m != nil && m.ServiceAccountName != nil {
|
||||
return *m.ServiceAccountName
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
type GetAccessTokenRequest struct {
|
||||
Scope []string `protobuf:"bytes,1,rep,name=scope" json:"scope,omitempty"`
|
||||
ServiceAccountId *int64 `protobuf:"varint,2,opt,name=service_account_id" json:"service_account_id,omitempty"`
|
||||
ServiceAccountName *string `protobuf:"bytes,3,opt,name=service_account_name" json:"service_account_name,omitempty"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
}
|
||||
|
||||
func (m *GetAccessTokenRequest) Reset() { *m = GetAccessTokenRequest{} }
|
||||
func (m *GetAccessTokenRequest) String() string { return proto.CompactTextString(m) }
|
||||
func (*GetAccessTokenRequest) ProtoMessage() {}
|
||||
|
||||
func (m *GetAccessTokenRequest) GetScope() []string {
|
||||
if m != nil {
|
||||
return m.Scope
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *GetAccessTokenRequest) GetServiceAccountId() int64 {
|
||||
if m != nil && m.ServiceAccountId != nil {
|
||||
return *m.ServiceAccountId
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (m *GetAccessTokenRequest) GetServiceAccountName() string {
|
||||
if m != nil && m.ServiceAccountName != nil {
|
||||
return *m.ServiceAccountName
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
type GetAccessTokenResponse struct {
|
||||
AccessToken *string `protobuf:"bytes,1,opt,name=access_token" json:"access_token,omitempty"`
|
||||
ExpirationTime *int64 `protobuf:"varint,2,opt,name=expiration_time" json:"expiration_time,omitempty"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
}
|
||||
|
||||
func (m *GetAccessTokenResponse) Reset() { *m = GetAccessTokenResponse{} }
|
||||
func (m *GetAccessTokenResponse) String() string { return proto.CompactTextString(m) }
|
||||
func (*GetAccessTokenResponse) ProtoMessage() {}
|
||||
|
||||
func (m *GetAccessTokenResponse) GetAccessToken() string {
|
||||
if m != nil && m.AccessToken != nil {
|
||||
return *m.AccessToken
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *GetAccessTokenResponse) GetExpirationTime() int64 {
|
||||
if m != nil && m.ExpirationTime != nil {
|
||||
return *m.ExpirationTime
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
type GetDefaultGcsBucketNameRequest struct {
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
}
|
||||
|
||||
func (m *GetDefaultGcsBucketNameRequest) Reset() { *m = GetDefaultGcsBucketNameRequest{} }
|
||||
func (m *GetDefaultGcsBucketNameRequest) String() string { return proto.CompactTextString(m) }
|
||||
func (*GetDefaultGcsBucketNameRequest) ProtoMessage() {}
|
||||
|
||||
type GetDefaultGcsBucketNameResponse struct {
|
||||
DefaultGcsBucketName *string `protobuf:"bytes,1,opt,name=default_gcs_bucket_name" json:"default_gcs_bucket_name,omitempty"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
}
|
||||
|
||||
func (m *GetDefaultGcsBucketNameResponse) Reset() { *m = GetDefaultGcsBucketNameResponse{} }
|
||||
func (m *GetDefaultGcsBucketNameResponse) String() string { return proto.CompactTextString(m) }
|
||||
func (*GetDefaultGcsBucketNameResponse) ProtoMessage() {}
|
||||
|
||||
func (m *GetDefaultGcsBucketNameResponse) GetDefaultGcsBucketName() string {
|
||||
if m != nil && m.DefaultGcsBucketName != nil {
|
||||
return *m.DefaultGcsBucketName
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func init() {
|
||||
}
|
64
vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.proto
generated
vendored
Normal file
64
vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.proto
generated
vendored
Normal file
|
@ -0,0 +1,64 @@
|
|||
syntax = "proto2";
|
||||
option go_package = "app_identity";
|
||||
|
||||
package appengine;
|
||||
|
||||
message AppIdentityServiceError {
|
||||
enum ErrorCode {
|
||||
SUCCESS = 0;
|
||||
UNKNOWN_SCOPE = 9;
|
||||
BLOB_TOO_LARGE = 1000;
|
||||
DEADLINE_EXCEEDED = 1001;
|
||||
NOT_A_VALID_APP = 1002;
|
||||
UNKNOWN_ERROR = 1003;
|
||||
NOT_ALLOWED = 1005;
|
||||
NOT_IMPLEMENTED = 1006;
|
||||
}
|
||||
}
|
||||
|
||||
message SignForAppRequest {
|
||||
optional bytes bytes_to_sign = 1;
|
||||
}
|
||||
|
||||
message SignForAppResponse {
|
||||
optional string key_name = 1;
|
||||
optional bytes signature_bytes = 2;
|
||||
}
|
||||
|
||||
message GetPublicCertificateForAppRequest {
|
||||
}
|
||||
|
||||
message PublicCertificate {
|
||||
optional string key_name = 1;
|
||||
optional string x509_certificate_pem = 2;
|
||||
}
|
||||
|
||||
message GetPublicCertificateForAppResponse {
|
||||
repeated PublicCertificate public_certificate_list = 1;
|
||||
optional int64 max_client_cache_time_in_second = 2;
|
||||
}
|
||||
|
||||
message GetServiceAccountNameRequest {
|
||||
}
|
||||
|
||||
message GetServiceAccountNameResponse {
|
||||
optional string service_account_name = 1;
|
||||
}
|
||||
|
||||
message GetAccessTokenRequest {
|
||||
repeated string scope = 1;
|
||||
optional int64 service_account_id = 2;
|
||||
optional string service_account_name = 3;
|
||||
}
|
||||
|
||||
message GetAccessTokenResponse {
|
||||
optional string access_token = 1;
|
||||
optional int64 expiration_time = 2;
|
||||
}
|
||||
|
||||
message GetDefaultGcsBucketNameRequest {
|
||||
}
|
||||
|
||||
message GetDefaultGcsBucketNameResponse {
|
||||
optional string default_gcs_bucket_name = 1;
|
||||
}
|
133
vendor/google.golang.org/appengine/internal/base/api_base.pb.go
generated
vendored
Normal file
133
vendor/google.golang.org/appengine/internal/base/api_base.pb.go
generated
vendored
Normal file
|
@ -0,0 +1,133 @@
|
|||
// Code generated by protoc-gen-go.
|
||||
// source: google.golang.org/appengine/internal/base/api_base.proto
|
||||
// DO NOT EDIT!
|
||||
|
||||
/*
|
||||
Package base is a generated protocol buffer package.
|
||||
|
||||
It is generated from these files:
|
||||
google.golang.org/appengine/internal/base/api_base.proto
|
||||
|
||||
It has these top-level messages:
|
||||
StringProto
|
||||
Integer32Proto
|
||||
Integer64Proto
|
||||
BoolProto
|
||||
DoubleProto
|
||||
BytesProto
|
||||
VoidProto
|
||||
*/
|
||||
package base
|
||||
|
||||
import proto "github.com/golang/protobuf/proto"
|
||||
import fmt "fmt"
|
||||
import math "math"
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ = proto.Marshal
|
||||
var _ = fmt.Errorf
|
||||
var _ = math.Inf
|
||||
|
||||
type StringProto struct {
|
||||
Value *string `protobuf:"bytes,1,req,name=value" json:"value,omitempty"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
}
|
||||
|
||||
func (m *StringProto) Reset() { *m = StringProto{} }
|
||||
func (m *StringProto) String() string { return proto.CompactTextString(m) }
|
||||
func (*StringProto) ProtoMessage() {}
|
||||
|
||||
func (m *StringProto) GetValue() string {
|
||||
if m != nil && m.Value != nil {
|
||||
return *m.Value
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
type Integer32Proto struct {
|
||||
Value *int32 `protobuf:"varint,1,req,name=value" json:"value,omitempty"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
}
|
||||
|
||||
func (m *Integer32Proto) Reset() { *m = Integer32Proto{} }
|
||||
func (m *Integer32Proto) String() string { return proto.CompactTextString(m) }
|
||||
func (*Integer32Proto) ProtoMessage() {}
|
||||
|
||||
func (m *Integer32Proto) GetValue() int32 {
|
||||
if m != nil && m.Value != nil {
|
||||
return *m.Value
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
type Integer64Proto struct {
|
||||
Value *int64 `protobuf:"varint,1,req,name=value" json:"value,omitempty"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
}
|
||||
|
||||
func (m *Integer64Proto) Reset() { *m = Integer64Proto{} }
|
||||
func (m *Integer64Proto) String() string { return proto.CompactTextString(m) }
|
||||
func (*Integer64Proto) ProtoMessage() {}
|
||||
|
||||
func (m *Integer64Proto) GetValue() int64 {
|
||||
if m != nil && m.Value != nil {
|
||||
return *m.Value
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
type BoolProto struct {
|
||||
Value *bool `protobuf:"varint,1,req,name=value" json:"value,omitempty"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
}
|
||||
|
||||
func (m *BoolProto) Reset() { *m = BoolProto{} }
|
||||
func (m *BoolProto) String() string { return proto.CompactTextString(m) }
|
||||
func (*BoolProto) ProtoMessage() {}
|
||||
|
||||
func (m *BoolProto) GetValue() bool {
|
||||
if m != nil && m.Value != nil {
|
||||
return *m.Value
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
type DoubleProto struct {
|
||||
Value *float64 `protobuf:"fixed64,1,req,name=value" json:"value,omitempty"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
}
|
||||
|
||||
func (m *DoubleProto) Reset() { *m = DoubleProto{} }
|
||||
func (m *DoubleProto) String() string { return proto.CompactTextString(m) }
|
||||
func (*DoubleProto) ProtoMessage() {}
|
||||
|
||||
func (m *DoubleProto) GetValue() float64 {
|
||||
if m != nil && m.Value != nil {
|
||||
return *m.Value
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
type BytesProto struct {
|
||||
Value []byte `protobuf:"bytes,1,req,name=value" json:"value,omitempty"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
}
|
||||
|
||||
func (m *BytesProto) Reset() { *m = BytesProto{} }
|
||||
func (m *BytesProto) String() string { return proto.CompactTextString(m) }
|
||||
func (*BytesProto) ProtoMessage() {}
|
||||
|
||||
func (m *BytesProto) GetValue() []byte {
|
||||
if m != nil {
|
||||
return m.Value
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type VoidProto struct {
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
}
|
||||
|
||||
func (m *VoidProto) Reset() { *m = VoidProto{} }
|
||||
func (m *VoidProto) String() string { return proto.CompactTextString(m) }
|
||||
func (*VoidProto) ProtoMessage() {}
|
33
vendor/google.golang.org/appengine/internal/base/api_base.proto
generated
vendored
Normal file
33
vendor/google.golang.org/appengine/internal/base/api_base.proto
generated
vendored
Normal file
|
@ -0,0 +1,33 @@
|
|||
// Built-in base types for API calls. Primarily useful as return types.
|
||||
|
||||
syntax = "proto2";
|
||||
option go_package = "base";
|
||||
|
||||
package appengine.base;
|
||||
|
||||
message StringProto {
|
||||
required string value = 1;
|
||||
}
|
||||
|
||||
message Integer32Proto {
|
||||
required int32 value = 1;
|
||||
}
|
||||
|
||||
message Integer64Proto {
|
||||
required int64 value = 1;
|
||||
}
|
||||
|
||||
message BoolProto {
|
||||
required bool value = 1;
|
||||
}
|
||||
|
||||
message DoubleProto {
|
||||
required double value = 1;
|
||||
}
|
||||
|
||||
message BytesProto {
|
||||
required bytes value = 1 [ctype=CORD];
|
||||
}
|
||||
|
||||
message VoidProto {
|
||||
}
|
2778
vendor/google.golang.org/appengine/internal/datastore/datastore_v3.pb.go
generated
vendored
Normal file
2778
vendor/google.golang.org/appengine/internal/datastore/datastore_v3.pb.go
generated
vendored
Normal file
File diff suppressed because it is too large
541
vendor/google.golang.org/appengine/internal/datastore/datastore_v3.proto
generated
vendored
Normal file
541
vendor/google.golang.org/appengine/internal/datastore/datastore_v3.proto
generated
vendored
Normal file
|
@ -0,0 +1,541 @@
|
|||
syntax = "proto2";
|
||||
option go_package = "datastore";
|
||||
|
||||
package appengine;
|
||||
|
||||
message Action{}
|
||||
|
||||
message PropertyValue {
|
||||
optional int64 int64Value = 1;
|
||||
optional bool booleanValue = 2;
|
||||
optional string stringValue = 3;
|
||||
optional double doubleValue = 4;
|
||||
|
||||
optional group PointValue = 5 {
|
||||
required double x = 6;
|
||||
required double y = 7;
|
||||
}
|
||||
|
||||
optional group UserValue = 8 {
|
||||
required string email = 9;
|
||||
required string auth_domain = 10;
|
||||
optional string nickname = 11;
|
||||
optional string federated_identity = 21;
|
||||
optional string federated_provider = 22;
|
||||
}
|
||||
|
||||
optional group ReferenceValue = 12 {
|
||||
required string app = 13;
|
||||
optional string name_space = 20;
|
||||
repeated group PathElement = 14 {
|
||||
required string type = 15;
|
||||
optional int64 id = 16;
|
||||
optional string name = 17;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
message Property {
|
||||
enum Meaning {
|
||||
NO_MEANING = 0;
|
||||
BLOB = 14;
|
||||
TEXT = 15;
|
||||
BYTESTRING = 16;
|
||||
|
||||
ATOM_CATEGORY = 1;
|
||||
ATOM_LINK = 2;
|
||||
ATOM_TITLE = 3;
|
||||
ATOM_CONTENT = 4;
|
||||
ATOM_SUMMARY = 5;
|
||||
ATOM_AUTHOR = 6;
|
||||
|
||||
GD_WHEN = 7;
|
||||
GD_EMAIL = 8;
|
||||
GEORSS_POINT = 9;
|
||||
GD_IM = 10;
|
||||
|
||||
GD_PHONENUMBER = 11;
|
||||
GD_POSTALADDRESS = 12;
|
||||
|
||||
GD_RATING = 13;
|
||||
|
||||
BLOBKEY = 17;
|
||||
ENTITY_PROTO = 19;
|
||||
|
||||
INDEX_VALUE = 18;
|
||||
};
|
||||
|
||||
optional Meaning meaning = 1 [default = NO_MEANING];
|
||||
optional string meaning_uri = 2;
|
||||
|
||||
required string name = 3;
|
||||
|
||||
required PropertyValue value = 5;
|
||||
|
||||
required bool multiple = 4;
|
||||
|
||||
optional bool searchable = 6 [default=false];
|
||||
|
||||
enum FtsTokenizationOption {
|
||||
HTML = 1;
|
||||
ATOM = 2;
|
||||
}
|
||||
|
||||
optional FtsTokenizationOption fts_tokenization_option = 8;
|
||||
|
||||
optional string locale = 9 [default = "en"];
|
||||
}
|
||||
|
||||
message Path {
|
||||
repeated group Element = 1 {
|
||||
required string type = 2;
|
||||
optional int64 id = 3;
|
||||
optional string name = 4;
|
||||
}
|
||||
}
|
||||
|
||||
message Reference {
|
||||
required string app = 13;
|
||||
optional string name_space = 20;
|
||||
required Path path = 14;
|
||||
}
|
||||
|
||||
message User {
|
||||
required string email = 1;
|
||||
required string auth_domain = 2;
|
||||
optional string nickname = 3;
|
||||
optional string federated_identity = 6;
|
||||
optional string federated_provider = 7;
|
||||
}
|
||||
|
||||
message EntityProto {
|
||||
required Reference key = 13;
|
||||
required Path entity_group = 16;
|
||||
optional User owner = 17;
|
||||
|
||||
enum Kind {
|
||||
GD_CONTACT = 1;
|
||||
GD_EVENT = 2;
|
||||
GD_MESSAGE = 3;
|
||||
}
|
||||
optional Kind kind = 4;
|
||||
optional string kind_uri = 5;
|
||||
|
||||
repeated Property property = 14;
|
||||
repeated Property raw_property = 15;
|
||||
|
||||
optional int32 rank = 18;
|
||||
}
|
||||
|
||||
message CompositeProperty {
|
||||
required int64 index_id = 1;
|
||||
repeated string value = 2;
|
||||
}
|
||||
|
||||
message Index {
|
||||
required string entity_type = 1;
|
||||
required bool ancestor = 5;
|
||||
repeated group Property = 2 {
|
||||
required string name = 3;
|
||||
enum Direction {
|
||||
ASCENDING = 1;
|
||||
DESCENDING = 2;
|
||||
}
|
||||
optional Direction direction = 4 [default = ASCENDING];
|
||||
}
|
||||
}
|
||||
|
||||
message CompositeIndex {
|
||||
required string app_id = 1;
|
||||
required int64 id = 2;
|
||||
required Index definition = 3;
|
||||
|
||||
enum State {
|
||||
WRITE_ONLY = 1;
|
||||
READ_WRITE = 2;
|
||||
DELETED = 3;
|
||||
ERROR = 4;
|
||||
}
|
||||
required State state = 4;
|
||||
|
||||
optional bool only_use_if_required = 6 [default = false];
|
||||
}
|
||||
|
||||
message IndexPostfix {
|
||||
message IndexValue {
|
||||
required string property_name = 1;
|
||||
required PropertyValue value = 2;
|
||||
}
|
||||
|
||||
repeated IndexValue index_value = 1;
|
||||
|
||||
optional Reference key = 2;
|
||||
|
||||
optional bool before = 3 [default=true];
|
||||
}
|
||||
|
||||
message IndexPosition {
|
||||
optional string key = 1;
|
||||
|
||||
optional bool before = 2 [default=true];
|
||||
}
|
||||
|
||||
message Snapshot {
|
||||
enum Status {
|
||||
INACTIVE = 0;
|
||||
ACTIVE = 1;
|
||||
}
|
||||
|
||||
required int64 ts = 1;
|
||||
}
|
||||
|
||||
message InternalHeader {
|
||||
optional string qos = 1;
|
||||
}
|
||||
|
||||
message Transaction {
|
||||
optional InternalHeader header = 4;
|
||||
required fixed64 handle = 1;
|
||||
required string app = 2;
|
||||
optional bool mark_changes = 3 [default = false];
|
||||
}
|
||||
|
||||
message Query {
|
||||
optional InternalHeader header = 39;
|
||||
|
||||
required string app = 1;
|
||||
optional string name_space = 29;
|
||||
|
||||
optional string kind = 3;
|
||||
optional Reference ancestor = 17;
|
||||
|
||||
repeated group Filter = 4 {
|
||||
enum Operator {
|
||||
LESS_THAN = 1;
|
||||
LESS_THAN_OR_EQUAL = 2;
|
||||
GREATER_THAN = 3;
|
||||
GREATER_THAN_OR_EQUAL = 4;
|
||||
EQUAL = 5;
|
||||
IN = 6;
|
||||
EXISTS = 7;
|
||||
}
|
||||
|
||||
required Operator op = 6;
|
||||
repeated Property property = 14;
|
||||
}
|
||||
|
||||
optional string search_query = 8;
|
||||
|
||||
repeated group Order = 9 {
|
||||
enum Direction {
|
||||
ASCENDING = 1;
|
||||
DESCENDING = 2;
|
||||
}
|
||||
|
||||
required string property = 10;
|
||||
optional Direction direction = 11 [default = ASCENDING];
|
||||
}
|
||||
|
||||
enum Hint {
|
||||
ORDER_FIRST = 1;
|
||||
ANCESTOR_FIRST = 2;
|
||||
FILTER_FIRST = 3;
|
||||
}
|
||||
optional Hint hint = 18;
|
||||
|
||||
optional int32 count = 23;
|
||||
|
||||
optional int32 offset = 12 [default = 0];
|
||||
|
||||
optional int32 limit = 16;
|
||||
|
||||
optional CompiledCursor compiled_cursor = 30;
|
||||
optional CompiledCursor end_compiled_cursor = 31;
|
||||
|
||||
repeated CompositeIndex composite_index = 19;
|
||||
|
||||
optional bool require_perfect_plan = 20 [default = false];
|
||||
|
||||
optional bool keys_only = 21 [default = false];
|
||||
|
||||
optional Transaction transaction = 22;
|
||||
|
||||
optional bool compile = 25 [default = false];
|
||||
|
||||
optional int64 failover_ms = 26;
|
||||
|
||||
optional bool strong = 32;
|
||||
|
||||
repeated string property_name = 33;
|
||||
|
||||
repeated string group_by_property_name = 34;
|
||||
|
||||
optional bool distinct = 24;
|
||||
|
||||
optional int64 min_safe_time_seconds = 35;
|
||||
|
||||
repeated string safe_replica_name = 36;
|
||||
|
||||
optional bool persist_offset = 37 [default=false];
|
||||
}
|
||||
|
||||
message CompiledQuery {
|
||||
required group PrimaryScan = 1 {
|
||||
optional string index_name = 2;
|
||||
|
||||
optional string start_key = 3;
|
||||
optional bool start_inclusive = 4;
|
||||
optional string end_key = 5;
|
||||
optional bool end_inclusive = 6;
|
||||
|
||||
repeated string start_postfix_value = 22;
|
||||
repeated string end_postfix_value = 23;
|
||||
|
||||
optional int64 end_unapplied_log_timestamp_us = 19;
|
||||
}
|
||||
|
||||
repeated group MergeJoinScan = 7 {
|
||||
required string index_name = 8;
|
||||
|
||||
repeated string prefix_value = 9;
|
||||
|
||||
optional bool value_prefix = 20 [default=false];
|
||||
}
|
||||
|
||||
optional Index index_def = 21;
|
||||
|
||||
optional int32 offset = 10 [default = 0];
|
||||
|
||||
optional int32 limit = 11;
|
||||
|
||||
required bool keys_only = 12;
|
||||
|
||||
repeated string property_name = 24;
|
||||
|
||||
optional int32 distinct_infix_size = 25;
|
||||
|
||||
optional group EntityFilter = 13 {
|
||||
optional bool distinct = 14 [default=false];
|
||||
|
||||
optional string kind = 17;
|
||||
optional Reference ancestor = 18;
|
||||
}
|
||||
}
|
||||
|
||||
message CompiledCursor {
|
||||
optional group Position = 2 {
|
||||
optional string start_key = 27;
|
||||
|
||||
repeated group IndexValue = 29 {
|
||||
optional string property = 30;
|
||||
required PropertyValue value = 31;
|
||||
}
|
||||
|
||||
optional Reference key = 32;
|
||||
|
||||
optional bool start_inclusive = 28 [default=true];
|
||||
}
|
||||
}
|
||||
|
||||
message Cursor {
|
||||
required fixed64 cursor = 1;
|
||||
|
||||
optional string app = 2;
|
||||
}
|
||||
|
||||
message Error {
|
||||
enum ErrorCode {
|
||||
BAD_REQUEST = 1;
|
||||
CONCURRENT_TRANSACTION = 2;
|
||||
INTERNAL_ERROR = 3;
|
||||
NEED_INDEX = 4;
|
||||
TIMEOUT = 5;
|
||||
PERMISSION_DENIED = 6;
|
||||
BIGTABLE_ERROR = 7;
|
||||
COMMITTED_BUT_STILL_APPLYING = 8;
|
||||
CAPABILITY_DISABLED = 9;
|
||||
TRY_ALTERNATE_BACKEND = 10;
|
||||
SAFE_TIME_TOO_OLD = 11;
|
||||
}
|
||||
}
|
||||
|
||||
message Cost {
|
||||
optional int32 index_writes = 1;
|
||||
optional int32 index_write_bytes = 2;
|
||||
optional int32 entity_writes = 3;
|
||||
optional int32 entity_write_bytes = 4;
|
||||
optional group CommitCost = 5 {
|
||||
optional int32 requested_entity_puts = 6;
|
||||
optional int32 requested_entity_deletes = 7;
|
||||
};
|
||||
optional int32 approximate_storage_delta = 8;
|
||||
optional int32 id_sequence_updates = 9;
|
||||
}
|
||||
|
||||
message GetRequest {
|
||||
optional InternalHeader header = 6;
|
||||
|
||||
repeated Reference key = 1;
|
||||
optional Transaction transaction = 2;
|
||||
|
||||
optional int64 failover_ms = 3;
|
||||
|
||||
optional bool strong = 4;
|
||||
|
||||
optional bool allow_deferred = 5 [default=false];
|
||||
}
|
||||
|
||||
message GetResponse {
|
||||
repeated group Entity = 1 {
|
||||
optional EntityProto entity = 2;
|
||||
optional Reference key = 4;
|
||||
|
||||
optional int64 version = 3;
|
||||
}
|
||||
|
||||
repeated Reference deferred = 5;
|
||||
|
||||
optional bool in_order = 6 [default=true];
|
||||
}
|
||||
|
||||
message PutRequest {
|
||||
optional InternalHeader header = 11;
|
||||
|
||||
repeated EntityProto entity = 1;
|
||||
optional Transaction transaction = 2;
|
||||
repeated CompositeIndex composite_index = 3;
|
||||
|
||||
optional bool trusted = 4 [default = false];
|
||||
|
||||
optional bool force = 7 [default = false];
|
||||
|
||||
optional bool mark_changes = 8 [default = false];
|
||||
repeated Snapshot snapshot = 9;
|
||||
|
||||
enum AutoIdPolicy {
|
||||
CURRENT = 0;
|
||||
SEQUENTIAL = 1;
|
||||
}
|
||||
optional AutoIdPolicy auto_id_policy = 10 [default = CURRENT];
|
||||
}
|
||||
|
||||
message PutResponse {
|
||||
repeated Reference key = 1;
|
||||
optional Cost cost = 2;
|
||||
repeated int64 version = 3;
|
||||
}
|
||||
|
||||
message TouchRequest {
|
||||
optional InternalHeader header = 10;
|
||||
|
||||
repeated Reference key = 1;
|
||||
repeated CompositeIndex composite_index = 2;
|
||||
optional bool force = 3 [default = false];
|
||||
repeated Snapshot snapshot = 9;
|
||||
}
|
||||
|
||||
message TouchResponse {
|
||||
optional Cost cost = 1;
|
||||
}
|
||||
|
||||
message DeleteRequest {
|
||||
optional InternalHeader header = 10;
|
||||
|
||||
repeated Reference key = 6;
|
||||
optional Transaction transaction = 5;
|
||||
|
||||
optional bool trusted = 4 [default = false];
|
||||
|
||||
optional bool force = 7 [default = false];
|
||||
|
||||
optional bool mark_changes = 8 [default = false];
|
||||
repeated Snapshot snapshot = 9;
|
||||
}
|
||||
|
||||
message DeleteResponse {
|
||||
optional Cost cost = 1;
|
||||
repeated int64 version = 3;
|
||||
}
|
||||
|
||||
message NextRequest {
|
||||
optional InternalHeader header = 5;
|
||||
|
||||
required Cursor cursor = 1;
|
||||
optional int32 count = 2;
|
||||
|
||||
optional int32 offset = 4 [default = 0];
|
||||
|
||||
optional bool compile = 3 [default = false];
|
||||
}
|
||||
|
||||
message QueryResult {
|
||||
optional Cursor cursor = 1;
|
||||
|
||||
repeated EntityProto result = 2;
|
||||
|
||||
optional int32 skipped_results = 7;
|
||||
|
||||
required bool more_results = 3;
|
||||
|
||||
optional bool keys_only = 4;
|
||||
|
||||
optional bool index_only = 9;
|
||||
|
||||
optional bool small_ops = 10;
|
||||
|
||||
optional CompiledQuery compiled_query = 5;
|
||||
|
||||
optional CompiledCursor compiled_cursor = 6;
|
||||
|
||||
repeated CompositeIndex index = 8;
|
||||
|
||||
repeated int64 version = 11;
|
||||
}
|
||||
|
||||
message AllocateIdsRequest {
|
||||
optional InternalHeader header = 4;
|
||||
|
||||
optional Reference model_key = 1;
|
||||
|
||||
optional int64 size = 2;
|
||||
|
||||
optional int64 max = 3;
|
||||
|
||||
repeated Reference reserve = 5;
|
||||
}
|
||||
|
||||
message AllocateIdsResponse {
|
||||
required int64 start = 1;
|
||||
required int64 end = 2;
|
||||
optional Cost cost = 3;
|
||||
}
|
||||
|
||||
message CompositeIndices {
|
||||
repeated CompositeIndex index = 1;
|
||||
}
|
||||
|
||||
message AddActionsRequest {
|
||||
optional InternalHeader header = 3;
|
||||
|
||||
required Transaction transaction = 1;
|
||||
repeated Action action = 2;
|
||||
}
|
||||
|
||||
message AddActionsResponse {
|
||||
}
|
||||
|
||||
message BeginTransactionRequest {
|
||||
optional InternalHeader header = 3;
|
||||
|
||||
required string app = 1;
|
||||
optional bool allow_multiple_eg = 2 [default = false];
|
||||
}
|
||||
|
||||
message CommitResponse {
|
||||
optional Cost cost = 1;
|
||||
|
||||
repeated group Version = 3 {
|
||||
required Reference root_entity_key = 4;
|
||||
required int64 version = 5;
|
||||
}
|
||||
}
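The Put/Delete/AllocateIds messages above are the wire types behind the datastore client. As a minimal sketch (not part of this diff), assuming the Go bindings generated from this proto live at google.golang.org/appengine/internal/datastore and follow the usual protoc-gen-go field naming, an id-allocation request could be built like this:

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	pb "google.golang.org/appengine/internal/datastore"
)

func main() {
	// Reserve a block of 10 ids; size is a proto2 optional field, so the
	// proto.Int64 helper is used to take its address.
	req := &pb.AllocateIdsRequest{
		Size: proto.Int64(10),
	}
	fmt.Println(proto.CompactTextString(req))
}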
|
14 vendor/google.golang.org/appengine/internal/identity.go generated vendored Normal file
|
@ -0,0 +1,14 @@
|
|||
// Copyright 2011 Google Inc. All rights reserved.
|
||||
// Use of this source code is governed by the Apache 2.0
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package internal
|
||||
|
||||
import netcontext "golang.org/x/net/context"
|
||||
|
||||
// These functions are implementations of the wrapper functions
|
||||
// in ../appengine/identity.go. See that file for commentary.
|
||||
|
||||
func AppID(c netcontext.Context) string {
|
||||
return appID(FullyQualifiedAppID(c))
|
||||
}
|
27 vendor/google.golang.org/appengine/internal/identity_classic.go generated vendored Normal file
|
@ -0,0 +1,27 @@
|
|||
// Copyright 2015 Google Inc. All rights reserved.
|
||||
// Use of this source code is governed by the Apache 2.0
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build appengine
|
||||
|
||||
package internal
|
||||
|
||||
import (
|
||||
"appengine"
|
||||
|
||||
netcontext "golang.org/x/net/context"
|
||||
)
|
||||
|
||||
func DefaultVersionHostname(ctx netcontext.Context) string {
|
||||
return appengine.DefaultVersionHostname(fromContext(ctx))
|
||||
}
|
||||
|
||||
func RequestID(ctx netcontext.Context) string { return appengine.RequestID(fromContext(ctx)) }
|
||||
func Datacenter(_ netcontext.Context) string { return appengine.Datacenter() }
|
||||
func ServerSoftware() string { return appengine.ServerSoftware() }
|
||||
func ModuleName(ctx netcontext.Context) string { return appengine.ModuleName(fromContext(ctx)) }
|
||||
func VersionID(ctx netcontext.Context) string { return appengine.VersionID(fromContext(ctx)) }
|
||||
func InstanceID() string { return appengine.InstanceID() }
|
||||
func IsDevAppServer() bool { return appengine.IsDevAppServer() }
|
||||
|
||||
func fullyQualifiedAppID(ctx netcontext.Context) string { return fromContext(ctx).FullyQualifiedAppID() }
|
97 vendor/google.golang.org/appengine/internal/identity_vm.go generated vendored Normal file
|
@ -0,0 +1,97 @@
|
|||
// Copyright 2011 Google Inc. All rights reserved.
|
||||
// Use of this source code is governed by the Apache 2.0
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build !appengine
|
||||
|
||||
package internal
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"os"
|
||||
|
||||
netcontext "golang.org/x/net/context"
|
||||
)
|
||||
|
||||
// These functions are implementations of the wrapper functions
|
||||
// in ../appengine/identity.go. See that file for commentary.
|
||||
|
||||
const (
|
||||
hDefaultVersionHostname = "X-AppEngine-Default-Version-Hostname"
|
||||
hRequestLogId = "X-AppEngine-Request-Log-Id"
|
||||
hDatacenter = "X-AppEngine-Datacenter"
|
||||
)
|
||||
|
||||
func ctxHeaders(ctx netcontext.Context) http.Header {
|
||||
return fromContext(ctx).Request().Header
|
||||
}
|
||||
|
||||
func DefaultVersionHostname(ctx netcontext.Context) string {
|
||||
return ctxHeaders(ctx).Get(hDefaultVersionHostname)
|
||||
}
|
||||
|
||||
func RequestID(ctx netcontext.Context) string {
|
||||
return ctxHeaders(ctx).Get(hRequestLogId)
|
||||
}
|
||||
|
||||
func Datacenter(ctx netcontext.Context) string {
|
||||
return ctxHeaders(ctx).Get(hDatacenter)
|
||||
}
|
||||
|
||||
func ServerSoftware() string {
|
||||
// TODO(dsymonds): Remove fallback when we've verified this.
|
||||
if s := os.Getenv("SERVER_SOFTWARE"); s != "" {
|
||||
return s
|
||||
}
|
||||
return "Google App Engine/1.x.x"
|
||||
}
|
||||
|
||||
// TODO(dsymonds): Remove the metadata fetches.
|
||||
|
||||
func ModuleName(_ netcontext.Context) string {
|
||||
if s := os.Getenv("GAE_MODULE_NAME"); s != "" {
|
||||
return s
|
||||
}
|
||||
return string(mustGetMetadata("instance/attributes/gae_backend_name"))
|
||||
}
|
||||
|
||||
func VersionID(_ netcontext.Context) string {
|
||||
if s1, s2 := os.Getenv("GAE_MODULE_VERSION"), os.Getenv("GAE_MINOR_VERSION"); s1 != "" && s2 != "" {
|
||||
return s1 + "." + s2
|
||||
}
|
||||
return string(mustGetMetadata("instance/attributes/gae_backend_version")) + "." + string(mustGetMetadata("instance/attributes/gae_backend_minor_version"))
|
||||
}
|
||||
|
||||
func InstanceID() string {
|
||||
if s := os.Getenv("GAE_MODULE_INSTANCE"); s != "" {
|
||||
return s
|
||||
}
|
||||
return string(mustGetMetadata("instance/attributes/gae_backend_instance"))
|
||||
}
|
||||
|
||||
func partitionlessAppID() string {
|
||||
// gae_project has everything except the partition prefix.
|
||||
appID := os.Getenv("GAE_LONG_APP_ID")
|
||||
if appID == "" {
|
||||
appID = string(mustGetMetadata("instance/attributes/gae_project"))
|
||||
}
|
||||
return appID
|
||||
}
|
||||
|
||||
func fullyQualifiedAppID(_ netcontext.Context) string {
|
||||
appID := partitionlessAppID()
|
||||
|
||||
part := os.Getenv("GAE_PARTITION")
|
||||
if part == "" {
|
||||
part = string(mustGetMetadata("instance/attributes/gae_partition"))
|
||||
}
|
||||
|
||||
if part != "" {
|
||||
appID = part + "~" + appID
|
||||
}
|
||||
return appID
|
||||
}
|
||||
|
||||
func IsDevAppServer() bool {
|
||||
return os.Getenv("RUN_WITH_DEVAPPSERVER") != ""
|
||||
}
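identity_classic.go (+build appengine) and identity_vm.go (+build !appengine) are two implementations of the same internal API, chosen at compile time by build tags. A minimal sketch of the exported wrapper the comments above refer to (the real wrapper lives in ../appengine/identity.go, which is not part of this diff):

package appengine

import (
	"golang.org/x/net/context"

	"google.golang.org/appengine/internal"
)

// DefaultVersionHostname delegates to whichever internal implementation
// was compiled in (classic runtime or VM runtime).
func DefaultVersionHostname(c context.Context) string {
	return internal.DefaultVersionHostname(c)
}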
|
144 vendor/google.golang.org/appengine/internal/internal.go generated vendored Normal file
|
@ -0,0 +1,144 @@
|
|||
// Copyright 2011 Google Inc. All rights reserved.
|
||||
// Use of this source code is governed by the Apache 2.0
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package internal provides support for package appengine.
|
||||
//
|
||||
// Programs should not use this package directly. Its API is not stable.
|
||||
// Use packages appengine and appengine/* instead.
|
||||
package internal
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"os"
|
||||
|
||||
"github.com/golang/protobuf/proto"
|
||||
|
||||
remotepb "google.golang.org/appengine/internal/remote_api"
|
||||
)
|
||||
|
||||
// errorCodeMaps is a map of service name to the error code map for the service.
|
||||
var errorCodeMaps = make(map[string]map[int32]string)
|
||||
|
||||
// RegisterErrorCodeMap is called from API implementations to register their
|
||||
// error code map. This should only be called from init functions.
|
||||
func RegisterErrorCodeMap(service string, m map[int32]string) {
|
||||
errorCodeMaps[service] = m
|
||||
}
|
||||
|
||||
type timeoutCodeKey struct {
|
||||
service string
|
||||
code int32
|
||||
}
|
||||
|
||||
// timeoutCodes is the set of service+code pairs that represent timeouts.
|
||||
var timeoutCodes = make(map[timeoutCodeKey]bool)
|
||||
|
||||
func RegisterTimeoutErrorCode(service string, code int32) {
|
||||
timeoutCodes[timeoutCodeKey{service, code}] = true
|
||||
}
|
||||
|
||||
// APIError is the type returned by appengine.Context's Call method
|
||||
// when an API call fails in an API-specific way. This may be, for instance,
|
||||
// a taskqueue API call failing with TaskQueueServiceError::UNKNOWN_QUEUE.
|
||||
type APIError struct {
|
||||
Service string
|
||||
Detail string
|
||||
Code int32 // API-specific error code
|
||||
}
|
||||
|
||||
func (e *APIError) Error() string {
|
||||
if e.Code == 0 {
|
||||
if e.Detail == "" {
|
||||
return "APIError <empty>"
|
||||
}
|
||||
return e.Detail
|
||||
}
|
||||
s := fmt.Sprintf("API error %d", e.Code)
|
||||
if m, ok := errorCodeMaps[e.Service]; ok {
|
||||
s += " (" + e.Service + ": " + m[e.Code] + ")"
|
||||
} else {
|
||||
// Shouldn't happen, but provide a bit more detail if it does.
|
||||
s = e.Service + " " + s
|
||||
}
|
||||
if e.Detail != "" {
|
||||
s += ": " + e.Detail
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
func (e *APIError) IsTimeout() bool {
|
||||
return timeoutCodes[timeoutCodeKey{e.Service, e.Code}]
|
||||
}
|
||||
|
||||
// CallError is the type returned by appengine.Context's Call method when an
|
||||
// API call fails in a generic way, such as RpcError::CAPABILITY_DISABLED.
|
||||
type CallError struct {
|
||||
Detail string
|
||||
Code int32
|
||||
// TODO: Remove this if we get a distinguishable error code.
|
||||
Timeout bool
|
||||
}
|
||||
|
||||
func (e *CallError) Error() string {
|
||||
var msg string
|
||||
switch remotepb.RpcError_ErrorCode(e.Code) {
|
||||
case remotepb.RpcError_UNKNOWN:
|
||||
return e.Detail
|
||||
case remotepb.RpcError_OVER_QUOTA:
|
||||
msg = "Over quota"
|
||||
case remotepb.RpcError_CAPABILITY_DISABLED:
|
||||
msg = "Capability disabled"
|
||||
case remotepb.RpcError_CANCELLED:
|
||||
msg = "Canceled"
|
||||
default:
|
||||
msg = fmt.Sprintf("Call error %d", e.Code)
|
||||
}
|
||||
s := msg + ": " + e.Detail
|
||||
if e.Timeout {
|
||||
s += " (timeout)"
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
func (e *CallError) IsTimeout() bool {
|
||||
return e.Timeout
|
||||
}
|
||||
|
||||
func Main() {
|
||||
installHealthChecker(http.DefaultServeMux)
|
||||
|
||||
port := "8080"
|
||||
if s := os.Getenv("PORT"); s != "" {
|
||||
port = s
|
||||
}
|
||||
|
||||
if err := http.ListenAndServe(":"+port, http.HandlerFunc(handleHTTP)); err != nil {
|
||||
log.Fatalf("http.ListenAndServe: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func installHealthChecker(mux *http.ServeMux) {
|
||||
// If no health check handler has been installed by this point, add a trivial one.
|
||||
const healthPath = "/_ah/health"
|
||||
hreq := &http.Request{
|
||||
Method: "GET",
|
||||
URL: &url.URL{
|
||||
Path: healthPath,
|
||||
},
|
||||
}
|
||||
if _, pat := mux.Handler(hreq); pat != healthPath {
|
||||
mux.HandleFunc(healthPath, func(w http.ResponseWriter, r *http.Request) {
|
||||
io.WriteString(w, "ok")
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// NamespaceMods is a map from API service to a function that will mutate an RPC request to attach a namespace.
|
||||
// The function should be prepared to be called on the same message more than once; it should only modify the
|
||||
// RPC request the first time.
|
||||
var NamespaceMods = make(map[string]func(m proto.Message, namespace string))
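A sketch of how an API implementation would use the registration hooks above from an init function, as the RegisterErrorCodeMap comment prescribes; the service name and codes here are hypothetical:

package example

import "google.golang.org/appengine/internal"

func init() {
	// Map API-specific error codes to readable names so APIError.Error()
	// can include them, and mark code 5 as a timeout for IsTimeout().
	internal.RegisterErrorCodeMap("example_service", map[int32]string{
		1: "UNKNOWN_ERROR",
		5: "DEADLINE_EXCEEDED",
	})
	internal.RegisterTimeoutErrorCode("example_service", 5)
}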
|
899 vendor/google.golang.org/appengine/internal/log/log_service.pb.go generated vendored Normal file
|
@ -0,0 +1,899 @@
|
|||
// Code generated by protoc-gen-go.
|
||||
// source: google.golang.org/appengine/internal/log/log_service.proto
|
||||
// DO NOT EDIT!
|
||||
|
||||
/*
|
||||
Package log is a generated protocol buffer package.
|
||||
|
||||
It is generated from these files:
|
||||
google.golang.org/appengine/internal/log/log_service.proto
|
||||
|
||||
It has these top-level messages:
|
||||
LogServiceError
|
||||
UserAppLogLine
|
||||
UserAppLogGroup
|
||||
FlushRequest
|
||||
SetStatusRequest
|
||||
LogOffset
|
||||
LogLine
|
||||
RequestLog
|
||||
LogModuleVersion
|
||||
LogReadRequest
|
||||
LogReadResponse
|
||||
LogUsageRecord
|
||||
LogUsageRequest
|
||||
LogUsageResponse
|
||||
*/
|
||||
package log
|
||||
|
||||
import proto "github.com/golang/protobuf/proto"
|
||||
import fmt "fmt"
|
||||
import math "math"
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ = proto.Marshal
|
||||
var _ = fmt.Errorf
|
||||
var _ = math.Inf
|
||||
|
||||
type LogServiceError_ErrorCode int32
|
||||
|
||||
const (
|
||||
LogServiceError_OK LogServiceError_ErrorCode = 0
|
||||
LogServiceError_INVALID_REQUEST LogServiceError_ErrorCode = 1
|
||||
LogServiceError_STORAGE_ERROR LogServiceError_ErrorCode = 2
|
||||
)
|
||||
|
||||
var LogServiceError_ErrorCode_name = map[int32]string{
|
||||
0: "OK",
|
||||
1: "INVALID_REQUEST",
|
||||
2: "STORAGE_ERROR",
|
||||
}
|
||||
var LogServiceError_ErrorCode_value = map[string]int32{
|
||||
"OK": 0,
|
||||
"INVALID_REQUEST": 1,
|
||||
"STORAGE_ERROR": 2,
|
||||
}
|
||||
|
||||
func (x LogServiceError_ErrorCode) Enum() *LogServiceError_ErrorCode {
|
||||
p := new(LogServiceError_ErrorCode)
|
||||
*p = x
|
||||
return p
|
||||
}
|
||||
func (x LogServiceError_ErrorCode) String() string {
|
||||
return proto.EnumName(LogServiceError_ErrorCode_name, int32(x))
|
||||
}
|
||||
func (x *LogServiceError_ErrorCode) UnmarshalJSON(data []byte) error {
|
||||
value, err := proto.UnmarshalJSONEnum(LogServiceError_ErrorCode_value, data, "LogServiceError_ErrorCode")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
*x = LogServiceError_ErrorCode(value)
|
||||
return nil
|
||||
}
|
||||
|
||||
type LogServiceError struct {
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
}
|
||||
|
||||
func (m *LogServiceError) Reset() { *m = LogServiceError{} }
|
||||
func (m *LogServiceError) String() string { return proto.CompactTextString(m) }
|
||||
func (*LogServiceError) ProtoMessage() {}
|
||||
|
||||
type UserAppLogLine struct {
|
||||
TimestampUsec *int64 `protobuf:"varint,1,req,name=timestamp_usec" json:"timestamp_usec,omitempty"`
|
||||
Level *int64 `protobuf:"varint,2,req,name=level" json:"level,omitempty"`
|
||||
Message *string `protobuf:"bytes,3,req,name=message" json:"message,omitempty"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
}
|
||||
|
||||
func (m *UserAppLogLine) Reset() { *m = UserAppLogLine{} }
|
||||
func (m *UserAppLogLine) String() string { return proto.CompactTextString(m) }
|
||||
func (*UserAppLogLine) ProtoMessage() {}
|
||||
|
||||
func (m *UserAppLogLine) GetTimestampUsec() int64 {
|
||||
if m != nil && m.TimestampUsec != nil {
|
||||
return *m.TimestampUsec
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (m *UserAppLogLine) GetLevel() int64 {
|
||||
if m != nil && m.Level != nil {
|
||||
return *m.Level
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (m *UserAppLogLine) GetMessage() string {
|
||||
if m != nil && m.Message != nil {
|
||||
return *m.Message
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
type UserAppLogGroup struct {
|
||||
LogLine []*UserAppLogLine `protobuf:"bytes,2,rep,name=log_line" json:"log_line,omitempty"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
}
|
||||
|
||||
func (m *UserAppLogGroup) Reset() { *m = UserAppLogGroup{} }
|
||||
func (m *UserAppLogGroup) String() string { return proto.CompactTextString(m) }
|
||||
func (*UserAppLogGroup) ProtoMessage() {}
|
||||
|
||||
func (m *UserAppLogGroup) GetLogLine() []*UserAppLogLine {
|
||||
if m != nil {
|
||||
return m.LogLine
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type FlushRequest struct {
|
||||
Logs []byte `protobuf:"bytes,1,opt,name=logs" json:"logs,omitempty"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
}
|
||||
|
||||
func (m *FlushRequest) Reset() { *m = FlushRequest{} }
|
||||
func (m *FlushRequest) String() string { return proto.CompactTextString(m) }
|
||||
func (*FlushRequest) ProtoMessage() {}
|
||||
|
||||
func (m *FlushRequest) GetLogs() []byte {
|
||||
if m != nil {
|
||||
return m.Logs
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type SetStatusRequest struct {
|
||||
Status *string `protobuf:"bytes,1,req,name=status" json:"status,omitempty"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
}
|
||||
|
||||
func (m *SetStatusRequest) Reset() { *m = SetStatusRequest{} }
|
||||
func (m *SetStatusRequest) String() string { return proto.CompactTextString(m) }
|
||||
func (*SetStatusRequest) ProtoMessage() {}
|
||||
|
||||
func (m *SetStatusRequest) GetStatus() string {
|
||||
if m != nil && m.Status != nil {
|
||||
return *m.Status
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
type LogOffset struct {
|
||||
RequestId []byte `protobuf:"bytes,1,opt,name=request_id" json:"request_id,omitempty"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
}
|
||||
|
||||
func (m *LogOffset) Reset() { *m = LogOffset{} }
|
||||
func (m *LogOffset) String() string { return proto.CompactTextString(m) }
|
||||
func (*LogOffset) ProtoMessage() {}
|
||||
|
||||
func (m *LogOffset) GetRequestId() []byte {
|
||||
if m != nil {
|
||||
return m.RequestId
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type LogLine struct {
|
||||
Time *int64 `protobuf:"varint,1,req,name=time" json:"time,omitempty"`
|
||||
Level *int32 `protobuf:"varint,2,req,name=level" json:"level,omitempty"`
|
||||
LogMessage *string `protobuf:"bytes,3,req,name=log_message" json:"log_message,omitempty"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
}
|
||||
|
||||
func (m *LogLine) Reset() { *m = LogLine{} }
|
||||
func (m *LogLine) String() string { return proto.CompactTextString(m) }
|
||||
func (*LogLine) ProtoMessage() {}
|
||||
|
||||
func (m *LogLine) GetTime() int64 {
|
||||
if m != nil && m.Time != nil {
|
||||
return *m.Time
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (m *LogLine) GetLevel() int32 {
|
||||
if m != nil && m.Level != nil {
|
||||
return *m.Level
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (m *LogLine) GetLogMessage() string {
|
||||
if m != nil && m.LogMessage != nil {
|
||||
return *m.LogMessage
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
type RequestLog struct {
|
||||
AppId *string `protobuf:"bytes,1,req,name=app_id" json:"app_id,omitempty"`
|
||||
ModuleId *string `protobuf:"bytes,37,opt,name=module_id,def=default" json:"module_id,omitempty"`
|
||||
VersionId *string `protobuf:"bytes,2,req,name=version_id" json:"version_id,omitempty"`
|
||||
RequestId []byte `protobuf:"bytes,3,req,name=request_id" json:"request_id,omitempty"`
|
||||
Offset *LogOffset `protobuf:"bytes,35,opt,name=offset" json:"offset,omitempty"`
|
||||
Ip *string `protobuf:"bytes,4,req,name=ip" json:"ip,omitempty"`
|
||||
Nickname *string `protobuf:"bytes,5,opt,name=nickname" json:"nickname,omitempty"`
|
||||
StartTime *int64 `protobuf:"varint,6,req,name=start_time" json:"start_time,omitempty"`
|
||||
EndTime *int64 `protobuf:"varint,7,req,name=end_time" json:"end_time,omitempty"`
|
||||
Latency *int64 `protobuf:"varint,8,req,name=latency" json:"latency,omitempty"`
|
||||
Mcycles *int64 `protobuf:"varint,9,req,name=mcycles" json:"mcycles,omitempty"`
|
||||
Method *string `protobuf:"bytes,10,req,name=method" json:"method,omitempty"`
|
||||
Resource *string `protobuf:"bytes,11,req,name=resource" json:"resource,omitempty"`
|
||||
HttpVersion *string `protobuf:"bytes,12,req,name=http_version" json:"http_version,omitempty"`
|
||||
Status *int32 `protobuf:"varint,13,req,name=status" json:"status,omitempty"`
|
||||
ResponseSize *int64 `protobuf:"varint,14,req,name=response_size" json:"response_size,omitempty"`
|
||||
Referrer *string `protobuf:"bytes,15,opt,name=referrer" json:"referrer,omitempty"`
|
||||
UserAgent *string `protobuf:"bytes,16,opt,name=user_agent" json:"user_agent,omitempty"`
|
||||
UrlMapEntry *string `protobuf:"bytes,17,req,name=url_map_entry" json:"url_map_entry,omitempty"`
|
||||
Combined *string `protobuf:"bytes,18,req,name=combined" json:"combined,omitempty"`
|
||||
ApiMcycles *int64 `protobuf:"varint,19,opt,name=api_mcycles" json:"api_mcycles,omitempty"`
|
||||
Host *string `protobuf:"bytes,20,opt,name=host" json:"host,omitempty"`
|
||||
Cost *float64 `protobuf:"fixed64,21,opt,name=cost" json:"cost,omitempty"`
|
||||
TaskQueueName *string `protobuf:"bytes,22,opt,name=task_queue_name" json:"task_queue_name,omitempty"`
|
||||
TaskName *string `protobuf:"bytes,23,opt,name=task_name" json:"task_name,omitempty"`
|
||||
WasLoadingRequest *bool `protobuf:"varint,24,opt,name=was_loading_request" json:"was_loading_request,omitempty"`
|
||||
PendingTime *int64 `protobuf:"varint,25,opt,name=pending_time" json:"pending_time,omitempty"`
|
||||
ReplicaIndex *int32 `protobuf:"varint,26,opt,name=replica_index,def=-1" json:"replica_index,omitempty"`
|
||||
Finished *bool `protobuf:"varint,27,opt,name=finished,def=1" json:"finished,omitempty"`
|
||||
CloneKey []byte `protobuf:"bytes,28,opt,name=clone_key" json:"clone_key,omitempty"`
|
||||
Line []*LogLine `protobuf:"bytes,29,rep,name=line" json:"line,omitempty"`
|
||||
LinesIncomplete *bool `protobuf:"varint,36,opt,name=lines_incomplete" json:"lines_incomplete,omitempty"`
|
||||
AppEngineRelease []byte `protobuf:"bytes,38,opt,name=app_engine_release" json:"app_engine_release,omitempty"`
|
||||
ExitReason *int32 `protobuf:"varint,30,opt,name=exit_reason" json:"exit_reason,omitempty"`
|
||||
WasThrottledForTime *bool `protobuf:"varint,31,opt,name=was_throttled_for_time" json:"was_throttled_for_time,omitempty"`
|
||||
WasThrottledForRequests *bool `protobuf:"varint,32,opt,name=was_throttled_for_requests" json:"was_throttled_for_requests,omitempty"`
|
||||
ThrottledTime *int64 `protobuf:"varint,33,opt,name=throttled_time" json:"throttled_time,omitempty"`
|
||||
ServerName []byte `protobuf:"bytes,34,opt,name=server_name" json:"server_name,omitempty"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
}
|
||||
|
||||
func (m *RequestLog) Reset() { *m = RequestLog{} }
|
||||
func (m *RequestLog) String() string { return proto.CompactTextString(m) }
|
||||
func (*RequestLog) ProtoMessage() {}
|
||||
|
||||
const Default_RequestLog_ModuleId string = "default"
|
||||
const Default_RequestLog_ReplicaIndex int32 = -1
|
||||
const Default_RequestLog_Finished bool = true
|
||||
|
||||
func (m *RequestLog) GetAppId() string {
|
||||
if m != nil && m.AppId != nil {
|
||||
return *m.AppId
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *RequestLog) GetModuleId() string {
|
||||
if m != nil && m.ModuleId != nil {
|
||||
return *m.ModuleId
|
||||
}
|
||||
return Default_RequestLog_ModuleId
|
||||
}
|
||||
|
||||
func (m *RequestLog) GetVersionId() string {
|
||||
if m != nil && m.VersionId != nil {
|
||||
return *m.VersionId
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *RequestLog) GetRequestId() []byte {
|
||||
if m != nil {
|
||||
return m.RequestId
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *RequestLog) GetOffset() *LogOffset {
|
||||
if m != nil {
|
||||
return m.Offset
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *RequestLog) GetIp() string {
|
||||
if m != nil && m.Ip != nil {
|
||||
return *m.Ip
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *RequestLog) GetNickname() string {
|
||||
if m != nil && m.Nickname != nil {
|
||||
return *m.Nickname
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *RequestLog) GetStartTime() int64 {
|
||||
if m != nil && m.StartTime != nil {
|
||||
return *m.StartTime
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (m *RequestLog) GetEndTime() int64 {
|
||||
if m != nil && m.EndTime != nil {
|
||||
return *m.EndTime
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (m *RequestLog) GetLatency() int64 {
|
||||
if m != nil && m.Latency != nil {
|
||||
return *m.Latency
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (m *RequestLog) GetMcycles() int64 {
|
||||
if m != nil && m.Mcycles != nil {
|
||||
return *m.Mcycles
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (m *RequestLog) GetMethod() string {
|
||||
if m != nil && m.Method != nil {
|
||||
return *m.Method
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *RequestLog) GetResource() string {
|
||||
if m != nil && m.Resource != nil {
|
||||
return *m.Resource
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *RequestLog) GetHttpVersion() string {
|
||||
if m != nil && m.HttpVersion != nil {
|
||||
return *m.HttpVersion
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *RequestLog) GetStatus() int32 {
|
||||
if m != nil && m.Status != nil {
|
||||
return *m.Status
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (m *RequestLog) GetResponseSize() int64 {
|
||||
if m != nil && m.ResponseSize != nil {
|
||||
return *m.ResponseSize
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (m *RequestLog) GetReferrer() string {
|
||||
if m != nil && m.Referrer != nil {
|
||||
return *m.Referrer
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *RequestLog) GetUserAgent() string {
|
||||
if m != nil && m.UserAgent != nil {
|
||||
return *m.UserAgent
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *RequestLog) GetUrlMapEntry() string {
|
||||
if m != nil && m.UrlMapEntry != nil {
|
||||
return *m.UrlMapEntry
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *RequestLog) GetCombined() string {
|
||||
if m != nil && m.Combined != nil {
|
||||
return *m.Combined
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *RequestLog) GetApiMcycles() int64 {
|
||||
if m != nil && m.ApiMcycles != nil {
|
||||
return *m.ApiMcycles
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (m *RequestLog) GetHost() string {
|
||||
if m != nil && m.Host != nil {
|
||||
return *m.Host
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *RequestLog) GetCost() float64 {
|
||||
if m != nil && m.Cost != nil {
|
||||
return *m.Cost
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (m *RequestLog) GetTaskQueueName() string {
|
||||
if m != nil && m.TaskQueueName != nil {
|
||||
return *m.TaskQueueName
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *RequestLog) GetTaskName() string {
|
||||
if m != nil && m.TaskName != nil {
|
||||
return *m.TaskName
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *RequestLog) GetWasLoadingRequest() bool {
|
||||
if m != nil && m.WasLoadingRequest != nil {
|
||||
return *m.WasLoadingRequest
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (m *RequestLog) GetPendingTime() int64 {
|
||||
if m != nil && m.PendingTime != nil {
|
||||
return *m.PendingTime
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (m *RequestLog) GetReplicaIndex() int32 {
|
||||
if m != nil && m.ReplicaIndex != nil {
|
||||
return *m.ReplicaIndex
|
||||
}
|
||||
return Default_RequestLog_ReplicaIndex
|
||||
}
|
||||
|
||||
func (m *RequestLog) GetFinished() bool {
|
||||
if m != nil && m.Finished != nil {
|
||||
return *m.Finished
|
||||
}
|
||||
return Default_RequestLog_Finished
|
||||
}
|
||||
|
||||
func (m *RequestLog) GetCloneKey() []byte {
|
||||
if m != nil {
|
||||
return m.CloneKey
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *RequestLog) GetLine() []*LogLine {
|
||||
if m != nil {
|
||||
return m.Line
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *RequestLog) GetLinesIncomplete() bool {
|
||||
if m != nil && m.LinesIncomplete != nil {
|
||||
return *m.LinesIncomplete
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (m *RequestLog) GetAppEngineRelease() []byte {
|
||||
if m != nil {
|
||||
return m.AppEngineRelease
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *RequestLog) GetExitReason() int32 {
|
||||
if m != nil && m.ExitReason != nil {
|
||||
return *m.ExitReason
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (m *RequestLog) GetWasThrottledForTime() bool {
|
||||
if m != nil && m.WasThrottledForTime != nil {
|
||||
return *m.WasThrottledForTime
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (m *RequestLog) GetWasThrottledForRequests() bool {
|
||||
if m != nil && m.WasThrottledForRequests != nil {
|
||||
return *m.WasThrottledForRequests
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (m *RequestLog) GetThrottledTime() int64 {
|
||||
if m != nil && m.ThrottledTime != nil {
|
||||
return *m.ThrottledTime
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (m *RequestLog) GetServerName() []byte {
|
||||
if m != nil {
|
||||
return m.ServerName
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type LogModuleVersion struct {
|
||||
ModuleId *string `protobuf:"bytes,1,opt,name=module_id,def=default" json:"module_id,omitempty"`
|
||||
VersionId *string `protobuf:"bytes,2,opt,name=version_id" json:"version_id,omitempty"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
}
|
||||
|
||||
func (m *LogModuleVersion) Reset() { *m = LogModuleVersion{} }
|
||||
func (m *LogModuleVersion) String() string { return proto.CompactTextString(m) }
|
||||
func (*LogModuleVersion) ProtoMessage() {}
|
||||
|
||||
const Default_LogModuleVersion_ModuleId string = "default"
|
||||
|
||||
func (m *LogModuleVersion) GetModuleId() string {
|
||||
if m != nil && m.ModuleId != nil {
|
||||
return *m.ModuleId
|
||||
}
|
||||
return Default_LogModuleVersion_ModuleId
|
||||
}
|
||||
|
||||
func (m *LogModuleVersion) GetVersionId() string {
|
||||
if m != nil && m.VersionId != nil {
|
||||
return *m.VersionId
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
type LogReadRequest struct {
|
||||
AppId *string `protobuf:"bytes,1,req,name=app_id" json:"app_id,omitempty"`
|
||||
VersionId []string `protobuf:"bytes,2,rep,name=version_id" json:"version_id,omitempty"`
|
||||
ModuleVersion []*LogModuleVersion `protobuf:"bytes,19,rep,name=module_version" json:"module_version,omitempty"`
|
||||
StartTime *int64 `protobuf:"varint,3,opt,name=start_time" json:"start_time,omitempty"`
|
||||
EndTime *int64 `protobuf:"varint,4,opt,name=end_time" json:"end_time,omitempty"`
|
||||
Offset *LogOffset `protobuf:"bytes,5,opt,name=offset" json:"offset,omitempty"`
|
||||
RequestId [][]byte `protobuf:"bytes,6,rep,name=request_id" json:"request_id,omitempty"`
|
||||
MinimumLogLevel *int32 `protobuf:"varint,7,opt,name=minimum_log_level" json:"minimum_log_level,omitempty"`
|
||||
IncludeIncomplete *bool `protobuf:"varint,8,opt,name=include_incomplete" json:"include_incomplete,omitempty"`
|
||||
Count *int64 `protobuf:"varint,9,opt,name=count" json:"count,omitempty"`
|
||||
CombinedLogRegex *string `protobuf:"bytes,14,opt,name=combined_log_regex" json:"combined_log_regex,omitempty"`
|
||||
HostRegex *string `protobuf:"bytes,15,opt,name=host_regex" json:"host_regex,omitempty"`
|
||||
ReplicaIndex *int32 `protobuf:"varint,16,opt,name=replica_index" json:"replica_index,omitempty"`
|
||||
IncludeAppLogs *bool `protobuf:"varint,10,opt,name=include_app_logs" json:"include_app_logs,omitempty"`
|
||||
AppLogsPerRequest *int32 `protobuf:"varint,17,opt,name=app_logs_per_request" json:"app_logs_per_request,omitempty"`
|
||||
IncludeHost *bool `protobuf:"varint,11,opt,name=include_host" json:"include_host,omitempty"`
|
||||
IncludeAll *bool `protobuf:"varint,12,opt,name=include_all" json:"include_all,omitempty"`
|
||||
CacheIterator *bool `protobuf:"varint,13,opt,name=cache_iterator" json:"cache_iterator,omitempty"`
|
||||
NumShards *int32 `protobuf:"varint,18,opt,name=num_shards" json:"num_shards,omitempty"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
}
|
||||
|
||||
func (m *LogReadRequest) Reset() { *m = LogReadRequest{} }
|
||||
func (m *LogReadRequest) String() string { return proto.CompactTextString(m) }
|
||||
func (*LogReadRequest) ProtoMessage() {}
|
||||
|
||||
func (m *LogReadRequest) GetAppId() string {
|
||||
if m != nil && m.AppId != nil {
|
||||
return *m.AppId
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *LogReadRequest) GetVersionId() []string {
|
||||
if m != nil {
|
||||
return m.VersionId
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *LogReadRequest) GetModuleVersion() []*LogModuleVersion {
|
||||
if m != nil {
|
||||
return m.ModuleVersion
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *LogReadRequest) GetStartTime() int64 {
|
||||
if m != nil && m.StartTime != nil {
|
||||
return *m.StartTime
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (m *LogReadRequest) GetEndTime() int64 {
|
||||
if m != nil && m.EndTime != nil {
|
||||
return *m.EndTime
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (m *LogReadRequest) GetOffset() *LogOffset {
|
||||
if m != nil {
|
||||
return m.Offset
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *LogReadRequest) GetRequestId() [][]byte {
|
||||
if m != nil {
|
||||
return m.RequestId
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *LogReadRequest) GetMinimumLogLevel() int32 {
|
||||
if m != nil && m.MinimumLogLevel != nil {
|
||||
return *m.MinimumLogLevel
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (m *LogReadRequest) GetIncludeIncomplete() bool {
|
||||
if m != nil && m.IncludeIncomplete != nil {
|
||||
return *m.IncludeIncomplete
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (m *LogReadRequest) GetCount() int64 {
|
||||
if m != nil && m.Count != nil {
|
||||
return *m.Count
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (m *LogReadRequest) GetCombinedLogRegex() string {
|
||||
if m != nil && m.CombinedLogRegex != nil {
|
||||
return *m.CombinedLogRegex
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *LogReadRequest) GetHostRegex() string {
|
||||
if m != nil && m.HostRegex != nil {
|
||||
return *m.HostRegex
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *LogReadRequest) GetReplicaIndex() int32 {
|
||||
if m != nil && m.ReplicaIndex != nil {
|
||||
return *m.ReplicaIndex
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (m *LogReadRequest) GetIncludeAppLogs() bool {
|
||||
if m != nil && m.IncludeAppLogs != nil {
|
||||
return *m.IncludeAppLogs
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (m *LogReadRequest) GetAppLogsPerRequest() int32 {
|
||||
if m != nil && m.AppLogsPerRequest != nil {
|
||||
return *m.AppLogsPerRequest
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (m *LogReadRequest) GetIncludeHost() bool {
|
||||
if m != nil && m.IncludeHost != nil {
|
||||
return *m.IncludeHost
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (m *LogReadRequest) GetIncludeAll() bool {
|
||||
if m != nil && m.IncludeAll != nil {
|
||||
return *m.IncludeAll
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (m *LogReadRequest) GetCacheIterator() bool {
|
||||
if m != nil && m.CacheIterator != nil {
|
||||
return *m.CacheIterator
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (m *LogReadRequest) GetNumShards() int32 {
|
||||
if m != nil && m.NumShards != nil {
|
||||
return *m.NumShards
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
type LogReadResponse struct {
|
||||
Log []*RequestLog `protobuf:"bytes,1,rep,name=log" json:"log,omitempty"`
|
||||
Offset *LogOffset `protobuf:"bytes,2,opt,name=offset" json:"offset,omitempty"`
|
||||
LastEndTime *int64 `protobuf:"varint,3,opt,name=last_end_time" json:"last_end_time,omitempty"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
}
|
||||
|
||||
func (m *LogReadResponse) Reset() { *m = LogReadResponse{} }
|
||||
func (m *LogReadResponse) String() string { return proto.CompactTextString(m) }
|
||||
func (*LogReadResponse) ProtoMessage() {}
|
||||
|
||||
func (m *LogReadResponse) GetLog() []*RequestLog {
|
||||
if m != nil {
|
||||
return m.Log
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *LogReadResponse) GetOffset() *LogOffset {
|
||||
if m != nil {
|
||||
return m.Offset
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *LogReadResponse) GetLastEndTime() int64 {
|
||||
if m != nil && m.LastEndTime != nil {
|
||||
return *m.LastEndTime
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
type LogUsageRecord struct {
|
||||
VersionId *string `protobuf:"bytes,1,opt,name=version_id" json:"version_id,omitempty"`
|
||||
StartTime *int32 `protobuf:"varint,2,opt,name=start_time" json:"start_time,omitempty"`
|
||||
EndTime *int32 `protobuf:"varint,3,opt,name=end_time" json:"end_time,omitempty"`
|
||||
Count *int64 `protobuf:"varint,4,opt,name=count" json:"count,omitempty"`
|
||||
TotalSize *int64 `protobuf:"varint,5,opt,name=total_size" json:"total_size,omitempty"`
|
||||
Records *int32 `protobuf:"varint,6,opt,name=records" json:"records,omitempty"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
}
|
||||
|
||||
func (m *LogUsageRecord) Reset() { *m = LogUsageRecord{} }
|
||||
func (m *LogUsageRecord) String() string { return proto.CompactTextString(m) }
|
||||
func (*LogUsageRecord) ProtoMessage() {}
|
||||
|
||||
func (m *LogUsageRecord) GetVersionId() string {
|
||||
if m != nil && m.VersionId != nil {
|
||||
return *m.VersionId
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *LogUsageRecord) GetStartTime() int32 {
|
||||
if m != nil && m.StartTime != nil {
|
||||
return *m.StartTime
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (m *LogUsageRecord) GetEndTime() int32 {
|
||||
if m != nil && m.EndTime != nil {
|
||||
return *m.EndTime
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (m *LogUsageRecord) GetCount() int64 {
|
||||
if m != nil && m.Count != nil {
|
||||
return *m.Count
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (m *LogUsageRecord) GetTotalSize() int64 {
|
||||
if m != nil && m.TotalSize != nil {
|
||||
return *m.TotalSize
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (m *LogUsageRecord) GetRecords() int32 {
|
||||
if m != nil && m.Records != nil {
|
||||
return *m.Records
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
type LogUsageRequest struct {
|
||||
AppId *string `protobuf:"bytes,1,req,name=app_id" json:"app_id,omitempty"`
|
||||
VersionId []string `protobuf:"bytes,2,rep,name=version_id" json:"version_id,omitempty"`
|
||||
StartTime *int32 `protobuf:"varint,3,opt,name=start_time" json:"start_time,omitempty"`
|
||||
EndTime *int32 `protobuf:"varint,4,opt,name=end_time" json:"end_time,omitempty"`
|
||||
ResolutionHours *uint32 `protobuf:"varint,5,opt,name=resolution_hours,def=1" json:"resolution_hours,omitempty"`
|
||||
CombineVersions *bool `protobuf:"varint,6,opt,name=combine_versions" json:"combine_versions,omitempty"`
|
||||
UsageVersion *int32 `protobuf:"varint,7,opt,name=usage_version" json:"usage_version,omitempty"`
|
||||
VersionsOnly *bool `protobuf:"varint,8,opt,name=versions_only" json:"versions_only,omitempty"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
}
|
||||
|
||||
func (m *LogUsageRequest) Reset() { *m = LogUsageRequest{} }
|
||||
func (m *LogUsageRequest) String() string { return proto.CompactTextString(m) }
|
||||
func (*LogUsageRequest) ProtoMessage() {}
|
||||
|
||||
const Default_LogUsageRequest_ResolutionHours uint32 = 1
|
||||
|
||||
func (m *LogUsageRequest) GetAppId() string {
|
||||
if m != nil && m.AppId != nil {
|
||||
return *m.AppId
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *LogUsageRequest) GetVersionId() []string {
|
||||
if m != nil {
|
||||
return m.VersionId
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *LogUsageRequest) GetStartTime() int32 {
|
||||
if m != nil && m.StartTime != nil {
|
||||
return *m.StartTime
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (m *LogUsageRequest) GetEndTime() int32 {
|
||||
if m != nil && m.EndTime != nil {
|
||||
return *m.EndTime
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (m *LogUsageRequest) GetResolutionHours() uint32 {
|
||||
if m != nil && m.ResolutionHours != nil {
|
||||
return *m.ResolutionHours
|
||||
}
|
||||
return Default_LogUsageRequest_ResolutionHours
|
||||
}
|
||||
|
||||
func (m *LogUsageRequest) GetCombineVersions() bool {
|
||||
if m != nil && m.CombineVersions != nil {
|
||||
return *m.CombineVersions
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (m *LogUsageRequest) GetUsageVersion() int32 {
|
||||
if m != nil && m.UsageVersion != nil {
|
||||
return *m.UsageVersion
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (m *LogUsageRequest) GetVersionsOnly() bool {
|
||||
if m != nil && m.VersionsOnly != nil {
|
||||
return *m.VersionsOnly
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
type LogUsageResponse struct {
|
||||
Usage []*LogUsageRecord `protobuf:"bytes,1,rep,name=usage" json:"usage,omitempty"`
|
||||
Summary *LogUsageRecord `protobuf:"bytes,2,opt,name=summary" json:"summary,omitempty"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
}
|
||||
|
||||
func (m *LogUsageResponse) Reset() { *m = LogUsageResponse{} }
|
||||
func (m *LogUsageResponse) String() string { return proto.CompactTextString(m) }
|
||||
func (*LogUsageResponse) ProtoMessage() {}
|
||||
|
||||
func (m *LogUsageResponse) GetUsage() []*LogUsageRecord {
|
||||
if m != nil {
|
||||
return m.Usage
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *LogUsageResponse) GetSummary() *LogUsageRecord {
|
||||
if m != nil {
|
||||
return m.Summary
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func init() {
|
||||
}
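A minimal usage sketch for the generated types above (proto2 scalar fields are pointers, hence the proto.Int64/proto.String helpers); the import path follows the vendor layout of this file:

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	logpb "google.golang.org/appengine/internal/log"
)

func main() {
	line := &logpb.UserAppLogLine{
		TimestampUsec: proto.Int64(1461000000000000),
		Level:         proto.Int64(3),
		Message:       proto.String("request handled"),
	}
	group := &logpb.UserAppLogGroup{LogLine: []*logpb.UserAppLogLine{line}}

	buf, err := proto.Marshal(group)
	if err != nil {
		panic(err)
	}
	fmt.Printf("marshaled %d bytes; level=%d\n", len(buf), line.GetLevel())
}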
|
150 vendor/google.golang.org/appengine/internal/log/log_service.proto generated vendored Normal file
|
@ -0,0 +1,150 @@
|
|||
syntax = "proto2";
|
||||
option go_package = "log";
|
||||
|
||||
package appengine;
|
||||
|
||||
message LogServiceError {
|
||||
enum ErrorCode {
|
||||
OK = 0;
|
||||
INVALID_REQUEST = 1;
|
||||
STORAGE_ERROR = 2;
|
||||
}
|
||||
}
|
||||
|
||||
message UserAppLogLine {
|
||||
required int64 timestamp_usec = 1;
|
||||
required int64 level = 2;
|
||||
required string message = 3;
|
||||
}
|
||||
|
||||
message UserAppLogGroup {
|
||||
repeated UserAppLogLine log_line = 2;
|
||||
}
|
||||
|
||||
message FlushRequest {
|
||||
optional bytes logs = 1;
|
||||
}
|
||||
|
||||
message SetStatusRequest {
|
||||
required string status = 1;
|
||||
}
|
||||
|
||||
|
||||
message LogOffset {
|
||||
optional bytes request_id = 1;
|
||||
}
|
||||
|
||||
message LogLine {
|
||||
required int64 time = 1;
|
||||
required int32 level = 2;
|
||||
required string log_message = 3;
|
||||
}
|
||||
|
||||
message RequestLog {
|
||||
required string app_id = 1;
|
||||
optional string module_id = 37 [default="default"];
|
||||
required string version_id = 2;
|
||||
required bytes request_id = 3;
|
||||
optional LogOffset offset = 35;
|
||||
required string ip = 4;
|
||||
optional string nickname = 5;
|
||||
required int64 start_time = 6;
|
||||
required int64 end_time = 7;
|
||||
required int64 latency = 8;
|
||||
required int64 mcycles = 9;
|
||||
required string method = 10;
|
||||
required string resource = 11;
|
||||
required string http_version = 12;
|
||||
required int32 status = 13;
|
||||
required int64 response_size = 14;
|
||||
optional string referrer = 15;
|
||||
optional string user_agent = 16;
|
||||
required string url_map_entry = 17;
|
||||
required string combined = 18;
|
||||
optional int64 api_mcycles = 19;
|
||||
optional string host = 20;
|
||||
optional double cost = 21;
|
||||
|
||||
optional string task_queue_name = 22;
|
||||
optional string task_name = 23;
|
||||
|
||||
optional bool was_loading_request = 24;
|
||||
optional int64 pending_time = 25;
|
||||
optional int32 replica_index = 26 [default = -1];
|
||||
optional bool finished = 27 [default = true];
|
||||
optional bytes clone_key = 28;
|
||||
|
||||
repeated LogLine line = 29;
|
||||
|
||||
optional bool lines_incomplete = 36;
|
||||
optional bytes app_engine_release = 38;
|
||||
|
||||
optional int32 exit_reason = 30;
|
||||
optional bool was_throttled_for_time = 31;
|
||||
optional bool was_throttled_for_requests = 32;
|
||||
optional int64 throttled_time = 33;
|
||||
|
||||
optional bytes server_name = 34;
|
||||
}
|
||||
|
||||
message LogModuleVersion {
|
||||
optional string module_id = 1 [default="default"];
|
||||
optional string version_id = 2;
|
||||
}
|
||||
|
||||
message LogReadRequest {
|
||||
required string app_id = 1;
|
||||
repeated string version_id = 2;
|
||||
repeated LogModuleVersion module_version = 19;
|
||||
|
||||
optional int64 start_time = 3;
|
||||
optional int64 end_time = 4;
|
||||
optional LogOffset offset = 5;
|
||||
repeated bytes request_id = 6;
|
||||
|
||||
optional int32 minimum_log_level = 7;
|
||||
optional bool include_incomplete = 8;
|
||||
optional int64 count = 9;
|
||||
|
||||
optional string combined_log_regex = 14;
|
||||
optional string host_regex = 15;
|
||||
optional int32 replica_index = 16;
|
||||
|
||||
optional bool include_app_logs = 10;
|
||||
optional int32 app_logs_per_request = 17;
|
||||
optional bool include_host = 11;
|
||||
optional bool include_all = 12;
|
||||
optional bool cache_iterator = 13;
|
||||
optional int32 num_shards = 18;
|
||||
}
|
||||
|
||||
message LogReadResponse {
|
||||
repeated RequestLog log = 1;
|
||||
optional LogOffset offset = 2;
|
||||
optional int64 last_end_time = 3;
|
||||
}
|
||||
|
||||
message LogUsageRecord {
|
||||
optional string version_id = 1;
|
||||
optional int32 start_time = 2;
|
||||
optional int32 end_time = 3;
|
||||
optional int64 count = 4;
|
||||
optional int64 total_size = 5;
|
||||
optional int32 records = 6;
|
||||
}
|
||||
|
||||
message LogUsageRequest {
|
||||
required string app_id = 1;
|
||||
repeated string version_id = 2;
|
||||
optional int32 start_time = 3;
|
||||
optional int32 end_time = 4;
|
||||
optional uint32 resolution_hours = 5 [default = 1];
|
||||
optional bool combine_versions = 6;
|
||||
optional int32 usage_version = 7;
|
||||
optional bool versions_only = 8;
|
||||
}
|
||||
|
||||
message LogUsageResponse {
|
||||
repeated LogUsageRecord usage = 1;
|
||||
optional LogUsageRecord summary = 2;
|
||||
}
|
61 vendor/google.golang.org/appengine/internal/metadata.go generated vendored Normal file
|
@ -0,0 +1,61 @@
|
|||
// Copyright 2014 Google Inc. All rights reserved.
|
||||
// Use of this source code is governed by the Apache 2.0
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package internal
|
||||
|
||||
// This file has code for accessing metadata.
|
||||
//
|
||||
// References:
|
||||
// https://cloud.google.com/compute/docs/metadata
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"net/http"
|
||||
"net/url"
|
||||
)
|
||||
|
||||
const (
|
||||
metadataHost = "metadata"
|
||||
metadataPath = "/computeMetadata/v1/"
|
||||
)
|
||||
|
||||
var (
|
||||
metadataRequestHeaders = http.Header{
|
||||
"Metadata-Flavor": []string{"Google"},
|
||||
}
|
||||
)
|
||||
|
||||
// TODO(dsymonds): Do we need to support default values, like Python?
|
||||
func mustGetMetadata(key string) []byte {
|
||||
b, err := getMetadata(key)
|
||||
if err != nil {
|
||||
log.Fatalf("Metadata fetch failed: %v", err)
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
||||
func getMetadata(key string) ([]byte, error) {
|
||||
// TODO(dsymonds): May need to use url.Parse to support keys with query args.
|
||||
req := &http.Request{
|
||||
Method: "GET",
|
||||
URL: &url.URL{
|
||||
Scheme: "http",
|
||||
Host: metadataHost,
|
||||
Path: metadataPath + key,
|
||||
},
|
||||
Header: metadataRequestHeaders,
|
||||
Host: metadataHost,
|
||||
}
|
||||
resp, err := http.DefaultClient.Do(req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
if resp.StatusCode != 200 {
|
||||
return nil, fmt.Errorf("metadata server returned HTTP %d", resp.StatusCode)
|
||||
}
|
||||
return ioutil.ReadAll(resp.Body)
|
||||
}
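mustGetMetadata above aborts the process on any error, which is what identity_vm.go relies on. As an illustrative sketch within this same package (not part of the file), a non-fatal variant for callers that can tolerate a missing key:

// optionalMetadata returns the value for key, or "" if the metadata
// server does not have it or cannot be reached.
func optionalMetadata(key string) string {
	b, err := getMetadata(key)
	if err != nil {
		return ""
	}
	return string(b)
}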
|
375 vendor/google.golang.org/appengine/internal/modules/modules_service.pb.go generated vendored Normal file
|
@ -0,0 +1,375 @@
|
|||
// Code generated by protoc-gen-go.
|
||||
// source: google.golang.org/appengine/internal/modules/modules_service.proto
|
||||
// DO NOT EDIT!
|
||||
|
||||
/*
|
||||
Package modules is a generated protocol buffer package.
|
||||
|
||||
It is generated from these files:
|
||||
google.golang.org/appengine/internal/modules/modules_service.proto
|
||||
|
||||
It has these top-level messages:
|
||||
ModulesServiceError
|
||||
GetModulesRequest
|
||||
GetModulesResponse
|
||||
GetVersionsRequest
|
||||
GetVersionsResponse
|
||||
GetDefaultVersionRequest
|
||||
GetDefaultVersionResponse
|
||||
GetNumInstancesRequest
|
||||
GetNumInstancesResponse
|
||||
SetNumInstancesRequest
|
||||
SetNumInstancesResponse
|
||||
StartModuleRequest
|
||||
StartModuleResponse
|
||||
StopModuleRequest
|
||||
StopModuleResponse
|
||||
GetHostnameRequest
|
||||
GetHostnameResponse
|
||||
*/
|
||||
package modules
|
||||
|
||||
import proto "github.com/golang/protobuf/proto"
|
||||
import fmt "fmt"
|
||||
import math "math"
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ = proto.Marshal
|
||||
var _ = fmt.Errorf
|
||||
var _ = math.Inf
|
||||
|
||||
type ModulesServiceError_ErrorCode int32
|
||||
|
||||
const (
|
||||
ModulesServiceError_OK ModulesServiceError_ErrorCode = 0
|
||||
ModulesServiceError_INVALID_MODULE ModulesServiceError_ErrorCode = 1
|
||||
ModulesServiceError_INVALID_VERSION ModulesServiceError_ErrorCode = 2
|
||||
ModulesServiceError_INVALID_INSTANCES ModulesServiceError_ErrorCode = 3
|
||||
ModulesServiceError_TRANSIENT_ERROR ModulesServiceError_ErrorCode = 4
|
||||
ModulesServiceError_UNEXPECTED_STATE ModulesServiceError_ErrorCode = 5
|
||||
)
|
||||
|
||||
var ModulesServiceError_ErrorCode_name = map[int32]string{
|
||||
0: "OK",
|
||||
1: "INVALID_MODULE",
|
||||
2: "INVALID_VERSION",
|
||||
3: "INVALID_INSTANCES",
|
||||
4: "TRANSIENT_ERROR",
|
||||
5: "UNEXPECTED_STATE",
|
||||
}
|
||||
var ModulesServiceError_ErrorCode_value = map[string]int32{
|
||||
"OK": 0,
|
||||
"INVALID_MODULE": 1,
|
||||
"INVALID_VERSION": 2,
|
||||
"INVALID_INSTANCES": 3,
|
||||
"TRANSIENT_ERROR": 4,
|
||||
"UNEXPECTED_STATE": 5,
|
||||
}
|
||||
|
||||
func (x ModulesServiceError_ErrorCode) Enum() *ModulesServiceError_ErrorCode {
|
||||
p := new(ModulesServiceError_ErrorCode)
|
||||
*p = x
|
||||
return p
|
||||
}
|
||||
func (x ModulesServiceError_ErrorCode) String() string {
|
||||
return proto.EnumName(ModulesServiceError_ErrorCode_name, int32(x))
|
||||
}
|
||||
func (x *ModulesServiceError_ErrorCode) UnmarshalJSON(data []byte) error {
|
||||
value, err := proto.UnmarshalJSONEnum(ModulesServiceError_ErrorCode_value, data, "ModulesServiceError_ErrorCode")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
*x = ModulesServiceError_ErrorCode(value)
|
||||
return nil
|
||||
}
|
||||
|
||||
type ModulesServiceError struct {
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
}
|
||||
|
||||
func (m *ModulesServiceError) Reset() { *m = ModulesServiceError{} }
|
||||
func (m *ModulesServiceError) String() string { return proto.CompactTextString(m) }
|
||||
func (*ModulesServiceError) ProtoMessage() {}
|
||||
|
||||
type GetModulesRequest struct {
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
}
|
||||
|
||||
func (m *GetModulesRequest) Reset() { *m = GetModulesRequest{} }
|
||||
func (m *GetModulesRequest) String() string { return proto.CompactTextString(m) }
|
||||
func (*GetModulesRequest) ProtoMessage() {}
|
||||
|
||||
type GetModulesResponse struct {
|
||||
Module []string `protobuf:"bytes,1,rep,name=module" json:"module,omitempty"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
}
|
||||
|
||||
func (m *GetModulesResponse) Reset() { *m = GetModulesResponse{} }
|
||||
func (m *GetModulesResponse) String() string { return proto.CompactTextString(m) }
|
||||
func (*GetModulesResponse) ProtoMessage() {}
|
||||
|
||||
func (m *GetModulesResponse) GetModule() []string {
|
||||
if m != nil {
|
||||
return m.Module
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type GetVersionsRequest struct {
|
||||
Module *string `protobuf:"bytes,1,opt,name=module" json:"module,omitempty"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
}
|
||||
|
||||
func (m *GetVersionsRequest) Reset() { *m = GetVersionsRequest{} }
|
||||
func (m *GetVersionsRequest) String() string { return proto.CompactTextString(m) }
|
||||
func (*GetVersionsRequest) ProtoMessage() {}
|
||||
|
||||
func (m *GetVersionsRequest) GetModule() string {
|
||||
if m != nil && m.Module != nil {
|
||||
return *m.Module
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
type GetVersionsResponse struct {
|
||||
Version []string `protobuf:"bytes,1,rep,name=version" json:"version,omitempty"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
}
|
||||
|
||||
func (m *GetVersionsResponse) Reset() { *m = GetVersionsResponse{} }
|
||||
func (m *GetVersionsResponse) String() string { return proto.CompactTextString(m) }
|
||||
func (*GetVersionsResponse) ProtoMessage() {}
|
||||
|
||||
func (m *GetVersionsResponse) GetVersion() []string {
|
||||
if m != nil {
|
||||
return m.Version
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type GetDefaultVersionRequest struct {
|
||||
Module *string `protobuf:"bytes,1,opt,name=module" json:"module,omitempty"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
}
|
||||
|
||||
func (m *GetDefaultVersionRequest) Reset() { *m = GetDefaultVersionRequest{} }
|
||||
func (m *GetDefaultVersionRequest) String() string { return proto.CompactTextString(m) }
|
||||
func (*GetDefaultVersionRequest) ProtoMessage() {}
|
||||
|
||||
func (m *GetDefaultVersionRequest) GetModule() string {
|
||||
if m != nil && m.Module != nil {
|
||||
return *m.Module
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
type GetDefaultVersionResponse struct {
|
||||
Version *string `protobuf:"bytes,1,req,name=version" json:"version,omitempty"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
}
|
||||
|
||||
func (m *GetDefaultVersionResponse) Reset() { *m = GetDefaultVersionResponse{} }
|
||||
func (m *GetDefaultVersionResponse) String() string { return proto.CompactTextString(m) }
|
||||
func (*GetDefaultVersionResponse) ProtoMessage()    {}

func (m *GetDefaultVersionResponse) GetVersion() string {
	if m != nil && m.Version != nil {
		return *m.Version
	}
	return ""
}

type GetNumInstancesRequest struct {
	Module           *string `protobuf:"bytes,1,opt,name=module" json:"module,omitempty"`
	Version          *string `protobuf:"bytes,2,opt,name=version" json:"version,omitempty"`
	XXX_unrecognized []byte  `json:"-"`
}

func (m *GetNumInstancesRequest) Reset()         { *m = GetNumInstancesRequest{} }
func (m *GetNumInstancesRequest) String() string { return proto.CompactTextString(m) }
func (*GetNumInstancesRequest) ProtoMessage()    {}

func (m *GetNumInstancesRequest) GetModule() string {
	if m != nil && m.Module != nil {
		return *m.Module
	}
	return ""
}

func (m *GetNumInstancesRequest) GetVersion() string {
	if m != nil && m.Version != nil {
		return *m.Version
	}
	return ""
}

type GetNumInstancesResponse struct {
	Instances        *int64 `protobuf:"varint,1,req,name=instances" json:"instances,omitempty"`
	XXX_unrecognized []byte `json:"-"`
}

func (m *GetNumInstancesResponse) Reset()         { *m = GetNumInstancesResponse{} }
func (m *GetNumInstancesResponse) String() string { return proto.CompactTextString(m) }
func (*GetNumInstancesResponse) ProtoMessage()    {}

func (m *GetNumInstancesResponse) GetInstances() int64 {
	if m != nil && m.Instances != nil {
		return *m.Instances
	}
	return 0
}

type SetNumInstancesRequest struct {
	Module           *string `protobuf:"bytes,1,opt,name=module" json:"module,omitempty"`
	Version          *string `protobuf:"bytes,2,opt,name=version" json:"version,omitempty"`
	Instances        *int64  `protobuf:"varint,3,req,name=instances" json:"instances,omitempty"`
	XXX_unrecognized []byte  `json:"-"`
}

func (m *SetNumInstancesRequest) Reset()         { *m = SetNumInstancesRequest{} }
func (m *SetNumInstancesRequest) String() string { return proto.CompactTextString(m) }
func (*SetNumInstancesRequest) ProtoMessage()    {}

func (m *SetNumInstancesRequest) GetModule() string {
	if m != nil && m.Module != nil {
		return *m.Module
	}
	return ""
}

func (m *SetNumInstancesRequest) GetVersion() string {
	if m != nil && m.Version != nil {
		return *m.Version
	}
	return ""
}

func (m *SetNumInstancesRequest) GetInstances() int64 {
	if m != nil && m.Instances != nil {
		return *m.Instances
	}
	return 0
}

type SetNumInstancesResponse struct {
	XXX_unrecognized []byte `json:"-"`
}

func (m *SetNumInstancesResponse) Reset()         { *m = SetNumInstancesResponse{} }
func (m *SetNumInstancesResponse) String() string { return proto.CompactTextString(m) }
func (*SetNumInstancesResponse) ProtoMessage()    {}

type StartModuleRequest struct {
	Module           *string `protobuf:"bytes,1,req,name=module" json:"module,omitempty"`
	Version          *string `protobuf:"bytes,2,req,name=version" json:"version,omitempty"`
	XXX_unrecognized []byte  `json:"-"`
}

func (m *StartModuleRequest) Reset()         { *m = StartModuleRequest{} }
func (m *StartModuleRequest) String() string { return proto.CompactTextString(m) }
func (*StartModuleRequest) ProtoMessage()    {}

func (m *StartModuleRequest) GetModule() string {
	if m != nil && m.Module != nil {
		return *m.Module
	}
	return ""
}

func (m *StartModuleRequest) GetVersion() string {
	if m != nil && m.Version != nil {
		return *m.Version
	}
	return ""
}

type StartModuleResponse struct {
	XXX_unrecognized []byte `json:"-"`
}

func (m *StartModuleResponse) Reset()         { *m = StartModuleResponse{} }
func (m *StartModuleResponse) String() string { return proto.CompactTextString(m) }
func (*StartModuleResponse) ProtoMessage()    {}

type StopModuleRequest struct {
	Module           *string `protobuf:"bytes,1,opt,name=module" json:"module,omitempty"`
	Version          *string `protobuf:"bytes,2,opt,name=version" json:"version,omitempty"`
	XXX_unrecognized []byte  `json:"-"`
}

func (m *StopModuleRequest) Reset()         { *m = StopModuleRequest{} }
func (m *StopModuleRequest) String() string { return proto.CompactTextString(m) }
func (*StopModuleRequest) ProtoMessage()    {}

func (m *StopModuleRequest) GetModule() string {
	if m != nil && m.Module != nil {
		return *m.Module
	}
	return ""
}

func (m *StopModuleRequest) GetVersion() string {
	if m != nil && m.Version != nil {
		return *m.Version
	}
	return ""
}

type StopModuleResponse struct {
	XXX_unrecognized []byte `json:"-"`
}

func (m *StopModuleResponse) Reset()         { *m = StopModuleResponse{} }
func (m *StopModuleResponse) String() string { return proto.CompactTextString(m) }
func (*StopModuleResponse) ProtoMessage()    {}

type GetHostnameRequest struct {
	Module           *string `protobuf:"bytes,1,opt,name=module" json:"module,omitempty"`
	Version          *string `protobuf:"bytes,2,opt,name=version" json:"version,omitempty"`
	Instance         *string `protobuf:"bytes,3,opt,name=instance" json:"instance,omitempty"`
	XXX_unrecognized []byte  `json:"-"`
}

func (m *GetHostnameRequest) Reset()         { *m = GetHostnameRequest{} }
func (m *GetHostnameRequest) String() string { return proto.CompactTextString(m) }
func (*GetHostnameRequest) ProtoMessage()    {}

func (m *GetHostnameRequest) GetModule() string {
	if m != nil && m.Module != nil {
		return *m.Module
	}
	return ""
}

func (m *GetHostnameRequest) GetVersion() string {
	if m != nil && m.Version != nil {
		return *m.Version
	}
	return ""
}

func (m *GetHostnameRequest) GetInstance() string {
	if m != nil && m.Instance != nil {
		return *m.Instance
	}
	return ""
}

type GetHostnameResponse struct {
	Hostname         *string `protobuf:"bytes,1,req,name=hostname" json:"hostname,omitempty"`
	XXX_unrecognized []byte  `json:"-"`
}

func (m *GetHostnameResponse) Reset()         { *m = GetHostnameResponse{} }
func (m *GetHostnameResponse) String() string { return proto.CompactTextString(m) }
func (*GetHostnameResponse) ProtoMessage()    {}

func (m *GetHostnameResponse) GetHostname() string {
	if m != nil && m.Hostname != nil {
		return *m.Hostname
	}
	return ""
}

func init() {
}
80
vendor/google.golang.org/appengine/internal/modules/modules_service.proto
generated
vendored
Normal file
@@ -0,0 +1,80 @@
syntax = "proto2";
|
||||
option go_package = "modules";
|
||||
|
||||
package appengine;
|
||||
|
||||
message ModulesServiceError {
|
||||
enum ErrorCode {
|
||||
OK = 0;
|
||||
INVALID_MODULE = 1;
|
||||
INVALID_VERSION = 2;
|
||||
INVALID_INSTANCES = 3;
|
||||
TRANSIENT_ERROR = 4;
|
||||
UNEXPECTED_STATE = 5;
|
||||
}
|
||||
}
|
||||
|
||||
message GetModulesRequest {
|
||||
}
|
||||
|
||||
message GetModulesResponse {
|
||||
repeated string module = 1;
|
||||
}
|
||||
|
||||
message GetVersionsRequest {
|
||||
optional string module = 1;
|
||||
}
|
||||
|
||||
message GetVersionsResponse {
|
||||
repeated string version = 1;
|
||||
}
|
||||
|
||||
message GetDefaultVersionRequest {
|
||||
optional string module = 1;
|
||||
}
|
||||
|
||||
message GetDefaultVersionResponse {
|
||||
required string version = 1;
|
||||
}
|
||||
|
||||
message GetNumInstancesRequest {
|
||||
optional string module = 1;
|
||||
optional string version = 2;
|
||||
}
|
||||
|
||||
message GetNumInstancesResponse {
|
||||
required int64 instances = 1;
|
||||
}
|
||||
|
||||
message SetNumInstancesRequest {
|
||||
optional string module = 1;
|
||||
optional string version = 2;
|
||||
required int64 instances = 3;
|
||||
}
|
||||
|
||||
message SetNumInstancesResponse {}
|
||||
|
||||
message StartModuleRequest {
|
||||
required string module = 1;
|
||||
required string version = 2;
|
||||
}
|
||||
|
||||
message StartModuleResponse {}
|
||||
|
||||
message StopModuleRequest {
|
||||
optional string module = 1;
|
||||
optional string version = 2;
|
||||
}
|
||||
|
||||
message StopModuleResponse {}
|
||||
|
||||
message GetHostnameRequest {
|
||||
optional string module = 1;
|
||||
optional string version = 2;
|
||||
optional string instance = 3;
|
||||
}
|
||||
|
||||
message GetHostnameResponse {
|
||||
required string hostname = 1;
|
||||
}
|
||||
|
56
vendor/google.golang.org/appengine/internal/net.go
generated
vendored
Normal file
@@ -0,0 +1,56 @@
// Copyright 2014 Google Inc. All rights reserved.
// Use of this source code is governed by the Apache 2.0
// license that can be found in the LICENSE file.

package internal

// This file implements a network dialer that limits the number of concurrent connections.
// It is only used for API calls.

import (
	"log"
	"net"
	"runtime"
	"sync"
	"time"
)

var limitSem = make(chan int, 100) // TODO(dsymonds): Use environment variable.

func limitRelease() {
	// non-blocking
	select {
	case <-limitSem:
	default:
		// This should not normally happen.
		log.Print("appengine: unbalanced limitSem release!")
	}
}

func limitDial(network, addr string) (net.Conn, error) {
	limitSem <- 1

	// Dial with a timeout in case the API host is MIA.
	// The connection should normally be very fast.
	conn, err := net.DialTimeout(network, addr, 500*time.Millisecond)
	if err != nil {
		limitRelease()
		return nil, err
	}
	lc := &limitConn{Conn: conn}
	runtime.SetFinalizer(lc, (*limitConn).Close) // shouldn't usually be required
	return lc, nil
}

type limitConn struct {
	close sync.Once
	net.Conn
}

func (lc *limitConn) Close() error {
	defer lc.close.Do(func() {
		limitRelease()
		runtime.SetFinalizer(lc, nil)
	})
	return lc.Conn.Close()
}
40
vendor/google.golang.org/appengine/internal/regen.sh
generated
vendored
Normal file
@@ -0,0 +1,40 @@
#!/bin/bash -e
#
# This script rebuilds the generated code for the protocol buffers.
# To run this you will need protoc and goprotobuf installed;
# see https://github.com/golang/protobuf for instructions.

PKG=google.golang.org/appengine

function die() {
	echo 1>&2 $*
	exit 1
}

# Sanity check that the right tools are accessible.
for tool in go protoc protoc-gen-go; do
	q=$(which $tool) || die "didn't find $tool"
	echo 1>&2 "$tool: $q"
done

echo -n 1>&2 "finding package dir... "
pkgdir=$(go list -f '{{.Dir}}' $PKG)
echo 1>&2 $pkgdir
base=$(echo $pkgdir | sed "s,/$PKG\$,,")
echo 1>&2 "base: $base"
cd $base

# Run protoc once per package.
for dir in $(find $PKG/internal -name '*.proto' | xargs dirname | sort | uniq); do
	echo 1>&2 "* $dir"
	protoc --go_out=. $dir/*.proto
done

for f in $(find $PKG/internal -name '*.pb.go'); do
	# Remove proto.RegisterEnum calls.
	# These cause duplicate registration panics when these packages
	# are used on classic App Engine. proto.RegisterEnum only affects
	# parsing the text format; we don't care about that.
	# https://code.google.com/p/googleappengine/issues/detail?id=11670#c17
	sed -i '/proto.RegisterEnum/d' $f
done
231
vendor/google.golang.org/appengine/internal/remote_api/remote_api.pb.go
generated
vendored
Normal file
@@ -0,0 +1,231 @@
// Code generated by protoc-gen-go.
|
||||
// source: google.golang.org/appengine/internal/remote_api/remote_api.proto
|
||||
// DO NOT EDIT!
|
||||
|
||||
/*
|
||||
Package remote_api is a generated protocol buffer package.
|
||||
|
||||
It is generated from these files:
|
||||
google.golang.org/appengine/internal/remote_api/remote_api.proto
|
||||
|
||||
It has these top-level messages:
|
||||
Request
|
||||
ApplicationError
|
||||
RpcError
|
||||
Response
|
||||
*/
|
||||
package remote_api
|
||||
|
||||
import proto "github.com/golang/protobuf/proto"
|
||||
import fmt "fmt"
|
||||
import math "math"
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ = proto.Marshal
|
||||
var _ = fmt.Errorf
|
||||
var _ = math.Inf
|
||||
|
||||
type RpcError_ErrorCode int32
|
||||
|
||||
const (
|
||||
RpcError_UNKNOWN RpcError_ErrorCode = 0
|
||||
RpcError_CALL_NOT_FOUND RpcError_ErrorCode = 1
|
||||
RpcError_PARSE_ERROR RpcError_ErrorCode = 2
|
||||
RpcError_SECURITY_VIOLATION RpcError_ErrorCode = 3
|
||||
RpcError_OVER_QUOTA RpcError_ErrorCode = 4
|
||||
RpcError_REQUEST_TOO_LARGE RpcError_ErrorCode = 5
|
||||
RpcError_CAPABILITY_DISABLED RpcError_ErrorCode = 6
|
||||
RpcError_FEATURE_DISABLED RpcError_ErrorCode = 7
|
||||
RpcError_BAD_REQUEST RpcError_ErrorCode = 8
|
||||
RpcError_RESPONSE_TOO_LARGE RpcError_ErrorCode = 9
|
||||
RpcError_CANCELLED RpcError_ErrorCode = 10
|
||||
RpcError_REPLAY_ERROR RpcError_ErrorCode = 11
|
||||
RpcError_DEADLINE_EXCEEDED RpcError_ErrorCode = 12
|
||||
)
|
||||
|
||||
var RpcError_ErrorCode_name = map[int32]string{
|
||||
0: "UNKNOWN",
|
||||
1: "CALL_NOT_FOUND",
|
||||
2: "PARSE_ERROR",
|
||||
3: "SECURITY_VIOLATION",
|
||||
4: "OVER_QUOTA",
|
||||
5: "REQUEST_TOO_LARGE",
|
||||
6: "CAPABILITY_DISABLED",
|
||||
7: "FEATURE_DISABLED",
|
||||
8: "BAD_REQUEST",
|
||||
9: "RESPONSE_TOO_LARGE",
|
||||
10: "CANCELLED",
|
||||
11: "REPLAY_ERROR",
|
||||
12: "DEADLINE_EXCEEDED",
|
||||
}
|
||||
var RpcError_ErrorCode_value = map[string]int32{
|
||||
"UNKNOWN": 0,
|
||||
"CALL_NOT_FOUND": 1,
|
||||
"PARSE_ERROR": 2,
|
||||
"SECURITY_VIOLATION": 3,
|
||||
"OVER_QUOTA": 4,
|
||||
"REQUEST_TOO_LARGE": 5,
|
||||
"CAPABILITY_DISABLED": 6,
|
||||
"FEATURE_DISABLED": 7,
|
||||
"BAD_REQUEST": 8,
|
||||
"RESPONSE_TOO_LARGE": 9,
|
||||
"CANCELLED": 10,
|
||||
"REPLAY_ERROR": 11,
|
||||
"DEADLINE_EXCEEDED": 12,
|
||||
}
|
||||
|
||||
func (x RpcError_ErrorCode) Enum() *RpcError_ErrorCode {
|
||||
p := new(RpcError_ErrorCode)
|
||||
*p = x
|
||||
return p
|
||||
}
|
||||
func (x RpcError_ErrorCode) String() string {
|
||||
return proto.EnumName(RpcError_ErrorCode_name, int32(x))
|
||||
}
|
||||
func (x *RpcError_ErrorCode) UnmarshalJSON(data []byte) error {
|
||||
value, err := proto.UnmarshalJSONEnum(RpcError_ErrorCode_value, data, "RpcError_ErrorCode")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
*x = RpcError_ErrorCode(value)
|
||||
return nil
|
||||
}
|
||||
|
||||
type Request struct {
|
||||
ServiceName *string `protobuf:"bytes,2,req,name=service_name" json:"service_name,omitempty"`
|
||||
Method *string `protobuf:"bytes,3,req,name=method" json:"method,omitempty"`
|
||||
Request []byte `protobuf:"bytes,4,req,name=request" json:"request,omitempty"`
|
||||
RequestId *string `protobuf:"bytes,5,opt,name=request_id" json:"request_id,omitempty"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
}
|
||||
|
||||
func (m *Request) Reset() { *m = Request{} }
|
||||
func (m *Request) String() string { return proto.CompactTextString(m) }
|
||||
func (*Request) ProtoMessage() {}
|
||||
|
||||
func (m *Request) GetServiceName() string {
|
||||
if m != nil && m.ServiceName != nil {
|
||||
return *m.ServiceName
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *Request) GetMethod() string {
|
||||
if m != nil && m.Method != nil {
|
||||
return *m.Method
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *Request) GetRequest() []byte {
|
||||
if m != nil {
|
||||
return m.Request
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *Request) GetRequestId() string {
|
||||
if m != nil && m.RequestId != nil {
|
||||
return *m.RequestId
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
type ApplicationError struct {
|
||||
Code *int32 `protobuf:"varint,1,req,name=code" json:"code,omitempty"`
|
||||
Detail *string `protobuf:"bytes,2,req,name=detail" json:"detail,omitempty"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
}
|
||||
|
||||
func (m *ApplicationError) Reset() { *m = ApplicationError{} }
|
||||
func (m *ApplicationError) String() string { return proto.CompactTextString(m) }
|
||||
func (*ApplicationError) ProtoMessage() {}
|
||||
|
||||
func (m *ApplicationError) GetCode() int32 {
|
||||
if m != nil && m.Code != nil {
|
||||
return *m.Code
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (m *ApplicationError) GetDetail() string {
|
||||
if m != nil && m.Detail != nil {
|
||||
return *m.Detail
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
type RpcError struct {
|
||||
Code *int32 `protobuf:"varint,1,req,name=code" json:"code,omitempty"`
|
||||
Detail *string `protobuf:"bytes,2,opt,name=detail" json:"detail,omitempty"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
}
|
||||
|
||||
func (m *RpcError) Reset() { *m = RpcError{} }
|
||||
func (m *RpcError) String() string { return proto.CompactTextString(m) }
|
||||
func (*RpcError) ProtoMessage() {}
|
||||
|
||||
func (m *RpcError) GetCode() int32 {
|
||||
if m != nil && m.Code != nil {
|
||||
return *m.Code
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (m *RpcError) GetDetail() string {
|
||||
if m != nil && m.Detail != nil {
|
||||
return *m.Detail
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
type Response struct {
|
||||
Response []byte `protobuf:"bytes,1,opt,name=response" json:"response,omitempty"`
|
||||
Exception []byte `protobuf:"bytes,2,opt,name=exception" json:"exception,omitempty"`
|
||||
ApplicationError *ApplicationError `protobuf:"bytes,3,opt,name=application_error" json:"application_error,omitempty"`
|
||||
JavaException []byte `protobuf:"bytes,4,opt,name=java_exception" json:"java_exception,omitempty"`
|
||||
RpcError *RpcError `protobuf:"bytes,5,opt,name=rpc_error" json:"rpc_error,omitempty"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
}
|
||||
|
||||
func (m *Response) Reset() { *m = Response{} }
|
||||
func (m *Response) String() string { return proto.CompactTextString(m) }
|
||||
func (*Response) ProtoMessage() {}
|
||||
|
||||
func (m *Response) GetResponse() []byte {
|
||||
if m != nil {
|
||||
return m.Response
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *Response) GetException() []byte {
|
||||
if m != nil {
|
||||
return m.Exception
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *Response) GetApplicationError() *ApplicationError {
|
||||
if m != nil {
|
||||
return m.ApplicationError
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *Response) GetJavaException() []byte {
|
||||
if m != nil {
|
||||
return m.JavaException
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *Response) GetRpcError() *RpcError {
|
||||
if m != nil {
|
||||
return m.RpcError
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func init() {
|
||||
}
|
44
vendor/google.golang.org/appengine/internal/remote_api/remote_api.proto
generated
vendored
Normal file
@@ -0,0 +1,44 @@
syntax = "proto2";
|
||||
option go_package = "remote_api";
|
||||
|
||||
package remote_api;
|
||||
|
||||
message Request {
|
||||
required string service_name = 2;
|
||||
required string method = 3;
|
||||
required bytes request = 4;
|
||||
optional string request_id = 5;
|
||||
}
|
||||
|
||||
message ApplicationError {
|
||||
required int32 code = 1;
|
||||
required string detail = 2;
|
||||
}
|
||||
|
||||
message RpcError {
|
||||
enum ErrorCode {
|
||||
UNKNOWN = 0;
|
||||
CALL_NOT_FOUND = 1;
|
||||
PARSE_ERROR = 2;
|
||||
SECURITY_VIOLATION = 3;
|
||||
OVER_QUOTA = 4;
|
||||
REQUEST_TOO_LARGE = 5;
|
||||
CAPABILITY_DISABLED = 6;
|
||||
FEATURE_DISABLED = 7;
|
||||
BAD_REQUEST = 8;
|
||||
RESPONSE_TOO_LARGE = 9;
|
||||
CANCELLED = 10;
|
||||
REPLAY_ERROR = 11;
|
||||
DEADLINE_EXCEEDED = 12;
|
||||
}
|
||||
required int32 code = 1;
|
||||
optional string detail = 2;
|
||||
}
|
||||
|
||||
message Response {
|
||||
optional bytes response = 1;
|
||||
optional bytes exception = 2;
|
||||
optional ApplicationError application_error = 3;
|
||||
optional bytes java_exception = 4;
|
||||
optional RpcError rpc_error = 5;
|
||||
}
|
107
vendor/google.golang.org/appengine/internal/transaction.go
generated
vendored
Normal file
@@ -0,0 +1,107 @@
// Copyright 2014 Google Inc. All rights reserved.
|
||||
// Use of this source code is governed by the Apache 2.0
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package internal
|
||||
|
||||
// This file implements hooks for applying datastore transactions.
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"reflect"
|
||||
|
||||
"github.com/golang/protobuf/proto"
|
||||
netcontext "golang.org/x/net/context"
|
||||
|
||||
basepb "google.golang.org/appengine/internal/base"
|
||||
pb "google.golang.org/appengine/internal/datastore"
|
||||
)
|
||||
|
||||
var transactionSetters = make(map[reflect.Type]reflect.Value)
|
||||
|
||||
// RegisterTransactionSetter registers a function that sets transaction information
|
||||
// in a protocol buffer message. f should be a function with two arguments,
|
||||
// the first being a protocol buffer type, and the second being *datastore.Transaction.
|
||||
func RegisterTransactionSetter(f interface{}) {
|
||||
v := reflect.ValueOf(f)
|
||||
transactionSetters[v.Type().In(0)] = v
|
||||
}
|
||||
|
||||
// applyTransaction applies the transaction t to message pb
|
||||
// by using the relevant setter passed to RegisterTransactionSetter.
|
||||
func applyTransaction(pb proto.Message, t *pb.Transaction) {
|
||||
v := reflect.ValueOf(pb)
|
||||
if f, ok := transactionSetters[v.Type()]; ok {
|
||||
f.Call([]reflect.Value{v, reflect.ValueOf(t)})
|
||||
}
|
||||
}
|
||||
|
||||
var transactionKey = "used for *Transaction"
|
||||
|
||||
func transactionFromContext(ctx netcontext.Context) *transaction {
|
||||
t, _ := ctx.Value(&transactionKey).(*transaction)
|
||||
return t
|
||||
}
|
||||
|
||||
func withTransaction(ctx netcontext.Context, t *transaction) netcontext.Context {
|
||||
return netcontext.WithValue(ctx, &transactionKey, t)
|
||||
}
|
||||
|
||||
type transaction struct {
|
||||
transaction pb.Transaction
|
||||
finished bool
|
||||
}
|
||||
|
||||
var ErrConcurrentTransaction = errors.New("internal: concurrent transaction")
|
||||
|
||||
func RunTransactionOnce(c netcontext.Context, f func(netcontext.Context) error, xg bool) error {
|
||||
if transactionFromContext(c) != nil {
|
||||
return errors.New("nested transactions are not supported")
|
||||
}
|
||||
|
||||
// Begin the transaction.
|
||||
t := &transaction{}
|
||||
req := &pb.BeginTransactionRequest{
|
||||
App: proto.String(FullyQualifiedAppID(c)),
|
||||
}
|
||||
if xg {
|
||||
req.AllowMultipleEg = proto.Bool(true)
|
||||
}
|
||||
if err := Call(c, "datastore_v3", "BeginTransaction", req, &t.transaction); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Call f, rolling back the transaction if f returns a non-nil error, or panics.
|
||||
// The panic is not recovered.
|
||||
defer func() {
|
||||
if t.finished {
|
||||
return
|
||||
}
|
||||
t.finished = true
|
||||
// Ignore the error return value, since we are already returning a non-nil
|
||||
// error (or we're panicking).
|
||||
Call(c, "datastore_v3", "Rollback", &t.transaction, &basepb.VoidProto{})
|
||||
}()
|
||||
if err := f(withTransaction(c, t)); err != nil {
|
||||
return err
|
||||
}
|
||||
t.finished = true
|
||||
|
||||
// Commit the transaction.
|
||||
res := &pb.CommitResponse{}
|
||||
err := Call(c, "datastore_v3", "Commit", &t.transaction, res)
|
||||
if ae, ok := err.(*APIError); ok {
|
||||
/* TODO: restore this conditional
|
||||
if appengine.IsDevAppServer() {
|
||||
*/
|
||||
// The Python Dev AppServer raises an ApplicationError with error code 2 (which is
|
||||
// Error.CONCURRENT_TRANSACTION) and message "Concurrency exception.".
|
||||
if ae.Code == int32(pb.Error_BAD_REQUEST) && ae.Detail == "ApplicationError: 2 Concurrency exception." {
|
||||
return ErrConcurrentTransaction
|
||||
}
|
||||
if ae.Code == int32(pb.Error_CONCURRENT_TRANSACTION) {
|
||||
return ErrConcurrentTransaction
|
||||
}
|
||||
}
|
||||
return err
|
||||
}
|
25
vendor/google.golang.org/appengine/namespace.go
generated
vendored
Normal file
@@ -0,0 +1,25 @@
// Copyright 2012 Google Inc. All rights reserved.
// Use of this source code is governed by the Apache 2.0
// license that can be found in the LICENSE file.

package appengine

import (
	"fmt"
	"regexp"

	"golang.org/x/net/context"

	"google.golang.org/appengine/internal"
)

// Namespace returns a replacement context that operates within the given namespace.
func Namespace(c context.Context, namespace string) (context.Context, error) {
	if !validNamespace.MatchString(namespace) {
		return nil, fmt.Errorf("appengine: namespace %q does not match /%s/", namespace, validNamespace)
	}
	return internal.NamespacedContext(c, namespace), nil
}

// validNamespace matches valid namespace names.
var validNamespace = regexp.MustCompile(`^[0-9A-Za-z._-]{0,100}$`)
20
vendor/google.golang.org/appengine/timeout.go
generated
vendored
Normal file
@@ -0,0 +1,20 @@
// Copyright 2013 Google Inc. All rights reserved.
// Use of this source code is governed by the Apache 2.0
// license that can be found in the LICENSE file.

package appengine

import "golang.org/x/net/context"

// IsTimeoutError reports whether err is a timeout error.
func IsTimeoutError(err error) bool {
	if err == context.DeadlineExceeded {
		return true
	}
	if t, ok := err.(interface {
		IsTimeout() bool
	}); ok {
		return t.IsTimeout()
	}
	return false
}
2
vendor/google.golang.org/cloud/.travis.yml
generated
vendored
@@ -2,7 +2,7 @@ sudo: false
language: go
go:
- 1.4
- tip
- 1.5
install:
- go get -v google.golang.org/cloud/...
script:
9
vendor/google.golang.org/grpc/.travis.yml
generated
vendored
@@ -1,4 +1,13 @@
language: go

before_install:
- go get github.com/axw/gocov/gocov
- go get github.com/mattn/goveralls
- go get golang.org/x/tools/cmd/cover

install:
- mkdir -p "$GOPATH/src/google.golang.org"
- mv "$TRAVIS_BUILD_DIR" "$GOPATH/src/google.golang.org/grpc"

script:
- make test testrace
4
vendor/google.golang.org/grpc/CONTRIBUTING.md
generated
vendored
@@ -20,8 +20,4 @@ When filing an issue, make sure to answer these five questions:
5. What did you see instead?

### Contributing code
Please read the Contribution Guidelines before sending patches.

We will not accept GitHub pull requests once Gerrit is setup (we will use Gerrit instead for code review).

Unless otherwise noted, the Go source files are distributed under the BSD-style license found in the LICENSE file.
3
vendor/google.golang.org/grpc/Makefile
generated
vendored
@@ -45,3 +45,6 @@ testrace: testdeps

clean:
	go clean google.golang.org/grpc/...

coverage: testdeps
	./coverage.sh --coveralls
17
vendor/google.golang.org/grpc/README.md
generated
vendored
@@ -2,22 +2,31 @@

[](https://travis-ci.org/grpc/grpc-go) [](https://godoc.org/google.golang.org/grpc)

The Go implementation of [gRPC](https://github.com/grpc/grpc)
The Go implementation of [gRPC](http://www.grpc.io/): A high performance, open source, general RPC framework that puts mobile and HTTP/2 first. For more information see the [gRPC Quick Start](http://www.grpc.io/docs/) guide.

Installation
------------

To install this package, you need to install Go 1.4 and setup your Go workspace on your computer. The simplest way to install the library is to run:
To install this package, you need to install Go 1.4 or above and setup your Go workspace on your computer. The simplest way to install the library is to run:

```
$ go get google.golang.org/grpc
```

Prerequisites
-------------

This requires Go 1.4 or above.

Constraints
-----------
The grpc package should only depend on standard Go packages and a small number of exceptions. If your contribution introduces new dependencies which are NOT in the [list](http://godoc.org/google.golang.org/grpc?imports), you need a discussion with gRPC-Go authors and consultants.

Documentation
-------------
You can find more detailed documentation and examples in the [grpc-common repository](http://github.com/grpc/grpc-common).
See [API documentation](https://godoc.org/google.golang.org/grpc) for package and API descriptions and find examples in the [examples directory](examples/).

Status
------
Alpha - ready for early adopters.
Beta release
55
vendor/google.golang.org/grpc/call.go
generated
vendored
@@ -34,13 +34,13 @@
package grpc
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"io"
|
||||
"time"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
"golang.org/x/net/trace"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/metadata"
|
||||
"google.golang.org/grpc/transport"
|
||||
)
|
||||
|
||||
|
@ -48,16 +48,16 @@ import (
|
|||
// On error, it returns the error and indicates whether the call should be retried.
|
||||
//
|
||||
// TODO(zhaoq): Check whether the received message sequence is valid.
|
||||
func recvResponse(codec Codec, t transport.ClientTransport, c *callInfo, stream *transport.Stream, reply interface{}) error {
|
||||
func recvResponse(dopts dialOptions, t transport.ClientTransport, c *callInfo, stream *transport.Stream, reply interface{}) error {
|
||||
// Try to acquire header metadata from the server if there is any.
|
||||
var err error
|
||||
c.headerMD, err = stream.Header()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
p := &parser{s: stream}
|
||||
p := &parser{r: stream}
|
||||
for {
|
||||
if err = recv(p, codec, reply); err != nil {
|
||||
if err = recv(p, dopts.codec, stream, dopts.dc, reply); err != nil {
|
||||
if err == io.EOF {
|
||||
break
|
||||
}
|
||||
|
@ -69,7 +69,7 @@ func recvResponse(codec Codec, t transport.ClientTransport, c *callInfo, stream
|
|||
}
|
||||
|
||||
// sendRequest writes out various information of an RPC such as Context and Message.
|
||||
func sendRequest(ctx context.Context, codec Codec, callHdr *transport.CallHdr, t transport.ClientTransport, args interface{}, opts *transport.Options) (_ *transport.Stream, err error) {
|
||||
func sendRequest(ctx context.Context, codec Codec, compressor Compressor, callHdr *transport.CallHdr, t transport.ClientTransport, args interface{}, opts *transport.Options) (_ *transport.Stream, err error) {
|
||||
stream, err := t.NewStream(ctx, callHdr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
@ -81,8 +81,11 @@ func sendRequest(ctx context.Context, codec Codec, callHdr *transport.CallHdr, t
|
|||
}
|
||||
}
|
||||
}()
|
||||
// TODO(zhaoq): Support compression.
|
||||
outBuf, err := encode(codec, args, compressionNone)
|
||||
var cbuf *bytes.Buffer
|
||||
if compressor != nil {
|
||||
cbuf = new(bytes.Buffer)
|
||||
}
|
||||
outBuf, err := encode(codec, args, compressor, cbuf)
|
||||
if err != nil {
|
||||
return nil, transport.StreamErrorf(codes.Internal, "grpc: %v", err)
|
||||
}
|
||||
|
@ -94,14 +97,6 @@ func sendRequest(ctx context.Context, codec Codec, callHdr *transport.CallHdr, t
|
|||
return stream, nil
|
||||
}
|
||||
|
||||
// callInfo contains all related configuration and information about an RPC.
|
||||
type callInfo struct {
|
||||
failFast bool
|
||||
headerMD metadata.MD
|
||||
trailerMD metadata.MD
|
||||
traceInfo traceInfo // in trace.go
|
||||
}
|
||||
|
||||
// Invoke is called by the generated code. It sends the RPC request on the
|
||||
// wire and returns after response is received.
|
||||
func Invoke(ctx context.Context, method string, args, reply interface{}, cc *ClientConn, opts ...CallOption) (err error) {
|
||||
|
@ -116,9 +111,8 @@ func Invoke(ctx context.Context, method string, args, reply interface{}, cc *Cli
|
|||
o.after(&c)
|
||||
}
|
||||
}()
|
||||
|
||||
if EnableTracing {
|
||||
c.traceInfo.tr = trace.New("Sent."+methodFamily(method), method)
|
||||
c.traceInfo.tr = trace.New("grpc.Sent."+methodFamily(method), method)
|
||||
defer c.traceInfo.tr.Finish()
|
||||
c.traceInfo.firstLine.client = true
|
||||
if deadline, ok := ctx.Deadline(); ok {
|
||||
|
@ -133,17 +127,11 @@ func Invoke(ctx context.Context, method string, args, reply interface{}, cc *Cli
|
|||
}
|
||||
}()
|
||||
}
|
||||
|
||||
callHdr := &transport.CallHdr{
|
||||
Host: cc.authority,
|
||||
Method: method,
|
||||
}
|
||||
topts := &transport.Options{
|
||||
Last: true,
|
||||
Delay: false,
|
||||
}
|
||||
var (
|
||||
ts int // track the transport sequence number
|
||||
lastErr error // record the error that happened
|
||||
)
|
||||
for {
|
||||
|
@ -156,7 +144,14 @@ func Invoke(ctx context.Context, method string, args, reply interface{}, cc *Cli
|
|||
if lastErr != nil && c.failFast {
|
||||
return toRPCErr(lastErr)
|
||||
}
|
||||
t, ts, err = cc.wait(ctx, ts)
|
||||
callHdr := &transport.CallHdr{
|
||||
Host: cc.authority,
|
||||
Method: method,
|
||||
}
|
||||
if cc.dopts.cp != nil {
|
||||
callHdr.SendCompress = cc.dopts.cp.Type()
|
||||
}
|
||||
t, err = cc.dopts.picker.Pick(ctx)
|
||||
if err != nil {
|
||||
if lastErr != nil {
|
||||
// This was a retry; return the error from the last attempt.
|
||||
|
@ -164,10 +159,10 @@ func Invoke(ctx context.Context, method string, args, reply interface{}, cc *Cli
|
|||
}
|
||||
return toRPCErr(err)
|
||||
}
|
||||
if EnableTracing {
|
||||
c.traceInfo.tr.LazyLog(payload{args}, true)
|
||||
if c.traceInfo.tr != nil {
|
||||
c.traceInfo.tr.LazyLog(&payload{sent: true, msg: args}, true)
|
||||
}
|
||||
stream, err = sendRequest(ctx, cc.dopts.codec, callHdr, t, args, topts)
|
||||
stream, err = sendRequest(ctx, cc.dopts.codec, cc.dopts.cp, callHdr, t, args, topts)
|
||||
if err != nil {
|
||||
if _, ok := err.(transport.ConnectionError); ok {
|
||||
lastErr = err
|
||||
|
@ -179,12 +174,12 @@ func Invoke(ctx context.Context, method string, args, reply interface{}, cc *Cli
|
|||
return toRPCErr(err)
|
||||
}
|
||||
// Receive the response
|
||||
lastErr = recvResponse(cc.dopts.codec, t, &c, stream, reply)
|
||||
lastErr = recvResponse(cc.dopts, t, &c, stream, reply)
|
||||
if _, ok := lastErr.(transport.ConnectionError); ok {
|
||||
continue
|
||||
}
|
||||
if EnableTracing {
|
||||
c.traceInfo.tr.LazyLog(payload{reply}, true)
|
||||
if c.traceInfo.tr != nil {
|
||||
c.traceInfo.tr.LazyLog(&payload{sent: false, msg: reply}, true)
|
||||
}
|
||||
t.CloseStream(stream, lastErr)
|
||||
if lastErr != nil {
|
||||
|
|
437
vendor/google.golang.org/grpc/clientconn.go
generated
vendored
@@ -35,12 +35,14 @@ package grpc

import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"net"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
"golang.org/x/net/trace"
|
||||
"google.golang.org/grpc/credentials"
|
||||
"google.golang.org/grpc/grpclog"
|
||||
"google.golang.org/grpc/transport"
|
||||
|
@ -49,20 +51,34 @@ import (
|
|||
var (
|
||||
// ErrUnspecTarget indicates that the target address is unspecified.
|
||||
ErrUnspecTarget = errors.New("grpc: target is unspecified")
|
||||
// ErrNoTransportSecurity indicates that there is no transport security
|
||||
// being set for ClientConn. Users should either set one or explicitly
|
||||
// call WithInsecure DialOption to disable security.
|
||||
ErrNoTransportSecurity = errors.New("grpc: no transport security set (use grpc.WithInsecure() explicitly or set credentials)")
|
||||
// ErrCredentialsMisuse indicates that users want to transmit security information
|
||||
// (e.g., oauth2 token) which requires secure connection on an insecure
|
||||
// connection.
|
||||
ErrCredentialsMisuse = errors.New("grpc: the credentials require transport level security (use grpc.WithTransportAuthenticator() to set)")
|
||||
// ErrClientConnClosing indicates that the operation is illegal because
|
||||
// the session is closing.
|
||||
ErrClientConnClosing = errors.New("grpc: the client connection is closing")
|
||||
// ErrClientConnTimeout indicates that the connection could not be
|
||||
// established or re-established within the specified timeout.
|
||||
ErrClientConnTimeout = errors.New("grpc: timed out trying to connect")
|
||||
// minimum time to give a connection to complete
|
||||
minConnectTimeout = 20 * time.Second
|
||||
)
|
||||
|
||||
// dialOptions configure a Dial call. dialOptions are set by the DialOption
|
||||
// values passed to Dial.
|
||||
type dialOptions struct {
|
||||
codec Codec
|
||||
block bool
|
||||
copts transport.ConnectOptions
|
||||
codec Codec
|
||||
cp Compressor
|
||||
dc Decompressor
|
||||
picker Picker
|
||||
block bool
|
||||
insecure bool
|
||||
copts transport.ConnectOptions
|
||||
}
|
||||
|
||||
// DialOption configures how we set up the connection.
|
||||
|
@ -75,6 +91,29 @@ func WithCodec(c Codec) DialOption {
|
|||
}
|
||||
}
|
||||
|
||||
// WithCompressor returns a DialOption which sets a CompressorGenerator for generating message
|
||||
// compressor.
|
||||
func WithCompressor(cp Compressor) DialOption {
|
||||
return func(o *dialOptions) {
|
||||
o.cp = cp
|
||||
}
|
||||
}
|
||||
|
||||
// WithDecompressor returns a DialOption which sets a DecompressorGenerator for generating
|
||||
// message decompressor.
|
||||
func WithDecompressor(dc Decompressor) DialOption {
|
||||
return func(o *dialOptions) {
|
||||
o.dc = dc
|
||||
}
|
||||
}
|
||||
|
||||
// WithPicker returns a DialOption which sets a picker for connection selection.
|
||||
func WithPicker(p Picker) DialOption {
|
||||
return func(o *dialOptions) {
|
||||
o.picker = p
|
||||
}
|
||||
}
|
||||
|
||||
// WithBlock returns a DialOption which makes caller of Dial blocks until the underlying
|
||||
// connection is up. Without this, Dial returns immediately and connecting the server
|
||||
// happens in background.
|
||||
|
@ -84,6 +123,14 @@ func WithBlock() DialOption {
|
|||
}
|
||||
}
|
||||
|
||||
// WithInsecure returns a DialOption which disables transport security for this ClientConn.
|
||||
// Note that transport security is required unless WithInsecure is set.
|
||||
func WithInsecure() DialOption {
|
||||
return func(o *dialOptions) {
|
||||
o.insecure = true
|
||||
}
|
||||
}
|
||||
|
||||
// WithTransportCredentials returns a DialOption which configures a
|
||||
// connection level security credentials (e.g., TLS/SSL).
|
||||
func WithTransportCredentials(creds credentials.TransportAuthenticator) DialOption {
|
||||
|
@ -114,84 +161,247 @@ func WithDialer(f func(addr string, timeout time.Duration) (net.Conn, error)) Di
|
|||
}
|
||||
}
|
||||
|
||||
// Dial creates a client connection the given target.
|
||||
// TODO(zhaoq): Have an option to make Dial return immediately without waiting
|
||||
// for connection to complete.
|
||||
func Dial(target string, opts ...DialOption) (*ClientConn, error) {
|
||||
if target == "" {
|
||||
return nil, ErrUnspecTarget
|
||||
// WithUserAgent returns a DialOption that specifies a user agent string for all the RPCs.
|
||||
func WithUserAgent(s string) DialOption {
|
||||
return func(o *dialOptions) {
|
||||
o.copts.UserAgent = s
|
||||
}
|
||||
}
|
||||
|
||||
// Dial creates a client connection the given target.
|
||||
func Dial(target string, opts ...DialOption) (*ClientConn, error) {
|
||||
cc := &ClientConn{
|
||||
target: target,
|
||||
shutdownChan: make(chan struct{}),
|
||||
target: target,
|
||||
}
|
||||
for _, opt := range opts {
|
||||
opt(&cc.dopts)
|
||||
}
|
||||
if cc.dopts.codec == nil {
|
||||
// Set the default codec.
|
||||
cc.dopts.codec = protoCodec{}
|
||||
}
|
||||
if cc.dopts.picker == nil {
|
||||
cc.dopts.picker = &unicastPicker{
|
||||
target: target,
|
||||
}
|
||||
}
|
||||
if err := cc.dopts.picker.Init(cc); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
colonPos := strings.LastIndex(target, ":")
|
||||
if colonPos == -1 {
|
||||
colonPos = len(target)
|
||||
}
|
||||
cc.authority = target[:colonPos]
|
||||
if cc.dopts.codec == nil {
|
||||
// Set the default codec.
|
||||
cc.dopts.codec = protoCodec{}
|
||||
}
|
||||
if cc.dopts.block {
|
||||
if err := cc.resetTransport(false); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// Start to monitor the error status of transport.
|
||||
go cc.transportMonitor()
|
||||
} else {
|
||||
// Start a goroutine connecting to the server asynchronously.
|
||||
go func() {
|
||||
if err := cc.resetTransport(false); err != nil {
|
||||
grpclog.Printf("Failed to dial %s: %v; please retry.", target, err)
|
||||
return
|
||||
}
|
||||
go cc.transportMonitor()
|
||||
}()
|
||||
}
|
||||
return cc, nil
|
||||
}
|
||||
|
||||
// ConnectivityState indicates the state of a client connection.
|
||||
type ConnectivityState int
|
||||
|
||||
const (
|
||||
// Idle indicates the ClientConn is idle.
|
||||
Idle ConnectivityState = iota
|
||||
// Connecting indicates the ClienConn is connecting.
|
||||
Connecting
|
||||
// Ready indicates the ClientConn is ready for work.
|
||||
Ready
|
||||
// TransientFailure indicates the ClientConn has seen a failure but expects to recover.
|
||||
TransientFailure
|
||||
// Shutdown indicates the ClientConn has started shutting down.
|
||||
Shutdown
|
||||
)
|
||||
|
||||
func (s ConnectivityState) String() string {
|
||||
switch s {
|
||||
case Idle:
|
||||
return "IDLE"
|
||||
case Connecting:
|
||||
return "CONNECTING"
|
||||
case Ready:
|
||||
return "READY"
|
||||
case TransientFailure:
|
||||
return "TRANSIENT_FAILURE"
|
||||
case Shutdown:
|
||||
return "SHUTDOWN"
|
||||
default:
|
||||
panic(fmt.Sprintf("unknown connectivity state: %d", s))
|
||||
}
|
||||
}
|
||||
|
||||
// ClientConn represents a client connection to an RPC service.
|
||||
type ClientConn struct {
|
||||
target string
|
||||
authority string
|
||||
dopts dialOptions
|
||||
shutdownChan chan struct{}
|
||||
|
||||
mu sync.Mutex
|
||||
// ready is closed and becomes nil when a new transport is up or failed
|
||||
// due to timeout.
|
||||
ready chan struct{}
|
||||
// Indicates the ClientConn is under destruction.
|
||||
closing bool
|
||||
// Every time a new transport is created, this is incremented by 1. Used
|
||||
// to avoid trying to recreate a transport while the new one is already
|
||||
// under construction.
|
||||
transportSeq int
|
||||
transport transport.ClientTransport
|
||||
target string
|
||||
authority string
|
||||
dopts dialOptions
|
||||
}
|
||||
|
||||
func (cc *ClientConn) resetTransport(closeTransport bool) error {
|
||||
// State returns the connectivity state of cc.
|
||||
// This is EXPERIMENTAL API.
|
||||
func (cc *ClientConn) State() (ConnectivityState, error) {
|
||||
return cc.dopts.picker.State()
|
||||
}
|
||||
|
||||
// WaitForStateChange blocks until the state changes to something other than the sourceState.
|
||||
// It returns the new state or error.
|
||||
// This is EXPERIMENTAL API.
|
||||
func (cc *ClientConn) WaitForStateChange(ctx context.Context, sourceState ConnectivityState) (ConnectivityState, error) {
|
||||
return cc.dopts.picker.WaitForStateChange(ctx, sourceState)
|
||||
}
|
||||
|
||||
// Close starts to tear down the ClientConn.
|
||||
func (cc *ClientConn) Close() error {
|
||||
return cc.dopts.picker.Close()
|
||||
}
|
||||
|
||||
// Conn is a client connection to a single destination.
|
||||
type Conn struct {
|
||||
target string
|
||||
dopts dialOptions
|
||||
resetChan chan int
|
||||
shutdownChan chan struct{}
|
||||
events trace.EventLog
|
||||
|
||||
mu sync.Mutex
|
||||
state ConnectivityState
|
||||
stateCV *sync.Cond
|
||||
// ready is closed and becomes nil when a new transport is up or failed
|
||||
// due to timeout.
|
||||
ready chan struct{}
|
||||
transport transport.ClientTransport
|
||||
}
|
||||
|
||||
// NewConn creates a Conn.
|
||||
func NewConn(cc *ClientConn) (*Conn, error) {
|
||||
if cc.target == "" {
|
||||
return nil, ErrUnspecTarget
|
||||
}
|
||||
c := &Conn{
|
||||
target: cc.target,
|
||||
dopts: cc.dopts,
|
||||
resetChan: make(chan int, 1),
|
||||
shutdownChan: make(chan struct{}),
|
||||
}
|
||||
if EnableTracing {
|
||||
c.events = trace.NewEventLog("grpc.ClientConn", c.target)
|
||||
}
|
||||
if !c.dopts.insecure {
|
||||
var ok bool
|
||||
for _, cd := range c.dopts.copts.AuthOptions {
|
||||
if _, ok := cd.(credentials.TransportAuthenticator); !ok {
|
||||
continue
|
||||
}
|
||||
ok = true
|
||||
}
|
||||
if !ok {
|
||||
return nil, ErrNoTransportSecurity
|
||||
}
|
||||
} else {
|
||||
for _, cd := range c.dopts.copts.AuthOptions {
|
||||
if cd.RequireTransportSecurity() {
|
||||
return nil, ErrCredentialsMisuse
|
||||
}
|
||||
}
|
||||
}
|
||||
c.stateCV = sync.NewCond(&c.mu)
|
||||
if c.dopts.block {
|
||||
if err := c.resetTransport(false); err != nil {
|
||||
c.Close()
|
||||
return nil, err
|
||||
}
|
||||
// Start to monitor the error status of transport.
|
||||
go c.transportMonitor()
|
||||
} else {
|
||||
// Start a goroutine connecting to the server asynchronously.
|
||||
go func() {
|
||||
if err := c.resetTransport(false); err != nil {
|
||||
grpclog.Printf("Failed to dial %s: %v; please retry.", c.target, err)
|
||||
c.Close()
|
||||
return
|
||||
}
|
||||
c.transportMonitor()
|
||||
}()
|
||||
}
|
||||
return c, nil
|
||||
}
|
||||
|
||||
// printf records an event in cc's event log, unless cc has been closed.
|
||||
// REQUIRES cc.mu is held.
|
||||
func (cc *Conn) printf(format string, a ...interface{}) {
|
||||
if cc.events != nil {
|
||||
cc.events.Printf(format, a...)
|
||||
}
|
||||
}
|
||||
|
||||
// errorf records an error in cc's event log, unless cc has been closed.
|
||||
// REQUIRES cc.mu is held.
|
||||
func (cc *Conn) errorf(format string, a ...interface{}) {
|
||||
if cc.events != nil {
|
||||
cc.events.Errorf(format, a...)
|
||||
}
|
||||
}
|
||||
|
||||
// State returns the connectivity state of the Conn
|
||||
func (cc *Conn) State() ConnectivityState {
|
||||
cc.mu.Lock()
|
||||
defer cc.mu.Unlock()
|
||||
return cc.state
|
||||
}
|
||||
|
||||
// WaitForStateChange blocks until the state changes to something other than the sourceState.
|
||||
func (cc *Conn) WaitForStateChange(ctx context.Context, sourceState ConnectivityState) (ConnectivityState, error) {
|
||||
cc.mu.Lock()
|
||||
defer cc.mu.Unlock()
|
||||
if sourceState != cc.state {
|
||||
return cc.state, nil
|
||||
}
|
||||
done := make(chan struct{})
|
||||
var err error
|
||||
go func() {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
cc.mu.Lock()
|
||||
err = ctx.Err()
|
||||
cc.stateCV.Broadcast()
|
||||
cc.mu.Unlock()
|
||||
case <-done:
|
||||
}
|
||||
}()
|
||||
defer close(done)
|
||||
for sourceState == cc.state {
|
||||
cc.stateCV.Wait()
|
||||
if err != nil {
|
||||
return cc.state, err
|
||||
}
|
||||
}
|
||||
return cc.state, nil
|
||||
}
|
||||
|
||||
// NotifyReset tries to signal the underlying transport needs to be reset due to
|
||||
// for example a name resolution change in flight.
|
||||
func (cc *Conn) NotifyReset() {
|
||||
select {
|
||||
case cc.resetChan <- 0:
|
||||
default:
|
||||
}
|
||||
}
|
||||
|
||||
func (cc *Conn) resetTransport(closeTransport bool) error {
|
||||
var retries int
|
||||
start := time.Now()
|
||||
for {
|
||||
cc.mu.Lock()
|
||||
t := cc.transport
|
||||
ts := cc.transportSeq
|
||||
// Avoid wait() picking up a dying transport unnecessarily.
|
||||
cc.transportSeq = 0
|
||||
if cc.closing {
|
||||
cc.printf("connecting")
|
||||
if cc.state == Shutdown {
|
||||
// cc.Close() has been invoked.
|
||||
cc.mu.Unlock()
|
||||
return ErrClientConnClosing
|
||||
}
|
||||
cc.state = Connecting
|
||||
cc.stateCV.Broadcast()
|
||||
cc.mu.Unlock()
|
||||
if closeTransport {
|
||||
t.Close()
|
||||
cc.transport.Close()
|
||||
}
|
||||
// Adjust timeout for the current try.
|
||||
copts := cc.dopts.copts
|
||||
|
@ -206,29 +416,64 @@ func (cc *ClientConn) resetTransport(closeTransport bool) error {
|
|||
return ErrClientConnTimeout
|
||||
}
|
||||
}
|
||||
newTransport, err := transport.NewClientTransport(cc.target, &copts)
|
||||
sleepTime := backoff(retries)
|
||||
timeout := sleepTime
|
||||
if timeout < minConnectTimeout {
|
||||
timeout = minConnectTimeout
|
||||
}
|
||||
if copts.Timeout == 0 || copts.Timeout > timeout {
|
||||
copts.Timeout = timeout
|
||||
}
|
||||
connectTime := time.Now()
|
||||
addr, err := cc.dopts.picker.PickAddr()
|
||||
var newTransport transport.ClientTransport
|
||||
if err == nil {
|
||||
newTransport, err = transport.NewClientTransport(addr, &copts)
|
||||
}
|
||||
if err != nil {
|
||||
sleepTime := backoff(retries)
|
||||
cc.mu.Lock()
|
||||
if cc.state == Shutdown {
|
||||
// cc.Close() has been invoked.
|
||||
cc.mu.Unlock()
|
||||
return ErrClientConnClosing
|
||||
}
|
||||
cc.errorf("transient failure: %v", err)
|
||||
cc.state = TransientFailure
|
||||
cc.stateCV.Broadcast()
|
||||
if cc.ready != nil {
|
||||
close(cc.ready)
|
||||
cc.ready = nil
|
||||
}
|
||||
cc.mu.Unlock()
|
||||
sleepTime -= time.Since(connectTime)
|
||||
if sleepTime < 0 {
|
||||
sleepTime = 0
|
||||
}
|
||||
// Fail early before falling into sleep.
|
||||
if cc.dopts.copts.Timeout > 0 && cc.dopts.copts.Timeout < sleepTime+time.Since(start) {
|
||||
cc.mu.Lock()
|
||||
cc.errorf("connection timeout")
|
||||
cc.mu.Unlock()
|
||||
cc.Close()
|
||||
return ErrClientConnTimeout
|
||||
}
|
||||
closeTransport = false
|
||||
time.Sleep(sleepTime)
|
||||
retries++
|
||||
grpclog.Printf("grpc: ClientConn.resetTransport failed to create client transport: %v; Reconnecting to %q", err, cc.target)
|
||||
grpclog.Printf("grpc: Conn.resetTransport failed to create client transport: %v; Reconnecting to %q", err, cc.target)
|
||||
continue
|
||||
}
|
||||
cc.mu.Lock()
|
||||
if cc.closing {
|
||||
cc.printf("ready")
|
||||
if cc.state == Shutdown {
|
||||
// cc.Close() has been invoked.
|
||||
cc.mu.Unlock()
|
||||
newTransport.Close()
|
||||
return ErrClientConnClosing
|
||||
}
|
||||
cc.state = Ready
|
||||
cc.stateCV.Broadcast()
|
||||
cc.transport = newTransport
|
||||
cc.transportSeq = ts + 1
|
||||
if cc.ready != nil {
|
||||
close(cc.ready)
|
||||
cc.ready = nil
|
||||
|
@ -238,40 +483,65 @@ func (cc *ClientConn) resetTransport(closeTransport bool) error {
|
|||
}
|
||||
}
|
||||
|
||||
func (cc *Conn) reconnect() bool {
|
||||
cc.mu.Lock()
|
||||
if cc.state == Shutdown {
|
||||
// cc.Close() has been invoked.
|
||||
cc.mu.Unlock()
|
||||
return false
|
||||
}
|
||||
cc.state = TransientFailure
|
||||
cc.stateCV.Broadcast()
|
||||
cc.mu.Unlock()
|
||||
if err := cc.resetTransport(true); err != nil {
|
||||
// The ClientConn is closing.
|
||||
cc.mu.Lock()
|
||||
cc.printf("transport exiting: %v", err)
|
||||
cc.mu.Unlock()
|
||||
grpclog.Printf("grpc: Conn.transportMonitor exits due to: %v", err)
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// Run in a goroutine to track the error in transport and create the
|
||||
// new transport if an error happens. It returns when the channel is closing.
|
||||
func (cc *ClientConn) transportMonitor() {
|
||||
func (cc *Conn) transportMonitor() {
|
||||
for {
|
||||
select {
|
||||
// shutdownChan is needed to detect the channel teardown when
|
||||
// shutdownChan is needed to detect the teardown when
|
||||
// the ClientConn is idle (i.e., no RPC in flight).
|
||||
case <-cc.shutdownChan:
|
||||
return
|
||||
case <-cc.transport.Error():
|
||||
if err := cc.resetTransport(true); err != nil {
|
||||
// The channel is closing.
|
||||
grpclog.Printf("grpc: ClientConn.transportMonitor exits due to: %v", err)
|
||||
case <-cc.resetChan:
|
||||
if !cc.reconnect() {
|
||||
return
|
||||
}
|
||||
continue
|
||||
case <-cc.transport.Error():
|
||||
if !cc.reconnect() {
|
||||
return
|
||||
}
|
||||
// Tries to drain reset signal if there is any since it is out-dated.
|
||||
select {
|
||||
case <-cc.resetChan:
|
||||
default:
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// When wait returns, either the new transport is up or ClientConn is
|
||||
// closing. Used to avoid working on a dying transport. It updates and
|
||||
// returns the transport and its version when there is no error.
|
||||
func (cc *ClientConn) wait(ctx context.Context, ts int) (transport.ClientTransport, int, error) {
|
||||
// Wait blocks until i) the new transport is up or ii) ctx is done or iii) cc is closed.
|
||||
func (cc *Conn) Wait(ctx context.Context) (transport.ClientTransport, error) {
|
||||
for {
|
||||
cc.mu.Lock()
|
||||
switch {
|
||||
case cc.closing:
|
||||
case cc.state == Shutdown:
|
||||
cc.mu.Unlock()
|
||||
return nil, 0, ErrClientConnClosing
|
||||
case ts < cc.transportSeq:
|
||||
// Worked on a dying transport. Try the new one immediately.
|
||||
defer cc.mu.Unlock()
|
||||
return cc.transport, cc.transportSeq, nil
|
||||
return nil, ErrClientConnClosing
|
||||
case cc.state == Ready:
|
||||
ct := cc.transport
|
||||
cc.mu.Unlock()
|
||||
return ct, nil
|
||||
default:
|
||||
ready := cc.ready
|
||||
if ready == nil {
|
||||
|
@ -281,7 +551,7 @@ func (cc *ClientConn) wait(ctx context.Context, ts int) (transport.ClientTranspo
|
|||
cc.mu.Unlock()
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return nil, 0, transport.ContextErr(ctx.Err())
|
||||
return nil, transport.ContextErr(ctx.Err())
|
||||
// Wait until the new transport is ready or failed.
|
||||
case <-ready:
|
||||
}
|
||||
|
@ -289,18 +559,23 @@ func (cc *ClientConn) wait(ctx context.Context, ts int) (transport.ClientTranspo
|
|||
}
|
||||
}
|
||||
|
||||
// Close starts to tear down the ClientConn. Returns ErrClientConnClosing if
|
||||
// Close starts to tear down the Conn. Returns ErrClientConnClosing if
|
||||
// it has been closed (mostly due to dial time-out).
|
||||
// TODO(zhaoq): Make this synchronous to avoid unbounded memory consumption in
|
||||
// some edge cases (e.g., the caller opens and closes many ClientConn's in a
|
||||
// tight loop.
|
||||
func (cc *ClientConn) Close() error {
|
||||
func (cc *Conn) Close() error {
|
||||
cc.mu.Lock()
|
||||
defer cc.mu.Unlock()
|
||||
if cc.closing {
|
||||
if cc.state == Shutdown {
|
||||
return ErrClientConnClosing
|
||||
}
|
||||
cc.closing = true
|
||||
cc.state = Shutdown
|
||||
cc.stateCV.Broadcast()
|
||||
if cc.events != nil {
|
||||
cc.events.Finish()
|
||||
cc.events = nil
|
||||
}
|
||||
if cc.ready != nil {
|
||||
close(cc.ready)
|
||||
cc.ready = nil
|
||||
|
|
47
vendor/google.golang.org/grpc/coverage.sh
generated
vendored
Normal file
@@ -0,0 +1,47 @@
#!/bin/bash

set -e

workdir=.cover
profile="$workdir/cover.out"
mode=set
end2endtest="google.golang.org/grpc/test"

generate_cover_data() {
	rm -rf "$workdir"
	mkdir "$workdir"

	for pkg in "$@"; do
		if [ $pkg == "google.golang.org/grpc" -o $pkg == "google.golang.org/grpc/transport" -o $pkg == "google.golang.org/grpc/metadata" -o $pkg == "google.golang.org/grpc/credentials" ]
		then
			f="$workdir/$(echo $pkg | tr / -)"
			go test -covermode="$mode" -coverprofile="$f.cover" "$pkg"
			go test -covermode="$mode" -coverpkg "$pkg" -coverprofile="$f.e2e.cover" "$end2endtest"
		fi
	done

	echo "mode: $mode" >"$profile"
	grep -h -v "^mode:" "$workdir"/*.cover >>"$profile"
}

show_cover_report() {
	go tool cover -${1}="$profile"
}

push_to_coveralls() {
	goveralls -coverprofile="$profile"
}

generate_cover_data $(go list ./...)
show_cover_report func
case "$1" in
"")
	;;
--html)
	show_cover_report html ;;
--coveralls)
	push_to_coveralls ;;
*)
	echo >&2 "error: invalid option: $1" ;;
esac
rm -rf "$workdir"
132
vendor/google.golang.org/grpc/credentials/credentials.go
generated
vendored
@@ -47,14 +47,11 @@ import (
"time"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
"golang.org/x/oauth2"
|
||||
"golang.org/x/oauth2/google"
|
||||
"golang.org/x/oauth2/jwt"
|
||||
)
|
||||
|
||||
var (
|
||||
// alpnProtoStr are the specified application level protocols for gRPC.
|
||||
alpnProtoStr = []string{"h2", "h2-14", "h2-15", "h2-16"}
|
||||
alpnProtoStr = []string{"h2"}
|
||||
)
|
||||
|
||||
// Credentials defines the common interface all supported credentials must
|
||||
|
@ -63,11 +60,15 @@ type Credentials interface {
|
|||
// GetRequestMetadata gets the current request metadata, refreshing
|
||||
// tokens if required. This should be called by the transport layer on
|
||||
// each request, and the data should be populated in headers or other
|
||||
// context. When supported by the underlying implementation, ctx can
|
||||
// be used for timeout and cancellation.
|
||||
// context. uri is the URI of the entry point for the request. When
|
||||
// supported by the underlying implementation, ctx can be used for
|
||||
// timeout and cancellation.
|
||||
// TODO(zhaoq): Define the set of the qualified keys instead of leaving
|
||||
// it as an arbitrary string.
|
||||
GetRequestMetadata(ctx context.Context) (map[string]string, error)
|
||||
GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error)
|
||||
// RequireTransportSecurity indicates whether the credentails requires
|
||||
// transport security.
|
||||
RequireTransportSecurity() bool
|
||||
}
|
||||
|
||||
// ProtocolInfo provides information regarding the gRPC wire protocol version,
|
||||
|
@ -81,26 +82,44 @@ type ProtocolInfo struct {
|
|||
SecurityVersion string
|
||||
}
|
||||
|
||||
// AuthInfo defines the common interface for the auth information the users are interested in.
|
||||
type AuthInfo interface {
|
||||
AuthType() string
|
||||
}
|
||||
|
||||
// TransportAuthenticator defines the common interface for all the live gRPC wire
|
||||
// protocols and supported transport security protocols (e.g., TLS, SSL).
|
||||
type TransportAuthenticator interface {
|
||||
// ClientHandshake does the authentication handshake specified by the corresponding
|
||||
// authentication protocol on rawConn for clients.
|
||||
ClientHandshake(addr string, rawConn net.Conn, timeout time.Duration) (net.Conn, error)
|
||||
// ServerHandshake does the authentication handshake for servers.
|
||||
ServerHandshake(rawConn net.Conn) (net.Conn, error)
|
||||
// authentication protocol on rawConn for clients. It returns the authenticated
|
||||
// connection and the corresponding auth information about the connection.
|
||||
ClientHandshake(addr string, rawConn net.Conn, timeout time.Duration) (net.Conn, AuthInfo, error)
|
||||
// ServerHandshake does the authentication handshake for servers. It returns
|
||||
// the authenticated connection and the corresponding auth information about
|
||||
// the connection.
|
||||
ServerHandshake(rawConn net.Conn) (net.Conn, AuthInfo, error)
|
||||
// Info provides the ProtocolInfo of this TransportAuthenticator.
|
||||
Info() ProtocolInfo
|
||||
Credentials
|
||||
}
|
||||
|
||||
// TLSInfo contains the auth information for a TLS authenticated connection.
|
||||
// It implements the AuthInfo interface.
|
||||
type TLSInfo struct {
|
||||
State tls.ConnectionState
|
||||
}
|
||||
|
||||
func (t TLSInfo) AuthType() string {
|
||||
return "tls"
|
||||
}
|
||||
|
||||
// tlsCreds is the credentials required for authenticating a connection using TLS.
|
||||
type tlsCreds struct {
|
||||
// TLS configuration
|
||||
config tls.Config
|
||||
}
|
||||
|
||||
func (c *tlsCreds) Info() ProtocolInfo {
|
||||
func (c tlsCreds) Info() ProtocolInfo {
|
||||
return ProtocolInfo{
|
||||
SecurityProtocol: "tls",
|
||||
SecurityVersion: "1.2",
|
||||
|
@ -109,17 +128,21 @@ func (c *tlsCreds) Info() ProtocolInfo {
|
|||
|
||||
// GetRequestMetadata returns nil, nil since TLS credentials does not have
|
||||
// metadata.
|
||||
func (c *tlsCreds) GetRequestMetadata(ctx context.Context) (map[string]string, error) {
|
||||
func (c *tlsCreds) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (c *tlsCreds) RequireTransportSecurity() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
type timeoutError struct{}
|
||||
|
||||
func (timeoutError) Error() string { return "credentials: Dial timed out" }
|
||||
func (timeoutError) Timeout() bool { return true }
|
||||
func (timeoutError) Temporary() bool { return true }
|
||||
|
||||
func (c *tlsCreds) ClientHandshake(addr string, rawConn net.Conn, timeout time.Duration) (_ net.Conn, err error) {
|
||||
func (c *tlsCreds) ClientHandshake(addr string, rawConn net.Conn, timeout time.Duration) (_ net.Conn, _ AuthInfo, err error) {
|
||||
// borrow some code from tls.DialWithDialer
|
||||
var errChannel chan error
|
||||
if timeout != 0 {
|
||||
|
@ -146,18 +169,20 @@ func (c *tlsCreds) ClientHandshake(addr string, rawConn net.Conn, timeout time.D
|
|||
}
|
||||
if err != nil {
|
||||
rawConn.Close()
|
||||
return nil, err
|
||||
return nil, nil, err
|
||||
}
|
||||
return conn, nil
|
||||
// TODO(zhaoq): Omit the auth info for client now. It is more for
|
||||
// information than anything else.
|
||||
return conn, nil, nil
|
||||
}
|
||||
|
||||
func (c *tlsCreds) ServerHandshake(rawConn net.Conn) (net.Conn, error) {
|
||||
func (c *tlsCreds) ServerHandshake(rawConn net.Conn) (net.Conn, AuthInfo, error) {
|
||||
conn := tls.Server(rawConn, &c.config)
|
||||
if err := conn.Handshake(); err != nil {
|
||||
rawConn.Close()
|
||||
return nil, err
|
||||
return nil, nil, err
|
||||
}
|
||||
return conn, nil
|
||||
return conn, TLSInfo{conn.ConnectionState()}, nil
|
||||
}
|
||||
|
||||
// NewTLS uses c to construct a TransportAuthenticator based on TLS.
|
||||
|
@ -199,72 +224,3 @@ func NewServerTLSFromFile(certFile, keyFile string) (TransportAuthenticator, err
|
|||
}
|
||||
return NewTLS(&tls.Config{Certificates: []tls.Certificate{cert}}), nil
|
||||
}
|
||||
|
||||
// TokenSource supplies credentials from an oauth2.TokenSource.
|
||||
type TokenSource struct {
|
||||
oauth2.TokenSource
|
||||
}
|
||||
|
||||
// GetRequestMetadata gets the request metadata as a map from a TokenSource.
|
||||
func (ts TokenSource) GetRequestMetadata(ctx context.Context) (map[string]string, error) {
|
||||
token, err := ts.Token()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return map[string]string{
|
||||
"authorization": token.TokenType + " " + token.AccessToken,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// NewComputeEngine constructs the credentials that fetches access tokens from
|
||||
// Google Compute Engine (GCE)'s metadata server. It is only valid to use this
|
||||
// if your program is running on a GCE instance.
|
||||
// TODO(dsymonds): Deprecate and remove this.
|
||||
func NewComputeEngine() Credentials {
|
||||
return TokenSource{google.ComputeTokenSource("")}
|
||||
}
|
||||
|
||||
// serviceAccount represents credentials via JWT signing key.
|
||||
type serviceAccount struct {
|
||||
config *jwt.Config
|
||||
}
|
||||
|
||||
func (s serviceAccount) GetRequestMetadata(ctx context.Context) (map[string]string, error) {
|
||||
token, err := s.config.TokenSource(ctx).Token()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return map[string]string{
|
||||
"authorization": token.TokenType + " " + token.AccessToken,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// NewServiceAccountFromKey constructs the credentials using the JSON key slice
|
||||
// from a Google Developers service account.
|
||||
func NewServiceAccountFromKey(jsonKey []byte, scope ...string) (Credentials, error) {
|
||||
config, err := google.JWTConfigFromJSON(jsonKey, scope...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return serviceAccount{config: config}, nil
|
||||
}
|
||||
|
||||
// NewServiceAccountFromFile constructs the credentials using the JSON key file
|
||||
// of a Google Developers service account.
|
||||
func NewServiceAccountFromFile(keyFile string, scope ...string) (Credentials, error) {
|
||||
jsonKey, err := ioutil.ReadFile(keyFile)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("credentials: failed to read the service account key file: %v", err)
|
||||
}
|
||||
return NewServiceAccountFromKey(jsonKey, scope...)
|
||||
}
|
||||
|
||||
// NewApplicationDefault returns "Application Default Credentials". For more
|
||||
// detail, see https://developers.google.com/accounts/docs/application-default-credentials.
|
||||
func NewApplicationDefault(ctx context.Context, scope ...string) (Credentials, error) {
|
||||
t, err := google.DefaultTokenSource(ctx, scope...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return TokenSource{t}, nil
|
||||
}
|
||||
|
|
2
vendor/google.golang.org/grpc/doc.go
generated
vendored
@ -1,6 +1,6 @@
/*
Package grpc implements an RPC system called gRPC.

See https://github.com/grpc/grpc for more information about gRPC.
See www.grpc.io for more information about gRPC.
*/
package grpc
41
vendor/google.golang.org/grpc/grpc-auth-support.md
generated
vendored
@ -1,41 +0,0 @@
# Authentication

As outlined <a href="https://github.com/grpc/grpc-common/blob/master/grpc-auth-support.md">here</a> gRPC supports a number of different mechanisms for asserting identity between an client and server. We'll present some code-samples here demonstrating how to provide TLS support encryption and identity assertions as well as passing OAuth2 tokens to services that support it.

# Enabling TLS on a gRPC client

```Go
conn, err := grpc.Dial(serverAddr, grpc.WithTransportCredentials(credentials.NewClientTLSFromCert(nil, ""))
```

# Enabling TLS on a gRPC server

```Go
creds, err := credentials.NewServerTLSFromFile(certFile, keyFile)
if err != nil {
  log.Fatalf("Failed to generate credentials %v", err)
}
lis, err := net.Listen("tcp", ":0")
server := grpc.NewServer(grpc.Creds(creds))
...
server.Serve(lis)
```

# Authenticating with Google

## Google Compute Engine (GCE)

```Go
conn, err := grpc.Dial(serverAddr, grpc.WithTransportCredentials(credentials.NewClientTLSFromCert(nil, ""), grpc.WithPerRPCCredentials(credentials.NewComputeEngine())))
```

## JWT

```Go
jwtCreds, err := credentials.NewServiceAccountFromFile(*serviceAccountKeyFile, *oauthScope)
if err != nil {
  log.Fatalf("Failed to create JWT credentials: %v", err)
}
conn, err := grpc.Dial(serverAddr, grpc.WithTransportCredentials(credentials.NewClientTLSFromCert(nil, ""), grpc.WithPerRPCCredentials(jwtCreds)))
```
5
vendor/google.golang.org/grpc/grpclog/logger.go
generated
vendored
@ -42,6 +42,8 @@ import (
)

// Use golang's standard logger by default.
// Access is not mutex-protected: do not modify except in init()
// functions.
var logger Logger = log.New(os.Stderr, "", log.LstdFlags)

// Logger mimics golang's standard Logger as an interface.
@ -54,7 +56,8 @@ type Logger interface {
    Println(args ...interface{})
}

// SetLogger sets the logger that is used in grpc.
// SetLogger sets the logger that is used in grpc. Call only from
// init() functions.
func SetLogger(l Logger) {
    logger = l
}
49
vendor/google.golang.org/grpc/internal/internal.go
generated
vendored
Normal file
@ -0,0 +1,49 @@
/*
 * Copyright 2016, Google Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 *     * Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above
 * copyright notice, this list of conditions and the following disclaimer
 * in the documentation and/or other materials provided with the
 * distribution.
 *     * Neither the name of Google Inc. nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

// Package internal contains gRPC-internal code for testing, to avoid polluting
// the godoc of the top-level grpc package.
package internal

// TestingCloseConns closes all existing transports but keeps
// grpcServer.lis accepting new connections.
//
// The provided grpcServer must be of type *grpc.Server. It is untyped
// for circular dependency reasons.
var TestingCloseConns func(grpcServer interface{})

// TestingUseHandlerImpl enables the http.Handler-based server implementation.
// It must be called before Serve and requires TLS credentials.
//
// The provided grpcServer must be of type *grpc.Server. It is untyped
// for circular dependency reasons.
var TestingUseHandlerImpl func(grpcServer interface{})
34
vendor/google.golang.org/grpc/metadata/metadata.go
generated
vendored
@ -46,27 +46,16 @@ const (
    binHdrSuffix = "-bin"
)

// grpc-http2 requires ASCII header key and value (more detail can be found in
// "Requests" subsection in go/grpc-http2).
func isASCII(s string) bool {
    for _, c := range s {
        if c > 127 {
            return false
        }
    }
    return true
}

// encodeKeyValue encodes key and value qualified for transmission via gRPC.
// Transmitting binary headers violates HTTP/2 spec.
// TODO(zhaoq): Maybe check if k is ASCII also.
func encodeKeyValue(k, v string) (string, string) {
    if isASCII(v) {
        return k, v
    k = strings.ToLower(k)
    if strings.HasSuffix(k, binHdrSuffix) {
        val := base64.StdEncoding.EncodeToString([]byte(v))
        v = string(val)
    }
    key := k + binHdrSuffix
    val := base64.StdEncoding.EncodeToString([]byte(v))
    return key, string(val)
    return k, v
}

// DecodeKeyValue returns the original key and value corresponding to the
@ -75,24 +64,23 @@ func DecodeKeyValue(k, v string) (string, string, error) {
    if !strings.HasSuffix(k, binHdrSuffix) {
        return k, v, nil
    }
    key := k[:len(k)-len(binHdrSuffix)]
    val, err := base64.StdEncoding.DecodeString(v)
    if err != nil {
        return "", "", err
    }
    return key, string(val), nil
    return k, string(val), nil
}

// MD is a mapping from metadata keys to values. Users should use the following
// two convenience functions New and Pairs to generate MD.
type MD map[string]string
type MD map[string][]string

// New creates a MD from given key-value map.
func New(m map[string]string) MD {
    md := MD{}
    for k, v := range m {
        key, val := encodeKeyValue(k, v)
        md[key] = val
        md[key] = append(md[key], val)
    }
    return md
}
@ -111,7 +99,7 @@ func Pairs(kv ...string) MD {
            continue
        }
        key, val := encodeKeyValue(k, s)
        md[key] = val
        md[key] = append(md[key], val)
    }
    return md
}
@ -125,7 +113,9 @@ func (md MD) Len() int {
func (md MD) Copy() MD {
    out := MD{}
    for k, v := range md {
        out[k] = v
        for _, i := range v {
            out[k] = append(out[k], i)
        }
    }
    return out
}
73
vendor/google.golang.org/grpc/naming/naming.go
generated
vendored
Normal file
@ -0,0 +1,73 @@
/*
 *
 * Copyright 2014, Google Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 *     * Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above
 * copyright notice, this list of conditions and the following disclaimer
 * in the documentation and/or other materials provided with the
 * distribution.
 *     * Neither the name of Google Inc. nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

// Package naming defines the naming API and related data structures for gRPC.
// The interface is EXPERIMENTAL and may be suject to change.
package naming

// Operation defines the corresponding operations for a name resolution change.
type Operation uint8

const (
    // Add indicates a new address is added.
    Add Operation = iota
    // Delete indicates an exisiting address is deleted.
    Delete
)

// Update defines a name resolution update. Notice that it is not valid having both
// empty string Addr and nil Metadata in an Update.
type Update struct {
    // Op indicates the operation of the update.
    Op Operation
    // Addr is the updated address. It is empty string if there is no address update.
    Addr string
    // Metadata is the updated metadata. It is nil if there is no metadata update.
    // Metadata is not required for a custom naming implementation.
    Metadata interface{}
}

// Resolver creates a Watcher for a target to track its resolution changes.
type Resolver interface {
    // Resolve creates a Watcher for target.
    Resolve(target string) (Watcher, error)
}

// Watcher watches for the updates on the specified target.
type Watcher interface {
    // Next blocks until an update or error happens. It may return one or more
    // updates. The first call should get the full set of the results.
    Next() ([]*Update, error)
    // Close closes the Watcher.
    Close()
}
65
vendor/google.golang.org/grpc/peer/peer.go
generated
vendored
Normal file
@ -0,0 +1,65 @@
/*
 *
 * Copyright 2014, Google Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 *     * Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above
 * copyright notice, this list of conditions and the following disclaimer
 * in the documentation and/or other materials provided with the
 * distribution.
 *     * Neither the name of Google Inc. nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

// Package peer defines various peer information associated with RPCs and
// corresponding utils.
package peer

import (
    "net"

    "golang.org/x/net/context"
    "google.golang.org/grpc/credentials"
)

// Peer contains the information of the peer for an RPC.
type Peer struct {
    // Addr is the peer address.
    Addr net.Addr
    // AuthInfo is the authentication information of the transport.
    // It is nil if there is no transport security being used.
    AuthInfo credentials.AuthInfo
}

type peerKey struct{}

// NewContext creates a new context with peer information attached.
func NewContext(ctx context.Context, p *Peer) context.Context {
    return context.WithValue(ctx, peerKey{}, p)
}

// FromContext returns the peer information in ctx if it exists.
func FromContext(ctx context.Context) (p *Peer, ok bool) {
    p, ok = ctx.Value(peerKey{}).(*Peer)
    return
}
243
vendor/google.golang.org/grpc/picker.go
generated
vendored
Normal file
|
@ -0,0 +1,243 @@
|
|||
/*
|
||||
*
|
||||
* Copyright 2014, Google Inc.
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions are
|
||||
* met:
|
||||
*
|
||||
* * Redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer.
|
||||
* * Redistributions in binary form must reproduce the above
|
||||
* copyright notice, this list of conditions and the following disclaimer
|
||||
* in the documentation and/or other materials provided with the
|
||||
* distribution.
|
||||
* * Neither the name of Google Inc. nor the names of its
|
||||
* contributors may be used to endorse or promote products derived from
|
||||
* this software without specific prior written permission.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*
|
||||
*/
|
||||
|
||||
package grpc
|
||||
|
||||
import (
|
||||
"container/list"
|
||||
"fmt"
|
||||
"sync"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
"google.golang.org/grpc/grpclog"
|
||||
"google.golang.org/grpc/naming"
|
||||
"google.golang.org/grpc/transport"
|
||||
)
|
||||
|
||||
// Picker picks a Conn for RPC requests.
|
||||
// This is EXPERIMENTAL and please do not implement your own Picker for now.
|
||||
type Picker interface {
|
||||
// Init does initial processing for the Picker, e.g., initiate some connections.
|
||||
Init(cc *ClientConn) error
|
||||
// Pick blocks until either a transport.ClientTransport is ready for the upcoming RPC
|
||||
// or some error happens.
|
||||
Pick(ctx context.Context) (transport.ClientTransport, error)
|
||||
// PickAddr picks a peer address for connecting. This will be called repeated for
|
||||
// connecting/reconnecting.
|
||||
PickAddr() (string, error)
|
||||
// State returns the connectivity state of the underlying connections.
|
||||
State() (ConnectivityState, error)
|
||||
// WaitForStateChange blocks until the state changes to something other than
|
||||
// the sourceState. It returns the new state or error.
|
||||
WaitForStateChange(ctx context.Context, sourceState ConnectivityState) (ConnectivityState, error)
|
||||
// Close closes all the Conn's owned by this Picker.
|
||||
Close() error
|
||||
}
|
||||
|
||||
// unicastPicker is the default Picker which is used when there is no custom Picker
|
||||
// specified by users. It always picks the same Conn.
|
||||
type unicastPicker struct {
|
||||
target string
|
||||
conn *Conn
|
||||
}
|
||||
|
||||
func (p *unicastPicker) Init(cc *ClientConn) error {
|
||||
c, err := NewConn(cc)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
p.conn = c
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *unicastPicker) Pick(ctx context.Context) (transport.ClientTransport, error) {
|
||||
return p.conn.Wait(ctx)
|
||||
}
|
||||
|
||||
func (p *unicastPicker) PickAddr() (string, error) {
|
||||
return p.target, nil
|
||||
}
|
||||
|
||||
func (p *unicastPicker) State() (ConnectivityState, error) {
|
||||
return p.conn.State(), nil
|
||||
}
|
||||
|
||||
func (p *unicastPicker) WaitForStateChange(ctx context.Context, sourceState ConnectivityState) (ConnectivityState, error) {
|
||||
return p.conn.WaitForStateChange(ctx, sourceState)
|
||||
}
|
||||
|
||||
func (p *unicastPicker) Close() error {
|
||||
if p.conn != nil {
|
||||
return p.conn.Close()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// unicastNamingPicker picks an address from a name resolver to set up the connection.
|
||||
type unicastNamingPicker struct {
|
||||
cc *ClientConn
|
||||
resolver naming.Resolver
|
||||
watcher naming.Watcher
|
||||
mu sync.Mutex
|
||||
// The list of the addresses are obtained from watcher.
|
||||
addrs *list.List
|
||||
// It tracks the current picked addr by PickAddr(). The next PickAddr may
|
||||
// push it forward on addrs.
|
||||
pickedAddr *list.Element
|
||||
conn *Conn
|
||||
}
|
||||
|
||||
// NewUnicastNamingPicker creates a Picker to pick addresses from a name resolver
|
||||
// to connect.
|
||||
func NewUnicastNamingPicker(r naming.Resolver) Picker {
|
||||
return &unicastNamingPicker{
|
||||
resolver: r,
|
||||
addrs: list.New(),
|
||||
}
|
||||
}
|
||||
|
||||
type addrInfo struct {
|
||||
addr string
|
||||
// Set to true if this addrInfo needs to be deleted in the next PickAddrr() call.
|
||||
deleting bool
|
||||
}
|
||||
|
||||
// processUpdates calls Watcher.Next() once and processes the obtained updates.
|
||||
func (p *unicastNamingPicker) processUpdates() error {
|
||||
updates, err := p.watcher.Next()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, update := range updates {
|
||||
switch update.Op {
|
||||
case naming.Add:
|
||||
p.mu.Lock()
|
||||
p.addrs.PushBack(&addrInfo{
|
||||
addr: update.Addr,
|
||||
})
|
||||
p.mu.Unlock()
|
||||
// Initial connection setup
|
||||
if p.conn == nil {
|
||||
conn, err := NewConn(p.cc)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
p.conn = conn
|
||||
}
|
||||
case naming.Delete:
|
||||
p.mu.Lock()
|
||||
for e := p.addrs.Front(); e != nil; e = e.Next() {
|
||||
if update.Addr == e.Value.(*addrInfo).addr {
|
||||
if e == p.pickedAddr {
|
||||
// Do not remove the element now if it is the current picked
|
||||
// one. We leave the deletion to the next PickAddr() call.
|
||||
e.Value.(*addrInfo).deleting = true
|
||||
// Notify Conn to close it. All the live RPCs on this connection
|
||||
// will be aborted.
|
||||
p.conn.NotifyReset()
|
||||
} else {
|
||||
p.addrs.Remove(e)
|
||||
}
|
||||
}
|
||||
}
|
||||
p.mu.Unlock()
|
||||
default:
|
||||
grpclog.Println("Unknown update.Op ", update.Op)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// monitor runs in a standalone goroutine to keep watching name resolution updates until the watcher
|
||||
// is closed.
|
||||
func (p *unicastNamingPicker) monitor() {
|
||||
for {
|
||||
if err := p.processUpdates(); err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (p *unicastNamingPicker) Init(cc *ClientConn) error {
|
||||
w, err := p.resolver.Resolve(cc.target)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
p.watcher = w
|
||||
p.cc = cc
|
||||
// Get the initial name resolution.
|
||||
if err := p.processUpdates(); err != nil {
|
||||
return err
|
||||
}
|
||||
go p.monitor()
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *unicastNamingPicker) Pick(ctx context.Context) (transport.ClientTransport, error) {
|
||||
return p.conn.Wait(ctx)
|
||||
}
|
||||
|
||||
func (p *unicastNamingPicker) PickAddr() (string, error) {
|
||||
p.mu.Lock()
|
||||
defer p.mu.Unlock()
|
||||
if p.pickedAddr == nil {
|
||||
p.pickedAddr = p.addrs.Front()
|
||||
} else {
|
||||
pa := p.pickedAddr
|
||||
p.pickedAddr = pa.Next()
|
||||
if pa.Value.(*addrInfo).deleting {
|
||||
p.addrs.Remove(pa)
|
||||
}
|
||||
if p.pickedAddr == nil {
|
||||
p.pickedAddr = p.addrs.Front()
|
||||
}
|
||||
}
|
||||
if p.pickedAddr == nil {
|
||||
return "", fmt.Errorf("there is no address available to pick")
|
||||
}
|
||||
return p.pickedAddr.Value.(*addrInfo).addr, nil
|
||||
}
|
||||
|
||||
func (p *unicastNamingPicker) State() (ConnectivityState, error) {
|
||||
return 0, fmt.Errorf("State() is not supported for unicastNamingPicker")
|
||||
}
|
||||
|
||||
func (p *unicastNamingPicker) WaitForStateChange(ctx context.Context, sourceState ConnectivityState) (ConnectivityState, error) {
|
||||
return 0, fmt.Errorf("WaitForStateChange is not supported for unicastNamingPciker")
|
||||
}
|
||||
|
||||
func (p *unicastNamingPicker) Close() error {
|
||||
p.watcher.Close()
|
||||
p.conn.Close()
|
||||
return nil
|
||||
}
|
246
vendor/google.golang.org/grpc/rpc_util.go
generated
vendored
|
@ -35,9 +35,12 @@ package grpc
|
|||
|
||||
import (
|
||||
"bytes"
|
||||
"compress/gzip"
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"math"
|
||||
"math/rand"
|
||||
"os"
|
||||
"time"
|
||||
|
@ -75,6 +78,71 @@ func (protoCodec) String() string {
|
|||
return "proto"
|
||||
}
|
||||
|
||||
// Compressor defines the interface gRPC uses to compress a message.
|
||||
type Compressor interface {
|
||||
// Do compresses p into w.
|
||||
Do(w io.Writer, p []byte) error
|
||||
// Type returns the compression algorithm the Compressor uses.
|
||||
Type() string
|
||||
}
|
||||
|
||||
// NewGZIPCompressor creates a Compressor based on GZIP.
|
||||
func NewGZIPCompressor() Compressor {
|
||||
return &gzipCompressor{}
|
||||
}
|
||||
|
||||
type gzipCompressor struct {
|
||||
}
|
||||
|
||||
func (c *gzipCompressor) Do(w io.Writer, p []byte) error {
|
||||
z := gzip.NewWriter(w)
|
||||
if _, err := z.Write(p); err != nil {
|
||||
return err
|
||||
}
|
||||
return z.Close()
|
||||
}
|
||||
|
||||
func (c *gzipCompressor) Type() string {
|
||||
return "gzip"
|
||||
}
|
||||
|
||||
// Decompressor defines the interface gRPC uses to decompress a message.
|
||||
type Decompressor interface {
|
||||
// Do reads the data from r and uncompress them.
|
||||
Do(r io.Reader) ([]byte, error)
|
||||
// Type returns the compression algorithm the Decompressor uses.
|
||||
Type() string
|
||||
}
|
||||
|
||||
type gzipDecompressor struct {
|
||||
}
|
||||
|
||||
// NewGZIPDecompressor creates a Decompressor based on GZIP.
|
||||
func NewGZIPDecompressor() Decompressor {
|
||||
return &gzipDecompressor{}
|
||||
}
|
||||
|
||||
func (d *gzipDecompressor) Do(r io.Reader) ([]byte, error) {
|
||||
z, err := gzip.NewReader(r)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer z.Close()
|
||||
return ioutil.ReadAll(z)
|
||||
}
|
||||
|
||||
func (d *gzipDecompressor) Type() string {
|
||||
return "gzip"
|
||||
}
|
||||
|
||||
// callInfo contains all related configuration and information about an RPC.
|
||||
type callInfo struct {
|
||||
failFast bool
|
||||
headerMD metadata.MD
|
||||
trailerMD metadata.MD
|
||||
traceInfo traceInfo // in trace.go
|
||||
}
|
||||
|
||||
// CallOption configures a Call before it starts or extracts information from
|
||||
// a Call after it completes.
|
||||
type CallOption interface {
|
||||
|
@ -118,51 +186,62 @@ type payloadFormat uint8
|
|||
|
||||
const (
|
||||
compressionNone payloadFormat = iota // no compression
|
||||
compressionFlate
|
||||
// More formats
|
||||
compressionMade
|
||||
)
|
||||
|
||||
// parser reads complelete gRPC messages from the underlying reader.
|
||||
type parser struct {
|
||||
s io.Reader
|
||||
// r is the underlying reader.
|
||||
// See the comment on recvMsg for the permissible
|
||||
// error types.
|
||||
r io.Reader
|
||||
|
||||
// The header of a gRPC message. Find more detail
|
||||
// at http://www.grpc.io/docs/guides/wire.html.
|
||||
header [5]byte
|
||||
}
|
||||
|
||||
// msgFixedHeader defines the header of a gRPC message (go/grpc-wirefmt).
|
||||
type msgFixedHeader struct {
|
||||
T payloadFormat
|
||||
Length uint32
|
||||
}
|
||||
|
||||
// recvMsg is to read a complete gRPC message from the stream. It is blocking if
|
||||
// the message has not been complete yet. It returns the message and its type,
|
||||
// EOF is returned with nil msg and 0 pf if the entire stream is done. Other
|
||||
// non-nil error is returned if something is wrong on reading.
|
||||
// recvMsg reads a complete gRPC message from the stream.
|
||||
//
|
||||
// It returns the message and its payload (compression/encoding)
|
||||
// format. The caller owns the returned msg memory.
|
||||
//
|
||||
// If there is an error, possible values are:
|
||||
// * io.EOF, when no messages remain
|
||||
// * io.ErrUnexpectedEOF
|
||||
// * of type transport.ConnectionError
|
||||
// * of type transport.StreamError
|
||||
// No other error values or types must be returned, which also means
|
||||
// that the underlying io.Reader must not return an incompatible
|
||||
// error.
|
||||
func (p *parser) recvMsg() (pf payloadFormat, msg []byte, err error) {
|
||||
var hdr msgFixedHeader
|
||||
if err := binary.Read(p.s, binary.BigEndian, &hdr); err != nil {
|
||||
if _, err := io.ReadFull(p.r, p.header[:]); err != nil {
|
||||
return 0, nil, err
|
||||
}
|
||||
if hdr.Length == 0 {
|
||||
return hdr.T, nil, nil
|
||||
|
||||
pf = payloadFormat(p.header[0])
|
||||
length := binary.BigEndian.Uint32(p.header[1:])
|
||||
|
||||
if length == 0 {
|
||||
return pf, nil, nil
|
||||
}
|
||||
msg = make([]byte, int(hdr.Length))
|
||||
if _, err := io.ReadFull(p.s, msg); err != nil {
|
||||
// TODO(bradfitz,zhaoq): garbage. reuse buffer after proto decoding instead
|
||||
// of making it for each message:
|
||||
msg = make([]byte, int(length))
|
||||
if _, err := io.ReadFull(p.r, msg); err != nil {
|
||||
if err == io.EOF {
|
||||
err = io.ErrUnexpectedEOF
|
||||
}
|
||||
return 0, nil, err
|
||||
}
|
||||
return hdr.T, msg, nil
|
||||
return pf, msg, nil
|
||||
}
|
||||
|
||||
// encode serializes msg and prepends the message header. If msg is nil, it
|
||||
// generates the message header of 0 message length.
|
||||
func encode(c Codec, msg interface{}, pf payloadFormat) ([]byte, error) {
|
||||
var buf bytes.Buffer
|
||||
// Write message fixed header.
|
||||
buf.WriteByte(uint8(pf))
|
||||
func encode(c Codec, msg interface{}, cp Compressor, cbuf *bytes.Buffer) ([]byte, error) {
|
||||
var b []byte
|
||||
var length uint32
|
||||
var length uint
|
||||
if msg != nil {
|
||||
var err error
|
||||
// TODO(zhaoq): optimize to reduce memory alloc and copying.
|
||||
|
@ -170,27 +249,71 @@ func encode(c Codec, msg interface{}, pf payloadFormat) ([]byte, error) {
|
|||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
length = uint32(len(b))
|
||||
if cp != nil {
|
||||
if err := cp.Do(cbuf, b); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
b = cbuf.Bytes()
|
||||
}
|
||||
length = uint(len(b))
|
||||
}
|
||||
var szHdr [4]byte
|
||||
binary.BigEndian.PutUint32(szHdr[:], length)
|
||||
buf.Write(szHdr[:])
|
||||
buf.Write(b)
|
||||
return buf.Bytes(), nil
|
||||
if length > math.MaxUint32 {
|
||||
return nil, Errorf(codes.InvalidArgument, "grpc: message too large (%d bytes)", length)
|
||||
}
|
||||
|
||||
const (
|
||||
payloadLen = 1
|
||||
sizeLen = 4
|
||||
)
|
||||
|
||||
var buf = make([]byte, payloadLen+sizeLen+len(b))
|
||||
|
||||
// Write payload format
|
||||
if cp == nil {
|
||||
buf[0] = byte(compressionNone)
|
||||
} else {
|
||||
buf[0] = byte(compressionMade)
|
||||
}
|
||||
// Write length of b into buf
|
||||
binary.BigEndian.PutUint32(buf[1:], uint32(length))
|
||||
// Copy encoded msg to buf
|
||||
copy(buf[5:], b)
|
||||
|
||||
return buf, nil
|
||||
}
|
||||
|
||||
func recv(p *parser, c Codec, m interface{}) error {
|
||||
func checkRecvPayload(pf payloadFormat, recvCompress string, dc Decompressor) error {
|
||||
switch pf {
|
||||
case compressionNone:
|
||||
case compressionMade:
|
||||
if recvCompress == "" {
|
||||
return transport.StreamErrorf(codes.InvalidArgument, "grpc: invalid grpc-encoding %q with compression enabled", recvCompress)
|
||||
}
|
||||
if dc == nil || recvCompress != dc.Type() {
|
||||
return transport.StreamErrorf(codes.InvalidArgument, "grpc: Decompressor is not installed for grpc-encoding %q", recvCompress)
|
||||
}
|
||||
default:
|
||||
return transport.StreamErrorf(codes.InvalidArgument, "grpc: received unexpected payload format %d", pf)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func recv(p *parser, c Codec, s *transport.Stream, dc Decompressor, m interface{}) error {
|
||||
pf, d, err := p.recvMsg()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
switch pf {
|
||||
case compressionNone:
|
||||
if err := c.Unmarshal(d, m); err != nil {
|
||||
return Errorf(codes.Internal, "grpc: %v", err)
|
||||
if err := checkRecvPayload(pf, s.RecvCompress(), dc); err != nil {
|
||||
return err
|
||||
}
|
||||
if pf == compressionMade {
|
||||
d, err = dc.Do(bytes.NewReader(d))
|
||||
if err != nil {
|
||||
return transport.StreamErrorf(codes.Internal, "grpc: failed to decompress the received message %v", err)
|
||||
}
|
||||
default:
|
||||
return Errorf(codes.Internal, "gprc: compression is not supported yet.")
|
||||
}
|
||||
if err := c.Unmarshal(d, m); err != nil {
|
||||
return transport.StreamErrorf(codes.Internal, "grpc: failed to unmarshal the received message %v", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
@ -217,6 +340,18 @@ func Code(err error) codes.Code {
|
|||
return codes.Unknown
|
||||
}
|
||||
|
||||
// ErrorDesc returns the error description of err if it was produced by the rpc system.
|
||||
// Otherwise, it returns err.Error() or empty string when err is nil.
|
||||
func ErrorDesc(err error) string {
|
||||
if err == nil {
|
||||
return ""
|
||||
}
|
||||
if e, ok := err.(rpcError); ok {
|
||||
return e.desc
|
||||
}
|
||||
return err.Error()
|
||||
}
|
||||
|
||||
// Errorf returns an error containing an error code and a description;
|
||||
// Errorf returns nil if c is OK.
|
||||
func Errorf(c codes.Code, format string, a ...interface{}) error {
|
||||
|
@ -232,6 +367,8 @@ func Errorf(c codes.Code, format string, a ...interface{}) error {
|
|||
// toRPCErr converts an error into a rpcError.
|
||||
func toRPCErr(err error) error {
|
||||
switch e := err.(type) {
|
||||
case rpcError:
|
||||
return err
|
||||
case transport.StreamError:
|
||||
return rpcError{
|
||||
code: e.Code,
|
||||
|
@ -277,30 +414,39 @@ func convertCode(err error) codes.Code {
|
|||
const (
|
||||
// how long to wait after the first failure before retrying
|
||||
baseDelay = 1.0 * time.Second
|
||||
// upper bound on backoff delay
|
||||
maxDelay = 120 * time.Second
|
||||
backoffFactor = 2.0 // backoff increases by this factor on each retry
|
||||
backoffRange = 0.4 // backoff is randomized downwards by this factor
|
||||
// upper bound of backoff delay
|
||||
maxDelay = 120 * time.Second
|
||||
// backoff increases by this factor on each retry
|
||||
backoffFactor = 1.6
|
||||
// backoff is randomized downwards by this factor
|
||||
backoffJitter = 0.2
|
||||
)
|
||||
|
||||
// backoff returns a value in [0, maxDelay] that increases exponentially with
|
||||
// retries, starting from baseDelay.
|
||||
func backoff(retries int) time.Duration {
|
||||
func backoff(retries int) (t time.Duration) {
|
||||
if retries == 0 {
|
||||
return baseDelay
|
||||
}
|
||||
backoff, max := float64(baseDelay), float64(maxDelay)
|
||||
for backoff < max && retries > 0 {
|
||||
backoff = backoff * backoffFactor
|
||||
backoff *= backoffFactor
|
||||
retries--
|
||||
}
|
||||
if backoff > max {
|
||||
backoff = max
|
||||
}
|
||||
|
||||
// Randomize backoff delays so that if a cluster of requests start at
|
||||
// the same time, they won't operate in lockstep. We just subtract up
|
||||
// to 40% so that we obey maxDelay.
|
||||
backoff -= backoff * backoffRange * rand.Float64()
|
||||
// the same time, they won't operate in lockstep.
|
||||
backoff *= 1 + backoffJitter*(rand.Float64()*2-1)
|
||||
if backoff < 0 {
|
||||
return 0
|
||||
}
|
||||
return time.Duration(backoff)
|
||||
}
|
||||
|
||||
// SupportPackageIsVersion1 is referenced from generated protocol buffer files
|
||||
// to assert that that code is compatible with this version of the grpc package.
|
||||
//
|
||||
// This constant may be renamed in the future if a change in the generated code
|
||||
// requires a synchronised update of grpc-go and protoc-gen-go. This constant
|
||||
// should not be referenced from any other code.
|
||||
const SupportPackageIsVersion1 = true
|
||||
|
|
517
vendor/google.golang.org/grpc/server.go
generated
vendored
|
@ -34,23 +34,30 @@
|
|||
package grpc
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
"net/http"
|
||||
"reflect"
|
||||
"runtime"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
"golang.org/x/net/http2"
|
||||
"golang.org/x/net/trace"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/credentials"
|
||||
"google.golang.org/grpc/grpclog"
|
||||
"google.golang.org/grpc/internal"
|
||||
"google.golang.org/grpc/metadata"
|
||||
"google.golang.org/grpc/transport"
|
||||
)
|
||||
|
||||
type methodHandler func(srv interface{}, ctx context.Context, codec Codec, buf []byte) (interface{}, error)
|
||||
type methodHandler func(srv interface{}, ctx context.Context, dec func(interface{}) error) (interface{}, error)
|
||||
|
||||
// MethodDesc represents an RPC service's method specification.
|
||||
type MethodDesc struct {
|
||||
|
@ -78,17 +85,22 @@ type service struct {
|
|||
|
||||
// Server is a gRPC server to serve RPC requests.
|
||||
type Server struct {
|
||||
opts options
|
||||
mu sync.Mutex
|
||||
lis map[net.Listener]bool
|
||||
conns map[transport.ServerTransport]bool
|
||||
m map[string]*service // service name -> service info
|
||||
opts options
|
||||
|
||||
mu sync.Mutex // guards following
|
||||
lis map[net.Listener]bool
|
||||
conns map[io.Closer]bool
|
||||
m map[string]*service // service name -> service info
|
||||
events trace.EventLog
|
||||
}
|
||||
|
||||
type options struct {
|
||||
creds credentials.Credentials
|
||||
codec Codec
|
||||
cp Compressor
|
||||
dc Decompressor
|
||||
maxConcurrentStreams uint32
|
||||
useHandlerImpl bool // use http.Handler-based server
|
||||
}
|
||||
|
||||
// A ServerOption sets options.
|
||||
|
@ -101,6 +113,18 @@ func CustomCodec(codec Codec) ServerOption {
|
|||
}
|
||||
}
|
||||
|
||||
func RPCCompressor(cp Compressor) ServerOption {
|
||||
return func(o *options) {
|
||||
o.cp = cp
|
||||
}
|
||||
}
|
||||
|
||||
func RPCDecompressor(dc Decompressor) ServerOption {
|
||||
return func(o *options) {
|
||||
o.dc = dc
|
||||
}
|
||||
}
|
||||
|
||||
// MaxConcurrentStreams returns a ServerOption that will apply a limit on the number
|
||||
// of concurrent streams to each ServerTransport.
|
||||
func MaxConcurrentStreams(n uint32) ServerOption {
|
||||
|
@ -127,12 +151,33 @@ func NewServer(opt ...ServerOption) *Server {
|
|||
// Set the default codec.
|
||||
opts.codec = protoCodec{}
|
||||
}
|
||||
return &Server{
|
||||
s := &Server{
|
||||
lis: make(map[net.Listener]bool),
|
||||
opts: opts,
|
||||
conns: make(map[transport.ServerTransport]bool),
|
||||
conns: make(map[io.Closer]bool),
|
||||
m: make(map[string]*service),
|
||||
}
|
||||
if EnableTracing {
|
||||
_, file, line, _ := runtime.Caller(1)
|
||||
s.events = trace.NewEventLog("grpc.Server", fmt.Sprintf("%s:%d", file, line))
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
// printf records an event in s's event log, unless s has been stopped.
|
||||
// REQUIRES s.mu is held.
|
||||
func (s *Server) printf(format string, a ...interface{}) {
|
||||
if s.events != nil {
|
||||
s.events.Printf(format, a...)
|
||||
}
|
||||
}
|
||||
|
||||
// errorf records an error in s's event log, unless s has been stopped.
|
||||
// REQUIRES s.mu is held.
|
||||
func (s *Server) errorf(format string, a ...interface{}) {
|
||||
if s.events != nil {
|
||||
s.events.Errorf(format, a...)
|
||||
}
|
||||
}
|
||||
|
||||
// RegisterService register a service and its implementation to the gRPC
|
||||
|
@ -150,6 +195,7 @@ func (s *Server) RegisterService(sd *ServiceDesc, ss interface{}) {
|
|||
func (s *Server) register(sd *ServiceDesc, ss interface{}) {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
s.printf("RegisterService(%q)", sd.ServiceName)
|
||||
if _, ok := s.m[sd.ServiceName]; ok {
|
||||
grpclog.Fatalf("grpc: Server.RegisterService found duplicate service registration for %q", sd.ServiceName)
|
||||
}
|
||||
|
@ -175,12 +221,21 @@ var (
|
|||
ErrServerStopped = errors.New("grpc: the server has been stopped")
|
||||
)
|
||||
|
||||
func (s *Server) useTransportAuthenticator(rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) {
|
||||
creds, ok := s.opts.creds.(credentials.TransportAuthenticator)
|
||||
if !ok {
|
||||
return rawConn, nil, nil
|
||||
}
|
||||
return creds.ServerHandshake(rawConn)
|
||||
}
|
||||
|
||||
// Serve accepts incoming connections on the listener lis, creating a new
|
||||
// ServerTransport and service goroutine for each. The service goroutines
|
||||
// read gRPC request and then call the registered handlers to reply to them.
|
||||
// read gRPC requests and then call the registered handlers to reply to them.
|
||||
// Service returns when lis.Accept fails.
|
||||
func (s *Server) Serve(lis net.Listener) error {
|
||||
s.mu.Lock()
|
||||
s.printf("serving")
|
||||
if s.lis == nil {
|
||||
s.mu.Unlock()
|
||||
return ErrServerStopped
|
||||
|
@ -194,46 +249,167 @@ func (s *Server) Serve(lis net.Listener) error {
|
|||
s.mu.Unlock()
|
||||
}()
|
||||
for {
|
||||
c, err := lis.Accept()
|
||||
rawConn, err := lis.Accept()
|
||||
if err != nil {
|
||||
s.mu.Lock()
|
||||
s.printf("done serving; Accept = %v", err)
|
||||
s.mu.Unlock()
|
||||
return err
|
||||
}
|
||||
if creds, ok := s.opts.creds.(credentials.TransportAuthenticator); ok {
|
||||
c, err = creds.ServerHandshake(c)
|
||||
if err != nil {
|
||||
grpclog.Println("grpc: Server.Serve failed to complete security handshake.")
|
||||
continue
|
||||
}
|
||||
}
|
||||
s.mu.Lock()
|
||||
if s.conns == nil {
|
||||
s.mu.Unlock()
|
||||
c.Close()
|
||||
return nil
|
||||
}
|
||||
st, err := transport.NewServerTransport("http2", c, s.opts.maxConcurrentStreams)
|
||||
if err != nil {
|
||||
s.mu.Unlock()
|
||||
c.Close()
|
||||
grpclog.Println("grpc: Server.Serve failed to create ServerTransport: ", err)
|
||||
continue
|
||||
}
|
||||
s.conns[st] = true
|
||||
s.mu.Unlock()
|
||||
|
||||
go func() {
|
||||
st.HandleStreams(func(stream *transport.Stream) {
|
||||
s.handleStream(st, stream)
|
||||
})
|
||||
s.mu.Lock()
|
||||
delete(s.conns, st)
|
||||
s.mu.Unlock()
|
||||
}()
|
||||
// Start a new goroutine to deal with rawConn
|
||||
// so we don't stall this Accept loop goroutine.
|
||||
go s.handleRawConn(rawConn)
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Server) sendResponse(t transport.ServerTransport, stream *transport.Stream, msg interface{}, pf payloadFormat, opts *transport.Options) error {
|
||||
p, err := encode(s.opts.codec, msg, pf)
|
||||
// handleRawConn is run in its own goroutine and handles a just-accepted
|
||||
// connection that has not had any I/O performed on it yet.
|
||||
func (s *Server) handleRawConn(rawConn net.Conn) {
|
||||
conn, authInfo, err := s.useTransportAuthenticator(rawConn)
|
||||
if err != nil {
|
||||
s.mu.Lock()
|
||||
s.errorf("ServerHandshake(%q) failed: %v", rawConn.RemoteAddr(), err)
|
||||
s.mu.Unlock()
|
||||
grpclog.Printf("grpc: Server.Serve failed to complete security handshake from %q: %v", rawConn.RemoteAddr(), err)
|
||||
rawConn.Close()
|
||||
return
|
||||
}
|
||||
|
||||
s.mu.Lock()
|
||||
if s.conns == nil {
|
||||
s.mu.Unlock()
|
||||
conn.Close()
|
||||
return
|
||||
}
|
||||
s.mu.Unlock()
|
||||
|
||||
if s.opts.useHandlerImpl {
|
||||
s.serveUsingHandler(conn)
|
||||
} else {
|
||||
s.serveNewHTTP2Transport(conn, authInfo)
|
||||
}
|
||||
}
|
||||
|
||||
// serveNewHTTP2Transport sets up a new http/2 transport (using the
|
||||
// gRPC http2 server transport in transport/http2_server.go) and
|
||||
// serves streams on it.
|
||||
// This is run in its own goroutine (it does network I/O in
|
||||
// transport.NewServerTransport).
|
||||
func (s *Server) serveNewHTTP2Transport(c net.Conn, authInfo credentials.AuthInfo) {
|
||||
st, err := transport.NewServerTransport("http2", c, s.opts.maxConcurrentStreams, authInfo)
|
||||
if err != nil {
|
||||
s.mu.Lock()
|
||||
s.errorf("NewServerTransport(%q) failed: %v", c.RemoteAddr(), err)
|
||||
s.mu.Unlock()
|
||||
c.Close()
|
||||
grpclog.Println("grpc: Server.Serve failed to create ServerTransport: ", err)
|
||||
return
|
||||
}
|
||||
if !s.addConn(st) {
|
||||
st.Close()
|
||||
return
|
||||
}
|
||||
s.serveStreams(st)
|
||||
}
|
||||
|
||||
func (s *Server) serveStreams(st transport.ServerTransport) {
|
||||
defer s.removeConn(st)
|
||||
defer st.Close()
|
||||
var wg sync.WaitGroup
|
||||
st.HandleStreams(func(stream *transport.Stream) {
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
s.handleStream(st, stream, s.traceInfo(st, stream))
|
||||
}()
|
||||
})
|
||||
wg.Wait()
|
||||
}
|
||||
|
||||
var _ http.Handler = (*Server)(nil)
|
||||
|
||||
// serveUsingHandler is called from handleRawConn when s is configured
|
||||
// to handle requests via the http.Handler interface. It sets up a
|
||||
// net/http.Server to handle the just-accepted conn. The http.Server
|
||||
// is configured to route all incoming requests (all HTTP/2 streams)
|
||||
// to ServeHTTP, which creates a new ServerTransport for each stream.
|
||||
// serveUsingHandler blocks until conn closes.
|
||||
//
|
||||
// This codepath is only used when Server.TestingUseHandlerImpl has
|
||||
// been configured. This lets the end2end tests exercise the ServeHTTP
|
||||
// method as one of the environment types.
|
||||
//
|
||||
// conn is the *tls.Conn that's already been authenticated.
|
||||
func (s *Server) serveUsingHandler(conn net.Conn) {
|
||||
if !s.addConn(conn) {
|
||||
conn.Close()
|
||||
return
|
||||
}
|
||||
defer s.removeConn(conn)
|
||||
h2s := &http2.Server{
|
||||
MaxConcurrentStreams: s.opts.maxConcurrentStreams,
|
||||
}
|
||||
h2s.ServeConn(conn, &http2.ServeConnOpts{
|
||||
Handler: s,
|
||||
})
|
||||
}
|
||||
|
||||
func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) {
|
||||
st, err := transport.NewServerHandlerTransport(w, r)
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
if !s.addConn(st) {
|
||||
st.Close()
|
||||
return
|
||||
}
|
||||
defer s.removeConn(st)
|
||||
s.serveStreams(st)
|
||||
}
|
||||
|
||||
// traceInfo returns a traceInfo and associates it with stream, if tracing is enabled.
|
||||
// If tracing is not enabled, it returns nil.
|
||||
func (s *Server) traceInfo(st transport.ServerTransport, stream *transport.Stream) (trInfo *traceInfo) {
|
||||
if !EnableTracing {
|
||||
return nil
|
||||
}
|
||||
trInfo = &traceInfo{
|
||||
tr: trace.New("grpc.Recv."+methodFamily(stream.Method()), stream.Method()),
|
||||
}
|
||||
trInfo.firstLine.client = false
|
||||
trInfo.firstLine.remoteAddr = st.RemoteAddr()
|
||||
stream.TraceContext(trInfo.tr)
|
||||
if dl, ok := stream.Context().Deadline(); ok {
|
||||
trInfo.firstLine.deadline = dl.Sub(time.Now())
|
||||
}
|
||||
return trInfo
|
||||
}
|
||||
|
||||
func (s *Server) addConn(c io.Closer) bool {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
if s.conns == nil {
|
||||
return false
|
||||
}
|
||||
s.conns[c] = true
|
||||
return true
|
||||
}
|
||||
|
||||
func (s *Server) removeConn(c io.Closer) {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
if s.conns != nil {
|
||||
delete(s.conns, c)
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Server) sendResponse(t transport.ServerTransport, stream *transport.Stream, msg interface{}, cp Compressor, opts *transport.Options) error {
|
||||
var cbuf *bytes.Buffer
|
||||
if cp != nil {
|
||||
cbuf = new(bytes.Buffer)
|
||||
}
|
||||
p, err := encode(s.opts.codec, msg, cp, cbuf)
|
||||
if err != nil {
|
||||
// This typically indicates a fatal issue (e.g., memory
|
||||
// corruption or hardware faults) the application program
|
||||
|
@ -247,13 +423,27 @@ func (s *Server) sendResponse(t transport.ServerTransport, stream *transport.Str
|
|||
return t.Write(stream, p, opts)
|
||||
}
|
||||
|
||||
func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.Stream, srv *service, md *MethodDesc) {
|
||||
p := &parser{s: stream}
|
||||
func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.Stream, srv *service, md *MethodDesc, trInfo *traceInfo) (err error) {
|
||||
if trInfo != nil {
|
||||
defer trInfo.tr.Finish()
|
||||
trInfo.firstLine.client = false
|
||||
trInfo.tr.LazyLog(&trInfo.firstLine, false)
|
||||
defer func() {
|
||||
if err != nil && err != io.EOF {
|
||||
trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true)
|
||||
trInfo.tr.SetError()
|
||||
}
|
||||
}()
|
||||
}
|
||||
p := &parser{r: stream}
|
||||
for {
|
||||
pf, req, err := p.recvMsg()
|
||||
if err == io.EOF {
|
||||
// The entire stream is done (for unary RPC only).
|
||||
return
|
||||
return err
|
||||
}
|
||||
if err == io.ErrUnexpectedEOF {
|
||||
err = transport.StreamError{Code: codes.Internal, Desc: "io.ErrUnexpectedEOF"}
|
||||
}
|
||||
if err != nil {
|
||||
switch err := err.(type) {
|
||||
|
@ -261,110 +451,226 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
|
|||
// Nothing to do here.
|
||||
case transport.StreamError:
|
||||
if err := t.WriteStatus(stream, err.Code, err.Desc); err != nil {
|
||||
grpclog.Printf("grpc: Server.processUnaryRPC failed to write status: %v", err)
|
||||
grpclog.Printf("grpc: Server.processUnaryRPC failed to write status %v", err)
|
||||
}
|
||||
default:
|
||||
panic(fmt.Sprintf("grpc: Unexpected error (%T) from recvMsg: %v", err, err))
|
||||
}
|
||||
return
|
||||
return err
|
||||
}
|
||||
switch pf {
|
||||
case compressionNone:
|
||||
statusCode := codes.OK
|
||||
statusDesc := ""
|
||||
reply, appErr := md.Handler(srv.server, stream.Context(), s.opts.codec, req)
|
||||
if appErr != nil {
|
||||
if err, ok := appErr.(rpcError); ok {
|
||||
statusCode = err.code
|
||||
statusDesc = err.desc
|
||||
} else {
|
||||
statusCode = convertCode(appErr)
|
||||
statusDesc = appErr.Error()
|
||||
|
||||
if err := checkRecvPayload(pf, stream.RecvCompress(), s.opts.dc); err != nil {
|
||||
switch err := err.(type) {
|
||||
case transport.StreamError:
|
||||
if err := t.WriteStatus(stream, err.Code, err.Desc); err != nil {
|
||||
grpclog.Printf("grpc: Server.processUnaryRPC failed to write status %v", err)
|
||||
}
|
||||
if err := t.WriteStatus(stream, statusCode, statusDesc); err != nil {
|
||||
grpclog.Printf("grpc: Server.processUnaryRPC failed to write status: %v", err)
|
||||
default:
|
||||
if err := t.WriteStatus(stream, codes.Internal, err.Error()); err != nil {
|
||||
grpclog.Printf("grpc: Server.processUnaryRPC failed to write status %v", err)
|
||||
}
|
||||
return
|
||||
|
||||
}
|
||||
opts := &transport.Options{
|
||||
Last: true,
|
||||
Delay: false,
|
||||
}
|
||||
if err := s.sendResponse(t, stream, reply, compressionNone, opts); err != nil {
|
||||
if _, ok := err.(transport.ConnectionError); ok {
|
||||
return
|
||||
}
|
||||
if e, ok := err.(transport.StreamError); ok {
|
||||
statusCode = e.Code
|
||||
statusDesc = e.Desc
|
||||
} else {
|
||||
statusCode = codes.Unknown
|
||||
statusDesc = err.Error()
|
||||
}
|
||||
}
|
||||
t.WriteStatus(stream, statusCode, statusDesc)
|
||||
default:
|
||||
panic(fmt.Sprintf("payload format to be supported: %d", pf))
|
||||
return err
|
||||
}
|
||||
statusCode := codes.OK
|
||||
statusDesc := ""
|
||||
df := func(v interface{}) error {
|
||||
if pf == compressionMade {
|
||||
var err error
|
||||
req, err = s.opts.dc.Do(bytes.NewReader(req))
|
||||
if err != nil {
|
||||
if err := t.WriteStatus(stream, codes.Internal, err.Error()); err != nil {
|
||||
grpclog.Printf("grpc: Server.processUnaryRPC failed to write status %v", err)
|
||||
}
|
||||
return err
|
||||
}
|
||||
}
|
||||
if err := s.opts.codec.Unmarshal(req, v); err != nil {
|
||||
return err
|
||||
}
|
||||
if trInfo != nil {
|
||||
trInfo.tr.LazyLog(&payload{sent: false, msg: v}, true)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
reply, appErr := md.Handler(srv.server, stream.Context(), df)
|
||||
if appErr != nil {
|
||||
if err, ok := appErr.(rpcError); ok {
|
||||
statusCode = err.code
|
||||
statusDesc = err.desc
|
||||
} else {
|
||||
statusCode = convertCode(appErr)
|
||||
statusDesc = appErr.Error()
|
||||
}
|
||||
if trInfo != nil && statusCode != codes.OK {
|
||||
trInfo.tr.LazyLog(stringer(statusDesc), true)
|
||||
trInfo.tr.SetError()
|
||||
}
|
||||
if err := t.WriteStatus(stream, statusCode, statusDesc); err != nil {
|
||||
grpclog.Printf("grpc: Server.processUnaryRPC failed to write status: %v", err)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
if trInfo != nil {
|
||||
trInfo.tr.LazyLog(stringer("OK"), false)
|
||||
}
|
||||
opts := &transport.Options{
|
||||
Last: true,
|
||||
Delay: false,
|
||||
}
|
||||
if s.opts.cp != nil {
|
||||
stream.SetSendCompress(s.opts.cp.Type())
|
||||
}
|
||||
if err := s.sendResponse(t, stream, reply, s.opts.cp, opts); err != nil {
|
||||
switch err := err.(type) {
|
||||
case transport.ConnectionError:
|
||||
// Nothing to do here.
|
||||
case transport.StreamError:
|
||||
statusCode = err.Code
|
||||
statusDesc = err.Desc
|
||||
default:
|
||||
statusCode = codes.Unknown
|
||||
statusDesc = err.Error()
|
||||
}
|
||||
return err
|
||||
}
|
||||
if trInfo != nil {
|
||||
trInfo.tr.LazyLog(&payload{sent: true, msg: reply}, true)
|
||||
}
|
||||
return t.WriteStatus(stream, statusCode, statusDesc)
|
||||
}
|
||||
}
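With this change the unary path no longer hands raw request bytes and a codec to the generated handler; it passes the df decode closure, which decompresses and unmarshals on demand. A minimal sketch of a handler written against that shape; the Foo types, package and method are illustrative placeholders, not part of this commit:

package echo

import "golang.org/x/net/context"

// Placeholder types for the sketch; stand-ins for generated proto messages.
type FooRequest struct{ Name string }
type FooReply struct{ Greeting string }

type FooServer interface {
	Bar(ctx context.Context, in *FooRequest) (*FooReply, error)
}

// A unary handler under the decode-closure scheme: dec is the df closure
// built in processUnaryRPC above; it fills in the value the handler allocates.
func _Foo_Bar_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error) (interface{}, error) {
	in := new(FooRequest)
	if err := dec(in); err != nil {
		return nil, err
	}
	return srv.(FooServer).Bar(ctx, in)
}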
|
||||
|
||||
func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transport.Stream, srv *service, sd *StreamDesc) {
|
||||
func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transport.Stream, srv *service, sd *StreamDesc, trInfo *traceInfo) (err error) {
|
||||
if s.opts.cp != nil {
|
||||
stream.SetSendCompress(s.opts.cp.Type())
|
||||
}
|
||||
ss := &serverStream{
|
||||
t: t,
|
||||
s: stream,
|
||||
p: &parser{s: stream},
|
||||
codec: s.opts.codec,
|
||||
t: t,
|
||||
s: stream,
|
||||
p: &parser{r: stream},
|
||||
codec: s.opts.codec,
|
||||
cp: s.opts.cp,
|
||||
dc: s.opts.dc,
|
||||
trInfo: trInfo,
|
||||
}
|
||||
if ss.cp != nil {
|
||||
ss.cbuf = new(bytes.Buffer)
|
||||
}
|
||||
if trInfo != nil {
|
||||
trInfo.tr.LazyLog(&trInfo.firstLine, false)
|
||||
defer func() {
|
||||
ss.mu.Lock()
|
||||
if err != nil && err != io.EOF {
|
||||
ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true)
|
||||
ss.trInfo.tr.SetError()
|
||||
}
|
||||
ss.trInfo.tr.Finish()
|
||||
ss.trInfo.tr = nil
|
||||
ss.mu.Unlock()
|
||||
}()
|
||||
}
|
||||
if appErr := sd.Handler(srv.server, ss); appErr != nil {
|
||||
if err, ok := appErr.(rpcError); ok {
|
||||
ss.statusCode = err.code
|
||||
ss.statusDesc = err.desc
|
||||
} else if err, ok := appErr.(transport.StreamError); ok {
|
||||
ss.statusCode = err.Code
|
||||
ss.statusDesc = err.Desc
|
||||
} else {
|
||||
ss.statusCode = convertCode(appErr)
|
||||
ss.statusDesc = appErr.Error()
|
||||
}
|
||||
}
|
||||
t.WriteStatus(ss.s, ss.statusCode, ss.statusDesc)
|
||||
if trInfo != nil {
|
||||
ss.mu.Lock()
|
||||
if ss.statusCode != codes.OK {
|
||||
ss.trInfo.tr.LazyLog(stringer(ss.statusDesc), true)
|
||||
ss.trInfo.tr.SetError()
|
||||
} else {
|
||||
ss.trInfo.tr.LazyLog(stringer("OK"), false)
|
||||
}
|
||||
ss.mu.Unlock()
|
||||
}
|
||||
return t.WriteStatus(ss.s, ss.statusCode, ss.statusDesc)
|
||||
|
||||
}
|
||||
|
||||
func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Stream) {
|
||||
func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Stream, trInfo *traceInfo) {
|
||||
sm := stream.Method()
|
||||
if sm != "" && sm[0] == '/' {
|
||||
sm = sm[1:]
|
||||
}
|
||||
pos := strings.LastIndex(sm, "/")
|
||||
if pos == -1 {
|
||||
if trInfo != nil {
|
||||
trInfo.tr.LazyLog(&fmtStringer{"Malformed method name %q", []interface{}{sm}}, true)
|
||||
trInfo.tr.SetError()
|
||||
}
|
||||
if err := t.WriteStatus(stream, codes.InvalidArgument, fmt.Sprintf("malformed method name: %q", stream.Method())); err != nil {
|
||||
if trInfo != nil {
|
||||
trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true)
|
||||
trInfo.tr.SetError()
|
||||
}
|
||||
grpclog.Printf("grpc: Server.handleStream failed to write status: %v", err)
|
||||
}
|
||||
if trInfo != nil {
|
||||
trInfo.tr.Finish()
|
||||
}
|
||||
return
|
||||
}
|
||||
service := sm[:pos]
|
||||
method := sm[pos+1:]
|
||||
srv, ok := s.m[service]
|
||||
if !ok {
|
||||
if trInfo != nil {
|
||||
trInfo.tr.LazyLog(&fmtStringer{"Unknown service %v", []interface{}{service}}, true)
|
||||
trInfo.tr.SetError()
|
||||
}
|
||||
if err := t.WriteStatus(stream, codes.Unimplemented, fmt.Sprintf("unknown service %v", service)); err != nil {
|
||||
if trInfo != nil {
|
||||
trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true)
|
||||
trInfo.tr.SetError()
|
||||
}
|
||||
grpclog.Printf("grpc: Server.handleStream failed to write status: %v", err)
|
||||
}
|
||||
if trInfo != nil {
|
||||
trInfo.tr.Finish()
|
||||
}
|
||||
return
|
||||
}
|
||||
// Unary RPC or Streaming RPC?
|
||||
if md, ok := srv.md[method]; ok {
|
||||
s.processUnaryRPC(t, stream, srv, md)
|
||||
s.processUnaryRPC(t, stream, srv, md, trInfo)
|
||||
return
|
||||
}
|
||||
if sd, ok := srv.sd[method]; ok {
|
||||
s.processStreamingRPC(t, stream, srv, sd)
|
||||
s.processStreamingRPC(t, stream, srv, sd, trInfo)
|
||||
return
|
||||
}
|
||||
if trInfo != nil {
|
||||
trInfo.tr.LazyLog(&fmtStringer{"Unknown method %v", []interface{}{method}}, true)
|
||||
trInfo.tr.SetError()
|
||||
}
|
||||
if err := t.WriteStatus(stream, codes.Unimplemented, fmt.Sprintf("unknown method %v", method)); err != nil {
|
||||
if trInfo != nil {
|
||||
trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true)
|
||||
trInfo.tr.SetError()
|
||||
}
|
||||
grpclog.Printf("grpc: Server.handleStream failed to write status: %v", err)
|
||||
}
|
||||
if trInfo != nil {
|
||||
trInfo.tr.Finish()
|
||||
}
|
||||
}
|
||||
|
||||
// Stop stops the gRPC server. Once Stop returns, the server stops accepting
|
||||
// connection requests and closes all the connected connections.
|
||||
// Stop stops the gRPC server. It immediately closes all open
|
||||
// connections and listeners.
|
||||
// It cancels all active RPCs on the server side and the corresponding
|
||||
// pending RPCs on the client side will get notified by connection
|
||||
// errors.
|
||||
func (s *Server) Stop() {
|
||||
s.mu.Lock()
|
||||
listeners := s.lis
|
||||
|
@@ -372,22 +678,39 @@ func (s *Server) Stop() {
|
|||
cs := s.conns
|
||||
s.conns = nil
|
||||
s.mu.Unlock()
|
||||
|
||||
for lis := range listeners {
|
||||
lis.Close()
|
||||
}
|
||||
for c := range cs {
|
||||
c.Close()
|
||||
}
|
||||
|
||||
s.mu.Lock()
|
||||
if s.events != nil {
|
||||
s.events.Finish()
|
||||
s.events = nil
|
||||
}
|
||||
s.mu.Unlock()
|
||||
}
|
||||
|
||||
// TestingCloseConns closes all exiting transports but keeps s.lis accepting new
|
||||
// connections. This is for test only now.
|
||||
func (s *Server) TestingCloseConns() {
|
||||
func init() {
|
||||
internal.TestingCloseConns = func(arg interface{}) {
|
||||
arg.(*Server).testingCloseConns()
|
||||
}
|
||||
internal.TestingUseHandlerImpl = func(arg interface{}) {
|
||||
arg.(*Server).opts.useHandlerImpl = true
|
||||
}
|
||||
}
|
||||
|
||||
// testingCloseConns closes all existing transports but keeps s.lis
|
||||
// accepting new connections.
|
||||
func (s *Server) testingCloseConns() {
|
||||
s.mu.Lock()
|
||||
for c := range s.conns {
|
||||
c.Close()
|
||||
delete(s.conns, c)
|
||||
}
|
||||
s.conns = make(map[transport.ServerTransport]bool)
|
||||
s.mu.Unlock()
|
||||
}
|
||||
|
||||
|
|
207 vendor/google.golang.org/grpc/stream.go generated vendored
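The stream.go changes that follow thread compression, decompression and tracing through the client and server stream implementations. For orientation, a hedged sketch of how application code drives the client side of this API; the service path, message types and connection are placeholders, not part of this commit:

package echo

import (
	"golang.org/x/net/context"
	"google.golang.org/grpc"
)

// Placeholder message types; stand-ins for generated proto messages.
type EchoRequest struct{ Msg string }
type EchoReply struct{ Msg string }

// collect drives a client-streaming RPC through the ClientStream methods
// implemented below (SendMsg, CloseSend, RecvMsg).
func collect(ctx context.Context, conn *grpc.ClientConn, reqs []*EchoRequest) (*EchoReply, error) {
	desc := &grpc.StreamDesc{StreamName: "Collect", ClientStreams: true}
	cs, err := grpc.NewClientStream(ctx, desc, conn, "/echo.Echo/Collect")
	if err != nil {
		return nil, err
	}
	for _, r := range reqs {
		if err := cs.SendMsg(r); err != nil {
			return nil, err
		}
	}
	if err := cs.CloseSend(); err != nil {
		return nil, err
	}
	// For a client-streaming call RecvMsg returns the single reply; the
	// special-case path in RecvMsg below then expects io.EOF from the server.
	reply := new(EchoReply)
	if err := cs.RecvMsg(reply); err != nil {
		return nil, err
	}
	return reply, nil
}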
|
@@ -34,8 +34,10 @@
|
|||
package grpc
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"io"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
|
@ -69,7 +71,7 @@ type Stream interface {
|
|||
SendMsg(m interface{}) error
|
||||
// RecvMsg blocks until it receives a message or the stream is
|
||||
// done. On client side, it returns io.EOF when the stream is done. On
|
||||
// any other error, it aborts the streama nd returns an RPC status. On
|
||||
// any other error, it aborts the stream and returns an RPC status. On
|
||||
// server side, it simply returns the error to the caller.
|
||||
RecvMsg(m interface{}) error
|
||||
}
|
||||
|
@ -95,45 +97,84 @@ type ClientStream interface {
|
|||
// NewClientStream creates a new Stream for the client side. This is called
|
||||
// by generated code.
|
||||
func NewClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (ClientStream, error) {
|
||||
var (
|
||||
t transport.ClientTransport
|
||||
err error
|
||||
)
|
||||
t, err = cc.dopts.picker.Pick(ctx)
|
||||
if err != nil {
|
||||
return nil, toRPCErr(err)
|
||||
}
|
||||
// TODO(zhaoq): CallOption is omitted. Add support when it is needed.
|
||||
callHdr := &transport.CallHdr{
|
||||
Host: cc.authority,
|
||||
Method: method,
|
||||
Flush: desc.ServerStreams && desc.ClientStreams,
|
||||
}
|
||||
if cc.dopts.cp != nil {
|
||||
callHdr.SendCompress = cc.dopts.cp.Type()
|
||||
}
|
||||
cs := &clientStream{
|
||||
desc: desc,
|
||||
codec: cc.dopts.codec,
|
||||
desc: desc,
|
||||
codec: cc.dopts.codec,
|
||||
cp: cc.dopts.cp,
|
||||
dc: cc.dopts.dc,
|
||||
tracing: EnableTracing,
|
||||
}
|
||||
if EnableTracing {
|
||||
cs.traceInfo.tr = trace.New("Sent."+methodFamily(method), method)
|
||||
cs.traceInfo.firstLine.client = true
|
||||
if cc.dopts.cp != nil {
|
||||
callHdr.SendCompress = cc.dopts.cp.Type()
|
||||
cs.cbuf = new(bytes.Buffer)
|
||||
}
|
||||
if cs.tracing {
|
||||
cs.trInfo.tr = trace.New("grpc.Sent."+methodFamily(method), method)
|
||||
cs.trInfo.firstLine.client = true
|
||||
if deadline, ok := ctx.Deadline(); ok {
|
||||
cs.traceInfo.firstLine.deadline = deadline.Sub(time.Now())
|
||||
cs.trInfo.firstLine.deadline = deadline.Sub(time.Now())
|
||||
}
|
||||
cs.traceInfo.tr.LazyLog(&cs.traceInfo.firstLine, false)
|
||||
}
|
||||
t, _, err := cc.wait(ctx, 0)
|
||||
if err != nil {
|
||||
return nil, toRPCErr(err)
|
||||
cs.trInfo.tr.LazyLog(&cs.trInfo.firstLine, false)
|
||||
ctx = trace.NewContext(ctx, cs.trInfo.tr)
|
||||
}
|
||||
s, err := t.NewStream(ctx, callHdr)
|
||||
if err != nil {
|
||||
cs.finish(err)
|
||||
return nil, toRPCErr(err)
|
||||
}
|
||||
cs.t = t
|
||||
cs.s = s
|
||||
cs.p = &parser{s: s}
|
||||
cs.p = &parser{r: s}
|
||||
// Listen on ctx.Done() to detect cancellation when there is no pending
|
||||
// I/O operations on this stream.
|
||||
go func() {
|
||||
select {
|
||||
case <-t.Error():
|
||||
// Incur transport error, simply exit.
|
||||
case <-s.Context().Done():
|
||||
err := s.Context().Err()
|
||||
cs.finish(err)
|
||||
cs.closeTransportStream(transport.ContextErr(err))
|
||||
}
|
||||
}()
|
||||
return cs, nil
|
||||
}
|
||||
|
||||
// clientStream implements a client side Stream.
|
||||
type clientStream struct {
|
||||
t transport.ClientTransport
|
||||
s *transport.Stream
|
||||
p *parser
|
||||
desc *StreamDesc
|
||||
codec Codec
|
||||
traceInfo traceInfo
|
||||
t transport.ClientTransport
|
||||
s *transport.Stream
|
||||
p *parser
|
||||
desc *StreamDesc
|
||||
codec Codec
|
||||
cp Compressor
|
||||
cbuf *bytes.Buffer
|
||||
dc Decompressor
|
||||
|
||||
tracing bool // set to EnableTracing when the clientStream is created.
|
||||
|
||||
mu sync.Mutex
|
||||
closed bool
|
||||
// trInfo.tr is set when the clientStream is created (if EnableTracing is true),
|
||||
// and is set to nil when the clientStream's finish method is called.
|
||||
trInfo traceInfo
|
||||
}
|
||||
|
||||
func (cs *clientStream) Context() context.Context {
|
||||
|
@ -144,7 +185,7 @@ func (cs *clientStream) Header() (metadata.MD, error) {
|
|||
m, err := cs.s.Header()
|
||||
if err != nil {
|
||||
if _, ok := err.(transport.ConnectionError); !ok {
|
||||
cs.t.CloseStream(cs.s, err)
|
||||
cs.closeTransportStream(err)
|
||||
}
|
||||
}
|
||||
return m, err
|
||||
|
@ -155,16 +196,31 @@ func (cs *clientStream) Trailer() metadata.MD {
|
|||
}
|
||||
|
||||
func (cs *clientStream) SendMsg(m interface{}) (err error) {
|
||||
if cs.tracing {
|
||||
cs.mu.Lock()
|
||||
if cs.trInfo.tr != nil {
|
||||
cs.trInfo.tr.LazyLog(&payload{sent: true, msg: m}, true)
|
||||
}
|
||||
cs.mu.Unlock()
|
||||
}
|
||||
defer func() {
|
||||
if err != nil {
|
||||
cs.finish(err)
|
||||
}
|
||||
if err == nil || err == io.EOF {
|
||||
return
|
||||
}
|
||||
if _, ok := err.(transport.ConnectionError); !ok {
|
||||
cs.t.CloseStream(cs.s, err)
|
||||
cs.closeTransportStream(err)
|
||||
}
|
||||
err = toRPCErr(err)
|
||||
}()
|
||||
out, err := encode(cs.codec, m, compressionNone)
|
||||
out, err := encode(cs.codec, m, cs.cp, cs.cbuf)
|
||||
defer func() {
|
||||
if cs.cbuf != nil {
|
||||
cs.cbuf.Reset()
|
||||
}
|
||||
}()
|
||||
if err != nil {
|
||||
return transport.StreamErrorf(codes.Internal, "grpc: %v", err)
|
||||
}
|
||||
|
@ -172,29 +228,33 @@ func (cs *clientStream) SendMsg(m interface{}) (err error) {
|
|||
}
|
||||
|
||||
func (cs *clientStream) RecvMsg(m interface{}) (err error) {
|
||||
err = recv(cs.p, cs.codec, m)
|
||||
err = recv(cs.p, cs.codec, cs.s, cs.dc, m)
|
||||
defer func() {
|
||||
// err != nil indicates the termination of the stream.
|
||||
if EnableTracing && err != nil {
|
||||
if err != io.EOF {
|
||||
cs.traceInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true)
|
||||
cs.traceInfo.tr.SetError()
|
||||
}
|
||||
cs.traceInfo.tr.Finish()
|
||||
if err != nil {
|
||||
cs.finish(err)
|
||||
}
|
||||
}()
|
||||
if err == nil {
|
||||
if cs.tracing {
|
||||
cs.mu.Lock()
|
||||
if cs.trInfo.tr != nil {
|
||||
cs.trInfo.tr.LazyLog(&payload{sent: false, msg: m}, true)
|
||||
}
|
||||
cs.mu.Unlock()
|
||||
}
|
||||
if !cs.desc.ClientStreams || cs.desc.ServerStreams {
|
||||
return
|
||||
}
|
||||
// Special handling for client streaming rpc.
|
||||
err = recv(cs.p, cs.codec, m)
|
||||
cs.t.CloseStream(cs.s, err)
|
||||
err = recv(cs.p, cs.codec, cs.s, cs.dc, m)
|
||||
cs.closeTransportStream(err)
|
||||
if err == nil {
|
||||
return toRPCErr(errors.New("grpc: client streaming protocol violation: get <nil>, want <EOF>"))
|
||||
}
|
||||
if err == io.EOF {
|
||||
if cs.s.StatusCode() == codes.OK {
|
||||
cs.finish(err)
|
||||
return nil
|
||||
}
|
||||
return Errorf(cs.s.StatusCode(), cs.s.StatusDesc())
|
||||
|
@ -202,7 +262,7 @@ func (cs *clientStream) RecvMsg(m interface{}) (err error) {
|
|||
return toRPCErr(err)
|
||||
}
|
||||
if _, ok := err.(transport.ConnectionError); !ok {
|
||||
cs.t.CloseStream(cs.s, err)
|
||||
cs.closeTransportStream(err)
|
||||
}
|
||||
if err == io.EOF {
|
||||
if cs.s.StatusCode() == codes.OK {
|
||||
|
@ -216,16 +276,50 @@ func (cs *clientStream) RecvMsg(m interface{}) (err error) {
|
|||
|
||||
func (cs *clientStream) CloseSend() (err error) {
|
||||
err = cs.t.Write(cs.s, nil, &transport.Options{Last: true})
|
||||
defer func() {
|
||||
if err != nil {
|
||||
cs.finish(err)
|
||||
}
|
||||
}()
|
||||
if err == nil || err == io.EOF {
|
||||
return
|
||||
}
|
||||
if _, ok := err.(transport.ConnectionError); !ok {
|
||||
cs.t.CloseStream(cs.s, err)
|
||||
cs.closeTransportStream(err)
|
||||
}
|
||||
err = toRPCErr(err)
|
||||
return
|
||||
}
|
||||
|
||||
func (cs *clientStream) closeTransportStream(err error) {
|
||||
cs.mu.Lock()
|
||||
if cs.closed {
|
||||
cs.mu.Unlock()
|
||||
return
|
||||
}
|
||||
cs.closed = true
|
||||
cs.mu.Unlock()
|
||||
cs.t.CloseStream(cs.s, err)
|
||||
}
|
||||
|
||||
func (cs *clientStream) finish(err error) {
|
||||
if !cs.tracing {
|
||||
return
|
||||
}
|
||||
cs.mu.Lock()
|
||||
defer cs.mu.Unlock()
|
||||
if cs.trInfo.tr != nil {
|
||||
if err == nil || err == io.EOF {
|
||||
cs.trInfo.tr.LazyPrintf("RPC: [OK]")
|
||||
} else {
|
||||
cs.trInfo.tr.LazyPrintf("RPC: [%v]", err)
|
||||
cs.trInfo.tr.SetError()
|
||||
}
|
||||
cs.trInfo.tr.Finish()
|
||||
cs.trInfo.tr = nil
|
||||
}
|
||||
}
|
||||
|
||||
// ServerStream defines the interface a server stream has to satisfy.
|
||||
type ServerStream interface {
|
||||
// SendHeader sends the header metadata. It should not be called
|
||||
|
@ -244,8 +338,14 @@ type serverStream struct {
|
|||
s *transport.Stream
|
||||
p *parser
|
||||
codec Codec
|
||||
cp Compressor
|
||||
dc Decompressor
|
||||
cbuf *bytes.Buffer
|
||||
statusCode codes.Code
|
||||
statusDesc string
|
||||
trInfo *traceInfo
|
||||
|
||||
mu sync.Mutex // protects trInfo.tr after the service handler runs.
|
||||
}
|
||||
|
||||
func (ss *serverStream) Context() context.Context {
|
||||
|
@ -264,8 +364,27 @@ func (ss *serverStream) SetTrailer(md metadata.MD) {
|
|||
return
|
||||
}
|
||||
|
||||
func (ss *serverStream) SendMsg(m interface{}) error {
|
||||
out, err := encode(ss.codec, m, compressionNone)
|
||||
func (ss *serverStream) SendMsg(m interface{}) (err error) {
|
||||
defer func() {
|
||||
if ss.trInfo != nil {
|
||||
ss.mu.Lock()
|
||||
if ss.trInfo.tr != nil {
|
||||
if err == nil {
|
||||
ss.trInfo.tr.LazyLog(&payload{sent: true, msg: m}, true)
|
||||
} else {
|
||||
ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true)
|
||||
ss.trInfo.tr.SetError()
|
||||
}
|
||||
}
|
||||
ss.mu.Unlock()
|
||||
}
|
||||
}()
|
||||
out, err := encode(ss.codec, m, ss.cp, ss.cbuf)
|
||||
defer func() {
|
||||
if ss.cbuf != nil {
|
||||
ss.cbuf.Reset()
|
||||
}
|
||||
}()
|
||||
if err != nil {
|
||||
err = transport.StreamErrorf(codes.Internal, "grpc: %v", err)
|
||||
return err
|
||||
|
@ -273,6 +392,20 @@ func (ss *serverStream) SendMsg(m interface{}) error {
|
|||
return ss.t.Write(ss.s, out, &transport.Options{Last: false})
|
||||
}
|
||||
|
||||
func (ss *serverStream) RecvMsg(m interface{}) error {
|
||||
return recv(ss.p, ss.codec, m)
|
||||
func (ss *serverStream) RecvMsg(m interface{}) (err error) {
|
||||
defer func() {
|
||||
if ss.trInfo != nil {
|
||||
ss.mu.Lock()
|
||||
if ss.trInfo.tr != nil {
|
||||
if err == nil {
|
||||
ss.trInfo.tr.LazyLog(&payload{sent: false, msg: m}, true)
|
||||
} else if err != io.EOF {
|
||||
ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true)
|
||||
ss.trInfo.tr.SetError()
|
||||
}
|
||||
}
|
||||
ss.mu.Unlock()
|
||||
}
|
||||
}()
|
||||
return recv(ss.p, ss.codec, ss.s, ss.dc, m)
|
||||
}
|
||||
|
|
13 vendor/google.golang.org/grpc/trace.go generated vendored
|
@@ -93,12 +93,17 @@ func (f *firstLine) String() string {
|
|||
|
||||
// payload represents an RPC request or response payload.
|
||||
type payload struct {
|
||||
m interface{} // e.g. a proto.Message
|
||||
sent bool // whether this is an outgoing payload
|
||||
msg interface{} // e.g. a proto.Message
|
||||
// TODO(dsymonds): add stringifying info to codec, and limit how much we hold here?
|
||||
}
|
||||
|
||||
func (p payload) String() string {
|
||||
return fmt.Sprint(p.m)
|
||||
if p.sent {
|
||||
return fmt.Sprintf("sent: %v", p.msg)
|
||||
} else {
|
||||
return fmt.Sprintf("recv: %v", p.msg)
|
||||
}
|
||||
}
|
||||
|
||||
type fmtStringer struct {
|
||||
|
@ -109,3 +114,7 @@ type fmtStringer struct {
|
|||
func (f *fmtStringer) String() string {
|
||||
return fmt.Sprintf(f.format, f.a...)
|
||||
}
|
||||
|
||||
type stringer string
|
||||
|
||||
func (s stringer) String() string { return string(s) }
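These tracing helpers feed golang.org/x/net/trace; with EnableTracing left on, each RPC's firstLine, payload and error entries can be browsed over HTTP. A minimal sketch, assuming the process also serves plain HTTP on a port of your choosing:

package main

import (
	"log"
	"net/http"

	_ "golang.org/x/net/trace" // init() registers /debug/requests and /debug/events
)

func main() {
	// Visit http://localhost:6060/debug/requests to browse the per-RPC
	// traces built from the firstLine/payload/fmtStringer entries above.
	log.Fatal(http.ListenAndServe("localhost:6060", nil))
}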
|
||||
|
|
19 vendor/google.golang.org/grpc/transport/control.go generated vendored
|
@@ -37,7 +37,7 @@ import (
|
|||
"fmt"
|
||||
"sync"
|
||||
|
||||
"github.com/bradfitz/http2"
|
||||
"golang.org/x/net/http2"
|
||||
)
|
||||
|
||||
const (
|
||||
|
@ -61,8 +61,8 @@ func (windowUpdate) isItem() bool {
|
|||
}
|
||||
|
||||
type settings struct {
|
||||
ack bool
|
||||
setting []http2.Setting
|
||||
ack bool
|
||||
ss []http2.Setting
|
||||
}
|
||||
|
||||
func (settings) isItem() bool {
|
||||
|
@ -86,7 +86,8 @@ func (flushIO) isItem() bool {
|
|||
}
|
||||
|
||||
type ping struct {
|
||||
ack bool
|
||||
ack bool
|
||||
data [8]byte
|
||||
}
|
||||
|
||||
func (ping) isItem() bool {
|
||||
|
@ -104,8 +105,14 @@ type quotaPool struct {
|
|||
|
||||
// newQuotaPool creates a quotaPool which has quota q available to consume.
|
||||
func newQuotaPool(q int) *quotaPool {
|
||||
qb := "aPool{c: make(chan int, 1)}
|
||||
qb.c <- q
|
||||
qb := "aPool{
|
||||
c: make(chan int, 1),
|
||||
}
|
||||
if q > 0 {
|
||||
qb.c <- q
|
||||
} else {
|
||||
qb.quota = q
|
||||
}
|
||||
return qb
|
||||
}
|
||||
|
||||
|
|
377 vendor/google.golang.org/grpc/transport/handler_server.go generated vendored Normal file
|
@@ -0,0 +1,377 @@
|
|||
/*
|
||||
* Copyright 2016, Google Inc.
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions are
|
||||
* met:
|
||||
*
|
||||
* * Redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer.
|
||||
* * Redistributions in binary form must reproduce the above
|
||||
* copyright notice, this list of conditions and the following disclaimer
|
||||
* in the documentation and/or other materials provided with the
|
||||
* distribution.
|
||||
* * Neither the name of Google Inc. nor the names of its
|
||||
* contributors may be used to endorse or promote products derived from
|
||||
* this software without specific prior written permission.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*
|
||||
*/
|
||||
|
||||
// This file is the implementation of a gRPC server using HTTP/2 which
|
||||
// uses the standard Go http2 Server implementation (via the
|
||||
// http.Handler interface), rather than speaking low-level HTTP/2
|
||||
// frames itself. It is the implementation of *grpc.Server.ServeHTTP.
|
||||
|
||||
package transport
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
"net/http"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
"golang.org/x/net/http2"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/credentials"
|
||||
"google.golang.org/grpc/metadata"
|
||||
"google.golang.org/grpc/peer"
|
||||
)
|
||||
|
||||
// NewServerHandlerTransport returns a ServerTransport handling gRPC
|
||||
// from inside an http.Handler. It requires that the http Server
|
||||
// supports HTTP/2.
|
||||
func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request) (ServerTransport, error) {
|
||||
if r.ProtoMajor != 2 {
|
||||
return nil, errors.New("gRPC requires HTTP/2")
|
||||
}
|
||||
if r.Method != "POST" {
|
||||
return nil, errors.New("invalid gRPC request method")
|
||||
}
|
||||
if !strings.Contains(r.Header.Get("Content-Type"), "application/grpc") {
|
||||
return nil, errors.New("invalid gRPC request content-type")
|
||||
}
|
||||
if _, ok := w.(http.Flusher); !ok {
|
||||
return nil, errors.New("gRPC requires a ResponseWriter supporting http.Flusher")
|
||||
}
|
||||
if _, ok := w.(http.CloseNotifier); !ok {
|
||||
return nil, errors.New("gRPC requires a ResponseWriter supporting http.CloseNotifier")
|
||||
}
|
||||
|
||||
st := &serverHandlerTransport{
|
||||
rw: w,
|
||||
req: r,
|
||||
closedCh: make(chan struct{}),
|
||||
writes: make(chan func()),
|
||||
}
|
||||
|
||||
if v := r.Header.Get("grpc-timeout"); v != "" {
|
||||
to, err := timeoutDecode(v)
|
||||
if err != nil {
|
||||
return nil, StreamErrorf(codes.Internal, "malformed time-out: %v", err)
|
||||
}
|
||||
st.timeoutSet = true
|
||||
st.timeout = to
|
||||
}
|
||||
|
||||
var metakv []string
|
||||
for k, vv := range r.Header {
|
||||
k = strings.ToLower(k)
|
||||
if isReservedHeader(k) {
|
||||
continue
|
||||
}
|
||||
for _, v := range vv {
|
||||
if k == "user-agent" {
|
||||
// user-agent is special. Copying logic of http_util.go.
|
||||
if i := strings.LastIndex(v, " "); i == -1 {
|
||||
// There is no application user agent string being set
|
||||
continue
|
||||
} else {
|
||||
v = v[:i]
|
||||
}
|
||||
}
|
||||
metakv = append(metakv, k, v)
|
||||
|
||||
}
|
||||
}
|
||||
st.headerMD = metadata.Pairs(metakv...)
|
||||
|
||||
return st, nil
|
||||
}
|
||||
|
||||
// serverHandlerTransport is an implementation of ServerTransport
|
||||
// which replies to exactly one gRPC request (exactly one HTTP request),
|
||||
// using the net/http.Handler interface. This http.Handler is guaranteed
|
||||
// at this point to be speaking over HTTP/2, so it's able to speak valid
|
||||
// gRPC.
|
||||
type serverHandlerTransport struct {
|
||||
rw http.ResponseWriter
|
||||
req *http.Request
|
||||
timeoutSet bool
|
||||
timeout time.Duration
|
||||
didCommonHeaders bool
|
||||
|
||||
headerMD metadata.MD
|
||||
|
||||
closeOnce sync.Once
|
||||
closedCh chan struct{} // closed on Close
|
||||
|
||||
// writes is a channel of code to run serialized in the
|
||||
// ServeHTTP (HandleStreams) goroutine. The channel is closed
|
||||
// when WriteStatus is called.
|
||||
writes chan func()
|
||||
}
|
||||
|
||||
func (ht *serverHandlerTransport) Close() error {
|
||||
ht.closeOnce.Do(ht.closeCloseChanOnce)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ht *serverHandlerTransport) closeCloseChanOnce() { close(ht.closedCh) }
|
||||
|
||||
func (ht *serverHandlerTransport) RemoteAddr() net.Addr { return strAddr(ht.req.RemoteAddr) }
|
||||
|
||||
// strAddr is a net.Addr backed by either a TCP "ip:port" string, or
|
||||
// the empty string if unknown.
|
||||
type strAddr string
|
||||
|
||||
func (a strAddr) Network() string {
|
||||
if a != "" {
|
||||
// Per the documentation on net/http.Request.RemoteAddr, if this is
|
||||
// set, it's set to the IP:port of the peer (hence, TCP):
|
||||
// https://golang.org/pkg/net/http/#Request
|
||||
//
|
||||
// If we want to support Unix sockets later, we can
|
||||
// add our own grpc-specific convention within the
|
||||
// grpc codebase to set RemoteAddr to a different
|
||||
// format, or probably better: we can attach it to the
|
||||
// context and use that from serverHandlerTransport.RemoteAddr.
|
||||
return "tcp"
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (a strAddr) String() string { return string(a) }
|
||||
|
||||
// do runs fn in the ServeHTTP goroutine.
|
||||
func (ht *serverHandlerTransport) do(fn func()) error {
|
||||
select {
|
||||
case ht.writes <- fn:
|
||||
return nil
|
||||
case <-ht.closedCh:
|
||||
return ErrConnClosing
|
||||
}
|
||||
}
|
||||
|
||||
func (ht *serverHandlerTransport) WriteStatus(s *Stream, statusCode codes.Code, statusDesc string) error {
|
||||
err := ht.do(func() {
|
||||
ht.writeCommonHeaders(s)
|
||||
|
||||
// And flush, in case no header or body has been sent yet.
|
||||
// This forces a separation of headers and trailers if this is the
|
||||
// first call (for example, in end2end tests's TestNoService).
|
||||
ht.rw.(http.Flusher).Flush()
|
||||
|
||||
h := ht.rw.Header()
|
||||
h.Set("Grpc-Status", fmt.Sprintf("%d", statusCode))
|
||||
if statusDesc != "" {
|
||||
h.Set("Grpc-Message", statusDesc)
|
||||
}
|
||||
if md := s.Trailer(); len(md) > 0 {
|
||||
for k, vv := range md {
|
||||
for _, v := range vv {
|
||||
// http2 ResponseWriter mechanism to
|
||||
// send undeclared Trailers after the
|
||||
// headers have possibly been written.
|
||||
h.Add(http2.TrailerPrefix+k, v)
|
||||
}
|
||||
}
|
||||
}
|
||||
})
|
||||
close(ht.writes)
|
||||
return err
|
||||
}
|
||||
|
||||
// writeCommonHeaders sets common headers on the first write
|
||||
// call (Write, WriteHeader, or WriteStatus).
|
||||
func (ht *serverHandlerTransport) writeCommonHeaders(s *Stream) {
|
||||
if ht.didCommonHeaders {
|
||||
return
|
||||
}
|
||||
ht.didCommonHeaders = true
|
||||
|
||||
h := ht.rw.Header()
|
||||
h["Date"] = nil // suppress Date to make tests happy; TODO: restore
|
||||
h.Set("Content-Type", "application/grpc")
|
||||
|
||||
// Predeclare trailers we'll set later in WriteStatus (after the body).
|
||||
// This is a SHOULD in the HTTP RFC, and the way you add (known)
|
||||
// Trailers per the net/http.ResponseWriter contract.
|
||||
// See https://golang.org/pkg/net/http/#ResponseWriter
|
||||
// and https://golang.org/pkg/net/http/#example_ResponseWriter_trailers
|
||||
h.Add("Trailer", "Grpc-Status")
|
||||
h.Add("Trailer", "Grpc-Message")
|
||||
|
||||
if s.sendCompress != "" {
|
||||
h.Set("Grpc-Encoding", s.sendCompress)
|
||||
}
|
||||
}
|
||||
|
||||
func (ht *serverHandlerTransport) Write(s *Stream, data []byte, opts *Options) error {
|
||||
return ht.do(func() {
|
||||
ht.writeCommonHeaders(s)
|
||||
ht.rw.Write(data)
|
||||
if !opts.Delay {
|
||||
ht.rw.(http.Flusher).Flush()
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func (ht *serverHandlerTransport) WriteHeader(s *Stream, md metadata.MD) error {
|
||||
return ht.do(func() {
|
||||
ht.writeCommonHeaders(s)
|
||||
h := ht.rw.Header()
|
||||
for k, vv := range md {
|
||||
for _, v := range vv {
|
||||
h.Add(k, v)
|
||||
}
|
||||
}
|
||||
ht.rw.WriteHeader(200)
|
||||
ht.rw.(http.Flusher).Flush()
|
||||
})
|
||||
}
|
||||
|
||||
func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream)) {
|
||||
// With this transport type there will be exactly 1 stream: this HTTP request.
|
||||
|
||||
var ctx context.Context
|
||||
var cancel context.CancelFunc
|
||||
if ht.timeoutSet {
|
||||
ctx, cancel = context.WithTimeout(context.Background(), ht.timeout)
|
||||
} else {
|
||||
ctx, cancel = context.WithCancel(context.Background())
|
||||
}
|
||||
|
||||
// requestOver is closed when either the request's context is done
|
||||
// or the status has been written via WriteStatus.
|
||||
requestOver := make(chan struct{})
|
||||
|
||||
// clientGone receives a single value if peer is gone, either
|
||||
// because the underlying connection is dead or because the
|
||||
// peer sends an http2 RST_STREAM.
|
||||
clientGone := ht.rw.(http.CloseNotifier).CloseNotify()
|
||||
go func() {
|
||||
select {
|
||||
case <-requestOver:
|
||||
return
|
||||
case <-ht.closedCh:
|
||||
case <-clientGone:
|
||||
}
|
||||
cancel()
|
||||
}()
|
||||
|
||||
req := ht.req
|
||||
|
||||
s := &Stream{
|
||||
id: 0, // irrelevant
|
||||
windowHandler: func(int) {}, // nothing
|
||||
cancel: cancel,
|
||||
buf: newRecvBuffer(),
|
||||
st: ht,
|
||||
method: req.URL.Path,
|
||||
recvCompress: req.Header.Get("grpc-encoding"),
|
||||
}
|
||||
pr := &peer.Peer{
|
||||
Addr: ht.RemoteAddr(),
|
||||
}
|
||||
if req.TLS != nil {
|
||||
pr.AuthInfo = credentials.TLSInfo{*req.TLS}
|
||||
}
|
||||
ctx = metadata.NewContext(ctx, ht.headerMD)
|
||||
ctx = peer.NewContext(ctx, pr)
|
||||
s.ctx = newContextWithStream(ctx, s)
|
||||
s.dec = &recvBufferReader{ctx: s.ctx, recv: s.buf}
|
||||
|
||||
// readerDone is closed when the Body.Read-ing goroutine exits.
|
||||
readerDone := make(chan struct{})
|
||||
go func() {
|
||||
defer close(readerDone)
|
||||
for {
|
||||
buf := make([]byte, 1024) // TODO: minimize garbage, optimize recvBuffer code/ownership
|
||||
n, err := req.Body.Read(buf)
|
||||
if n > 0 {
|
||||
s.buf.put(&recvMsg{data: buf[:n]})
|
||||
}
|
||||
if err != nil {
|
||||
s.buf.put(&recvMsg{err: mapRecvMsgError(err)})
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
// startStream is provided by the *grpc.Server's serveStreams.
|
||||
// It starts a goroutine serving s and exits immediately.
|
||||
// The goroutine that is started is the one that then calls
|
||||
// into ht, calling WriteHeader, Write, WriteStatus, Close, etc.
|
||||
startStream(s)
|
||||
|
||||
ht.runStream()
|
||||
close(requestOver)
|
||||
|
||||
// Wait for reading goroutine to finish.
|
||||
req.Body.Close()
|
||||
<-readerDone
|
||||
}
|
||||
|
||||
func (ht *serverHandlerTransport) runStream() {
|
||||
for {
|
||||
select {
|
||||
case fn, ok := <-ht.writes:
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
fn()
|
||||
case <-ht.closedCh:
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// mapRecvMsgError maps the non-nil err into the appropriate
|
||||
// error value as expected by callers of *grpc.parser.recvMsg.
|
||||
// In particular, it can only be:
|
||||
// * io.EOF
|
||||
// * io.ErrUnexpectedEOF
|
||||
// * of type transport.ConnectionError
|
||||
// * of type transport.StreamError
|
||||
func mapRecvMsgError(err error) error {
|
||||
if err == io.EOF || err == io.ErrUnexpectedEOF {
|
||||
return err
|
||||
}
|
||||
if se, ok := err.(http2.StreamError); ok {
|
||||
if code, ok := http2ErrConvTab[se.Code]; ok {
|
||||
return StreamError{
|
||||
Code: code,
|
||||
Desc: se.Error(),
|
||||
}
|
||||
}
|
||||
}
|
||||
return ConnectionError{Desc: err.Error()}
|
||||
}
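This transport is what backs *grpc.Server.ServeHTTP, so a gRPC server can be mounted on an ordinary net/http server. A hedged sketch of that wiring; the address and certificate paths are placeholders, and TLS is required because net/http only speaks HTTP/2 over TLS:

package main

import (
	"log"
	"net/http"

	"golang.org/x/net/http2"
	"google.golang.org/grpc"
)

func main() {
	gs := grpc.NewServer()
	// ... register services on gs ...

	// *grpc.Server is an http.Handler; requests arriving here are served
	// through the serverHandlerTransport defined above.
	srv := &http.Server{Addr: ":8443", Handler: gs}
	if err := http2.ConfigureServer(srv, &http2.Server{}); err != nil {
		log.Fatal(err)
	}
	log.Fatal(srv.ListenAndServeTLS("server.crt", "server.key"))
}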
|
343 vendor/google.golang.org/grpc/transport/http2_client.go generated vendored
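The client-side changes that follow are driven by the per-call context and the options on the ClientConn. As a point of reference, a minimal dial-and-deadline sketch (address is a placeholder); the context deadline is what NewStream encodes into the grpc-timeout header:

package main

import (
	"log"
	"time"

	"golang.org/x/net/context"
	"google.golang.org/grpc"
)

func main() {
	// Placeholder address; WithInsecure skips TLS for the sketch.
	conn, err := grpc.Dial("localhost:8080", grpc.WithInsecure())
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	// A deadline on the call context becomes the grpc-timeout header
	// (for example "2000m") on the stream opened by NewStream below.
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()
	_ = ctx // pass to generated client methods or grpc.Invoke
}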
|
@@ -39,23 +39,27 @@ import (
|
|||
"io"
|
||||
"math"
|
||||
"net"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/bradfitz/http2"
|
||||
"github.com/bradfitz/http2/hpack"
|
||||
"golang.org/x/net/context"
|
||||
"golang.org/x/net/http2"
|
||||
"golang.org/x/net/http2/hpack"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/credentials"
|
||||
"google.golang.org/grpc/grpclog"
|
||||
"google.golang.org/grpc/metadata"
|
||||
"google.golang.org/grpc/peer"
|
||||
)
|
||||
|
||||
// http2Client implements the ClientTransport interface with HTTP2.
|
||||
type http2Client struct {
|
||||
target string // server name/addr
|
||||
conn net.Conn // underlying communication channel
|
||||
nextID uint32 // the next stream ID to be used
|
||||
target string // server name/addr
|
||||
userAgent string
|
||||
conn net.Conn // underlying communication channel
|
||||
authInfo credentials.AuthInfo // auth info about the connection
|
||||
nextID uint32 // the next stream ID to be used
|
||||
|
||||
// writableChan synchronizes write access to the transport.
|
||||
// A writer acquires the write lock by sending a value on writableChan
|
||||
|
@ -113,6 +117,7 @@ func newHTTP2Client(addr string, opts *ConnectOptions) (_ ClientTransport, err e
|
|||
if connErr != nil {
|
||||
return nil, ConnectionErrorf("transport: %v", connErr)
|
||||
}
|
||||
var authInfo credentials.AuthInfo
|
||||
for _, c := range opts.AuthOptions {
|
||||
if ccreds, ok := c.(credentials.TransportAuthenticator); ok {
|
||||
scheme = "https"
|
||||
|
@ -123,7 +128,7 @@ func newHTTP2Client(addr string, opts *ConnectOptions) (_ ClientTransport, err e
|
|||
if timeout > 0 {
|
||||
timeout -= time.Since(startT)
|
||||
}
|
||||
conn, connErr = ccreds.ClientHandshake(addr, conn, timeout)
|
||||
conn, authInfo, connErr = ccreds.ClientHandshake(addr, conn, timeout)
|
||||
break
|
||||
}
|
||||
}
|
||||
|
@ -158,10 +163,16 @@ func newHTTP2Client(addr string, opts *ConnectOptions) (_ ClientTransport, err e
|
|||
return nil, ConnectionErrorf("transport: %v", err)
|
||||
}
|
||||
}
|
||||
ua := primaryUA
|
||||
if opts.UserAgent != "" {
|
||||
ua = opts.UserAgent + " " + ua
|
||||
}
|
||||
var buf bytes.Buffer
|
||||
t := &http2Client{
|
||||
target: addr,
|
||||
conn: conn,
|
||||
target: addr,
|
||||
userAgent: ua,
|
||||
conn: conn,
|
||||
authInfo: authInfo,
|
||||
// The client initiated stream id is odd starting from 1.
|
||||
nextID: 1,
|
||||
writableChan: make(chan int, 1),
|
||||
|
@ -190,7 +201,7 @@ func newHTTP2Client(addr string, opts *ConnectOptions) (_ ClientTransport, err e
|
|||
return t, nil
|
||||
}
|
||||
|
||||
func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr, sq bool) *Stream {
|
||||
func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *Stream {
|
||||
fc := &inFlow{
|
||||
limit: initialWindowSize,
|
||||
conn: t.fc,
|
||||
|
@ -199,8 +210,8 @@ func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr, sq bool)
|
|||
s := &Stream{
|
||||
id: t.nextID,
|
||||
method: callHdr.Method,
|
||||
sendCompress: callHdr.SendCompress,
|
||||
buf: newRecvBuffer(),
|
||||
updateStreams: sq,
|
||||
fc: fc,
|
||||
sendQuotaPool: newQuotaPool(int(t.streamSendQuota)),
|
||||
headerChan: make(chan struct{}),
|
||||
|
@ -229,9 +240,30 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea
|
|||
return nil, ContextErr(context.DeadlineExceeded)
|
||||
}
|
||||
}
|
||||
pr := &peer.Peer{
|
||||
Addr: t.conn.RemoteAddr(),
|
||||
}
|
||||
// Attach Auth info if there is any.
|
||||
if t.authInfo != nil {
|
||||
pr.AuthInfo = t.authInfo
|
||||
}
|
||||
ctx = peer.NewContext(ctx, pr)
|
||||
authData := make(map[string]string)
|
||||
for _, c := range t.authCreds {
|
||||
data, err := c.GetRequestMetadata(ctx)
|
||||
// Construct URI required to get auth request metadata.
|
||||
var port string
|
||||
if pos := strings.LastIndex(t.target, ":"); pos != -1 {
|
||||
// Omit port if it is the default one.
|
||||
if t.target[pos+1:] != "443" {
|
||||
port = ":" + t.target[pos+1:]
|
||||
}
|
||||
}
|
||||
pos := strings.LastIndex(callHdr.Method, "/")
|
||||
if pos == -1 {
|
||||
return nil, StreamErrorf(codes.InvalidArgument, "transport: malformed method name: %q", callHdr.Method)
|
||||
}
|
||||
audience := "https://" + callHdr.Host + port + callHdr.Method[:pos]
|
||||
data, err := c.GetRequestMetadata(ctx, audience)
|
||||
if err != nil {
|
||||
return nil, StreamErrorf(codes.InvalidArgument, "transport: %v", err)
|
||||
}
|
||||
|
@ -261,9 +293,24 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea
|
|||
return nil, err
|
||||
}
|
||||
t.mu.Lock()
|
||||
s := t.newStream(ctx, callHdr, checkStreamsQuota)
|
||||
if t.state != reachable {
|
||||
t.mu.Unlock()
|
||||
return nil, ErrConnClosing
|
||||
}
|
||||
s := t.newStream(ctx, callHdr)
|
||||
t.activeStreams[s.id] = s
|
||||
|
||||
// This stream is not counted when applySettings(...) initializes t.streamsQuota.
|
||||
// Reset t.streamsQuota to the right value.
|
||||
var reset bool
|
||||
if !checkStreamsQuota && t.streamsQuota != nil {
|
||||
reset = true
|
||||
}
|
||||
t.mu.Unlock()
|
||||
if reset {
|
||||
t.streamsQuota.reset(-1)
|
||||
}
|
||||
|
||||
// HPACK encodes various headers. Note that once WriteField(...) is
|
||||
// called, the corresponding headers/continuation frame has to be sent
|
||||
// because hpack.Encoder is stateful.
|
||||
|
@ -273,7 +320,12 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea
|
|||
t.hEnc.WriteField(hpack.HeaderField{Name: ":path", Value: callHdr.Method})
|
||||
t.hEnc.WriteField(hpack.HeaderField{Name: ":authority", Value: callHdr.Host})
|
||||
t.hEnc.WriteField(hpack.HeaderField{Name: "content-type", Value: "application/grpc"})
|
||||
t.hEnc.WriteField(hpack.HeaderField{Name: "user-agent", Value: t.userAgent})
|
||||
t.hEnc.WriteField(hpack.HeaderField{Name: "te", Value: "trailers"})
|
||||
|
||||
if callHdr.SendCompress != "" {
|
||||
t.hEnc.WriteField(hpack.HeaderField{Name: "grpc-encoding", Value: callHdr.SendCompress})
|
||||
}
|
||||
if timeout > 0 {
|
||||
t.hEnc.WriteField(hpack.HeaderField{Name: "grpc-timeout", Value: timeoutEncode(timeout)})
|
||||
}
|
||||
|
@ -287,7 +339,9 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea
|
|||
if md, ok := metadata.FromContext(ctx); ok {
|
||||
hasMD = true
|
||||
for k, v := range md {
|
||||
t.hEnc.WriteField(hpack.HeaderField{Name: k, Value: v})
|
||||
for _, entry := range v {
|
||||
t.hEnc.WriteField(hpack.HeaderField{Name: k, Value: entry})
|
||||
}
|
||||
}
|
||||
}
|
||||
first := true
|
||||
|
@ -299,6 +353,10 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea
|
|||
} else {
|
||||
endHeaders = true
|
||||
}
|
||||
var flush bool
|
||||
if endHeaders && (hasMD || callHdr.Flush) {
|
||||
flush = true
|
||||
}
|
||||
if first {
|
||||
// Sends a HeadersFrame to server to start a new stream.
|
||||
p := http2.HeadersFrameParam{
|
||||
|
@ -310,11 +368,11 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea
|
|||
// Do a force flush for the buffered frames iff it is the last headers frame
|
||||
// and there is header metadata to be sent. Otherwise, there is no flushing until
|
||||
// the corresponding data frame is written.
|
||||
err = t.framer.writeHeaders(hasMD && endHeaders, p)
|
||||
err = t.framer.writeHeaders(flush, p)
|
||||
first = false
|
||||
} else {
|
||||
// Sends Continuation frames for the leftover headers.
|
||||
err = t.framer.writeContinuation(hasMD && endHeaders, s.id, endHeaders, t.hBuf.Next(size))
|
||||
err = t.framer.writeContinuation(flush, s.id, endHeaders, t.hBuf.Next(size))
|
||||
}
|
||||
if err != nil {
|
||||
t.notifyError(err)
|
||||
|
@ -328,12 +386,21 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Strea
|
|||
// CloseStream clears the footprint of a stream when the stream is not needed any more.
|
||||
// This must not be executed in reader's goroutine.
|
||||
func (t *http2Client) CloseStream(s *Stream, err error) {
|
||||
var updateStreams bool
|
||||
t.mu.Lock()
|
||||
if t.streamsQuota != nil {
|
||||
updateStreams = true
|
||||
}
|
||||
delete(t.activeStreams, s.id)
|
||||
t.mu.Unlock()
|
||||
if s.updateStreams {
|
||||
if updateStreams {
|
||||
t.streamsQuota.add(1)
|
||||
}
|
||||
// In case stream sending and receiving are invoked in separate
|
||||
// goroutines (e.g., bi-directional streaming), the caller needs
|
||||
// to call cancel on the stream to interrupt the blocking on
|
||||
// other goroutines.
|
||||
s.cancel()
|
||||
s.mu.Lock()
|
||||
if q := s.fc.restoreConn(); q > 0 {
|
||||
t.controlBuf.put(&windowUpdate{0, q})
|
||||
|
@ -348,11 +415,6 @@ func (t *http2Client) CloseStream(s *Stream, err error) {
|
|||
}
|
||||
s.state = streamDone
|
||||
s.mu.Unlock()
|
||||
// In case stream sending and receiving are invoked in separate
|
||||
// goroutines (e.g., bi-directional streaming), the caller needs
|
||||
// to call cancel on the stream to interrupt the blocking on
|
||||
// other goroutines.
|
||||
s.cancel()
|
||||
if _, ok := err.(StreamError); ok {
|
||||
t.controlBuf.put(&resetStream{s.id, http2.ErrCodeCancel})
|
||||
}
|
||||
|
@ -488,14 +550,8 @@ func (t *http2Client) Write(s *Stream, data []byte, opts *Options) error {
|
|||
func (t *http2Client) getStream(f http2.Frame) (*Stream, bool) {
|
||||
t.mu.Lock()
|
||||
defer t.mu.Unlock()
|
||||
if t.activeStreams == nil {
|
||||
// The transport is closing.
|
||||
return nil, false
|
||||
}
|
||||
if s, ok := t.activeStreams[f.Header().StreamID]; ok {
|
||||
return s, true
|
||||
}
|
||||
return nil, false
|
||||
s, ok := t.activeStreams[f.Header().StreamID]
|
||||
return s, ok
|
||||
}
|
||||
|
||||
// updateWindow adjusts the inbound quota for the stream and the transport.
|
||||
|
@ -518,30 +574,46 @@ func (t *http2Client) handleData(f *http2.DataFrame) {
|
|||
return
|
||||
}
|
||||
size := len(f.Data())
|
||||
if err := s.fc.onData(uint32(size)); err != nil {
|
||||
if _, ok := err.(ConnectionError); ok {
|
||||
t.notifyError(err)
|
||||
return
|
||||
}
|
||||
s.mu.Lock()
|
||||
if s.state == streamDone {
|
||||
if size > 0 {
|
||||
if err := s.fc.onData(uint32(size)); err != nil {
|
||||
if _, ok := err.(ConnectionError); ok {
|
||||
t.notifyError(err)
|
||||
return
|
||||
}
|
||||
s.mu.Lock()
|
||||
if s.state == streamDone {
|
||||
s.mu.Unlock()
|
||||
return
|
||||
}
|
||||
s.state = streamDone
|
||||
s.statusCode = codes.Internal
|
||||
s.statusDesc = err.Error()
|
||||
s.mu.Unlock()
|
||||
s.write(recvMsg{err: io.EOF})
|
||||
t.controlBuf.put(&resetStream{s.id, http2.ErrCodeFlowControl})
|
||||
return
|
||||
}
|
||||
s.state = streamDone
|
||||
// TODO(bradfitz, zhaoq): A copy is required here because there is no
|
||||
// guarantee f.Data() is consumed before the arrival of next frame.
|
||||
// Can this copy be eliminated?
|
||||
data := make([]byte, size)
|
||||
copy(data, f.Data())
|
||||
s.write(recvMsg{data: data})
|
||||
}
|
||||
// The server has closed the stream without sending trailers. Record that
|
||||
// the read direction is closed, and set the status appropriately.
|
||||
if f.FrameHeader.Flags.Has(http2.FlagDataEndStream) {
|
||||
s.mu.Lock()
|
||||
if s.state == streamWriteDone {
|
||||
s.state = streamDone
|
||||
} else {
|
||||
s.state = streamReadDone
|
||||
}
|
||||
s.statusCode = codes.Internal
|
||||
s.statusDesc = err.Error()
|
||||
s.statusDesc = "server closed the stream without sending trailers"
|
||||
s.mu.Unlock()
|
||||
s.write(recvMsg{err: io.EOF})
|
||||
t.controlBuf.put(&resetStream{s.id, http2.ErrCodeFlowControl})
|
||||
return
|
||||
}
|
||||
// TODO(bradfitz, zhaoq): A copy is required here because there is no
|
||||
// guarantee f.Data() is consumed before the arrival of next frame.
|
||||
// Can this copy be eliminated?
|
||||
data := make([]byte, size)
|
||||
copy(data, f.Data())
|
||||
s.write(recvMsg{data: data})
|
||||
}
|
||||
|
||||
func (t *http2Client) handleRSTStream(f *http2.RSTStreamFrame) {
|
||||
|
@ -555,7 +627,11 @@ func (t *http2Client) handleRSTStream(f *http2.RSTStreamFrame) {
|
|||
return
|
||||
}
|
||||
s.state = streamDone
|
||||
s.statusCode, ok = http2RSTErrConvTab[http2.ErrCode(f.ErrCode)]
|
||||
if !s.headerDone {
|
||||
close(s.headerChan)
|
||||
s.headerDone = true
|
||||
}
|
||||
s.statusCode, ok = http2ErrConvTab[http2.ErrCode(f.ErrCode)]
|
||||
if !ok {
|
||||
grpclog.Println("transport: http2Client.handleRSTStream found no mapped gRPC status for the received http2 error ", f.ErrCode)
|
||||
}
|
||||
|
@ -567,48 +643,23 @@ func (t *http2Client) handleSettings(f *http2.SettingsFrame) {
|
|||
if f.IsAck() {
|
||||
return
|
||||
}
|
||||
var ss []http2.Setting
|
||||
f.ForeachSetting(func(s http2.Setting) error {
|
||||
if v, ok := f.Value(s.ID); ok {
|
||||
switch s.ID {
|
||||
case http2.SettingMaxConcurrentStreams:
|
||||
// TODO(zhaoq): This is a hack to avoid significant refactoring of the
|
||||
// code to deal with the unrealistic int32 overflow. Probably will try
|
||||
// to find a better way to handle this later.
|
||||
if v > math.MaxInt32 {
|
||||
v = math.MaxInt32
|
||||
}
|
||||
t.mu.Lock()
|
||||
reset := t.streamsQuota != nil
|
||||
if !reset {
|
||||
t.streamsQuota = newQuotaPool(int(v))
|
||||
}
|
||||
ms := t.maxStreams
|
||||
t.maxStreams = int(v)
|
||||
t.mu.Unlock()
|
||||
if reset {
|
||||
t.streamsQuota.reset(int(v) - ms)
|
||||
}
|
||||
case http2.SettingInitialWindowSize:
|
||||
t.mu.Lock()
|
||||
for _, s := range t.activeStreams {
|
||||
// Adjust the sending quota for each s.
|
||||
s.sendQuotaPool.reset(int(v - t.streamSendQuota))
|
||||
}
|
||||
t.streamSendQuota = v
|
||||
t.mu.Unlock()
|
||||
}
|
||||
}
|
||||
ss = append(ss, s)
|
||||
return nil
|
||||
})
|
||||
t.controlBuf.put(&settings{ack: true})
|
||||
// The settings will be applied once the ack is sent.
|
||||
t.controlBuf.put(&settings{ack: true, ss: ss})
|
||||
}
|
||||
|
||||
func (t *http2Client) handlePing(f *http2.PingFrame) {
|
||||
t.controlBuf.put(&ping{true})
|
||||
pingAck := &ping{ack: true}
|
||||
copy(pingAck.data[:], f.Data[:])
|
||||
t.controlBuf.put(pingAck)
|
||||
}
|
||||
|
||||
func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) {
|
||||
// TODO(zhaoq): GoAwayFrame handler to be implemented"
|
||||
// TODO(zhaoq): GoAwayFrame handler to be implemented
|
||||
}
|
||||
|
||||
func (t *http2Client) handleWindowUpdate(f *http2.WindowUpdateFrame) {
|
||||
|
@ -623,52 +674,59 @@ func (t *http2Client) handleWindowUpdate(f *http2.WindowUpdateFrame) {
|
|||
}
|
||||
}
|
||||
|
||||
// operateHeader takes action on the decoded headers. It returns the current
|
||||
// stream if there are remaining headers on the wire (in the following
|
||||
// Continuation frame).
|
||||
func (t *http2Client) operateHeaders(hDec *hpackDecoder, s *Stream, frame headerFrame, endStream bool) (pendingStream *Stream) {
|
||||
defer func() {
|
||||
if pendingStream == nil {
|
||||
hDec.state = decodeState{}
|
||||
}
|
||||
}()
|
||||
endHeaders, err := hDec.decodeClientHTTP2Headers(frame)
|
||||
if s == nil {
|
||||
// s has been closed.
|
||||
return nil
|
||||
// operateHeaders takes action on the decoded headers.
|
||||
func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) {
|
||||
s, ok := t.getStream(frame)
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
if err != nil {
|
||||
s.write(recvMsg{err: err})
|
||||
var state decodeState
|
||||
for _, hf := range frame.Fields {
|
||||
state.processHeaderField(hf)
|
||||
}
|
||||
if state.err != nil {
|
||||
s.write(recvMsg{err: state.err})
|
||||
// Something wrong. Stops reading even when there is remaining.
|
||||
return nil
|
||||
}
|
||||
if !endHeaders {
|
||||
return s
|
||||
return
|
||||
}
|
||||
|
||||
endStream := frame.StreamEnded()
|
||||
|
||||
s.mu.Lock()
|
||||
if !endStream {
|
||||
s.recvCompress = state.encoding
|
||||
}
|
||||
if !s.headerDone {
|
||||
if !endStream && len(hDec.state.mdata) > 0 {
|
||||
s.header = hDec.state.mdata
|
||||
if !endStream && len(state.mdata) > 0 {
|
||||
s.header = state.mdata
|
||||
}
|
||||
close(s.headerChan)
|
||||
s.headerDone = true
|
||||
}
|
||||
if !endStream || s.state == streamDone {
|
||||
s.mu.Unlock()
|
||||
return nil
|
||||
return
|
||||
}
|
||||
|
||||
if len(hDec.state.mdata) > 0 {
|
||||
s.trailer = hDec.state.mdata
|
||||
if len(state.mdata) > 0 {
|
||||
s.trailer = state.mdata
|
||||
}
|
||||
s.state = streamDone
|
||||
s.statusCode = hDec.state.statusCode
|
||||
s.statusDesc = hDec.state.statusDesc
|
||||
s.statusCode = state.statusCode
|
||||
s.statusDesc = state.statusDesc
|
||||
s.mu.Unlock()
|
||||
|
||||
s.write(recvMsg{err: io.EOF})
|
||||
return nil
|
||||
}
|
||||
|
||||
func handleMalformedHTTP2(s *Stream, err http2.StreamError) {
|
||||
s.mu.Lock()
|
||||
if !s.headerDone {
|
||||
close(s.headerChan)
|
||||
s.headerDone = true
|
||||
}
|
||||
s.mu.Unlock()
|
||||
s.write(recvMsg{err: StreamErrorf(http2ErrConvTab[err.Code], "%v", err)})
|
||||
}
|
||||
|
||||
// reader runs as a separate goroutine in charge of reading data from network
|
||||
|
@ -691,25 +749,30 @@ func (t *http2Client) reader() {
|
|||
}
|
||||
t.handleSettings(sf)
|
||||
|
||||
hDec := newHPACKDecoder()
|
||||
var curStream *Stream
|
||||
// loop to keep reading incoming messages on this transport.
|
||||
for {
|
||||
frame, err := t.framer.readFrame()
|
||||
if err != nil {
|
||||
t.notifyError(err)
|
||||
return
|
||||
// Abort an active stream if the http2.Framer returns a
|
||||
// http2.StreamError. This can happen only if the server's response
|
||||
// is malformed http2.
|
||||
if se, ok := err.(http2.StreamError); ok {
|
||||
t.mu.Lock()
|
||||
s := t.activeStreams[se.StreamID]
|
||||
t.mu.Unlock()
|
||||
if s != nil {
|
||||
handleMalformedHTTP2(s, se)
|
||||
}
|
||||
continue
|
||||
} else {
|
||||
// Transport error.
|
||||
t.notifyError(err)
|
||||
return
|
||||
}
|
||||
}
|
||||
switch frame := frame.(type) {
|
||||
case *http2.HeadersFrame:
|
||||
// operateHeaders has to be invoked regardless the value of curStream
|
||||
// because the HPACK decoder needs to be updated using the received
|
||||
// headers.
|
||||
curStream, _ = t.getStream(frame)
|
||||
endStream := frame.Header().Flags.Has(http2.FlagHeadersEndStream)
|
||||
curStream = t.operateHeaders(hDec, curStream, frame, endStream)
|
||||
case *http2.ContinuationFrame:
|
||||
curStream = t.operateHeaders(hDec, curStream, frame, false)
|
||||
case *http2.MetaHeadersFrame:
|
||||
t.operateHeaders(frame)
|
||||
case *http2.DataFrame:
|
||||
t.handleData(frame)
|
||||
case *http2.RSTStreamFrame:
|
||||
|
@ -728,6 +791,39 @@ func (t *http2Client) reader() {
|
|||
}
|
||||
}
|
||||
|
||||
func (t *http2Client) applySettings(ss []http2.Setting) {
|
||||
for _, s := range ss {
|
||||
switch s.ID {
|
||||
case http2.SettingMaxConcurrentStreams:
|
||||
// TODO(zhaoq): This is a hack to avoid significant refactoring of the
|
||||
// code to deal with the unrealistic int32 overflow. Probably will try
|
||||
// to find a better way to handle this later.
|
||||
if s.Val > math.MaxInt32 {
|
||||
s.Val = math.MaxInt32
|
||||
}
|
||||
t.mu.Lock()
|
||||
reset := t.streamsQuota != nil
|
||||
if !reset {
|
||||
t.streamsQuota = newQuotaPool(int(s.Val) - len(t.activeStreams))
|
||||
}
|
||||
ms := t.maxStreams
|
||||
t.maxStreams = int(s.Val)
|
||||
t.mu.Unlock()
|
||||
if reset {
|
||||
t.streamsQuota.reset(int(s.Val) - ms)
|
||||
}
|
||||
case http2.SettingInitialWindowSize:
|
||||
t.mu.Lock()
|
||||
for _, stream := range t.activeStreams {
|
||||
// Adjust the sending quota for each stream.
|
||||
stream.sendQuotaPool.reset(int(s.Val - t.streamSendQuota))
|
||||
}
|
||||
t.streamSendQuota = s.Val
|
||||
t.mu.Unlock()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// controller running in a separate goroutine takes charge of sending control
|
||||
// frames (e.g., window update, reset stream, setting, etc.) to the server.
|
||||
func (t *http2Client) controller() {
|
||||
|
@ -743,17 +839,16 @@ func (t *http2Client) controller() {
|
|||
case *settings:
|
||||
if i.ack {
|
||||
t.framer.writeSettingsAck(true)
|
||||
t.applySettings(i.ss)
|
||||
} else {
|
||||
t.framer.writeSettings(true, i.setting...)
|
||||
t.framer.writeSettings(true, i.ss...)
|
||||
}
|
||||
case *resetStream:
|
||||
t.framer.writeRSTStream(true, i.streamID, i.code)
|
||||
case *flushIO:
|
||||
t.framer.flushWrite()
|
||||
case *ping:
|
||||
// TODO(zhaoq): Ack with all-0 data now. will change to some
|
||||
// meaningful content when this is actually in use.
|
||||
t.framer.writePing(true, i.ack, [8]byte{})
|
||||
t.framer.writePing(true, i.ack, i.data)
|
||||
default:
|
||||
grpclog.Printf("transport: http2Client.controller got unexpected item type %v\n", i)
|
||||
}
|
||||
|
|
247 vendor/google.golang.org/grpc/transport/http2_server.go generated vendored
|
@@ -42,12 +42,14 @@ import (
"strconv"
"sync"

"github.com/bradfitz/http2"
"github.com/bradfitz/http2/hpack"
"golang.org/x/net/context"
"golang.org/x/net/http2"
"golang.org/x/net/http2/hpack"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/grpclog"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/peer"
)

// ErrIllegalHeaderWrite indicates that setting header is illegal because of
@@ -57,10 +59,11 @@ var ErrIllegalHeaderWrite = errors.New("transport: the stream is done or WriteHe
// http2Server implements the ServerTransport interface with HTTP2.
type http2Server struct {
conn net.Conn
maxStreamID uint32 // max stream ID ever seen
maxStreamID uint32 // max stream ID ever seen
authInfo credentials.AuthInfo // auth info about the connection
// writableChan synchronizes write access to the transport.
// A writer acquires the write lock by sending a value on writableChan
// and releases it by receiving from writableChan.
// A writer acquires the write lock by receiving a value on writableChan
// and releases it by sending on writableChan.
writableChan chan int
// shutdownChan is closed when Close is called.
// Blocking operations should select on shutdownChan to avoid
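The corrected writableChan comment above describes a one-slot channel used as a write lock: acquire by receiving, release by sending. A minimal, self-contained sketch of that pattern (not the grpc-go code itself):

package main

import "fmt"

func main() {
	writableChan := make(chan int, 1)
	writableChan <- 0 // the lock starts out free

	// Acquire the write lock by receiving from the channel...
	<-writableChan
	fmt.Println("holding the transport write path")
	// ...and release it by sending the token back.
	writableChan <- 0
}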
@@ -88,11 +91,9 @@ type http2Server struct {

// newHTTP2Server constructs a ServerTransport based on HTTP2. ConnectionError is
// returned if something goes wrong.
func newHTTP2Server(conn net.Conn, maxStreams uint32) (_ ServerTransport, err error) {
func newHTTP2Server(conn net.Conn, maxStreams uint32, authInfo credentials.AuthInfo) (_ ServerTransport, err error) {
framer := newFramer(conn)
// Send initial settings as connection preface to client.
// TODO(zhaoq): Have a better way to signal "no limit" because 0 is
// permitted in the HTTP2 spec.
var settings []http2.Setting
// TODO(zhaoq): Have a better way to signal "no limit" because 0 is
// permitted in the HTTP2 spec.

@@ -116,6 +117,7 @@ func newHTTP2Server(conn net.Conn, maxStreams uint32) (_ ServerTransport, err er
var buf bytes.Buffer
t := &http2Server{
conn: conn,
authInfo: authInfo,
framer: framer,
hBuf: &buf,
hEnc: hpack.NewEncoder(&buf),
@@ -134,43 +136,73 @@ func newHTTP2Server(conn net.Conn, maxStreams uint32) (_ ServerTransport, err er
return t, nil
}

// operateHeader takes action on the decoded headers. It returns the current
// stream if there are remaining headers on the wire (in the following
// Continuation frame).
func (t *http2Server) operateHeaders(hDec *hpackDecoder, s *Stream, frame headerFrame, endStream bool, handle func(*Stream), wg *sync.WaitGroup) (pendingStream *Stream) {
defer func() {
if pendingStream == nil {
hDec.state = decodeState{}
}
}()
endHeaders, err := hDec.decodeServerHTTP2Headers(frame)
if s == nil {
// s has been closed.
return nil
// operateHeader takes action on the decoded headers.
func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(*Stream)) {
buf := newRecvBuffer()
fc := &inFlow{
limit: initialWindowSize,
conn: t.fc,
}
if err != nil {
grpclog.Printf("transport: http2Server.operateHeader found %v", err)
s := &Stream{
id: frame.Header().StreamID,
st: t,
buf: buf,
fc: fc,
}

var state decodeState
for _, hf := range frame.Fields {
state.processHeaderField(hf)
}
if err := state.err; err != nil {
if se, ok := err.(StreamError); ok {
t.controlBuf.put(&resetStream{s.id, statusCodeConvTab[se.Code]})
}
return nil
return
}
if endStream {

if frame.StreamEnded() {
// s is just created by the caller. No lock needed.
s.state = streamReadDone
}
if !endHeaders {
return s
s.recvCompress = state.encoding
if state.timeoutSet {
s.ctx, s.cancel = context.WithTimeout(context.TODO(), state.timeout)
} else {
s.ctx, s.cancel = context.WithCancel(context.TODO())
}
pr := &peer.Peer{
Addr: t.conn.RemoteAddr(),
}
// Attach Auth info if there is any.
if t.authInfo != nil {
pr.AuthInfo = t.authInfo
}
s.ctx = peer.NewContext(s.ctx, pr)
// Cache the current stream to the context so that the server application
// can find out. Required when the server wants to send some metadata
// back to the client (unary call only).
s.ctx = newContextWithStream(s.ctx, s)
// Attach the received metadata to the context.
if len(state.mdata) > 0 {
s.ctx = metadata.NewContext(s.ctx, state.mdata)
}

s.dec = &recvBufferReader{
ctx: s.ctx,
recv: s.buf,
}
s.recvCompress = state.encoding
s.method = state.method
t.mu.Lock()
if t.state != reachable {
t.mu.Unlock()
return nil
return
}
if uint32(len(t.activeStreams)) >= t.maxStreams {
t.mu.Unlock()
t.controlBuf.put(&resetStream{s.id, http2.ErrCodeRefusedStream})
return nil
return
}
s.sendQuotaPool = newQuotaPool(int(t.streamSendQuota))
t.activeStreams[s.id] = s
@@ -178,32 +210,7 @@ func (t *http2Server) operateHeaders(hDec *hpackDecoder, s *Stream, frame header
s.windowHandler = func(n int) {
t.updateWindow(s, uint32(n))
}
if hDec.state.timeoutSet {
s.ctx, s.cancel = context.WithTimeout(context.TODO(), hDec.state.timeout)
} else {
s.ctx, s.cancel = context.WithCancel(context.TODO())
}
// Cache the current stream to the context so that the server application
// can find out. Required when the server wants to send some metadata
// back to the client (unary call only).
s.ctx = newContextWithStream(s.ctx, s)
// Attach the received metadata to the context.
if len(hDec.state.mdata) > 0 {
s.ctx = metadata.NewContext(s.ctx, hDec.state.mdata)
}

s.dec = &recvBufferReader{
ctx: s.ctx,
recv: s.buf,
}
s.method = hDec.state.method

wg.Add(1)
go func() {
handle(s)
wg.Done()
}()
return nil
handle(s)
}

// HandleStreams receives incoming streams using the given handler. This is
@@ -236,10 +243,6 @@ func (t *http2Server) HandleStreams(handle func(*Stream)) {
}
t.handleSettings(sf)

hDec := newHPACKDecoder()
var curStream *Stream
var wg sync.WaitGroup
defer wg.Wait()
for {
frame, err := t.framer.readFrame()
if err != nil {

@@ -247,7 +250,7 @@ func (t *http2Server) HandleStreams(handle func(*Stream)) {
return
}
switch frame := frame.(type) {
case *http2.HeadersFrame:
case *http2.MetaHeadersFrame:
id := frame.Header().StreamID
if id%2 != 1 || id <= t.maxStreamID {
// illegal gRPC stream id.

@@ -256,21 +259,7 @@ func (t *http2Server) HandleStreams(handle func(*Stream)) {
break
}
t.maxStreamID = id
buf := newRecvBuffer()
fc := &inFlow{
limit: initialWindowSize,
conn: t.fc,
}
curStream = &Stream{
id: frame.Header().StreamID,
st: t,
buf: buf,
fc: fc,
}
endStream := frame.Header().Flags.Has(http2.FlagHeadersEndStream)
curStream = t.operateHeaders(hDec, curStream, frame, endStream, handle, &wg)
case *http2.ContinuationFrame:
curStream = t.operateHeaders(hDec, curStream, frame, false, handle, &wg)
t.operateHeaders(frame, handle)
case *http2.DataFrame:
t.handleData(frame)
case *http2.RSTStreamFrame:
@@ -324,22 +313,24 @@ func (t *http2Server) handleData(f *http2.DataFrame) {
return
}
size := len(f.Data())
if err := s.fc.onData(uint32(size)); err != nil {
if _, ok := err.(ConnectionError); ok {
grpclog.Printf("transport: http2Server %v", err)
t.Close()
if size > 0 {
if err := s.fc.onData(uint32(size)); err != nil {
if _, ok := err.(ConnectionError); ok {
grpclog.Printf("transport: http2Server %v", err)
t.Close()
return
}
t.closeStream(s)
t.controlBuf.put(&resetStream{s.id, http2.ErrCodeFlowControl})
return
}
t.closeStream(s)
t.controlBuf.put(&resetStream{s.id, http2.ErrCodeFlowControl})
return
// TODO(bradfitz, zhaoq): A copy is required here because there is no
// guarantee f.Data() is consumed before the arrival of next frame.
// Can this copy be eliminated?
data := make([]byte, size)
copy(data, f.Data())
s.write(recvMsg{data: data})
}
// TODO(bradfitz, zhaoq): A copy is required here because there is no
// guarantee f.Data() is consumed before the arrival of next frame.
// Can this copy be eliminated?
data := make([]byte, size)
copy(data, f.Data())
s.write(recvMsg{data: data})
if f.Header().Flags.Has(http2.FlagDataEndStream) {
// Received the end of stream from the client.
s.mu.Lock()
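For orientation, s.fc.onData above charges the received bytes against the stream's inbound flow-control window and fails when the sender overruns it. The inFlow type is internal to the transport (and also tracks a connection-level window), so the following is only an assumed, simplified model of that check:

package main

import (
	"errors"
	"fmt"
)

// inFlowSketch is a hypothetical, per-stream-only stand-in for inFlow.
type inFlowSketch struct {
	limit   uint32 // window advertised to the peer
	pending uint32 // bytes received but not yet consumed by the application
}

func (f *inFlowSketch) onData(n uint32) error {
	if f.pending+n > f.limit {
		return errors.New("flow control violation: peer sent more than the advertised window")
	}
	f.pending += n
	return nil
}

func main() {
	f := &inFlowSketch{limit: 65535}
	fmt.Println(f.onData(16384)) // <nil>
	fmt.Println(f.onData(65535)) // flow control violation
}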
@@ -367,22 +358,19 @@ func (t *http2Server) handleSettings(f *http2.SettingsFrame) {
if f.IsAck() {
return
}
var ss []http2.Setting
f.ForeachSetting(func(s http2.Setting) error {
if v, ok := f.Value(http2.SettingInitialWindowSize); ok {
t.mu.Lock()
defer t.mu.Unlock()
for _, s := range t.activeStreams {
s.sendQuotaPool.reset(int(v - t.streamSendQuota))
}
t.streamSendQuota = v
}
ss = append(ss, s)
return nil
})
t.controlBuf.put(&settings{ack: true})
// The settings will be applied once the ack is sent.
t.controlBuf.put(&settings{ack: true, ss: ss})
}

func (t *http2Server) handlePing(f *http2.PingFrame) {
t.controlBuf.put(&ping{true})
pingAck := &ping{ack: true}
copy(pingAck.data[:], f.Data[:])
t.controlBuf.put(pingAck)
}

func (t *http2Server) handleWindowUpdate(f *http2.WindowUpdateFrame) {
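The new handlePing above copies the peer's 8-byte payload into the ack, which RFC 7540 §6.7 requires for PING acknowledgements. Skipping the transport's controlBuf queue, the same idea expressed directly against the golang.org/x/net/http2 Framer might look like this (fr and f are stand-ins for the connection's framer and the received frame):

package pingsketch

import "golang.org/x/net/http2"

// ackPing echoes a PING as an ack, carrying the peer's opaque data unchanged.
func ackPing(fr *http2.Framer, f *http2.PingFrame) error {
	if f.IsAck() {
		// Never acknowledge an ack; that would ping-pong forever.
		return nil
	}
	return fr.WritePing(true, f.Data)
}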
@@ -444,8 +432,13 @@ func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error {
t.hBuf.Reset()
t.hEnc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"})
t.hEnc.WriteField(hpack.HeaderField{Name: "content-type", Value: "application/grpc"})
if s.sendCompress != "" {
t.hEnc.WriteField(hpack.HeaderField{Name: "grpc-encoding", Value: s.sendCompress})
}
for k, v := range md {
t.hEnc.WriteField(hpack.HeaderField{Name: k, Value: v})
for _, entry := range v {
t.hEnc.WriteField(hpack.HeaderField{Name: k, Value: entry})
}
}
if err := t.writeHeaders(s, t.hBuf, false); err != nil {
return err
@@ -459,17 +452,24 @@ func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error {
// TODO(zhaoq): Now it indicates the end of entire stream. Revisit if early
// OK is adopted.
func (t *http2Server) WriteStatus(s *Stream, statusCode codes.Code, statusDesc string) error {
s.mu.RLock()
var headersSent bool
s.mu.Lock()
if s.state == streamDone {
s.mu.RUnlock()
s.mu.Unlock()
return nil
}
s.mu.RUnlock()
if s.headerOk {
headersSent = true
}
s.mu.Unlock()
if _, err := wait(s.ctx, t.shutdownChan, t.writableChan); err != nil {
return err
}
t.hBuf.Reset()
t.hEnc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"})
if !headersSent {
t.hEnc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"})
t.hEnc.WriteField(hpack.HeaderField{Name: "content-type", Value: "application/grpc"})
}
t.hEnc.WriteField(
hpack.HeaderField{
Name: "grpc-status",
@@ -478,7 +478,9 @@ func (t *http2Server) WriteStatus(s *Stream, statusCode codes.Code, statusDesc s
t.hEnc.WriteField(hpack.HeaderField{Name: "grpc-message", Value: statusDesc})
// Attach the trailer metadata.
for k, v := range s.trailer {
t.hEnc.WriteField(hpack.HeaderField{Name: k, Value: v})
for _, entry := range v {
t.hEnc.WriteField(hpack.HeaderField{Name: k, Value: entry})
}
}
if err := t.writeHeaders(s, t.hBuf, true); err != nil {
t.Close()

@@ -507,6 +509,9 @@ func (t *http2Server) Write(s *Stream, data []byte, opts *Options) error {
t.hBuf.Reset()
t.hEnc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"})
t.hEnc.WriteField(hpack.HeaderField{Name: "content-type", Value: "application/grpc"})
if s.sendCompress != "" {
t.hEnc.WriteField(hpack.HeaderField{Name: "grpc-encoding", Value: s.sendCompress})
}
p := http2.HeadersFrameParam{
StreamID: s.id,
BlockFragment: t.hBuf.Bytes(),
@@ -584,6 +589,20 @@ func (t *http2Server) Write(s *Stream, data []byte, opts *Options) error {

}

func (t *http2Server) applySettings(ss []http2.Setting) {
for _, s := range ss {
if s.ID == http2.SettingInitialWindowSize {
t.mu.Lock()
defer t.mu.Unlock()
for _, stream := range t.activeStreams {
stream.sendQuotaPool.reset(int(s.Val - t.streamSendQuota))
}
t.streamSendQuota = s.Val
}

}
}

// controller running in a separate goroutine takes charge of sending control
// frames (e.g., window update, reset stream, setting, etc.) to the server.
func (t *http2Server) controller() {

@@ -599,17 +618,16 @@ func (t *http2Server) controller() {
case *settings:
if i.ack {
t.framer.writeSettingsAck(true)
t.applySettings(i.ss)
} else {
t.framer.writeSettings(true, i.setting...)
t.framer.writeSettings(true, i.ss...)
}
case *resetStream:
t.framer.writeRSTStream(true, i.streamID, i.code)
case *flushIO:
t.framer.flushWrite()
case *ping:
// TODO(zhaoq): Ack with all-0 data now. will change to some
// meaningful content when this is actually in use.
t.framer.writePing(true, i.ack, [8]byte{})
t.framer.writePing(true, i.ack, i.data)
default:
grpclog.Printf("transport: http2Server.controller got unexpected item type %v\n", i)
}
@@ -639,9 +657,9 @@ func (t *http2Server) Close() (err error) {
t.mu.Unlock()
close(t.shutdownChan)
err = t.conn.Close()
// Notify all active streams.
// Cancel all active streams.
for _, s := range streams {
s.write(recvMsg{err: ErrConnClosing})
s.cancel()
}
return
}

@@ -663,8 +681,11 @@ func (t *http2Server) closeStream(s *Stream) {
s.state = streamDone
s.mu.Unlock()
// In case stream sending and receiving are invoked in separate
// goroutines (e.g., bi-directional streaming), the caller needs
// to call cancel on the stream to interrupt the blocking on
// other goroutines.
// goroutines (e.g., bi-directional streaming), cancel needs to be
// called to interrupt the potential blocking on other goroutines.
s.cancel()
}

func (t *http2Server) RemoteAddr() net.Addr {
return t.conn.RemoteAddr()
}
205
vendor/google.golang.org/grpc/transport/http_util.go
generated
vendored
@@ -39,17 +39,20 @@ import (
"io"
"net"
"strconv"
"strings"
"sync/atomic"
"time"

"github.com/bradfitz/http2"
"github.com/bradfitz/http2/hpack"
"golang.org/x/net/http2"
"golang.org/x/net/http2/hpack"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/grpclog"
"google.golang.org/grpc/metadata"
)

const (
// The primary user agent
primaryUA = "grpc-go/0.11"
// http2MaxFrameLen specifies the max length of a HTTP2 frame.
http2MaxFrameLen = 16384 // 16KB frame
// http://http2.github.io/http2-spec/#SettingValues
@@ -59,35 +62,37 @@ const (
)

var (
clientPreface = []byte(http2.ClientPreface)
clientPreface = []byte(http2.ClientPreface)
http2ErrConvTab = map[http2.ErrCode]codes.Code{
http2.ErrCodeNo: codes.Internal,
http2.ErrCodeProtocol: codes.Internal,
http2.ErrCodeInternal: codes.Internal,
http2.ErrCodeFlowControl: codes.ResourceExhausted,
http2.ErrCodeSettingsTimeout: codes.Internal,
http2.ErrCodeFrameSize: codes.Internal,
http2.ErrCodeRefusedStream: codes.Unavailable,
http2.ErrCodeCancel: codes.Canceled,
http2.ErrCodeCompression: codes.Internal,
http2.ErrCodeConnect: codes.Internal,
http2.ErrCodeEnhanceYourCalm: codes.ResourceExhausted,
http2.ErrCodeInadequateSecurity: codes.PermissionDenied,
http2.ErrCodeHTTP11Required: codes.FailedPrecondition,
}
statusCodeConvTab = map[codes.Code]http2.ErrCode{
codes.Internal: http2.ErrCodeInternal,
codes.Canceled: http2.ErrCodeCancel,
codes.Unavailable: http2.ErrCodeRefusedStream,
codes.ResourceExhausted: http2.ErrCodeEnhanceYourCalm,
codes.PermissionDenied: http2.ErrCodeInadequateSecurity,
}
)

var http2RSTErrConvTab = map[http2.ErrCode]codes.Code{
http2.ErrCodeNo: codes.Internal,
http2.ErrCodeProtocol: codes.Internal,
http2.ErrCodeInternal: codes.Internal,
http2.ErrCodeFlowControl: codes.Internal,
http2.ErrCodeSettingsTimeout: codes.Internal,
http2.ErrCodeFrameSize: codes.Internal,
http2.ErrCodeRefusedStream: codes.Unavailable,
http2.ErrCodeCancel: codes.Canceled,
http2.ErrCodeCompression: codes.Internal,
http2.ErrCodeConnect: codes.Internal,
http2.ErrCodeEnhanceYourCalm: codes.ResourceExhausted,
http2.ErrCodeInadequateSecurity: codes.PermissionDenied,
}

var statusCodeConvTab = map[codes.Code]http2.ErrCode{
codes.Internal: http2.ErrCodeInternal, // pick an arbitrary one which is matched.
codes.Canceled: http2.ErrCodeCancel,
codes.Unavailable: http2.ErrCodeRefusedStream,
codes.ResourceExhausted: http2.ErrCodeEnhanceYourCalm,
codes.PermissionDenied: http2.ErrCodeInadequateSecurity,
}

// Records the states during HPACK decoding. Must be reset once the
// decoding of the entire headers are finished.
type decodeState struct {
err error // first error encountered decoding

encoding string
// statusCode caches the stream status received from the trailer
// the server sent. Client side only.
statusCode codes.Code
@@ -97,28 +102,14 @@ type decodeState struct {
timeout time.Duration
method string
// key-value metadata map from the peer.
mdata map[string]string
}

// An hpackDecoder decodes HTTP2 headers which may span multiple frames.
type hpackDecoder struct {
h *hpack.Decoder
state decodeState
err error // The err when decoding
}

// A headerFrame is either a http2.HeaderFrame or http2.ContinuationFrame.
type headerFrame interface {
Header() http2.FrameHeader
HeaderBlockFragment() []byte
HeadersEnded() bool
mdata map[string][]string
}

// isReservedHeader checks whether hdr belongs to HTTP2 headers
// reserved by gRPC protocol. Any other headers are classified as the
// user-specified metadata.
func isReservedHeader(hdr string) bool {
if hdr[0] == ':' {
if hdr != "" && hdr[0] == ':' {
return true
}
switch hdr {
@@ -128,92 +119,69 @@ func isReservedHeader(hdr string) bool {
"grpc-message",
"grpc-status",
"grpc-timeout",
"te",
"user-agent":
"te":
return true
default:
return false
}
}

func newHPACKDecoder() *hpackDecoder {
d := &hpackDecoder{}
d.h = hpack.NewDecoder(http2InitHeaderTableSize, func(f hpack.HeaderField) {
switch f.Name {
case "grpc-status":
code, err := strconv.Atoi(f.Value)
if err != nil {
d.err = StreamErrorf(codes.Internal, "transport: malformed grpc-status: %v", err)
return
}
d.state.statusCode = codes.Code(code)
case "grpc-message":
d.state.statusDesc = f.Value
case "grpc-timeout":
d.state.timeoutSet = true
var err error
d.state.timeout, err = timeoutDecode(f.Value)
if err != nil {
d.err = StreamErrorf(codes.Internal, "transport: malformed time-out: %v", err)
return
}
case ":path":
d.state.method = f.Value
default:
if !isReservedHeader(f.Name) {
if d.state.mdata == nil {
d.state.mdata = make(map[string]string)
}
k, v, err := metadata.DecodeKeyValue(f.Name, f.Value)
if err != nil {
grpclog.Printf("Failed to decode (%q, %q): %v", f.Name, f.Value, err)
func (d *decodeState) setErr(err error) {
if d.err == nil {
d.err = err
}
}

func (d *decodeState) processHeaderField(f hpack.HeaderField) {
switch f.Name {
case "content-type":
if !strings.Contains(f.Value, "application/grpc") {
d.setErr(StreamErrorf(codes.FailedPrecondition, "transport: received the unexpected content-type %q", f.Value))
return
}
case "grpc-encoding":
d.encoding = f.Value
case "grpc-status":
code, err := strconv.Atoi(f.Value)
if err != nil {
d.setErr(StreamErrorf(codes.Internal, "transport: malformed grpc-status: %v", err))
return
}
d.statusCode = codes.Code(code)
case "grpc-message":
d.statusDesc = f.Value
case "grpc-timeout":
d.timeoutSet = true
var err error
d.timeout, err = timeoutDecode(f.Value)
if err != nil {
d.setErr(StreamErrorf(codes.Internal, "transport: malformed time-out: %v", err))
return
}
case ":path":
d.method = f.Value
default:
if !isReservedHeader(f.Name) {
if f.Name == "user-agent" {
i := strings.LastIndex(f.Value, " ")
if i == -1 {
// There is no application user agent string being set.
return
}
d.state.mdata[k] = v
// Extract the application user agent string.
f.Value = f.Value[:i]
}
if d.mdata == nil {
d.mdata = make(map[string][]string)
}
k, v, err := metadata.DecodeKeyValue(f.Name, f.Value)
if err != nil {
grpclog.Printf("Failed to decode (%q, %q): %v", f.Name, f.Value, err)
return
}
d.mdata[k] = append(d.mdata[k], v)
}
})
return d
}
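The new user-agent branch in processHeaderField keeps only the application's portion of the header by cutting at the last space, since the transport appends its own "grpc-go/<version>" suffix (primaryUA above). A small standalone illustration of that trimming, with a made-up user agent value:

package main

import (
	"fmt"
	"strings"
)

func main() {
	ua := "my-app/1.2 grpc-go/0.11"
	// Drop the library-appended suffix, keeping the application part.
	if i := strings.LastIndex(ua, " "); i != -1 {
		ua = ua[:i]
	}
	fmt.Println(ua) // my-app/1.2
}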
func (d *hpackDecoder) decodeClientHTTP2Headers(frame headerFrame) (endHeaders bool, err error) {
d.err = nil
_, err = d.h.Write(frame.HeaderBlockFragment())
if err != nil {
err = StreamErrorf(codes.Internal, "transport: HPACK header decode error: %v", err)
}

if frame.HeadersEnded() {
if closeErr := d.h.Close(); closeErr != nil && err == nil {
err = StreamErrorf(codes.Internal, "transport: HPACK decoder close error: %v", closeErr)
}
endHeaders = true
}

if err == nil && d.err != nil {
err = d.err
}
return
}

func (d *hpackDecoder) decodeServerHTTP2Headers(frame headerFrame) (endHeaders bool, err error) {
d.err = nil
_, err = d.h.Write(frame.HeaderBlockFragment())
if err != nil {
err = StreamErrorf(codes.Internal, "transport: HPACK header decode error: %v", err)
}

if frame.HeadersEnded() {
if closeErr := d.h.Close(); closeErr != nil && err == nil {
err = StreamErrorf(codes.Internal, "transport: HPACK decoder close error: %v", closeErr)
}
endHeaders = true
}

if err == nil && d.err != nil {
err = d.err
}
return
}

type timeoutUnit uint8
@@ -304,10 +272,11 @@ type framer struct {

func newFramer(conn net.Conn) *framer {
f := &framer{
reader: conn,
reader: bufio.NewReaderSize(conn, http2IOBufSize),
writer: bufio.NewWriterSize(conn, http2IOBufSize),
}
f.fr = http2.NewFramer(f.writer, f.reader)
f.fr.ReadMetaHeaders = hpack.NewDecoder(http2InitHeaderTableSize, nil)
return f
}
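Setting ReadMetaHeaders on the framer is what makes the hand-rolled hpackDecoder and headerFrame plumbing removed above unnecessary: golang.org/x/net/http2 then coalesces HEADERS and CONTINUATION frames and HPACK-decodes them, so the read loops see a single *http2.MetaHeadersFrame with plain-text Fields. A minimal sketch of reading with that mode enabled (conn is any io.ReadWriter; the literal 4096 stands in for http2InitHeaderTableSize):

package headersketch

import (
	"fmt"
	"io"

	"golang.org/x/net/http2"
	"golang.org/x/net/http2/hpack"
)

// readHeaders prints every decoded header field of the next header block.
func readHeaders(conn io.ReadWriter) error {
	fr := http2.NewFramer(conn, conn)
	fr.ReadMetaHeaders = hpack.NewDecoder(4096, nil)
	for {
		frame, err := fr.ReadFrame()
		if err != nil {
			return err
		}
		if mh, ok := frame.(*http2.MetaHeadersFrame); ok {
			for _, hf := range mh.Fields {
				fmt.Printf("%s: %s\n", hf.Name, hf.Value)
			}
			if mh.StreamEnded() {
				return nil
			}
		}
	}
}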
116
vendor/google.golang.org/grpc/transport/transport.go
generated
vendored
@@ -47,6 +47,7 @@ import (
"time"

"golang.org/x/net/context"
"golang.org/x/net/trace"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/metadata"

@@ -169,17 +170,13 @@ type Stream struct {
ctx context.Context
cancel context.CancelFunc
// method records the associated RPC method of the stream.
method string
buf *recvBuffer
dec io.Reader

// updateStreams indicates whether the transport's streamsQuota needed
// to be updated when this stream is closed. It is false when the transport
// sticks to the initial infinite value of the number of concurrent streams.
// Ture otherwise.
updateStreams bool
fc *inFlow
recvQuota uint32
method string
recvCompress string
sendCompress string
buf *recvBuffer
dec io.Reader
fc *inFlow
recvQuota uint32
// The accumulated inbound quota pending for window update.
updateQuota uint32
// The handler to control the window update procedure for both this
@@ -206,6 +203,17 @@ type Stream struct {
statusDesc string
}

// RecvCompress returns the compression algorithm applied to the inbound
// message. It is empty string if there is no compression applied.
func (s *Stream) RecvCompress() string {
return s.recvCompress
}

// SetSendCompress sets the compression algorithm to the stream.
func (s *Stream) SetSendCompress(str string) {
s.sendCompress = str
}

// Header acquires the key-value pairs of header metadata once it
// is available. It blocks until i) the metadata is ready or ii) there is no
// header metadata or iii) the stream is cancelled/expired.

@@ -238,6 +246,11 @@ func (s *Stream) Context() context.Context {
return s.ctx
}

// TraceContext recreates the context of s with a trace.Trace.
func (s *Stream) TraceContext(tr trace.Trace) {
s.ctx = trace.NewContext(s.ctx, tr)
}

// Method returns the method for the stream.
func (s *Stream) Method() string {
return s.method
@@ -286,20 +299,18 @@ func (s *Stream) Read(p []byte) (n int, err error) {
return
}

type key int

// The key to save transport.Stream in the context.
const streamKey = key(0)
type streamKey struct{}

// newContextWithStream creates a new context from ctx and attaches stream
// to it.
func newContextWithStream(ctx context.Context, stream *Stream) context.Context {
return context.WithValue(ctx, streamKey, stream)
return context.WithValue(ctx, streamKey{}, stream)
}

// StreamFromContext returns the stream saved in ctx.
func StreamFromContext(ctx context.Context) (s *Stream, ok bool) {
s, ok = ctx.Value(streamKey).(*Stream)
s, ok = ctx.Value(streamKey{}).(*Stream)
return
}
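The switch from `const streamKey = key(0)` to `type streamKey struct{}` follows the common Go idiom for context keys: an unexported struct type cannot collide with values stored by other packages, and the empty struct carries no data. A self-contained illustration (using the standard library context package rather than the vendored golang.org/x/net/context):

package main

import (
	"context"
	"fmt"
)

type ctxKey struct{}

func main() {
	ctx := context.WithValue(context.Background(), ctxKey{}, "payload")
	v, ok := ctx.Value(ctxKey{}).(string)
	fmt.Println(v, ok) // payload true
}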
@@ -314,15 +325,20 @@ const (

// NewServerTransport creates a ServerTransport with conn or non-nil error
// if it fails.
func NewServerTransport(protocol string, conn net.Conn, maxStreams uint32) (ServerTransport, error) {
return newHTTP2Server(conn, maxStreams)
func NewServerTransport(protocol string, conn net.Conn, maxStreams uint32, authInfo credentials.AuthInfo) (ServerTransport, error) {
return newHTTP2Server(conn, maxStreams, authInfo)
}

// ConnectOptions covers all relevant options for dialing a server.
type ConnectOptions struct {
Dialer func(string, time.Duration) (net.Conn, error)
// UserAgent is the application user agent.
UserAgent string
// Dialer specifies how to dial a network address.
Dialer func(string, time.Duration) (net.Conn, error)
// AuthOptions stores the credentials required to setup a client connection and/or issue RPCs.
AuthOptions []credentials.Credentials
Timeout time.Duration
// Timeout specifies the timeout for dialing a client connection.
Timeout time.Duration
}

// NewClientTransport establishes the transport with the required ConnectOptions
@@ -334,20 +350,40 @@ func NewClientTransport(target string, opts *ConnectOptions) (ClientTransport, e
// Options provides additional hints and information for message
// transmission.
type Options struct {
// Indicate whether it is the last piece for this stream.
// Last indicates whether this write is the last piece for
// this stream.
Last bool
// The hint to transport impl whether the data could be buffered for
// batching write. Transport impl can feel free to ignore it.

// Delay is a hint to the transport implementation for whether
// the data could be buffered for a batching write. The
// Transport implementation may ignore the hint.
Delay bool
}

// CallHdr carries the information of a particular RPC.
type CallHdr struct {
Host string // peer host
Method string // the operation to perform on the specified host
// Host specifies the peer's host.
Host string

// Method specifies the operation to perform.
Method string

// RecvCompress specifies the compression algorithm applied on
// inbound messages.
RecvCompress string

// SendCompress specifies the compression algorithm applied on
// outbound message.
SendCompress string

// Flush indicates whether a new stream command should be sent
// to the peer without waiting for the first data. This is
// only a hint. The transport may modify the flush decision
// for performance purposes.
Flush bool
}

// ClientTransport is the common interface for all gRPC client side transport
// ClientTransport is the common interface for all gRPC client-side transport
// implementations.
type ClientTransport interface {
// Close tears down this transport. Once it returns, the transport
@@ -376,21 +412,35 @@ type ClientTransport interface {
Error() <-chan struct{}
}

// ServerTransport is the common interface for all gRPC server side transport
// ServerTransport is the common interface for all gRPC server-side transport
// implementations.
//
// Methods may be called concurrently from multiple goroutines, but
// Write methods for a given Stream will be called serially.
type ServerTransport interface {
// WriteStatus sends the status of a stream to the client.
WriteStatus(s *Stream, statusCode codes.Code, statusDesc string) error
// Write sends the data for the given stream.
Write(s *Stream, data []byte, opts *Options) error
// WriteHeader sends the header metedata for the given stream.
WriteHeader(s *Stream, md metadata.MD) error
// HandleStreams receives incoming streams using the given handler.
HandleStreams(func(*Stream))

// WriteHeader sends the header metadata for the given stream.
// WriteHeader may not be called on all streams.
WriteHeader(s *Stream, md metadata.MD) error

// Write sends the data for the given stream.
// Write may not be called on all streams.
Write(s *Stream, data []byte, opts *Options) error

// WriteStatus sends the status of a stream to the client.
// WriteStatus is the final call made on a stream and always
// occurs.
WriteStatus(s *Stream, statusCode codes.Code, statusDesc string) error

// Close tears down the transport. Once it is called, the transport
// should not be accessed any more. All the pending streams and their
// handlers will be terminated asynchronously.
Close() error

// RemoteAddr returns the remote network address.
RemoteAddr() net.Addr
}

// StreamErrorf creates an StreamError with the specified error code and description.