Merge pull request #466 from sameo/exec

Stream server for exec, attach and port forward

commit eb257d2526
2128 changed files with 809935 additions and 15 deletions
@@ -4,7 +4,7 @@ set -o errexit
 set -o nounset
 set -o pipefail
 
-for d in $(find . -type d -not -iwholename '*.git*' -a -not -iname '.tool' -a -not -iwholename '*vendor*'); do
+for d in $(find . -type d -not -iwholename '*.git*' -a -not -iname '.tool' -a -not -iwholename '*vendor*' -a -not -iwholename '*.artifacts*' -a -not -iwholename '*contrib*' -a -not -iwholename '*test*' -a -not -iwholename '*logo*' -a -not -iwholename '*conmon*' -a -not -iwholename '*completions*' -a -not -iwholename '*docs*' -a -not -iwholename '*pause*'); do
 	${GOPATH}/bin/gometalinter \
 		--exclude='error return value not checked.*(Close|Log|Print|RemoveAll).*\(errcheck\)$' \
 		--exclude='declaration of.*err.*shadows declaration.*\(vetshadow\)$' \
@@ -19,5 +19,5 @@ for d in $(find . -type d -not -iwholename '*.git*' -a -not -iname '.tool' -a -n
 		--cyclo-over=80 \
 		--dupl-threshold=100 \
 		--tests \
-		--deadline=60s "${d}"
+		--deadline=120s "${d}"
 done
@@ -3,12 +3,17 @@ package main
 import (
 	"fmt"
 	"log"
+	"net/url"
+	"os"
 	"strings"
 	"time"
 
 	"github.com/urfave/cli"
 	"golang.org/x/net/context"
+	restclient "k8s.io/client-go/rest"
+	"k8s.io/kubernetes/pkg/client/unversioned/remotecommand"
 	pb "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
+	remotecommandserver "k8s.io/kubernetes/pkg/kubelet/server/remotecommand"
 )
 
 var containerCommand = cli.Command{
@@ -22,6 +27,7 @@ var containerCommand = cli.Command{
 		containerStatusCommand,
 		listContainersCommand,
 		execSyncCommand,
+		execCommand,
 	},
 }
 
@@ -236,6 +242,45 @@ var execSyncCommand = cli.Command{
 	},
 }
 
+var execCommand = cli.Command{
+	Name:  "exec",
+	Usage: "prepare a streaming endpoint to execute a command in the container",
+	Flags: []cli.Flag{
+		cli.StringFlag{
+			Name:  "id",
+			Value: "",
+			Usage: "id of the container",
+		},
+		cli.BoolFlag{
+			Name:  "tty",
+			Usage: "whether to use tty",
+		},
+		cli.BoolFlag{
+			Name:  "stdin",
+			Usage: "whether to stream to stdin",
+		},
+		cli.BoolFlag{
+			Name:  "url",
+			Usage: "do not exec command, just prepare streaming endpoint",
+		},
+	},
+	Action: func(context *cli.Context) error {
+		// Set up a connection to the server.
+		conn, err := getClientConnection(context)
+		if err != nil {
+			return fmt.Errorf("failed to connect: %v", err)
+		}
+		defer conn.Close()
+		client := pb.NewRuntimeServiceClient(conn)
+
+		err = Exec(client, context.String("id"), context.Bool("tty"), context.Bool("stdin"), context.Bool("url"), context.Args())
+		if err != nil {
+			return fmt.Errorf("execing command in container failed: %v", err)
+		}
+		return nil
+	},
+}
+
 type listOptions struct {
 	// id of the container
 	id string
@@ -441,6 +486,52 @@ func ExecSync(client pb.RuntimeServiceClient, ID string, cmd []string, timeout i
 	return nil
 }
 
+// Exec sends an ExecRequest to the server, and parses
+// the returned ExecResponse.
+func Exec(client pb.RuntimeServiceClient, ID string, tty bool, stdin bool, urlOnly bool, cmd []string) error {
+	if ID == "" {
+		return fmt.Errorf("ID cannot be empty")
+	}
+	r, err := client.Exec(context.Background(), &pb.ExecRequest{
+		ContainerId: ID,
+		Cmd:         cmd,
+		Tty:         tty,
+		Stdin:       stdin,
+	})
+	if err != nil {
+		return err
+	}
+
+	if urlOnly {
+		fmt.Println("URL:")
+		fmt.Println(r.Url)
+		return nil
+	}
+
+	execURL, err := url.Parse(r.Url)
+	if err != nil {
+		return err
+	}
+
+	streamExec, err := remotecommand.NewExecutor(&restclient.Config{}, "GET", execURL)
+	if err != nil {
+		return err
+	}
+
+	options := remotecommand.StreamOptions{
+		SupportedProtocols: remotecommandserver.SupportedStreamingProtocols,
+		Stdout:             os.Stdout,
+		Stderr:             os.Stderr,
+		Tty:                tty,
+	}
+
+	if stdin {
+		options.Stdin = os.Stdin
+	}
+
+	return streamExec.Stream(options)
+}
+
 // ListContainers sends a ListContainerRequest to the server, and parses
 // the returned ListContainerResponse.
 func ListContainers(client pb.RuntimeServiceClient, opts listOptions) error {
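The client side of the CRI exec flow is two-step: the gRPC `Exec` call only returns a streaming URL, and `remotecommand.NewExecutor` then attaches to that URL over one of the negotiated streaming protocols. For illustration, a minimal sketch (not part of the PR) of driving the `Exec` helper above directly; the container ID and command are placeholders, and `getClientConnection` is the helper this file already defines:

```go
// Hypothetical caller of the Exec helper defined above.
func runExecExample(ctx *cli.Context) error {
	conn, err := getClientConnection(ctx) // existing helper in this file
	if err != nil {
		return fmt.Errorf("failed to connect: %v", err)
	}
	defer conn.Close()
	client := pb.NewRuntimeServiceClient(conn)

	// Stream `ls /` in container "abc123" (placeholder ID), with no
	// TTY and no stdin, and actually attach to the stream rather than
	// just printing the endpoint URL (urlOnly=false).
	return Exec(client, "abc123", false, false, false, []string{"ls", "/"})
}
```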
417 lock.json

@@ -1,6 +1,24 @@
 {
-    "memo": "0d3077faf280e4e13e18e56f085053d4ced593c2fcfcb09d7df1aea8f0bba403",
+    "memo": "e99fe9f7a283d8fb8e0ec8b05fa68d01a7dfa4c7c48b6e85a84986a079685711",
     "projects": [
         {
+            "name": "cloud.google.com/go",
+            "version": "v0.7.0",
+            "revision": "2e6a95edb1071d750f6d7db777bf66cd2997af6c",
+            "packages": [
+                "compute/metadata",
+                "internal"
+            ]
+        },
+        {
+            "name": "github.com/Azure/go-ansiterm",
+            "branch": "master",
+            "revision": "fa152c58bc15761d0200cb75fe958b89a9d4888e",
+            "packages": [
+                ".",
+                "winterm"
+            ]
+        },
+        {
             "name": "github.com/BurntSushi/toml",
             "version": "v0.2.0",
@@ -27,6 +45,22 @@
                 "."
             ]
         },
         {
+            "name": "github.com/PuerkitoBio/purell",
+            "version": "v1.1.0",
+            "revision": "0bcb03f4b4d0a9428594752bd2a3b9aa0a9d4bd4",
+            "packages": [
+                "."
+            ]
+        },
+        {
+            "name": "github.com/PuerkitoBio/urlesc",
+            "branch": "master",
+            "revision": "5bd2802263f21d8788851d5305584c82a5c75d7e",
+            "packages": [
+                "."
+            ]
+        },
+        {
             "name": "github.com/Sirupsen/logrus",
             "branch": "master",
@@ -119,6 +153,18 @@
                 "storageversion"
             ]
         },
         {
+            "name": "github.com/coreos/go-oidc",
+            "branch": "master",
+            "revision": "be73733bb8cc830d0205609b95d125215f8e9c70",
+            "packages": [
+                "http",
+                "jose",
+                "key",
+                "oauth2",
+                "oidc"
+            ]
+        },
+        {
             "name": "github.com/coreos/go-systemd",
             "version": "v14",
@@ -128,6 +174,24 @@
                 "dbus"
             ]
         },
         {
+            "name": "github.com/coreos/pkg",
+            "version": "v3",
+            "revision": "3ac0863d7acf3bc44daf49afef8919af12f704ef",
+            "packages": [
+                "health",
+                "httputil",
+                "timeutil"
+            ]
+        },
+        {
+            "name": "github.com/davecgh/go-spew",
+            "version": "v1.1.0",
+            "revision": "346938d642f2ec3594ed81d874461961cd0faa76",
+            "packages": [
+                "spew"
+            ]
+        },
+        {
             "name": "github.com/docker/distribution",
             "branch": "master",
@@ -174,6 +238,8 @@
                 "pkg/stringutils",
                 "pkg/symlink",
                 "pkg/system",
+                "pkg/term",
+                "pkg/term/windows",
                 "pkg/tlsconfig",
                 "pkg/truncindex",
                 "utils/templates"
@@ -205,6 +271,25 @@
                 "."
             ]
         },
         {
+            "name": "github.com/docker/spdystream",
+            "branch": "master",
+            "revision": "ed496381df8283605c435b86d4fdd6f4f20b8c6e",
+            "packages": [
+                ".",
+                "spdy"
+            ]
+        },
+        {
+            "name": "github.com/emicklei/go-restful",
+            "branch": "master",
+            "revision": "09691a3b6378b740595c1002f40c34dd5f218a22",
+            "packages": [
+                ".",
+                "log",
+                "swagger"
+            ]
+        },
+        {
             "name": "github.com/fsnotify/fsnotify",
             "branch": "master",
@@ -221,6 +306,38 @@
                 "."
             ]
         },
         {
+            "name": "github.com/go-openapi/jsonpointer",
+            "branch": "master",
+            "revision": "779f45308c19820f1a69e9a4cd965f496e0da10f",
+            "packages": [
+                "."
+            ]
+        },
+        {
+            "name": "github.com/go-openapi/jsonreference",
+            "branch": "master",
+            "revision": "36d33bfe519efae5632669801b180bf1a245da3b",
+            "packages": [
+                "."
+            ]
+        },
+        {
+            "name": "github.com/go-openapi/spec",
+            "branch": "master",
+            "revision": "02fb9cd3430ed0581e0ceb4804d5d4b3cc702694",
+            "packages": [
+                "."
+            ]
+        },
+        {
+            "name": "github.com/go-openapi/swag",
+            "branch": "master",
+            "revision": "d5f8ebc3b1c55a4cf6489eeae7354f338cfe299e",
+            "packages": [
+                "."
+            ]
+        },
+        {
             "name": "github.com/godbus/dbus",
             "version": "v4.0.0",
@@ -240,6 +357,22 @@
                 "sortkeys"
             ]
         },
         {
+            "name": "github.com/golang/glog",
+            "branch": "master",
+            "revision": "23def4e6c14b4da8ac2ed8007337bc5eb5007998",
+            "packages": [
+                "."
+            ]
+        },
+        {
+            "name": "github.com/golang/groupcache",
+            "branch": "master",
+            "revision": "b710c8433bd175204919eb38776e944233235d03",
+            "packages": [
+                "lru"
+            ]
+        },
+        {
             "name": "github.com/golang/protobuf",
             "branch": "master",
@@ -248,6 +381,22 @@
                 "proto"
             ]
         },
         {
+            "name": "github.com/google/gofuzz",
+            "branch": "master",
+            "revision": "44d81051d367757e1c7c6a5a86423ece9afcf63c",
+            "packages": [
+                "."
+            ]
+        },
+        {
+            "name": "github.com/googleapis/gax-go",
+            "branch": "master",
+            "revision": "9af46dd5a1713e8b5cd71106287eba3cefdde50b",
+            "packages": [
+                "."
+            ]
+        },
+        {
             "name": "github.com/gorilla/context",
             "version": "v1.1",
@@ -272,6 +421,40 @@
                 "."
             ]
         },
         {
+            "name": "github.com/jonboulle/clockwork",
+            "version": "v0.1.0",
+            "revision": "2eee05ed794112d45db504eb05aa693efd2b8b09",
+            "packages": [
+                "."
+            ]
+        },
+        {
+            "name": "github.com/juju/ratelimit",
+            "branch": "master",
+            "revision": "acf38b000a03e4ab89e40f20f1e548f4e6ac7f72",
+            "packages": [
+                "."
+            ]
+        },
+        {
+            "name": "github.com/kr/pty",
+            "version": "v1.0.0",
+            "revision": "2c10821df3c3cf905230d078702dfbe9404c9b23",
+            "packages": [
+                "."
+            ]
+        },
+        {
+            "name": "github.com/mailru/easyjson",
+            "branch": "master",
+            "revision": "99e922cf9de1bc0ab38310c277cff32c2147e747",
+            "packages": [
+                "buffer",
+                "jlexer",
+                "jwriter"
+            ]
+        },
+        {
             "name": "github.com/mattn/go-runewidth",
             "version": "v0.0.1",
@@ -296,6 +479,14 @@
                 "."
             ]
         },
         {
+            "name": "github.com/mitchellh/go-wordwrap",
+            "branch": "master",
+            "revision": "ad45545899c7b13c020ea92b2072220eefad42b8",
+            "packages": [
+                "."
+            ]
+        },
+        {
             "name": "github.com/mtrmac/gpgme",
             "branch": "master",
@@ -374,6 +565,14 @@
                 "."
             ]
         },
         {
+            "name": "github.com/spf13/pflag",
+            "branch": "master",
+            "revision": "9ff6c6923cfffbcd502984b8e0c80539a94968b7",
+            "packages": [
+                "."
+            ]
+        },
+        {
             "name": "github.com/syndtr/gocapability",
             "branch": "master",
@@ -390,6 +589,14 @@
                 "patricia"
             ]
         },
         {
+            "name": "github.com/ugorji/go",
+            "branch": "master",
+            "revision": "d23841a297e5489e787e72fceffabf9d2994b52a",
+            "packages": [
+                "codec"
+            ]
+        },
+        {
             "name": "github.com/urfave/cli",
             "version": "v1.19.1",
@@ -435,7 +642,20 @@
                 "internal/timeseries",
                 "lex/httplex",
                 "proxy",
-                "trace"
+                "trace",
+                "websocket"
             ]
         },
         {
+            "name": "golang.org/x/oauth2",
+            "branch": "master",
+            "revision": "a6bd8cefa1811bd24b86f8902872e4e8225f74c4",
+            "packages": [
+                ".",
+                "google",
+                "internal",
+                "jws",
+                "jwt"
+            ]
+        },
+        {
@@ -447,6 +667,37 @@
                 "windows"
             ]
         },
         {
+            "name": "golang.org/x/text",
+            "branch": "master",
+            "revision": "dafb3384ad25363d928a9e97ce4ad3a2f0667e34",
+            "packages": [
+                "internal/gen",
+                "internal/triegen",
+                "internal/ucd",
+                "transform",
+                "unicode/cldr",
+                "unicode/norm",
+                "width"
+            ]
+        },
+        {
+            "name": "google.golang.org/appengine",
+            "version": "v1.0.0",
+            "revision": "150dc57a1b433e64154302bdc40b6bb8aefa313a",
+            "packages": [
+                ".",
+                "internal",
+                "internal/app_identity",
+                "internal/base",
+                "internal/datastore",
+                "internal/log",
+                "internal/modules",
+                "internal/remote_api",
+                "internal/urlfetch",
+                "urlfetch"
+            ]
+        },
+        {
             "name": "google.golang.org/grpc",
             "version": "v1.0.1-GA",
@@ -471,6 +722,14 @@
                 "."
             ]
         },
         {
+            "name": "gopkg.in/inf.v0",
+            "version": "v0.9.0",
+            "revision": "3887ee99ecf07df5b447e9b00d9c0b2adaa9f3e4",
+            "packages": [
+                "."
+            ]
+        },
+        {
             "name": "gopkg.in/yaml.v2",
             "branch": "v2",
@@ -484,8 +743,51 @@
             "branch": "master",
             "revision": "21807b270ec15d19215659a5caa08b17f66d6f44",
             "packages": [
+                "pkg/api/errors",
+                "pkg/api/meta",
+                "pkg/apimachinery",
+                "pkg/apimachinery/announced",
+                "pkg/apimachinery/registered",
+                "pkg/apis/meta/v1",
+                "pkg/apis/meta/v1/unstructured",
+                "pkg/conversion",
+                "pkg/conversion/queryparams",
                 "pkg/fields",
-                "pkg/selection"
+                "pkg/labels",
+                "pkg/openapi",
+                "pkg/runtime",
+                "pkg/runtime/schema",
+                "pkg/runtime/serializer",
+                "pkg/runtime/serializer/json",
+                "pkg/runtime/serializer/protobuf",
+                "pkg/runtime/serializer/recognizer",
+                "pkg/runtime/serializer/streaming",
+                "pkg/runtime/serializer/versioning",
+                "pkg/selection",
+                "pkg/types",
+                "pkg/util/errors",
+                "pkg/util/framer",
+                "pkg/util/json",
+                "pkg/util/net",
+                "pkg/util/rand",
+                "pkg/util/runtime",
+                "pkg/util/sets",
+                "pkg/util/validation",
+                "pkg/util/validation/field",
+                "pkg/util/wait",
+                "pkg/util/yaml",
+                "pkg/version",
+                "pkg/watch",
+                "third_party/forked/golang/reflect"
             ]
         },
         {
+            "name": "k8s.io/apiserver",
+            "branch": "master",
+            "revision": "18254ddaaab8024609bdf570493103036d72f86d",
+            "packages": [
+                "pkg/server/httplog",
+                "pkg/util/wsstream"
+            ]
+        },
+        {
@@ -493,7 +795,31 @@
             "branch": "master",
             "revision": "b766ef93a46ce6dc863462254658ca2861a53314",
             "packages": [
-                "util/homedir"
+                "pkg/api",
+                "pkg/api/resource",
+                "pkg/api/v1",
+                "pkg/apis/autoscaling",
+                "pkg/apis/extensions",
+                "pkg/util",
+                "pkg/util/intstr",
+                "pkg/util/labels",
+                "pkg/util/parsers",
+                "pkg/version",
+                "plugin/pkg/client/auth",
+                "plugin/pkg/client/auth/gcp",
+                "plugin/pkg/client/auth/oidc",
+                "rest",
+                "rest/watch",
+                "third_party/forked/golang/template",
+                "tools/clientcmd/api",
+                "tools/metrics",
+                "transport",
+                "util/cert",
+                "util/clock",
+                "util/flowcontrol",
+                "util/homedir",
+                "util/integer",
+                "util/jsonpath"
             ]
         },
         {
@@ -501,7 +827,88 @@
             "branch": "master",
             "revision": "760d8e98e8f6ad27aaf50b1a030cb9e7b6859aab",
             "packages": [
-                "pkg/kubelet/api/v1alpha1/runtime"
+                "pkg/api",
+                "pkg/api/install",
+                "pkg/api/resource",
+                "pkg/api/v1",
+                "pkg/apis/apps",
+                "pkg/apis/apps/install",
+                "pkg/apis/apps/v1beta1",
+                "pkg/apis/authentication",
+                "pkg/apis/authentication/install",
+                "pkg/apis/authentication/v1beta1",
+                "pkg/apis/authorization",
+                "pkg/apis/authorization/install",
+                "pkg/apis/authorization/v1beta1",
+                "pkg/apis/autoscaling",
+                "pkg/apis/autoscaling/install",
+                "pkg/apis/autoscaling/v1",
+                "pkg/apis/batch",
+                "pkg/apis/batch/install",
+                "pkg/apis/batch/v1",
+                "pkg/apis/batch/v2alpha1",
+                "pkg/apis/certificates",
+                "pkg/apis/certificates/install",
+                "pkg/apis/certificates/v1beta1",
+                "pkg/apis/extensions",
+                "pkg/apis/extensions/install",
+                "pkg/apis/extensions/v1beta1",
+                "pkg/apis/policy",
+                "pkg/apis/policy/install",
+                "pkg/apis/policy/v1beta1",
+                "pkg/apis/rbac",
+                "pkg/apis/rbac/install",
+                "pkg/apis/rbac/v1alpha1",
+                "pkg/apis/rbac/v1beta1",
+                "pkg/apis/storage",
+                "pkg/apis/storage/install",
+                "pkg/apis/storage/v1beta1",
+                "pkg/client/clientset_generated/clientset",
+                "pkg/client/clientset_generated/clientset/typed/apps/v1beta1",
+                "pkg/client/clientset_generated/clientset/typed/authentication/v1beta1",
+                "pkg/client/clientset_generated/clientset/typed/authorization/v1beta1",
+                "pkg/client/clientset_generated/clientset/typed/autoscaling/v1",
+                "pkg/client/clientset_generated/clientset/typed/batch/v1",
+                "pkg/client/clientset_generated/clientset/typed/batch/v2alpha1",
+                "pkg/client/clientset_generated/clientset/typed/certificates/v1beta1",
+                "pkg/client/clientset_generated/clientset/typed/core/v1",
+                "pkg/client/clientset_generated/clientset/typed/extensions/v1beta1",
+                "pkg/client/clientset_generated/clientset/typed/policy/v1beta1",
+                "pkg/client/clientset_generated/clientset/typed/rbac/v1alpha1",
+                "pkg/client/clientset_generated/clientset/typed/rbac/v1beta1",
+                "pkg/client/clientset_generated/clientset/typed/storage/v1beta1",
+                "pkg/client/record",
+                "pkg/client/typed/discovery",
+                "pkg/client/unversioned/remotecommand",
+                "pkg/cloudprovider",
+                "pkg/kubelet/api/v1alpha1/runtime",
+                "pkg/kubelet/container",
+                "pkg/kubelet/server/portforward",
+                "pkg/kubelet/server/remotecommand",
+                "pkg/kubelet/server/streaming",
+                "pkg/kubelet/util/format",
+                "pkg/kubelet/util/ioutils",
+                "pkg/util",
+                "pkg/util/chmod",
+                "pkg/util/chown",
+                "pkg/util/exec",
+                "pkg/util/hash",
+                "pkg/util/httpstream",
+                "pkg/util/httpstream/spdy",
+                "pkg/util/interrupt",
+                "pkg/util/intstr",
+                "pkg/util/io",
+                "pkg/util/labels",
+                "pkg/util/mount",
+                "pkg/util/parsers",
+                "pkg/util/strategicpatch",
+                "pkg/util/term",
+                "pkg/version",
+                "pkg/volume",
+                "pkg/volume/util",
+                "third_party/forked/golang/expansion",
+                "third_party/forked/golang/json",
+                "third_party/forked/golang/netutil"
             ]
         }
     ]
@@ -1,11 +1,103 @@
 package server
 
 import (
+	"fmt"
+	"io"
+	"os"
+	"os/exec"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/kubernetes-incubator/cri-o/oci"
 	"golang.org/x/net/context"
 	pb "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
+	kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
+	utilexec "k8s.io/kubernetes/pkg/util/exec"
+	"k8s.io/kubernetes/pkg/util/term"
 )
 
 // Exec prepares a streaming endpoint to execute a command in the container.
 func (s *Server) Exec(ctx context.Context, req *pb.ExecRequest) (*pb.ExecResponse, error) {
-	return &pb.ExecResponse{}, nil
+	logrus.Debugf("ExecRequest %+v", req)
+
+	resp, err := s.GetExec(req)
+	if err != nil {
+		return nil, fmt.Errorf("unable to prepare exec endpoint")
+	}
+
+	return resp, nil
 }
+
+// Exec endpoint for streaming.Runtime
+func (ss streamService) Exec(containerID string, cmd []string, stdin io.Reader, stdout, stderr io.WriteCloser, tty bool, resize <-chan term.Size) error {
+	fmt.Println(containerID, cmd, stdin, stdout, stderr, tty, resize)
+	c := ss.runtimeServer.state.containers.Get(containerID)
+
+	if err := ss.runtimeServer.runtime.UpdateStatus(c); err != nil {
+		return err
+	}
+
+	cState := ss.runtimeServer.runtime.ContainerStatus(c)
+	if !(cState.Status == oci.ContainerStateRunning || cState.Status == oci.ContainerStateCreated) {
+		return fmt.Errorf("container is not created or running")
+	}
+
+	args := []string{"exec"}
+	if tty {
+		args = append(args, "-t")
+	}
+	args = append(args, c.Name())
+	args = append(args, cmd...)
+	execCmd := exec.Command(ss.runtimeServer.runtime.Path(c), args...)
+	var cmdErr error
+	if tty {
+		p, err := kubecontainer.StartPty(execCmd)
+		if err != nil {
+			return err
+		}
+		defer p.Close()
+
+		// make sure to close the stdout stream
+		defer stdout.Close()
+
+		kubecontainer.HandleResizing(resize, func(size term.Size) {
+			term.SetSize(p.Fd(), size)
+		})
+
+		if stdin != nil {
+			go io.Copy(p, stdin)
+		}
+
+		if stdout != nil {
+			go io.Copy(stdout, p)
+		}
+
+		cmdErr = execCmd.Wait()
+	} else {
+		if stdin != nil {
+			// Use an os.Pipe here as it returns true *os.File objects.
+			// This way, if you run 'kubectl exec <pod> -i bash' (no tty) and type 'exit',
+			// the call below to execCmd.Run() can unblock because its Stdin is the read half
+			// of the pipe.
+			r, w, err := os.Pipe()
+			if err != nil {
+				return err
+			}
+			go io.Copy(w, stdin)
+
+			execCmd.Stdin = r
+		}
+		if stdout != nil {
+			execCmd.Stdout = stdout
+		}
+		if stderr != nil {
+			execCmd.Stderr = stderr
+		}
+
+		cmdErr = execCmd.Run()
+	}
+
+	if exitErr, ok := cmdErr.(*exec.ExitError); ok {
+		return &utilexec.ExitErrorWrapper{ExitError: exitErr}
+	}
+	return cmdErr
+}
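The non-TTY stdin path above leans on a detail of os/exec: when Cmd.Stdin is a real *os.File (such as the read end of os.Pipe), the file descriptor is handed straight to the child process, so Run() is not left waiting on a copying goroutine after the user types exit. A standalone sketch of that behavior (my own example, not code from the PR; it assumes a POSIX system with `cat` available):

```go
package main

import (
	"io"
	"os"
	"os/exec"
	"strings"
)

func main() {
	// The read end r is a true *os.File, so os/exec passes it directly
	// to the child as fd 0 instead of spawning a copy goroutine.
	r, w, err := os.Pipe()
	if err != nil {
		panic(err)
	}
	cmd := exec.Command("cat") // stand-in for the runtime exec binary
	cmd.Stdin = r
	cmd.Stdout = os.Stdout
	go func() {
		io.Copy(w, strings.NewReader("hello through the pipe\n"))
		w.Close() // EOF on the write end lets cat exit, unblocking Run
	}()
	if err := cmd.Run(); err != nil {
		panic(err)
	}
}
```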
@@ -21,6 +21,7 @@ import (
 	rspec "github.com/opencontainers/runtime-spec/specs-go"
 	"github.com/opencontainers/selinux/go-selinux/label"
 	pb "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
+	"k8s.io/kubernetes/pkg/kubelet/server/streaming"
 )
 
 const (
@@ -28,6 +29,13 @@ const (
 	shutdownFile = "/var/lib/ocid/ocid.shutdown"
 )
 
+// streamService implements streaming.Runtime.
+type streamService struct {
+	runtimeServer *Server // needed by Exec() endpoint
+	streamServer  streaming.Server
+	streaming.Runtime
+}
+
 // Server implements the RuntimeService and ImageService
 type Server struct {
 	config Config
@@ -50,6 +58,23 @@ type Server struct {
 
 	appArmorEnabled bool
 	appArmorProfile string
+
+	stream streamService
 }
 
+// GetExec returns exec stream request
+func (s *Server) GetExec(req *pb.ExecRequest) (*pb.ExecResponse, error) {
+	return s.stream.streamServer.GetExec(req)
+}
+
+// GetAttach returns attach stream request
+func (s *Server) GetAttach(req *pb.AttachRequest) (*pb.AttachResponse, error) {
+	return s.stream.streamServer.GetAttach(req)
+}
+
+// GetPortForward returns port forward stream request
+func (s *Server) GetPortForward(req *pb.PortForwardRequest) (*pb.PortForwardResponse, error) {
+	return s.stream.streamServer.GetPortForward(req)
+}
+
 func (s *Server) loadContainer(id string) error {
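GetExec, GetAttach and GetPortForward are thin delegations: the embedded streaming server registers each request and hands back a URL that the kubelet relays to the client. A hedged sketch of the round trip (my illustration, assuming an already-constructed *Server; the container ID and command are placeholders):

```go
// Hypothetical round trip against a constructed *Server.
func demoGetExec(s *Server) {
	req := &pb.ExecRequest{
		ContainerId: "abc123", // placeholder ID
		Cmd:         []string{"sh", "-c", "echo hi"},
	}
	resp, err := s.GetExec(req)
	if err != nil {
		logrus.Errorf("GetExec failed: %v", err)
		return
	}
	// resp.Url points at the streaming server configured in New()
	// below; a client then attaches to it with a streaming executor,
	// as the client-side `exec` command earlier in this diff does.
	fmt.Println(resp.Url)
}
```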
@@ -501,20 +526,20 @@ func New(config *Config) (*Server, error) {
 		appArmorProfile: config.ApparmorProfile,
 	}
 	if s.seccompEnabled {
-		seccompProfile, err := ioutil.ReadFile(config.SeccompProfile)
-		if err != nil {
-			return nil, fmt.Errorf("opening seccomp profile (%s) failed: %v", config.SeccompProfile, err)
+		seccompProfile, fileErr := ioutil.ReadFile(config.SeccompProfile)
+		if fileErr != nil {
+			return nil, fmt.Errorf("opening seccomp profile (%s) failed: %v", config.SeccompProfile, fileErr)
 		}
 		var seccompConfig seccomp.Seccomp
-		if err := json.Unmarshal(seccompProfile, &seccompConfig); err != nil {
-			return nil, fmt.Errorf("decoding seccomp profile failed: %v", err)
+		if jsonErr := json.Unmarshal(seccompProfile, &seccompConfig); jsonErr != nil {
+			return nil, fmt.Errorf("decoding seccomp profile failed: %v", jsonErr)
 		}
 		s.seccompProfile = seccompConfig
 	}
 
 	if s.appArmorEnabled && s.appArmorProfile == apparmor.DefaultApparmorProfile {
-		if err := apparmor.EnsureDefaultApparmorProfile(); err != nil {
-			return nil, fmt.Errorf("ensuring the default apparmor profile is installed failed: %v", err)
+		if apparmorErr := apparmor.EnsureDefaultApparmorProfile(); apparmorErr != nil {
+			return nil, fmt.Errorf("ensuring the default apparmor profile is installed failed: %v", apparmorErr)
 		}
 	}
 
@@ -529,6 +554,20 @@ func New(config *Config) (*Server, error) {
 	s.restore()
 	s.cleanupSandboxesOnShutdown()
 
+	// Prepare streaming server
+	streamServerConfig := streaming.DefaultConfig
+	streamServerConfig.Addr = "0.0.0.0:10101"
+	s.stream.runtimeServer = s
+	s.stream.streamServer, err = streaming.NewServer(streamServerConfig, s.stream)
+	if err != nil {
+		return nil, fmt.Errorf("unable to create streaming server")
+	}
+
+	// TODO: Should this be started somewhere else?
+	go func() {
+		s.stream.streamServer.Start(true)
+	}()
+
 	logrus.Debugf("sandboxes: %v", s.state.sandboxes)
 	logrus.Debugf("containers: %v", s.state.containers)
 	return s, nil
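streaming.Server.Start blocks while it serves (the true argument asks it to stay up), which is why New launches it on its own goroutine; as written, any error it returns is dropped. A small hedged variant that surfaces the error (an illustration of one option, not what the PR does):

```go
go func() {
	// Start(true) blocks serving the exec/attach/portforward
	// endpoints until the streaming server is shut down.
	if err := s.stream.streamServer.Start(true); err != nil {
		logrus.Errorf("streaming server error: %v", err)
	}
}()
```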
16
vendor/cloud.google.com/go/.travis.yml
generated
vendored
Normal file
16
vendor/cloud.google.com/go/.travis.yml
generated
vendored
Normal file
|
@ -0,0 +1,16 @@
|
|||
sudo: false
|
||||
language: go
|
||||
go:
|
||||
- 1.6
|
||||
- 1.7
|
||||
- 1.8
|
||||
install:
|
||||
- go get -v cloud.google.com/go/...
|
||||
script:
|
||||
- openssl aes-256-cbc -K $encrypted_912ff8fa81ad_key -iv $encrypted_912ff8fa81ad_iv -in key.json.enc -out key.json -d
|
||||
- GCLOUD_TESTS_GOLANG_PROJECT_ID="dulcet-port-762" GCLOUD_TESTS_GOLANG_KEY="$(pwd)/key.json"
|
||||
go test -race -v cloud.google.com/go/...
|
||||
env:
|
||||
matrix:
|
||||
# The GCLOUD_TESTS_API_KEY environment variable.
|
||||
secure: VdldogUOoubQ60LhuHJ+g/aJoBiujkSkWEWl79Zb8cvQorcQbxISS+JsOOp4QkUOU4WwaHAm8/3pIH1QMWOR6O78DaLmDKi5Q4RpkVdCpUXy+OAfQaZIcBsispMrjxLXnqFjo9ELnrArfjoeCTzaX0QTCfwQwVmigC8rR30JBKI=
|
15
vendor/cloud.google.com/go/AUTHORS
generated
vendored
Normal file
15
vendor/cloud.google.com/go/AUTHORS
generated
vendored
Normal file
|
@ -0,0 +1,15 @@
|
|||
# This is the official list of cloud authors for copyright purposes.
|
||||
# This file is distinct from the CONTRIBUTORS files.
|
||||
# See the latter for an explanation.
|
||||
|
||||
# Names should be added to this file as:
|
||||
# Name or Organization <email address>
|
||||
# The email address is not required for organizations.
|
||||
|
||||
Filippo Valsorda <hi@filippo.io>
|
||||
Google Inc.
|
||||
Ingo Oeser <nightlyone@googlemail.com>
|
||||
Palm Stone Games, Inc.
|
||||
Paweł Knap <pawelknap88@gmail.com>
|
||||
Péter Szilágyi <peterke@gmail.com>
|
||||
Tyler Treat <ttreat31@gmail.com>
|
132
vendor/cloud.google.com/go/CONTRIBUTING.md
generated
vendored
Normal file
132
vendor/cloud.google.com/go/CONTRIBUTING.md
generated
vendored
Normal file
|
@ -0,0 +1,132 @@
|
|||
# Contributing
|
||||
|
||||
1. Sign one of the contributor license agreements below.
|
||||
1. `go get golang.org/x/review/git-codereview` to install the code reviewing tool.
|
||||
1. You will need to ensure that your `GOBIN` directory (by default
|
||||
`$GOPATH/bin`) is in your `PATH` so that git can find the command.
|
||||
1. Get the cloud package by running `go get -d cloud.google.com/go`.
|
||||
1. If you have already checked out the source, make sure that the remote git
|
||||
origin is https://code.googlesource.com/gocloud:
|
||||
|
||||
git remote set-url origin https://code.googlesource.com/gocloud
|
||||
1. Make sure your auth is configured correctly by visiting
|
||||
https://code.googlesource.com, clicking "Generate Password", and following
|
||||
the directions.
|
||||
1. Make changes and create a change by running `git codereview change <name>`,
|
||||
provide a commit message, and use `git codereview mail` to create a Gerrit CL.
|
||||
1. Keep amending to the change with `git codereview change` and mail as your receive
|
||||
feedback. Each new mailed amendment will create a new patch set for your change in Gerrit.
|
||||
|
||||
## Integration Tests
|
||||
|
||||
In addition to the unit tests, you may run the integration test suite.
|
||||
|
||||
To run the integrations tests, creating and configuration of a project in the
|
||||
Google Developers Console is required.
|
||||
|
||||
After creating a project, you must [create a service account](https://developers.google.com/identity/protocols/OAuth2ServiceAccount#creatinganaccount).
|
||||
Ensure the project-level **Owner** [IAM role](console.cloud.google.com/iam-admin/iam/project)
|
||||
(or **Editor** and **Logs Configuration Writer** roles) are added to the
|
||||
service account.
|
||||
|
||||
Once you create a project, set the following environment variables to be able to
|
||||
run the against the actual APIs.
|
||||
|
||||
- **GCLOUD_TESTS_GOLANG_PROJECT_ID**: Developers Console project's ID (e.g. bamboo-shift-455)
|
||||
- **GCLOUD_TESTS_GOLANG_KEY**: The path to the JSON key file.
|
||||
- **GCLOUD_TESTS_API_KEY**: Your API key.
|
||||
|
||||
Install the [gcloud command-line tool][gcloudcli] to your machine and use it
|
||||
to create some resources used in integration tests.
|
||||
|
||||
From the project's root directory:
|
||||
|
||||
``` sh
|
||||
# Set the default project in your env.
|
||||
$ gcloud config set project $GCLOUD_TESTS_GOLANG_PROJECT_ID
|
||||
|
||||
# Authenticate the gcloud tool with your account.
|
||||
$ gcloud auth login
|
||||
|
||||
# Create the indexes used in the datastore integration tests.
|
||||
$ gcloud preview datastore create-indexes datastore/testdata/index.yaml
|
||||
|
||||
# Create a Google Cloud storage bucket with the same name as your test project,
|
||||
# and with the Stackdriver Logging service account as owner, for the sink
|
||||
# integration tests in logging.
|
||||
$ gsutil mb gs://$GCLOUD_TESTS_GOLANG_PROJECT_ID
|
||||
$ gsutil acl ch -g cloud-logs@google.com:O gs://$GCLOUD_TESTS_GOLANG_PROJECT_ID
|
||||
|
||||
# Create a Spanner instance for the spanner integration tests.
|
||||
$ gcloud beta spanner instances create go-integration-test --config regional-us-central1 --nodes 1 --description 'Instance for go client test'
|
||||
# NOTE: Spanner instances are priced by the node-hour, so you may want to delete
|
||||
# the instance after testing with 'gcloud beta spanner instances delete'.
|
||||
```
|
||||
|
||||
Once you've set the environment variables, you can run the integration tests by
|
||||
running:
|
||||
|
||||
``` sh
|
||||
$ go test -v cloud.google.com/go/...
|
||||
```
|
||||
|
||||
## Contributor License Agreements
|
||||
|
||||
Before we can accept your pull requests you'll need to sign a Contributor
|
||||
License Agreement (CLA):
|
||||
|
||||
- **If you are an individual writing original source code** and **you own the
|
||||
- intellectual property**, then you'll need to sign an [individual CLA][indvcla].
|
||||
- **If you work for a company that wants to allow you to contribute your work**,
|
||||
then you'll need to sign a [corporate CLA][corpcla].
|
||||
|
||||
You can sign these electronically (just scroll to the bottom). After that,
|
||||
we'll be able to accept your pull requests.
|
||||
|
||||
## Contributor Code of Conduct
|
||||
|
||||
As contributors and maintainers of this project,
|
||||
and in the interest of fostering an open and welcoming community,
|
||||
we pledge to respect all people who contribute through reporting issues,
|
||||
posting feature requests, updating documentation,
|
||||
submitting pull requests or patches, and other activities.
|
||||
|
||||
We are committed to making participation in this project
|
||||
a harassment-free experience for everyone,
|
||||
regardless of level of experience, gender, gender identity and expression,
|
||||
sexual orientation, disability, personal appearance,
|
||||
body size, race, ethnicity, age, religion, or nationality.
|
||||
|
||||
Examples of unacceptable behavior by participants include:
|
||||
|
||||
* The use of sexualized language or imagery
|
||||
* Personal attacks
|
||||
* Trolling or insulting/derogatory comments
|
||||
* Public or private harassment
|
||||
* Publishing other's private information,
|
||||
such as physical or electronic
|
||||
addresses, without explicit permission
|
||||
* Other unethical or unprofessional conduct.
|
||||
|
||||
Project maintainers have the right and responsibility to remove, edit, or reject
|
||||
comments, commits, code, wiki edits, issues, and other contributions
|
||||
that are not aligned to this Code of Conduct.
|
||||
By adopting this Code of Conduct,
|
||||
project maintainers commit themselves to fairly and consistently
|
||||
applying these principles to every aspect of managing this project.
|
||||
Project maintainers who do not follow or enforce the Code of Conduct
|
||||
may be permanently removed from the project team.
|
||||
|
||||
This code of conduct applies both within project spaces and in public spaces
|
||||
when an individual is representing the project or its community.
|
||||
|
||||
Instances of abusive, harassing, or otherwise unacceptable behavior
|
||||
may be reported by opening an issue
|
||||
or contacting one or more of the project maintainers.
|
||||
|
||||
This Code of Conduct is adapted from the [Contributor Covenant](http://contributor-covenant.org), version 1.2.0,
|
||||
available at [http://contributor-covenant.org/version/1/2/0/](http://contributor-covenant.org/version/1/2/0/)
|
||||
|
||||
[gcloudcli]: https://developers.google.com/cloud/sdk/gcloud/
|
||||
[indvcla]: https://developers.google.com/open-source/cla/individual
|
||||
[corpcla]: https://developers.google.com/open-source/cla/corporate
|
37
vendor/cloud.google.com/go/CONTRIBUTORS
generated
vendored
Normal file
37
vendor/cloud.google.com/go/CONTRIBUTORS
generated
vendored
Normal file
|
@ -0,0 +1,37 @@
|
|||
# People who have agreed to one of the CLAs and can contribute patches.
|
||||
# The AUTHORS file lists the copyright holders; this file
|
||||
# lists people. For example, Google employees are listed here
|
||||
# but not in AUTHORS, because Google holds the copyright.
|
||||
#
|
||||
# https://developers.google.com/open-source/cla/individual
|
||||
# https://developers.google.com/open-source/cla/corporate
|
||||
#
|
||||
# Names should be added to this file as:
|
||||
# Name <email address>
|
||||
|
||||
# Keep the list alphabetically sorted.
|
||||
|
||||
Alexis Hunt <lexer@google.com>
|
||||
Andreas Litt <andreas.litt@gmail.com>
|
||||
Andrew Gerrand <adg@golang.org>
|
||||
Brad Fitzpatrick <bradfitz@golang.org>
|
||||
Burcu Dogan <jbd@google.com>
|
||||
Dave Day <djd@golang.org>
|
||||
David Sansome <me@davidsansome.com>
|
||||
David Symonds <dsymonds@golang.org>
|
||||
Filippo Valsorda <hi@filippo.io>
|
||||
Glenn Lewis <gmlewis@google.com>
|
||||
Ingo Oeser <nightlyone@googlemail.com>
|
||||
Johan Euphrosine <proppy@google.com>
|
||||
Jonathan Amsterdam <jba@google.com>
|
||||
Luna Duclos <luna.duclos@palmstonegames.com>
|
||||
Magnus Hiie <magnus.hiie@gmail.com>
|
||||
Michael McGreevy <mcgreevy@golang.org>
|
||||
Omar Jarjur <ojarjur@google.com>
|
||||
Paweł Knap <pawelknap88@gmail.com>
|
||||
Péter Szilágyi <peterke@gmail.com>
|
||||
Sarah Adams <shadams@google.com>
|
||||
Thanatat Tamtan <acoshift@gmail.com>
|
||||
Toby Burress <kurin@google.com>
|
||||
Tuo Shan <shantuo@google.com>
|
||||
Tyler Treat <ttreat31@gmail.com>
|
202
vendor/cloud.google.com/go/LICENSE
generated
vendored
Normal file
202
vendor/cloud.google.com/go/LICENSE
generated
vendored
Normal file
|
@ -0,0 +1,202 @@
|
|||
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright 2014 Google Inc.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
528
vendor/cloud.google.com/go/README.md
generated
vendored
Normal file
528
vendor/cloud.google.com/go/README.md
generated
vendored
Normal file
|
@ -0,0 +1,528 @@
|
|||
# Google Cloud for Go
|
||||
|
||||
[![Build Status](https://travis-ci.org/GoogleCloudPlatform/google-cloud-go.svg?branch=master)](https://travis-ci.org/GoogleCloudPlatform/google-cloud-go)
|
||||
[![GoDoc](https://godoc.org/cloud.google.com/go?status.svg)](https://godoc.org/cloud.google.com/go)
|
||||
|
||||
``` go
|
||||
import "cloud.google.com/go"
|
||||
```
|
||||
|
||||
Go packages for Google Cloud Platform services.
|
||||
|
||||
To install the packages on your system,
|
||||
|
||||
```
|
||||
$ go get -u cloud.google.com/go/...
|
||||
```
|
||||
|
||||
**NOTE:** These packages are under development, and may occasionally make
|
||||
backwards-incompatible changes.
|
||||
|
||||
**NOTE:** Github repo is a mirror of [https://code.googlesource.com/gocloud](https://code.googlesource.com/gocloud).
|
||||
|
||||
* [News](#news)
|
||||
* [Supported APIs](#supported-apis)
|
||||
* [Go Versions Supported](#go-versions-supported)
|
||||
* [Authorization](#authorization)
|
||||
* [Cloud Datastore](#cloud-datastore-)
|
||||
* [Cloud Storage](#cloud-storage-)
|
||||
* [Cloud Pub/Sub](#cloud-pub-sub-)
|
||||
* [Cloud BigQuery](#cloud-bigquery-)
|
||||
* [Stackdriver Logging](#stackdriver-logging-)
|
||||
* [Cloud Spanner](#cloud-spanner-)
|
||||
|
||||
|
||||
## News
|
||||
|
||||
_February 14, 2017_
|
||||
|
||||
Release of a client library for Spanner. See
|
||||
the
|
||||
[blog post](https://cloudplatform.googleblog.com/2017/02/introducing-Cloud-Spanner-a-global-database-service-for-mission-critical-applications.html).
|
||||
|
||||
Note that although the Spanner service is beta, the Go client library is alpha.
|
||||
|
||||
_December 12, 2016_
|
||||
|
||||
Beta release of BigQuery, DataStore, Logging and Storage. See the
|
||||
[blog post](https://cloudplatform.googleblog.com/2016/12/announcing-new-google-cloud-client.html).
|
||||
|
||||
Also, BigQuery now supports structs. Read a row directly into a struct with
|
||||
`RowIterator.Next`, and upload a row directly from a struct with `Uploader.Put`.
|
||||
You can also use field tags. See the [package documentation][cloud-bigquery-ref]
|
||||
for details.
|
||||
|
||||
_December 5, 2016_
|
||||
|
||||
More changes to BigQuery:
|
||||
|
||||
* The `ValueList` type was removed. It is no longer necessary. Instead of
|
||||
```go
|
||||
var v ValueList
|
||||
... it.Next(&v) ..
|
||||
```
|
||||
use
|
||||
|
||||
```go
|
||||
var v []Value
|
||||
... it.Next(&v) ...
|
||||
```
|
||||
|
||||
* Previously, repeatedly calling `RowIterator.Next` on the same `[]Value` or
|
||||
`ValueList` would append to the slice. Now each call resets the size to zero first.
|
||||
|
||||
* Schema inference will infer the SQL type BYTES for a struct field of
|
||||
type []byte. Previously it inferred STRING.
|
||||
|
||||
* The types `uint`, `uint64` and `uintptr` are no longer supported in schema
|
||||
inference. BigQuery's integer type is INT64, and those types may hold values
|
||||
that are not correctly represented in a 64-bit signed integer.
|
||||
|
||||
* The SQL types DATE, TIME and DATETIME are now supported. They correspond to
|
||||
the `Date`, `Time` and `DateTime` types in the new `cloud.google.com/go/civil`
|
||||
package.
|
||||
|
||||
_November 17, 2016_
|
||||
|
||||
Change to BigQuery: values from INTEGER columns will now be returned as int64,
|
||||
not int. This will avoid errors arising from large values on 32-bit systems.
|
||||
|
||||
_November 8, 2016_
|
||||
|
||||
New datastore feature: datastore now encodes your nested Go structs as Entity values,
|
||||
instead of a flattened list of the embedded struct's fields.
|
||||
This means that you may now have twice-nested slices, eg.
|
||||
```go
|
||||
type State struct {
|
||||
Cities []struct{
|
||||
Populations []int
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
See [the announcement](https://groups.google.com/forum/#!topic/google-api-go-announce/79jtrdeuJAg) for
|
||||
more details.
|
||||
|
||||
_November 8, 2016_
|
||||
|
||||
Breaking changes to datastore: contexts no longer hold namespaces; instead you
|
||||
must set a key's namespace explicitly. Also, key functions have been changed
|
||||
and renamed.
|
||||
|
||||
* The WithNamespace function has been removed. To specify a namespace in a Query, use the Query.Namespace method:
|
||||
```go
|
||||
q := datastore.NewQuery("Kind").Namespace("ns")
|
||||
```
|
||||
|
||||
* All the fields of Key are exported. That means you can construct any Key with a struct literal:
|
||||
```go
|
||||
k := &Key{Kind: "Kind", ID: 37, Namespace: "ns"}
|
||||
```
|
||||
|
||||
* As a result of the above, the Key methods Kind, ID, d.Name, Parent, SetParent and Namespace have been removed.
|
||||
|
||||
* `NewIncompleteKey` has been removed, replaced by `IncompleteKey`. Replace
|
||||
```go
|
||||
NewIncompleteKey(ctx, kind, parent)
|
||||
```
|
||||
with
|
||||
```go
|
||||
IncompleteKey(kind, parent)
|
||||
```
|
||||
and if you do use namespaces, make sure you set the namespace on the returned key.
|
||||
|
||||
* `NewKey` has been removed, replaced by `NameKey` and `IDKey`. Replace
|
||||
```go
|
||||
NewKey(ctx, kind, name, 0, parent)
|
||||
NewKey(ctx, kind, "", id, parent)
|
||||
```
|
||||
with
|
||||
```go
|
||||
NameKey(kind, name, parent)
|
||||
IDKey(kind, id, parent)
|
||||
```
|
||||
and if you do use namespaces, make sure you set the namespace on the returned key.
|
||||
|
||||
* The `Done` variable has been removed. Replace `datastore.Done` with `iterator.Done`, from the package `google.golang.org/api/iterator`.
|
||||
|
||||
* The `Client.Close` method will have a return type of error. It will return the result of closing the underlying gRPC connection.
|
||||
|
||||
See [the announcement](https://groups.google.com/forum/#!topic/google-api-go-announce/hqXtM_4Ix-0) for
|
||||
more details.
|
||||
|
||||
_October 27, 2016_
|
||||
|
||||
Breaking change to bigquery: `NewGCSReference` is now a function,
|
||||
not a method on `Client`.
|
||||
|
||||
New bigquery feature: `Table.LoaderFrom` now accepts a `ReaderSource`, enabling
|
||||
loading data into a table from a file or any `io.Reader`.
|
||||
|
||||
_October 21, 2016_
|
||||
|
||||
Breaking change to pubsub: removed `pubsub.Done`.
|
||||
|
||||
Use `iterator.Done` instead, where `iterator` is the package
|
||||
`google.golang.org/api/iterator`.
|
||||
|
||||
|
||||
[Older news](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/master/old-news.md)
|
||||
|
||||
## Supported APIs

Google API                     | Status       | Package
-------------------------------|--------------|-----------------------------------------------------------
[Datastore][cloud-datastore]   | beta         | [`cloud.google.com/go/datastore`][cloud-datastore-ref]
[Storage][cloud-storage]       | beta         | [`cloud.google.com/go/storage`][cloud-storage-ref]
[Bigtable][cloud-bigtable]     | beta         | [`cloud.google.com/go/bigtable`][cloud-bigtable-ref]
[BigQuery][cloud-bigquery]     | beta         | [`cloud.google.com/go/bigquery`][cloud-bigquery-ref]
[Logging][cloud-logging]       | beta         | [`cloud.google.com/go/logging`][cloud-logging-ref]
[Pub/Sub][cloud-pubsub]        | alpha        | [`cloud.google.com/go/pubsub`][cloud-pubsub-ref]
[Vision][cloud-vision]         | beta         | [`cloud.google.com/go/vision`][cloud-vision-ref]
[Language][cloud-language]     | alpha        | [`cloud.google.com/go/language/apiv1`][cloud-language-ref]
[Speech][cloud-speech]         | alpha        | [`cloud.google.com/go/speech/apiv1beta1`][cloud-speech-ref]
[Spanner][cloud-spanner]       | alpha        | [`cloud.google.com/go/spanner`][cloud-spanner-ref]

> **Alpha status**: the API is still being actively developed. As a
> result, it might change in backward-incompatible ways and is not recommended
> for production use.
>
> **Beta status**: the API is largely complete, but still has outstanding
> features and bugs to be addressed. There may be minor backwards-incompatible
> changes where necessary.
>
> **Stable status**: the API is mature and ready for production use. We will
> continue addressing bugs and feature requests.

Documentation and examples are available at
https://godoc.org/cloud.google.com/go

Visit or join the
[google-api-go-announce group](https://groups.google.com/forum/#!forum/google-api-go-announce)
for updates on these packages.
## Go Versions Supported

We support the two most recent major versions of Go. If Google App Engine uses
an older version, we support that as well. You can see which versions are
currently supported by looking at the lines following `go:` in
[`.travis.yml`](.travis.yml).
## Authorization

By default, each API will use [Google Application Default Credentials][default-creds]
for authorization credentials used in calling the API endpoints. This will allow your
application to run in many environments without requiring explicit configuration.

```go
client, err := storage.NewClient(ctx)
```

To authorize using a
[JSON key file](https://cloud.google.com/iam/docs/managing-service-account-keys),
pass
[`option.WithServiceAccountFile`](https://godoc.org/google.golang.org/api/option#WithServiceAccountFile)
to the `NewClient` function of the desired package. For example:

```go
client, err := storage.NewClient(ctx, option.WithServiceAccountFile("path/to/keyfile.json"))
```

You can exert more control over authorization by using the
[`golang.org/x/oauth2`](https://godoc.org/golang.org/x/oauth2) package to
create an `oauth2.TokenSource`. Then pass
[`option.WithTokenSource`](https://godoc.org/google.golang.org/api/option#WithTokenSource)
to the `NewClient` function:

```go
tokenSource := ...
client, err := storage.NewClient(ctx, option.WithTokenSource(tokenSource))
```
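One way to construct such a token source, as a sketch, is from service-account JSON via the `golang.org/x/oauth2/google` package (the key path and scope here are placeholders):

```go
jsonKey, err := ioutil.ReadFile("path/to/keyfile.json") // placeholder path
if err != nil {
    // TODO: Handle error.
}
conf, err := google.JWTConfigFromJSON(jsonKey, storage.ScopeReadWrite)
if err != nil {
    // TODO: Handle error.
}
client, err := storage.NewClient(ctx, option.WithTokenSource(conf.TokenSource(ctx)))
```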
## Cloud Datastore [![GoDoc](https://godoc.org/cloud.google.com/go/datastore?status.svg)](https://godoc.org/cloud.google.com/go/datastore)

- [About Cloud Datastore][cloud-datastore]
- [Activating the API for your project][cloud-datastore-activation]
- [API documentation][cloud-datastore-docs]
- [Go client documentation](https://godoc.org/cloud.google.com/go/datastore)
- [Complete sample program](https://github.com/GoogleCloudPlatform/golang-samples/tree/master/datastore/tasks)

### Example Usage

First create a `datastore.Client` to use throughout your application:

```go
client, err := datastore.NewClient(ctx, "my-project-id")
if err != nil {
    log.Fatal(err)
}
```

Then use that client to interact with the API:

```go
type Post struct {
    Title       string
    Body        string `datastore:",noindex"`
    PublishedAt time.Time
}
keys := []*datastore.Key{
    datastore.NewKey(ctx, "Post", "post1", 0, nil),
    datastore.NewKey(ctx, "Post", "post2", 0, nil),
}
posts := []*Post{
    {Title: "Post 1", Body: "...", PublishedAt: time.Now()},
    {Title: "Post 2", Body: "...", PublishedAt: time.Now()},
}
if _, err := client.PutMulti(ctx, keys, posts); err != nil {
    log.Fatal(err)
}
```
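Reading the entities back uses the same keys; a short sketch:

```go
// Fetch both posts in one call; the slice entries are filled in place.
got := make([]*Post, len(keys))
if err := client.GetMulti(ctx, keys, got); err != nil {
    log.Fatal(err)
}
for _, p := range got {
    fmt.Println(p.Title)
}
```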
## Cloud Storage [![GoDoc](https://godoc.org/cloud.google.com/go/storage?status.svg)](https://godoc.org/cloud.google.com/go/storage)

- [About Cloud Storage][cloud-storage]
- [API documentation][cloud-storage-docs]
- [Go client documentation](https://godoc.org/cloud.google.com/go/storage)
- [Complete sample programs](https://github.com/GoogleCloudPlatform/golang-samples/tree/master/storage)

### Example Usage

First create a `storage.Client` to use throughout your application:

```go
client, err := storage.NewClient(ctx)
if err != nil {
    log.Fatal(err)
}
```

```go
// Read the contents of object1 from the bucket.
rc, err := client.Bucket("bucket").Object("object1").NewReader(ctx)
if err != nil {
    log.Fatal(err)
}
defer rc.Close()
body, err := ioutil.ReadAll(rc)
if err != nil {
    log.Fatal(err)
}
```
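Writing an object is symmetric: obtain a writer, write the data, and check the error returned by `Close`, which completes the upload. A minimal sketch:

```go
// Write "hello world" to object2 in the bucket.
wc := client.Bucket("bucket").Object("object2").NewWriter(ctx)
if _, err := wc.Write([]byte("hello world")); err != nil {
    log.Fatal(err)
}
// The upload is not complete until Close returns without error.
if err := wc.Close(); err != nil {
    log.Fatal(err)
}
```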
## Cloud Pub/Sub [![GoDoc](https://godoc.org/cloud.google.com/go/pubsub?status.svg)](https://godoc.org/cloud.google.com/go/pubsub)

- [About Cloud Pub/Sub][cloud-pubsub]
- [API documentation][cloud-pubsub-docs]
- [Go client documentation](https://godoc.org/cloud.google.com/go/pubsub)
- [Complete sample programs](https://github.com/GoogleCloudPlatform/golang-samples/tree/master/pubsub)

### Example Usage

First create a `pubsub.Client` to use throughout your application:

```go
client, err := pubsub.NewClient(ctx, "project-id")
if err != nil {
    log.Fatal(err)
}
```

Then use the client to publish and subscribe:

```go
// Publish "hello world" on topic1.
topic := client.Topic("topic1")
msgIDs, err := topic.Publish(ctx, &pubsub.Message{
    Data: []byte("hello world"),
})
if err != nil {
    log.Fatal(err)
}

// Create an iterator to pull messages via subscription1.
it, err := client.Subscription("subscription1").Pull(ctx)
if err != nil {
    log.Fatal(err)
}
defer it.Stop()

// Consume N messages from the iterator.
for i := 0; i < N; i++ {
    msg, err := it.Next()
    if err == iterator.Done {
        break
    }
    if err != nil {
        log.Fatalf("Failed to retrieve message: %v", err)
    }

    fmt.Printf("Message %d: %s\n", i, msg.Data)
    msg.Done(true) // Acknowledge that we've consumed the message.
}
```
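`Topic` and `Subscription` values are references that may or may not exist on the server. As a sketch, you can check for existence before publishing:

```go
// Exists asks the service whether topic1 has been created.
ok, err := topic.Exists(ctx)
if err != nil {
    log.Fatal(err)
}
if !ok {
    log.Fatalf("topic %q does not exist", "topic1")
}
```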
## Cloud BigQuery [![GoDoc](https://godoc.org/cloud.google.com/go/bigquery?status.svg)](https://godoc.org/cloud.google.com/go/bigquery)

- [About Cloud BigQuery][cloud-bigquery]
- [API documentation][cloud-bigquery-docs]
- [Go client documentation][cloud-bigquery-ref]
- [Complete sample programs](https://github.com/GoogleCloudPlatform/golang-samples/tree/master/bigquery)

### Example Usage

First create a `bigquery.Client` to use throughout your application:

```go
c, err := bigquery.NewClient(ctx, "my-project-ID")
if err != nil {
    // TODO: Handle error.
}
```

Then use that client to interact with the API:

```go
// Construct a query.
q := c.Query(`
    SELECT year, SUM(number)
    FROM [bigquery-public-data:usa_names.usa_1910_2013]
    WHERE name = "William"
    GROUP BY year
    ORDER BY year
`)
// Execute the query.
it, err := q.Read(ctx)
if err != nil {
    // TODO: Handle error.
}
// Iterate through the results.
for {
    var values []bigquery.Value
    err := it.Next(&values)
    if err == iterator.Done {
        break
    }
    if err != nil {
        // TODO: Handle error.
    }
    fmt.Println(values)
}
```
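You can also load each row into a struct whose exported fields match the query columns, as the package documentation later in this commit shows; note the query above would need an alias such as `SUM(number) as num` for the second column. A sketch:

```go
type Count struct {
    Year int
    Num  int
}
for {
    var count Count
    err := it.Next(&count)
    if err == iterator.Done {
        break
    }
    if err != nil {
        // TODO: Handle error.
    }
    fmt.Println(count)
}
```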
## Stackdriver Logging [![GoDoc](https://godoc.org/cloud.google.com/go/logging?status.svg)](https://godoc.org/cloud.google.com/go/logging)

- [About Stackdriver Logging][cloud-logging]
- [API documentation][cloud-logging-docs]
- [Go client documentation][cloud-logging-ref]
- [Complete sample programs](https://github.com/GoogleCloudPlatform/golang-samples/tree/master/logging)

### Example Usage

First create a `logging.Client` to use throughout your application:

```go
ctx := context.Background()
client, err := logging.NewClient(ctx, "my-project")
if err != nil {
    // TODO: Handle error.
}
```

Usually, you'll want to add log entries to a buffer to be periodically flushed
(automatically and asynchronously) to the Stackdriver Logging service.

```go
logger := client.Logger("my-log")
logger.Log(logging.Entry{Payload: "something happened!"})
```

Close your client before your program exits, to flush any buffered log entries.

```go
err = client.Close()
if err != nil {
    // TODO: Handle error.
}
```
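If an entry must be written before the program continues (for example, just before exiting on a fatal condition), the logger also supports synchronous writes; a small sketch:

```go
// LogSync blocks until the entry has been sent to the service.
if err := client.Logger("my-log").LogSync(ctx, logging.Entry{Payload: "shutting down"}); err != nil {
    // TODO: Handle error.
}
```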
## Cloud Spanner [![GoDoc](https://godoc.org/cloud.google.com/go/spanner?status.svg)](https://godoc.org/cloud.google.com/go/spanner)

- [About Cloud Spanner][cloud-spanner]
- [API documentation][cloud-spanner-docs]
- [Go client documentation](https://godoc.org/cloud.google.com/go/spanner)

### Example Usage

First create a `spanner.Client` to use throughout your application:

```go
client, err := spanner.NewClient(ctx, "projects/P/instances/I/databases/D")
if err != nil {
    log.Fatal(err)
}
```

```go
// Simple reads and writes.
_, err := client.Apply(ctx, []*spanner.Mutation{
    spanner.Insert("Users",
        []string{"name", "email"},
        []interface{}{"alice", "a@example.com"})})
if err != nil {
    log.Fatal(err)
}
row, err := client.Single().ReadRow(ctx, "Users",
    spanner.Key{"alice"}, []string{"email"})
if err != nil {
    log.Fatal(err)
}
```
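SQL queries run through the same client; a sketch reading the row back by query, assuming the `Users` table from above (`iterator` here is `google.golang.org/api/iterator`):

```go
iter := client.Single().Query(ctx, spanner.NewStatement("SELECT name, email FROM Users"))
defer iter.Stop()
for {
    row, err := iter.Next()
    if err == iterator.Done {
        break
    }
    if err != nil {
        log.Fatal(err)
    }
    var name, email string
    if err := row.Columns(&name, &email); err != nil {
        log.Fatal(err)
    }
    fmt.Println(name, email)
}
```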
## Contributing

Contributions are welcome. Please see the
[CONTRIBUTING](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/master/CONTRIBUTING.md)
document for details. We're using Gerrit for our code reviews. Please don't open pull
requests against this repo; new pull requests will be automatically closed.

Please note that this project is released with a Contributor Code of Conduct.
By participating in this project you agree to abide by its terms.
See [Contributor Code of Conduct](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/master/CONTRIBUTING.md#contributor-code-of-conduct)
for more information.

[cloud-datastore]: https://cloud.google.com/datastore/
[cloud-datastore-ref]: https://godoc.org/cloud.google.com/go/datastore
[cloud-datastore-docs]: https://cloud.google.com/datastore/docs
[cloud-datastore-activation]: https://cloud.google.com/datastore/docs/activate

[cloud-pubsub]: https://cloud.google.com/pubsub/
[cloud-pubsub-ref]: https://godoc.org/cloud.google.com/go/pubsub
[cloud-pubsub-docs]: https://cloud.google.com/pubsub/docs

[cloud-storage]: https://cloud.google.com/storage/
[cloud-storage-ref]: https://godoc.org/cloud.google.com/go/storage
[cloud-storage-docs]: https://cloud.google.com/storage/docs
[cloud-storage-create-bucket]: https://cloud.google.com/storage/docs/cloud-console#_creatingbuckets

[cloud-bigtable]: https://cloud.google.com/bigtable/
[cloud-bigtable-ref]: https://godoc.org/cloud.google.com/go/bigtable

[cloud-bigquery]: https://cloud.google.com/bigquery/
[cloud-bigquery-docs]: https://cloud.google.com/bigquery/docs
[cloud-bigquery-ref]: https://godoc.org/cloud.google.com/go/bigquery

[cloud-logging]: https://cloud.google.com/logging/
[cloud-logging-docs]: https://cloud.google.com/logging/docs
[cloud-logging-ref]: https://godoc.org/cloud.google.com/go/logging

[cloud-vision]: https://cloud.google.com/vision/
[cloud-vision-ref]: https://godoc.org/cloud.google.com/go/vision

[cloud-language]: https://cloud.google.com/natural-language
[cloud-language-ref]: https://godoc.org/cloud.google.com/go/language/apiv1

[cloud-speech]: https://cloud.google.com/speech
[cloud-speech-ref]: https://godoc.org/cloud.google.com/go/speech/apiv1beta1

[cloud-spanner]: https://cloud.google.com/spanner/
[cloud-spanner-ref]: https://godoc.org/cloud.google.com/go/spanner
[cloud-spanner-docs]: https://cloud.google.com/spanner/docs

[default-creds]: https://developers.google.com/identity/protocols/application-default-credentials
32 vendor/cloud.google.com/go/appveyor.yml generated vendored Normal file
@@ -0,0 +1,32 @@
# This file configures AppVeyor (http://www.appveyor.com),
# a Windows-based CI service similar to Travis.

# Identifier for this run
version: "{build}"

# Clone the repo into this path, which conforms to the standard
# Go workspace structure.
clone_folder: c:\gopath\src\cloud.google.com\go

environment:
  GOPATH: c:\gopath
  GCLOUD_TESTS_GOLANG_PROJECT_ID: dulcet-port-762
  GCLOUD_TESTS_GOLANG_KEY: c:\gopath\src\cloud.google.com\go\key.json
  KEYFILE_CONTENTS:
secure: IvRbDAhM2PIQqzVkjzJ4FjizUvoQ+c3vG/qhJQG+HlZ/L5KEkqLu+x6WjLrExrNMyGku4znB2jmbTrUW3Ob4sGG+R5vvqeQ3YMHCVIkw5CxY+/bUDkW5RZWsVbuCnNa/vKsWmCP+/sZW6ICe29yKJ2ZOb6QaauI4s9R6j+cqBbU9pumMGYFRb0Rw3uUU7DKmVFCy+NjTENZIlDP9rmjANgAzigowJJEb2Tg9sLlQKmQeKiBSRN8lKc5Nq60a+fIzHGKvql4eIitDDDpOpyHv15/Xr1BzFw2yDoiR4X1lng0u7q0X9RgX4VIYa6gT16NXBEmQgbuX8gh7SfPMp9RhiZD9sVUaV+yogEabYpyPnmUURo0hXwkctKaBkQlEmKvjHwF5dvbg8+yqGhwtjAgFNimXG3INrwQsfQsZskkQWanutbJf9xy50GyWWFZZdi0uT4oXP/b5P7aklPXKXsvrJKBh7RjEaqBrhi86IJwOjBspvoR4l2WmcQyxb2xzQS1pjbBJFQfYJJ8+JgsstTL8PBO9d4ybJC0li1Om1qnWxkaewvPxxuoHJ9LpRKof19yRYWBmhTXb2tTASKG/zslvl4fgG4DmQBS93WC7dsiGOhAraGw2eCTgd0lYZOhk1FjWl9TS80aktXxzH/7nTvem5ohm+eDl6O0wnTL4KXjQVNSQ1PyLn4lGRJ5MNGzBTRFWIr2API2rca4Fysyfh/UdmazPGlNbY9JPGqb9+F04QzLfqm+Zz/cHy59E7lOSMBlUI4KD6d6ZNNKNRH+/g9i+fSiyiXKugTfda8KBnWGyPwprxuWGYaiQUGUYOwJY5R6x5c4mjImAB310V+Wo33UbWFJiwxEDsiCNqW1meVkBzt2er26vh4qbgCUIQ3iM3gFPfHgy+QxkmIhic7Q1HYacQElt8AAP41M7cCKWCuZidegP37MBB//mjjiNt047ZSQEvB4tqsX/OvfbByVef+cbtVw9T0yjHvmCdPW1XrhyrCCgclu6oYYdbmc5D7BBDRbjjMWGv6YvceAbfGf6ukdB5PuV+TGEN/FoQ1QTRA6Aqf+3fLMg4mS4oyTfw5xyYNbv3qoyLPrp+BnxI53WB9p0hfMg4n9FD6NntBxjDq+Q3Lk/bjC/Y4MaRWdzbMzF9a0lgGfcw9DURlK5p7uGJC9vg34feNoQprxVEZRQ01cHLeob6eGkYm4HxSRx8JY39Mh+9wzJo+k/aIvFleNC3e35NOrkXr6wb5e42n2DwBdPqdNolTLtLFRglAL1LTpp27UjvjieWJAKfoDTR5CKl01sZqt0wPdLLcvsMj6CiPFmccUIOYeZMe86kLBD61Qa5F1EwkgO3Om2qSjW96FzL4skRc+BmU5RrHlAFSldR1wpUgtkUMv9vH5Cy+UJdcvpZ8KbmhZ2PsjF7ddJ1ve9RAw3cP325AyIMwZ77Ef1mgTM0NJze6eSW1qKlEsgt1FADPyeUu1NQTA2H2dueMPGlArWTSUgyWR9AdfpqouT7eg0JWI5w+yUZZC+/rPglYbt84oLmYpwuli0z8FyEQRPIc3EtkfWIv/yYgDr2TZ0N2KvGfpi/MAUWgxI1gleC2uKgEOEtuJthd3XZjF2NoE7IBqjQOINybcJOjyeB5vRLDY1FLuxYzdg1y1etkV4XQig/vje

install:
  # Info for debugging.
  - echo %PATH%
  - go version
  - go env
  - go get -v -d -t ./...

# Provide a build script, or AppVeyor will call msbuild.
build_script:
  - go install -v ./...
  - echo %KEYFILE_CONTENTS% > %GCLOUD_TESTS_GOLANG_KEY%

test_script:
  - go test -v ./...
49 vendor/cloud.google.com/go/authexample_test.go generated vendored Normal file
@@ -0,0 +1,49 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package cloud_test

import (
    "cloud.google.com/go/datastore"
    "golang.org/x/net/context"
    "google.golang.org/api/option"
)

func Example_applicationDefaultCredentials() {
    // Google Application Default Credentials is the recommended way to authorize
    // and authenticate clients.
    //
    // See the following link on how to create and obtain Application Default Credentials:
    // https://developers.google.com/identity/protocols/application-default-credentials.
    client, err := datastore.NewClient(context.Background(), "project-id")
    if err != nil {
        // TODO: handle error.
    }
    _ = client // Use the client.
}

func Example_serviceAccountFile() {
    // Use a JSON key file associated with a Google service account to
    // authenticate and authorize. Service Account keys can be created and
    // downloaded from https://console.developers.google.com/permissions/serviceaccounts.
    //
    // Note: This example uses the datastore client, but the same steps apply to
    // the other client libraries underneath this package.
    client, err := datastore.NewClient(context.Background(),
        "project-id", option.WithServiceAccountFile("/path/to/service-account-key.json"))
    if err != nil {
        // TODO: handle error.
    }
    _ = client // Use the client.
}
76 vendor/cloud.google.com/go/bigquery/bigquery.go generated vendored Normal file
@@ -0,0 +1,76 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package bigquery

// TODO(mcgreevy): support dry-run mode when creating jobs.

import (
    "fmt"

    "google.golang.org/api/option"
    "google.golang.org/api/transport"

    "golang.org/x/net/context"
    bq "google.golang.org/api/bigquery/v2"
)

const prodAddr = "https://www.googleapis.com/bigquery/v2/"

// ExternalData is a table which is stored outside of BigQuery. It is implemented by GCSReference.
type ExternalData interface {
    externalDataConfig() bq.ExternalDataConfiguration
}

const Scope = "https://www.googleapis.com/auth/bigquery"
const userAgent = "gcloud-golang-bigquery/20160429"

// Client may be used to perform BigQuery operations.
type Client struct {
    service   service
    projectID string
}

// NewClient constructs a new Client which can perform BigQuery operations.
// Operations performed via the client are billed to the specified GCP project.
func NewClient(ctx context.Context, projectID string, opts ...option.ClientOption) (*Client, error) {
    o := []option.ClientOption{
        option.WithEndpoint(prodAddr),
        option.WithScopes(Scope),
        option.WithUserAgent(userAgent),
    }
    o = append(o, opts...)
    httpClient, endpoint, err := transport.NewHTTPClient(ctx, o...)
    if err != nil {
        return nil, fmt.Errorf("dialing: %v", err)
    }

    s, err := newBigqueryService(httpClient, endpoint)
    if err != nil {
        return nil, fmt.Errorf("constructing bigquery client: %v", err)
    }

    c := &Client{
        service:   s,
        projectID: projectID,
    }
    return c, nil
}

// Close closes any resources held by the client.
// Close should be called when the client is no longer needed.
// It need not be called at program exit.
func (c *Client) Close() error {
    return nil
}
74 vendor/cloud.google.com/go/bigquery/copy.go generated vendored Normal file
@@ -0,0 +1,74 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package bigquery

import (
    "golang.org/x/net/context"
    bq "google.golang.org/api/bigquery/v2"
)

// CopyConfig holds the configuration for a copy job.
type CopyConfig struct {
    // JobID is the ID to use for the copy job. If unset, a job ID will be automatically created.
    JobID string

    // Srcs are the tables from which data will be copied.
    Srcs []*Table

    // Dst is the table into which the data will be copied.
    Dst *Table

    // CreateDisposition specifies the circumstances under which the destination table will be created.
    // The default is CreateIfNeeded.
    CreateDisposition TableCreateDisposition

    // WriteDisposition specifies how existing data in the destination table is treated.
    // The default is WriteAppend.
    WriteDisposition TableWriteDisposition
}

// A Copier copies data into a BigQuery table from one or more BigQuery tables.
type Copier struct {
    CopyConfig
    c *Client
}

// CopierFrom returns a Copier which can be used to copy data into a
// BigQuery table from one or more BigQuery tables.
// The returned Copier may optionally be further configured before its Run method is called.
func (t *Table) CopierFrom(srcs ...*Table) *Copier {
    return &Copier{
        c: t.c,
        CopyConfig: CopyConfig{
            Srcs: srcs,
            Dst:  t,
        },
    }
}

// Run initiates a copy job.
func (c *Copier) Run(ctx context.Context) (*Job, error) {
    conf := &bq.JobConfigurationTableCopy{
        CreateDisposition: string(c.CreateDisposition),
        WriteDisposition:  string(c.WriteDisposition),
        DestinationTable:  c.Dst.tableRefProto(),
    }
    for _, t := range c.Srcs {
        conf.SourceTables = append(conf.SourceTables, t.tableRefProto())
    }
    job := &bq.Job{Configuration: &bq.JobConfiguration{Copy: conf}}
    setJobRef(job, c.JobID, c.c.projectID)
    return c.c.service.insertJob(ctx, c.c.projectID, &insertJobConf{job: job})
}
136 vendor/cloud.google.com/go/bigquery/copy_test.go generated vendored Normal file
@@ -0,0 +1,136 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package bigquery

import (
    "reflect"
    "testing"

    "golang.org/x/net/context"
    bq "google.golang.org/api/bigquery/v2"
)

func defaultCopyJob() *bq.Job {
    return &bq.Job{
        Configuration: &bq.JobConfiguration{
            Copy: &bq.JobConfigurationTableCopy{
                DestinationTable: &bq.TableReference{
                    ProjectId: "d-project-id",
                    DatasetId: "d-dataset-id",
                    TableId:   "d-table-id",
                },
                SourceTables: []*bq.TableReference{
                    {
                        ProjectId: "s-project-id",
                        DatasetId: "s-dataset-id",
                        TableId:   "s-table-id",
                    },
                },
            },
        },
    }
}

func TestCopy(t *testing.T) {
    testCases := []struct {
        dst    *Table
        srcs   []*Table
        config CopyConfig
        want   *bq.Job
    }{
        {
            dst: &Table{
                ProjectID: "d-project-id",
                DatasetID: "d-dataset-id",
                TableID:   "d-table-id",
            },
            srcs: []*Table{
                {
                    ProjectID: "s-project-id",
                    DatasetID: "s-dataset-id",
                    TableID:   "s-table-id",
                },
            },
            want: defaultCopyJob(),
        },
        {
            dst: &Table{
                ProjectID: "d-project-id",
                DatasetID: "d-dataset-id",
                TableID:   "d-table-id",
            },
            srcs: []*Table{
                {
                    ProjectID: "s-project-id",
                    DatasetID: "s-dataset-id",
                    TableID:   "s-table-id",
                },
            },
            config: CopyConfig{
                CreateDisposition: CreateNever,
                WriteDisposition:  WriteTruncate,
            },
            want: func() *bq.Job {
                j := defaultCopyJob()
                j.Configuration.Copy.CreateDisposition = "CREATE_NEVER"
                j.Configuration.Copy.WriteDisposition = "WRITE_TRUNCATE"
                return j
            }(),
        },
        {
            dst: &Table{
                ProjectID: "d-project-id",
                DatasetID: "d-dataset-id",
                TableID:   "d-table-id",
            },
            srcs: []*Table{
                {
                    ProjectID: "s-project-id",
                    DatasetID: "s-dataset-id",
                    TableID:   "s-table-id",
                },
            },
            config: CopyConfig{JobID: "job-id"},
            want: func() *bq.Job {
                j := defaultCopyJob()
                j.JobReference = &bq.JobReference{
                    JobId:     "job-id",
                    ProjectId: "client-project-id",
                }
                return j
            }(),
        },
    }

    for _, tc := range testCases {
        s := &testService{}
        c := &Client{
            service:   s,
            projectID: "client-project-id",
        }
        tc.dst.c = c
        copier := tc.dst.CopierFrom(tc.srcs...)
        tc.config.Srcs = tc.srcs
        tc.config.Dst = tc.dst
        copier.CopyConfig = tc.config
        if _, err := copier.Run(context.Background()); err != nil {
            t.Errorf("err calling Run: %v", err)
            continue
        }
        if !reflect.DeepEqual(s.Job, tc.want) {
            t.Errorf("copying: got:\n%v\nwant:\n%v", s.Job, tc.want)
        }
    }
}
103 vendor/cloud.google.com/go/bigquery/create_table_test.go generated vendored Normal file
@@ -0,0 +1,103 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package bigquery

import (
    "reflect"
    "testing"
    "time"

    "golang.org/x/net/context"
    bq "google.golang.org/api/bigquery/v2"
)

type createTableRecorder struct {
    conf *createTableConf
    service
}

func (rec *createTableRecorder) createTable(ctx context.Context, conf *createTableConf) error {
    rec.conf = conf
    return nil
}

func TestCreateTableOptions(t *testing.T) {
    s := &createTableRecorder{}
    c := &Client{
        projectID: "p",
        service:   s,
    }
    ds := c.Dataset("d")
    table := ds.Table("t")
    exp := time.Now()
    q := "query"
    if err := table.Create(context.Background(), TableExpiration(exp), ViewQuery(q), UseStandardSQL()); err != nil {
        t.Fatalf("err calling Table.Create: %v", err)
    }
    want := createTableConf{
        projectID:      "p",
        datasetID:      "d",
        tableID:        "t",
        expiration:     exp,
        viewQuery:      q,
        useStandardSQL: true,
    }
    if !reflect.DeepEqual(*s.conf, want) {
        t.Errorf("createTableConf: got:\n%v\nwant:\n%v", *s.conf, want)
    }

    sc := Schema{fieldSchema("desc", "name", "STRING", false, true)}
    if err := table.Create(context.Background(), TableExpiration(exp), sc); err != nil {
        t.Fatalf("err calling Table.Create: %v", err)
    }
    want = createTableConf{
        projectID:  "p",
        datasetID:  "d",
        tableID:    "t",
        expiration: exp,
        // No need for an elaborate schema, that is tested in schema_test.go.
        schema: &bq.TableSchema{
            Fields: []*bq.TableFieldSchema{
                bqTableFieldSchema("desc", "name", "STRING", "REQUIRED"),
            },
        },
    }
    if !reflect.DeepEqual(*s.conf, want) {
        t.Errorf("createTableConf: got:\n%v\nwant:\n%v", *s.conf, want)
    }

    partitionCases := []struct {
        timePartitioning   TimePartitioning
        expectedExpiration time.Duration
    }{
        {TimePartitioning{}, time.Duration(0)},
        {TimePartitioning{time.Second}, time.Second},
    }

    for _, c := range partitionCases {
        if err := table.Create(context.Background(), c.timePartitioning); err != nil {
            t.Fatalf("err calling Table.Create: %v", err)
        }
        want = createTableConf{
            projectID:        "p",
            datasetID:        "d",
            tableID:          "t",
            timePartitioning: &TimePartitioning{c.expectedExpiration},
        }
        if !reflect.DeepEqual(*s.conf, want) {
            t.Errorf("createTableConf: got:\n%v\nwant:\n%v", *s.conf, want)
        }
    }
}
188 vendor/cloud.google.com/go/bigquery/dataset.go generated vendored Normal file
@@ -0,0 +1,188 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package bigquery

import (
    "time"

    "golang.org/x/net/context"
    "google.golang.org/api/iterator"
)

// Dataset is a reference to a BigQuery dataset.
type Dataset struct {
    ProjectID string
    DatasetID string
    c         *Client
}

type DatasetMetadata struct {
    CreationTime           time.Time
    LastModifiedTime       time.Time // When the dataset or any of its tables were modified.
    DefaultTableExpiration time.Duration
    Description            string // The user-friendly description of this dataset.
    Name                   string // The user-friendly name for this dataset.
    ID                     string
    Location               string            // The geo location of the dataset.
    Labels                 map[string]string // User-provided labels.
    // TODO(jba): access rules
}

// Dataset creates a handle to a BigQuery dataset in the client's project.
func (c *Client) Dataset(id string) *Dataset {
    return c.DatasetInProject(c.projectID, id)
}

// DatasetInProject creates a handle to a BigQuery dataset in the specified project.
func (c *Client) DatasetInProject(projectID, datasetID string) *Dataset {
    return &Dataset{
        ProjectID: projectID,
        DatasetID: datasetID,
        c:         c,
    }
}

// Create creates a dataset in the BigQuery service. An error will be returned
// if the dataset already exists.
func (d *Dataset) Create(ctx context.Context) error {
    return d.c.service.insertDataset(ctx, d.DatasetID, d.ProjectID)
}

// Delete deletes the dataset.
func (d *Dataset) Delete(ctx context.Context) error {
    return d.c.service.deleteDataset(ctx, d.DatasetID, d.ProjectID)
}

// Metadata fetches the metadata for the dataset.
func (d *Dataset) Metadata(ctx context.Context) (*DatasetMetadata, error) {
    return d.c.service.getDatasetMetadata(ctx, d.ProjectID, d.DatasetID)
}

// Table creates a handle to a BigQuery table in the dataset.
// To determine if a table exists, call Table.Metadata.
// If the table does not already exist, use Table.Create to create it.
func (d *Dataset) Table(tableID string) *Table {
    return &Table{ProjectID: d.ProjectID, DatasetID: d.DatasetID, TableID: tableID, c: d.c}
}

// Tables returns an iterator over the tables in the Dataset.
func (d *Dataset) Tables(ctx context.Context) *TableIterator {
    it := &TableIterator{
        ctx:     ctx,
        dataset: d,
    }
    it.pageInfo, it.nextFunc = iterator.NewPageInfo(
        it.fetch,
        func() int { return len(it.tables) },
        func() interface{} { b := it.tables; it.tables = nil; return b })
    return it
}

// A TableIterator is an iterator over Tables.
type TableIterator struct {
    ctx      context.Context
    dataset  *Dataset
    tables   []*Table
    pageInfo *iterator.PageInfo
    nextFunc func() error
}

// Next returns the next result. Its second return value is Done if there are
// no more results. Once Next returns Done, all subsequent calls will return
// Done.
func (it *TableIterator) Next() (*Table, error) {
    if err := it.nextFunc(); err != nil {
        return nil, err
    }
    t := it.tables[0]
    it.tables = it.tables[1:]
    return t, nil
}

// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
func (it *TableIterator) PageInfo() *iterator.PageInfo { return it.pageInfo }

func (it *TableIterator) fetch(pageSize int, pageToken string) (string, error) {
    tables, tok, err := it.dataset.c.service.listTables(it.ctx, it.dataset.ProjectID, it.dataset.DatasetID, pageSize, pageToken)
    if err != nil {
        return "", err
    }
    for _, t := range tables {
        t.c = it.dataset.c
        it.tables = append(it.tables, t)
    }
    return tok, nil
}

// Datasets returns an iterator over the datasets in the Client's project.
func (c *Client) Datasets(ctx context.Context) *DatasetIterator {
    return c.DatasetsInProject(ctx, c.projectID)
}

// DatasetsInProject returns an iterator over the datasets in the provided project.
func (c *Client) DatasetsInProject(ctx context.Context, projectID string) *DatasetIterator {
    it := &DatasetIterator{
        ctx:       ctx,
        c:         c,
        projectID: projectID,
    }
    it.pageInfo, it.nextFunc = iterator.NewPageInfo(
        it.fetch,
        func() int { return len(it.items) },
        func() interface{} { b := it.items; it.items = nil; return b })
    return it
}

// DatasetIterator iterates over the datasets in a project.
type DatasetIterator struct {
    // ListHidden causes hidden datasets to be listed when set to true.
    ListHidden bool

    // Filter restricts the datasets returned by label. The filter syntax is described in
    // https://cloud.google.com/bigquery/docs/labeling-datasets#filtering_datasets_using_labels
    Filter string

    ctx       context.Context
    projectID string
    c         *Client
    pageInfo  *iterator.PageInfo
    nextFunc  func() error
    items     []*Dataset
}

// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
func (it *DatasetIterator) PageInfo() *iterator.PageInfo { return it.pageInfo }

func (it *DatasetIterator) Next() (*Dataset, error) {
    if err := it.nextFunc(); err != nil {
        return nil, err
    }
    item := it.items[0]
    it.items = it.items[1:]
    return item, nil
}

func (it *DatasetIterator) fetch(pageSize int, pageToken string) (string, error) {
    datasets, nextPageToken, err := it.c.service.listDatasets(it.ctx, it.projectID,
        pageSize, pageToken, it.ListHidden, it.Filter)
    if err != nil {
        return "", err
    }
    for _, d := range datasets {
        d.c = it.c
        it.items = append(it.items, d)
    }
    return nextPageToken, nil
}
156 vendor/cloud.google.com/go/bigquery/dataset_test.go generated vendored Normal file
@@ -0,0 +1,156 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package bigquery

import (
    "errors"
    "strconv"
    "testing"

    "golang.org/x/net/context"
    itest "google.golang.org/api/iterator/testing"
)

// listTablesServiceStub services listTables requests by returning data from an in-memory list of values.
type listTablesServiceStub struct {
    expectedProject, expectedDataset string
    tables                           []*Table
    service
}

func (s *listTablesServiceStub) listTables(ctx context.Context, projectID, datasetID string, pageSize int, pageToken string) ([]*Table, string, error) {
    if projectID != s.expectedProject {
        return nil, "", errors.New("wrong project id")
    }
    if datasetID != s.expectedDataset {
        return nil, "", errors.New("wrong dataset id")
    }
    const maxPageSize = 2
    if pageSize <= 0 || pageSize > maxPageSize {
        pageSize = maxPageSize
    }
    start := 0
    if pageToken != "" {
        var err error
        start, err = strconv.Atoi(pageToken)
        if err != nil {
            return nil, "", err
        }
    }
    end := start + pageSize
    if end > len(s.tables) {
        end = len(s.tables)
    }
    nextPageToken := ""
    if end < len(s.tables) {
        nextPageToken = strconv.Itoa(end)
    }
    return s.tables[start:end], nextPageToken, nil
}

func TestTables(t *testing.T) {
    t1 := &Table{ProjectID: "p1", DatasetID: "d1", TableID: "t1"}
    t2 := &Table{ProjectID: "p1", DatasetID: "d1", TableID: "t2"}
    t3 := &Table{ProjectID: "p1", DatasetID: "d1", TableID: "t3"}
    allTables := []*Table{t1, t2, t3}
    c := &Client{
        service: &listTablesServiceStub{
            expectedProject: "x",
            expectedDataset: "y",
            tables:          allTables,
        },
        projectID: "x",
    }
    msg, ok := itest.TestIterator(allTables,
        func() interface{} { return c.Dataset("y").Tables(context.Background()) },
        func(it interface{}) (interface{}, error) { return it.(*TableIterator).Next() })
    if !ok {
        t.Error(msg)
    }
}

type listDatasetsFake struct {
    service

    projectID string
    datasets  []*Dataset
    hidden    map[*Dataset]bool
}

func (df *listDatasetsFake) listDatasets(_ context.Context, projectID string, pageSize int, pageToken string, listHidden bool, filter string) ([]*Dataset, string, error) {
    const maxPageSize = 2
    if pageSize <= 0 || pageSize > maxPageSize {
        pageSize = maxPageSize
    }
    if filter != "" {
        return nil, "", errors.New("filter not supported")
    }
    if projectID != df.projectID {
        return nil, "", errors.New("bad project ID")
    }
    start := 0
    if pageToken != "" {
        var err error
        start, err = strconv.Atoi(pageToken)
        if err != nil {
            return nil, "", err
        }
    }
    var (
        i             int
        result        []*Dataset
        nextPageToken string
    )
    for i = start; len(result) < pageSize && i < len(df.datasets); i++ {
        if df.hidden[df.datasets[i]] && !listHidden {
            continue
        }
        result = append(result, df.datasets[i])
    }
    if i < len(df.datasets) {
        nextPageToken = strconv.Itoa(i)
    }
    return result, nextPageToken, nil
}

func TestDatasets(t *testing.T) {
    service := &listDatasetsFake{projectID: "p"}
    client := &Client{service: service}
    datasets := []*Dataset{
        {"p", "a", client},
        {"p", "b", client},
        {"p", "hidden", client},
        {"p", "c", client},
    }
    service.datasets = datasets
    service.hidden = map[*Dataset]bool{datasets[2]: true}
    c := &Client{
        projectID: "p",
        service:   service,
    }
    msg, ok := itest.TestIterator(datasets,
        func() interface{} { it := c.Datasets(context.Background()); it.ListHidden = true; return it },
        func(it interface{}) (interface{}, error) { return it.(*DatasetIterator).Next() })
    if !ok {
        t.Fatalf("ListHidden=true: %s", msg)
    }

    msg, ok = itest.TestIterator([]*Dataset{datasets[0], datasets[1], datasets[3]},
        func() interface{} { it := c.Datasets(context.Background()); it.ListHidden = false; return it },
        func(it interface{}) (interface{}, error) { return it.(*DatasetIterator).Next() })
    if !ok {
        t.Fatalf("ListHidden=false: %s", msg)
    }
}
295 vendor/cloud.google.com/go/bigquery/doc.go generated vendored Normal file
@@ -0,0 +1,295 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

/*
Package bigquery provides a client for the BigQuery service.

Note: This package is in beta. Some backwards-incompatible changes may occur.

The following assumes a basic familiarity with BigQuery concepts.
See https://cloud.google.com/bigquery/docs.


Creating a Client

To start working with this package, create a client:

    ctx := context.Background()
    client, err := bigquery.NewClient(ctx, projectID)
    if err != nil {
        // TODO: Handle error.
    }

Querying

To query existing tables, create a Query and call its Read method:

    q := client.Query(`
        SELECT year, SUM(number) as num
        FROM [bigquery-public-data:usa_names.usa_1910_2013]
        WHERE name = "William"
        GROUP BY year
        ORDER BY year
    `)
    it, err := q.Read(ctx)
    if err != nil {
        // TODO: Handle error.
    }

Then iterate through the resulting rows. You can store a row using
anything that implements the ValueLoader interface, or with a slice or map of bigquery.Value.
A slice is simplest:

    for {
        var values []bigquery.Value
        err := it.Next(&values)
        if err == iterator.Done {
            break
        }
        if err != nil {
            // TODO: Handle error.
        }
        fmt.Println(values)
    }

You can also use a struct whose exported fields match the query:

    type Count struct {
        Year int
        Num  int
    }
    for {
        var c Count
        err := it.Next(&c)
        if err == iterator.Done {
            break
        }
        if err != nil {
            // TODO: Handle error.
        }
        fmt.Println(c)
    }

You can also start the query running and get the results later.
Create the query as above, but call Run instead of Read. This returns a Job,
which represents an asynchronous operation.

    job, err := q.Run(ctx)
    if err != nil {
        // TODO: Handle error.
    }

Get the job's ID, a printable string. You can save this string to retrieve
the results at a later time, even in another process.

    jobID := job.ID()
    fmt.Printf("The job ID is %s\n", jobID)

To retrieve the job's results from the ID, first look up the Job:

    job, err = client.JobFromID(ctx, jobID)
    if err != nil {
        // TODO: Handle error.
    }

Use the Job.Read method to obtain an iterator, and loop over the rows.
Query.Read is just a convenience method that combines Query.Run and Job.Read.

    it, err = job.Read(ctx)
    if err != nil {
        // TODO: Handle error.
    }
    // Proceed with iteration as above.

Datasets and Tables

You can refer to datasets in the client's project with the Dataset method, and
in other projects with the DatasetInProject method:

    myDataset := client.Dataset("my_dataset")
    yourDataset := client.DatasetInProject("your-project-id", "your_dataset")

These methods create references to datasets, not the datasets themselves. You can have
a dataset reference even if the dataset doesn't exist yet. Use Dataset.Create to
create a dataset from a reference:

    if err := myDataset.Create(ctx); err != nil {
        // TODO: Handle error.
    }

You can refer to tables with Dataset.Table. Like bigquery.Dataset, bigquery.Table is a reference
to an object in BigQuery that may or may not exist.

    table := myDataset.Table("my_table")

You can create, delete and update the metadata of tables with methods on Table.
Table.Create supports a few options. For instance, you could create a temporary table with:

    err = myDataset.Table("temp").Create(ctx, bigquery.TableExpiration(time.Now().Add(1*time.Hour)))
    if err != nil {
        // TODO: Handle error.
    }

We'll see how to create a table with a schema in the next section.

Schemas

There are two ways to construct schemas with this package.
You can build a schema by hand, like so:

    schema1 := bigquery.Schema{
        &bigquery.FieldSchema{Name: "Name", Required: true, Type: bigquery.StringFieldType},
        &bigquery.FieldSchema{Name: "Grades", Repeated: true, Type: bigquery.IntegerFieldType},
    }

Or you can infer the schema from a struct:

    type student struct {
        Name   string
        Grades []int
    }
    schema2, err := bigquery.InferSchema(student{})
    if err != nil {
        // TODO: Handle error.
    }
    // schema1 and schema2 are identical.

Struct inference supports tags like those of the encoding/json package,
so you can change names or ignore fields:

    type student2 struct {
        Name   string `bigquery:"full_name"`
        Grades []int
        Secret string `bigquery:"-"`
    }
    schema3, err := bigquery.InferSchema(student2{})
    if err != nil {
        // TODO: Handle error.
    }
    // schema3 has fields "full_name" and "Grades".

Having constructed a schema, you can pass it to Table.Create as an option:

    if err := table.Create(ctx, schema1); err != nil {
        // TODO: Handle error.
    }

Copying

You can copy one or more tables to another table. Begin by constructing a Copier
describing the copy. Then set any desired copy options, and finally call Run to get a Job:

    copier := myDataset.Table("dest").CopierFrom(myDataset.Table("src"))
    copier.WriteDisposition = bigquery.WriteTruncate
    job, err = copier.Run(ctx)
    if err != nil {
        // TODO: Handle error.
    }

You can chain the call to Run if you don't want to set options:

    job, err = myDataset.Table("dest").CopierFrom(myDataset.Table("src")).Run(ctx)
    if err != nil {
        // TODO: Handle error.
    }

You can wait for your job to complete:

    status, err := job.Wait(ctx)
    if err != nil {
        // TODO: Handle error.
    }

Job.Wait polls with exponential backoff. You can also poll yourself, if you
wish:

    for {
        status, err := job.Status(ctx)
        if err != nil {
            // TODO: Handle error.
        }
        if status.Done() {
            if status.Err() != nil {
                log.Fatalf("Job failed with error %v", status.Err())
            }
            break
        }
        time.Sleep(pollInterval)
    }

Loading and Uploading

There are two ways to populate a table with this package: load the data from a Google Cloud Storage
object, or upload rows directly from your program.

For loading, first create a GCSReference, configuring it if desired. Then make a Loader, optionally configure
it as well, and call its Run method.

    gcsRef := bigquery.NewGCSReference("gs://my-bucket/my-object")
    gcsRef.AllowJaggedRows = true
    loader := myDataset.Table("dest").LoaderFrom(gcsRef)
    loader.CreateDisposition = bigquery.CreateNever
    job, err = loader.Run(ctx)
    // Poll the job for completion if desired, as above.

To upload, first define a type that implements the ValueSaver interface, which has a single method named Save.
Then create an Uploader, and call its Put method with a slice of values.

    u := table.Uploader()
    // Item implements the ValueSaver interface.
    items := []*Item{
        {Name: "n1", Size: 32.6, Count: 7},
        {Name: "n2", Size: 4, Count: 2},
        {Name: "n3", Size: 101.5, Count: 1},
    }
    if err := u.Put(ctx, items); err != nil {
        // TODO: Handle error.
    }

You can also upload a struct that doesn't implement ValueSaver. Use the StructSaver type
to specify the schema and insert ID by hand, or just supply the struct or struct pointer
directly and the schema will be inferred:

    type Item2 struct {
        Name  string
        Size  float64
        Count int
    }
    // Item2 does not implement ValueSaver; the schema will be inferred.
    items2 := []*Item2{
        {Name: "n1", Size: 32.6, Count: 7},
        {Name: "n2", Size: 4, Count: 2},
        {Name: "n3", Size: 101.5, Count: 1},
    }
    if err := u.Put(ctx, items2); err != nil {
        // TODO: Handle error.
    }

Extracting

If you've been following so far, extracting data from a BigQuery table
into a Google Cloud Storage object will feel familiar. First create an
Extractor, then optionally configure it, and lastly call its Run method.

    extractor := table.ExtractorTo(gcsRef)
    extractor.DisableHeader = true
    job, err = extractor.Run(ctx)
    // Poll the job for completion if desired, as above.

Authentication

See examples of authorization and authentication at
https://godoc.org/cloud.google.com/go#pkg-examples.
*/
package bigquery // import "cloud.google.com/go/bigquery"
82 vendor/cloud.google.com/go/bigquery/error.go generated vendored Normal file
@@ -0,0 +1,82 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package bigquery

import (
    "fmt"

    bq "google.golang.org/api/bigquery/v2"
)

// An Error contains detailed information about a failed bigquery operation.
type Error struct {
    // Mirrors bq.ErrorProto, but drops DebugInfo
    Location, Message, Reason string
}

func (e Error) Error() string {
    return fmt.Sprintf("{Location: %q; Message: %q; Reason: %q}", e.Location, e.Message, e.Reason)
}

func errorFromErrorProto(ep *bq.ErrorProto) *Error {
    if ep == nil {
        return nil
    }
    return &Error{
        Location: ep.Location,
        Message:  ep.Message,
        Reason:   ep.Reason,
    }
}

// A MultiError contains multiple related errors.
type MultiError []error

func (m MultiError) Error() string {
    switch len(m) {
    case 0:
        return "(0 errors)"
    case 1:
        return m[0].Error()
    case 2:
        return m[0].Error() + " (and 1 other error)"
    }
    return fmt.Sprintf("%s (and %d other errors)", m[0].Error(), len(m)-1)
}

// RowInsertionError contains all errors that occurred when attempting to insert a row.
type RowInsertionError struct {
    InsertID string // The InsertID associated with the affected row.
    RowIndex int    // The 0-based index of the affected row in the batch of rows being inserted.
    Errors   MultiError
}

func (e *RowInsertionError) Error() string {
    errFmt := "insertion of row [insertID: %q; insertIndex: %v] failed with error: %s"
    return fmt.Sprintf(errFmt, e.InsertID, e.RowIndex, e.Errors.Error())
}

// PutMultiError contains an error for each row which was not successfully inserted
// into a BigQuery table.
type PutMultiError []RowInsertionError

func (pme PutMultiError) Error() string {
    plural := "s"
    if len(pme) == 1 {
        plural = ""
    }

    return fmt.Sprintf("%v row insertion%s failed", len(pme), plural)
}
109 vendor/cloud.google.com/go/bigquery/error_test.go generated vendored Normal file
@@ -0,0 +1,109 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package bigquery

import (
    "errors"
    "reflect"
    "strings"
    "testing"

    bq "google.golang.org/api/bigquery/v2"
)

func rowInsertionError(msg string) RowInsertionError {
    return RowInsertionError{Errors: []error{errors.New(msg)}}
}

func TestPutMultiErrorString(t *testing.T) {
    testCases := []struct {
        errs PutMultiError
        want string
    }{
        {
            errs: PutMultiError{},
            want: "0 row insertions failed",
        },
        {
            errs: PutMultiError{rowInsertionError("a")},
            want: "1 row insertion failed",
        },
        {
            errs: PutMultiError{rowInsertionError("a"), rowInsertionError("b")},
            want: "2 row insertions failed",
        },
    }

    for _, tc := range testCases {
        if tc.errs.Error() != tc.want {
            t.Errorf("PutMultiError string: got:\n%v\nwant:\n%v", tc.errs.Error(), tc.want)
        }
    }
}

func TestMultiErrorString(t *testing.T) {
    testCases := []struct {
        errs MultiError
        want string
    }{
        {
            errs: MultiError{},
            want: "(0 errors)",
        },
        {
            errs: MultiError{errors.New("a")},
            want: "a",
        },
        {
            errs: MultiError{errors.New("a"), errors.New("b")},
            want: "a (and 1 other error)",
        },
        {
            errs: MultiError{errors.New("a"), errors.New("b"), errors.New("c")},
            want: "a (and 2 other errors)",
        },
    }

    for _, tc := range testCases {
        if tc.errs.Error() != tc.want {
            t.Errorf("MultiError string: got:\n%v\nwant:\n%v", tc.errs.Error(), tc.want)
        }
    }
}

func TestErrorFromErrorProto(t *testing.T) {
    for _, test := range []struct {
        in   *bq.ErrorProto
        want *Error
    }{
        {nil, nil},
        {
            in:   &bq.ErrorProto{Location: "L", Message: "M", Reason: "R"},
            want: &Error{Location: "L", Message: "M", Reason: "R"},
        },
    } {
        if got := errorFromErrorProto(test.in); !reflect.DeepEqual(got, test.want) {
            t.Errorf("%v: got %v, want %v", test.in, got, test.want)
        }
    }
}

func TestErrorString(t *testing.T) {
    e := &Error{Location: "<L>", Message: "<M>", Reason: "<R>"}
    got := e.Error()
    if !strings.Contains(got, "<L>") || !strings.Contains(got, "<M>") || !strings.Contains(got, "<R>") {
        t.Errorf(`got %q, expected to see "<L>", "<M>" and "<R>"`, got)
    }
}
652 vendor/cloud.google.com/go/bigquery/examples_test.go generated vendored Normal file
@ -0,0 +1,652 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package bigquery_test

import (
	"fmt"
	"os"

	"cloud.google.com/go/bigquery"
	"golang.org/x/net/context"
	"google.golang.org/api/iterator"
)

func ExampleNewClient() {
	ctx := context.Background()
	client, err := bigquery.NewClient(ctx, "project-id")
	if err != nil {
		// TODO: Handle error.
	}
	_ = client // TODO: Use client.
}

func ExampleClient_Dataset() {
	ctx := context.Background()
	client, err := bigquery.NewClient(ctx, "project-id")
	if err != nil {
		// TODO: Handle error.
	}
	ds := client.Dataset("my_dataset")
	fmt.Println(ds)
}

func ExampleClient_DatasetInProject() {
	ctx := context.Background()
	client, err := bigquery.NewClient(ctx, "project-id")
	if err != nil {
		// TODO: Handle error.
	}
	ds := client.DatasetInProject("their-project-id", "their-dataset")
	fmt.Println(ds)
}

func ExampleClient_Datasets() {
	ctx := context.Background()
	client, err := bigquery.NewClient(ctx, "project-id")
	if err != nil {
		// TODO: Handle error.
	}
	it := client.Datasets(ctx)
	_ = it // TODO: iterate using Next or iterator.Pager.
}

func ExampleClient_DatasetsInProject() {
	ctx := context.Background()
	client, err := bigquery.NewClient(ctx, "project-id")
	if err != nil {
		// TODO: Handle error.
	}
	it := client.DatasetsInProject(ctx, "their-project-id")
	_ = it // TODO: iterate using Next or iterator.Pager.
}

func getJobID() string { return "" }

func ExampleClient_JobFromID() {
	ctx := context.Background()
	client, err := bigquery.NewClient(ctx, "project-id")
	if err != nil {
		// TODO: Handle error.
	}
	jobID := getJobID() // Get a job ID using Job.ID, the console or elsewhere.
	job, err := client.JobFromID(ctx, jobID)
	if err != nil {
		// TODO: Handle error.
	}
	fmt.Println(job)
}

func ExampleNewGCSReference() {
	gcsRef := bigquery.NewGCSReference("gs://my-bucket/my-object")
	fmt.Println(gcsRef)
}

func ExampleClient_Query() {
	ctx := context.Background()
	client, err := bigquery.NewClient(ctx, "project-id")
	if err != nil {
		// TODO: Handle error.
	}
	q := client.Query("select name, num from t1")
	q.DefaultProjectID = "project-id"
	// TODO: set other options on the Query.
	// TODO: Call Query.Run or Query.Read.
}

func ExampleClient_Query_parameters() {
	ctx := context.Background()
	client, err := bigquery.NewClient(ctx, "project-id")
	if err != nil {
		// TODO: Handle error.
	}
	q := client.Query("select num from t1 where name = @user")
	q.Parameters = []bigquery.QueryParameter{
		{Name: "user", Value: "Elizabeth"},
	}
	// TODO: set other options on the Query.
	// TODO: Call Query.Run or Query.Read.
}

func ExampleQuery_Read() {
	ctx := context.Background()
	client, err := bigquery.NewClient(ctx, "project-id")
	if err != nil {
		// TODO: Handle error.
	}
	q := client.Query("select name, num from t1")
	it, err := q.Read(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	_ = it // TODO: iterate using Next or iterator.Pager.
}

func ExampleRowIterator_Next() {
	ctx := context.Background()
	client, err := bigquery.NewClient(ctx, "project-id")
	if err != nil {
		// TODO: Handle error.
	}
	q := client.Query("select name, num from t1")
	it, err := q.Read(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	for {
		var row []bigquery.Value
		err := it.Next(&row)
		if err == iterator.Done {
			break
		}
		if err != nil {
			// TODO: Handle error.
		}
		fmt.Println(row)
	}
}

func ExampleRowIterator_Next_struct() {
	ctx := context.Background()
	client, err := bigquery.NewClient(ctx, "project-id")
	if err != nil {
		// TODO: Handle error.
	}

	type score struct {
		Name string
		Num  int
	}

	q := client.Query("select name, num from t1")
	it, err := q.Read(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	for {
		var s score
		err := it.Next(&s)
		if err == iterator.Done {
			break
		}
		if err != nil {
			// TODO: Handle error.
		}
		fmt.Println(s)
	}
}

func ExampleJob_Read() {
	ctx := context.Background()
	client, err := bigquery.NewClient(ctx, "project-id")
	if err != nil {
		// TODO: Handle error.
	}
	q := client.Query("select name, num from t1")
	// Call Query.Run to get a Job, then call Read on the job.
	// Note: Query.Read is a shorthand for this.
	job, err := q.Run(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	it, err := job.Read(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	_ = it // TODO: iterate using Next or iterator.Pager.
}

func ExampleJob_Wait() {
	ctx := context.Background()
	client, err := bigquery.NewClient(ctx, "project-id")
	if err != nil {
		// TODO: Handle error.
	}
	ds := client.Dataset("my_dataset")
	job, err := ds.Table("t1").CopierFrom(ds.Table("t2")).Run(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	status, err := job.Wait(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	if status.Err() != nil {
		// TODO: Handle error.
	}
}

func ExampleDataset_Create() {
	ctx := context.Background()
	client, err := bigquery.NewClient(ctx, "project-id")
	if err != nil {
		// TODO: Handle error.
	}
	if err := client.Dataset("my_dataset").Create(ctx); err != nil {
		// TODO: Handle error.
	}
}

func ExampleDataset_Delete() {
	ctx := context.Background()
	client, err := bigquery.NewClient(ctx, "project-id")
	if err != nil {
		// TODO: Handle error.
	}
	if err := client.Dataset("my_dataset").Delete(ctx); err != nil {
		// TODO: Handle error.
	}
}

func ExampleDataset_Metadata() {
	ctx := context.Background()
	client, err := bigquery.NewClient(ctx, "project-id")
	if err != nil {
		// TODO: Handle error.
	}
	md, err := client.Dataset("my_dataset").Metadata(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	fmt.Println(md)
}

func ExampleDataset_Table() {
	ctx := context.Background()
	client, err := bigquery.NewClient(ctx, "project-id")
	if err != nil {
		// TODO: Handle error.
	}
	// Table creates a reference to the table. It does not create the actual
	// table in BigQuery; to do so, use Table.Create.
	t := client.Dataset("my_dataset").Table("my_table")
	fmt.Println(t)
}

func ExampleDataset_Tables() {
	ctx := context.Background()
	client, err := bigquery.NewClient(ctx, "project-id")
	if err != nil {
		// TODO: Handle error.
	}
	it := client.Dataset("my_dataset").Tables(ctx)
	_ = it // TODO: iterate using Next or iterator.Pager.
}

func ExampleDatasetIterator_Next() {
	ctx := context.Background()
	client, err := bigquery.NewClient(ctx, "project-id")
	if err != nil {
		// TODO: Handle error.
	}
	it := client.Datasets(ctx)
	for {
		ds, err := it.Next()
		if err == iterator.Done {
			break
		}
		if err != nil {
			// TODO: Handle error.
		}
		fmt.Println(ds)
	}
}

func ExampleInferSchema() {
	type Item struct {
		Name  string
		Size  float64
		Count int
	}
	schema, err := bigquery.InferSchema(Item{})
	if err != nil {
		fmt.Println(err)
		// TODO: Handle error.
	}
	for _, fs := range schema {
		fmt.Println(fs.Name, fs.Type)
	}
	// Output:
	// Name STRING
	// Size FLOAT
	// Count INTEGER
}

func ExampleInferSchema_tags() {
	type Item struct {
		Name   string
		Size   float64
		Count  int    `bigquery:"number"`
		Secret []byte `bigquery:"-"`
	}
	schema, err := bigquery.InferSchema(Item{})
	if err != nil {
		fmt.Println(err)
		// TODO: Handle error.
	}
	for _, fs := range schema {
		fmt.Println(fs.Name, fs.Type)
	}
	// Output:
	// Name STRING
	// Size FLOAT
	// number INTEGER
}

func ExampleTable_Create() {
	ctx := context.Background()
	client, err := bigquery.NewClient(ctx, "project-id")
	if err != nil {
		// TODO: Handle error.
	}
	t := client.Dataset("my_dataset").Table("new-table")
	if err := t.Create(ctx); err != nil {
		// TODO: Handle error.
	}
}

func ExampleTable_Create_schema() {
	ctx := context.Background()
	// Infer table schema from a Go type.
	schema, err := bigquery.InferSchema(Item{})
	if err != nil {
		// TODO: Handle error.
	}
	client, err := bigquery.NewClient(ctx, "project-id")
	if err != nil {
		// TODO: Handle error.
	}
	t := client.Dataset("my_dataset").Table("new-table")
	if err := t.Create(ctx, schema); err != nil {
		// TODO: Handle error.
	}
}

func ExampleTable_Delete() {
	ctx := context.Background()
	client, err := bigquery.NewClient(ctx, "project-id")
	if err != nil {
		// TODO: Handle error.
	}
	if err := client.Dataset("my_dataset").Table("my_table").Delete(ctx); err != nil {
		// TODO: Handle error.
	}
}

func ExampleTable_Metadata() {
	ctx := context.Background()
	client, err := bigquery.NewClient(ctx, "project-id")
	if err != nil {
		// TODO: Handle error.
	}
	md, err := client.Dataset("my_dataset").Table("my_table").Metadata(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	fmt.Println(md)
}

func ExampleTable_Uploader() {
	ctx := context.Background()
	client, err := bigquery.NewClient(ctx, "project-id")
	if err != nil {
		// TODO: Handle error.
	}
	u := client.Dataset("my_dataset").Table("my_table").Uploader()
	_ = u // TODO: Use u.
}

func ExampleTable_Uploader_options() {
	ctx := context.Background()
	client, err := bigquery.NewClient(ctx, "project-id")
	if err != nil {
		// TODO: Handle error.
	}
	u := client.Dataset("my_dataset").Table("my_table").Uploader()
	u.SkipInvalidRows = true
	u.IgnoreUnknownValues = true
	_ = u // TODO: Use u.
}

func ExampleTable_CopierFrom() {
	ctx := context.Background()
	client, err := bigquery.NewClient(ctx, "project-id")
	if err != nil {
		// TODO: Handle error.
	}
	ds := client.Dataset("my_dataset")
	c := ds.Table("combined").CopierFrom(ds.Table("t1"), ds.Table("t2"))
	c.WriteDisposition = bigquery.WriteTruncate
	// TODO: set other options on the Copier.
	job, err := c.Run(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	status, err := job.Wait(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	if status.Err() != nil {
		// TODO: Handle error.
	}
}

func ExampleTable_ExtractorTo() {
	ctx := context.Background()
	client, err := bigquery.NewClient(ctx, "project-id")
	if err != nil {
		// TODO: Handle error.
	}
	gcsRef := bigquery.NewGCSReference("gs://my-bucket/my-object")
	gcsRef.FieldDelimiter = ":"
	// TODO: set other options on the GCSReference.
	ds := client.Dataset("my_dataset")
	extractor := ds.Table("my_table").ExtractorTo(gcsRef)
	extractor.DisableHeader = true
	// TODO: set other options on the Extractor.
	job, err := extractor.Run(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	status, err := job.Wait(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	if status.Err() != nil {
		// TODO: Handle error.
	}
}

func ExampleTable_LoaderFrom() {
	ctx := context.Background()
	client, err := bigquery.NewClient(ctx, "project-id")
	if err != nil {
		// TODO: Handle error.
	}
	gcsRef := bigquery.NewGCSReference("gs://my-bucket/my-object")
	gcsRef.AllowJaggedRows = true
	// TODO: set other options on the GCSReference.
	ds := client.Dataset("my_dataset")
	loader := ds.Table("my_table").LoaderFrom(gcsRef)
	loader.CreateDisposition = bigquery.CreateNever
	// TODO: set other options on the Loader.
	job, err := loader.Run(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	status, err := job.Wait(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	if status.Err() != nil {
		// TODO: Handle error.
	}
}

func ExampleTable_LoaderFrom_reader() {
	ctx := context.Background()
	client, err := bigquery.NewClient(ctx, "project-id")
	if err != nil {
		// TODO: Handle error.
	}
	f, err := os.Open("data.csv")
	if err != nil {
		// TODO: Handle error.
	}
	rs := bigquery.NewReaderSource(f)
	rs.AllowJaggedRows = true
	// TODO: set other options on the GCSReference.
	ds := client.Dataset("my_dataset")
	loader := ds.Table("my_table").LoaderFrom(rs)
	loader.CreateDisposition = bigquery.CreateNever
	// TODO: set other options on the Loader.
	job, err := loader.Run(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	status, err := job.Wait(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	if status.Err() != nil {
		// TODO: Handle error.
	}
}

func ExampleTable_Read() {
	ctx := context.Background()
	client, err := bigquery.NewClient(ctx, "project-id")
	if err != nil {
		// TODO: Handle error.
	}
	it := client.Dataset("my_dataset").Table("my_table").Read(ctx)
	_ = it // TODO: iterate using Next or iterator.Pager.
}

func ExampleTable_Update() {
	ctx := context.Background()
	client, err := bigquery.NewClient(ctx, "project-id")
	if err != nil {
		// TODO: Handle error.
	}
	t := client.Dataset("my_dataset").Table("my_table")
	tm, err := t.Update(ctx, bigquery.TableMetadataToUpdate{
		Description: "my favorite table",
	})
	if err != nil {
		// TODO: Handle error.
	}
	fmt.Println(tm)
}

func ExampleTableIterator_Next() {
	ctx := context.Background()
	client, err := bigquery.NewClient(ctx, "project-id")
	if err != nil {
		// TODO: Handle error.
	}
	it := client.Dataset("my_dataset").Tables(ctx)
	for {
		t, err := it.Next()
		if err == iterator.Done {
			break
		}
		if err != nil {
			// TODO: Handle error.
		}
		fmt.Println(t)
	}
}

type Item struct {
	Name  string
	Size  float64
	Count int
}

// Save implements the ValueSaver interface.
func (i *Item) Save() (map[string]bigquery.Value, string, error) {
	return map[string]bigquery.Value{
		"Name":  i.Name,
		"Size":  i.Size,
		"Count": i.Count,
	}, "", nil
}

func ExampleUploader_Put() {
	ctx := context.Background()
	client, err := bigquery.NewClient(ctx, "project-id")
	if err != nil {
		// TODO: Handle error.
	}
	u := client.Dataset("my_dataset").Table("my_table").Uploader()
	// Item implements the ValueSaver interface.
	items := []*Item{
		{Name: "n1", Size: 32.6, Count: 7},
		{Name: "n2", Size: 4, Count: 2},
		{Name: "n3", Size: 101.5, Count: 1},
	}
	if err := u.Put(ctx, items); err != nil {
		// TODO: Handle error.
	}
}

var schema bigquery.Schema

func ExampleUploader_Put_structSaver() {
	ctx := context.Background()
	client, err := bigquery.NewClient(ctx, "project-id")
	if err != nil {
		// TODO: Handle error.
	}
	u := client.Dataset("my_dataset").Table("my_table").Uploader()

	type score struct {
		Name string
		Num  int
	}

	// Assume schema holds the table's schema.
	savers := []*bigquery.StructSaver{
		{Struct: score{Name: "n1", Num: 12}, Schema: schema, InsertID: "id1"},
		{Struct: score{Name: "n2", Num: 31}, Schema: schema, InsertID: "id2"},
		{Struct: score{Name: "n3", Num: 7}, Schema: schema, InsertID: "id3"},
	}
	if err := u.Put(ctx, savers); err != nil {
		// TODO: Handle error.
	}
}

func ExampleUploader_Put_struct() {
	ctx := context.Background()
	client, err := bigquery.NewClient(ctx, "project-id")
	if err != nil {
		// TODO: Handle error.
	}
	u := client.Dataset("my_dataset").Table("my_table").Uploader()

	type score struct {
		Name string
		Num  int
	}
	scores := []score{
		{Name: "n1", Num: 12},
		{Name: "n2", Num: 31},
		{Name: "n3", Num: 7},
	}
	// Schema is inferred from the score type.
	if err := u.Put(ctx, scores); err != nil {
		// TODO: Handle error.
	}
}
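Many of the examples above end with "iterate using Next or iterator.Pager". The Next form is shown in ExampleDatasetIterator_Next; a hedged sketch of the Pager form, assuming the iterator supports the standard google.golang.org/api/iterator paging protocol as those TODOs suggest:

	pager := iterator.NewPager(client.Datasets(ctx), 50, "") // 50 results per page, no start token
	for {
		var page []*bigquery.Dataset
		token, err := pager.NextPage(&page)
		if err != nil {
			// TODO: Handle error.
		}
		for _, ds := range page {
			fmt.Println(ds)
		}
		if token == "" {
			break // no more pages
		}
	}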
76 vendor/cloud.google.com/go/bigquery/extract.go generated vendored Normal file
@ -0,0 +1,76 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package bigquery

import (
	"golang.org/x/net/context"
	bq "google.golang.org/api/bigquery/v2"
)

// ExtractConfig holds the configuration for an extract job.
type ExtractConfig struct {
	// JobID is the ID to use for the extract job. If empty, a job ID will be automatically created.
	JobID string

	// Src is the table from which data will be extracted.
	Src *Table

	// Dst is the destination into which the data will be extracted.
	Dst *GCSReference

	// DisableHeader disables the printing of a header row in exported data.
	DisableHeader bool
}

// An Extractor extracts data from a BigQuery table into Google Cloud Storage.
type Extractor struct {
	ExtractConfig
	c *Client
}

// ExtractorTo returns an Extractor which can be used to extract data from a
// BigQuery table into Google Cloud Storage.
// The returned Extractor may optionally be further configured before its Run method is called.
func (t *Table) ExtractorTo(dst *GCSReference) *Extractor {
	return &Extractor{
		c: t.c,
		ExtractConfig: ExtractConfig{
			Src: t,
			Dst: dst,
		},
	}
}

// Run initiates an extract job.
func (e *Extractor) Run(ctx context.Context) (*Job, error) {
	conf := &bq.JobConfigurationExtract{}
	job := &bq.Job{Configuration: &bq.JobConfiguration{Extract: conf}}

	setJobRef(job, e.JobID, e.c.projectID)

	conf.DestinationUris = append([]string{}, e.Dst.uris...)
	conf.Compression = string(e.Dst.Compression)
	conf.DestinationFormat = string(e.Dst.DestinationFormat)
	conf.FieldDelimiter = e.Dst.FieldDelimiter

	conf.SourceTable = e.Src.tableRefProto()

	if e.DisableHeader {
		f := false
		conf.PrintHeader = &f
	}

	return e.c.service.insertJob(ctx, e.c.projectID, &insertJobConf{job: job})
}
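Run above copies the destination's Compression, DestinationFormat, and FieldDelimiter into the job, so a compressed JSON export is just a matter of configuring the GCSReference first. A sketch, assuming ctx and a dataset handle ds as in the examples earlier (the bucket and object pattern are placeholders):

	gcsRef := bigquery.NewGCSReference("gs://my-bucket/export-*.json.gz")
	gcsRef.Compression = bigquery.Gzip
	gcsRef.DestinationFormat = bigquery.JSON
	extractor := ds.Table("my_table").ExtractorTo(gcsRef)
	extractor.DisableHeader = true
	job, err := extractor.Run(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	_ = job // Poll with job.Wait(ctx) as in the examples.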
102 vendor/cloud.google.com/go/bigquery/extract_test.go generated vendored Normal file
@ -0,0 +1,102 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package bigquery

import (
	"reflect"
	"testing"

	"golang.org/x/net/context"

	bq "google.golang.org/api/bigquery/v2"
)

func defaultExtractJob() *bq.Job {
	return &bq.Job{
		Configuration: &bq.JobConfiguration{
			Extract: &bq.JobConfigurationExtract{
				SourceTable: &bq.TableReference{
					ProjectId: "project-id",
					DatasetId: "dataset-id",
					TableId:   "table-id",
				},
				DestinationUris: []string{"uri"},
			},
		},
	}
}

func TestExtract(t *testing.T) {
	s := &testService{}
	c := &Client{
		service:   s,
		projectID: "project-id",
	}

	testCases := []struct {
		dst    *GCSReference
		src    *Table
		config ExtractConfig
		want   *bq.Job
	}{
		{
			dst:  defaultGCS(),
			src:  c.Dataset("dataset-id").Table("table-id"),
			want: defaultExtractJob(),
		},
		{
			dst:    defaultGCS(),
			src:    c.Dataset("dataset-id").Table("table-id"),
			config: ExtractConfig{DisableHeader: true},
			want: func() *bq.Job {
				j := defaultExtractJob()
				f := false
				j.Configuration.Extract.PrintHeader = &f
				return j
			}(),
		},
		{
			dst: func() *GCSReference {
				g := NewGCSReference("uri")
				g.Compression = Gzip
				g.DestinationFormat = JSON
				g.FieldDelimiter = "\t"
				return g
			}(),
			src: c.Dataset("dataset-id").Table("table-id"),
			want: func() *bq.Job {
				j := defaultExtractJob()
				j.Configuration.Extract.Compression = "GZIP"
				j.Configuration.Extract.DestinationFormat = "NEWLINE_DELIMITED_JSON"
				j.Configuration.Extract.FieldDelimiter = "\t"
				return j
			}(),
		},
	}

	for _, tc := range testCases {
		ext := tc.src.ExtractorTo(tc.dst)
		tc.config.Src = ext.Src
		tc.config.Dst = ext.Dst
		ext.ExtractConfig = tc.config
		if _, err := ext.Run(context.Background()); err != nil {
			t.Errorf("err calling extract: %v", err)
			continue
		}
		if !reflect.DeepEqual(s.Job, tc.want) {
			t.Errorf("extracting: got:\n%v\nwant:\n%v", s.Job, tc.want)
		}
	}
}
172 vendor/cloud.google.com/go/bigquery/file.go generated vendored Normal file
@ -0,0 +1,172 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package bigquery

import (
	"io"

	bq "google.golang.org/api/bigquery/v2"
)

// A ReaderSource is a source for a load operation that gets
// data from an io.Reader.
type ReaderSource struct {
	r io.Reader
	FileConfig
}

// NewReaderSource creates a ReaderSource from an io.Reader. You may
// optionally configure properties on the ReaderSource that describe the
// data being read, before passing it to Table.LoaderFrom.
func NewReaderSource(r io.Reader) *ReaderSource {
	return &ReaderSource{r: r}
}

func (r *ReaderSource) populateInsertJobConfForLoad(conf *insertJobConf) {
	conf.media = r.r
	r.FileConfig.populateLoadConfig(conf.job.Configuration.Load)
}

// FileConfig contains configuration options that pertain to files, typically
// text files that require interpretation to be used as a BigQuery table. A
// file may live in Google Cloud Storage (see GCSReference), or it may be
// loaded into a table via the Table.LoaderFromReader.
type FileConfig struct {
	// SourceFormat is the format of the GCS data to be read.
	// Allowed values are: CSV, Avro, JSON, DatastoreBackup. The default is CSV.
	SourceFormat DataFormat

	// FieldDelimiter is the separator for fields in a CSV file, used when
	// reading or exporting data. The default is ",".
	FieldDelimiter string

	// The number of rows at the top of a CSV file that BigQuery will skip when
	// reading data.
	SkipLeadingRows int64

	// AllowJaggedRows causes missing trailing optional columns to be tolerated
	// when reading CSV data. Missing values are treated as nulls.
	AllowJaggedRows bool

	// AllowQuotedNewlines sets whether quoted data sections containing
	// newlines are allowed when reading CSV data.
	AllowQuotedNewlines bool

	// Indicates if we should automatically infer the options and
	// schema for CSV and JSON sources.
	AutoDetect bool

	// Encoding is the character encoding of data to be read.
	Encoding Encoding

	// MaxBadRecords is the maximum number of bad records that will be ignored
	// when reading data.
	MaxBadRecords int64

	// IgnoreUnknownValues causes values not matching the schema to be
	// tolerated. Unknown values are ignored. For CSV this ignores extra values
	// at the end of a line. For JSON this ignores named values that do not
	// match any column name. If this field is not set, records containing
	// unknown values are treated as bad records. The MaxBadRecords field can
	// be used to customize how bad records are handled.
	IgnoreUnknownValues bool

	// Schema describes the data. It is required when reading CSV or JSON data,
	// unless the data is being loaded into a table that already exists.
	Schema Schema

	// Quote is the value used to quote data sections in a CSV file. The
	// default quotation character is the double quote ("), which is used if
	// both Quote and ForceZeroQuote are unset.
	// To specify that no character should be interpreted as a quotation
	// character, set ForceZeroQuote to true.
	// Only used when reading data.
	Quote          string
	ForceZeroQuote bool
}

// quote returns the CSV quote character, or nil if unset.
func (fc *FileConfig) quote() *string {
	if fc.ForceZeroQuote {
		quote := ""
		return &quote
	}
	if fc.Quote == "" {
		return nil
	}
	return &fc.Quote
}

func (fc *FileConfig) populateLoadConfig(conf *bq.JobConfigurationLoad) {
	conf.SkipLeadingRows = fc.SkipLeadingRows
	conf.SourceFormat = string(fc.SourceFormat)
	conf.Autodetect = fc.AutoDetect
	conf.AllowJaggedRows = fc.AllowJaggedRows
	conf.AllowQuotedNewlines = fc.AllowQuotedNewlines
	conf.Encoding = string(fc.Encoding)
	conf.FieldDelimiter = fc.FieldDelimiter
	conf.IgnoreUnknownValues = fc.IgnoreUnknownValues
	conf.MaxBadRecords = fc.MaxBadRecords
	if fc.Schema != nil {
		conf.Schema = fc.Schema.asTableSchema()
	}
	conf.Quote = fc.quote()
}

func (fc *FileConfig) populateExternalDataConfig(conf *bq.ExternalDataConfiguration) {
	format := fc.SourceFormat
	if format == "" {
		// Format must be explicitly set for external data sources.
		format = CSV
	}
	// TODO(jba): support AutoDetect.
	conf.IgnoreUnknownValues = fc.IgnoreUnknownValues
	conf.MaxBadRecords = fc.MaxBadRecords
	conf.SourceFormat = string(format)
	if fc.Schema != nil {
		conf.Schema = fc.Schema.asTableSchema()
	}
	if format == CSV {
		conf.CsvOptions = &bq.CsvOptions{
			AllowJaggedRows:     fc.AllowJaggedRows,
			AllowQuotedNewlines: fc.AllowQuotedNewlines,
			Encoding:            string(fc.Encoding),
			FieldDelimiter:      fc.FieldDelimiter,
			SkipLeadingRows:     fc.SkipLeadingRows,
			Quote:               fc.quote(),
		}
	}
}

// DataFormat describes the format of BigQuery table data.
type DataFormat string

// Constants describing the format of BigQuery table data.
const (
	CSV             DataFormat = "CSV"
	Avro            DataFormat = "AVRO"
	JSON            DataFormat = "NEWLINE_DELIMITED_JSON"
	DatastoreBackup DataFormat = "DATASTORE_BACKUP"
)

// Encoding specifies the character encoding of data to be loaded into BigQuery.
// See https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.load.encoding
// for more details about how this is used.
type Encoding string

const (
	UTF_8      Encoding = "UTF-8"
	ISO_8859_1 Encoding = "ISO-8859-1"
)
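The three-way result of quote() above matters because the API distinguishes "no quoting at all" (pointer to the empty string) from "use the default" (nil): ForceZeroQuote wins, then an explicit Quote, and only then the nil default. A sketch of a load with quoting disabled, assuming f is an open io.Reader of CSV data and ds is a dataset handle, as in ExampleTable_LoaderFrom_reader:

	rs := bigquery.NewReaderSource(f)
	rs.SourceFormat = bigquery.CSV
	rs.ForceZeroQuote = true // no character is treated as a quote
	loader := ds.Table("my_table").LoaderFrom(rs)
	job, err := loader.Run(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	_ = job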
90 vendor/cloud.google.com/go/bigquery/file_test.go generated vendored Normal file
@ -0,0 +1,90 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package bigquery

import (
	"reflect"
	"testing"

	"cloud.google.com/go/internal/pretty"
	bq "google.golang.org/api/bigquery/v2"
)

func TestQuote(t *testing.T) {
	ptr := func(s string) *string { return &s }

	for _, test := range []struct {
		quote string
		force bool
		want  *string
	}{
		{"", false, nil},
		{"", true, ptr("")},
		{"-", false, ptr("-")},
		{"-", true, ptr("")},
	} {
		fc := FileConfig{
			Quote:          test.quote,
			ForceZeroQuote: test.force,
		}
		got := fc.quote()
		if (got == nil) != (test.want == nil) {
			t.Errorf("%+v\ngot %v\nwant %v", test, pretty.Value(got), pretty.Value(test.want))
		}
		if got != nil && test.want != nil && *got != *test.want {
			t.Errorf("%+v: got %q, want %q", test, *got, *test.want)
		}
	}
}

func TestPopulateLoadConfig(t *testing.T) {
	hyphen := "-"
	fc := FileConfig{
		SourceFormat:        CSV,
		FieldDelimiter:      "\t",
		SkipLeadingRows:     8,
		AllowJaggedRows:     true,
		AllowQuotedNewlines: true,
		Encoding:            UTF_8,
		MaxBadRecords:       7,
		IgnoreUnknownValues: true,
		Schema: Schema{
			stringFieldSchema(),
			nestedFieldSchema(),
		},
		Quote: hyphen,
	}
	want := &bq.JobConfigurationLoad{
		SourceFormat:        "CSV",
		FieldDelimiter:      "\t",
		SkipLeadingRows:     8,
		AllowJaggedRows:     true,
		AllowQuotedNewlines: true,
		Encoding:            "UTF-8",
		MaxBadRecords:       7,
		IgnoreUnknownValues: true,
		Schema: &bq.TableSchema{
			Fields: []*bq.TableFieldSchema{
				bqStringFieldSchema(),
				bqNestedFieldSchema(),
			}},
		Quote: &hyphen,
	}
	got := &bq.JobConfigurationLoad{}
	fc.populateLoadConfig(got)
	if !reflect.DeepEqual(got, want) {
		t.Errorf("got:\n%v\nwant:\n%v", pretty.Value(got), pretty.Value(want))
	}
}
68 vendor/cloud.google.com/go/bigquery/gcs.go generated vendored Normal file
@ -0,0 +1,68 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package bigquery

import bq "google.golang.org/api/bigquery/v2"

// GCSReference is a reference to one or more Google Cloud Storage objects, which together constitute
// an input or output to a BigQuery operation.
type GCSReference struct {
	// TODO(jba): Export so that GCSReference can be used to hold data from a Job.get api call and expose it to the user.
	uris []string

	FileConfig

	// DestinationFormat is the format to use when writing exported files.
	// Allowed values are: CSV, Avro, JSON. The default is CSV.
	// CSV is not supported for tables with nested or repeated fields.
	DestinationFormat DataFormat

	// Compression specifies the type of compression to apply when writing data
	// to Google Cloud Storage, or using this GCSReference as an ExternalData
	// source with CSV or JSON SourceFormat. Default is None.
	Compression Compression
}

// NewGCSReference constructs a reference to one or more Google Cloud Storage objects, which together constitute a data source or destination.
// In the simple case, a single URI in the form gs://bucket/object may refer to a single GCS object.
// Data may also be split into multiple files, if multiple URIs or URIs containing wildcards are provided.
// Each URI may contain one '*' wildcard character, which (if present) must come after the bucket name.
// For more information about the treatment of wildcards and multiple URIs,
// see https://cloud.google.com/bigquery/exporting-data-from-bigquery#exportingmultiple
func NewGCSReference(uri ...string) *GCSReference {
	return &GCSReference{uris: uri}
}

// Compression is the type of compression to apply when writing data to Google Cloud Storage.
type Compression string

const (
	None Compression = "NONE"
	Gzip Compression = "GZIP"
)

func (gcs *GCSReference) populateInsertJobConfForLoad(conf *insertJobConf) {
	conf.job.Configuration.Load.SourceUris = gcs.uris
	gcs.FileConfig.populateLoadConfig(conf.job.Configuration.Load)
}

func (gcs *GCSReference) externalDataConfig() bq.ExternalDataConfiguration {
	conf := bq.ExternalDataConfiguration{
		Compression: string(gcs.Compression),
		SourceUris:  append([]string{}, gcs.uris...),
	}
	gcs.FileConfig.populateExternalDataConfig(&conf)
	return conf
}
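Since NewGCSReference is variadic, the single-object, wildcard, and multi-URI cases described in its doc comment all go through the same constructor. A brief sketch (the bucket and object names are placeholders):

	single := bigquery.NewGCSReference("gs://my-bucket/data.csv")
	sharded := bigquery.NewGCSReference("gs://my-bucket/shard-*.csv") // one '*' after the bucket name
	multi := bigquery.NewGCSReference(
		"gs://my-bucket/part1.csv",
		"gs://my-bucket/part2.csv",
	)
	_, _, _ = single, sharded, multi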
754 vendor/cloud.google.com/go/bigquery/integration_test.go generated vendored Normal file
@ -0,0 +1,754 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package bigquery

import (
	"flag"
	"fmt"
	"log"
	"net/http"
	"os"
	"reflect"
	"sort"
	"strings"
	"testing"
	"time"

	"cloud.google.com/go/civil"
	"cloud.google.com/go/internal/pretty"
	"cloud.google.com/go/internal/testutil"
	"golang.org/x/net/context"
	"google.golang.org/api/googleapi"
	"google.golang.org/api/iterator"
	"google.golang.org/api/option"
)

var (
	client  *Client
	dataset *Dataset
	schema  = Schema{
		{Name: "name", Type: StringFieldType},
		{Name: "num", Type: IntegerFieldType},
	}
	fiveMinutesFromNow time.Time
)

func TestMain(m *testing.M) {
	initIntegrationTest()
	os.Exit(m.Run())
}

func getClient(t *testing.T) *Client {
	if client == nil {
		t.Skip("Integration tests skipped")
	}
	return client
}

// If integration tests will be run, create a unique bucket for them.
func initIntegrationTest() {
	flag.Parse() // needed for testing.Short()
	if testing.Short() {
		return
	}
	ctx := context.Background()
	ts := testutil.TokenSource(ctx, Scope)
	if ts == nil {
		log.Println("Integration tests skipped. See CONTRIBUTING.md for details")
		return
	}
	projID := testutil.ProjID()
	var err error
	client, err = NewClient(ctx, projID, option.WithTokenSource(ts))
	if err != nil {
		log.Fatalf("NewClient: %v", err)
	}
	dataset = client.Dataset("bigquery_integration_test")
	if err := dataset.Create(ctx); err != nil && !hasStatusCode(err, http.StatusConflict) { // AlreadyExists is 409
		log.Fatalf("creating dataset: %v", err)
	}
}

func TestIntegration_Create(t *testing.T) {
	// Check that creating a record field with an empty schema is an error.
	if client == nil {
		t.Skip("Integration tests skipped")
	}
	table := dataset.Table("t_bad")
	schema := Schema{
		{Name: "rec", Type: RecordFieldType, Schema: Schema{}},
	}
	err := table.Create(context.Background(), schema, TableExpiration(time.Now().Add(5*time.Minute)))
	if err == nil {
		t.Fatal("want error, got nil")
	}
	if !hasStatusCode(err, http.StatusBadRequest) {
		t.Fatalf("want a 400 error, got %v", err)
	}
}

func TestIntegration_CreateView(t *testing.T) {
	if client == nil {
		t.Skip("Integration tests skipped")
	}
	ctx := context.Background()
	table := newTable(t, schema)
	defer table.Delete(ctx)

	// Test that standard SQL views work.
	view := dataset.Table("t_view_standardsql")
	query := ViewQuery(fmt.Sprintf("SELECT APPROX_COUNT_DISTINCT(name) FROM `%s.%s.%s`", dataset.ProjectID, dataset.DatasetID, table.TableID))
	err := view.Create(context.Background(), UseStandardSQL(), query)
	if err != nil {
		t.Fatalf("table.create: Did not expect an error, got: %v", err)
	}
	view.Delete(ctx)
}

func TestIntegration_TableMetadata(t *testing.T) {
	if client == nil {
		t.Skip("Integration tests skipped")
	}
	ctx := context.Background()
	table := newTable(t, schema)
	defer table.Delete(ctx)
	// Check table metadata.
	md, err := table.Metadata(ctx)
	if err != nil {
		t.Fatal(err)
	}
	// TODO(jba): check md more thoroughly.
	if got, want := md.ID, fmt.Sprintf("%s:%s.%s", dataset.ProjectID, dataset.DatasetID, table.TableID); got != want {
		t.Errorf("metadata.ID: got %q, want %q", got, want)
	}
	if got, want := md.Type, RegularTable; got != want {
		t.Errorf("metadata.Type: got %v, want %v", got, want)
	}
	if got, want := md.ExpirationTime, fiveMinutesFromNow; !got.Equal(want) {
		t.Errorf("metadata.ExpirationTime: got %v, want %v", got, want)
	}

	// Check that timePartitioning is nil by default
	if md.TimePartitioning != nil {
		t.Errorf("metadata.TimePartitioning: got %v, want %v", md.TimePartitioning, nil)
	}

	// Create tables that have time partitioning
	partitionCases := []struct {
		timePartitioning   TimePartitioning
		expectedExpiration time.Duration
	}{
		{TimePartitioning{}, time.Duration(0)},
		{TimePartitioning{time.Second}, time.Second},
	}
	for i, c := range partitionCases {
		table := dataset.Table(fmt.Sprintf("t_metadata_partition_%v", i))
		err = table.Create(context.Background(), schema, c.timePartitioning, TableExpiration(time.Now().Add(5*time.Minute)))
		if err != nil {
			t.Fatal(err)
		}
		defer table.Delete(ctx)
		md, err = table.Metadata(ctx)
		if err != nil {
			t.Fatal(err)
		}

		got := md.TimePartitioning
		want := &TimePartitioning{c.expectedExpiration}
		if !reflect.DeepEqual(got, want) {
			t.Errorf("metadata.TimePartitioning: got %v, want %v", got, want)
		}
	}
}

func TestIntegration_DatasetMetadata(t *testing.T) {
	if client == nil {
		t.Skip("Integration tests skipped")
	}
	ctx := context.Background()
	md, err := dataset.Metadata(ctx)
	if err != nil {
		t.Fatal(err)
	}
	if got, want := md.ID, fmt.Sprintf("%s:%s", dataset.ProjectID, dataset.DatasetID); got != want {
		t.Errorf("ID: got %q, want %q", got, want)
	}
	jan2016 := time.Date(2016, 1, 1, 0, 0, 0, 0, time.UTC)
	if md.CreationTime.Before(jan2016) {
		t.Errorf("CreationTime: got %s, want > 2016-1-1", md.CreationTime)
	}
	if md.LastModifiedTime.Before(jan2016) {
		t.Errorf("LastModifiedTime: got %s, want > 2016-1-1", md.LastModifiedTime)
	}

	// Verify that we get a NotFound for a nonexistent dataset.
	_, err = client.Dataset("does_not_exist").Metadata(ctx)
	if err == nil || !hasStatusCode(err, http.StatusNotFound) {
		t.Errorf("got %v, want NotFound error", err)
	}
}

func TestIntegration_DatasetDelete(t *testing.T) {
	if client == nil {
		t.Skip("Integration tests skipped")
	}
	ctx := context.Background()
	ds := client.Dataset("delete_test")
	if err := ds.Create(ctx); err != nil && !hasStatusCode(err, http.StatusConflict) { // AlreadyExists is 409
		t.Fatalf("creating dataset %s: %v", ds, err)
	}
	if err := ds.Delete(ctx); err != nil {
		t.Fatalf("deleting dataset %s: %v", ds, err)
	}
}

func TestIntegration_Tables(t *testing.T) {
	if client == nil {
		t.Skip("Integration tests skipped")
	}
	ctx := context.Background()
	table := newTable(t, schema)
	defer table.Delete(ctx)

	// Iterate over tables in the dataset.
	it := dataset.Tables(ctx)
	var tables []*Table
	for {
		tbl, err := it.Next()
		if err == iterator.Done {
			break
		}
		if err != nil {
			t.Fatal(err)
		}
		tables = append(tables, tbl)
	}
	// Other tests may be running with this dataset, so there might be more
	// than just our table in the list. So don't try for an exact match; just
	// make sure that our table is there somewhere.
	found := false
	for _, tbl := range tables {
		if reflect.DeepEqual(tbl, table) {
			found = true
			break
		}
	}
	if !found {
		t.Errorf("Tables: got %v\nshould see %v in the list", pretty.Value(tables), pretty.Value(table))
	}
}

func TestIntegration_UploadAndRead(t *testing.T) {
	if client == nil {
		t.Skip("Integration tests skipped")
	}
	ctx := context.Background()
	table := newTable(t, schema)
	defer table.Delete(ctx)

	// Populate the table.
	upl := table.Uploader()
	var (
		wantRows  [][]Value
		saverRows []*ValuesSaver
	)
	for i, name := range []string{"a", "b", "c"} {
		row := []Value{name, int64(i)}
		wantRows = append(wantRows, row)
		saverRows = append(saverRows, &ValuesSaver{
			Schema:   schema,
			InsertID: name,
			Row:      row,
		})
	}
	if err := upl.Put(ctx, saverRows); err != nil {
		t.Fatal(putError(err))
	}

	// Wait until the data has been uploaded. This can take a few seconds, according
	// to https://cloud.google.com/bigquery/streaming-data-into-bigquery.
	if err := waitForRow(ctx, table); err != nil {
		t.Fatal(err)
	}

	// Read the table.
	checkRead(t, "upload", table.Read(ctx), wantRows)

	// Query the table.
	q := client.Query(fmt.Sprintf("select name, num from %s", table.TableID))
	q.DefaultProjectID = dataset.ProjectID
	q.DefaultDatasetID = dataset.DatasetID

	rit, err := q.Read(ctx)
	if err != nil {
		t.Fatal(err)
	}
	checkRead(t, "query", rit, wantRows)

	// Query the long way.
	job1, err := q.Run(ctx)
	if err != nil {
		t.Fatal(err)
	}
	job2, err := client.JobFromID(ctx, job1.ID())
	if err != nil {
		t.Fatal(err)
	}
	rit, err = job2.Read(ctx)
	if err != nil {
		t.Fatal(err)
	}
	checkRead(t, "job.Read", rit, wantRows)

	// Test reading directly into a []Value.
	valueLists, err := readAll(table.Read(ctx))
	if err != nil {
		t.Fatal(err)
	}
	it := table.Read(ctx)
	for i, vl := range valueLists {
		var got []Value
		if err := it.Next(&got); err != nil {
			t.Fatal(err)
		}
		want := []Value(vl)
		if !reflect.DeepEqual(got, want) {
			t.Errorf("%d: got %v, want %v", i, got, want)
		}
	}

	// Test reading into a map.
	it = table.Read(ctx)
	for _, vl := range valueLists {
		var vm map[string]Value
		if err := it.Next(&vm); err != nil {
			t.Fatal(err)
		}
		if got, want := len(vm), len(vl); got != want {
			t.Fatalf("valueMap len: got %d, want %d", got, want)
		}
		for i, v := range vl {
			if got, want := vm[schema[i].Name], v; got != want {
				t.Errorf("%d, name=%s: got %v, want %v",
					i, schema[i].Name, got, want)
			}
		}
	}
}

type TestStruct struct {
	Name string
	Nums []int
	Sub  Sub
	Subs []*Sub
}

type Sub struct {
	B       bool
	SubSub  SubSub
	SubSubs []*SubSub
}

type SubSub struct{ Count int }

func TestIntegration_UploadAndReadStructs(t *testing.T) {
	if client == nil {
		t.Skip("Integration tests skipped")
	}
	schema, err := InferSchema(TestStruct{})
	if err != nil {
		t.Fatal(err)
	}

	ctx := context.Background()
	table := newTable(t, schema)
	defer table.Delete(ctx)

	// Populate the table.
	upl := table.Uploader()
	want := []*TestStruct{
		{Name: "a", Nums: []int{1, 2}, Sub: Sub{B: true}, Subs: []*Sub{{B: false}, {B: true}}},
		{Name: "b", Nums: []int{1}, Subs: []*Sub{{B: false}, {B: false}, {B: true}}},
		{Name: "c", Sub: Sub{B: true}},
		{
			Name: "d",
			Sub:  Sub{SubSub: SubSub{12}, SubSubs: []*SubSub{{1}, {2}, {3}}},
			Subs: []*Sub{{B: false, SubSub: SubSub{4}}, {B: true, SubSubs: []*SubSub{{5}, {6}}}},
		},
	}
	var savers []*StructSaver
	for _, s := range want {
		savers = append(savers, &StructSaver{Schema: schema, Struct: s})
	}
	if err := upl.Put(ctx, savers); err != nil {
		t.Fatal(putError(err))
	}

	// Wait until the data has been uploaded. This can take a few seconds, according
	// to https://cloud.google.com/bigquery/streaming-data-into-bigquery.
	if err := waitForRow(ctx, table); err != nil {
		t.Fatal(err)
	}

	// Test iteration with structs.
	it := table.Read(ctx)
	var got []*TestStruct
	for {
		var g TestStruct
		err := it.Next(&g)
		if err == iterator.Done {
			break
		}
		if err != nil {
			t.Fatal(err)
		}
		got = append(got, &g)
	}
	sort.Sort(byName(got))

	// BigQuery does not elide nils. It reports an error for nil fields.
	for i, g := range got {
		if i >= len(want) {
			t.Errorf("%d: got %v, past end of want", i, pretty.Value(g))
		} else if w := want[i]; !reflect.DeepEqual(g, w) {
			t.Errorf("%d: got %v, want %v", i, pretty.Value(g), pretty.Value(w))
		}
	}
}

type byName []*TestStruct

func (b byName) Len() int           { return len(b) }
func (b byName) Swap(i, j int)      { b[i], b[j] = b[j], b[i] }
func (b byName) Less(i, j int) bool { return b[i].Name < b[j].Name }

func TestIntegration_Update(t *testing.T) {
	if client == nil {
		t.Skip("Integration tests skipped")
	}
	ctx := context.Background()
	table := newTable(t, schema)
	defer table.Delete(ctx)

	// Test Update of non-schema fields.
	tm, err := table.Metadata(ctx)
	if err != nil {
		t.Fatal(err)
	}
	wantDescription := tm.Description + "more"
	wantName := tm.Name + "more"
	got, err := table.Update(ctx, TableMetadataToUpdate{
		Description: wantDescription,
		Name:        wantName,
	})
	if err != nil {
		t.Fatal(err)
	}
	if got.Description != wantDescription {
		t.Errorf("Description: got %q, want %q", got.Description, wantDescription)
	}
	if got.Name != wantName {
		t.Errorf("Name: got %q, want %q", got.Name, wantName)
	}
	if !reflect.DeepEqual(got.Schema, schema) {
		t.Errorf("Schema: got %v, want %v", pretty.Value(got.Schema), pretty.Value(schema))
	}

	// Test schema update.
	// Columns can be added. schema2 is the same as schema, except for the
	// added column in the middle.
	nested := Schema{
		{Name: "nested", Type: BooleanFieldType},
		{Name: "other", Type: StringFieldType},
	}
	schema2 := Schema{
		schema[0],
		{Name: "rec", Type: RecordFieldType, Schema: nested},
		schema[1],
	}

	got, err = table.Update(ctx, TableMetadataToUpdate{Schema: schema2})
	if err != nil {
		t.Fatal(err)
	}

	// Wherever you add the column, it appears at the end.
	schema3 := Schema{schema2[0], schema2[2], schema2[1]}
	if !reflect.DeepEqual(got.Schema, schema3) {
		t.Errorf("add field:\ngot %v\nwant %v",
			pretty.Value(got.Schema), pretty.Value(schema3))
	}

	// Updating with the empty schema succeeds, but is a no-op.
	got, err = table.Update(ctx, TableMetadataToUpdate{Schema: Schema{}})
	if err != nil {
		t.Fatal(err)
	}
	if !reflect.DeepEqual(got.Schema, schema3) {
		t.Errorf("empty schema:\ngot %v\nwant %v",
			pretty.Value(got.Schema), pretty.Value(schema3))
	}

	// Error cases.
	for _, test := range []struct {
		desc   string
		fields []*FieldSchema
	}{
		{"change from optional to required", []*FieldSchema{
			schema3[0],
			{Name: "num", Type: IntegerFieldType, Required: true},
			schema3[2],
		}},
		{"add a required field", []*FieldSchema{
			schema3[0], schema3[1], schema3[2],
			{Name: "req", Type: StringFieldType, Required: true},
		}},
		{"remove a field", []*FieldSchema{schema3[0], schema3[1]}},
		{"remove a nested field", []*FieldSchema{
			schema3[0], schema3[1],
			{Name: "rec", Type: RecordFieldType, Schema: Schema{nested[0]}}}},
		{"remove all nested fields", []*FieldSchema{
			schema3[0], schema3[1],
			{Name: "rec", Type: RecordFieldType, Schema: Schema{}}}},
	} {
		for {
			_, err = table.Update(ctx, TableMetadataToUpdate{Schema: Schema(test.fields)})
			if !hasStatusCode(err, 403) {
				break
			}
			// We've hit the rate limit for updates. Wait a bit and retry.
			t.Logf("%s: retrying after getting %v", test.desc, err)
			time.Sleep(4 * time.Second)
		}
		if err == nil {
			t.Errorf("%s: want error, got nil", test.desc)
		} else if !hasStatusCode(err, 400) {
			t.Errorf("%s: want 400, got %v", test.desc, err)
		}
	}
}

func TestIntegration_Load(t *testing.T) {
	if client == nil {
		t.Skip("Integration tests skipped")
	}
	ctx := context.Background()
	table := newTable(t, schema)
	defer table.Delete(ctx)

	// Load the table from a reader.
	r := strings.NewReader("a,0\nb,1\nc,2\n")
	wantRows := [][]Value{
		[]Value{"a", int64(0)},
		[]Value{"b", int64(1)},
		[]Value{"c", int64(2)},
|
||||
}
|
||||
rs := NewReaderSource(r)
|
||||
loader := table.LoaderFrom(rs)
|
||||
loader.WriteDisposition = WriteTruncate
|
||||
job, err := loader.Run(ctx)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := wait(ctx, job); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
checkRead(t, "reader load", table.Read(ctx), wantRows)
|
||||
}
|
||||
|
||||
func TestIntegration_DML(t *testing.T) {
|
||||
if client == nil {
|
||||
t.Skip("Integration tests skipped")
|
||||
}
|
||||
ctx := context.Background()
|
||||
table := newTable(t, schema)
|
||||
defer table.Delete(ctx)
|
||||
|
||||
// Use DML to insert.
|
||||
wantRows := [][]Value{
|
||||
[]Value{"a", int64(0)},
|
||||
[]Value{"b", int64(1)},
|
||||
[]Value{"c", int64(2)},
|
||||
}
|
||||
query := fmt.Sprintf("INSERT bigquery_integration_test.%s (name, num) "+
|
||||
"VALUES ('a', 0), ('b', 1), ('c', 2)",
|
||||
table.TableID)
|
||||
q := client.Query(query)
|
||||
q.UseStandardSQL = true // necessary for DML
|
||||
job, err := q.Run(ctx)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := wait(ctx, job); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
checkRead(t, "INSERT", table.Read(ctx), wantRows)
|
||||
}
|
||||
|
||||
func TestIntegration_TimeTypes(t *testing.T) {
|
||||
if client == nil {
|
||||
t.Skip("Integration tests skipped")
|
||||
}
|
||||
ctx := context.Background()
|
||||
dtSchema := Schema{
|
||||
{Name: "d", Type: DateFieldType},
|
||||
{Name: "t", Type: TimeFieldType},
|
||||
{Name: "dt", Type: DateTimeFieldType},
|
||||
{Name: "ts", Type: TimestampFieldType},
|
||||
}
|
||||
table := newTable(t, dtSchema)
|
||||
defer table.Delete(ctx)
|
||||
|
||||
d := civil.Date{2016, 3, 20}
|
||||
tm := civil.Time{12, 30, 0, 0}
|
||||
ts := time.Date(2016, 3, 20, 15, 04, 05, 0, time.UTC)
|
||||
wantRows := [][]Value{
|
||||
[]Value{d, tm, civil.DateTime{d, tm}, ts},
|
||||
}
|
||||
upl := table.Uploader()
|
||||
if err := upl.Put(ctx, []*ValuesSaver{
|
||||
{Schema: dtSchema, Row: wantRows[0]},
|
||||
}); err != nil {
|
||||
t.Fatal(putError(err))
|
||||
}
|
||||
if err := waitForRow(ctx, table); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// SQL wants DATETIMEs with a space between date and time, but the service
|
||||
// returns them in RFC3339 form, with a "T" between.
|
||||
query := fmt.Sprintf("INSERT bigquery_integration_test.%s (d, t, dt, ts) "+
|
||||
"VALUES ('%s', '%s', '%s %s', '%s')",
|
||||
table.TableID, d, tm, d, tm, ts.Format("2006-01-02 15:04:05"))
|
||||
q := client.Query(query)
|
||||
q.UseStandardSQL = true // necessary for DML
|
||||
job, err := q.Run(ctx)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := wait(ctx, job); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
wantRows = append(wantRows, wantRows[0])
|
||||
checkRead(t, "TimeTypes", table.Read(ctx), wantRows)
|
||||
}
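// Illustrative note (not part of the original file): for the row above, the
// query sends the DATETIME as "2016-03-20 12:30:00", while the service
// reports it back in RFC 3339 form as "2016-03-20T12:30:00", with a "T"
// between date and time, as the comment in the test explains.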
|
||||
|
||||
// Creates a new, temporary table with a unique name and the given schema.
|
||||
func newTable(t *testing.T, s Schema) *Table {
|
||||
fiveMinutesFromNow = time.Now().Add(5 * time.Minute).Round(time.Second)
|
||||
name := fmt.Sprintf("t%d", time.Now().UnixNano())
|
||||
table := dataset.Table(name)
|
||||
err := table.Create(context.Background(), s, TableExpiration(fiveMinutesFromNow))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
return table
|
||||
}
|
||||
|
||||
func checkRead(t *testing.T, msg string, it *RowIterator, want [][]Value) {
|
||||
got, err := readAll(it)
|
||||
if err != nil {
|
||||
t.Fatalf("%s: %v", msg, err)
|
||||
}
|
||||
if len(got) != len(want) {
|
||||
t.Errorf("%s: got %d rows, want %d", msg, len(got), len(want))
|
||||
}
|
||||
sort.Sort(byCol0(got))
|
||||
for i, r := range got {
|
||||
gotRow := []Value(r)
|
||||
wantRow := want[i]
|
||||
if !reflect.DeepEqual(gotRow, wantRow) {
|
||||
t.Errorf("%s #%d: got %v, want %v", msg, i, gotRow, wantRow)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func readAll(it *RowIterator) ([][]Value, error) {
|
||||
var rows [][]Value
|
||||
for {
|
||||
var vals []Value
|
||||
err := it.Next(&vals)
|
||||
if err == iterator.Done {
|
||||
return rows, nil
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
rows = append(rows, vals)
|
||||
}
|
||||
}
|
||||
|
||||
type byCol0 [][]Value
|
||||
|
||||
func (b byCol0) Len() int { return len(b) }
|
||||
func (b byCol0) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
|
||||
func (b byCol0) Less(i, j int) bool {
|
||||
switch a := b[i][0].(type) {
|
||||
case string:
|
||||
return a < b[j][0].(string)
|
||||
case civil.Date:
|
||||
return a.Before(b[j][0].(civil.Date))
|
||||
default:
|
||||
panic("unknown type")
|
||||
}
|
||||
}
|
||||
|
||||
func hasStatusCode(err error, code int) bool {
|
||||
if e, ok := err.(*googleapi.Error); ok && e.Code == code {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// wait polls the job until it is complete or an error is returned.
|
||||
func wait(ctx context.Context, job *Job) error {
|
||||
status, err := job.Wait(ctx)
|
||||
if err != nil {
|
||||
return fmt.Errorf("getting job status: %v", err)
|
||||
}
|
||||
if status.Err() != nil {
|
||||
return fmt.Errorf("job status error: %#v", status.Err())
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// waitForRow polls the table until it contains a row.
|
||||
// TODO(jba): use internal.Retry.
|
||||
func waitForRow(ctx context.Context, table *Table) error {
|
||||
for {
|
||||
it := table.Read(ctx)
|
||||
var v []Value
|
||||
err := it.Next(&v)
|
||||
if err == nil {
|
||||
return nil
|
||||
}
|
||||
if err != iterator.Done {
|
||||
return err
|
||||
}
|
||||
time.Sleep(1 * time.Second)
|
||||
}
|
||||
}
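// Sketch only: the polling loop above could be expressed with internal.Retry,
// as the TODO suggests. The Retry signature below is the one used by Job.Wait
// elsewhere in this diff; this rewrite is illustrative, not part of the file.
//
//	return internal.Retry(ctx, gax.Backoff{}, func() (stop bool, err error) {
//		var v []Value
//		err = table.Read(ctx).Next(&v)
//		if err == nil {
//			return true, nil // the table has at least one row
//		}
//		if err != iterator.Done {
//			return true, err // a real error; stop retrying
//		}
//		return false, nil // empty so far; retry
//	})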
|
||||
|
||||
func putError(err error) string {
|
||||
pme, ok := err.(PutMultiError)
|
||||
if !ok {
|
||||
return err.Error()
|
||||
}
|
||||
var msgs []string
|
||||
for _, err := range pme {
|
||||
msgs = append(msgs, err.Error())
|
||||
}
|
||||
return strings.Join(msgs, "\n")
|
||||
}
|
158 vendor/cloud.google.com/go/bigquery/iterator.go generated vendored Normal file
|
@ -0,0 +1,158 @@
|
|||
// Copyright 2015 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package bigquery
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
"google.golang.org/api/iterator"
|
||||
)
|
||||
|
||||
// A pageFetcher returns a page of rows, starting from the row specified by token.
|
||||
type pageFetcher interface {
|
||||
fetch(ctx context.Context, s service, token string) (*readDataResult, error)
|
||||
setPaging(*pagingConf)
|
||||
}
|
||||
|
||||
func newRowIterator(ctx context.Context, s service, pf pageFetcher) *RowIterator {
|
||||
it := &RowIterator{
|
||||
ctx: ctx,
|
||||
service: s,
|
||||
pf: pf,
|
||||
}
|
||||
it.pageInfo, it.nextFunc = iterator.NewPageInfo(
|
||||
it.fetch,
|
||||
func() int { return len(it.rows) },
|
||||
func() interface{} { r := it.rows; it.rows = nil; return r })
|
||||
return it
|
||||
}
|
||||
|
||||
// A RowIterator provides access to the result of a BigQuery lookup.
|
||||
type RowIterator struct {
|
||||
ctx context.Context
|
||||
service service
|
||||
pf pageFetcher
|
||||
pageInfo *iterator.PageInfo
|
||||
nextFunc func() error
|
||||
|
||||
// StartIndex can be set before the first call to Next. If PageInfo().Token
|
||||
// is also set, StartIndex is ignored.
|
||||
StartIndex uint64
|
||||
|
||||
rows [][]Value
|
||||
|
||||
schema Schema // populated on first call to fetch
|
||||
structLoader structLoader // used to populate a pointer to a struct
|
||||
}
|
||||
|
||||
// Next loads the next row into dst. Its return value is iterator.Done if there
|
||||
// are no more results. Once Next returns iterator.Done, all subsequent calls
|
||||
// will return iterator.Done.
|
||||
//
|
||||
// dst may implement ValueLoader, or may be a *[]Value, *map[string]Value, or struct pointer.
|
||||
//
|
||||
// If dst is a *[]Value, it will be set to a new []Value whose i'th element
|
||||
// will be populated with the i'th column of the row.
|
||||
//
|
||||
// If dst is a *map[string]Value, a new map will be created if dst is nil. Then
|
||||
// for each schema column name, the map key of that name will be set to the column's
|
||||
// value.
|
||||
//
|
||||
// If dst is a pointer to a struct, each column in the schema will be matched
|
||||
// with an exported field of the struct that has the same name, ignoring case.
|
||||
// Unmatched schema columns and struct fields will be ignored.
|
||||
//
|
||||
// Each BigQuery column type corresponds to one or more Go types; a matching struct
|
||||
// field must be of the correct type. The correspondences are:
|
||||
//
|
||||
// STRING string
|
||||
// BOOL bool
|
||||
// INTEGER int, int8, int16, int32, int64, uint8, uint16, uint32
|
||||
// FLOAT float32, float64
|
||||
// BYTES []byte
|
||||
// TIMESTAMP time.Time
|
||||
// DATE civil.Date
|
||||
// TIME civil.Time
|
||||
// DATETIME civil.DateTime
|
||||
//
|
||||
// A repeated field corresponds to a slice or array of the element type.
|
||||
// A RECORD type (nested schema) corresponds to a nested struct or struct pointer.
|
||||
// All calls to Next on the same iterator must use the same struct type.
|
||||
func (it *RowIterator) Next(dst interface{}) error {
|
||||
var vl ValueLoader
|
||||
switch dst := dst.(type) {
|
||||
case ValueLoader:
|
||||
vl = dst
|
||||
case *[]Value:
|
||||
vl = (*valueList)(dst)
|
||||
case *map[string]Value:
|
||||
vl = (*valueMap)(dst)
|
||||
default:
|
||||
if !isStructPtr(dst) {
|
||||
return fmt.Errorf("bigquery: cannot convert %T to ValueLoader (need pointer to []Value, map[string]Value, or struct)", dst)
|
||||
}
|
||||
}
|
||||
if err := it.nextFunc(); err != nil {
|
||||
return err
|
||||
}
|
||||
row := it.rows[0]
|
||||
it.rows = it.rows[1:]
|
||||
|
||||
if vl == nil {
|
||||
// This can only happen if dst is a pointer to a struct. We couldn't
|
||||
// set vl above because we need the schema.
|
||||
if err := it.structLoader.set(dst, it.schema); err != nil {
|
||||
return err
|
||||
}
|
||||
vl = &it.structLoader
|
||||
}
|
||||
return vl.Load(row, it.schema)
|
||||
}
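// Illustrative sketch, not part of the original file: the typical read loop
// over a RowIterator, using the *[]Value form of dst described above.
//
//	var vals []Value
//	for {
//		err := it.Next(&vals)
//		if err == iterator.Done {
//			break // no more rows
//		}
//		if err != nil {
//			return err
//		}
//		// use vals: one Value per column of the current row
//	}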
|
||||
|
||||
func isStructPtr(x interface{}) bool {
|
||||
t := reflect.TypeOf(x)
|
||||
return t.Kind() == reflect.Ptr && t.Elem().Kind() == reflect.Struct
|
||||
}
|
||||
|
||||
// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
|
||||
func (it *RowIterator) PageInfo() *iterator.PageInfo { return it.pageInfo }
|
||||
|
||||
func (it *RowIterator) fetch(pageSize int, pageToken string) (string, error) {
|
||||
pc := &pagingConf{}
|
||||
if pageSize > 0 {
|
||||
pc.recordsPerRequest = int64(pageSize)
|
||||
pc.setRecordsPerRequest = true
|
||||
}
|
||||
if pageToken == "" {
|
||||
pc.startIndex = it.StartIndex
|
||||
}
|
||||
it.pf.setPaging(pc)
|
||||
var res *readDataResult
|
||||
var err error
|
||||
for {
|
||||
res, err = it.pf.fetch(it.ctx, it.service, pageToken)
|
||||
if err != errIncompleteJob {
|
||||
break
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
it.rows = append(it.rows, res.rows...)
|
||||
it.schema = res.schema
|
||||
return res.pageToken, nil
|
||||
}
|
413 vendor/cloud.google.com/go/bigquery/iterator_test.go generated vendored Normal file
|
@ -0,0 +1,413 @@
|
|||
// Copyright 2015 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package bigquery
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
"google.golang.org/api/iterator"
|
||||
)
|
||||
|
||||
type fetchResponse struct {
|
||||
result *readDataResult // The result to return.
|
||||
err error // The error to return.
|
||||
}
|
||||
|
||||
// pageFetcherStub services fetch requests by returning data from an in-memory list of values.
|
||||
type pageFetcherStub struct {
|
||||
fetchResponses map[string]fetchResponse
|
||||
|
||||
err error
|
||||
}
|
||||
|
||||
func (pf *pageFetcherStub) fetch(ctx context.Context, s service, token string) (*readDataResult, error) {
|
||||
call, ok := pf.fetchResponses[token]
|
||||
if !ok {
|
||||
pf.err = fmt.Errorf("Unexpected page token: %q", token)
|
||||
}
|
||||
return call.result, call.err
|
||||
}
|
||||
|
||||
func (pf *pageFetcherStub) setPaging(pc *pagingConf) {}
|
||||
|
||||
func TestIterator(t *testing.T) {
|
||||
var (
|
||||
iiSchema = Schema{
|
||||
{Type: IntegerFieldType},
|
||||
{Type: IntegerFieldType},
|
||||
}
|
||||
siSchema = Schema{
|
||||
{Type: StringFieldType},
|
||||
{Type: IntegerFieldType},
|
||||
}
|
||||
)
|
||||
fetchFailure := errors.New("fetch failure")
|
||||
|
||||
testCases := []struct {
|
||||
desc string
|
||||
pageToken string
|
||||
fetchResponses map[string]fetchResponse
|
||||
want [][]Value
|
||||
wantErr error
|
||||
wantSchema Schema
|
||||
}{
|
||||
{
|
||||
desc: "Iteration over single empty page",
|
||||
fetchResponses: map[string]fetchResponse{
|
||||
"": {
|
||||
result: &readDataResult{
|
||||
pageToken: "",
|
||||
rows: [][]Value{},
|
||||
schema: Schema{},
|
||||
},
|
||||
},
|
||||
},
|
||||
want: [][]Value{},
|
||||
wantSchema: Schema{},
|
||||
},
|
||||
{
|
||||
desc: "Iteration over single page",
|
||||
fetchResponses: map[string]fetchResponse{
|
||||
"": {
|
||||
result: &readDataResult{
|
||||
pageToken: "",
|
||||
rows: [][]Value{{1, 2}, {11, 12}},
|
||||
schema: iiSchema,
|
||||
},
|
||||
},
|
||||
},
|
||||
want: [][]Value{{1, 2}, {11, 12}},
|
||||
wantSchema: iiSchema,
|
||||
},
|
||||
{
|
||||
desc: "Iteration over single page with different schema",
|
||||
fetchResponses: map[string]fetchResponse{
|
||||
"": {
|
||||
result: &readDataResult{
|
||||
pageToken: "",
|
||||
rows: [][]Value{{"1", 2}, {"11", 12}},
|
||||
schema: siSchema,
|
||||
},
|
||||
},
|
||||
},
|
||||
want: [][]Value{{"1", 2}, {"11", 12}},
|
||||
wantSchema: siSchema,
|
||||
},
|
||||
{
|
||||
desc: "Iteration over two pages",
|
||||
fetchResponses: map[string]fetchResponse{
|
||||
"": {
|
||||
result: &readDataResult{
|
||||
pageToken: "a",
|
||||
rows: [][]Value{{1, 2}, {11, 12}},
|
||||
schema: iiSchema,
|
||||
},
|
||||
},
|
||||
"a": {
|
||||
result: &readDataResult{
|
||||
pageToken: "",
|
||||
rows: [][]Value{{101, 102}, {111, 112}},
|
||||
schema: iiSchema,
|
||||
},
|
||||
},
|
||||
},
|
||||
want: [][]Value{{1, 2}, {11, 12}, {101, 102}, {111, 112}},
|
||||
wantSchema: iiSchema,
|
||||
},
|
||||
{
|
||||
desc: "Server response includes empty page",
|
||||
fetchResponses: map[string]fetchResponse{
|
||||
"": {
|
||||
result: &readDataResult{
|
||||
pageToken: "a",
|
||||
rows: [][]Value{{1, 2}, {11, 12}},
|
||||
schema: iiSchema,
|
||||
},
|
||||
},
|
||||
"a": {
|
||||
result: &readDataResult{
|
||||
pageToken: "b",
|
||||
rows: [][]Value{},
|
||||
schema: iiSchema,
|
||||
},
|
||||
},
|
||||
"b": {
|
||||
result: &readDataResult{
|
||||
pageToken: "",
|
||||
rows: [][]Value{{101, 102}, {111, 112}},
|
||||
schema: iiSchema,
|
||||
},
|
||||
},
|
||||
},
|
||||
want: [][]Value{{1, 2}, {11, 12}, {101, 102}, {111, 112}},
|
||||
wantSchema: iiSchema,
|
||||
},
|
||||
{
|
||||
desc: "Fetch error",
|
||||
fetchResponses: map[string]fetchResponse{
|
||||
"": {
|
||||
result: &readDataResult{
|
||||
pageToken: "a",
|
||||
rows: [][]Value{{1, 2}, {11, 12}},
|
||||
schema: iiSchema,
|
||||
},
|
||||
},
|
||||
"a": {
|
||||
// We return some data from this fetch, but also an error.
|
||||
// So the end result should include only data from the previous fetch.
|
||||
err: fetchFailure,
|
||||
result: &readDataResult{
|
||||
pageToken: "b",
|
||||
rows: [][]Value{{101, 102}, {111, 112}},
|
||||
schema: iiSchema,
|
||||
},
|
||||
},
|
||||
},
|
||||
want: [][]Value{{1, 2}, {11, 12}},
|
||||
wantErr: fetchFailure,
|
||||
wantSchema: iiSchema,
|
||||
},
|
||||
|
||||
{
|
||||
desc: "Skip over an entire page",
|
||||
pageToken: "a",
|
||||
fetchResponses: map[string]fetchResponse{
|
||||
"": {
|
||||
result: &readDataResult{
|
||||
pageToken: "a",
|
||||
rows: [][]Value{{1, 2}, {11, 12}},
|
||||
schema: iiSchema,
|
||||
},
|
||||
},
|
||||
"a": {
|
||||
result: &readDataResult{
|
||||
pageToken: "",
|
||||
rows: [][]Value{{101, 102}, {111, 112}},
|
||||
schema: iiSchema,
|
||||
},
|
||||
},
|
||||
},
|
||||
want: [][]Value{{101, 102}, {111, 112}},
|
||||
wantSchema: iiSchema,
|
||||
},
|
||||
|
||||
{
|
||||
desc: "Skip beyond all data",
|
||||
pageToken: "b",
|
||||
fetchResponses: map[string]fetchResponse{
|
||||
"": {
|
||||
result: &readDataResult{
|
||||
pageToken: "a",
|
||||
rows: [][]Value{{1, 2}, {11, 12}},
|
||||
schema: iiSchema,
|
||||
},
|
||||
},
|
||||
"a": {
|
||||
result: &readDataResult{
|
||||
pageToken: "b",
|
||||
rows: [][]Value{{101, 102}, {111, 112}},
|
||||
schema: iiSchema,
|
||||
},
|
||||
},
|
||||
"b": {
|
||||
result: &readDataResult{},
|
||||
},
|
||||
},
|
||||
// In this test case, Next returns iterator.Done on its first call,
|
||||
// so consumeRowIterator reads no values.
|
||||
want: [][]Value{},
|
||||
wantSchema: Schema{},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
pf := &pageFetcherStub{
|
||||
fetchResponses: tc.fetchResponses,
|
||||
}
|
||||
it := newRowIterator(context.Background(), nil, pf)
|
||||
it.PageInfo().Token = tc.pageToken
|
||||
values, schema, err := consumeRowIterator(it)
|
||||
if err != tc.wantErr {
|
||||
t.Fatalf("%s: got %v, want %v", tc.desc, err, tc.wantErr)
|
||||
}
|
||||
if (len(values) != 0 || len(tc.want) != 0) && !reflect.DeepEqual(values, tc.want) {
|
||||
t.Errorf("%s: values:\ngot: %v\nwant:%v", tc.desc, values, tc.want)
|
||||
}
|
||||
if (len(schema) != 0 || len(tc.wantSchema) != 0) && !reflect.DeepEqual(schema, tc.wantSchema) {
|
||||
t.Errorf("%s: iterator.Schema:\ngot: %v\nwant: %v", tc.desc, schema, tc.wantSchema)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
type valueListWithSchema struct {
|
||||
vals valueList
|
||||
schema Schema
|
||||
}
|
||||
|
||||
func (v *valueListWithSchema) Load(vs []Value, s Schema) error {
|
||||
v.vals.Load(vs, s)
|
||||
v.schema = s
|
||||
return nil
|
||||
}
|
||||
|
||||
// consumeRowIterator reads the schema and all values from a RowIterator and returns them.
|
||||
func consumeRowIterator(it *RowIterator) ([][]Value, Schema, error) {
|
||||
var got [][]Value
|
||||
var schema Schema
|
||||
for {
|
||||
var vls valueListWithSchema
|
||||
err := it.Next(&vls)
|
||||
if err == iterator.Done {
|
||||
return got, schema, nil
|
||||
}
|
||||
if err != nil {
|
||||
return got, schema, err
|
||||
}
|
||||
got = append(got, vls.vals)
|
||||
schema = vls.schema
|
||||
}
|
||||
}
|
||||
|
||||
type delayedPageFetcher struct {
|
||||
pageFetcherStub
|
||||
delayCount int
|
||||
}
|
||||
|
||||
func (pf *delayedPageFetcher) fetch(ctx context.Context, s service, token string) (*readDataResult, error) {
|
||||
if pf.delayCount > 0 {
|
||||
pf.delayCount--
|
||||
return nil, errIncompleteJob
|
||||
}
|
||||
return pf.pageFetcherStub.fetch(ctx, s, token)
|
||||
}
|
||||
|
||||
func TestIterateIncompleteJob(t *testing.T) {
|
||||
want := [][]Value{{1, 2}, {11, 12}, {101, 102}, {111, 112}}
|
||||
pf := pageFetcherStub{
|
||||
fetchResponses: map[string]fetchResponse{
|
||||
"": {
|
||||
result: &readDataResult{
|
||||
pageToken: "a",
|
||||
rows: [][]Value{{1, 2}, {11, 12}},
|
||||
},
|
||||
},
|
||||
"a": {
|
||||
result: &readDataResult{
|
||||
pageToken: "",
|
||||
rows: [][]Value{{101, 102}, {111, 112}},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
dpf := &delayedPageFetcher{
|
||||
pageFetcherStub: pf,
|
||||
delayCount: 1,
|
||||
}
|
||||
it := newRowIterator(context.Background(), nil, dpf)
|
||||
|
||||
values, _, err := consumeRowIterator(it)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if (len(values) != 0 || len(want) != 0) && !reflect.DeepEqual(values, want) {
|
||||
t.Errorf("values: got:\n%v\nwant:\n%v", values, want)
|
||||
}
|
||||
if dpf.delayCount != 0 {
|
||||
t.Errorf("delayCount: got: %v, want: 0", dpf.delayCount)
|
||||
}
|
||||
}
|
||||
|
||||
func TestNextDuringErrorState(t *testing.T) {
|
||||
pf := &pageFetcherStub{
|
||||
fetchResponses: map[string]fetchResponse{
|
||||
"": {err: errors.New("bang")},
|
||||
},
|
||||
}
|
||||
it := newRowIterator(context.Background(), nil, pf)
|
||||
var vals []Value
|
||||
if err := it.Next(&vals); err == nil {
|
||||
t.Errorf("Expected error after calling Next")
|
||||
}
|
||||
if err := it.Next(&vals); err == nil {
|
||||
t.Errorf("Expected error calling Next again when iterator has a non-nil error.")
|
||||
}
|
||||
}
|
||||
|
||||
func TestNextAfterFinished(t *testing.T) {
|
||||
testCases := []struct {
|
||||
fetchResponses map[string]fetchResponse
|
||||
want [][]Value
|
||||
}{
|
||||
{
|
||||
fetchResponses: map[string]fetchResponse{
|
||||
"": {
|
||||
result: &readDataResult{
|
||||
pageToken: "",
|
||||
rows: [][]Value{{1, 2}, {11, 12}},
|
||||
},
|
||||
},
|
||||
},
|
||||
want: [][]Value{{1, 2}, {11, 12}},
|
||||
},
|
||||
{
|
||||
fetchResponses: map[string]fetchResponse{
|
||||
"": {
|
||||
result: &readDataResult{
|
||||
pageToken: "",
|
||||
rows: [][]Value{},
|
||||
},
|
||||
},
|
||||
},
|
||||
want: [][]Value{},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
pf := &pageFetcherStub{
|
||||
fetchResponses: tc.fetchResponses,
|
||||
}
|
||||
it := newRowIterator(context.Background(), nil, pf)
|
||||
|
||||
values, _, err := consumeRowIterator(it)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if (len(values) != 0 || len(tc.want) != 0) && !reflect.DeepEqual(values, tc.want) {
|
||||
t.Errorf("values: got:\n%v\nwant:\n%v", values, tc.want)
|
||||
}
|
||||
// Try calling Next again.
|
||||
var vals []Value
|
||||
if err := it.Next(&vals); err != iterator.Done {
|
||||
t.Errorf("Expected Done calling Next when there are no more values")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestIteratorNextTypes(t *testing.T) {
|
||||
it := newRowIterator(context.Background(), nil, nil)
|
||||
for _, v := range []interface{}{3, "s", []int{}, &[]int{},
|
||||
map[string]Value{}, &map[string]interface{}{},
|
||||
struct{}{},
|
||||
} {
|
||||
if err := it.Next(v); err == nil {
|
||||
t.Errorf("%v: want error, got nil", v)
|
||||
}
|
||||
}
|
||||
}
|
133 vendor/cloud.google.com/go/bigquery/job.go generated vendored Normal file
|
@ -0,0 +1,133 @@
|
|||
// Copyright 2015 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package bigquery
|
||||
|
||||
import (
|
||||
"cloud.google.com/go/internal"
|
||||
gax "github.com/googleapis/gax-go"
|
||||
"golang.org/x/net/context"
|
||||
bq "google.golang.org/api/bigquery/v2"
|
||||
)
|
||||
|
||||
// A Job represents an operation which has been submitted to BigQuery for processing.
|
||||
type Job struct {
|
||||
service service
|
||||
projectID string
|
||||
jobID string
|
||||
|
||||
isQuery bool
|
||||
}
|
||||
|
||||
// JobFromID creates a Job which refers to an existing BigQuery job. The job
|
||||
// need not have been created by this package. For example, the job may have
|
||||
// been created in the BigQuery console.
|
||||
func (c *Client) JobFromID(ctx context.Context, id string) (*Job, error) {
|
||||
jobType, err := c.service.getJobType(ctx, c.projectID, id)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &Job{
|
||||
service: c.service,
|
||||
projectID: c.projectID,
|
||||
jobID: id,
|
||||
isQuery: jobType == queryJobType,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (j *Job) ID() string {
|
||||
return j.jobID
|
||||
}
|
||||
|
||||
// State is one of a sequence of states that a Job progresses through as it is processed.
|
||||
type State int
|
||||
|
||||
const (
|
||||
Pending State = iota
|
||||
Running
|
||||
Done
|
||||
)
|
||||
|
||||
// JobStatus contains the current State of a job, and errors encountered while processing that job.
|
||||
type JobStatus struct {
|
||||
State State
|
||||
|
||||
err error
|
||||
|
||||
// All errors encountered during the running of the job.
|
||||
// Not all Errors are fatal, so errors here do not necessarily mean that the job has completed or was unsuccessful.
|
||||
Errors []*Error
|
||||
}
|
||||
|
||||
// setJobRef initializes job's JobReference if given a non-empty jobID.
|
||||
// projectID must be non-empty.
|
||||
func setJobRef(job *bq.Job, jobID, projectID string) {
|
||||
if jobID == "" {
|
||||
return
|
||||
}
|
||||
// We don't check whether projectID is empty; the server will return an
|
||||
// error when it encounters the resulting JobReference.
|
||||
|
||||
job.JobReference = &bq.JobReference{
|
||||
JobId: jobID,
|
||||
ProjectId: projectID,
|
||||
}
|
||||
}
|
||||
|
||||
// Done reports whether the job has completed.
|
||||
// After Done returns true, the Err method will return an error if the job completed unsuccessfully.
|
||||
func (s *JobStatus) Done() bool {
|
||||
return s.State == Done
|
||||
}
|
||||
|
||||
// Err returns the error that caused the job to complete unsuccessfully (if any).
|
||||
func (s *JobStatus) Err() error {
|
||||
return s.err
|
||||
}
|
||||
|
||||
// Status returns the current status of the job. It fails if the status could not be determined.
|
||||
func (j *Job) Status(ctx context.Context) (*JobStatus, error) {
|
||||
return j.service.jobStatus(ctx, j.projectID, j.jobID)
|
||||
}
|
||||
|
||||
// Cancel requests that a job be cancelled. This method returns without waiting for
|
||||
// cancellation to take effect. To check whether the job has terminated, use Job.Status.
|
||||
// Cancelled jobs may still incur costs.
|
||||
func (j *Job) Cancel(ctx context.Context) error {
|
||||
return j.service.jobCancel(ctx, j.projectID, j.jobID)
|
||||
}
|
||||
|
||||
// Wait blocks until the job or the context is done. It returns the final status
|
||||
// of the job.
|
||||
// If an error occurs while retrieving the status, Wait returns that error. But
|
||||
// Wait returns nil if the status was retrieved successfully, even if
|
||||
// status.Err() != nil. So callers must check both errors. See the example.
|
||||
func (j *Job) Wait(ctx context.Context) (*JobStatus, error) {
|
||||
var js *JobStatus
|
||||
err := internal.Retry(ctx, gax.Backoff{}, func() (stop bool, err error) {
|
||||
js, err = j.Status(ctx)
|
||||
if err != nil {
|
||||
return true, err
|
||||
}
|
||||
if js.Done() {
|
||||
return true, nil
|
||||
}
|
||||
return false, nil
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return js, nil
|
||||
}
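// Illustrative sketch, not part of the original file: the double error check
// that the Wait doc comment above calls for.
//
//	status, err := job.Wait(ctx)
//	if err != nil {
//		return err // could not retrieve the status
//	}
//	if err := status.Err(); err != nil {
//		return err // the status was retrieved, but the job itself failed
//	}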
|
86 vendor/cloud.google.com/go/bigquery/load.go generated vendored Normal file
|
@ -0,0 +1,86 @@
|
|||
// Copyright 2016 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package bigquery
|
||||
|
||||
import (
|
||||
"golang.org/x/net/context"
|
||||
bq "google.golang.org/api/bigquery/v2"
|
||||
)
|
||||
|
||||
// LoadConfig holds the configuration for a load job.
|
||||
type LoadConfig struct {
|
||||
// JobID is the ID to use for the load job. If unset, a job ID will be automatically created.
|
||||
JobID string
|
||||
|
||||
// Src is the source from which data will be loaded.
|
||||
Src LoadSource
|
||||
|
||||
// Dst is the table into which the data will be loaded.
|
||||
Dst *Table
|
||||
|
||||
// CreateDisposition specifies the circumstances under which the destination table will be created.
|
||||
// The default is CreateIfNeeded.
|
||||
CreateDisposition TableCreateDisposition
|
||||
|
||||
// WriteDisposition specifies how existing data in the destination table is treated.
|
||||
// The default is WriteAppend.
|
||||
WriteDisposition TableWriteDisposition
|
||||
}
|
||||
|
||||
// A Loader loads data from Google Cloud Storage into a BigQuery table.
|
||||
type Loader struct {
|
||||
LoadConfig
|
||||
c *Client
|
||||
}
|
||||
|
||||
// A LoadSource represents a source of data that can be loaded into
|
||||
// a BigQuery table.
|
||||
//
|
||||
// This package defines two LoadSources: GCSReference, for Google Cloud Storage
|
||||
// objects, and ReaderSource, for data read from an io.Reader.
|
||||
type LoadSource interface {
|
||||
populateInsertJobConfForLoad(conf *insertJobConf)
|
||||
}
|
||||
|
||||
// LoaderFrom returns a Loader which can be used to load data into a BigQuery table.
|
||||
// The returned Loader may optionally be further configured before its Run method is called.
|
||||
func (t *Table) LoaderFrom(src LoadSource) *Loader {
|
||||
return &Loader{
|
||||
c: t.c,
|
||||
LoadConfig: LoadConfig{
|
||||
Src: src,
|
||||
Dst: t,
|
||||
},
|
||||
}
|
||||
}
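// Illustrative sketch, not part of the original file: loading CSV data from
// an io.Reader, mirroring the TestIntegration_Load flow earlier in this diff.
//
//	rs := NewReaderSource(strings.NewReader("a,0\nb,1\n"))
//	loader := table.LoaderFrom(rs)
//	loader.WriteDisposition = WriteTruncate
//	job, err := loader.Run(ctx)
//	if err != nil {
//		return err
//	}
//	status, err := job.Wait(ctx) // then check both err and status.Err()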
|
||||
|
||||
// Run initiates a load job.
|
||||
func (l *Loader) Run(ctx context.Context) (*Job, error) {
|
||||
job := &bq.Job{
|
||||
Configuration: &bq.JobConfiguration{
|
||||
Load: &bq.JobConfigurationLoad{
|
||||
CreateDisposition: string(l.CreateDisposition),
|
||||
WriteDisposition: string(l.WriteDisposition),
|
||||
},
|
||||
},
|
||||
}
|
||||
conf := &insertJobConf{job: job}
|
||||
l.Src.populateInsertJobConfForLoad(conf)
|
||||
setJobRef(job, l.JobID, l.c.projectID)
|
||||
|
||||
job.Configuration.Load.DestinationTable = l.Dst.tableRefProto()
|
||||
|
||||
return l.c.service.insertJob(ctx, l.c.projectID, conf)
|
||||
}
|
229 vendor/cloud.google.com/go/bigquery/load_test.go generated vendored Normal file
|
@ -0,0 +1,229 @@
|
|||
// Copyright 2015 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package bigquery
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
|
||||
"cloud.google.com/go/internal/pretty"
|
||||
bq "google.golang.org/api/bigquery/v2"
|
||||
)
|
||||
|
||||
func defaultLoadJob() *bq.Job {
|
||||
return &bq.Job{
|
||||
Configuration: &bq.JobConfiguration{
|
||||
Load: &bq.JobConfigurationLoad{
|
||||
DestinationTable: &bq.TableReference{
|
||||
ProjectId: "project-id",
|
||||
DatasetId: "dataset-id",
|
||||
TableId: "table-id",
|
||||
},
|
||||
SourceUris: []string{"uri"},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func stringFieldSchema() *FieldSchema {
|
||||
return &FieldSchema{Name: "fieldname", Type: StringFieldType}
|
||||
}
|
||||
|
||||
func nestedFieldSchema() *FieldSchema {
|
||||
return &FieldSchema{
|
||||
Name: "nested",
|
||||
Type: RecordFieldType,
|
||||
Schema: Schema{stringFieldSchema()},
|
||||
}
|
||||
}
|
||||
|
||||
func bqStringFieldSchema() *bq.TableFieldSchema {
|
||||
return &bq.TableFieldSchema{
|
||||
Name: "fieldname",
|
||||
Type: "STRING",
|
||||
}
|
||||
}
|
||||
|
||||
func bqNestedFieldSchema() *bq.TableFieldSchema {
|
||||
return &bq.TableFieldSchema{
|
||||
Name: "nested",
|
||||
Type: "RECORD",
|
||||
Fields: []*bq.TableFieldSchema{bqStringFieldSchema()},
|
||||
}
|
||||
}
|
||||
|
||||
func TestLoad(t *testing.T) {
|
||||
c := &Client{projectID: "project-id"}
|
||||
|
||||
testCases := []struct {
|
||||
dst *Table
|
||||
src LoadSource
|
||||
config LoadConfig
|
||||
want *bq.Job
|
||||
}{
|
||||
{
|
||||
dst: c.Dataset("dataset-id").Table("table-id"),
|
||||
src: NewGCSReference("uri"),
|
||||
want: defaultLoadJob(),
|
||||
},
|
||||
{
|
||||
dst: c.Dataset("dataset-id").Table("table-id"),
|
||||
config: LoadConfig{
|
||||
CreateDisposition: CreateNever,
|
||||
WriteDisposition: WriteTruncate,
|
||||
JobID: "ajob",
|
||||
},
|
||||
src: NewGCSReference("uri"),
|
||||
want: func() *bq.Job {
|
||||
j := defaultLoadJob()
|
||||
j.Configuration.Load.CreateDisposition = "CREATE_NEVER"
|
||||
j.Configuration.Load.WriteDisposition = "WRITE_TRUNCATE"
|
||||
j.JobReference = &bq.JobReference{
|
||||
JobId: "ajob",
|
||||
ProjectId: "project-id",
|
||||
}
|
||||
return j
|
||||
}(),
|
||||
},
|
||||
{
|
||||
dst: c.Dataset("dataset-id").Table("table-id"),
|
||||
src: func() *GCSReference {
|
||||
g := NewGCSReference("uri")
|
||||
g.MaxBadRecords = 1
|
||||
g.AllowJaggedRows = true
|
||||
g.AllowQuotedNewlines = true
|
||||
g.IgnoreUnknownValues = true
|
||||
return g
|
||||
}(),
|
||||
want: func() *bq.Job {
|
||||
j := defaultLoadJob()
|
||||
j.Configuration.Load.MaxBadRecords = 1
|
||||
j.Configuration.Load.AllowJaggedRows = true
|
||||
j.Configuration.Load.AllowQuotedNewlines = true
|
||||
j.Configuration.Load.IgnoreUnknownValues = true
|
||||
return j
|
||||
}(),
|
||||
},
|
||||
{
|
||||
dst: c.Dataset("dataset-id").Table("table-id"),
|
||||
src: func() *GCSReference {
|
||||
g := NewGCSReference("uri")
|
||||
g.Schema = Schema{
|
||||
stringFieldSchema(),
|
||||
nestedFieldSchema(),
|
||||
}
|
||||
return g
|
||||
}(),
|
||||
want: func() *bq.Job {
|
||||
j := defaultLoadJob()
|
||||
j.Configuration.Load.Schema = &bq.TableSchema{
|
||||
Fields: []*bq.TableFieldSchema{
|
||||
bqStringFieldSchema(),
|
||||
bqNestedFieldSchema(),
|
||||
}}
|
||||
return j
|
||||
}(),
|
||||
},
|
||||
{
|
||||
dst: c.Dataset("dataset-id").Table("table-id"),
|
||||
src: func() *GCSReference {
|
||||
g := NewGCSReference("uri")
|
||||
g.SkipLeadingRows = 1
|
||||
g.SourceFormat = JSON
|
||||
g.Encoding = UTF_8
|
||||
g.FieldDelimiter = "\t"
|
||||
g.Quote = "-"
|
||||
return g
|
||||
}(),
|
||||
want: func() *bq.Job {
|
||||
j := defaultLoadJob()
|
||||
j.Configuration.Load.SkipLeadingRows = 1
|
||||
j.Configuration.Load.SourceFormat = "NEWLINE_DELIMITED_JSON"
|
||||
j.Configuration.Load.Encoding = "UTF-8"
|
||||
j.Configuration.Load.FieldDelimiter = "\t"
|
||||
hyphen := "-"
|
||||
j.Configuration.Load.Quote = &hyphen
|
||||
return j
|
||||
}(),
|
||||
},
|
||||
{
|
||||
dst: c.Dataset("dataset-id").Table("table-id"),
|
||||
src: NewGCSReference("uri"),
|
||||
want: func() *bq.Job {
|
||||
j := defaultLoadJob()
|
||||
// Quote is left unset in GCSReference, so should be nil here.
|
||||
j.Configuration.Load.Quote = nil
|
||||
return j
|
||||
}(),
|
||||
},
|
||||
{
|
||||
dst: c.Dataset("dataset-id").Table("table-id"),
|
||||
src: func() *GCSReference {
|
||||
g := NewGCSReference("uri")
|
||||
g.ForceZeroQuote = true
|
||||
return g
|
||||
}(),
|
||||
want: func() *bq.Job {
|
||||
j := defaultLoadJob()
|
||||
empty := ""
|
||||
j.Configuration.Load.Quote = &empty
|
||||
return j
|
||||
}(),
|
||||
},
|
||||
{
|
||||
dst: c.Dataset("dataset-id").Table("table-id"),
|
||||
src: func() *ReaderSource {
|
||||
r := NewReaderSource(strings.NewReader("foo"))
|
||||
r.SkipLeadingRows = 1
|
||||
r.SourceFormat = JSON
|
||||
r.Encoding = UTF_8
|
||||
r.FieldDelimiter = "\t"
|
||||
r.Quote = "-"
|
||||
return r
|
||||
}(),
|
||||
want: func() *bq.Job {
|
||||
j := defaultLoadJob()
|
||||
j.Configuration.Load.SourceUris = nil
|
||||
j.Configuration.Load.SkipLeadingRows = 1
|
||||
j.Configuration.Load.SourceFormat = "NEWLINE_DELIMITED_JSON"
|
||||
j.Configuration.Load.Encoding = "UTF-8"
|
||||
j.Configuration.Load.FieldDelimiter = "\t"
|
||||
hyphen := "-"
|
||||
j.Configuration.Load.Quote = &hyphen
|
||||
return j
|
||||
}(),
|
||||
},
|
||||
}
|
||||
|
||||
for i, tc := range testCases {
|
||||
s := &testService{}
|
||||
c.service = s
|
||||
loader := tc.dst.LoaderFrom(tc.src)
|
||||
tc.config.Src = tc.src
|
||||
tc.config.Dst = tc.dst
|
||||
loader.LoadConfig = tc.config
|
||||
if _, err := loader.Run(context.Background()); err != nil {
|
||||
t.Errorf("%d: err calling Loader.Run: %v", i, err)
|
||||
continue
|
||||
}
|
||||
if !reflect.DeepEqual(s.Job, tc.want) {
|
||||
t.Errorf("loading %d: got:\n%v\nwant:\n%v",
|
||||
i, pretty.Value(s.Job), pretty.Value(tc.want))
|
||||
}
|
||||
}
|
||||
}
|
265 vendor/cloud.google.com/go/bigquery/params.go generated vendored Normal file
|
@ -0,0 +1,265 @@
|
|||
// Copyright 2016 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package bigquery
|
||||
|
||||
import (
|
||||
"encoding/base64"
|
||||
"errors"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"regexp"
|
||||
"time"
|
||||
|
||||
"cloud.google.com/go/civil"
|
||||
"cloud.google.com/go/internal/fields"
|
||||
|
||||
bq "google.golang.org/api/bigquery/v2"
|
||||
)
|
||||
|
||||
var (
|
||||
// See https://cloud.google.com/bigquery/docs/reference/standard-sql/data-types#timestamp-type.
|
||||
timestampFormat = "2006-01-02 15:04:05.999999-07:00"
|
||||
|
||||
// See https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#schema.fields.name
|
||||
validFieldName = regexp.MustCompile("^[a-zA-Z_][a-zA-Z0-9_]{0,127}$")
|
||||
)
|
||||
|
||||
func bqTagParser(t reflect.StructTag) (name string, keep bool, other interface{}, err error) {
|
||||
if s := t.Get("bigquery"); s != "" {
|
||||
if s == "-" {
|
||||
return "", false, nil, nil
|
||||
}
|
||||
if !validFieldName.MatchString(s) {
|
||||
return "", false, nil, errInvalidFieldName
|
||||
}
|
||||
return s, true, nil, nil
|
||||
}
|
||||
return "", true, nil, nil
|
||||
}
|
||||
|
||||
var fieldCache = fields.NewCache(bqTagParser, nil, nil)
|
||||
|
||||
var (
|
||||
int64ParamType = &bq.QueryParameterType{Type: "INT64"}
|
||||
float64ParamType = &bq.QueryParameterType{Type: "FLOAT64"}
|
||||
boolParamType = &bq.QueryParameterType{Type: "BOOL"}
|
||||
stringParamType = &bq.QueryParameterType{Type: "STRING"}
|
||||
bytesParamType = &bq.QueryParameterType{Type: "BYTES"}
|
||||
dateParamType = &bq.QueryParameterType{Type: "DATE"}
|
||||
timeParamType = &bq.QueryParameterType{Type: "TIME"}
|
||||
dateTimeParamType = &bq.QueryParameterType{Type: "DATETIME"}
|
||||
timestampParamType = &bq.QueryParameterType{Type: "TIMESTAMP"}
|
||||
)
|
||||
|
||||
var (
|
||||
typeOfDate = reflect.TypeOf(civil.Date{})
|
||||
typeOfTime = reflect.TypeOf(civil.Time{})
|
||||
typeOfDateTime = reflect.TypeOf(civil.DateTime{})
|
||||
typeOfGoTime = reflect.TypeOf(time.Time{})
|
||||
)
|
||||
|
||||
// A QueryParameter is a parameter to a query.
|
||||
type QueryParameter struct {
|
||||
// Name is used for named parameter mode.
|
||||
// It must match the name in the query case-insensitively.
|
||||
Name string
|
||||
|
||||
// Value is the value of the parameter.
|
||||
// The following Go types are supported, with their corresponding
|
||||
// Bigquery types:
|
||||
// int, int8, int16, int32, int64, uint8, uint16, uint32: INT64
|
||||
// Note that uint, uint64 and uintptr are not supported, because
|
||||
// they may contain values that cannot fit into a 64-bit signed integer.
|
||||
// float32, float64: FLOAT64
|
||||
// bool: BOOL
|
||||
// string: STRING
|
||||
// []byte: BYTES
|
||||
// time.Time: TIMESTAMP
|
||||
// Arrays and slices of the above.
|
||||
// Structs of the above. Only the exported fields are used.
|
||||
Value interface{}
|
||||
}
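// Illustrative sketch, not part of the original file: binding a value by
// name, per the Name field's doc comment above. The query text is made up;
// positional binding with "?" is shown by paramRoundTrip later in this diff.
//
//	q := client.Query("SELECT num FROM t WHERE name = @name")
//	q.Parameters = []QueryParameter{{Name: "name", Value: "a"}}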
|
||||
|
||||
func (p QueryParameter) toRaw() (*bq.QueryParameter, error) {
|
||||
pv, err := paramValue(reflect.ValueOf(p.Value))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
pt, err := paramType(reflect.TypeOf(p.Value))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &bq.QueryParameter{
|
||||
Name: p.Name,
|
||||
ParameterValue: &pv,
|
||||
ParameterType: pt,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func paramType(t reflect.Type) (*bq.QueryParameterType, error) {
|
||||
if t == nil {
|
||||
return nil, errors.New("bigquery: nil parameter")
|
||||
}
|
||||
switch t {
|
||||
case typeOfDate:
|
||||
return dateParamType, nil
|
||||
case typeOfTime:
|
||||
return timeParamType, nil
|
||||
case typeOfDateTime:
|
||||
return dateTimeParamType, nil
|
||||
case typeOfGoTime:
|
||||
return timestampParamType, nil
|
||||
}
|
||||
switch t.Kind() {
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Uint8, reflect.Uint16, reflect.Uint32:
|
||||
return int64ParamType, nil
|
||||
|
||||
case reflect.Float32, reflect.Float64:
|
||||
return float64ParamType, nil
|
||||
|
||||
case reflect.Bool:
|
||||
return boolParamType, nil
|
||||
|
||||
case reflect.String:
|
||||
return stringParamType, nil
|
||||
|
||||
case reflect.Slice:
|
||||
if t.Elem().Kind() == reflect.Uint8 {
|
||||
return bytesParamType, nil
|
||||
}
|
||||
fallthrough
|
||||
|
||||
case reflect.Array:
|
||||
et, err := paramType(t.Elem())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &bq.QueryParameterType{Type: "ARRAY", ArrayType: et}, nil
|
||||
|
||||
case reflect.Ptr:
|
||||
if t.Elem().Kind() != reflect.Struct {
|
||||
break
|
||||
}
|
||||
t = t.Elem()
|
||||
fallthrough
|
||||
|
||||
case reflect.Struct:
|
||||
var fts []*bq.QueryParameterTypeStructTypes
|
||||
fields, err := fieldCache.Fields(t)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
for _, f := range fields {
|
||||
pt, err := paramType(f.Type)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
fts = append(fts, &bq.QueryParameterTypeStructTypes{
|
||||
Name: f.Name,
|
||||
Type: pt,
|
||||
})
|
||||
}
|
||||
return &bq.QueryParameterType{Type: "STRUCT", StructTypes: fts}, nil
|
||||
}
|
||||
return nil, fmt.Errorf("bigquery: Go type %s cannot be represented as a parameter type", t)
|
||||
}
|
||||
|
||||
func paramValue(v reflect.Value) (bq.QueryParameterValue, error) {
|
||||
var res bq.QueryParameterValue
|
||||
if !v.IsValid() {
|
||||
return res, errors.New("bigquery: nil parameter")
|
||||
}
|
||||
t := v.Type()
|
||||
switch t {
|
||||
case typeOfDate:
|
||||
res.Value = v.Interface().(civil.Date).String()
|
||||
return res, nil
|
||||
|
||||
case typeOfTime:
|
||||
// civil.Time has nanosecond resolution, but BigQuery TIME only microsecond.
|
||||
res.Value = civilTimeParamString(v.Interface().(civil.Time))
|
||||
return res, nil
|
||||
|
||||
case typeOfDateTime:
|
||||
dt := v.Interface().(civil.DateTime)
|
||||
res.Value = dt.Date.String() + " " + civilTimeParamString(dt.Time)
|
||||
return res, nil
|
||||
|
||||
case typeOfGoTime:
|
||||
res.Value = v.Interface().(time.Time).Format(timestampFormat)
|
||||
return res, nil
|
||||
}
|
||||
switch t.Kind() {
|
||||
case reflect.Slice:
|
||||
if t.Elem().Kind() == reflect.Uint8 {
|
||||
res.Value = base64.StdEncoding.EncodeToString(v.Interface().([]byte))
|
||||
return res, nil
|
||||
}
|
||||
fallthrough
|
||||
|
||||
case reflect.Array:
|
||||
var vals []*bq.QueryParameterValue
|
||||
for i := 0; i < v.Len(); i++ {
|
||||
val, err := paramValue(v.Index(i))
|
||||
if err != nil {
|
||||
return bq.QueryParameterValue{}, err
|
||||
}
|
||||
vals = append(vals, &val)
|
||||
}
|
||||
return bq.QueryParameterValue{ArrayValues: vals}, nil
|
||||
|
||||
case reflect.Ptr:
|
||||
if t.Elem().Kind() != reflect.Struct {
|
||||
return res, fmt.Errorf("bigquery: Go type %s cannot be represented as a parameter value", t)
|
||||
}
|
||||
t = t.Elem()
|
||||
v = v.Elem()
|
||||
if !v.IsValid() {
|
||||
// nil pointer becomes empty value
|
||||
return res, nil
|
||||
}
|
||||
fallthrough
|
||||
|
||||
case reflect.Struct:
|
||||
fields, err := fieldCache.Fields(t)
|
||||
if err != nil {
|
||||
return bq.QueryParameterValue{}, err
|
||||
}
|
||||
res.StructValues = map[string]bq.QueryParameterValue{}
|
||||
for _, f := range fields {
|
||||
fv := v.FieldByIndex(f.Index)
|
||||
fp, err := paramValue(fv)
|
||||
if err != nil {
|
||||
return bq.QueryParameterValue{}, err
|
||||
}
|
||||
res.StructValues[f.Name] = fp
|
||||
}
|
||||
return res, nil
|
||||
}
|
||||
// None of the above: assume a scalar type. (If it's not a valid type,
|
||||
// paramType will catch the error.)
|
||||
res.Value = fmt.Sprint(v.Interface())
|
||||
return res, nil
|
||||
}
|
||||
|
||||
func civilTimeParamString(t civil.Time) string {
|
||||
if t.Nanosecond == 0 {
|
||||
return t.String()
|
||||
} else {
|
||||
micro := (t.Nanosecond + 500) / 1000 // round to nearest microsecond
|
||||
t.Nanosecond = 0
|
||||
return t.String() + fmt.Sprintf(".%06d", micro)
|
||||
}
|
||||
}
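// Worked example (illustrative, not part of the original file):
// civil.Time{4, 5, 6, 789000499} renders as "04:05:06.789000", since
// (789000499+500)/1000 = 789000 microseconds; a zero Nanosecond yields
// plain "04:05:06".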
|
262 vendor/cloud.google.com/go/bigquery/params_test.go generated vendored Normal file
|
@ -0,0 +1,262 @@
|
|||
// Copyright 2016 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package bigquery
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"math"
|
||||
"reflect"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"cloud.google.com/go/civil"
|
||||
"golang.org/x/net/context"
|
||||
bq "google.golang.org/api/bigquery/v2"
|
||||
)
|
||||
|
||||
var scalarTests = []struct {
|
||||
val interface{}
|
||||
want string
|
||||
}{
|
||||
{int64(0), "0"},
|
||||
{3.14, "3.14"},
|
||||
{3.14159e-87, "3.14159e-87"},
|
||||
{true, "true"},
|
||||
{"string", "string"},
|
||||
{"\u65e5\u672c\u8a9e\n", "\u65e5\u672c\u8a9e\n"},
|
||||
{math.NaN(), "NaN"},
|
||||
{[]byte("foo"), "Zm9v"}, // base64 encoding of "foo"
|
||||
{time.Date(2016, 3, 20, 4, 22, 9, 5000, time.FixedZone("neg1-2", -3720)),
|
||||
"2016-03-20 04:22:09.000005-01:02"},
|
||||
{civil.Date{2016, 3, 20}, "2016-03-20"},
|
||||
{civil.Time{4, 5, 6, 789000000}, "04:05:06.789000"},
|
||||
{civil.DateTime{civil.Date{2016, 3, 20}, civil.Time{4, 5, 6, 789000000}}, "2016-03-20 04:05:06.789000"},
|
||||
}
|
||||
|
||||
type S1 struct {
|
||||
A int
|
||||
B *S2
|
||||
C bool
|
||||
}
|
||||
|
||||
type S2 struct {
	D string
	e int
}

var s1 = S1{
	A: 1,
	B: &S2{D: "s"},
	C: true,
}

func sval(s string) bq.QueryParameterValue {
	return bq.QueryParameterValue{Value: s}
}

func TestParamValueScalar(t *testing.T) {
	for _, test := range scalarTests {
		got, err := paramValue(reflect.ValueOf(test.val))
		if err != nil {
			t.Errorf("%v: got %v, want nil", test.val, err)
			continue
		}
		want := sval(test.want)
		if !reflect.DeepEqual(got, want) {
			t.Errorf("%v:\ngot %+v\nwant %+v", test.val, got, want)
		}
	}
}

func TestParamValueArray(t *testing.T) {
	qpv := bq.QueryParameterValue{ArrayValues: []*bq.QueryParameterValue{
		{Value: "1"},
		{Value: "2"},
	},
	}
	for _, test := range []struct {
		val  interface{}
		want bq.QueryParameterValue
	}{
		{[]int(nil), bq.QueryParameterValue{}},
		{[]int{}, bq.QueryParameterValue{}},
		{[]int{1, 2}, qpv},
		{[2]int{1, 2}, qpv},
	} {
		got, err := paramValue(reflect.ValueOf(test.val))
		if err != nil {
			t.Fatal(err)
		}
		if !reflect.DeepEqual(got, test.want) {
			t.Errorf("%#v:\ngot %+v\nwant %+v", test.val, got, test.want)
		}
	}
}

func TestParamValueStruct(t *testing.T) {
	got, err := paramValue(reflect.ValueOf(s1))
	if err != nil {
		t.Fatal(err)
	}
	want := bq.QueryParameterValue{
		StructValues: map[string]bq.QueryParameterValue{
			"A": sval("1"),
			"B": bq.QueryParameterValue{
				StructValues: map[string]bq.QueryParameterValue{
					"D": sval("s"),
				},
			},
			"C": sval("true"),
		},
	}
	if !reflect.DeepEqual(got, want) {
		t.Errorf("got %+v\nwant %+v", got, want)
	}
}

func TestParamValueErrors(t *testing.T) {
	// paramValue lets a few invalid types through, but paramType catches them.
	// Since we never call one without the other that's fine.
	for _, val := range []interface{}{nil, new([]int)} {
		_, err := paramValue(reflect.ValueOf(val))
		if err == nil {
			t.Errorf("%v (%T): got nil, want error", val, val)
		}
	}
}

func TestParamType(t *testing.T) {
	for _, test := range []struct {
		val  interface{}
		want *bq.QueryParameterType
	}{
		{0, int64ParamType},
		{uint32(32767), int64ParamType},
		{3.14, float64ParamType},
		{float32(3.14), float64ParamType},
		{math.NaN(), float64ParamType},
		{true, boolParamType},
		{"", stringParamType},
		{"string", stringParamType},
		{time.Now(), timestampParamType},
		{[]byte("foo"), bytesParamType},
		{[]int{}, &bq.QueryParameterType{Type: "ARRAY", ArrayType: int64ParamType}},
		{[3]bool{}, &bq.QueryParameterType{Type: "ARRAY", ArrayType: boolParamType}},
		{S1{}, &bq.QueryParameterType{
			Type: "STRUCT",
			StructTypes: []*bq.QueryParameterTypeStructTypes{
				{Name: "A", Type: int64ParamType},
				{Name: "B", Type: &bq.QueryParameterType{
					Type: "STRUCT",
					StructTypes: []*bq.QueryParameterTypeStructTypes{
						{Name: "D", Type: stringParamType},
					},
				}},
				{Name: "C", Type: boolParamType},
			},
		}},
	} {
		got, err := paramType(reflect.TypeOf(test.val))
		if err != nil {
			t.Fatal(err)
		}
		if !reflect.DeepEqual(got, test.want) {
			t.Errorf("%v (%T): got %v, want %v", test.val, test.val, got, test.want)
		}
	}
}

func TestParamTypeErrors(t *testing.T) {
	for _, val := range []interface{}{
		nil, uint(0), new([]int), make(chan int),
	} {
		_, err := paramType(reflect.TypeOf(val))
		if err == nil {
			t.Errorf("%v (%T): got nil, want error", val, val)
		}
	}
}

func TestIntegration_ScalarParam(t *testing.T) {
	c := getClient(t)
	for _, test := range scalarTests {
		got, err := paramRoundTrip(c, test.val)
		if err != nil {
			t.Fatal(err)
		}
		if !equal(got, test.val) {
			t.Errorf("\ngot %#v (%T)\nwant %#v (%T)", got, got, test.val, test.val)
		}
	}
}

func TestIntegration_OtherParam(t *testing.T) {
	c := getClient(t)
	for _, test := range []struct {
		val  interface{}
		want interface{}
	}{
		{[]int(nil), []Value(nil)},
		{[]int{}, []Value(nil)},
		{[]int{1, 2}, []Value{int64(1), int64(2)}},
		{[3]int{1, 2, 3}, []Value{int64(1), int64(2), int64(3)}},
		{S1{}, []Value{int64(0), nil, false}},
		{s1, []Value{int64(1), []Value{"s"}, true}},
	} {
		got, err := paramRoundTrip(c, test.val)
		if err != nil {
			t.Fatal(err)
		}
		if !equal(got, test.want) {
			t.Errorf("\ngot %#v (%T)\nwant %#v (%T)", got, got, test.want, test.want)
		}
	}
}

func paramRoundTrip(c *Client, x interface{}) (Value, error) {
	q := c.Query("select ?")
	q.Parameters = []QueryParameter{{Value: x}}
	it, err := q.Read(context.Background())
	if err != nil {
		return nil, err
	}
	var val []Value
	err = it.Next(&val)
	if err != nil {
		return nil, err
	}
	if len(val) != 1 {
		return nil, errors.New("wrong number of values")
	}
	return val[0], nil
}

func equal(x1, x2 interface{}) bool {
	if reflect.TypeOf(x1) != reflect.TypeOf(x2) {
		return false
	}
	switch x1 := x1.(type) {
	case float64:
		if math.IsNaN(x1) {
			return math.IsNaN(x2.(float64))
		}
		return x1 == x2
	case time.Time:
		// BigQuery is only accurate to the microsecond.
		return x1.Round(time.Microsecond).Equal(x2.(time.Time).Round(time.Microsecond))
	default:
		return reflect.DeepEqual(x1, x2)
	}
}
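(A minimal sketch, not part of this vendored package, of how a caller might bind the positional query parameters exercised by the tests above. The project ID "my-project" is a placeholder.)

package main

import (
	"fmt"
	"log"

	"cloud.google.com/go/bigquery"
	"golang.org/x/net/context"
)

func main() {
	ctx := context.Background()
	client, err := bigquery.NewClient(ctx, "my-project")
	if err != nil {
		log.Fatal(err)
	}
	q := client.Query("select ? + ?")
	// Positional parameters: values are bound, in order, to each "?".
	q.Parameters = []bigquery.QueryParameter{{Value: 1}, {Value: 2}}
	it, err := q.Read(ctx)
	if err != nil {
		log.Fatal(err)
	}
	var row []bigquery.Value
	if err := it.Next(&row); err != nil {
		log.Fatal(err)
	}
	fmt.Println(row[0]) // int64(3)
}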
196 vendor/cloud.google.com/go/bigquery/query.go generated vendored Normal file
@@ -0,0 +1,196 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package bigquery

import (
	"golang.org/x/net/context"
	bq "google.golang.org/api/bigquery/v2"
)

// QueryConfig holds the configuration for a query job.
type QueryConfig struct {
	// JobID is the ID to use for the query job. If this field is empty, a job ID
	// will be automatically created.
	JobID string

	// Dst is the table into which the results of the query will be written.
	// If this field is nil, a temporary table will be created.
	Dst *Table

	// The query to execute. See https://cloud.google.com/bigquery/query-reference for details.
	Q string

	// DefaultProjectID and DefaultDatasetID specify the dataset to use for unqualified table names in the query.
	// If DefaultProjectID is set, DefaultDatasetID must also be set.
	DefaultProjectID string
	DefaultDatasetID string

	// TableDefinitions describes data sources outside of BigQuery.
	// The map keys may be used as table names in the query string.
	TableDefinitions map[string]ExternalData

	// CreateDisposition specifies the circumstances under which the destination table will be created.
	// The default is CreateIfNeeded.
	CreateDisposition TableCreateDisposition

	// WriteDisposition specifies how existing data in the destination table is treated.
	// The default is WriteAppend.
	WriteDisposition TableWriteDisposition

	// DisableQueryCache prevents results being fetched from the query cache.
	// If this field is false, results are fetched from the cache if they are available.
	// The query cache is a best-effort cache that is flushed whenever tables in the query are modified.
	// Cached results are only available when TableID is unspecified in the query's destination Table.
	// For more information, see https://cloud.google.com/bigquery/querying-data#querycaching
	DisableQueryCache bool

	// DisableFlattenedResults prevents results being flattened.
	// If this field is false, results from nested and repeated fields are flattened.
	// DisableFlattenedResults implies AllowLargeResults.
	// For more information, see https://cloud.google.com/bigquery/docs/data#nested
	DisableFlattenedResults bool

	// AllowLargeResults allows the query to produce arbitrarily large result tables.
	// The destination must be a table.
	// When using this option, queries will take longer to execute, even if the result set is small.
	// For additional limitations, see https://cloud.google.com/bigquery/querying-data#largequeryresults
	AllowLargeResults bool

	// Priority specifies the priority with which to schedule the query.
	// The default priority is InteractivePriority.
	// For more information, see https://cloud.google.com/bigquery/querying-data#batchqueries
	Priority QueryPriority

	// MaxBillingTier sets the maximum billing tier for a Query.
	// Queries that have resource usage beyond this tier will fail (without
	// incurring a charge). If this field is zero, the project default will be used.
	MaxBillingTier int

	// MaxBytesBilled limits the number of bytes billed for
	// this job. Queries that would exceed this limit will fail (without incurring
	// a charge).
	// If this field is less than 1, the project default will be used.
	MaxBytesBilled int64

	// UseStandardSQL causes the query to use standard SQL.
	// The default is false (using legacy SQL).
	UseStandardSQL bool

	// Parameters is a list of query parameters. The presence of parameters
	// implies the use of standard SQL.
	// If the query uses positional syntax ("?"), then no parameter may have a name.
	// If the query uses named syntax ("@p"), then all parameters must have names.
	// It is illegal to mix positional and named syntax.
	Parameters []QueryParameter
}

// QueryPriority specifies a priority with which a query is to be executed.
type QueryPriority string

const (
	BatchPriority       QueryPriority = "BATCH"
	InteractivePriority QueryPriority = "INTERACTIVE"
)

// A Query queries data from a BigQuery table. Use Client.Query to create a Query.
type Query struct {
	client *Client
	QueryConfig
}

// Query creates a query with string q.
// The returned Query may optionally be further configured before its Run method is called.
func (c *Client) Query(q string) *Query {
	return &Query{
		client:      c,
		QueryConfig: QueryConfig{Q: q},
	}
}

// Run initiates a query job.
func (q *Query) Run(ctx context.Context) (*Job, error) {
	job := &bq.Job{
		Configuration: &bq.JobConfiguration{
			Query: &bq.JobConfigurationQuery{},
		},
	}
	setJobRef(job, q.JobID, q.client.projectID)

	if err := q.QueryConfig.populateJobQueryConfig(job.Configuration.Query); err != nil {
		return nil, err
	}
	j, err := q.client.service.insertJob(ctx, q.client.projectID, &insertJobConf{job: job})
	if err != nil {
		return nil, err
	}
	j.isQuery = true
	return j, nil
}

func (q *QueryConfig) populateJobQueryConfig(conf *bq.JobConfigurationQuery) error {
	conf.Query = q.Q

	if len(q.TableDefinitions) > 0 {
		conf.TableDefinitions = make(map[string]bq.ExternalDataConfiguration)
	}
	for name, data := range q.TableDefinitions {
		conf.TableDefinitions[name] = data.externalDataConfig()
	}

	if q.DefaultProjectID != "" || q.DefaultDatasetID != "" {
		conf.DefaultDataset = &bq.DatasetReference{
			DatasetId: q.DefaultDatasetID,
			ProjectId: q.DefaultProjectID,
		}
	}

	if tier := int64(q.MaxBillingTier); tier > 0 {
		conf.MaximumBillingTier = &tier
	}
	conf.CreateDisposition = string(q.CreateDisposition)
	conf.WriteDisposition = string(q.WriteDisposition)
	conf.AllowLargeResults = q.AllowLargeResults
	conf.Priority = string(q.Priority)

	f := false
	if q.DisableQueryCache {
		conf.UseQueryCache = &f
	}
	if q.DisableFlattenedResults {
		conf.FlattenResults = &f
		// DisableFlattenResults implies AllowLargeResults.
		conf.AllowLargeResults = true
	}
	if q.MaxBytesBilled >= 1 {
		conf.MaximumBytesBilled = q.MaxBytesBilled
	}
	if q.UseStandardSQL || len(q.Parameters) > 0 {
		conf.UseLegacySql = false
		conf.ForceSendFields = append(conf.ForceSendFields, "UseLegacySql")
	}

	if q.Dst != nil && !q.Dst.implicitTable() {
		conf.DestinationTable = q.Dst.tableRefProto()
	}
	for _, p := range q.Parameters {
		qp, err := p.toRaw()
		if err != nil {
			return err
		}
		conf.QueryParameters = append(conf.QueryParameters, qp)
	}
	return nil
}
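(A hedged usage sketch for the QueryConfig fields defined above, continuing the client and ctx placeholders from the earlier sketch; the dataset and table names are invented. Setting Dst routes results into a named table instead of a temporary one, and BatchPriority defers scheduling.)

q := client.Query("select name, count(*) from names group by name")
q.DefaultProjectID = "my-project"
q.DefaultDatasetID = "mydataset"
q.Dst = client.Dataset("mydataset").Table("name_counts")
q.WriteDisposition = bigquery.WriteTruncate
q.Priority = bigquery.BatchPriority
job, err := q.Run(ctx)
if err != nil {
	log.Fatal(err)
}
_ = job // the returned *Job can be polled, or read via Job.Read.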
305 vendor/cloud.google.com/go/bigquery/query_test.go generated vendored Normal file
@@ -0,0 +1,305 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package bigquery

import (
	"reflect"
	"testing"

	"golang.org/x/net/context"

	bq "google.golang.org/api/bigquery/v2"
)

func defaultQueryJob() *bq.Job {
	return &bq.Job{
		Configuration: &bq.JobConfiguration{
			Query: &bq.JobConfigurationQuery{
				DestinationTable: &bq.TableReference{
					ProjectId: "project-id",
					DatasetId: "dataset-id",
					TableId:   "table-id",
				},
				Query: "query string",
				DefaultDataset: &bq.DatasetReference{
					ProjectId: "def-project-id",
					DatasetId: "def-dataset-id",
				},
			},
		},
	}
}

func TestQuery(t *testing.T) {
	c := &Client{
		projectID: "project-id",
	}
	testCases := []struct {
		dst  *Table
		src  *QueryConfig
		want *bq.Job
	}{
		{
			dst:  c.Dataset("dataset-id").Table("table-id"),
			src:  defaultQuery,
			want: defaultQueryJob(),
		},
		{
			dst: c.Dataset("dataset-id").Table("table-id"),
			src: &QueryConfig{
				Q: "query string",
			},
			want: func() *bq.Job {
				j := defaultQueryJob()
				j.Configuration.Query.DefaultDataset = nil
				return j
			}(),
		},
		{
			dst: &Table{},
			src: defaultQuery,
			want: func() *bq.Job {
				j := defaultQueryJob()
				j.Configuration.Query.DestinationTable = nil
				return j
			}(),
		},
		{
			dst: c.Dataset("dataset-id").Table("table-id"),
			src: &QueryConfig{
				Q: "query string",
				TableDefinitions: map[string]ExternalData{
					"atable": func() *GCSReference {
						g := NewGCSReference("uri")
						g.AllowJaggedRows = true
						g.AllowQuotedNewlines = true
						g.Compression = Gzip
						g.Encoding = UTF_8
						g.FieldDelimiter = ";"
						g.IgnoreUnknownValues = true
						g.MaxBadRecords = 1
						g.Quote = "'"
						g.SkipLeadingRows = 2
						g.Schema = Schema([]*FieldSchema{
							{Name: "name", Type: StringFieldType},
						})
						return g
					}(),
				},
			},
			want: func() *bq.Job {
				j := defaultQueryJob()
				j.Configuration.Query.DefaultDataset = nil
				td := make(map[string]bq.ExternalDataConfiguration)
				quote := "'"
				td["atable"] = bq.ExternalDataConfiguration{
					Compression:         "GZIP",
					IgnoreUnknownValues: true,
					MaxBadRecords:       1,
					SourceFormat:        "CSV", // must be explicitly set.
					SourceUris:          []string{"uri"},
					CsvOptions: &bq.CsvOptions{
						AllowJaggedRows:     true,
						AllowQuotedNewlines: true,
						Encoding:            "UTF-8",
						FieldDelimiter:      ";",
						SkipLeadingRows:     2,
						Quote:               &quote,
					},
					Schema: &bq.TableSchema{
						Fields: []*bq.TableFieldSchema{
							{Name: "name", Type: "STRING"},
						},
					},
				}
				j.Configuration.Query.TableDefinitions = td
				return j
			}(),
		},
		{
			dst: &Table{
				ProjectID: "project-id",
				DatasetID: "dataset-id",
				TableID:   "table-id",
			},
			src: &QueryConfig{
				Q:                 "query string",
				DefaultProjectID:  "def-project-id",
				DefaultDatasetID:  "def-dataset-id",
				CreateDisposition: CreateNever,
				WriteDisposition:  WriteTruncate,
			},
			want: func() *bq.Job {
				j := defaultQueryJob()
				j.Configuration.Query.WriteDisposition = "WRITE_TRUNCATE"
				j.Configuration.Query.CreateDisposition = "CREATE_NEVER"
				return j
			}(),
		},
		{
			dst: c.Dataset("dataset-id").Table("table-id"),
			src: &QueryConfig{
				Q:                 "query string",
				DefaultProjectID:  "def-project-id",
				DefaultDatasetID:  "def-dataset-id",
				DisableQueryCache: true,
			},
			want: func() *bq.Job {
				j := defaultQueryJob()
				f := false
				j.Configuration.Query.UseQueryCache = &f
				return j
			}(),
		},
		{
			dst: c.Dataset("dataset-id").Table("table-id"),
			src: &QueryConfig{
				Q:                 "query string",
				DefaultProjectID:  "def-project-id",
				DefaultDatasetID:  "def-dataset-id",
				AllowLargeResults: true,
			},
			want: func() *bq.Job {
				j := defaultQueryJob()
				j.Configuration.Query.AllowLargeResults = true
				return j
			}(),
		},
		{
			dst: c.Dataset("dataset-id").Table("table-id"),
			src: &QueryConfig{
				Q:                       "query string",
				DefaultProjectID:        "def-project-id",
				DefaultDatasetID:        "def-dataset-id",
				DisableFlattenedResults: true,
			},
			want: func() *bq.Job {
				j := defaultQueryJob()
				f := false
				j.Configuration.Query.FlattenResults = &f
				j.Configuration.Query.AllowLargeResults = true
				return j
			}(),
		},
		{
			dst: c.Dataset("dataset-id").Table("table-id"),
			src: &QueryConfig{
				Q:                "query string",
				DefaultProjectID: "def-project-id",
				DefaultDatasetID: "def-dataset-id",
				Priority:         QueryPriority("low"),
			},
			want: func() *bq.Job {
				j := defaultQueryJob()
				j.Configuration.Query.Priority = "low"
				return j
			}(),
		},
		{
			dst: c.Dataset("dataset-id").Table("table-id"),
			src: &QueryConfig{
				Q:                "query string",
				DefaultProjectID: "def-project-id",
				DefaultDatasetID: "def-dataset-id",
				MaxBillingTier:   3,
				MaxBytesBilled:   5,
			},
			want: func() *bq.Job {
				j := defaultQueryJob()
				tier := int64(3)
				j.Configuration.Query.MaximumBillingTier = &tier
				j.Configuration.Query.MaximumBytesBilled = 5
				return j
			}(),
		},
		{
			dst: c.Dataset("dataset-id").Table("table-id"),
			src: &QueryConfig{
				Q:                "query string",
				DefaultProjectID: "def-project-id",
				DefaultDatasetID: "def-dataset-id",
				MaxBytesBilled:   -1,
			},
			want: defaultQueryJob(),
		},
		{
			dst: c.Dataset("dataset-id").Table("table-id"),
			src: &QueryConfig{
				Q:                "query string",
				DefaultProjectID: "def-project-id",
				DefaultDatasetID: "def-dataset-id",
				UseStandardSQL:   true,
			},
			want: func() *bq.Job {
				j := defaultQueryJob()
				j.Configuration.Query.UseLegacySql = false
				j.Configuration.Query.ForceSendFields = []string{"UseLegacySql"}
				return j
			}(),
		},
	}
	for _, tc := range testCases {
		s := &testService{}
		c.service = s
		query := c.Query("")
		query.QueryConfig = *tc.src
		query.Dst = tc.dst
		if _, err := query.Run(context.Background()); err != nil {
			t.Errorf("err calling query: %v", err)
			continue
		}
		if !reflect.DeepEqual(s.Job, tc.want) {
			t.Errorf("querying: got:\n%v\nwant:\n%v", s.Job, tc.want)
		}
	}
}

func TestConfiguringQuery(t *testing.T) {
	s := &testService{}
	c := &Client{
		projectID: "project-id",
		service:   s,
	}

	query := c.Query("q")
	query.JobID = "ajob"
	query.DefaultProjectID = "def-project-id"
	query.DefaultDatasetID = "def-dataset-id"
	// Note: Other configuration fields are tested in other tests above.
	// A lot of that can be consolidated once Client.Copy is gone.

	want := &bq.Job{
		Configuration: &bq.JobConfiguration{
			Query: &bq.JobConfigurationQuery{
				Query: "q",
				DefaultDataset: &bq.DatasetReference{
					ProjectId: "def-project-id",
					DatasetId: "def-dataset-id",
				},
			},
		},
		JobReference: &bq.JobReference{
			JobId:     "ajob",
			ProjectId: "project-id",
		},
	}

	if _, err := query.Run(context.Background()); err != nil {
		t.Fatalf("err calling Query.Run: %v", err)
	}
	if !reflect.DeepEqual(s.Job, want) {
		t.Errorf("querying: got:\n%v\nwant:\n%v", s.Job, want)
	}
}
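(The UseLegacySql case above hinges on a quirk of the generated API client worth spelling out: a false boolean is normally omitted from the JSON request body, so query.go lists the field in ForceSendFields to make "useLegacySql": false explicit on the wire. A minimal illustration with the raw bq types, values being placeholders:)

conf := &bq.JobConfigurationQuery{UseLegacySql: false}
// Without this, the zero value would be dropped by omitempty and the
// server would fall back to its legacy-SQL default.
conf.ForceSendFields = append(conf.ForceSendFields, "UseLegacySql")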
64 vendor/cloud.google.com/go/bigquery/read.go generated vendored Normal file
@@ -0,0 +1,64 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package bigquery

import (
	"errors"

	"golang.org/x/net/context"
)

func (conf *readTableConf) fetch(ctx context.Context, s service, token string) (*readDataResult, error) {
	return s.readTabledata(ctx, conf, token)
}

func (conf *readTableConf) setPaging(pc *pagingConf) { conf.paging = *pc }

// Read fetches the contents of the table.
func (t *Table) Read(ctx context.Context) *RowIterator {
	return newRowIterator(ctx, t.c.service, &readTableConf{
		projectID: t.ProjectID,
		datasetID: t.DatasetID,
		tableID:   t.TableID,
	})
}

func (conf *readQueryConf) fetch(ctx context.Context, s service, token string) (*readDataResult, error) {
	return s.readQuery(ctx, conf, token)
}

func (conf *readQueryConf) setPaging(pc *pagingConf) { conf.paging = *pc }

// Read fetches the results of a query job.
// If j is not a query job, Read returns an error.
func (j *Job) Read(ctx context.Context) (*RowIterator, error) {
	if !j.isQuery {
		return nil, errors.New("Cannot read from a non-query job")
	}
	return newRowIterator(ctx, j.service, &readQueryConf{
		projectID: j.projectID,
		jobID:     j.jobID,
	}), nil
}

// Read submits a query for execution and returns the results via a RowIterator.
// It is a shorthand for Query.Run followed by Job.Read.
func (q *Query) Read(ctx context.Context) (*RowIterator, error) {
	job, err := q.Run(ctx)
	if err != nil {
		return nil, err
	}
	return job.Read(ctx)
}
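(Sketch of draining a RowIterator as returned by the Read methods above, reusing the client/ctx placeholders from the earlier sketches and the "google.golang.org/api/iterator" package; iterator.Done signals the end of the result set.)

it := client.Dataset("mydataset").Table("names").Read(ctx)
for {
	var row []bigquery.Value
	err := it.Next(&row)
	if err == iterator.Done {
		break // all pages consumed.
	}
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(row)
}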
303 vendor/cloud.google.com/go/bigquery/read_test.go generated vendored Normal file
@@ -0,0 +1,303 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package bigquery

import (
	"errors"
	"reflect"
	"testing"

	"golang.org/x/net/context"
	"google.golang.org/api/iterator"
)

type readTabledataArgs struct {
	conf *readTableConf
	tok  string
}

type readQueryArgs struct {
	conf *readQueryConf
	tok  string
}

// readServiceStub services read requests by returning data from an in-memory list of values.
type readServiceStub struct {
	// values and pageTokens are used as sources of data to return in response to calls to readTabledata or readQuery.
	values     [][][]Value       // contains pages / rows / columns.
	pageTokens map[string]string // maps incoming page token to returned page token.

	// arguments are recorded for later inspection.
	readTabledataCalls []readTabledataArgs
	readQueryCalls     []readQueryArgs

	service
}

func (s *readServiceStub) readValues(tok string) *readDataResult {
	result := &readDataResult{
		pageToken: s.pageTokens[tok],
		rows:      s.values[0],
	}
	s.values = s.values[1:]

	return result
}

func (s *readServiceStub) readTabledata(ctx context.Context, conf *readTableConf, token string) (*readDataResult, error) {
	s.readTabledataCalls = append(s.readTabledataCalls, readTabledataArgs{conf, token})
	return s.readValues(token), nil
}

func (s *readServiceStub) readQuery(ctx context.Context, conf *readQueryConf, token string) (*readDataResult, error) {
	s.readQueryCalls = append(s.readQueryCalls, readQueryArgs{conf, token})
	return s.readValues(token), nil
}

func TestRead(t *testing.T) {
	// The data for the service stub to return is populated for each test case in the testCases for loop.
	ctx := context.Background()
	service := &readServiceStub{}
	c := &Client{
		projectID: "project-id",
		service:   service,
	}

	queryJob := &Job{
		projectID: "project-id",
		jobID:     "job-id",
		service:   service,
		isQuery:   true,
	}

	for _, readFunc := range []func() *RowIterator{
		func() *RowIterator {
			return c.Dataset("dataset-id").Table("table-id").Read(ctx)
		},
		func() *RowIterator {
			it, err := queryJob.Read(ctx)
			if err != nil {
				t.Fatal(err)
			}
			return it
		},
	} {
		testCases := []struct {
			data       [][][]Value
			pageTokens map[string]string
			want       [][]Value
		}{
			{
				data:       [][][]Value{{{1, 2}, {11, 12}}, {{30, 40}, {31, 41}}},
				pageTokens: map[string]string{"": "a", "a": ""},
				want:       [][]Value{{1, 2}, {11, 12}, {30, 40}, {31, 41}},
			},
			{
				data:       [][][]Value{{{1, 2}, {11, 12}}, {{30, 40}, {31, 41}}},
				pageTokens: map[string]string{"": ""}, // no more pages after first one.
				want:       [][]Value{{1, 2}, {11, 12}},
			},
		}
		for _, tc := range testCases {
			service.values = tc.data
			service.pageTokens = tc.pageTokens
			if got, ok := collectValues(t, readFunc()); ok {
				if !reflect.DeepEqual(got, tc.want) {
					t.Errorf("reading: got:\n%v\nwant:\n%v", got, tc.want)
				}
			}
		}
	}
}

func collectValues(t *testing.T, it *RowIterator) ([][]Value, bool) {
	var got [][]Value
	for {
		var vals []Value
		err := it.Next(&vals)
		if err == iterator.Done {
			break
		}
		if err != nil {
			t.Errorf("err calling Next: %v", err)
			return nil, false
		}
		got = append(got, vals)
	}
	return got, true
}

func TestNoMoreValues(t *testing.T) {
	c := &Client{
		projectID: "project-id",
		service: &readServiceStub{
			values: [][][]Value{{{1, 2}, {11, 12}}},
		},
	}
	it := c.Dataset("dataset-id").Table("table-id").Read(context.Background())
	var vals []Value
	// We expect to retrieve two values and then fail on the next attempt.
	if err := it.Next(&vals); err != nil {
		t.Fatalf("Next: got: %v: want: nil", err)
	}
	if err := it.Next(&vals); err != nil {
		t.Fatalf("Next: got: %v: want: nil", err)
	}
	if err := it.Next(&vals); err != iterator.Done {
		t.Fatalf("Next: got: %v: want: iterator.Done", err)
	}
}

// delayedReadStub simulates reading results from a query that has not yet
// completed. Its readQuery method initially reports that the query job is not
// yet complete. Subsequently, it proxies the request through to another
// service stub.
type delayedReadStub struct {
	numDelays int

	readServiceStub
}

func (s *delayedReadStub) readQuery(ctx context.Context, conf *readQueryConf, token string) (*readDataResult, error) {
	if s.numDelays > 0 {
		s.numDelays--
		return nil, errIncompleteJob
	}
	return s.readServiceStub.readQuery(ctx, conf, token)
}

// TestIncompleteJob tests that an Iterator which reads from a query job will block until the job is complete.
func TestIncompleteJob(t *testing.T) {
	service := &delayedReadStub{
		numDelays: 2,
		readServiceStub: readServiceStub{
			values: [][][]Value{{{1, 2}}},
		},
	}
	queryJob := &Job{
		projectID: "project-id",
		jobID:     "job-id",
		service:   service,
		isQuery:   true,
	}
	it, err := queryJob.Read(context.Background())
	if err != nil {
		t.Fatalf("err calling Read: %v", err)
	}
	var got []Value
	want := []Value{1, 2}
	if err := it.Next(&got); err != nil {
		t.Fatalf("Next: got: %v: want: nil", err)
	}
	if service.numDelays != 0 {
		t.Errorf("remaining numDelays : got: %v want:0", service.numDelays)
	}
	if !reflect.DeepEqual(got, want) {
		t.Errorf("reading: got:\n%v\nwant:\n%v", got, want)
	}
}

type errorReadService struct {
	service
}

var errBang = errors.New("bang!")

func (s *errorReadService) readTabledata(ctx context.Context, conf *readTableConf, token string) (*readDataResult, error) {
	return nil, errBang
}

func TestReadError(t *testing.T) {
	// test that service read errors are propagated back to the caller.
	c := &Client{
		projectID: "project-id",
		service:   &errorReadService{},
	}
	it := c.Dataset("dataset-id").Table("table-id").Read(context.Background())
	var vals []Value
	if err := it.Next(&vals); err != errBang {
		t.Fatalf("Get: got: %v: want: %v", err, errBang)
	}
}

func TestReadTabledataOptions(t *testing.T) {
	// test that read options are propagated.
	s := &readServiceStub{
		values: [][][]Value{{{1, 2}}},
	}
	c := &Client{
		projectID: "project-id",
		service:   s,
	}
	it := c.Dataset("dataset-id").Table("table-id").Read(context.Background())
	it.PageInfo().MaxSize = 5
	var vals []Value
	if err := it.Next(&vals); err != nil {
		t.Fatal(err)
	}
	want := []readTabledataArgs{{
		conf: &readTableConf{
			projectID: "project-id",
			datasetID: "dataset-id",
			tableID:   "table-id",
			paging: pagingConf{
				recordsPerRequest:    5,
				setRecordsPerRequest: true,
			},
		},
		tok: "",
	}}

	if !reflect.DeepEqual(s.readTabledataCalls, want) {
		t.Errorf("reading: got:\n%v\nwant:\n%v", s.readTabledataCalls, want)
	}
}

func TestReadQueryOptions(t *testing.T) {
	// test that read options are propagated.
	s := &readServiceStub{
		values: [][][]Value{{{1, 2}}},
	}
	queryJob := &Job{
		projectID: "project-id",
		jobID:     "job-id",
		service:   s,
		isQuery:   true,
	}
	it, err := queryJob.Read(context.Background())
	if err != nil {
		t.Fatalf("err calling Read: %v", err)
	}
	it.PageInfo().MaxSize = 5
	var vals []Value
	if err := it.Next(&vals); err != nil {
		t.Fatalf("Next: got: %v: want: nil", err)
	}

	want := []readQueryArgs{{
		conf: &readQueryConf{
			projectID: "project-id",
			jobID:     "job-id",
			paging: pagingConf{
				recordsPerRequest:    5,
				setRecordsPerRequest: true,
			},
		},
		tok: "",
	}}

	if !reflect.DeepEqual(s.readQueryCalls, want) {
		t.Errorf("reading: got:\n%v\nwant:\n%v", s.readQueryCalls, want)
	}
}
312 vendor/cloud.google.com/go/bigquery/schema.go generated vendored Normal file
@@ -0,0 +1,312 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package bigquery

import (
	"errors"
	"fmt"
	"reflect"

	"cloud.google.com/go/internal/atomiccache"

	bq "google.golang.org/api/bigquery/v2"
)

// Schema describes the fields in a table or query result.
type Schema []*FieldSchema

type FieldSchema struct {
	// The field name.
	// Must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_),
	// and must start with a letter or underscore.
	// The maximum length is 128 characters.
	Name string

	// A description of the field. The maximum length is 16,384 characters.
	Description string

	// Whether the field may contain multiple values.
	Repeated bool
	// Whether the field is required. Ignored if Repeated is true.
	Required bool

	// The field data type. If Type is Record, then this field contains a nested schema,
	// which is described by Schema.
	Type FieldType
	// Describes the nested schema if Type is set to Record.
	Schema Schema
}

func (fs *FieldSchema) asTableFieldSchema() *bq.TableFieldSchema {
	tfs := &bq.TableFieldSchema{
		Description: fs.Description,
		Name:        fs.Name,
		Type:        string(fs.Type),
	}

	if fs.Repeated {
		tfs.Mode = "REPEATED"
	} else if fs.Required {
		tfs.Mode = "REQUIRED"
	} // else leave as default, which is interpreted as NULLABLE.

	for _, f := range fs.Schema {
		tfs.Fields = append(tfs.Fields, f.asTableFieldSchema())
	}

	return tfs
}

func (s Schema) asTableSchema() *bq.TableSchema {
	var fields []*bq.TableFieldSchema
	for _, f := range s {
		fields = append(fields, f.asTableFieldSchema())
	}
	return &bq.TableSchema{Fields: fields}
}

// customizeCreateTable allows a Schema to be used directly as an option to CreateTable.
func (s Schema) customizeCreateTable(conf *createTableConf) {
	conf.schema = s.asTableSchema()
}

func convertTableFieldSchema(tfs *bq.TableFieldSchema) *FieldSchema {
	fs := &FieldSchema{
		Description: tfs.Description,
		Name:        tfs.Name,
		Repeated:    tfs.Mode == "REPEATED",
		Required:    tfs.Mode == "REQUIRED",
		Type:        FieldType(tfs.Type),
	}

	for _, f := range tfs.Fields {
		fs.Schema = append(fs.Schema, convertTableFieldSchema(f))
	}
	return fs
}

func convertTableSchema(ts *bq.TableSchema) Schema {
	var s Schema
	for _, f := range ts.Fields {
		s = append(s, convertTableFieldSchema(f))
	}
	return s
}

type FieldType string

const (
	StringFieldType    FieldType = "STRING"
	BytesFieldType     FieldType = "BYTES"
	IntegerFieldType   FieldType = "INTEGER"
	FloatFieldType     FieldType = "FLOAT"
	BooleanFieldType   FieldType = "BOOLEAN"
	TimestampFieldType FieldType = "TIMESTAMP"
	RecordFieldType    FieldType = "RECORD"
	DateFieldType      FieldType = "DATE"
	TimeFieldType      FieldType = "TIME"
	DateTimeFieldType  FieldType = "DATETIME"
)

var (
	errNoStruct             = errors.New("bigquery: can only infer schema from struct or pointer to struct")
	errUnsupportedFieldType = errors.New("bigquery: unsupported type of field in struct")
	errInvalidFieldName     = errors.New("bigquery: invalid name of field in struct")
)

var typeOfByteSlice = reflect.TypeOf([]byte{})

// InferSchema tries to derive a BigQuery schema from the supplied struct value.
// NOTE: All fields in the returned Schema are configured to be required,
// unless the corresponding field in the supplied struct is a slice or array.
//
// It is considered an error if the struct (including nested structs) contains
// any exported fields that are pointers or one of the following types:
// uint, uint64, uintptr, map, interface, complex64, complex128, func, chan.
// In these cases, an error will be returned.
// Future versions may handle these cases without error.
//
// Recursively defined structs are also disallowed.
func InferSchema(st interface{}) (Schema, error) {
	return inferSchemaReflectCached(reflect.TypeOf(st))
}

var schemaCache atomiccache.Cache

type cacheVal struct {
	schema Schema
	err    error
}

func inferSchemaReflectCached(t reflect.Type) (Schema, error) {
	cv := schemaCache.Get(t, func() interface{} {
		s, err := inferSchemaReflect(t)
		return cacheVal{s, err}
	}).(cacheVal)
	return cv.schema, cv.err
}

func inferSchemaReflect(t reflect.Type) (Schema, error) {
	rec, err := hasRecursiveType(t, nil)
	if err != nil {
		return nil, err
	}
	if rec {
		return nil, fmt.Errorf("bigquery: schema inference for recursive type %s", t)
	}
	return inferStruct(t)
}

func inferStruct(t reflect.Type) (Schema, error) {
	switch t.Kind() {
	case reflect.Ptr:
		if t.Elem().Kind() != reflect.Struct {
			return nil, errNoStruct
		}
		t = t.Elem()
		fallthrough

	case reflect.Struct:
		return inferFields(t)
	default:
		return nil, errNoStruct
	}
}

// inferFieldSchema infers the FieldSchema for a Go type
func inferFieldSchema(rt reflect.Type) (*FieldSchema, error) {
	switch rt {
	case typeOfByteSlice:
		return &FieldSchema{Required: true, Type: BytesFieldType}, nil
	case typeOfGoTime:
		return &FieldSchema{Required: true, Type: TimestampFieldType}, nil
	case typeOfDate:
		return &FieldSchema{Required: true, Type: DateFieldType}, nil
	case typeOfTime:
		return &FieldSchema{Required: true, Type: TimeFieldType}, nil
	case typeOfDateTime:
		return &FieldSchema{Required: true, Type: DateTimeFieldType}, nil
	}
	if isSupportedIntType(rt) {
		return &FieldSchema{Required: true, Type: IntegerFieldType}, nil
	}
	switch rt.Kind() {
	case reflect.Slice, reflect.Array:
		et := rt.Elem()
		if et != typeOfByteSlice && (et.Kind() == reflect.Slice || et.Kind() == reflect.Array) {
			// Multi dimensional slices/arrays are not supported by BigQuery
			return nil, errUnsupportedFieldType
		}

		f, err := inferFieldSchema(et)
		if err != nil {
			return nil, err
		}
		f.Repeated = true
		f.Required = false
		return f, nil
	case reflect.Struct, reflect.Ptr:
		nested, err := inferStruct(rt)
		if err != nil {
			return nil, err
		}
		return &FieldSchema{Required: true, Type: RecordFieldType, Schema: nested}, nil
	case reflect.String:
		return &FieldSchema{Required: true, Type: StringFieldType}, nil
	case reflect.Bool:
		return &FieldSchema{Required: true, Type: BooleanFieldType}, nil
	case reflect.Float32, reflect.Float64:
		return &FieldSchema{Required: true, Type: FloatFieldType}, nil
	default:
		return nil, errUnsupportedFieldType
	}
}

// inferFields extracts all exported field types from struct type.
func inferFields(rt reflect.Type) (Schema, error) {
	var s Schema
	fields, err := fieldCache.Fields(rt)
	if err != nil {
		return nil, err
	}
	for _, field := range fields {
		f, err := inferFieldSchema(field.Type)
		if err != nil {
			return nil, err
		}
		f.Name = field.Name
		s = append(s, f)
	}
	return s, nil
}

// isSupportedIntType reports whether t can be properly represented by the
// BigQuery INTEGER/INT64 type.
func isSupportedIntType(t reflect.Type) bool {
	switch t.Kind() {
	case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int,
		reflect.Uint8, reflect.Uint16, reflect.Uint32:
		return true
	default:
		return false
	}
}

// typeList is a linked list of reflect.Types.
type typeList struct {
	t    reflect.Type
	next *typeList
}

func (l *typeList) has(t reflect.Type) bool {
	for l != nil {
		if l.t == t {
			return true
		}
		l = l.next
	}
	return false
}

// hasRecursiveType reports whether t or any type inside t refers to itself, directly or indirectly,
// via exported fields. (Schema inference ignores unexported fields.)
func hasRecursiveType(t reflect.Type, seen *typeList) (bool, error) {
	if t.Kind() == reflect.Ptr {
		t = t.Elem()
	}
	if t.Kind() != reflect.Struct {
		return false, nil
	}
	if seen.has(t) {
		return true, nil
	}
	fields, err := fieldCache.Fields(t)
	if err != nil {
		return false, err
	}
	seen = &typeList{t, seen}
	// Because seen is a linked list, additions to it from one field's
	// recursive call will not affect the value for subsequent fields' calls.
	for _, field := range fields {
		ok, err := hasRecursiveType(field.Type, seen)
		if err != nil {
			return false, err
		}
		if ok {
			return true, nil
		}
	}
	return false, nil
}
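(A small sketch of InferSchema as defined above, continuing the earlier placeholders; the Person type and its fields are invented for illustration. Scalar fields come back REQUIRED, slices come back REPEATED, and bigquery struct tags rename fields, matching the rules implemented above.)

type Person struct {
	Name string `bigquery:"name"`
	Age  int    `bigquery:"age"`
	Tags []string
}

schema, err := bigquery.InferSchema(Person{})
if err != nil {
	log.Fatal(err)
}
// Prints: name STRING (required), age INTEGER (required), Tags STRING (repeated).
for _, f := range schema {
	fmt.Println(f.Name, f.Type, f.Repeated, f.Required)
}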
792 vendor/cloud.google.com/go/bigquery/schema_test.go generated vendored Normal file
@@ -0,0 +1,792 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package bigquery

import (
	"fmt"
	"reflect"
	"testing"
	"time"

	"cloud.google.com/go/civil"
	"cloud.google.com/go/internal/pretty"

	bq "google.golang.org/api/bigquery/v2"
)

func (fs *FieldSchema) GoString() string {
	if fs == nil {
		return "<nil>"
	}

	return fmt.Sprintf("{Name:%s Description:%s Repeated:%t Required:%t Type:%s Schema:%s}",
		fs.Name,
		fs.Description,
		fs.Repeated,
		fs.Required,
		fs.Type,
		fmt.Sprintf("%#v", fs.Schema),
	)
}

func bqTableFieldSchema(desc, name, typ, mode string) *bq.TableFieldSchema {
	return &bq.TableFieldSchema{
		Description: desc,
		Name:        name,
		Mode:        mode,
		Type:        typ,
	}
}

func fieldSchema(desc, name, typ string, repeated, required bool) *FieldSchema {
	return &FieldSchema{
		Description: desc,
		Name:        name,
		Repeated:    repeated,
		Required:    required,
		Type:        FieldType(typ),
	}
}

func TestSchemaConversion(t *testing.T) {
	testCases := []struct {
		schema   Schema
		bqSchema *bq.TableSchema
	}{
		{
			// required
			bqSchema: &bq.TableSchema{
				Fields: []*bq.TableFieldSchema{
					bqTableFieldSchema("desc", "name", "STRING", "REQUIRED"),
				},
			},
			schema: Schema{
				fieldSchema("desc", "name", "STRING", false, true),
			},
		},
		{
			// repeated
			bqSchema: &bq.TableSchema{
				Fields: []*bq.TableFieldSchema{
					bqTableFieldSchema("desc", "name", "STRING", "REPEATED"),
				},
			},
			schema: Schema{
				fieldSchema("desc", "name", "STRING", true, false),
			},
		},
		{
			// nullable, string
			bqSchema: &bq.TableSchema{
				Fields: []*bq.TableFieldSchema{
					bqTableFieldSchema("desc", "name", "STRING", ""),
				},
			},
			schema: Schema{
				fieldSchema("desc", "name", "STRING", false, false),
			},
		},
		{
			// integer
			bqSchema: &bq.TableSchema{
				Fields: []*bq.TableFieldSchema{
					bqTableFieldSchema("desc", "name", "INTEGER", ""),
				},
			},
			schema: Schema{
				fieldSchema("desc", "name", "INTEGER", false, false),
			},
		},
		{
			// float
			bqSchema: &bq.TableSchema{
				Fields: []*bq.TableFieldSchema{
					bqTableFieldSchema("desc", "name", "FLOAT", ""),
				},
			},
			schema: Schema{
				fieldSchema("desc", "name", "FLOAT", false, false),
			},
		},
		{
			// boolean
			bqSchema: &bq.TableSchema{
				Fields: []*bq.TableFieldSchema{
					bqTableFieldSchema("desc", "name", "BOOLEAN", ""),
				},
			},
			schema: Schema{
				fieldSchema("desc", "name", "BOOLEAN", false, false),
			},
		},
		{
			// timestamp
			bqSchema: &bq.TableSchema{
				Fields: []*bq.TableFieldSchema{
					bqTableFieldSchema("desc", "name", "TIMESTAMP", ""),
				},
			},
			schema: Schema{
				fieldSchema("desc", "name", "TIMESTAMP", false, false),
			},
		},
		{
			// civil times
			bqSchema: &bq.TableSchema{
				Fields: []*bq.TableFieldSchema{
					bqTableFieldSchema("desc", "f1", "TIME", ""),
					bqTableFieldSchema("desc", "f2", "DATE", ""),
					bqTableFieldSchema("desc", "f3", "DATETIME", ""),
				},
			},
			schema: Schema{
				fieldSchema("desc", "f1", "TIME", false, false),
				fieldSchema("desc", "f2", "DATE", false, false),
				fieldSchema("desc", "f3", "DATETIME", false, false),
			},
		},
		{
			// nested
			bqSchema: &bq.TableSchema{
				Fields: []*bq.TableFieldSchema{
					{
						Description: "An outer schema wrapping a nested schema",
						Name:        "outer",
						Mode:        "REQUIRED",
						Type:        "RECORD",
						Fields: []*bq.TableFieldSchema{
							bqTableFieldSchema("inner field", "inner", "STRING", ""),
						},
					},
				},
			},
			schema: Schema{
				&FieldSchema{
					Description: "An outer schema wrapping a nested schema",
					Name:        "outer",
					Required:    true,
					Type:        "RECORD",
					Schema: []*FieldSchema{
						{
							Description: "inner field",
							Name:        "inner",
							Type:        "STRING",
						},
					},
				},
			},
		},
	}

	for _, tc := range testCases {
		bqSchema := tc.schema.asTableSchema()
		if !reflect.DeepEqual(bqSchema, tc.bqSchema) {
			t.Errorf("converting to TableSchema: got:\n%v\nwant:\n%v",
				pretty.Value(bqSchema), pretty.Value(tc.bqSchema))
		}
		schema := convertTableSchema(tc.bqSchema)
		if !reflect.DeepEqual(schema, tc.schema) {
			t.Errorf("converting to Schema: got:\n%v\nwant:\n%v", schema, tc.schema)
		}
	}
}

type allStrings struct {
	String    string
	ByteSlice []byte
}

type allSignedIntegers struct {
	Int64 int64
	Int32 int32
	Int16 int16
	Int8  int8
	Int   int
}

type allUnsignedIntegers struct {
	Uint32 uint32
	Uint16 uint16
	Uint8  uint8
}

type allFloat struct {
	Float64 float64
	Float32 float32
	// NOTE: Complex32 and Complex64 are unsupported by BigQuery
}

type allBoolean struct {
	Bool bool
}

type allTime struct {
	Timestamp time.Time
	Time      civil.Time
	Date      civil.Date
	DateTime  civil.DateTime
}

func reqField(name, typ string) *FieldSchema {
	return &FieldSchema{
		Name:     name,
		Type:     FieldType(typ),
		Required: true,
	}
}

func TestSimpleInference(t *testing.T) {
	testCases := []struct {
		in   interface{}
		want Schema
	}{
		{
			in: allSignedIntegers{},
			want: Schema{
				reqField("Int64", "INTEGER"),
				reqField("Int32", "INTEGER"),
				reqField("Int16", "INTEGER"),
				reqField("Int8", "INTEGER"),
				reqField("Int", "INTEGER"),
			},
		},
		{
			in: allUnsignedIntegers{},
			want: Schema{
				reqField("Uint32", "INTEGER"),
				reqField("Uint16", "INTEGER"),
				reqField("Uint8", "INTEGER"),
			},
		},
		{
			in: allFloat{},
			want: Schema{
				reqField("Float64", "FLOAT"),
				reqField("Float32", "FLOAT"),
			},
		},
		{
			in: allBoolean{},
			want: Schema{
				reqField("Bool", "BOOLEAN"),
			},
		},
		{
			in: &allBoolean{},
			want: Schema{
				reqField("Bool", "BOOLEAN"),
			},
		},
		{
			in: allTime{},
			want: Schema{
				reqField("Timestamp", "TIMESTAMP"),
				reqField("Time", "TIME"),
				reqField("Date", "DATE"),
				reqField("DateTime", "DATETIME"),
			},
		},
		{
			in: allStrings{},
			want: Schema{
				reqField("String", "STRING"),
				reqField("ByteSlice", "BYTES"),
			},
		},
	}
	for _, tc := range testCases {
		got, err := InferSchema(tc.in)
		if err != nil {
			t.Fatalf("%T: error inferring TableSchema: %v", tc.in, err)
		}
		if !reflect.DeepEqual(got, tc.want) {
			t.Errorf("%T: inferring TableSchema: got:\n%#v\nwant:\n%#v", tc.in,
				pretty.Value(got), pretty.Value(tc.want))
		}
	}
}

type containsNested struct {
	hidden    string
	NotNested int
	Nested    struct {
		Inside int
	}
}

type containsDoubleNested struct {
	NotNested int
	Nested    struct {
		InsideNested struct {
			Inside int
		}
	}
}

type ptrNested struct {
	Ptr *struct{ Inside int }
}

type dup struct { // more than one field of the same struct type
	A, B allBoolean
}

func TestNestedInference(t *testing.T) {
	testCases := []struct {
		in   interface{}
		want Schema
	}{
		{
			in: containsNested{},
			want: Schema{
				reqField("NotNested", "INTEGER"),
				&FieldSchema{
					Name:     "Nested",
					Required: true,
					Type:     "RECORD",
					Schema:   Schema{reqField("Inside", "INTEGER")},
				},
			},
		},
		{
			in: containsDoubleNested{},
			want: Schema{
				reqField("NotNested", "INTEGER"),
				&FieldSchema{
					Name:     "Nested",
					Required: true,
					Type:     "RECORD",
					Schema: Schema{
						{
							Name:     "InsideNested",
							Required: true,
							Type:     "RECORD",
							Schema:   Schema{reqField("Inside", "INTEGER")},
						},
					},
				},
			},
		},
		{
			in: ptrNested{},
			want: Schema{
				&FieldSchema{
					Name:     "Ptr",
					Required: true,
					Type:     "RECORD",
					Schema:   Schema{reqField("Inside", "INTEGER")},
				},
			},
		},
		{
			in: dup{},
			want: Schema{
				&FieldSchema{
					Name:     "A",
					Required: true,
					Type:     "RECORD",
					Schema:   Schema{reqField("Bool", "BOOLEAN")},
				},
				&FieldSchema{
					Name:     "B",
					Required: true,
					Type:     "RECORD",
					Schema:   Schema{reqField("Bool", "BOOLEAN")},
				},
			},
		},
	}

	for _, tc := range testCases {
		got, err := InferSchema(tc.in)
		if err != nil {
			t.Fatalf("%T: error inferring TableSchema: %v", tc.in, err)
		}
		if !reflect.DeepEqual(got, tc.want) {
			t.Errorf("%T: inferring TableSchema: got:\n%#v\nwant:\n%#v", tc.in,
				pretty.Value(got), pretty.Value(tc.want))
		}
	}
}

type repeated struct {
	NotRepeated       []byte
	RepeatedByteSlice [][]byte
	Slice             []int
	Array             [5]bool
}

type nestedRepeated struct {
	NotRepeated int
	Repeated    []struct {
		Inside int
	}
	RepeatedPtr []*struct{ Inside int }
}

func repField(name, typ string) *FieldSchema {
	return &FieldSchema{
		Name:     name,
		Type:     FieldType(typ),
		Repeated: true,
	}
}

func TestRepeatedInference(t *testing.T) {
	testCases := []struct {
		in   interface{}
		want Schema
	}{
		{
			in: repeated{},
			want: Schema{
				reqField("NotRepeated", "BYTES"),
				repField("RepeatedByteSlice", "BYTES"),
				repField("Slice", "INTEGER"),
				repField("Array", "BOOLEAN"),
			},
		},
		{
			in: nestedRepeated{},
			want: Schema{
				reqField("NotRepeated", "INTEGER"),
				{
					Name:     "Repeated",
					Repeated: true,
					Type:     "RECORD",
					Schema:   Schema{reqField("Inside", "INTEGER")},
				},
				{
					Name:     "RepeatedPtr",
					Repeated: true,
					Type:     "RECORD",
					Schema:   Schema{reqField("Inside", "INTEGER")},
				},
			},
		},
	}

	for i, tc := range testCases {
		got, err := InferSchema(tc.in)
		if err != nil {
			t.Fatalf("%d: error inferring TableSchema: %v", i, err)
		}
		if !reflect.DeepEqual(got, tc.want) {
			t.Errorf("%d: inferring TableSchema: got:\n%#v\nwant:\n%#v", i,
				pretty.Value(got), pretty.Value(tc.want))
		}
	}
}

type Embedded struct {
	Embedded int
}

type embedded struct {
	Embedded2 int
}

type nestedEmbedded struct {
	Embedded
	embedded
}

func TestEmbeddedInference(t *testing.T) {
	got, err := InferSchema(nestedEmbedded{})
	if err != nil {
		t.Fatal(err)
	}
	want := Schema{
		reqField("Embedded", "INTEGER"),
		reqField("Embedded2", "INTEGER"),
	}
	if !reflect.DeepEqual(got, want) {
		t.Errorf("got %v, want %v", pretty.Value(got), pretty.Value(want))
	}
}

func TestRecursiveInference(t *testing.T) {
	type List struct {
		Val  int
		Next *List
	}

	_, err := InferSchema(List{})
	if err == nil {
		t.Fatal("got nil, want error")
	}
}

type withTags struct {
	NoTag         int
	ExcludeTag    int `bigquery:"-"`
	SimpleTag     int `bigquery:"simple_tag"`
	UnderscoreTag int `bigquery:"_id"`
	MixedCase     int `bigquery:"MIXEDcase"`
}

type withTagsNested struct {
	Nested          withTags `bigquery:"nested"`
	NestedAnonymous struct {
		ExcludeTag int `bigquery:"-"`
		Inside     int `bigquery:"inside"`
	} `bigquery:"anon"`
}

type withTagsRepeated struct {
	Repeated          []withTags `bigquery:"repeated"`
	RepeatedAnonymous []struct {
		ExcludeTag int `bigquery:"-"`
		Inside     int `bigquery:"inside"`
	} `bigquery:"anon"`
}

type withTagsEmbedded struct {
	withTags
}

var withTagsSchema = Schema{
	reqField("NoTag", "INTEGER"),
	reqField("simple_tag", "INTEGER"),
	reqField("_id", "INTEGER"),
	reqField("MIXEDcase", "INTEGER"),
}

func TestTagInference(t *testing.T) {
	testCases := []struct {
		in   interface{}
		want Schema
	}{
		{
			in:   withTags{},
			want: withTagsSchema,
		},
		{
			in: withTagsNested{},
			want: Schema{
				&FieldSchema{
					Name:     "nested",
					Required: true,
					Type:     "RECORD",
					Schema:   withTagsSchema,
				},
				&FieldSchema{
					Name:     "anon",
					Required: true,
					Type:     "RECORD",
					Schema:   Schema{reqField("inside", "INTEGER")},
				},
			},
		},
		{
			in: withTagsRepeated{},
			want: Schema{
				&FieldSchema{
					Name:     "repeated",
					Repeated: true,
					Type:     "RECORD",
					Schema:   withTagsSchema,
				},
				&FieldSchema{
					Name:     "anon",
					Repeated: true,
					Type:     "RECORD",
					Schema:   Schema{reqField("inside", "INTEGER")},
				},
			},
		},
		{
			in:   withTagsEmbedded{},
			want: withTagsSchema,
		},
	}
	for i, tc := range testCases {
		got, err := InferSchema(tc.in)
		if err != nil {
			t.Fatalf("%d: error inferring TableSchema: %v", i, err)
		}
		if !reflect.DeepEqual(got, tc.want) {
			t.Errorf("%d: inferring TableSchema: got:\n%#v\nwant:\n%#v", i,
				pretty.Value(got), pretty.Value(tc.want))
		}
	}
}

func TestTagInferenceErrors(t *testing.T) {
	testCases := []struct {
		in  interface{}
		err error
	}{
		{
			in: struct {
				LongTag int `bigquery:"abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxy"`
			}{},
			err: errInvalidFieldName,
		},
		{
			in: struct {
				UnsupporedStartChar int `bigquery:"øab"`
			}{},
			err: errInvalidFieldName,
		},
		{
			in: struct {
				UnsupportedEndChar int `bigquery:"abø"`
			}{},
			err: errInvalidFieldName,
		},
		{
			in: struct {
				UnsupportedMiddleChar int `bigquery:"aøb"`
			}{},
			err: errInvalidFieldName,
		},
		{
			in: struct {
				StartInt int `bigquery:"1abc"`
			}{},
			err: errInvalidFieldName,
		},
		{
			in: struct {
				Hyphens int `bigquery:"a-b"`
			}{},
			err: errInvalidFieldName,
		},
		{
			in: struct {
				OmitEmpty int `bigquery:"abc,omitempty"`
			}{},
			err: errInvalidFieldName,
		},
	}
	for i, tc := range testCases {
		want := tc.err
		_, got := InferSchema(tc.in)
		if !reflect.DeepEqual(got, want) {
			t.Errorf("%d: inferring TableSchema: got:\n%#v\nwant:\n%#v", i, got, want)
		}
	}
}

func TestSchemaErrors(t *testing.T) {
	testCases := []struct {
		in  interface{}
		err error
	}{
		{
			in:  []byte{},
			err: errNoStruct,
		},
		{
			in:  new(int),
			err: errNoStruct,
		},
		{
			in:  struct{ Uint uint }{},
			err: errUnsupportedFieldType,
		},
		{
			in:  struct{ Uint64 uint64 }{},
			err: errUnsupportedFieldType,
		},
		{
			in:  struct{ Uintptr uintptr }{},
			err: errUnsupportedFieldType,
		},
		{
			in:  struct{ Complex complex64 }{},
			err: errUnsupportedFieldType,
},
|
||||
{
|
||||
in: struct{ Map map[string]int }{},
|
||||
err: errUnsupportedFieldType,
|
||||
},
|
||||
{
|
||||
in: struct{ Chan chan bool }{},
|
||||
err: errUnsupportedFieldType,
|
||||
},
|
||||
{
|
||||
in: struct{ Ptr *int }{},
|
||||
err: errNoStruct,
|
||||
},
|
||||
{
|
||||
in: struct{ Interface interface{} }{},
|
||||
err: errUnsupportedFieldType,
|
||||
},
|
||||
{
|
||||
in: struct{ MultiDimensional [][]int }{},
|
||||
err: errUnsupportedFieldType,
|
||||
},
|
||||
{
|
||||
in: struct{ MultiDimensional [][][]byte }{},
|
||||
err: errUnsupportedFieldType,
|
||||
},
|
||||
{
|
||||
in: struct{ ChanSlice []chan bool }{},
|
||||
err: errUnsupportedFieldType,
|
||||
},
|
||||
{
|
||||
in: struct{ NestedChan struct{ Chan []chan bool } }{},
|
||||
err: errUnsupportedFieldType,
|
||||
},
|
||||
}
|
||||
for _, tc := range testCases {
|
||||
want := tc.err
|
||||
_, got := InferSchema(tc.in)
|
||||
if !reflect.DeepEqual(got, want) {
|
||||
t.Errorf("%#v: got:\n%#v\nwant:\n%#v", tc.in, got, want)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestHasRecursiveType(t *testing.T) {
|
||||
type (
|
||||
nonStruct int
|
||||
nonRec struct{ A string }
|
||||
dup struct{ A, B nonRec }
|
||||
rec struct {
|
||||
A int
|
||||
B *rec
|
||||
}
|
||||
recUnexported struct {
|
||||
A int
|
||||
b *rec
|
||||
}
|
||||
hasRec struct {
|
||||
A int
|
||||
R *rec
|
||||
}
|
||||
)
|
||||
for _, test := range []struct {
|
||||
in interface{}
|
||||
want bool
|
||||
}{
|
||||
{nonStruct(0), false},
|
||||
{nonRec{}, false},
|
||||
{dup{}, false},
|
||||
{rec{}, true},
|
||||
{recUnexported{}, false},
|
||||
{hasRec{}, true},
|
||||
} {
|
||||
got, err := hasRecursiveType(reflect.TypeOf(test.in), nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if got != test.want {
|
||||
t.Errorf("%T: got %t, want %t", test.in, got, test.want)
|
||||
}
|
||||
}
|
||||
}
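A compact sketch of the tag rules these tests pin down (illustrative only, not part of the commit; the event type is made up):

// event is an illustrative, hypothetical struct: InferSchema(event{}) yields
// required fields "name" (STRING) and "count" (INTEGER); Secret is dropped
// entirely because of the "-" tag.
type event struct {
	Name   string `bigquery:"name"`
	Count  int    `bigquery:"count"`
	Secret string `bigquery:"-"`
}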
623 vendor/cloud.google.com/go/bigquery/service.go generated vendored Normal file
@@ -0,0 +1,623 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package bigquery

import (
	"errors"
	"fmt"
	"io"
	"net/http"
	"sync"
	"time"

	"cloud.google.com/go/internal"
	gax "github.com/googleapis/gax-go"

	"golang.org/x/net/context"
	bq "google.golang.org/api/bigquery/v2"
	"google.golang.org/api/googleapi"
)

// service provides an internal abstraction to isolate the generated
// BigQuery API; most of this package uses this interface instead.
// The single implementation, *bigqueryService, contains all the knowledge
// of the generated BigQuery API.
type service interface {
	// Jobs
	insertJob(ctx context.Context, projectId string, conf *insertJobConf) (*Job, error)
	getJobType(ctx context.Context, projectId, jobID string) (jobType, error)
	jobCancel(ctx context.Context, projectId, jobID string) error
	jobStatus(ctx context.Context, projectId, jobID string) (*JobStatus, error)

	// Tables
	createTable(ctx context.Context, conf *createTableConf) error
	getTableMetadata(ctx context.Context, projectID, datasetID, tableID string) (*TableMetadata, error)
	deleteTable(ctx context.Context, projectID, datasetID, tableID string) error

	// listTables returns a page of Tables and a next page token. Note: the Tables do not have their c field populated.
	listTables(ctx context.Context, projectID, datasetID string, pageSize int, pageToken string) ([]*Table, string, error)
	patchTable(ctx context.Context, projectID, datasetID, tableID string, conf *patchTableConf) (*TableMetadata, error)

	// Table data
	readTabledata(ctx context.Context, conf *readTableConf, pageToken string) (*readDataResult, error)
	insertRows(ctx context.Context, projectID, datasetID, tableID string, rows []*insertionRow, conf *insertRowsConf) error

	// Datasets
	insertDataset(ctx context.Context, datasetID, projectID string) error
	deleteDataset(ctx context.Context, datasetID, projectID string) error
	getDatasetMetadata(ctx context.Context, projectID, datasetID string) (*DatasetMetadata, error)

	// Misc

	// readQuery reads data resulting from a query job. If the job is
	// incomplete, an errIncompleteJob is returned. readQuery may be called
	// repeatedly to poll for job completion.
	readQuery(ctx context.Context, conf *readQueryConf, pageToken string) (*readDataResult, error)

	// listDatasets returns a page of Datasets and a next page token. Note: the Datasets do not have their c field populated.
	listDatasets(ctx context.Context, projectID string, maxResults int, pageToken string, all bool, filter string) ([]*Dataset, string, error)
}

type bigqueryService struct {
	s *bq.Service
}

func newBigqueryService(client *http.Client, endpoint string) (*bigqueryService, error) {
	s, err := bq.New(client)
	if err != nil {
		return nil, fmt.Errorf("constructing bigquery client: %v", err)
	}
	s.BasePath = endpoint

	return &bigqueryService{s: s}, nil
}

// getPages calls the supplied getPage function repeatedly until there are no pages left to get.
// token is the token of the initial page to start from. Use an empty string to start from the beginning.
func getPages(token string, getPage func(token string) (nextToken string, err error)) error {
	for {
		var err error
		token, err = getPage(token)
		if err != nil {
			return err
		}
		if token == "" {
			return nil
		}
	}
}
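A brief usage sketch (not part of the commit) may help here: getPages drives any page-fetching closure until the token comes back empty. The collectDatasets helper below is hypothetical.

// collectDatasets is an illustrative, hypothetical helper: it drains every
// page of listDatasets by letting getPages thread the page token through.
func collectDatasets(ctx context.Context, s service, projectID string) ([]*Dataset, error) {
	var all []*Dataset
	err := getPages("", func(token string) (string, error) {
		page, next, err := s.listDatasets(ctx, projectID, 0, token, false, "")
		if err != nil {
			return "", err
		}
		all = append(all, page...)
		return next, nil
	})
	return all, err
}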
type insertJobConf struct {
	job   *bq.Job
	media io.Reader
}

func (s *bigqueryService) insertJob(ctx context.Context, projectID string, conf *insertJobConf) (*Job, error) {
	call := s.s.Jobs.Insert(projectID, conf.job).Context(ctx)
	if conf.media != nil {
		call.Media(conf.media)
	}
	res, err := call.Do()
	if err != nil {
		return nil, err
	}
	return &Job{service: s, projectID: projectID, jobID: res.JobReference.JobId}, nil
}

type pagingConf struct {
	recordsPerRequest    int64
	setRecordsPerRequest bool

	startIndex uint64
}

type readTableConf struct {
	projectID, datasetID, tableID string
	paging                        pagingConf
	schema                        Schema // lazily initialized when the first page of data is fetched.
}

type readDataResult struct {
	pageToken string
	rows      [][]Value
	totalRows uint64
	schema    Schema
}

type readQueryConf struct {
	projectID, jobID string
	paging           pagingConf
}

func (s *bigqueryService) readTabledata(ctx context.Context, conf *readTableConf, pageToken string) (*readDataResult, error) {
	// Prepare request to fetch one page of table data.
	req := s.s.Tabledata.List(conf.projectID, conf.datasetID, conf.tableID)

	if pageToken != "" {
		req.PageToken(pageToken)
	} else {
		req.StartIndex(conf.paging.startIndex)
	}

	if conf.paging.setRecordsPerRequest {
		req.MaxResults(conf.paging.recordsPerRequest)
	}

	// Fetch the table schema in the background, if necessary.
	var schemaErr error
	var schemaFetch sync.WaitGroup
	if conf.schema == nil {
		schemaFetch.Add(1)
		go func() {
			defer schemaFetch.Done()
			var t *bq.Table
			t, schemaErr = s.s.Tables.Get(conf.projectID, conf.datasetID, conf.tableID).
				Fields("schema").
				Context(ctx).
				Do()
			if schemaErr == nil && t.Schema != nil {
				conf.schema = convertTableSchema(t.Schema)
			}
		}()
	}

	res, err := req.Context(ctx).Do()
	if err != nil {
		return nil, err
	}

	schemaFetch.Wait()
	if schemaErr != nil {
		return nil, schemaErr
	}

	result := &readDataResult{
		pageToken: res.PageToken,
		totalRows: uint64(res.TotalRows),
		schema:    conf.schema,
	}
	result.rows, err = convertRows(res.Rows, conf.schema)
	if err != nil {
		return nil, err
	}
	return result, nil
}

var errIncompleteJob = errors.New("internal error: query results not available because job is not complete")

// getQueryResultsTimeout controls the maximum duration of a request to the
// BigQuery GetQueryResults endpoint. Setting a long timeout here does not
// cause increased overall latency, as results are returned as soon as they are
// available.
const getQueryResultsTimeout = time.Minute

func (s *bigqueryService) readQuery(ctx context.Context, conf *readQueryConf, pageToken string) (*readDataResult, error) {
	req := s.s.Jobs.GetQueryResults(conf.projectID, conf.jobID).
		TimeoutMs(getQueryResultsTimeout.Nanoseconds() / 1e6)

	if pageToken != "" {
		req.PageToken(pageToken)
	} else {
		req.StartIndex(conf.paging.startIndex)
	}

	if conf.paging.setRecordsPerRequest {
		req.MaxResults(conf.paging.recordsPerRequest)
	}

	res, err := req.Context(ctx).Do()
	if err != nil {
		return nil, err
	}

	if !res.JobComplete {
		return nil, errIncompleteJob
	}
	schema := convertTableSchema(res.Schema)
	result := &readDataResult{
		pageToken: res.PageToken,
		totalRows: res.TotalRows,
		schema:    schema,
	}
	result.rows, err = convertRows(res.Rows, schema)
	if err != nil {
		return nil, err
	}
	return result, nil
}

type insertRowsConf struct {
	templateSuffix      string
	ignoreUnknownValues bool
	skipInvalidRows     bool
}

func (s *bigqueryService) insertRows(ctx context.Context, projectID, datasetID, tableID string, rows []*insertionRow, conf *insertRowsConf) error {
	req := &bq.TableDataInsertAllRequest{
		TemplateSuffix:      conf.templateSuffix,
		IgnoreUnknownValues: conf.ignoreUnknownValues,
		SkipInvalidRows:     conf.skipInvalidRows,
	}
	for _, row := range rows {
		m := make(map[string]bq.JsonValue)
		for k, v := range row.Row {
			m[k] = bq.JsonValue(v)
		}
		req.Rows = append(req.Rows, &bq.TableDataInsertAllRequestRows{
			InsertId: row.InsertID,
			Json:     m,
		})
	}
	var res *bq.TableDataInsertAllResponse
	err := runWithRetry(ctx, func() error {
		var err error
		res, err = s.s.Tabledata.InsertAll(projectID, datasetID, tableID, req).Context(ctx).Do()
		return err
	})
	if err != nil {
		return err
	}
	if len(res.InsertErrors) == 0 {
		return nil
	}

	var errs PutMultiError
	for _, e := range res.InsertErrors {
		if int(e.Index) > len(rows) {
			return fmt.Errorf("internal error: unexpected row index: %v", e.Index)
		}
		rie := RowInsertionError{
			InsertID: rows[e.Index].InsertID,
			RowIndex: int(e.Index),
		}
		for _, errp := range e.Errors {
			rie.Errors = append(rie.Errors, errorFromErrorProto(errp))
		}
		errs = append(errs, rie)
	}
	return errs
}

type jobType int

const (
	copyJobType jobType = iota
	extractJobType
	loadJobType
	queryJobType
)

func (s *bigqueryService) getJobType(ctx context.Context, projectID, jobID string) (jobType, error) {
	res, err := s.s.Jobs.Get(projectID, jobID).
		Fields("configuration").
		Context(ctx).
		Do()

	if err != nil {
		return 0, err
	}

	switch {
	case res.Configuration.Copy != nil:
		return copyJobType, nil
	case res.Configuration.Extract != nil:
		return extractJobType, nil
	case res.Configuration.Load != nil:
		return loadJobType, nil
	case res.Configuration.Query != nil:
		return queryJobType, nil
	default:
		return 0, errors.New("unknown job type")
	}
}

func (s *bigqueryService) jobCancel(ctx context.Context, projectID, jobID string) error {
	// Jobs.Cancel returns a job entity, but the only relevant piece of
	// data it may contain (the status of the job) is unreliable. From the
	// docs: "This call will return immediately, and the client will need
	// to poll for the job status to see if the cancel completed
	// successfully". So it would be misleading to return a status.
	_, err := s.s.Jobs.Cancel(projectID, jobID).
		Fields(). // We don't need any of the response data.
		Context(ctx).
		Do()
	return err
}

func (s *bigqueryService) jobStatus(ctx context.Context, projectID, jobID string) (*JobStatus, error) {
	res, err := s.s.Jobs.Get(projectID, jobID).
		Fields("status"). // Only fetch what we need.
		Context(ctx).
		Do()
	if err != nil {
		return nil, err
	}
	return jobStatusFromProto(res.Status)
}

var stateMap = map[string]State{"PENDING": Pending, "RUNNING": Running, "DONE": Done}

func jobStatusFromProto(status *bq.JobStatus) (*JobStatus, error) {
	state, ok := stateMap[status.State]
	if !ok {
		return nil, fmt.Errorf("unexpected job state: %v", status.State)
	}

	newStatus := &JobStatus{
		State: state,
		err:   nil,
	}
	if err := errorFromErrorProto(status.ErrorResult); state == Done && err != nil {
		newStatus.err = err
	}

	for _, ep := range status.Errors {
		newStatus.Errors = append(newStatus.Errors, errorFromErrorProto(ep))
	}
	return newStatus, nil
}

// listTables returns a subset of tables that belong to a dataset, and a token for fetching the next subset.
func (s *bigqueryService) listTables(ctx context.Context, projectID, datasetID string, pageSize int, pageToken string) ([]*Table, string, error) {
	var tables []*Table
	req := s.s.Tables.List(projectID, datasetID).
		PageToken(pageToken).
		Context(ctx)
	if pageSize > 0 {
		req.MaxResults(int64(pageSize))
	}
	res, err := req.Do()
	if err != nil {
		return nil, "", err
	}
	for _, t := range res.Tables {
		tables = append(tables, s.convertListedTable(t))
	}
	return tables, res.NextPageToken, nil
}

type createTableConf struct {
	projectID, datasetID, tableID string
	expiration                    time.Time
	viewQuery                     string
	schema                        *bq.TableSchema
	useStandardSQL                bool
	timePartitioning              *TimePartitioning
}

// createTable creates a table in the BigQuery service.
// expiration is an optional time after which the table will be deleted and its storage reclaimed.
// If viewQuery is non-empty, the created table will be of type VIEW.
// Note: expiration can only be set during table creation.
// Note: after table creation, a view can be modified only if its table was initially created with a view.
func (s *bigqueryService) createTable(ctx context.Context, conf *createTableConf) error {
	table := &bq.Table{
		TableReference: &bq.TableReference{
			ProjectId: conf.projectID,
			DatasetId: conf.datasetID,
			TableId:   conf.tableID,
		},
	}
	if !conf.expiration.IsZero() {
		table.ExpirationTime = conf.expiration.UnixNano() / 1e6
	}
	// TODO(jba): make it impossible to provide both a view query and a schema.
	if conf.viewQuery != "" {
		table.View = &bq.ViewDefinition{
			Query: conf.viewQuery,
		}
		if conf.useStandardSQL {
			table.View.UseLegacySql = false
			table.View.ForceSendFields = append(table.View.ForceSendFields, "UseLegacySql")
		}
	}
	if conf.schema != nil {
		table.Schema = conf.schema
	}
	if conf.timePartitioning != nil {
		table.TimePartitioning = &bq.TimePartitioning{
			Type:         "DAY",
			ExpirationMs: int64(conf.timePartitioning.Expiration.Seconds() * 1000),
		}
	}

	_, err := s.s.Tables.Insert(conf.projectID, conf.datasetID, table).Context(ctx).Do()
	return err
}

func (s *bigqueryService) getTableMetadata(ctx context.Context, projectID, datasetID, tableID string) (*TableMetadata, error) {
	table, err := s.s.Tables.Get(projectID, datasetID, tableID).Context(ctx).Do()
	if err != nil {
		return nil, err
	}
	return bqTableToMetadata(table), nil
}

func (s *bigqueryService) deleteTable(ctx context.Context, projectID, datasetID, tableID string) error {
	return s.s.Tables.Delete(projectID, datasetID, tableID).Context(ctx).Do()
}

func bqTableToMetadata(t *bq.Table) *TableMetadata {
	md := &TableMetadata{
		Description:      t.Description,
		Name:             t.FriendlyName,
		Type:             TableType(t.Type),
		ID:               t.Id,
		NumBytes:         t.NumBytes,
		NumRows:          t.NumRows,
		ExpirationTime:   unixMillisToTime(t.ExpirationTime),
		CreationTime:     unixMillisToTime(t.CreationTime),
		LastModifiedTime: unixMillisToTime(int64(t.LastModifiedTime)),
	}
	if t.Schema != nil {
		md.Schema = convertTableSchema(t.Schema)
	}
	if t.View != nil {
		md.View = t.View.Query
	}
	if t.TimePartitioning != nil {
		md.TimePartitioning = &TimePartitioning{time.Duration(t.TimePartitioning.ExpirationMs) * time.Millisecond}
	}

	return md
}

func bqDatasetToMetadata(d *bq.Dataset) *DatasetMetadata {
	/// TODO(jba): access
	return &DatasetMetadata{
		CreationTime:           unixMillisToTime(d.CreationTime),
		LastModifiedTime:       unixMillisToTime(d.LastModifiedTime),
		DefaultTableExpiration: time.Duration(d.DefaultTableExpirationMs) * time.Millisecond,
		Description:            d.Description,
		Name:                   d.FriendlyName,
		ID:                     d.Id,
		Location:               d.Location,
		Labels:                 d.Labels,
	}
}

// Convert a number of milliseconds since the Unix epoch to a time.Time.
// Treat an input of zero specially: convert it to the zero time,
// rather than the start of the epoch.
func unixMillisToTime(m int64) time.Time {
	if m == 0 {
		return time.Time{}
	}
	return time.Unix(0, m*1e6)
}
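Two concrete values (not part of the commit) make the zero special case explicit:

// Worked examples for unixMillisToTime:
//
//	unixMillisToTime(0)             // the zero time.Time, not 1970-01-01T00:00:00Z
//	unixMillisToTime(1500000000000) // 2017-07-14 02:40:00 +0000 UTC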
func (s *bigqueryService) convertListedTable(t *bq.TableListTables) *Table {
	return &Table{
		ProjectID: t.TableReference.ProjectId,
		DatasetID: t.TableReference.DatasetId,
		TableID:   t.TableReference.TableId,
	}
}

// patchTableConf contains fields to be patched.
type patchTableConf struct {
	// These fields are omitted from the patch operation if nil.
	Description *string
	Name        *string
	Schema      Schema
}

func (s *bigqueryService) patchTable(ctx context.Context, projectID, datasetID, tableID string, conf *patchTableConf) (*TableMetadata, error) {
	t := &bq.Table{}
	forceSend := func(field string) {
		t.ForceSendFields = append(t.ForceSendFields, field)
	}

	if conf.Description != nil {
		t.Description = *conf.Description
		forceSend("Description")
	}
	if conf.Name != nil {
		t.FriendlyName = *conf.Name
		forceSend("FriendlyName")
	}
	if conf.Schema != nil {
		t.Schema = conf.Schema.asTableSchema()
		forceSend("Schema")
	}
	table, err := s.s.Tables.Patch(projectID, datasetID, tableID, t).
		Context(ctx).
		Do()
	if err != nil {
		return nil, err
	}
	return bqTableToMetadata(table), nil
}

func (s *bigqueryService) insertDataset(ctx context.Context, datasetID, projectID string) error {
	ds := &bq.Dataset{
		DatasetReference: &bq.DatasetReference{DatasetId: datasetID},
	}
	_, err := s.s.Datasets.Insert(projectID, ds).Context(ctx).Do()
	return err
}

func (s *bigqueryService) deleteDataset(ctx context.Context, datasetID, projectID string) error {
	return s.s.Datasets.Delete(projectID, datasetID).Context(ctx).Do()
}

func (s *bigqueryService) getDatasetMetadata(ctx context.Context, projectID, datasetID string) (*DatasetMetadata, error) {
	table, err := s.s.Datasets.Get(projectID, datasetID).Context(ctx).Do()
	if err != nil {
		return nil, err
	}
	return bqDatasetToMetadata(table), nil
}

func (s *bigqueryService) listDatasets(ctx context.Context, projectID string, maxResults int, pageToken string, all bool, filter string) ([]*Dataset, string, error) {
	req := s.s.Datasets.List(projectID).
		Context(ctx).
		PageToken(pageToken).
		All(all)
	if maxResults > 0 {
		req.MaxResults(int64(maxResults))
	}
	if filter != "" {
		req.Filter(filter)
	}
	res, err := req.Do()
	if err != nil {
		return nil, "", err
	}
	var datasets []*Dataset
	for _, d := range res.Datasets {
		datasets = append(datasets, s.convertListedDataset(d))
	}
	return datasets, res.NextPageToken, nil
}

func (s *bigqueryService) convertListedDataset(d *bq.DatasetListDatasets) *Dataset {
	return &Dataset{
		ProjectID: d.DatasetReference.ProjectId,
		DatasetID: d.DatasetReference.DatasetId,
	}
}

// runWithRetry calls the function until it returns nil or a non-retryable error, or
// the context is done.
// See the similar function in ../storage/invoke.go. The main difference is the
// reason for retrying.
func runWithRetry(ctx context.Context, call func() error) error {
	backoff := gax.Backoff{
		Initial:    2 * time.Second,
		Max:        32 * time.Second,
		Multiplier: 2,
	}
	return internal.Retry(ctx, backoff, func() (stop bool, err error) {
		err = call()
		if err == nil {
			return true, nil
		}
		e, ok := err.(*googleapi.Error)
		if !ok {
			return true, err
		}
		var reason string
		if len(e.Errors) > 0 {
			reason = e.Errors[0].Reason
		}
		// Retry using the criteria in
		// https://cloud.google.com/bigquery/troubleshooting-errors
		if reason == "backendError" && (e.Code == 500 || e.Code == 503) {
			return false, nil
		}
		return true, err
	})
}
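A short sketch (not part of the commit) of how a caller can lean on this helper; deleteTableWithRetry is hypothetical, since the real deleteTable above does not retry.

// deleteTableWithRetry is an illustrative, hypothetical wrapper: any
// idempotent call can be handed to runWithRetry as a closure.
func deleteTableWithRetry(ctx context.Context, s *bigqueryService, projectID, datasetID, tableID string) error {
	return runWithRetry(ctx, func() error {
		return s.s.Tables.Delete(projectID, datasetID, tableID).Context(ctx).Do()
	})
}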
224 vendor/cloud.google.com/go/bigquery/table.go generated vendored Normal file
@@ -0,0 +1,224 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package bigquery

import (
	"fmt"
	"time"

	"golang.org/x/net/context"

	"cloud.google.com/go/internal/optional"
	bq "google.golang.org/api/bigquery/v2"
)

// A Table is a reference to a BigQuery table.
type Table struct {
	// ProjectID, DatasetID and TableID may be omitted if the Table is the destination for a query.
	// In this case the result will be stored in an ephemeral table.
	ProjectID string
	DatasetID string
	// TableID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_).
	// The maximum length is 1,024 characters.
	TableID string

	c *Client
}

// TableMetadata contains information about a BigQuery table.
type TableMetadata struct {
	Description string // The user-friendly description of this table.
	Name        string // The user-friendly name for this table.
	Schema      Schema
	View        string

	ID   string // An opaque ID uniquely identifying the table.
	Type TableType

	// The time when this table expires. If not set, the table will persist
	// indefinitely. Expired tables will be deleted and their storage reclaimed.
	ExpirationTime time.Time

	CreationTime     time.Time
	LastModifiedTime time.Time

	// The size of the table in bytes.
	// This does not include data that is being buffered during a streaming insert.
	NumBytes int64

	// The number of rows of data in this table.
	// This does not include data that is being buffered during a streaming insert.
	NumRows uint64

	// The time-based partitioning settings for this table.
	TimePartitioning *TimePartitioning
}

// TableCreateDisposition specifies the circumstances under which destination table will be created.
// Default is CreateIfNeeded.
type TableCreateDisposition string

const (
	// CreateIfNeeded will create the table if it does not already exist.
	// Tables are created atomically on successful completion of a job.
	CreateIfNeeded TableCreateDisposition = "CREATE_IF_NEEDED"

	// CreateNever ensures the table must already exist and will not be
	// automatically created.
	CreateNever TableCreateDisposition = "CREATE_NEVER"
)

// TableWriteDisposition specifies how existing data in a destination table is treated.
// Default is WriteAppend.
type TableWriteDisposition string

const (
	// WriteAppend will append to any existing data in the destination table.
	// Data is appended atomically on successful completion of a job.
	WriteAppend TableWriteDisposition = "WRITE_APPEND"

	// WriteTruncate overrides the existing data in the destination table.
	// Data is overwritten atomically on successful completion of a job.
	WriteTruncate TableWriteDisposition = "WRITE_TRUNCATE"

	// WriteEmpty fails writes if the destination table already contains data.
	WriteEmpty TableWriteDisposition = "WRITE_EMPTY"
)

// TableType is the type of table.
type TableType string

const (
	RegularTable TableType = "TABLE"
	ViewTable    TableType = "VIEW"
)

func (t *Table) tableRefProto() *bq.TableReference {
	return &bq.TableReference{
		ProjectId: t.ProjectID,
		DatasetId: t.DatasetID,
		TableId:   t.TableID,
	}
}

// FullyQualifiedName returns the ID of the table in projectID:datasetID.tableID format.
func (t *Table) FullyQualifiedName() string {
	return fmt.Sprintf("%s:%s.%s", t.ProjectID, t.DatasetID, t.TableID)
}

// implicitTable reports whether Table is an empty placeholder, which signifies that a new table should be created with an auto-generated Table ID.
func (t *Table) implicitTable() bool {
	return t.ProjectID == "" && t.DatasetID == "" && t.TableID == ""
}

// Create creates a table in the BigQuery service.
func (t *Table) Create(ctx context.Context, options ...CreateTableOption) error {
	conf := &createTableConf{
		projectID: t.ProjectID,
		datasetID: t.DatasetID,
		tableID:   t.TableID,
	}
	for _, o := range options {
		o.customizeCreateTable(conf)
	}
	return t.c.service.createTable(ctx, conf)
}

// Metadata fetches the metadata for the table.
func (t *Table) Metadata(ctx context.Context) (*TableMetadata, error) {
	return t.c.service.getTableMetadata(ctx, t.ProjectID, t.DatasetID, t.TableID)
}

// Delete deletes the table.
func (t *Table) Delete(ctx context.Context) error {
	return t.c.service.deleteTable(ctx, t.ProjectID, t.DatasetID, t.TableID)
}

// A CreateTableOption is an optional argument to CreateTable.
type CreateTableOption interface {
	customizeCreateTable(*createTableConf)
}

type tableExpiration time.Time

// TableExpiration returns a CreateTableOption that will cause the created table to be deleted after the expiration time.
func TableExpiration(exp time.Time) CreateTableOption { return tableExpiration(exp) }

func (opt tableExpiration) customizeCreateTable(conf *createTableConf) {
	conf.expiration = time.Time(opt)
}

type viewQuery string

// ViewQuery returns a CreateTableOption that causes the created table to be a virtual table defined by the supplied query.
// For more information see: https://cloud.google.com/bigquery/querying-data#views
func ViewQuery(query string) CreateTableOption { return viewQuery(query) }

func (opt viewQuery) customizeCreateTable(conf *createTableConf) {
	conf.viewQuery = string(opt)
}

type useStandardSQL struct{}

// UseStandardSQL returns a CreateTableOption to set the table to use standard SQL.
// The default setting is false (using legacy SQL).
func UseStandardSQL() CreateTableOption { return useStandardSQL{} }

func (opt useStandardSQL) customizeCreateTable(conf *createTableConf) {
	conf.useStandardSQL = true
}

// TimePartitioning is a CreateTableOption that can be used to set time-based
// date partitioning on a table.
// For more information see: https://cloud.google.com/bigquery/docs/creating-partitioned-tables
type TimePartitioning struct {
	// (Optional) The amount of time to keep the storage for a partition.
	// If the duration is empty (0), the data in the partitions do not expire.
	Expiration time.Duration
}

func (opt TimePartitioning) customizeCreateTable(conf *createTableConf) {
	conf.timePartitioning = &opt
}
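To see how these options compose, a sketch only (not part of the commit; the dataset and table names are made up):

// createDailyTable is an illustrative, hypothetical helper combining
// CreateTableOptions: the table expires in 90 days and its partitions in 7.
func createDailyTable(ctx context.Context, c *Client) error {
	t := c.Dataset("analytics").Table("events")
	return t.Create(ctx,
		TableExpiration(time.Now().Add(90*24*time.Hour)),
		TimePartitioning{Expiration: 7 * 24 * time.Hour},
	)
}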
// Update modifies specific Table metadata fields.
func (t *Table) Update(ctx context.Context, tm TableMetadataToUpdate) (*TableMetadata, error) {
	var conf patchTableConf
	if tm.Description != nil {
		s := optional.ToString(tm.Description)
		conf.Description = &s
	}
	if tm.Name != nil {
		s := optional.ToString(tm.Name)
		conf.Name = &s
	}
	conf.Schema = tm.Schema
	return t.c.service.patchTable(ctx, t.ProjectID, t.DatasetID, t.TableID, &conf)
}

// TableMetadataToUpdate is used when updating a table's metadata.
// Only non-nil fields will be updated.
type TableMetadataToUpdate struct {
	// Description is the user-friendly description of this table.
	Description optional.String

	// Name is the user-friendly name for this table.
	Name optional.String

	// Schema is the table's schema.
	// When updating a schema, you can add columns but not remove them.
	Schema Schema
	// TODO(jba): support updating the view
}
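A sketch of a typical call (not part of the commit): only the fields set in TableMetadataToUpdate are patched.

// setDescription is an illustrative, hypothetical helper that patches only
// the table description; Name and Schema stay untouched because they are nil.
func setDescription(ctx context.Context, t *Table) (*TableMetadata, error) {
	return t.Update(ctx, TableMetadataToUpdate{
		Description: "nightly snapshot of raw events",
	})
}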
162 vendor/cloud.google.com/go/bigquery/uploader.go generated vendored Normal file
@@ -0,0 +1,162 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package bigquery

import (
	"fmt"
	"reflect"

	"golang.org/x/net/context"
)

// An Uploader does streaming inserts into a BigQuery table.
// It is safe for concurrent use.
type Uploader struct {
	t *Table

	// SkipInvalidRows causes rows containing invalid data to be silently
	// ignored. The default value is false, which causes the entire request to
	// fail if there is an attempt to insert an invalid row.
	SkipInvalidRows bool

	// IgnoreUnknownValues causes values not matching the schema to be ignored.
	// The default value is false, which causes records containing such values
	// to be treated as invalid records.
	IgnoreUnknownValues bool

	// A TableTemplateSuffix allows Uploaders to create tables automatically.
	//
	// Experimental: this option is experimental and may be modified or removed in future versions,
	// regardless of any other documented package stability guarantees.
	//
	// When you specify a suffix, the table you upload data to
	// will be used as a template for creating a new table, with the same schema,
	// called <table> + <suffix>.
	//
	// More information is available at
	// https://cloud.google.com/bigquery/streaming-data-into-bigquery#template-tables
	TableTemplateSuffix string
}

// Uploader returns an Uploader that can be used to append rows to t.
// The returned Uploader may optionally be further configured before its Put method is called.
func (t *Table) Uploader() *Uploader {
	return &Uploader{t: t}
}

// Put uploads one or more rows to the BigQuery service.
//
// If src is ValueSaver, then its Save method is called to produce a row for uploading.
//
// If src is a struct or pointer to a struct, then a schema is inferred from it
// and used to create a StructSaver. The InsertID of the StructSaver will be
// empty.
//
// If src is a slice of ValueSavers, structs, or struct pointers, then each
// element of the slice is treated as above, and multiple rows are uploaded.
//
// Put returns a PutMultiError if one or more rows failed to be uploaded.
// The PutMultiError contains a RowInsertionError for each failed row.
//
// Put will retry on temporary errors (see
// https://cloud.google.com/bigquery/troubleshooting-errors). This can result
// in duplicate rows if you do not use insert IDs. Also, if the error persists,
// the call will run indefinitely. Pass a context with a timeout to prevent
// hanging calls.
func (u *Uploader) Put(ctx context.Context, src interface{}) error {
	savers, err := valueSavers(src)
	if err != nil {
		return err
	}
	return u.putMulti(ctx, savers)
}

func valueSavers(src interface{}) ([]ValueSaver, error) {
	saver, ok, err := toValueSaver(src)
	if err != nil {
		return nil, err
	}
	if ok {
		return []ValueSaver{saver}, nil
	}
	srcVal := reflect.ValueOf(src)
	if srcVal.Kind() != reflect.Slice {
		return nil, fmt.Errorf("%T is not a ValueSaver, struct, struct pointer, or slice", src)
	}
	var savers []ValueSaver
	for i := 0; i < srcVal.Len(); i++ {
		s := srcVal.Index(i).Interface()
		saver, ok, err := toValueSaver(s)
		if err != nil {
			return nil, err
		}
		if !ok {
			return nil, fmt.Errorf("src[%d] has type %T, which is not a ValueSaver, struct or struct pointer", i, s)
		}
		savers = append(savers, saver)
	}
	return savers, nil
}

// Make a ValueSaver from x, which must implement ValueSaver already
// or be a struct or pointer to struct.
func toValueSaver(x interface{}) (ValueSaver, bool, error) {
	if saver, ok := x.(ValueSaver); ok {
		return saver, ok, nil
	}
	v := reflect.ValueOf(x)
	// Support Put with []interface{}
	if v.Kind() == reflect.Interface {
		v = v.Elem()
	}
	if v.Kind() == reflect.Ptr {
		v = v.Elem()
	}
	if v.Kind() != reflect.Struct {
		return nil, false, nil
	}
	schema, err := inferSchemaReflect(v.Type())
	if err != nil {
		return nil, false, err
	}
	return &StructSaver{Struct: x, Schema: schema}, true, nil
}

func (u *Uploader) putMulti(ctx context.Context, src []ValueSaver) error {
	var rows []*insertionRow
	for _, saver := range src {
		row, insertID, err := saver.Save()
		if err != nil {
			return err
		}
		rows = append(rows, &insertionRow{InsertID: insertID, Row: row})
	}

	return u.t.c.service.insertRows(ctx, u.t.ProjectID, u.t.DatasetID, u.t.TableID, rows, &insertRowsConf{
		skipInvalidRows:     u.SkipInvalidRows,
		ignoreUnknownValues: u.IgnoreUnknownValues,
		templateSuffix:      u.TableTemplateSuffix,
	})
}

// An insertionRow represents a row of data to be inserted into a table.
type insertionRow struct {
	// If InsertID is non-empty, BigQuery will use it to de-duplicate insertions of
	// this row on a best-effort basis.
	InsertID string
	// The data to be inserted, represented as a map from field name to Value.
	Row map[string]Value
}
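A usage sketch (not part of the commit): the clickEvent type and table names are made up; PutMultiError and RowInsertionError are the types this package returns from Put.

// clickEvent is an illustrative, hypothetical row type for the
// struct-slice path through Put.
type clickEvent struct {
	URL   string
	Count int
}

// uploadClicks streams a batch and inspects per-row failures.
func uploadClicks(ctx context.Context, c *Client, clicks []clickEvent) error {
	u := c.Dataset("analytics").Table("clicks").Uploader()
	u.SkipInvalidRows = true
	err := u.Put(ctx, clicks)
	if multi, ok := err.(PutMultiError); ok {
		for _, rowErr := range multi {
			fmt.Printf("row %d (insert ID %q) failed: %v\n",
				rowErr.RowIndex, rowErr.InsertID, rowErr.Errors)
		}
	}
	return err
}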
285 vendor/cloud.google.com/go/bigquery/uploader_test.go generated vendored Normal file
@@ -0,0 +1,285 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package bigquery

import (
	"reflect"
	"testing"

	"cloud.google.com/go/internal/pretty"

	"golang.org/x/net/context"
)

type testSaver struct {
	ir  *insertionRow
	err error
}

func (ts testSaver) Save() (map[string]Value, string, error) {
	return ts.ir.Row, ts.ir.InsertID, ts.err
}

func TestRejectsNonValueSavers(t *testing.T) {
	client := &Client{projectID: "project-id"}
	u := Uploader{t: client.Dataset("dataset-id").Table("table-id")}

	testCases := []struct {
		src interface{}
	}{
		{
			src: 1,
		},
		{
			src: []int{1, 2},
		},
		{
			src: []interface{}{
				testSaver{ir: &insertionRow{"a", map[string]Value{"one": 1}}},
				1,
			},
		},
	}

	for _, tc := range testCases {
		if err := u.Put(context.Background(), tc.src); err == nil {
			t.Errorf("put value: %v; got nil, want error", tc.src)
		}
	}
}

type insertRowsRecorder struct {
	rowBatches [][]*insertionRow
	service
}

func (irr *insertRowsRecorder) insertRows(ctx context.Context, projectID, datasetID, tableID string, rows []*insertionRow, conf *insertRowsConf) error {
	irr.rowBatches = append(irr.rowBatches, rows)
	return nil
}

func TestInsertsData(t *testing.T) {
	testCases := []struct {
		data [][]*insertionRow
	}{
		{
			data: [][]*insertionRow{
				{
					&insertionRow{"a", map[string]Value{"one": 1}},
				},
			},
		},
		{
			data: [][]*insertionRow{
				{
					&insertionRow{"a", map[string]Value{"one": 1}},
					&insertionRow{"b", map[string]Value{"two": 2}},
				},
			},
		},
		{
			data: [][]*insertionRow{
				{
					&insertionRow{"a", map[string]Value{"one": 1}},
				},
				{
					&insertionRow{"b", map[string]Value{"two": 2}},
				},
			},
		},
		{
			data: [][]*insertionRow{
				{
					&insertionRow{"a", map[string]Value{"one": 1}},
					&insertionRow{"b", map[string]Value{"two": 2}},
				},
				{
					&insertionRow{"c", map[string]Value{"three": 3}},
					&insertionRow{"d", map[string]Value{"four": 4}},
				},
			},
		},
	}
	for _, tc := range testCases {
		irr := &insertRowsRecorder{}
		client := &Client{
			projectID: "project-id",
			service:   irr,
		}
		u := client.Dataset("dataset-id").Table("table-id").Uploader()
		for _, batch := range tc.data {
			if len(batch) == 0 {
				continue
			}
			var toUpload interface{}
			if len(batch) == 1 {
				toUpload = testSaver{ir: batch[0]}
			} else {
				savers := []testSaver{}
				for _, row := range batch {
					savers = append(savers, testSaver{ir: row})
				}
				toUpload = savers
			}

			err := u.Put(context.Background(), toUpload)
			if err != nil {
				t.Errorf("expected successful Put of ValueSaver; got: %v", err)
			}
		}
		if got, want := irr.rowBatches, tc.data; !reflect.DeepEqual(got, want) {
			t.Errorf("got: %v, want: %v", got, want)
		}
	}
}

type uploadOptionRecorder struct {
	received *insertRowsConf
	service
}

func (u *uploadOptionRecorder) insertRows(ctx context.Context, projectID, datasetID, tableID string, rows []*insertionRow, conf *insertRowsConf) error {
	u.received = conf
	return nil
}

func TestUploadOptionsPropagate(t *testing.T) {
	// we don't care for the data in this testcase.
	dummyData := testSaver{ir: &insertionRow{}}
	recorder := new(uploadOptionRecorder)
	c := &Client{service: recorder}
	table := &Table{
		ProjectID: "project-id",
		DatasetID: "dataset-id",
		TableID:   "table-id",
		c:         c,
	}

	tests := [...]struct {
		ul   *Uploader
		conf insertRowsConf
	}{
		{
			// test zero options lead to zero value for insertRowsConf
			ul: table.Uploader(),
		},
		{
			ul: func() *Uploader {
				u := table.Uploader()
				u.TableTemplateSuffix = "suffix"
				return u
			}(),
			conf: insertRowsConf{
				templateSuffix: "suffix",
			},
		},
		{
			ul: func() *Uploader {
				u := table.Uploader()
				u.IgnoreUnknownValues = true
				return u
			}(),
			conf: insertRowsConf{
				ignoreUnknownValues: true,
			},
		},
		{
			ul: func() *Uploader {
				u := table.Uploader()
				u.SkipInvalidRows = true
				return u
			}(),
			conf: insertRowsConf{
				skipInvalidRows: true,
			},
		},
		{ // multiple upload options combine
			ul: func() *Uploader {
				u := table.Uploader()
				u.TableTemplateSuffix = "suffix"
				u.IgnoreUnknownValues = true
				u.SkipInvalidRows = true
				return u
			}(),
			conf: insertRowsConf{
				templateSuffix:      "suffix",
				skipInvalidRows:     true,
				ignoreUnknownValues: true,
			},
		},
	}

	for i, tc := range tests {
		err := tc.ul.Put(context.Background(), dummyData)
		if err != nil {
			t.Fatalf("%d: expected successful Put of ValueSaver; got: %v", i, err)
		}

		if recorder.received == nil {
			t.Fatalf("%d: received no options at all!", i)
		}

		want := tc.conf
		got := *recorder.received
		if got != want {
			t.Errorf("%d: got %#v, want %#v, ul=%#v", i, got, want, tc.ul)
		}
	}
}

func TestValueSavers(t *testing.T) {
	ts := &testSaver{ir: &insertionRow{}}
	type T struct{ I int }
	schema, err := InferSchema(T{})
	if err != nil {
		t.Fatal(err)
	}
	for _, test := range []struct {
		in   interface{}
		want []ValueSaver
	}{
		{ts, []ValueSaver{ts}},
		{T{I: 1}, []ValueSaver{&StructSaver{Schema: schema, Struct: T{I: 1}}}},
		{[]ValueSaver{ts, ts}, []ValueSaver{ts, ts}},
		{[]interface{}{ts, ts}, []ValueSaver{ts, ts}},
		{[]T{{I: 1}, {I: 2}}, []ValueSaver{
			&StructSaver{Schema: schema, Struct: T{I: 1}},
			&StructSaver{Schema: schema, Struct: T{I: 2}},
		}},
		{[]interface{}{T{I: 1}, &T{I: 2}}, []ValueSaver{
			&StructSaver{Schema: schema, Struct: T{I: 1}},
			&StructSaver{Schema: schema, Struct: &T{I: 2}},
		}},
	} {
		got, err := valueSavers(test.in)
		if err != nil {
			t.Fatal(err)
		}
		if !reflect.DeepEqual(got, test.want) {
			t.Errorf("%+v: got %v, want %v", test.in, pretty.Value(got), pretty.Value(test.want))
		}
		// Make sure Save is successful.
		for i, vs := range got {
			_, _, err := vs.Save()
			if err != nil {
				t.Fatalf("%+v, #%d: got error %v, want nil", test.in, i, err)
			}
		}
	}
}
47 vendor/cloud.google.com/go/bigquery/utils_test.go generated vendored Normal file
@@ -0,0 +1,47 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package bigquery

import (
	"golang.org/x/net/context"
	bq "google.golang.org/api/bigquery/v2"
)

func defaultGCS() *GCSReference {
	return &GCSReference{
		uris: []string{"uri"},
	}
}

var defaultQuery = &QueryConfig{
	Q:                "query string",
	DefaultProjectID: "def-project-id",
	DefaultDatasetID: "def-dataset-id",
}

type testService struct {
	*bq.Job

	service
}

func (s *testService) insertJob(ctx context.Context, projectID string, conf *insertJobConf) (*Job, error) {
	s.Job = conf.job
	return &Job{}, nil
}

func (s *testService) jobStatus(ctx context.Context, projectID, jobID string) (*JobStatus, error) {
	return &JobStatus{State: Done}, nil
}
637 vendor/cloud.google.com/go/bigquery/value.go generated vendored Normal file
@@ -0,0 +1,637 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package bigquery

import (
	"encoding/base64"
	"errors"
	"fmt"
	"reflect"
	"strconv"
	"time"

	"cloud.google.com/go/civil"

	bq "google.golang.org/api/bigquery/v2"
)

// Value stores the contents of a single cell from a BigQuery result.
type Value interface{}

// ValueLoader stores a slice of Values representing a result row from a Read operation.
// See RowIterator.Next for more information.
type ValueLoader interface {
	Load(v []Value, s Schema) error
}

// valueList converts a []Value to implement ValueLoader.
type valueList []Value

// Load stores a sequence of values in a valueList.
// It resets the slice length to zero, then appends each value to it.
func (vs *valueList) Load(v []Value, _ Schema) error {
	*vs = append((*vs)[:0], v...)
	return nil
}

// valueMap converts a map[string]Value to implement ValueLoader.
type valueMap map[string]Value

// Load stores a sequence of values in a valueMap.
func (vm *valueMap) Load(v []Value, s Schema) error {
	if *vm == nil {
		*vm = map[string]Value{}
	}
	loadMap(*vm, v, s)
	return nil
}

func loadMap(m map[string]Value, vals []Value, s Schema) {
	for i, f := range s {
		val := vals[i]
		var v interface{}
		switch {
		case f.Schema == nil:
			v = val
		case !f.Repeated:
			m2 := map[string]Value{}
			loadMap(m2, val.([]Value), f.Schema)
			v = m2
		default: // repeated and nested
			sval := val.([]Value)
			vs := make([]Value, len(sval))
			for j, e := range sval {
				m2 := map[string]Value{}
				loadMap(m2, e.([]Value), f.Schema)
				vs[j] = m2
			}
			v = vs
		}
		m[f.Name] = v
	}
}
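A concrete before/after (not part of the commit) for loadMap with a nested repeated field:

// Illustrative sketch: given a schema with a scalar "name" and a repeated
// RECORD "tags" containing a single "key" field, the flat row
//
//	[]Value{"alice", []Value{[]Value{"k1"}, []Value{"k2"}}}
//
// loads into the map
//
//	map[string]Value{
//		"name": "alice",
//		"tags": []Value{
//			map[string]Value{"key": "k1"},
//			map[string]Value{"key": "k2"},
//		},
//	}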
|
||||

type structLoader struct {
	typ reflect.Type // type of struct
	err error
	ops []structLoaderOp

	vstructp reflect.Value // pointer to current struct value; changed by set
}

// A setFunc is a function that sets a struct field or slice/array
// element to a value.
type setFunc func(v reflect.Value, val interface{}) error

// A structLoaderOp instructs the loader to set a struct field to a row value.
type structLoaderOp struct {
	fieldIndex []int
	valueIndex int
	setFunc    setFunc
	repeated   bool
}

func setAny(v reflect.Value, x interface{}) error {
	v.Set(reflect.ValueOf(x))
	return nil
}

func setInt(v reflect.Value, x interface{}) error {
	xx := x.(int64)
	if v.OverflowInt(xx) {
		return fmt.Errorf("bigquery: value %v overflows struct field of type %v", xx, v.Type())
	}
	v.SetInt(xx)
	return nil
}

func setFloat(v reflect.Value, x interface{}) error {
	xx := x.(float64)
	if v.OverflowFloat(xx) {
		return fmt.Errorf("bigquery: value %v overflows struct field of type %v", xx, v.Type())
	}
	v.SetFloat(xx)
	return nil
}

func setBool(v reflect.Value, x interface{}) error {
	v.SetBool(x.(bool))
	return nil
}

func setString(v reflect.Value, x interface{}) error {
	v.SetString(x.(string))
	return nil
}

func setBytes(v reflect.Value, x interface{}) error {
	v.SetBytes(x.([]byte))
	return nil
}

// set remembers a value for the next call to Load. The value must be
// a pointer to a struct. (This is checked in RowIterator.Next.)
func (sl *structLoader) set(structp interface{}, schema Schema) error {
	if sl.err != nil {
		return sl.err
	}
	sl.vstructp = reflect.ValueOf(structp)
	typ := sl.vstructp.Type().Elem()
	if sl.typ == nil {
		// First call: remember the type and compile the schema.
		sl.typ = typ
		ops, err := compileToOps(typ, schema)
		if err != nil {
			sl.err = err
			return err
		}
		sl.ops = ops
	} else if sl.typ != typ {
		return fmt.Errorf("bigquery: struct type changed from %s to %s", sl.typ, typ)
	}
	return nil
}

// compileToOps produces a sequence of operations that will set the fields of a
// value of structType to the contents of a row with schema.
func compileToOps(structType reflect.Type, schema Schema) ([]structLoaderOp, error) {
	var ops []structLoaderOp
	fields, err := fieldCache.Fields(structType)
	if err != nil {
		return nil, err
	}
	for i, schemaField := range schema {
		// Look for an exported struct field with the same name as the schema
		// field, ignoring case (BigQuery column names are case-insensitive,
		// and we want to act like encoding/json anyway).
		structField := fields.Match(schemaField.Name)
		if structField == nil {
			// Ignore schema fields with no corresponding struct field.
			continue
		}
		op := structLoaderOp{
			fieldIndex: structField.Index,
			valueIndex: i,
		}
		t := structField.Type
		if schemaField.Repeated {
			if t.Kind() != reflect.Slice && t.Kind() != reflect.Array {
				return nil, fmt.Errorf("bigquery: repeated schema field %s requires slice or array, but struct field %s has type %s",
					schemaField.Name, structField.Name, t)
			}
			t = t.Elem()
			op.repeated = true
		}
		if schemaField.Type == RecordFieldType {
			// Field can be a struct or a pointer to a struct.
			if t.Kind() == reflect.Ptr {
				t = t.Elem()
			}
			if t.Kind() != reflect.Struct {
				return nil, fmt.Errorf("bigquery: field %s has type %s, expected struct or *struct",
					structField.Name, structField.Type)
			}
			nested, err := compileToOps(t, schemaField.Schema)
			if err != nil {
				return nil, err
			}
			op.setFunc = func(v reflect.Value, val interface{}) error {
				return setNested(nested, v, val.([]Value))
			}
		} else {
			op.setFunc = determineSetFunc(t, schemaField.Type)
			if op.setFunc == nil {
				return nil, fmt.Errorf("bigquery: schema field %s of type %s is not assignable to struct field %s of type %s",
					schemaField.Name, schemaField.Type, structField.Name, t)
			}
		}
		ops = append(ops, op)
	}
	return ops, nil
}

// determineSetFunc chooses the best function for setting a field of type ftype
// to a value whose schema field type is stype. It returns nil if stype
// is not assignable to ftype.
// determineSetFunc considers only basic types. See compileToOps for
// handling of repetition and nesting.
func determineSetFunc(ftype reflect.Type, stype FieldType) setFunc {
	switch stype {
	case StringFieldType:
		if ftype.Kind() == reflect.String {
			return setString
		}

	case BytesFieldType:
		if ftype == typeOfByteSlice {
			return setBytes
		}

	case IntegerFieldType:
		if isSupportedIntType(ftype) {
			return setInt
		}

	case FloatFieldType:
		switch ftype.Kind() {
		case reflect.Float32, reflect.Float64:
			return setFloat
		}

	case BooleanFieldType:
		if ftype.Kind() == reflect.Bool {
			return setBool
		}

	case TimestampFieldType:
		if ftype == typeOfGoTime {
			return setAny
		}

	case DateFieldType:
		if ftype == typeOfDate {
			return setAny
		}

	case TimeFieldType:
		if ftype == typeOfTime {
			return setAny
		}

	case DateTimeFieldType:
		if ftype == typeOfDateTime {
			return setAny
		}
	}
	return nil
}

func (sl *structLoader) Load(values []Value, _ Schema) error {
	if sl.err != nil {
		return sl.err
	}
	return runOps(sl.ops, sl.vstructp.Elem(), values)
}

// runOps executes a sequence of ops, setting the fields of vstruct to the
// supplied values.
func runOps(ops []structLoaderOp, vstruct reflect.Value, values []Value) error {
	for _, op := range ops {
		field := vstruct.FieldByIndex(op.fieldIndex)
		var err error
		if op.repeated {
			err = setRepeated(field, values[op.valueIndex].([]Value), op.setFunc)
		} else {
			err = op.setFunc(field, values[op.valueIndex])
		}
		if err != nil {
			return err
		}
	}
	return nil
}

func setNested(ops []structLoaderOp, v reflect.Value, vals []Value) error {
	// v is either a struct or a pointer to a struct.
	if v.Kind() == reflect.Ptr {
		// If the pointer is nil, set it to a zero struct value.
		if v.IsNil() {
			v.Set(reflect.New(v.Type().Elem()))
		}
		v = v.Elem()
	}
	return runOps(ops, v, vals)
}

func setRepeated(field reflect.Value, vslice []Value, setElem setFunc) error {
	vlen := len(vslice)
	var flen int
	switch field.Type().Kind() {
	case reflect.Slice:
		// Make a slice of the right size, avoiding allocation if possible.
		switch {
		case field.Len() < vlen:
			field.Set(reflect.MakeSlice(field.Type(), vlen, vlen))
		case field.Len() > vlen:
			field.SetLen(vlen)
		}
		flen = vlen

	case reflect.Array:
		flen = field.Len()
		if flen > vlen {
			// Set extra elements to their zero value.
			z := reflect.Zero(field.Type().Elem())
			for i := vlen; i < flen; i++ {
				field.Index(i).Set(z)
			}
		}
	default:
		return fmt.Errorf("bigquery: impossible field type %s", field.Type())
	}
	for i, val := range vslice {
		if i < flen { // avoid writing past the end of a short array
			if err := setElem(field.Index(i), val); err != nil {
				return err
			}
		}
	}
	return nil
}

// A ValueSaver returns a row of data to be inserted into a table.
type ValueSaver interface {
	// Save returns a row to be inserted into a BigQuery table, represented
	// as a map from field name to Value.
	// If insertID is non-empty, BigQuery will use it to de-duplicate
	// insertions of this row on a best-effort basis.
	Save() (row map[string]Value, insertID string, err error)
}

// ValuesSaver implements ValueSaver for a slice of Values.
type ValuesSaver struct {
	Schema Schema

	// If non-empty, BigQuery will use InsertID to de-duplicate insertions
	// of this row on a best-effort basis.
	InsertID string

	Row []Value
}

// Save implements ValueSaver.
func (vls *ValuesSaver) Save() (map[string]Value, string, error) {
	m, err := valuesToMap(vls.Row, vls.Schema)
	return m, vls.InsertID, err
}

func valuesToMap(vs []Value, schema Schema) (map[string]Value, error) {
	if len(vs) != len(schema) {
		return nil, errors.New("Schema does not match length of row to be inserted")
	}

	m := make(map[string]Value)
	for i, fieldSchema := range schema {
		if fieldSchema.Type != RecordFieldType {
			m[fieldSchema.Name] = vs[i]
			continue
		}
		// Nested record, possibly repeated.
		vals, ok := vs[i].([]Value)
		if !ok {
			return nil, errors.New("nested record is not a []Value")
		}
		if !fieldSchema.Repeated {
			value, err := valuesToMap(vals, fieldSchema.Schema)
			if err != nil {
				return nil, err
			}
			m[fieldSchema.Name] = value
			continue
		}
		// A repeated nested field is converted into a slice of maps.
		var maps []Value
		for _, v := range vals {
			sv, ok := v.([]Value)
			if !ok {
				return nil, errors.New("nested record in slice is not a []Value")
			}
			value, err := valuesToMap(sv, fieldSchema.Schema)
			if err != nil {
				return nil, err
			}
			maps = append(maps, value)
		}
		m[fieldSchema.Name] = maps
	}
	return m, nil
}
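
A minimal sketch of ValuesSaver from a caller's side; the schema, field names, and row values below are invented for illustration, and an uploader that consumes ValueSavers lives elsewhere in the package:

	package main

	import (
		"fmt"
		"log"

		"cloud.google.com/go/bigquery"
	)

	func main() {
		saver := &bigquery.ValuesSaver{
			Schema: bigquery.Schema{
				{Name: "name", Type: bigquery.StringFieldType},
				{Name: "count", Type: bigquery.IntegerFieldType},
			},
			InsertID: "row-1", // optional best-effort de-duplication key
			Row:      []bigquery.Value{"widget", int64(7)},
		}
		row, insertID, err := saver.Save()
		if err != nil {
			log.Fatal(err)
		}
		// row == map[string]Value{"name": "widget", "count": int64(7)}
		fmt.Println(insertID, row)
	}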

// StructSaver implements ValueSaver for a struct.
// The struct is converted to a map of values by using the values of struct
// fields corresponding to schema fields. Additional and missing
// fields are ignored, as are nested struct pointers that are nil.
type StructSaver struct {
	// Schema determines what fields of the struct are uploaded. It should
	// match the table's schema.
	Schema Schema

	// If non-empty, BigQuery will use InsertID to de-duplicate insertions
	// of this row on a best-effort basis.
	InsertID string

	// Struct should be a struct or a pointer to a struct.
	Struct interface{}
}

// Save implements ValueSaver.
func (ss *StructSaver) Save() (row map[string]Value, insertID string, err error) {
	vstruct := reflect.ValueOf(ss.Struct)
	row, err = structToMap(vstruct, ss.Schema)
	if err != nil {
		return nil, "", err
	}
	return row, ss.InsertID, nil
}

func structToMap(vstruct reflect.Value, schema Schema) (map[string]Value, error) {
	if vstruct.Kind() == reflect.Ptr {
		vstruct = vstruct.Elem()
	}
	if !vstruct.IsValid() {
		return nil, nil
	}
	m := map[string]Value{}
	if vstruct.Kind() != reflect.Struct {
		return nil, fmt.Errorf("bigquery: type is %s, need struct or struct pointer", vstruct.Type())
	}
	fields, err := fieldCache.Fields(vstruct.Type())
	if err != nil {
		return nil, err
	}
	for _, schemaField := range schema {
		// Look for an exported struct field with the same name as the schema
		// field, ignoring case.
		structField := fields.Match(schemaField.Name)
		if structField == nil {
			continue
		}
		val, err := structFieldToUploadValue(vstruct.FieldByIndex(structField.Index), schemaField)
		if err != nil {
			return nil, err
		}
		// Add the value to the map, unless it is nil.
		if val != nil {
			m[schemaField.Name] = val
		}
	}
	return m, nil
}
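
And the struct-based counterpart: a minimal sketch of StructSaver, where the struct type, field names, and schema are again invented for illustration. Schema field names are matched to exported struct fields case-insensitively:

	package main

	import (
		"fmt"
		"log"

		"cloud.google.com/go/bigquery"
	)

	type score struct {
		Name   string
		Points int
	}

	func main() {
		ss := &bigquery.StructSaver{
			Schema: bigquery.Schema{
				{Name: "name", Type: bigquery.StringFieldType},
				{Name: "points", Type: bigquery.IntegerFieldType},
			},
			Struct: score{Name: "n1", Points: 12},
		}
		row, _, err := ss.Save()
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println(row) // map[name:n1 points:12]
	}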

// structFieldToUploadValue converts a struct field to a value suitable for ValueSaver.Save, using
// the schemaField as a guide.
// structFieldToUploadValue is careful to return a true nil interface{} when needed, so its
// caller can easily identify a nil value.
func structFieldToUploadValue(vfield reflect.Value, schemaField *FieldSchema) (interface{}, error) {
	if schemaField.Repeated && (vfield.Kind() != reflect.Slice && vfield.Kind() != reflect.Array) {
		return nil, fmt.Errorf("bigquery: repeated schema field %s requires slice or array, but value has type %s",
			schemaField.Name, vfield.Type())
	}

	// A non-nested field can be represented by its Go value.
	if schemaField.Type != RecordFieldType {
		if !schemaField.Repeated || vfield.Len() > 0 {
			return vfield.Interface(), nil
		}
		// The service treats a null repeated field as an error. Return
		// nil to omit the field entirely.
		return nil, nil
	}
	// A non-repeated nested field is converted into a map[string]Value.
	if !schemaField.Repeated {
		m, err := structToMap(vfield, schemaField.Schema)
		if err != nil {
			return nil, err
		}
		if m == nil {
			return nil, nil
		}
		return m, nil
	}
	// A repeated nested field is converted into a slice of maps.
	if vfield.Len() == 0 {
		return nil, nil
	}
	var vals []Value
	for i := 0; i < vfield.Len(); i++ {
		m, err := structToMap(vfield.Index(i), schemaField.Schema)
		if err != nil {
			return nil, err
		}
		vals = append(vals, m)
	}
	return vals, nil
}

// convertRows converts a series of TableRows into a series of Value slices.
// schema is used to interpret the data from rows; its length must match the
// length of each row.
func convertRows(rows []*bq.TableRow, schema Schema) ([][]Value, error) {
	var rs [][]Value
	for _, r := range rows {
		row, err := convertRow(r, schema)
		if err != nil {
			return nil, err
		}
		rs = append(rs, row)
	}
	return rs, nil
}

func convertRow(r *bq.TableRow, schema Schema) ([]Value, error) {
	if len(schema) != len(r.F) {
		return nil, errors.New("schema length does not match row length")
	}
	var values []Value
	for i, cell := range r.F {
		fs := schema[i]
		v, err := convertValue(cell.V, fs.Type, fs.Schema)
		if err != nil {
			return nil, err
		}
		values = append(values, v)
	}
	return values, nil
}

func convertValue(val interface{}, typ FieldType, schema Schema) (Value, error) {
	switch val := val.(type) {
	case nil:
		return nil, nil
	case []interface{}:
		return convertRepeatedRecord(val, typ, schema)
	case map[string]interface{}:
		return convertNestedRecord(val, schema)
	case string:
		return convertBasicType(val, typ)
	default:
		return nil, fmt.Errorf("got value %v; expected a value of type %s", val, typ)
	}
}
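
For reference, a hedged in-package sketch of the wire shape these converters walk, mirroring the fixtures in value_test.go below: scalar cells arrive as strings, and repeated cells are lists of single-key {"v": ...} wrappers:

	row := &bq.TableRow{
		F: []*bq.TableCell{
			{V: "a"}, // scalar cell: the value arrives as a string
			{V: []interface{}{ // repeated cell: a list of {"v": ...} wrappers
				map[string]interface{}{"v": "1"},
				map[string]interface{}{"v": "2"},
			}},
		},
	}
	vals, err := convertRow(row, Schema{
		{Type: StringFieldType},
		{Type: IntegerFieldType, Repeated: true},
	})
	// vals == []Value{"a", []Value{int64(1), int64(2)}}, err == nil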

func convertRepeatedRecord(vals []interface{}, typ FieldType, schema Schema) (Value, error) {
	var values []Value
	for _, cell := range vals {
		// each cell contains a single entry, keyed by "v"
		val := cell.(map[string]interface{})["v"]
		v, err := convertValue(val, typ, schema)
		if err != nil {
			return nil, err
		}
		values = append(values, v)
	}
	return values, nil
}

func convertNestedRecord(val map[string]interface{}, schema Schema) (Value, error) {
	// convertNestedRecord is similar to convertRow, as a record has the same structure as a row.

	// Nested records are wrapped in a map with a single key, "f".
	record := val["f"].([]interface{})
	if len(record) != len(schema) {
		return nil, errors.New("schema length does not match record length")
	}

	var values []Value
	for i, cell := range record {
		// each cell contains a single entry, keyed by "v"
		val := cell.(map[string]interface{})["v"]

		fs := schema[i]
		v, err := convertValue(val, fs.Type, fs.Schema)
		if err != nil {
			return nil, err
		}
		values = append(values, v)
	}
	return values, nil
}

// convertBasicType returns val as an interface with a concrete type specified by typ.
func convertBasicType(val string, typ FieldType) (Value, error) {
	switch typ {
	case StringFieldType:
		return val, nil
	case BytesFieldType:
		return base64.StdEncoding.DecodeString(val)
	case IntegerFieldType:
		return strconv.ParseInt(val, 10, 64)
	case FloatFieldType:
		return strconv.ParseFloat(val, 64)
	case BooleanFieldType:
		return strconv.ParseBool(val)
	case TimestampFieldType:
		f, err := strconv.ParseFloat(val, 64)
		return Value(time.Unix(0, int64(f*1e9)).UTC()), err
	case DateFieldType:
		return civil.ParseDate(val)
	case TimeFieldType:
		return civil.ParseTime(val)
	case DateTimeFieldType:
		return civil.ParseDateTime(val)
	default:
		return nil, fmt.Errorf("unrecognized type: %s", typ)
	}
}
885 vendor/cloud.google.com/go/bigquery/value_test.go generated vendored Normal file
@@ -0,0 +1,885 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package bigquery

import (
	"encoding/base64"
	"fmt"
	"math"
	"reflect"
	"testing"
	"time"

	"cloud.google.com/go/civil"
	"cloud.google.com/go/internal/pretty"

	bq "google.golang.org/api/bigquery/v2"
)

func TestConvertBasicValues(t *testing.T) {
	schema := []*FieldSchema{
		{Type: StringFieldType},
		{Type: IntegerFieldType},
		{Type: FloatFieldType},
		{Type: BooleanFieldType},
		{Type: BytesFieldType},
	}
	row := &bq.TableRow{
		F: []*bq.TableCell{
			{V: "a"},
			{V: "1"},
			{V: "1.2"},
			{V: "true"},
			{V: base64.StdEncoding.EncodeToString([]byte("foo"))},
		},
	}
	got, err := convertRow(row, schema)
	if err != nil {
		t.Fatalf("error converting: %v", err)
	}
	want := []Value{"a", int64(1), 1.2, true, []byte("foo")}
	if !reflect.DeepEqual(got, want) {
		t.Errorf("converting basic values: got:\n%v\nwant:\n%v", got, want)
	}
}

func TestConvertTime(t *testing.T) {
	// TODO(jba): add tests for civil time types.
	schema := []*FieldSchema{
		{Type: TimestampFieldType},
	}
	thyme := time.Date(1970, 1, 1, 10, 0, 0, 10, time.UTC)
	row := &bq.TableRow{
		F: []*bq.TableCell{
			{V: fmt.Sprintf("%.10f", float64(thyme.UnixNano())/1e9)},
		},
	}
	got, err := convertRow(row, schema)
	if err != nil {
		t.Fatalf("error converting: %v", err)
	}
	if !got[0].(time.Time).Equal(thyme) {
		t.Errorf("converting basic values: got:\n%v\nwant:\n%v", got, thyme)
	}
	if got[0].(time.Time).Location() != time.UTC {
		t.Errorf("expected time zone UTC: got:\n%v", got)
	}
}

func TestConvertNullValues(t *testing.T) {
	schema := []*FieldSchema{
		{Type: StringFieldType},
	}
	row := &bq.TableRow{
		F: []*bq.TableCell{
			{V: nil},
		},
	}
	got, err := convertRow(row, schema)
	if err != nil {
		t.Fatalf("error converting: %v", err)
	}
	want := []Value{nil}
	if !reflect.DeepEqual(got, want) {
		t.Errorf("converting null values: got:\n%v\nwant:\n%v", got, want)
	}
}

func TestBasicRepetition(t *testing.T) {
	schema := []*FieldSchema{
		{Type: IntegerFieldType, Repeated: true},
	}
	row := &bq.TableRow{
		F: []*bq.TableCell{
			{
				V: []interface{}{
					map[string]interface{}{
						"v": "1",
					},
					map[string]interface{}{
						"v": "2",
					},
					map[string]interface{}{
						"v": "3",
					},
				},
			},
		},
	}
	got, err := convertRow(row, schema)
	if err != nil {
		t.Fatalf("error converting: %v", err)
	}
	want := []Value{[]Value{int64(1), int64(2), int64(3)}}
	if !reflect.DeepEqual(got, want) {
		t.Errorf("converting basic repeated values: got:\n%v\nwant:\n%v", got, want)
	}
}

func TestNestedRecordContainingRepetition(t *testing.T) {
	schema := []*FieldSchema{
		{
			Type: RecordFieldType,
			Schema: Schema{
				{Type: IntegerFieldType, Repeated: true},
			},
		},
	}
	row := &bq.TableRow{
		F: []*bq.TableCell{
			{
				V: map[string]interface{}{
					"f": []interface{}{
						map[string]interface{}{
							"v": []interface{}{
								map[string]interface{}{"v": "1"},
								map[string]interface{}{"v": "2"},
								map[string]interface{}{"v": "3"},
							},
						},
					},
				},
			},
		},
	}

	got, err := convertRow(row, schema)
	if err != nil {
		t.Fatalf("error converting: %v", err)
	}
	want := []Value{[]Value{[]Value{int64(1), int64(2), int64(3)}}}
	if !reflect.DeepEqual(got, want) {
		t.Errorf("converting basic repeated values: got:\n%v\nwant:\n%v", got, want)
	}
}

func TestRepeatedRecordContainingRepetition(t *testing.T) {
	schema := []*FieldSchema{
		{
			Type:     RecordFieldType,
			Repeated: true,
			Schema: Schema{
				{Type: IntegerFieldType, Repeated: true},
			},
		},
	}
	row := &bq.TableRow{F: []*bq.TableCell{
		{
			V: []interface{}{ // repeated records.
				map[string]interface{}{ // first record.
					"v": map[string]interface{}{ // pointless single-key-map wrapper.
						"f": []interface{}{ // list of record fields.
							map[string]interface{}{ // only record (repeated ints)
								"v": []interface{}{ // pointless wrapper.
									map[string]interface{}{
										"v": "1",
									},
									map[string]interface{}{
										"v": "2",
									},
									map[string]interface{}{
										"v": "3",
									},
								},
							},
						},
					},
				},
				map[string]interface{}{ // second record.
					"v": map[string]interface{}{
						"f": []interface{}{
							map[string]interface{}{
								"v": []interface{}{
									map[string]interface{}{
										"v": "4",
									},
									map[string]interface{}{
										"v": "5",
									},
									map[string]interface{}{
										"v": "6",
									},
								},
							},
						},
					},
				},
			},
		},
	}}

	got, err := convertRow(row, schema)
	if err != nil {
		t.Fatalf("error converting: %v", err)
	}
	want := []Value{ // the row is a list of length 1, containing an entry for the repeated record.
		[]Value{ // the repeated record is a list of length 2, containing an entry for each repetition.
			[]Value{ // the record is a list of length 1, containing an entry for the repeated integer field.
				[]Value{int64(1), int64(2), int64(3)}, // the repeated integer field is a list of length 3.
			},
			[]Value{ // second record
				[]Value{int64(4), int64(5), int64(6)},
			},
		},
	}
	if !reflect.DeepEqual(got, want) {
		t.Errorf("converting repeated records with repeated values: got:\n%v\nwant:\n%v", got, want)
	}
}

func TestRepeatedRecordContainingRecord(t *testing.T) {
	schema := []*FieldSchema{
		{
			Type:     RecordFieldType,
			Repeated: true,
			Schema: Schema{
				{
					Type: StringFieldType,
				},
				{
					Type: RecordFieldType,
					Schema: Schema{
						{Type: IntegerFieldType},
						{Type: StringFieldType},
					},
				},
			},
		},
	}
	row := &bq.TableRow{F: []*bq.TableCell{
		{
			V: []interface{}{ // repeated records.
				map[string]interface{}{ // first record.
					"v": map[string]interface{}{ // pointless single-key-map wrapper.
						"f": []interface{}{ // list of record fields.
							map[string]interface{}{ // first record field (name)
								"v": "first repeated record",
							},
							map[string]interface{}{ // second record field (nested record).
								"v": map[string]interface{}{ // pointless single-key-map wrapper.
									"f": []interface{}{ // nested record fields
										map[string]interface{}{
											"v": "1",
										},
										map[string]interface{}{
											"v": "two",
										},
									},
								},
							},
						},
					},
				},
				map[string]interface{}{ // second record.
					"v": map[string]interface{}{
						"f": []interface{}{
							map[string]interface{}{
								"v": "second repeated record",
							},
							map[string]interface{}{
								"v": map[string]interface{}{
									"f": []interface{}{
										map[string]interface{}{
											"v": "3",
										},
										map[string]interface{}{
											"v": "four",
										},
									},
								},
							},
						},
					},
				},
			},
		},
	}}

	got, err := convertRow(row, schema)
	if err != nil {
		t.Fatalf("error converting: %v", err)
	}
	// TODO: test with flattenresults.
	want := []Value{ // the row is a list of length 1, containing an entry for the repeated record.
		[]Value{ // the repeated record is a list of length 2, containing an entry for each repetition.
			[]Value{ // record contains a string followed by a nested record.
				"first repeated record",
				[]Value{
					int64(1),
					"two",
				},
			},
			[]Value{ // second record.
				"second repeated record",
				[]Value{
					int64(3),
					"four",
				},
			},
		},
	}
	if !reflect.DeepEqual(got, want) {
		t.Errorf("converting repeated records containing record : got:\n%v\nwant:\n%v", got, want)
	}
}

func TestValuesSaverConvertsToMap(t *testing.T) {
	testCases := []struct {
		vs   ValuesSaver
		want *insertionRow
	}{
		{
			vs: ValuesSaver{
				Schema: []*FieldSchema{
					{Name: "intField", Type: IntegerFieldType},
					{Name: "strField", Type: StringFieldType},
				},
				InsertID: "iid",
				Row:      []Value{1, "a"},
			},
			want: &insertionRow{
				InsertID: "iid",
				Row:      map[string]Value{"intField": 1, "strField": "a"},
			},
		},
		{
			vs: ValuesSaver{
				Schema: []*FieldSchema{
					{Name: "intField", Type: IntegerFieldType},
					{
						Name: "recordField",
						Type: RecordFieldType,
						Schema: []*FieldSchema{
							{Name: "nestedInt", Type: IntegerFieldType, Repeated: true},
						},
					},
				},
				InsertID: "iid",
				Row:      []Value{1, []Value{[]Value{2, 3}}},
			},
			want: &insertionRow{
				InsertID: "iid",
				Row: map[string]Value{
					"intField": 1,
					"recordField": map[string]Value{
						"nestedInt": []Value{2, 3},
					},
				},
			},
		},
		{ // repeated nested field
			vs: ValuesSaver{
				Schema: Schema{
					{
						Name: "records",
						Type: RecordFieldType,
						Schema: Schema{
							{Name: "x", Type: IntegerFieldType},
							{Name: "y", Type: IntegerFieldType},
						},
						Repeated: true,
					},
				},
				InsertID: "iid",
				Row: []Value{ // a row is a []Value
					[]Value{ // repeated field's value is a []Value
						[]Value{1, 2}, // first record of the repeated field
						[]Value{3, 4}, // second record
					},
				},
			},
			want: &insertionRow{
				InsertID: "iid",
				Row: map[string]Value{
					"records": []Value{
						map[string]Value{"x": 1, "y": 2},
						map[string]Value{"x": 3, "y": 4},
					},
				},
			},
		},
	}
	for _, tc := range testCases {
		data, insertID, err := tc.vs.Save()
		if err != nil {
			t.Errorf("Expected successful save; got: %v", err)
		}
		got := &insertionRow{insertID, data}
		if !reflect.DeepEqual(got, tc.want) {
			t.Errorf("saving ValuesSaver:\ngot:\n%+v\nwant:\n%+v", got, tc.want)
		}
	}
}

func TestStructSaver(t *testing.T) {
	schema := Schema{
		{Name: "s", Type: StringFieldType},
		{Name: "r", Type: IntegerFieldType, Repeated: true},
		{Name: "nested", Type: RecordFieldType, Schema: Schema{
			{Name: "b", Type: BooleanFieldType},
		}},
		{Name: "rnested", Type: RecordFieldType, Repeated: true, Schema: Schema{
			{Name: "b", Type: BooleanFieldType},
		}},
	}

	type (
		N struct{ B bool }
		T struct {
			S       string
			R       []int
			Nested  *N
			Rnested []*N
		}
	)

	check := func(msg string, in interface{}, want map[string]Value) {
		ss := StructSaver{
			Schema:   schema,
			InsertID: "iid",
			Struct:   in,
		}
		got, gotIID, err := ss.Save()
		if err != nil {
			t.Fatalf("%s: %v", msg, err)
		}
		if wantIID := "iid"; gotIID != wantIID {
			t.Errorf("%s: InsertID: got %q, want %q", msg, gotIID, wantIID)
		}
		if !reflect.DeepEqual(got, want) {
			t.Errorf("%s:\ngot\n%#v\nwant\n%#v", msg, got, want)
		}
	}

	in := T{
		S:       "x",
		R:       []int{1, 2},
		Nested:  &N{B: true},
		Rnested: []*N{{true}, {false}},
	}
	want := map[string]Value{
		"s":       "x",
		"r":       []int{1, 2},
		"nested":  map[string]Value{"b": true},
		"rnested": []Value{map[string]Value{"b": true}, map[string]Value{"b": false}},
	}
	check("all values", in, want)
	check("all values, ptr", &in, want)
	check("empty struct", T{}, map[string]Value{"s": ""})

	// Missing and extra fields ignored.
	type T2 struct {
		S string
		// missing R, Nested, RNested
		Extra int
	}
	check("missing and extra", T2{S: "x"}, map[string]Value{"s": "x"})

	check("nils in slice", T{Rnested: []*N{{true}, nil, {false}}},
		map[string]Value{
			"s":       "",
			"rnested": []Value{map[string]Value{"b": true}, map[string]Value(nil), map[string]Value{"b": false}},
		})
}

func TestConvertRows(t *testing.T) {
	schema := []*FieldSchema{
		{Type: StringFieldType},
		{Type: IntegerFieldType},
		{Type: FloatFieldType},
		{Type: BooleanFieldType},
	}
	rows := []*bq.TableRow{
		{F: []*bq.TableCell{
			{V: "a"},
			{V: "1"},
			{V: "1.2"},
			{V: "true"},
		}},
		{F: []*bq.TableCell{
			{V: "b"},
			{V: "2"},
			{V: "2.2"},
			{V: "false"},
		}},
	}
	want := [][]Value{
		{"a", int64(1), 1.2, true},
		{"b", int64(2), 2.2, false},
	}
	got, err := convertRows(rows, schema)
	if err != nil {
		t.Fatalf("got %v, want nil", err)
	}
	if !reflect.DeepEqual(got, want) {
		t.Errorf("\ngot %v\nwant %v", got, want)
	}
}

func TestValueList(t *testing.T) {
	schema := Schema{
		{Name: "s", Type: StringFieldType},
		{Name: "i", Type: IntegerFieldType},
		{Name: "f", Type: FloatFieldType},
		{Name: "b", Type: BooleanFieldType},
	}
	want := []Value{"x", 7, 3.14, true}
	var got []Value
	vl := (*valueList)(&got)
	if err := vl.Load(want, schema); err != nil {
		t.Fatal(err)
	}

	if !reflect.DeepEqual(got, want) {
		t.Errorf("got %+v, want %+v", got, want)
	}

	// Load truncates, not appends.
	// https://github.com/GoogleCloudPlatform/google-cloud-go/issues/437
	if err := vl.Load(want, schema); err != nil {
		t.Fatal(err)
	}
	if !reflect.DeepEqual(got, want) {
		t.Errorf("got %+v, want %+v", got, want)
	}
}

func TestValueMap(t *testing.T) {
	ns := Schema{
		{Name: "x", Type: IntegerFieldType},
		{Name: "y", Type: IntegerFieldType},
	}
	schema := Schema{
		{Name: "s", Type: StringFieldType},
		{Name: "i", Type: IntegerFieldType},
		{Name: "f", Type: FloatFieldType},
		{Name: "b", Type: BooleanFieldType},
		{Name: "n", Type: RecordFieldType, Schema: ns},
		{Name: "rn", Type: RecordFieldType, Schema: ns, Repeated: true},
	}
	in := []Value{"x", 7, 3.14, true,
		[]Value{1, 2},
		[]Value{[]Value{3, 4}, []Value{5, 6}},
	}
	var vm valueMap
	if err := vm.Load(in, schema); err != nil {
		t.Fatal(err)
	}
	want := map[string]Value{
		"s": "x",
		"i": 7,
		"f": 3.14,
		"b": true,
		"n": map[string]Value{"x": 1, "y": 2},
		"rn": []Value{
			map[string]Value{"x": 3, "y": 4},
			map[string]Value{"x": 5, "y": 6},
		},
	}
	if !reflect.DeepEqual(vm, valueMap(want)) {
		t.Errorf("got\n%+v\nwant\n%+v", vm, want)
	}
}

var (
	// For testing StructLoader
	schema2 = Schema{
		{Name: "s", Type: StringFieldType},
		{Name: "s2", Type: StringFieldType},
		{Name: "by", Type: BytesFieldType},
		{Name: "I", Type: IntegerFieldType},
		{Name: "F", Type: FloatFieldType},
		{Name: "B", Type: BooleanFieldType},
		{Name: "TS", Type: TimestampFieldType},
		{Name: "D", Type: DateFieldType},
		{Name: "T", Type: TimeFieldType},
		{Name: "DT", Type: DateTimeFieldType},
		{Name: "nested", Type: RecordFieldType, Schema: Schema{
			{Name: "nestS", Type: StringFieldType},
			{Name: "nestI", Type: IntegerFieldType},
		}},
		{Name: "t", Type: StringFieldType},
	}

	testTimestamp = time.Date(2016, 11, 5, 7, 50, 22, 8, time.UTC)
	testDate      = civil.Date{2016, 11, 5}
	testTime      = civil.Time{7, 50, 22, 8}
	testDateTime  = civil.DateTime{testDate, testTime}

	testValues = []Value{"x", "y", []byte{1, 2, 3}, int64(7), 3.14, true,
		testTimestamp, testDate, testTime, testDateTime,
		[]Value{"nested", int64(17)}, "z"}
)

type testStruct1 struct {
	B bool
	I int
	times
	S      string
	S2     String
	By     []byte
	s      string
	F      float64
	Nested nested
	Tagged string `bigquery:"t"`
}

type String string

type nested struct {
	NestS string
	NestI int
}

type times struct {
	TS time.Time
	T  civil.Time
	D  civil.Date
	DT civil.DateTime
}

func TestStructLoader(t *testing.T) {
	var ts1 testStruct1
	if err := load(&ts1, schema2, testValues); err != nil {
		t.Fatal(err)
	}
	// Note: the schema field named "s" gets matched to the exported struct
	// field "S", not the unexported "s".
	want := &testStruct1{
		B:      true,
		I:      7,
		F:      3.14,
		times:  times{TS: testTimestamp, T: testTime, D: testDate, DT: testDateTime},
		S:      "x",
		S2:     "y",
		By:     []byte{1, 2, 3},
		Nested: nested{NestS: "nested", NestI: 17},
		Tagged: "z",
	}
	if !reflect.DeepEqual(&ts1, want) {
		t.Errorf("got %+v, want %+v", pretty.Value(ts1), pretty.Value(*want))
		d, _, err := pretty.Diff(*want, ts1)
		if err == nil {
			t.Logf("diff:\n%s", d)
		}
	}

	// Test pointers to nested structs.
	type nestedPtr struct{ Nested *nested }
	var np nestedPtr
	if err := load(&np, schema2, testValues); err != nil {
		t.Fatal(err)
	}
	want2 := &nestedPtr{Nested: &nested{NestS: "nested", NestI: 17}}
	if !reflect.DeepEqual(&np, want2) {
		t.Errorf("got %+v, want %+v", pretty.Value(np), pretty.Value(*want2))
	}

	// Existing values should be reused.
	nst := &nested{NestS: "x", NestI: -10}
	np = nestedPtr{Nested: nst}
	if err := load(&np, schema2, testValues); err != nil {
		t.Fatal(err)
	}
	if !reflect.DeepEqual(&np, want2) {
		t.Errorf("got %+v, want %+v", pretty.Value(np), pretty.Value(*want2))
	}
	if np.Nested != nst {
		t.Error("nested struct pointers not equal")
	}
}

type repStruct struct {
	Nums      []int
	ShortNums [2]int // to test truncation
	LongNums  [5]int // to test padding with zeroes
	Nested    []*nested
}

var (
	repSchema = Schema{
		{Name: "nums", Type: IntegerFieldType, Repeated: true},
		{Name: "shortNums", Type: IntegerFieldType, Repeated: true},
		{Name: "longNums", Type: IntegerFieldType, Repeated: true},
		{Name: "nested", Type: RecordFieldType, Repeated: true, Schema: Schema{
			{Name: "nestS", Type: StringFieldType},
			{Name: "nestI", Type: IntegerFieldType},
		}},
	}
	v123      = []Value{int64(1), int64(2), int64(3)}
	repValues = []Value{v123, v123, v123,
		[]Value{
			[]Value{"x", int64(1)},
			[]Value{"y", int64(2)},
		},
	}
)

func TestStructLoaderRepeated(t *testing.T) {
	var r1 repStruct
	if err := load(&r1, repSchema, repValues); err != nil {
		t.Fatal(err)
	}
	want := repStruct{
		Nums:      []int{1, 2, 3},
		ShortNums: [...]int{1, 2}, // extra values discarded
		LongNums:  [...]int{1, 2, 3, 0, 0},
		Nested:    []*nested{{"x", 1}, {"y", 2}},
	}
	if !reflect.DeepEqual(r1, want) {
		t.Errorf("got %+v, want %+v", pretty.Value(r1), pretty.Value(want))
	}

	r2 := repStruct{
		Nums:     []int{-1, -2, -3, -4, -5},    // truncated to zero and appended to
		LongNums: [...]int{-1, -2, -3, -4, -5}, // unset elements are zeroed
	}
	if err := load(&r2, repSchema, repValues); err != nil {
		t.Fatal(err)
	}
	if !reflect.DeepEqual(r2, want) {
		t.Errorf("got %+v, want %+v", pretty.Value(r2), pretty.Value(want))
	}
	if got, want := cap(r2.Nums), 5; got != want {
		t.Errorf("cap(r2.Nums) = %d, want %d", got, want)
	}

	// Short slice case.
	r3 := repStruct{Nums: []int{-1}}
	if err := load(&r3, repSchema, repValues); err != nil {
		t.Fatal(err)
	}
	if !reflect.DeepEqual(r3, want) {
		t.Errorf("got %+v, want %+v", pretty.Value(r3), pretty.Value(want))
	}
	if got, want := cap(r3.Nums), 3; got != want {
		t.Errorf("cap(r3.Nums) = %d, want %d", got, want)
	}
}

func TestStructLoaderOverflow(t *testing.T) {
	type S struct {
		I int16
		F float32
	}
	schema := Schema{
		{Name: "I", Type: IntegerFieldType},
		{Name: "F", Type: FloatFieldType},
	}
	var s S
	if err := load(&s, schema, []Value{int64(math.MaxInt16 + 1), 0}); err == nil {
		t.Error("int: got nil, want error")
	}
	if err := load(&s, schema, []Value{int64(0), math.MaxFloat32 * 2}); err == nil {
		t.Error("float: got nil, want error")
	}
}

func TestStructLoaderFieldOverlap(t *testing.T) {
	// It's OK if the struct has fields that the schema does not, and vice versa.
	type S1 struct {
		I int
		X [][]int // not in the schema; does not even correspond to a valid BigQuery type
		// many schema fields missing
	}
	var s1 S1
	if err := load(&s1, schema2, testValues); err != nil {
		t.Fatal(err)
	}
	want1 := S1{I: 7}
	if !reflect.DeepEqual(s1, want1) {
		t.Errorf("got %+v, want %+v", pretty.Value(s1), pretty.Value(want1))
	}

	// It's even valid to have no overlapping fields at all.
	type S2 struct{ Z int }

	var s2 S2
	if err := load(&s2, schema2, testValues); err != nil {
		t.Fatal(err)
	}
	want2 := S2{}
	if !reflect.DeepEqual(s2, want2) {
		t.Errorf("got %+v, want %+v", pretty.Value(s2), pretty.Value(want2))
	}
}

func TestStructLoaderErrors(t *testing.T) {
	check := func(sp interface{}) {
		var sl structLoader
		err := sl.set(sp, schema2)
		if err == nil {
			t.Errorf("%T: got nil, want error", sp)
		}
	}

	type bad1 struct{ F int32 } // wrong type for FLOAT column
	check(&bad1{})

	type bad2 struct{ I uint } // unsupported integer type
	check(&bad2{})

	// Using more than one struct type with the same structLoader.
	type different struct {
		B bool
		I int
		times
		S    string
		s    string
		Nums []int
	}

	var sl structLoader
	if err := sl.set(&testStruct1{}, schema2); err != nil {
		t.Fatal(err)
	}
	err := sl.set(&different{}, schema2)
	if err == nil {
		t.Error("different struct types: got nil, want error")
	}
}

func load(pval interface{}, schema Schema, vals []Value) error {
	var sl structLoader
	if err := sl.set(pval, schema); err != nil {
		return err
	}
	return sl.Load(vals, nil)
}

func BenchmarkStructLoader_NoCompile(b *testing.B) {
	benchmarkStructLoader(b, false)
}

func BenchmarkStructLoader_Compile(b *testing.B) {
	benchmarkStructLoader(b, true)
}

func benchmarkStructLoader(b *testing.B, compile bool) {
	var ts1 testStruct1
	for i := 0; i < b.N; i++ {
		var sl structLoader
		for j := 0; j < 10; j++ {
			if err := load(&ts1, schema2, testValues); err != nil {
				b.Fatal(err)
			}
			if !compile {
				sl.typ = nil
			}
		}
	}
}
335 vendor/cloud.google.com/go/bigtable/admin.go generated vendored Normal file
@@ -0,0 +1,335 @@
/*
Copyright 2015 Google Inc. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package bigtable

import (
	"fmt"
	"regexp"
	"strings"

	btopt "cloud.google.com/go/bigtable/internal/option"
	"cloud.google.com/go/longrunning"
	"golang.org/x/net/context"
	"google.golang.org/api/option"
	"google.golang.org/api/transport"
	btapb "google.golang.org/genproto/googleapis/bigtable/admin/v2"
	"google.golang.org/grpc"
	"google.golang.org/grpc/metadata"
)

const adminAddr = "bigtableadmin.googleapis.com:443"

// AdminClient is a client type for performing admin operations within a specific instance.
type AdminClient struct {
	conn    *grpc.ClientConn
	tClient btapb.BigtableTableAdminClient

	project, instance string

	// Metadata to be sent with each request.
	md metadata.MD
}

// NewAdminClient creates a new AdminClient for a given project and instance.
func NewAdminClient(ctx context.Context, project, instance string, opts ...option.ClientOption) (*AdminClient, error) {
	o, err := btopt.DefaultClientOptions(adminAddr, AdminScope, clientUserAgent)
	if err != nil {
		return nil, err
	}
	o = append(o, opts...)
	conn, err := transport.DialGRPC(ctx, o...)
	if err != nil {
		return nil, fmt.Errorf("dialing: %v", err)
	}
	return &AdminClient{
		conn:     conn,
		tClient:  btapb.NewBigtableTableAdminClient(conn),
		project:  project,
		instance: instance,
		md:       metadata.Pairs(resourcePrefixHeader, fmt.Sprintf("projects/%s/instances/%s", project, instance)),
	}, nil
}

// Close closes the AdminClient.
func (ac *AdminClient) Close() error {
	return ac.conn.Close()
}

func (ac *AdminClient) instancePrefix() string {
	return fmt.Sprintf("projects/%s/instances/%s", ac.project, ac.instance)
}

// Tables returns a list of the tables in the instance.
func (ac *AdminClient) Tables(ctx context.Context) ([]string, error) {
	ctx = mergeMetadata(ctx, ac.md)
	prefix := ac.instancePrefix()
	req := &btapb.ListTablesRequest{
		Parent: prefix,
	}
	res, err := ac.tClient.ListTables(ctx, req)
	if err != nil {
		return nil, err
	}
	names := make([]string, 0, len(res.Tables))
	for _, tbl := range res.Tables {
		names = append(names, strings.TrimPrefix(tbl.Name, prefix+"/tables/"))
	}
	return names, nil
}
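
A minimal sketch of this table-admin API from a caller's side; the project, instance, and table names are placeholders, and credential setup is elided:

	package main

	import (
		"log"

		"cloud.google.com/go/bigtable"
		"golang.org/x/net/context"
	)

	func main() {
		ctx := context.Background()
		ac, err := bigtable.NewAdminClient(ctx, "my-project", "my-instance")
		if err != nil {
			log.Fatalf("NewAdminClient: %v", err)
		}
		defer ac.Close()

		if err := ac.CreateTable(ctx, "mytable"); err != nil {
			log.Fatalf("CreateTable: %v", err)
		}
		tbls, err := ac.Tables(ctx)
		if err != nil {
			log.Fatalf("Tables: %v", err)
		}
		log.Println(tbls)
	}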

// CreateTable creates a new table in the instance.
// This method may return before the table's creation is complete.
func (ac *AdminClient) CreateTable(ctx context.Context, table string) error {
	ctx = mergeMetadata(ctx, ac.md)
	prefix := ac.instancePrefix()
	req := &btapb.CreateTableRequest{
		Parent:  prefix,
		TableId: table,
	}
	_, err := ac.tClient.CreateTable(ctx, req)
	return err
}

// CreatePresplitTable creates a new table in the instance.
// The list of row keys will be used to initially split the table into multiple tablets.
// Given two split keys, "s1" and "s2", three tablets will be created,
// spanning the key ranges: [, s1), [s1, s2), [s2, ).
// This method may return before the table's creation is complete.
func (ac *AdminClient) CreatePresplitTable(ctx context.Context, table string, split_keys []string) error {
	var req_splits []*btapb.CreateTableRequest_Split
	for _, split := range split_keys {
		req_splits = append(req_splits, &btapb.CreateTableRequest_Split{[]byte(split)})
	}
	ctx = mergeMetadata(ctx, ac.md)
	prefix := ac.instancePrefix()
	req := &btapb.CreateTableRequest{
		Parent:        prefix,
		TableId:       table,
		InitialSplits: req_splits,
	}
	_, err := ac.tClient.CreateTable(ctx, req)
	return err
}

// CreateColumnFamily creates a new column family in a table.
func (ac *AdminClient) CreateColumnFamily(ctx context.Context, table, family string) error {
	// TODO(dsymonds): Permit specifying gcexpr and any other family settings.
	ctx = mergeMetadata(ctx, ac.md)
	prefix := ac.instancePrefix()
	req := &btapb.ModifyColumnFamiliesRequest{
		Name: prefix + "/tables/" + table,
		Modifications: []*btapb.ModifyColumnFamiliesRequest_Modification{{
			Id:  family,
			Mod: &btapb.ModifyColumnFamiliesRequest_Modification_Create{&btapb.ColumnFamily{}},
		}},
	}
	_, err := ac.tClient.ModifyColumnFamilies(ctx, req)
	return err
}

// DeleteTable deletes a table and all of its data.
func (ac *AdminClient) DeleteTable(ctx context.Context, table string) error {
	ctx = mergeMetadata(ctx, ac.md)
	prefix := ac.instancePrefix()
	req := &btapb.DeleteTableRequest{
		Name: prefix + "/tables/" + table,
	}
	_, err := ac.tClient.DeleteTable(ctx, req)
	return err
}

// DeleteColumnFamily deletes a column family in a table and all of its data.
func (ac *AdminClient) DeleteColumnFamily(ctx context.Context, table, family string) error {
	ctx = mergeMetadata(ctx, ac.md)
	prefix := ac.instancePrefix()
	req := &btapb.ModifyColumnFamiliesRequest{
		Name: prefix + "/tables/" + table,
		Modifications: []*btapb.ModifyColumnFamiliesRequest_Modification{{
			Id:  family,
			Mod: &btapb.ModifyColumnFamiliesRequest_Modification_Drop{true},
		}},
	}
	_, err := ac.tClient.ModifyColumnFamilies(ctx, req)
	return err
}

// TableInfo represents information about a table.
type TableInfo struct {
	Families []string
}

// TableInfo retrieves information about a table.
func (ac *AdminClient) TableInfo(ctx context.Context, table string) (*TableInfo, error) {
	ctx = mergeMetadata(ctx, ac.md)
	prefix := ac.instancePrefix()
	req := &btapb.GetTableRequest{
		Name: prefix + "/tables/" + table,
	}
	res, err := ac.tClient.GetTable(ctx, req)
	if err != nil {
		return nil, err
	}
	ti := &TableInfo{}
	for fam := range res.ColumnFamilies {
		ti.Families = append(ti.Families, fam)
	}
	return ti, nil
}

// SetGCPolicy specifies which cells in a column family should be garbage collected.
// GC executes opportunistically in the background; table reads may return data
// matching the GC policy.
func (ac *AdminClient) SetGCPolicy(ctx context.Context, table, family string, policy GCPolicy) error {
	ctx = mergeMetadata(ctx, ac.md)
	prefix := ac.instancePrefix()
	req := &btapb.ModifyColumnFamiliesRequest{
		Name: prefix + "/tables/" + table,
		Modifications: []*btapb.ModifyColumnFamiliesRequest_Modification{{
			Id:  family,
			Mod: &btapb.ModifyColumnFamiliesRequest_Modification_Update{&btapb.ColumnFamily{GcRule: policy.proto()}},
		}},
	}
	_, err := ac.tClient.ModifyColumnFamilies(ctx, req)
	return err
}

const instanceAdminAddr = "bigtableadmin.googleapis.com:443"

// InstanceAdminClient is a client type for performing admin operations on instances.
// These operations can be substantially more dangerous than those provided by AdminClient.
type InstanceAdminClient struct {
	conn    *grpc.ClientConn
	iClient btapb.BigtableInstanceAdminClient

	project string

	// Metadata to be sent with each request.
	md metadata.MD
}

// NewInstanceAdminClient creates a new InstanceAdminClient for a given project.
func NewInstanceAdminClient(ctx context.Context, project string, opts ...option.ClientOption) (*InstanceAdminClient, error) {
	o, err := btopt.DefaultClientOptions(instanceAdminAddr, InstanceAdminScope, clientUserAgent)
	if err != nil {
		return nil, err
	}
	o = append(o, opts...)
	conn, err := transport.DialGRPC(ctx, o...)
	if err != nil {
		return nil, fmt.Errorf("dialing: %v", err)
	}
	return &InstanceAdminClient{
		conn:    conn,
		iClient: btapb.NewBigtableInstanceAdminClient(conn),

		project: project,
		md:      metadata.Pairs(resourcePrefixHeader, "projects/"+project),
	}, nil
}

// Close closes the InstanceAdminClient.
func (iac *InstanceAdminClient) Close() error {
	return iac.conn.Close()
}

// StorageType is the type of storage used for all tables in an instance
type StorageType int

const (
	SSD StorageType = iota
	HDD
)

func (st StorageType) proto() btapb.StorageType {
	if st == HDD {
		return btapb.StorageType_HDD
	}
	return btapb.StorageType_SSD
}

// InstanceInfo represents information about an instance
type InstanceInfo struct {
	Name        string // name of the instance
	DisplayName string // display name for UIs
}

// InstanceConf contains the information necessary to create an Instance
type InstanceConf struct {
	InstanceId, DisplayName, ClusterId, Zone string
	NumNodes                                 int32
	StorageType                              StorageType
}

var instanceNameRegexp = regexp.MustCompile(`^projects/([^/]+)/instances/([a-z][-a-z0-9]*)$`)

// CreateInstance creates a new instance in the project.
// This method will return when the instance has been created or when an error occurs.
func (iac *InstanceAdminClient) CreateInstance(ctx context.Context, conf *InstanceConf) error {
	ctx = mergeMetadata(ctx, iac.md)
	req := &btapb.CreateInstanceRequest{
		Parent:     "projects/" + iac.project,
		InstanceId: conf.InstanceId,
		Instance:   &btapb.Instance{DisplayName: conf.DisplayName},
		Clusters: map[string]*btapb.Cluster{
			conf.ClusterId: {
				ServeNodes:         conf.NumNodes,
				DefaultStorageType: conf.StorageType.proto(),
				Location:           "projects/" + iac.project + "/locations/" + conf.Zone,
			},
		},
	}

	lro, err := iac.iClient.CreateInstance(ctx, req)
	if err != nil {
		return err
	}
	resp := btapb.Instance{}
	return longrunning.InternalNewOperation(iac.conn, lro).Wait(ctx, &resp)
}
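
A minimal sketch of creating an instance with the client above; every identifier below is a placeholder, and CreateInstance blocks until the long-running operation completes:

	package main

	import (
		"log"

		"cloud.google.com/go/bigtable"
		"golang.org/x/net/context"
	)

	func main() {
		ctx := context.Background()
		iac, err := bigtable.NewInstanceAdminClient(ctx, "my-project")
		if err != nil {
			log.Fatalf("NewInstanceAdminClient: %v", err)
		}
		defer iac.Close()

		err = iac.CreateInstance(ctx, &bigtable.InstanceConf{
			InstanceId:  "my-instance",
			DisplayName: "My Instance",
			ClusterId:   "my-instance-c1",
			Zone:        "us-central1-b",
			NumNodes:    3,
			StorageType: bigtable.SSD,
		})
		if err != nil {
			log.Fatalf("CreateInstance: %v", err)
		}
	}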

// DeleteInstance deletes an instance from the project.
func (iac *InstanceAdminClient) DeleteInstance(ctx context.Context, instanceId string) error {
	ctx = mergeMetadata(ctx, iac.md)
	req := &btapb.DeleteInstanceRequest{"projects/" + iac.project + "/instances/" + instanceId}
	_, err := iac.iClient.DeleteInstance(ctx, req)
	return err
}

// Instances returns a list of instances in the project.
func (iac *InstanceAdminClient) Instances(ctx context.Context) ([]*InstanceInfo, error) {
	ctx = mergeMetadata(ctx, iac.md)
	req := &btapb.ListInstancesRequest{
		Parent: "projects/" + iac.project,
	}
	res, err := iac.iClient.ListInstances(ctx, req)
	if err != nil {
		return nil, err
	}

	var is []*InstanceInfo
	for _, i := range res.Instances {
		m := instanceNameRegexp.FindStringSubmatch(i.Name)
		if m == nil {
			return nil, fmt.Errorf("malformed instance name %q", i.Name)
		}
		is = append(is, &InstanceInfo{
			Name:        m[2],
			DisplayName: i.DisplayName,
		})
	}
	return is, nil
}
91 vendor/cloud.google.com/go/bigtable/admin_test.go generated vendored Normal file
@@ -0,0 +1,91 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package bigtable

import (
	"sort"
	"testing"
	"time"

	"golang.org/x/net/context"
)

func TestAdminIntegration(t *testing.T) {
	testEnv, err := NewIntegrationEnv()
	if err != nil {
		t.Fatalf("IntegrationEnv: %v", err)
	}
	defer testEnv.Close()

	timeout := 2 * time.Second
	if testEnv.Config().UseProd {
		timeout = 5 * time.Minute
	}
	ctx, _ := context.WithTimeout(context.Background(), timeout)

	adminClient, err := testEnv.NewAdminClient()
	if err != nil {
		t.Fatalf("NewAdminClient: %v", err)
	}
	defer adminClient.Close()

	list := func() []string {
		tbls, err := adminClient.Tables(ctx)
		if err != nil {
			t.Fatalf("Fetching list of tables: %v", err)
		}
		sort.Strings(tbls)
		return tbls
	}
	containsAll := func(got, want []string) bool {
		gotSet := make(map[string]bool)

		for _, s := range got {
			gotSet[s] = true
		}
		for _, s := range want {
			if !gotSet[s] {
				return false
			}
		}
		return true
	}

	defer adminClient.DeleteTable(ctx, "mytable")

	if err := adminClient.CreateTable(ctx, "mytable"); err != nil {
		t.Fatalf("Creating table: %v", err)
	}

	defer adminClient.DeleteTable(ctx, "myothertable")

	if err := adminClient.CreateTable(ctx, "myothertable"); err != nil {
		t.Fatalf("Creating table: %v", err)
	}

	if got, want := list(), []string{"myothertable", "mytable"}; !containsAll(got, want) {
		t.Errorf("adminClient.Tables returned %#v, want %#v", got, want)
	}
	if err := adminClient.DeleteTable(ctx, "myothertable"); err != nil {
		t.Fatalf("Deleting table: %v", err)
	}
	tables := list()
	if got, want := tables, []string{"mytable"}; !containsAll(got, want) {
		t.Errorf("adminClient.Tables returned %#v, want %#v", got, want)
	}
	if got, unwanted := tables, []string{"myothertable"}; containsAll(got, unwanted) {
		t.Errorf("adminClient.Tables returned %#v, unwanted %#v", got, unwanted)
	}
}
735 vendor/cloud.google.com/go/bigtable/bigtable.go generated vendored Normal file
@ -0,0 +1,735 @@
/*
Copyright 2015 Google Inc. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package bigtable // import "cloud.google.com/go/bigtable"

import (
	"errors"
	"fmt"
	"io"
	"strconv"
	"time"

	"cloud.google.com/go/bigtable/internal/gax"
	btopt "cloud.google.com/go/bigtable/internal/option"
	"github.com/golang/protobuf/proto"
	"golang.org/x/net/context"
	"google.golang.org/api/option"
	"google.golang.org/api/transport"
	btpb "google.golang.org/genproto/googleapis/bigtable/v2"
	"google.golang.org/grpc"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/metadata"
)

const prodAddr = "bigtable.googleapis.com:443"

// Client is a client for reading and writing data to tables in an instance.
//
// A Client is safe to use concurrently, except for its Close method.
type Client struct {
	conn              *grpc.ClientConn
	client            btpb.BigtableClient
	project, instance string
}

// NewClient creates a new Client for a given project and instance.
func NewClient(ctx context.Context, project, instance string, opts ...option.ClientOption) (*Client, error) {
	o, err := btopt.DefaultClientOptions(prodAddr, Scope, clientUserAgent)
	if err != nil {
		return nil, err
	}
	// Default to a small connection pool that can be overridden.
	o = append(o, option.WithGRPCConnectionPool(4))
	o = append(o, opts...)
	conn, err := transport.DialGRPC(ctx, o...)
	if err != nil {
		return nil, fmt.Errorf("dialing: %v", err)
	}
	return &Client{
		conn:     conn,
		client:   btpb.NewBigtableClient(conn),
		project:  project,
		instance: instance,
	}, nil
}

// Close closes the Client.
func (c *Client) Close() error {
	return c.conn.Close()
}

var (
	idempotentRetryCodes  = []codes.Code{codes.DeadlineExceeded, codes.Unavailable, codes.Aborted, codes.Internal}
	isIdempotentRetryCode = make(map[codes.Code]bool)
	retryOptions          = []gax.CallOption{
		gax.WithDelayTimeoutSettings(100*time.Millisecond, 2000*time.Millisecond, 1.2),
		gax.WithRetryCodes(idempotentRetryCodes),
	}
)

func init() {
	for _, code := range idempotentRetryCodes {
		isIdempotentRetryCode[code] = true
	}
}

func (c *Client) fullTableName(table string) string {
	return fmt.Sprintf("projects/%s/instances/%s/tables/%s", c.project, c.instance, table)
}

// A Table refers to a table.
//
// A Table is safe to use concurrently.
type Table struct {
	c     *Client
	table string

	// Metadata to be sent with each request.
	md metadata.MD
}

// Open opens a table.
func (c *Client) Open(table string) *Table {
	return &Table{
		c:     c,
		table: table,
		md:    metadata.Pairs(resourcePrefixHeader, c.fullTableName(table)),
	}
}

// TODO(dsymonds): Read method that returns a sequence of ReadItems.

// ReadRows reads rows from a table. f is called for each row.
// If f returns false, the stream is shut down and ReadRows returns.
// f owns its argument, and f is called serially in order by row key.
//
// By default, the yielded rows will contain all values in all cells.
// Use RowFilter to limit the cells returned.
func (t *Table) ReadRows(ctx context.Context, arg RowSet, f func(Row) bool, opts ...ReadOption) error {
	ctx = mergeMetadata(ctx, t.md)

	var prevRowKey string
	err := gax.Invoke(ctx, func(ctx context.Context) error {
		req := &btpb.ReadRowsRequest{
			TableName: t.c.fullTableName(t.table),
			Rows:      arg.proto(),
		}
		for _, opt := range opts {
			opt.set(req)
		}
		ctx, cancel := context.WithCancel(ctx) // for aborting the stream
		defer cancel()

		stream, err := t.c.client.ReadRows(ctx, req)
		if err != nil {
			return err
		}
		cr := newChunkReader()
		for {
			res, err := stream.Recv()
			if err == io.EOF {
				break
			}
			if err != nil {
				// Reset arg for next Invoke call.
				arg = arg.retainRowsAfter(prevRowKey)
				return err
			}

			for _, cc := range res.Chunks {
				row, err := cr.Process(cc)
				if err != nil {
					// No need to prepare for a retry, this is an unretryable error.
					return err
				}
				if row == nil {
					continue
				}
				prevRowKey = row.Key()
				if !f(row) {
					// Cancel and drain stream.
					cancel()
					for {
						if _, err := stream.Recv(); err != nil {
							// The stream has ended. We don't return an error
							// because the caller has intentionally interrupted the scan.
							return nil
						}
					}
				}
			}
			if err := cr.Close(); err != nil {
				// No need to prepare for a retry, this is an unretryable error.
				return err
			}
		}
		return err
	}, retryOptions...)

	return err
}

// ReadRow is a convenience implementation of a single-row reader.
// A missing row will return a zero-length map and a nil error.
func (t *Table) ReadRow(ctx context.Context, row string, opts ...ReadOption) (Row, error) {
	var r Row
	err := t.ReadRows(ctx, SingleRow(row), func(rr Row) bool {
		r = rr
		return true
	}, opts...)
	return r, err
}
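
// A minimal sketch tying the pieces above together: dial a client, open a
// table, read a single row, then scan a range until a condition is met.
// The project, instance and table names are illustrative assumptions.
func exampleReads(ctx context.Context) error {
	client, err := NewClient(ctx, "my-project", "my-instance")
	if err != nil {
		return err
	}
	defer client.Close()
	tbl := client.Open("my-table")

	// Single-row read; a missing row yields an empty Row and a nil error.
	row, err := tbl.ReadRow(ctx, "com.example/index")
	if err != nil {
		return err
	}
	fmt.Printf("read %d families\n", len(row))

	// Range scan; returning false from the callback stops the scan early.
	count := 0
	return tbl.ReadRows(ctx, PrefixRange("com.example/"), func(r Row) bool {
		count++
		return count < 100 // stop after 100 rows
	})
}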

// decodeFamilyProto adds the cell data from f to the given row.
func decodeFamilyProto(r Row, row string, f *btpb.Family) {
	fam := f.Name // does not have colon
	for _, col := range f.Columns {
		for _, cell := range col.Cells {
			ri := ReadItem{
				Row:       row,
				Column:    fam + ":" + string(col.Qualifier),
				Timestamp: Timestamp(cell.TimestampMicros),
				Value:     cell.Value,
			}
			r[fam] = append(r[fam], ri)
		}
	}
}

// RowSet is a set of rows to be read. It is satisfied by RowList and RowRange.
type RowSet interface {
	proto() *btpb.RowSet

	// retainRowsAfter returns a new RowSet that does not include the
	// given row key or any row key lexicographically less than it.
	retainRowsAfter(lastRowKey string) RowSet
}

// RowList is a sequence of row keys.
type RowList []string

func (r RowList) proto() *btpb.RowSet {
	keys := make([][]byte, len(r))
	for i, row := range r {
		keys[i] = []byte(row)
	}
	return &btpb.RowSet{RowKeys: keys}
}

func (r RowList) retainRowsAfter(lastRowKey string) RowSet {
	var retryKeys RowList
	for _, key := range r {
		if key > lastRowKey {
			retryKeys = append(retryKeys, key)
		}
	}
	return retryKeys
}

// A RowRange is a half-open interval [Start, Limit) encompassing
// all the rows with keys at least as large as Start, and less than Limit.
// (Bigtable string comparison is the same as Go's.)
// A RowRange can be unbounded, encompassing all keys at least as large as Start.
type RowRange struct {
	start string
	limit string
}

// NewRange returns the new RowRange [begin, end).
func NewRange(begin, end string) RowRange {
	return RowRange{
		start: begin,
		limit: end,
	}
}

// Unbounded tests whether a RowRange is unbounded.
func (r RowRange) Unbounded() bool {
	return r.limit == ""
}

// Contains says whether the RowRange contains the key.
func (r RowRange) Contains(row string) bool {
	return r.start <= row && (r.limit == "" || r.limit > row)
}

// String provides a printable description of a RowRange.
func (r RowRange) String() string {
	a := strconv.Quote(r.start)
	if r.Unbounded() {
		return fmt.Sprintf("[%s,∞)", a)
	}
	return fmt.Sprintf("[%s,%q)", a, r.limit)
}

func (r RowRange) proto() *btpb.RowSet {
	rr := &btpb.RowRange{
		StartKey: &btpb.RowRange_StartKeyClosed{[]byte(r.start)},
	}
	if !r.Unbounded() {
		rr.EndKey = &btpb.RowRange_EndKeyOpen{[]byte(r.limit)}
	}
	return &btpb.RowSet{RowRanges: []*btpb.RowRange{rr}}
}

func (r RowRange) retainRowsAfter(lastRowKey string) RowSet {
	if lastRowKey == "" {
		return r
	}
	// Set the beginning of the range to the row after the last scanned.
	start := lastRowKey + "\x00"
	if r.Unbounded() {
		return InfiniteRange(start)
	}
	return NewRange(start, r.limit)
}

// SingleRow returns a RowSet for reading a single row.
func SingleRow(row string) RowSet {
	return RowList{row}
}

// PrefixRange returns a RowRange consisting of all keys starting with the prefix.
func PrefixRange(prefix string) RowRange {
	return RowRange{
		start: prefix,
		limit: prefixSuccessor(prefix),
	}
}

// InfiniteRange returns the RowRange consisting of all keys at least as
// large as start.
func InfiniteRange(start string) RowRange {
	return RowRange{
		start: start,
		limit: "",
	}
}
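
// A few concrete values, as a quick illustration of the half-open semantics
// above (row keys are compared as Go strings):
//
//	NewRange("a", "c").Contains("a")  // true  (start is inclusive)
//	NewRange("a", "c").Contains("c")  // false (limit is exclusive)
//	PrefixRange("ab").Contains("ab9") // true  (the limit is "ac")
//	InfiniteRange("x").Unbounded()    // true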

// prefixSuccessor returns the lexically smallest string greater than the
// prefix, if it exists, or "" otherwise. In either case, it is the string
// needed for the Limit of a RowRange.
func prefixSuccessor(prefix string) string {
	if prefix == "" {
		return "" // infinite range
	}
	n := len(prefix)
	for n--; n >= 0 && prefix[n] == '\xff'; n-- {
	}
	if n == -1 {
		return ""
	}
	ans := []byte(prefix[:n])
	ans = append(ans, prefix[n]+1)
	return string(ans)
}
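
// Worked examples (these mirror the cases exercised in bigtable_test.go):
// trailing 0xff bytes are dropped, then the last remaining byte is incremented.
//
//	prefixSuccessor("x")     == "y"
//	prefixSuccessor("x\xff") == "y"
//	prefixSuccessor("\xfe")  == "\xff"
//	prefixSuccessor("\xff")  == "" // no successor: the range is unbounded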

// A ReadOption is an optional argument to ReadRows.
type ReadOption interface {
	set(req *btpb.ReadRowsRequest)
}

// RowFilter returns a ReadOption that applies f to the contents of read rows.
func RowFilter(f Filter) ReadOption { return rowFilter{f} }

type rowFilter struct{ f Filter }

func (rf rowFilter) set(req *btpb.ReadRowsRequest) { req.Filter = rf.f.proto() }

// LimitRows returns a ReadOption that will limit the number of rows to be read.
func LimitRows(limit int64) ReadOption { return limitRows{limit} }

type limitRows struct{ limit int64 }

func (lr limitRows) set(req *btpb.ReadRowsRequest) { req.RowsLimit = lr.limit }

// mutationsAreRetryable returns true if all mutations are idempotent
// and therefore retryable. A mutation is idempotent iff all cell timestamps
// have an explicit timestamp set and do not rely on the timestamp being set on the server.
func mutationsAreRetryable(muts []*btpb.Mutation) bool {
	serverTime := int64(ServerTime)
	for _, mut := range muts {
		setCell := mut.GetSetCell()
		if setCell != nil && setCell.TimestampMicros == serverTime {
			return false
		}
	}
	return true
}

// Apply applies a Mutation to a specific row.
func (t *Table) Apply(ctx context.Context, row string, m *Mutation, opts ...ApplyOption) error {
	ctx = mergeMetadata(ctx, t.md)
	after := func(res proto.Message) {
		for _, o := range opts {
			o.after(res)
		}
	}

	var callOptions []gax.CallOption
	if m.cond == nil {
		req := &btpb.MutateRowRequest{
			TableName: t.c.fullTableName(t.table),
			RowKey:    []byte(row),
			Mutations: m.ops,
		}
		if mutationsAreRetryable(m.ops) {
			callOptions = retryOptions
		}
		var res *btpb.MutateRowResponse
		err := gax.Invoke(ctx, func(ctx context.Context) error {
			var err error
			res, err = t.c.client.MutateRow(ctx, req)
			return err
		}, callOptions...)
		if err == nil {
			after(res)
		}
		return err
	}

	req := &btpb.CheckAndMutateRowRequest{
		TableName:       t.c.fullTableName(t.table),
		RowKey:          []byte(row),
		PredicateFilter: m.cond.proto(),
	}
	if m.mtrue != nil {
		req.TrueMutations = m.mtrue.ops
	}
	if m.mfalse != nil {
		req.FalseMutations = m.mfalse.ops
	}
	if mutationsAreRetryable(req.TrueMutations) && mutationsAreRetryable(req.FalseMutations) {
		callOptions = retryOptions
	}
	var cmRes *btpb.CheckAndMutateRowResponse
	err := gax.Invoke(ctx, func(ctx context.Context) error {
		var err error
		cmRes, err = t.c.client.CheckAndMutateRow(ctx, req)
		return err
	}, callOptions...)
	if err == nil {
		after(cmRes)
	}
	return err
}

// An ApplyOption is an optional argument to Apply.
type ApplyOption interface {
	after(res proto.Message)
}

type applyAfterFunc func(res proto.Message)

func (a applyAfterFunc) after(res proto.Message) { a(res) }

// GetCondMutationResult returns an ApplyOption that reports whether the conditional
// mutation's condition matched.
func GetCondMutationResult(matched *bool) ApplyOption {
	return applyAfterFunc(func(res proto.Message) {
		if res, ok := res.(*btpb.CheckAndMutateRowResponse); ok {
			*matched = res.PredicateMatched
		}
	})
}

// Mutation represents a set of changes for a single row of a table.
type Mutation struct {
	ops []*btpb.Mutation

	// for conditional mutations
	cond          Filter
	mtrue, mfalse *Mutation
}

// NewMutation returns a new mutation.
func NewMutation() *Mutation {
	return new(Mutation)
}

// NewCondMutation returns a conditional mutation.
// The given row filter determines which mutation is applied:
// If the filter matches any cell in the row, mtrue is applied;
// otherwise, mfalse is applied.
// Either given mutation may be nil.
func NewCondMutation(cond Filter, mtrue, mfalse *Mutation) *Mutation {
	return &Mutation{cond: cond, mtrue: mtrue, mfalse: mfalse}
}

// Set sets a value in a specified column, with the given timestamp.
// The timestamp will be truncated to millisecond granularity.
// A timestamp of ServerTime means to use the server timestamp.
func (m *Mutation) Set(family, column string, ts Timestamp, value []byte) {
	m.ops = append(m.ops, &btpb.Mutation{Mutation: &btpb.Mutation_SetCell_{&btpb.Mutation_SetCell{
		FamilyName:      family,
		ColumnQualifier: []byte(column),
		TimestampMicros: int64(ts.TruncateToMilliseconds()),
		Value:           value,
	}}})
}

// DeleteCellsInColumn will delete all the cells whose columns are family:column.
func (m *Mutation) DeleteCellsInColumn(family, column string) {
	m.ops = append(m.ops, &btpb.Mutation{Mutation: &btpb.Mutation_DeleteFromColumn_{&btpb.Mutation_DeleteFromColumn{
		FamilyName:      family,
		ColumnQualifier: []byte(column),
	}}})
}

// DeleteTimestampRange deletes all cells whose columns are family:column
// and whose timestamps are in the half-open interval [start, end).
// If end is zero, it will be interpreted as infinity.
// The timestamps will be truncated to millisecond granularity.
func (m *Mutation) DeleteTimestampRange(family, column string, start, end Timestamp) {
	m.ops = append(m.ops, &btpb.Mutation{Mutation: &btpb.Mutation_DeleteFromColumn_{&btpb.Mutation_DeleteFromColumn{
		FamilyName:      family,
		ColumnQualifier: []byte(column),
		TimeRange: &btpb.TimestampRange{
			StartTimestampMicros: int64(start.TruncateToMilliseconds()),
			EndTimestampMicros:   int64(end.TruncateToMilliseconds()),
		},
	}}})
}

// DeleteCellsInFamily will delete all the cells whose columns are family:*.
func (m *Mutation) DeleteCellsInFamily(family string) {
	m.ops = append(m.ops, &btpb.Mutation{Mutation: &btpb.Mutation_DeleteFromFamily_{&btpb.Mutation_DeleteFromFamily{
		FamilyName: family,
	}}})
}

// DeleteRow deletes the entire row.
func (m *Mutation) DeleteRow() {
	m.ops = append(m.ops, &btpb.Mutation{Mutation: &btpb.Mutation_DeleteFromRow_{&btpb.Mutation_DeleteFromRow{}}})
}
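
// A minimal sketch of plain and conditional mutations using the API above.
// The family and column names are illustrative assumptions; ColumnFilter is
// defined elsewhere in this package.
func exampleMutations(ctx context.Context, tbl *Table) error {
	// Unconditional write: set a cell with an explicit timestamp so the
	// mutation stays idempotent (and hence retryable).
	mut := NewMutation()
	mut.Set("follows", "jadams", Now(), []byte("1"))
	if err := tbl.Apply(ctx, "gwashington", mut); err != nil {
		return err
	}

	// Conditional write: apply mut only if the filter matches a cell in the
	// row, and report whether the condition matched.
	var matched bool
	cond := NewCondMutation(ColumnFilter("jadams"), mut, nil)
	if err := tbl.Apply(ctx, "gwashington", cond, GetCondMutationResult(&matched)); err != nil {
		return err
	}
	fmt.Printf("condition matched: %v\n", matched)
	return nil
}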

// entryErr is a container that combines an entry with the error that was returned for it.
// Err may be nil if no error was returned for the Entry, or if the Entry has not yet been processed.
type entryErr struct {
	Entry *btpb.MutateRowsRequest_Entry
	Err   error
}

// ApplyBulk applies multiple Mutations.
// Each mutation is individually applied atomically,
// but the set of mutations may be applied in any order.
//
// Two types of failures may occur. If the entire process
// fails, (nil, err) will be returned. If specific mutations
// fail to apply, ([]err, nil) will be returned, and the errors
// will correspond to the relevant rowKeys/muts arguments.
//
// Conditional mutations cannot be applied in bulk and providing one will result in an error.
func (t *Table) ApplyBulk(ctx context.Context, rowKeys []string, muts []*Mutation, opts ...ApplyOption) ([]error, error) {
	ctx = mergeMetadata(ctx, t.md)
	if len(rowKeys) != len(muts) {
		return nil, fmt.Errorf("mismatched rowKeys and mutation array lengths: %d, %d", len(rowKeys), len(muts))
	}

	origEntries := make([]*entryErr, len(rowKeys))
	for i, key := range rowKeys {
		mut := muts[i]
		if mut.cond != nil {
			return nil, errors.New("conditional mutations cannot be applied in bulk")
		}
		origEntries[i] = &entryErr{Entry: &btpb.MutateRowsRequest_Entry{RowKey: []byte(key), Mutations: mut.ops}}
	}

	// entries will be reduced after each invocation to just what needs to be retried.
	entries := make([]*entryErr, len(rowKeys))
	copy(entries, origEntries)
	err := gax.Invoke(ctx, func(ctx context.Context) error {
		err := t.doApplyBulk(ctx, entries, opts...)
		if err != nil {
			// We want to retry the entire request with the current entries
			return err
		}
		entries = t.getApplyBulkRetries(entries)
		if len(entries) > 0 && len(idempotentRetryCodes) > 0 {
			// We have at least one mutation that needs to be retried.
			// Return an arbitrary error that is retryable according to callOptions.
			return grpc.Errorf(idempotentRetryCodes[0], "Synthetic error: partial failure of ApplyBulk")
		}
		return nil
	}, retryOptions...)

	if err != nil {
		return nil, err
	}

	// Accumulate all of the errors into an array to return, interspersed with nils for successful
	// entries. The absence of any errors means we should return nil.
	var errs []error
	var foundErr bool
	for _, entry := range origEntries {
		if entry.Err != nil {
			foundErr = true
		}
		errs = append(errs, entry.Err)
	}
	if foundErr {
		return errs, nil
	}
	return nil, nil
}
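
// A minimal sketch of the two failure modes described above: a request-level
// error aborts the whole call, while per-row failures come back as a slice of
// errors aligned with the input. Row keys and family are illustrative assumptions.
func exampleApplyBulk(ctx context.Context, tbl *Table) error {
	rowKeys := []string{"row-1", "row-2"}
	muts := make([]*Mutation, len(rowKeys))
	for i := range muts {
		muts[i] = NewMutation()
		muts[i].Set("follows", "col", Now(), []byte("1"))
	}
	rowErrs, err := tbl.ApplyBulk(ctx, rowKeys, muts)
	if err != nil {
		return err // the entire request failed
	}
	for i, rerr := range rowErrs { // nil rowErrs means every row succeeded
		if rerr != nil {
			fmt.Printf("row %q failed: %v\n", rowKeys[i], rerr)
		}
	}
	return nil
}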

// getApplyBulkRetries returns the entries that need to be retried
func (t *Table) getApplyBulkRetries(entries []*entryErr) []*entryErr {
	var retryEntries []*entryErr
	for _, entry := range entries {
		err := entry.Err
		if err != nil && isIdempotentRetryCode[grpc.Code(err)] && mutationsAreRetryable(entry.Entry.Mutations) {
			// There was an error and the entry is retryable.
			retryEntries = append(retryEntries, entry)
		}
	}
	return retryEntries
}

// doApplyBulk does the work of a single ApplyBulk invocation
func (t *Table) doApplyBulk(ctx context.Context, entryErrs []*entryErr, opts ...ApplyOption) error {
	after := func(res proto.Message) {
		for _, o := range opts {
			o.after(res)
		}
	}

	entries := make([]*btpb.MutateRowsRequest_Entry, len(entryErrs))
	for i, entryErr := range entryErrs {
		entries[i] = entryErr.Entry
	}
	req := &btpb.MutateRowsRequest{
		TableName: t.c.fullTableName(t.table),
		Entries:   entries,
	}
	stream, err := t.c.client.MutateRows(ctx, req)
	if err != nil {
		return err
	}
	for {
		res, err := stream.Recv()
		if err == io.EOF {
			break
		}
		if err != nil {
			return err
		}

		for i, entry := range res.Entries {
			status := entry.Status
			if status.Code == int32(codes.OK) {
				entryErrs[i].Err = nil
			} else {
				entryErrs[i].Err = grpc.Errorf(codes.Code(status.Code), status.Message)
			}
		}
		after(res)
	}
	return nil
}

// Timestamp is in units of microseconds since 1 January 1970.
type Timestamp int64

// ServerTime is a specific Timestamp that may be passed to (*Mutation).Set.
// It indicates that the server's timestamp should be used.
const ServerTime Timestamp = -1

// Time converts a time.Time into a Timestamp.
func Time(t time.Time) Timestamp { return Timestamp(t.UnixNano() / 1e3) }

// Now returns the Timestamp representation of the current time on the client.
func Now() Timestamp { return Time(time.Now()) }

// Time converts a Timestamp into a time.Time.
func (ts Timestamp) Time() time.Time { return time.Unix(0, int64(ts)*1e3) }

// TruncateToMilliseconds truncates a Timestamp to millisecond granularity,
// which is currently the only granularity supported.
func (ts Timestamp) TruncateToMilliseconds() Timestamp {
	if ts == ServerTime {
		return ts
	}
	return ts - ts%1000
}
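
// Worked examples of the granularity rules above: a Timestamp counts
// microseconds, but writes are truncated to whole milliseconds.
//
//	Timestamp(1234567).TruncateToMilliseconds() == 1234000   // drops 567µs
//	ServerTime.TruncateToMilliseconds()         == ServerTime // sentinel is preserved
//	Time(time.Unix(1, 500))                     == Timestamp(1000000) // 1s → 1e6µs; sub-µs nanoseconds are dropped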

// ApplyReadModifyWrite applies a ReadModifyWrite to a specific row.
// It returns the newly written cells.
func (t *Table) ApplyReadModifyWrite(ctx context.Context, row string, m *ReadModifyWrite) (Row, error) {
	ctx = mergeMetadata(ctx, t.md)
	req := &btpb.ReadModifyWriteRowRequest{
		TableName: t.c.fullTableName(t.table),
		RowKey:    []byte(row),
		Rules:     m.ops,
	}
	res, err := t.c.client.ReadModifyWriteRow(ctx, req)
	if err != nil {
		return nil, err
	}
	if res.Row == nil {
		return nil, errors.New("unable to apply ReadModifyWrite: res.Row=nil")
	}
	r := make(Row)
	for _, fam := range res.Row.Families { // res.Row is *btpb.Row, fam is *btpb.Family
		decodeFamilyProto(r, row, fam)
	}
	return r, nil
}

// ReadModifyWrite represents a set of operations on a single row of a table.
// It is like Mutation but for non-idempotent changes.
// When applied, these operations operate on the latest values of the row's cells,
// and result in a new value being written to the relevant cell with a timestamp
// that is max(existing timestamp, current server time).
//
// The application of a ReadModifyWrite is atomic; concurrent ReadModifyWrites will
// be executed serially by the server.
type ReadModifyWrite struct {
	ops []*btpb.ReadModifyWriteRule
}

// NewReadModifyWrite returns a new ReadModifyWrite.
func NewReadModifyWrite() *ReadModifyWrite { return new(ReadModifyWrite) }

// AppendValue appends a value to a specific cell's value.
// If the cell is unset, it will be treated as an empty value.
func (m *ReadModifyWrite) AppendValue(family, column string, v []byte) {
	m.ops = append(m.ops, &btpb.ReadModifyWriteRule{
		FamilyName:      family,
		ColumnQualifier: []byte(column),
		Rule:            &btpb.ReadModifyWriteRule_AppendValue{v},
	})
}

// Increment interprets the value in a specific cell as a 64-bit big-endian signed integer,
// and adds a value to it. If the cell is unset, it will be treated as zero.
// If the cell is set and is not an 8-byte value, the entire ApplyReadModifyWrite
// operation will fail.
func (m *ReadModifyWrite) Increment(family, column string, delta int64) {
	m.ops = append(m.ops, &btpb.ReadModifyWriteRule{
		FamilyName:      family,
		ColumnQualifier: []byte(column),
		Rule:            &btpb.ReadModifyWriteRule_IncrementAmount{delta},
	})
}
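
// A minimal sketch of an atomic counter built on the rules above: increment a
// big-endian int64 cell and read back the newly written cell. Family, column
// and row names are illustrative assumptions.
func exampleCounter(ctx context.Context, tbl *Table) ([]byte, error) {
	rmw := NewReadModifyWrite()
	rmw.Increment("counter", "likes", 1)
	row, err := tbl.ApplyReadModifyWrite(ctx, "gwashington", rmw)
	if err != nil {
		return nil, err
	}
	items := row["counter"]
	if len(items) == 0 {
		return nil, errors.New("no cell returned")
	}
	// The returned cell value is an 8-byte big-endian signed integer.
	return items[0].Value, nil
}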

// mergeMetadata returns a context populated by the existing metadata, if any,
// joined with internal metadata.
func mergeMetadata(ctx context.Context, md metadata.MD) context.Context {
	mdCopy, _ := metadata.FromContext(ctx)
	return metadata.NewContext(ctx, metadata.Join(mdCopy, md))
}
854 vendor/cloud.google.com/go/bigtable/bigtable_test.go generated vendored Normal file
@ -0,0 +1,854 @@
/*
Copyright 2015 Google Inc. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package bigtable

import (
	"fmt"
	"math/rand"
	"reflect"
	"strings"
	"sync"
	"testing"
	"time"

	"golang.org/x/net/context"
)

func TestPrefix(t *testing.T) {
	tests := []struct {
		prefix, succ string
	}{
		{"", ""},
		{"\xff", ""}, // when used, "" means Infinity
		{"x\xff", "y"},
		{"\xfe", "\xff"},
	}
	for _, tc := range tests {
		got := prefixSuccessor(tc.prefix)
		if got != tc.succ {
			t.Errorf("prefixSuccessor(%q) = %q, want %q", tc.prefix, got, tc.succ)
			continue
		}
		r := PrefixRange(tc.prefix)
		if tc.succ == "" && r.limit != "" {
			t.Errorf("PrefixRange(%q) got limit %q", tc.prefix, r.limit)
		}
		if tc.succ != "" && r.limit != tc.succ {
			t.Errorf("PrefixRange(%q) got limit %q, want %q", tc.prefix, r.limit, tc.succ)
		}
	}
}

func TestClientIntegration(t *testing.T) {
	start := time.Now()
	lastCheckpoint := start
	checkpoint := func(s string) {
		n := time.Now()
		t.Logf("[%s] %v since start, %v since last checkpoint", s, n.Sub(start), n.Sub(lastCheckpoint))
		lastCheckpoint = n
	}

	testEnv, err := NewIntegrationEnv()
	if err != nil {
		t.Fatalf("IntegrationEnv: %v", err)
	}

	timeout := 30 * time.Second
	if testEnv.Config().UseProd {
		timeout = 5 * time.Minute
		t.Logf("Running test against production")
	} else {
		t.Logf("bttest.Server running on %s", testEnv.Config().AdminEndpoint)
	}
	ctx, cancel := context.WithTimeout(context.Background(), timeout)
	defer cancel()

	client, err := testEnv.NewClient()
	if err != nil {
		t.Fatalf("Client: %v", err)
	}
	defer client.Close()
	checkpoint("dialed Client")

	adminClient, err := testEnv.NewAdminClient()
	if err != nil {
		t.Fatalf("AdminClient: %v", err)
	}
	defer adminClient.Close()
	checkpoint("dialed AdminClient")

	table := testEnv.Config().Table

	// Delete the table at the end of the test.
	// Do this even before creating the table so that if this is running
	// against production and CreateTable fails there's a chance of cleaning it up.
	defer adminClient.DeleteTable(ctx, table)

	if err := adminClient.CreateTable(ctx, table); err != nil {
		t.Fatalf("Creating table: %v", err)
	}
	checkpoint("created table")
	if err := adminClient.CreateColumnFamily(ctx, table, "follows"); err != nil {
		t.Fatalf("Creating column family: %v", err)
	}
	checkpoint(`created "follows" column family`)

	tbl := client.Open(table)

	// Insert some data.
	initialData := map[string][]string{
		"wmckinley":   {"tjefferson"},
		"gwashington": {"jadams"},
		"tjefferson":  {"gwashington", "jadams"}, // wmckinley set conditionally below
		"jadams":      {"gwashington", "tjefferson"},
	}
	for row, ss := range initialData {
		mut := NewMutation()
		for _, name := range ss {
			mut.Set("follows", name, 0, []byte("1"))
		}
		if err := tbl.Apply(ctx, row, mut); err != nil {
			t.Errorf("Mutating row %q: %v", row, err)
		}
	}
	checkpoint("inserted initial data")

	// Do a conditional mutation with a complex filter.
	mutTrue := NewMutation()
	mutTrue.Set("follows", "wmckinley", 0, []byte("1"))
	filter := ChainFilters(ColumnFilter("gwash[iz].*"), ValueFilter("."))
	mut := NewCondMutation(filter, mutTrue, nil)
	if err := tbl.Apply(ctx, "tjefferson", mut); err != nil {
		t.Errorf("Conditionally mutating row: %v", err)
	}
	// Do a second condition mutation with a filter that does not match,
	// and thus no changes should be made.
	mutTrue = NewMutation()
	mutTrue.DeleteRow()
	filter = ColumnFilter("snoop.dogg")
	mut = NewCondMutation(filter, mutTrue, nil)
	if err := tbl.Apply(ctx, "tjefferson", mut); err != nil {
		t.Errorf("Conditionally mutating row: %v", err)
	}
	checkpoint("did two conditional mutations")

	// Fetch a row.
	row, err := tbl.ReadRow(ctx, "jadams")
	if err != nil {
		t.Fatalf("Reading a row: %v", err)
	}
	wantRow := Row{
		"follows": []ReadItem{
			{Row: "jadams", Column: "follows:gwashington", Value: []byte("1")},
			{Row: "jadams", Column: "follows:tjefferson", Value: []byte("1")},
		},
	}
	if !reflect.DeepEqual(row, wantRow) {
		t.Errorf("Read row mismatch.\n got %#v\nwant %#v", row, wantRow)
	}
	checkpoint("tested ReadRow")

	// Do a bunch of reads with filters.
	readTests := []struct {
		desc   string
		rr     RowSet
		filter Filter     // may be nil
		limit  ReadOption // may be nil

		// We do the read, grab all the cells, turn them into "<row>-<col>-<val>",
		// and join with a comma.
		want string
	}{
		{
			desc: "read all, unfiltered",
			rr:   RowRange{},
			want: "gwashington-jadams-1,jadams-gwashington-1,jadams-tjefferson-1,tjefferson-gwashington-1,tjefferson-jadams-1,tjefferson-wmckinley-1,wmckinley-tjefferson-1",
		},
		{
			desc: "read with InfiniteRange, unfiltered",
			rr:   InfiniteRange("tjefferson"),
			want: "tjefferson-gwashington-1,tjefferson-jadams-1,tjefferson-wmckinley-1,wmckinley-tjefferson-1",
		},
		{
			desc: "read with NewRange, unfiltered",
			rr:   NewRange("gargamel", "hubbard"),
			want: "gwashington-jadams-1",
		},
		{
			desc: "read with PrefixRange, unfiltered",
			rr:   PrefixRange("jad"),
			want: "jadams-gwashington-1,jadams-tjefferson-1",
		},
		{
			desc: "read with SingleRow, unfiltered",
			rr:   SingleRow("wmckinley"),
			want: "wmckinley-tjefferson-1",
		},
		{
			desc:   "read all, with ColumnFilter",
			rr:     RowRange{},
			filter: ColumnFilter(".*j.*"), // matches "jadams" and "tjefferson"
			want:   "gwashington-jadams-1,jadams-tjefferson-1,tjefferson-jadams-1,wmckinley-tjefferson-1",
		},
		{
			desc:   "read range, with ColumnRangeFilter",
			rr:     RowRange{},
			filter: ColumnRangeFilter("follows", "h", "k"),
			want:   "gwashington-jadams-1,tjefferson-jadams-1",
		},
		{
			desc:   "read range from empty, with ColumnRangeFilter",
			rr:     RowRange{},
			filter: ColumnRangeFilter("follows", "", "u"),
			want:   "gwashington-jadams-1,jadams-gwashington-1,jadams-tjefferson-1,tjefferson-gwashington-1,tjefferson-jadams-1,wmckinley-tjefferson-1",
		},
		{
			desc:   "read range from start to empty, with ColumnRangeFilter",
			rr:     RowRange{},
			filter: ColumnRangeFilter("follows", "h", ""),
			want:   "gwashington-jadams-1,jadams-tjefferson-1,tjefferson-jadams-1,tjefferson-wmckinley-1,wmckinley-tjefferson-1",
		},
		{
			desc:   "read with RowKeyFilter",
			rr:     RowRange{},
			filter: RowKeyFilter(".*wash.*"),
			want:   "gwashington-jadams-1",
		},
		{
			desc:   "read with RowKeyFilter, no matches",
			rr:     RowRange{},
			filter: RowKeyFilter(".*xxx.*"),
			want:   "",
		},
		{
			desc:   "read with FamilyFilter, no matches",
			rr:     RowRange{},
			filter: FamilyFilter(".*xxx.*"),
			want:   "",
		},
		{
			desc:   "read with ColumnFilter + row limit",
			rr:     RowRange{},
			filter: ColumnFilter(".*j.*"), // matches "jadams" and "tjefferson"
			limit:  LimitRows(2),
			want:   "gwashington-jadams-1,jadams-tjefferson-1",
		},
		{
			desc:   "read all, strip values",
			rr:     RowRange{},
			filter: StripValueFilter(),
			want:   "gwashington-jadams-,jadams-gwashington-,jadams-tjefferson-,tjefferson-gwashington-,tjefferson-jadams-,tjefferson-wmckinley-,wmckinley-tjefferson-",
		},
		{
			desc:   "read with ColumnFilter + row limit + strip values",
			rr:     RowRange{},
			filter: ChainFilters(ColumnFilter(".*j.*"), StripValueFilter()), // matches "jadams" and "tjefferson"
			limit:  LimitRows(2),
			want:   "gwashington-jadams-,jadams-tjefferson-",
		},
		{
			desc:   "read with condition, strip values on true",
			rr:     RowRange{},
			filter: ConditionFilter(ColumnFilter(".*j.*"), StripValueFilter(), nil),
			want:   "gwashington-jadams-,jadams-gwashington-,jadams-tjefferson-,tjefferson-gwashington-,tjefferson-jadams-,tjefferson-wmckinley-,wmckinley-tjefferson-",
		},
		{
			desc:   "read with condition, strip values on false",
			rr:     RowRange{},
			filter: ConditionFilter(ColumnFilter(".*xxx.*"), nil, StripValueFilter()),
			want:   "gwashington-jadams-,jadams-gwashington-,jadams-tjefferson-,tjefferson-gwashington-,tjefferson-jadams-,tjefferson-wmckinley-,wmckinley-tjefferson-",
		},
		{
			desc:   "read with ValueRangeFilter + row limit",
			rr:     RowRange{},
			filter: ValueRangeFilter([]byte("1"), []byte("5")), // matches our value of "1"
			limit:  LimitRows(2),
			want:   "gwashington-jadams-1,jadams-gwashington-1,jadams-tjefferson-1",
		},
		{
			desc:   "read with ValueRangeFilter, no match on exclusive end",
			rr:     RowRange{},
			filter: ValueRangeFilter([]byte("0"), []byte("1")), // no match
			want:   "",
		},
		{
			desc:   "read with ValueRangeFilter, no matches",
			rr:     RowRange{},
			filter: ValueRangeFilter([]byte("3"), []byte("5")), // matches nothing
			want:   "",
		},
		{
			desc:   "read with InterleaveFilter, no matches on all filters",
			rr:     RowRange{},
			filter: InterleaveFilters(ColumnFilter(".*x.*"), ColumnFilter(".*z.*")),
			want:   "",
		},
		{
			desc:   "read with InterleaveFilter, no duplicate cells",
			rr:     RowRange{},
			filter: InterleaveFilters(ColumnFilter(".*g.*"), ColumnFilter(".*j.*")),
			want:   "gwashington-jadams-1,jadams-gwashington-1,jadams-tjefferson-1,tjefferson-gwashington-1,tjefferson-jadams-1,wmckinley-tjefferson-1",
		},
		{
			desc:   "read with InterleaveFilter, with duplicate cells",
			rr:     RowRange{},
			filter: InterleaveFilters(ColumnFilter(".*g.*"), ColumnFilter(".*g.*")),
			want:   "jadams-gwashington-1,jadams-gwashington-1,tjefferson-gwashington-1,tjefferson-gwashington-1",
		},
	}
	for _, tc := range readTests {
		var opts []ReadOption
		if tc.filter != nil {
			opts = append(opts, RowFilter(tc.filter))
		}
		if tc.limit != nil {
			opts = append(opts, tc.limit)
		}
		var elt []string
		err := tbl.ReadRows(context.Background(), tc.rr, func(r Row) bool {
			for _, ris := range r {
				for _, ri := range ris {
					elt = append(elt, formatReadItem(ri))
				}
			}
			return true
		}, opts...)
		if err != nil {
			t.Errorf("%s: %v", tc.desc, err)
			continue
		}
		if got := strings.Join(elt, ","); got != tc.want {
			t.Errorf("%s: wrong reads.\n got %q\nwant %q", tc.desc, got, tc.want)
		}
	}
	// Read a RowList
	var elt []string
	keys := RowList{"wmckinley", "gwashington", "jadams"}
	want := "gwashington-jadams-1,jadams-gwashington-1,jadams-tjefferson-1,wmckinley-tjefferson-1"
	err = tbl.ReadRows(ctx, keys, func(r Row) bool {
		for _, ris := range r {
			for _, ri := range ris {
				elt = append(elt, formatReadItem(ri))
			}
		}
		return true
	})
	if err != nil {
		t.Errorf("read RowList: %v", err)
	}

	if got := strings.Join(elt, ","); got != want {
		t.Errorf("bulk read: wrong reads.\n got %q\nwant %q", got, want)
	}
	checkpoint("tested ReadRows in a few ways")

	// Do a scan and stop part way through.
	// Verify that the ReadRows callback doesn't keep running.
	stopped := false
	err = tbl.ReadRows(ctx, InfiniteRange(""), func(r Row) bool {
		if r.Key() < "h" {
			return true
		}
		if !stopped {
			stopped = true
			return false
		}
		t.Errorf("ReadRows kept scanning to row %q after being told to stop", r.Key())
		return false
	})
	if err != nil {
		t.Errorf("Partial ReadRows: %v", err)
	}
	checkpoint("did partial ReadRows test")

	// Delete a row and check it goes away.
	mut = NewMutation()
	mut.DeleteRow()
	if err := tbl.Apply(ctx, "wmckinley", mut); err != nil {
		t.Errorf("Apply DeleteRow: %v", err)
	}
	row, err = tbl.ReadRow(ctx, "wmckinley")
	if err != nil {
		t.Fatalf("Reading a row after DeleteRow: %v", err)
	}
	if len(row) != 0 {
		t.Fatalf("Read non-zero row after DeleteRow: %v", row)
	}
	checkpoint("exercised DeleteRow")

	// Check ReadModifyWrite.

	if err := adminClient.CreateColumnFamily(ctx, table, "counter"); err != nil {
		t.Fatalf("Creating column family: %v", err)
	}

	appendRMW := func(b []byte) *ReadModifyWrite {
		rmw := NewReadModifyWrite()
		rmw.AppendValue("counter", "likes", b)
		return rmw
	}
	incRMW := func(n int64) *ReadModifyWrite {
		rmw := NewReadModifyWrite()
		rmw.Increment("counter", "likes", n)
		return rmw
	}
	rmwSeq := []struct {
		desc string
		rmw  *ReadModifyWrite
		want []byte
	}{
		{
			desc: "append #1",
			rmw:  appendRMW([]byte{0, 0, 0}),
			want: []byte{0, 0, 0},
		},
		{
			desc: "append #2",
			rmw:  appendRMW([]byte{0, 0, 0, 0, 17}), // the remaining 40 bits to make a big-endian 17
			want: []byte{0, 0, 0, 0, 0, 0, 0, 17},
		},
		{
			desc: "increment",
			rmw:  incRMW(8),
			want: []byte{0, 0, 0, 0, 0, 0, 0, 25},
		},
	}
	for _, step := range rmwSeq {
		row, err := tbl.ApplyReadModifyWrite(ctx, "gwashington", step.rmw)
		if err != nil {
			t.Fatalf("ApplyReadModifyWrite %+v: %v", step.rmw, err)
		}
		clearTimestamps(row)
		wantRow := Row{"counter": []ReadItem{{Row: "gwashington", Column: "counter:likes", Value: step.want}}}
		if !reflect.DeepEqual(row, wantRow) {
			t.Fatalf("After %s,\n got %v\nwant %v", step.desc, row, wantRow)
		}
	}
	checkpoint("tested ReadModifyWrite")

	// Test arbitrary timestamps more thoroughly.
	if err := adminClient.CreateColumnFamily(ctx, table, "ts"); err != nil {
		t.Fatalf("Creating column family: %v", err)
	}
	const numVersions = 4
	mut = NewMutation()
	for i := 0; i < numVersions; i++ {
		// Timestamps are used in thousands because the server
		// only permits that granularity.
		mut.Set("ts", "col", Timestamp(i*1000), []byte(fmt.Sprintf("val-%d", i)))
	}
	if err := tbl.Apply(ctx, "testrow", mut); err != nil {
		t.Fatalf("Mutating row: %v", err)
	}
	r, err := tbl.ReadRow(ctx, "testrow")
	if err != nil {
		t.Fatalf("Reading row: %v", err)
	}
	wantRow = Row{"ts": []ReadItem{
		// These should be returned in descending timestamp order.
		{Row: "testrow", Column: "ts:col", Timestamp: 3000, Value: []byte("val-3")},
		{Row: "testrow", Column: "ts:col", Timestamp: 2000, Value: []byte("val-2")},
		{Row: "testrow", Column: "ts:col", Timestamp: 1000, Value: []byte("val-1")},
		{Row: "testrow", Column: "ts:col", Timestamp: 0, Value: []byte("val-0")},
	}}
	if !reflect.DeepEqual(r, wantRow) {
		t.Errorf("Cell with multiple versions,\n got %v\nwant %v", r, wantRow)
	}
	// Do the same read, but filter to the latest two versions.
	r, err = tbl.ReadRow(ctx, "testrow", RowFilter(LatestNFilter(2)))
	if err != nil {
		t.Fatalf("Reading row: %v", err)
	}
	wantRow = Row{"ts": []ReadItem{
		{Row: "testrow", Column: "ts:col", Timestamp: 3000, Value: []byte("val-3")},
		{Row: "testrow", Column: "ts:col", Timestamp: 2000, Value: []byte("val-2")},
	}}
	if !reflect.DeepEqual(r, wantRow) {
		t.Errorf("Cell with multiple versions and LatestNFilter(2),\n got %v\nwant %v", r, wantRow)
	}
	// Check timestamp range filtering (with truncation)
	r, err = tbl.ReadRow(ctx, "testrow", RowFilter(TimestampRangeFilterMicros(1001, 3000)))
	if err != nil {
		t.Fatalf("Reading row: %v", err)
	}
	wantRow = Row{"ts": []ReadItem{
		{Row: "testrow", Column: "ts:col", Timestamp: 2000, Value: []byte("val-2")},
		{Row: "testrow", Column: "ts:col", Timestamp: 1000, Value: []byte("val-1")},
	}}
	if !reflect.DeepEqual(r, wantRow) {
		t.Errorf("Cell with multiple versions and TimestampRangeFilter(1000, 3000),\n got %v\nwant %v", r, wantRow)
	}
	r, err = tbl.ReadRow(ctx, "testrow", RowFilter(TimestampRangeFilterMicros(1000, 0)))
	if err != nil {
		t.Fatalf("Reading row: %v", err)
	}
	wantRow = Row{"ts": []ReadItem{
		{Row: "testrow", Column: "ts:col", Timestamp: 3000, Value: []byte("val-3")},
		{Row: "testrow", Column: "ts:col", Timestamp: 2000, Value: []byte("val-2")},
		{Row: "testrow", Column: "ts:col", Timestamp: 1000, Value: []byte("val-1")},
	}}
	if !reflect.DeepEqual(r, wantRow) {
		t.Errorf("Cell with multiple versions and TimestampRangeFilter(1000, 0),\n got %v\nwant %v", r, wantRow)
	}
	// Delete non-existing cells, no such column family in this row
	// Should not delete anything
	if err := adminClient.CreateColumnFamily(ctx, table, "non-existing"); err != nil {
		t.Fatalf("Creating column family: %v", err)
	}
	mut = NewMutation()
	mut.DeleteTimestampRange("non-existing", "col", 2000, 3000) // half-open interval
	if err := tbl.Apply(ctx, "testrow", mut); err != nil {
		t.Fatalf("Mutating row: %v", err)
	}
	r, err = tbl.ReadRow(ctx, "testrow", RowFilter(LatestNFilter(3)))
	if err != nil {
		t.Fatalf("Reading row: %v", err)
	}
	if !reflect.DeepEqual(r, wantRow) {
		t.Errorf("Cell was deleted unexpectedly,\n got %v\nwant %v", r, wantRow)
	}
	// Delete non-existing cells, no such column in this column family
	// Should not delete anything
	mut = NewMutation()
	mut.DeleteTimestampRange("ts", "non-existing", 2000, 3000) // half-open interval
	if err := tbl.Apply(ctx, "testrow", mut); err != nil {
		t.Fatalf("Mutating row: %v", err)
	}
	r, err = tbl.ReadRow(ctx, "testrow", RowFilter(LatestNFilter(3)))
	if err != nil {
		t.Fatalf("Reading row: %v", err)
	}
	if !reflect.DeepEqual(r, wantRow) {
		t.Errorf("Cell was deleted unexpectedly,\n got %v\nwant %v", r, wantRow)
	}
	// Delete the cell with timestamp 2000 and repeat the last read,
	// checking that we get ts 3000 and ts 1000.
	mut = NewMutation()
	mut.DeleteTimestampRange("ts", "col", 2001, 3000) // half-open interval
	if err := tbl.Apply(ctx, "testrow", mut); err != nil {
		t.Fatalf("Mutating row: %v", err)
	}
	r, err = tbl.ReadRow(ctx, "testrow", RowFilter(LatestNFilter(2)))
	if err != nil {
		t.Fatalf("Reading row: %v", err)
	}
	wantRow = Row{"ts": []ReadItem{
		{Row: "testrow", Column: "ts:col", Timestamp: 3000, Value: []byte("val-3")},
		{Row: "testrow", Column: "ts:col", Timestamp: 1000, Value: []byte("val-1")},
	}}
	if !reflect.DeepEqual(r, wantRow) {
		t.Errorf("Cell with multiple versions and LatestNFilter(2), after deleting timestamp 2000,\n got %v\nwant %v", r, wantRow)
	}
	checkpoint("tested multiple versions in a cell")

	// Check DeleteCellsInFamily
	if err := adminClient.CreateColumnFamily(ctx, table, "status"); err != nil {
		t.Fatalf("Creating column family: %v", err)
	}

	mut = NewMutation()
	mut.Set("status", "start", 0, []byte("1"))
	mut.Set("status", "end", 0, []byte("2"))
	mut.Set("ts", "col", 0, []byte("3"))
	if err := tbl.Apply(ctx, "row1", mut); err != nil {
		t.Errorf("Mutating row: %v", err)
	}
	if err := tbl.Apply(ctx, "row2", mut); err != nil {
		t.Errorf("Mutating row: %v", err)
	}

	mut = NewMutation()
	mut.DeleteCellsInFamily("status")
	if err := tbl.Apply(ctx, "row1", mut); err != nil {
		t.Errorf("Delete cf: %v", err)
	}

	// ColumnFamily removed
	r, err = tbl.ReadRow(ctx, "row1")
	if err != nil {
		t.Fatalf("Reading row: %v", err)
	}
	wantRow = Row{"ts": []ReadItem{
		{Row: "row1", Column: "ts:col", Timestamp: 0, Value: []byte("3")},
	}}
	if !reflect.DeepEqual(r, wantRow) {
		t.Errorf("Column family was not deleted.\n got %v\n want %v", r, wantRow)
	}

	// ColumnFamily not removed
	r, err = tbl.ReadRow(ctx, "row2")
	if err != nil {
		t.Fatalf("Reading row: %v", err)
	}
	wantRow = Row{
		"ts": []ReadItem{
			{Row: "row2", Column: "ts:col", Timestamp: 0, Value: []byte("3")},
		},
		"status": []ReadItem{
			{Row: "row2", Column: "status:end", Timestamp: 0, Value: []byte("2")},
			{Row: "row2", Column: "status:start", Timestamp: 0, Value: []byte("1")},
		},
	}
	if !reflect.DeepEqual(r, wantRow) {
		t.Errorf("Column family was deleted unexpectedly.\n got %v\n want %v", r, wantRow)
	}
	checkpoint("tested family delete")
||||
// Check DeleteCellsInColumn
|
||||
mut = NewMutation()
|
||||
mut.Set("status", "start", 0, []byte("1"))
|
||||
mut.Set("status", "middle", 0, []byte("2"))
|
||||
mut.Set("status", "end", 0, []byte("3"))
|
||||
if err := tbl.Apply(ctx, "row3", mut); err != nil {
|
||||
t.Errorf("Mutating row: %v", err)
|
||||
}
|
||||
mut = NewMutation()
|
||||
mut.DeleteCellsInColumn("status", "middle")
|
||||
if err := tbl.Apply(ctx, "row3", mut); err != nil {
|
||||
t.Errorf("Delete column: %v", err)
|
||||
}
|
||||
r, err = tbl.ReadRow(ctx, "row3")
|
||||
if err != nil {
|
||||
t.Fatalf("Reading row: %v", err)
|
||||
}
|
||||
wantRow = Row{
|
||||
"status": []ReadItem{
|
||||
{Row: "row3", Column: "status:end", Timestamp: 0, Value: []byte("3")},
|
||||
{Row: "row3", Column: "status:start", Timestamp: 0, Value: []byte("1")},
|
||||
},
|
||||
}
|
||||
if !reflect.DeepEqual(r, wantRow) {
|
||||
t.Errorf("Column was not deleted.\n got %v\n want %v", r, wantRow)
|
||||
}
|
||||
mut = NewMutation()
|
||||
mut.DeleteCellsInColumn("status", "start")
|
||||
if err := tbl.Apply(ctx, "row3", mut); err != nil {
|
||||
t.Errorf("Delete column: %v", err)
|
||||
}
|
||||
r, err = tbl.ReadRow(ctx, "row3")
|
||||
if err != nil {
|
||||
t.Fatalf("Reading row: %v", err)
|
||||
}
|
||||
wantRow = Row{
|
||||
"status": []ReadItem{
|
||||
{Row: "row3", Column: "status:end", Timestamp: 0, Value: []byte("3")},
|
||||
},
|
||||
}
|
||||
if !reflect.DeepEqual(r, wantRow) {
|
||||
t.Errorf("Column was not deleted.\n got %v\n want %v", r, wantRow)
|
||||
}
|
||||
mut = NewMutation()
|
||||
mut.DeleteCellsInColumn("status", "end")
|
||||
if err := tbl.Apply(ctx, "row3", mut); err != nil {
|
||||
t.Errorf("Delete column: %v", err)
|
||||
}
|
||||
r, err = tbl.ReadRow(ctx, "row3")
|
||||
if err != nil {
|
||||
t.Fatalf("Reading row: %v", err)
|
||||
}
|
||||
if len(r) != 0 {
|
||||
t.Errorf("Delete column: got %v, want empty row", r)
|
||||
}
|
||||
// Add same cell after delete
|
||||
mut = NewMutation()
|
||||
mut.Set("status", "end", 0, []byte("3"))
|
||||
if err := tbl.Apply(ctx, "row3", mut); err != nil {
|
||||
t.Errorf("Mutating row: %v", err)
|
||||
}
|
||||
r, err = tbl.ReadRow(ctx, "row3")
|
||||
if err != nil {
|
||||
t.Fatalf("Reading row: %v", err)
|
||||
}
|
||||
if !reflect.DeepEqual(r, wantRow) {
|
||||
t.Errorf("Column was not deleted correctly.\n got %v\n want %v", r, wantRow)
|
||||
}
|
||||
checkpoint("tested column delete")
|
||||
|
||||
// Do highly concurrent reads/writes.
|
||||
// TODO(dsymonds): Raise this to 1000 when https://github.com/grpc/grpc-go/issues/205 is resolved.
|
||||
const maxConcurrency = 100
|
||||
var wg sync.WaitGroup
|
||||
for i := 0; i < maxConcurrency; i++ {
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
switch r := rand.Intn(100); { // r ∈ [0,100)
|
||||
case 0 <= r && r < 30:
|
||||
// Do a read.
|
||||
_, err := tbl.ReadRow(ctx, "testrow", RowFilter(LatestNFilter(1)))
|
||||
if err != nil {
|
||||
t.Errorf("Concurrent read: %v", err)
|
||||
}
|
||||
case 30 <= r && r < 100:
|
||||
// Do a write.
|
||||
mut := NewMutation()
|
||||
mut.Set("ts", "col", 0, []byte("data"))
|
||||
if err := tbl.Apply(ctx, "testrow", mut); err != nil {
|
||||
t.Errorf("Concurrent write: %v", err)
|
||||
}
|
||||
}
|
||||
}()
|
||||
}
|
||||
wg.Wait()
|
||||
checkpoint("tested high concurrency")
|
||||
|
||||
// Large reads, writes and scans.
|
||||
bigBytes := make([]byte, 3<<20) // 3 MB is large, but less than current gRPC max of 4 MB.
|
||||
nonsense := []byte("lorem ipsum dolor sit amet, ")
|
||||
fill(bigBytes, nonsense)
|
||||
mut = NewMutation()
|
||||
mut.Set("ts", "col", 0, bigBytes)
|
||||
if err := tbl.Apply(ctx, "bigrow", mut); err != nil {
|
||||
t.Errorf("Big write: %v", err)
|
||||
}
|
||||
r, err = tbl.ReadRow(ctx, "bigrow")
|
||||
if err != nil {
|
||||
t.Errorf("Big read: %v", err)
|
||||
}
|
||||
wantRow = Row{"ts": []ReadItem{
|
||||
{Row: "bigrow", Column: "ts:col", Value: bigBytes},
|
||||
}}
|
||||
if !reflect.DeepEqual(r, wantRow) {
|
||||
t.Errorf("Big read returned incorrect bytes: %v", r)
|
||||
}
|
||||
// Now write 1000 rows, each with 82 KB values, then scan them all.
|
||||
medBytes := make([]byte, 82<<10)
|
||||
fill(medBytes, nonsense)
|
||||
sem := make(chan int, 50) // do up to 50 mutations at a time.
|
||||
for i := 0; i < 1000; i++ {
|
||||
mut := NewMutation()
|
||||
mut.Set("ts", "big-scan", 0, medBytes)
|
||||
row := fmt.Sprintf("row-%d", i)
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
defer func() { <-sem }()
|
||||
sem <- 1
|
||||
if err := tbl.Apply(ctx, row, mut); err != nil {
|
||||
t.Errorf("Preparing large scan: %v", err)
|
||||
}
|
||||
}()
|
||||
}
|
||||
wg.Wait()
|
||||
n := 0
|
||||
err = tbl.ReadRows(ctx, PrefixRange("row-"), func(r Row) bool {
|
||||
for _, ris := range r {
|
||||
for _, ri := range ris {
|
||||
n += len(ri.Value)
|
||||
}
|
||||
}
|
||||
return true
|
||||
}, RowFilter(ColumnFilter("big-scan")))
|
||||
if err != nil {
|
||||
t.Errorf("Doing large scan: %v", err)
|
||||
}
|
||||
if want := 1000 * len(medBytes); n != want {
|
||||
t.Errorf("Large scan returned %d bytes, want %d", n, want)
|
||||
}
|
||||
// Scan a subset of the 1000 rows that we just created, using a LimitRows ReadOption.
|
||||
rc := 0
|
||||
wantRc := 3
|
||||
err = tbl.ReadRows(ctx, PrefixRange("row-"), func(r Row) bool {
|
||||
rc++
|
||||
return true
|
||||
}, LimitRows(int64(wantRc)))
|
||||
if rc != wantRc {
|
||||
t.Errorf("Scan with row limit returned %d rows, want %d", rc, wantRc)
|
||||
}
|
||||
checkpoint("tested big read/write/scan")
|
||||
|
||||
// Test bulk mutations
|
||||
if err := adminClient.CreateColumnFamily(ctx, table, "bulk"); err != nil {
|
||||
t.Fatalf("Creating column family: %v", err)
|
||||
}
|
||||
bulkData := map[string][]string{
|
||||
"red sox": {"2004", "2007", "2013"},
|
||||
"patriots": {"2001", "2003", "2004", "2014"},
|
||||
"celtics": {"1981", "1984", "1986", "2008"},
|
||||
}
|
||||
var rowKeys []string
|
||||
var muts []*Mutation
|
||||
for row, ss := range bulkData {
|
||||
mut := NewMutation()
|
||||
for _, name := range ss {
|
||||
mut.Set("bulk", name, 0, []byte("1"))
|
||||
}
|
||||
rowKeys = append(rowKeys, row)
|
||||
muts = append(muts, mut)
|
||||
}
|
||||
status, err := tbl.ApplyBulk(ctx, rowKeys, muts)
|
||||
if err != nil {
|
||||
t.Fatalf("Bulk mutating rows %q: %v", rowKeys, err)
|
||||
}
|
||||
if status != nil {
|
||||
t.Errorf("non-nil errors: %v", err)
|
||||
}
|
||||
checkpoint("inserted bulk data")
|
||||
|
||||
// Read each row back
|
||||
for rowKey, ss := range bulkData {
|
||||
row, err := tbl.ReadRow(ctx, rowKey)
|
||||
if err != nil {
|
||||
t.Fatalf("Reading a bulk row: %v", err)
|
||||
}
|
||||
var wantItems []ReadItem
|
||||
for _, val := range ss {
|
||||
wantItems = append(wantItems, ReadItem{Row: rowKey, Column: "bulk:" + val, Value: []byte("1")})
|
||||
}
|
||||
wantRow := Row{"bulk": wantItems}
|
||||
if !reflect.DeepEqual(row, wantRow) {
|
||||
t.Errorf("Read row mismatch.\n got %#v\nwant %#v", row, wantRow)
|
||||
}
|
||||
}
|
||||
checkpoint("tested reading from bulk insert")
|
||||
|
||||
// Test bulk write errors.
|
||||
// Note: Setting timestamps as ServerTime makes sure the mutations are not retried on error.
|
||||
badMut := NewMutation()
|
||||
badMut.Set("badfamily", "col", ServerTime, nil)
|
||||
badMut2 := NewMutation()
|
||||
badMut2.Set("badfamily2", "goodcol", ServerTime, []byte("1"))
|
||||
status, err = tbl.ApplyBulk(ctx, []string{"badrow", "badrow2"}, []*Mutation{badMut, badMut2})
|
||||
if err != nil {
|
||||
t.Fatalf("Bulk mutating rows %q: %v", rowKeys, err)
|
||||
}
|
||||
if status == nil {
|
||||
t.Errorf("No errors for bad bulk mutation")
|
||||
} else if status[0] == nil || status[1] == nil {
|
||||
t.Errorf("No error for bad bulk mutation")
|
||||
}
|
||||
}
|
||||
|
||||
func formatReadItem(ri ReadItem) string {
|
||||
// Use the column qualifier only to make the test data briefer.
|
||||
col := ri.Column[strings.Index(ri.Column, ":")+1:]
|
||||
return fmt.Sprintf("%s-%s-%s", ri.Row, col, ri.Value)
|
||||
}
|
||||
|
||||
func fill(b, sub []byte) {
|
||||
for len(b) > len(sub) {
|
||||
n := copy(b, sub)
|
||||
b = b[n:]
|
||||
}
|
||||
}
|
||||
|
||||
func clearTimestamps(r Row) {
|
||||
for _, ris := range r {
|
||||
for i := range ris {
|
||||
ris[i].Timestamp = 0
|
||||
}
|
||||
}
|
||||
}
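
The bulk checks above pin down the ApplyBulk contract: the second return value reports failure of the request as a whole, while the first is a per-mutation error slice that is nil when every mutation succeeded. A minimal sketch of that contract in use; the package name, the bulkWrite helper, and the row keys and cell values are hypothetical, and tbl/ctx are assumed to be set up elsewhere:

package example

import (
	"log"

	"cloud.google.com/go/bigtable"
	"golang.org/x/net/context"
)

// bulkWrite applies two mutations in a single RPC and distinguishes a
// request-level failure from per-mutation failures.
func bulkWrite(ctx context.Context, tbl *bigtable.Table) {
	m1 := bigtable.NewMutation()
	m1.Set("ts", "col", bigtable.Now(), []byte("a"))
	m2 := bigtable.NewMutation()
	m2.Set("ts", "col", bigtable.Now(), []byte("b"))

	errs, err := tbl.ApplyBulk(ctx, []string{"row1", "row2"}, []*bigtable.Mutation{m1, m2})
	if err != nil {
		log.Fatalf("ApplyBulk RPC failed: %v", err) // the whole request failed
	}
	for i, e := range errs { // errs is nil when all mutations succeeded
		if e != nil {
			log.Printf("mutation %d failed: %v", i, e)
		}
	}
}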
83 vendor/cloud.google.com/go/bigtable/bttest/example_test.go generated vendored Normal file
@@ -0,0 +1,83 @@
/*
Copyright 2016 Google Inc. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package bttest_test

import (
	"fmt"
	"log"

	"cloud.google.com/go/bigtable"
	"cloud.google.com/go/bigtable/bttest"
	"golang.org/x/net/context"
	"google.golang.org/api/option"
	"google.golang.org/grpc"
)

func ExampleNewServer() {
	srv, err := bttest.NewServer("127.0.0.1:0")
	if err != nil {
		log.Fatalln(err)
	}

	ctx := context.Background()

	conn, err := grpc.Dial(srv.Addr, grpc.WithInsecure())
	if err != nil {
		log.Fatalln(err)
	}

	proj, instance := "proj", "instance"

	adminClient, err := bigtable.NewAdminClient(ctx, proj, instance, option.WithGRPCConn(conn))
	if err != nil {
		log.Fatalln(err)
	}

	if err = adminClient.CreateTable(ctx, "example"); err != nil {
		log.Fatalln(err)
	}

	if err = adminClient.CreateColumnFamily(ctx, "example", "links"); err != nil {
		log.Fatalln(err)
	}

	client, err := bigtable.NewClient(ctx, proj, instance, option.WithGRPCConn(conn))
	if err != nil {
		log.Fatalln(err)
	}
	tbl := client.Open("example")

	mut := bigtable.NewMutation()
	mut.Set("links", "golang.org", bigtable.Now(), []byte("Gophers!"))
	if err = tbl.Apply(ctx, "com.google.cloud", mut); err != nil {
		log.Fatalln(err)
	}

	if row, err := tbl.ReadRow(ctx, "com.google.cloud"); err != nil {
		log.Fatalln(err)
	} else {
		for _, column := range row["links"] {
			fmt.Println(column.Column)
			fmt.Println(string(column.Value))
		}
	}

	// Output:
	// links:golang.org
	// Gophers!
}
1230 vendor/cloud.google.com/go/bigtable/bttest/inmem.go generated vendored Normal file
File diff suppressed because it is too large
517 vendor/cloud.google.com/go/bigtable/bttest/inmem_test.go generated vendored Normal file
@@ -0,0 +1,517 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package bttest

import (
	"fmt"
	"math/rand"
	"strconv"
	"sync"
	"sync/atomic"
	"testing"
	"time"

	"golang.org/x/net/context"
	btapb "google.golang.org/genproto/googleapis/bigtable/admin/v2"
	btpb "google.golang.org/genproto/googleapis/bigtable/v2"
	"google.golang.org/grpc"
)

func TestConcurrentMutationsReadModifyAndGC(t *testing.T) {
	s := &server{
		tables: make(map[string]*table),
	}
	ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
	defer cancel()
	if _, err := s.CreateTable(
		ctx,
		&btapb.CreateTableRequest{Parent: "cluster", TableId: "t"}); err != nil {
		t.Fatal(err)
	}
	const name = `cluster/tables/t`
	tbl := s.tables[name]
	req := &btapb.ModifyColumnFamiliesRequest{
		Name: name,
		Modifications: []*btapb.ModifyColumnFamiliesRequest_Modification{{
			Id:  "cf",
			Mod: &btapb.ModifyColumnFamiliesRequest_Modification_Create{&btapb.ColumnFamily{}},
		}},
	}
	_, err := s.ModifyColumnFamilies(ctx, req)
	if err != nil {
		t.Fatal(err)
	}
	req = &btapb.ModifyColumnFamiliesRequest{
		Name: name,
		Modifications: []*btapb.ModifyColumnFamiliesRequest_Modification{{
			Id: "cf",
			Mod: &btapb.ModifyColumnFamiliesRequest_Modification_Update{&btapb.ColumnFamily{
				GcRule: &btapb.GcRule{Rule: &btapb.GcRule_MaxNumVersions{1}},
			}},
		}},
	}
	if _, err := s.ModifyColumnFamilies(ctx, req); err != nil {
		t.Fatal(err)
	}

	var wg sync.WaitGroup
	var ts int64
	ms := func() []*btpb.Mutation {
		return []*btpb.Mutation{{
			Mutation: &btpb.Mutation_SetCell_{&btpb.Mutation_SetCell{
				FamilyName:      "cf",
				ColumnQualifier: []byte(`col`),
				TimestampMicros: atomic.AddInt64(&ts, 1000),
			}},
		}}
	}

	rmw := func() *btpb.ReadModifyWriteRowRequest {
		return &btpb.ReadModifyWriteRowRequest{
			TableName: name,
			RowKey:    []byte(fmt.Sprint(rand.Intn(100))),
			Rules: []*btpb.ReadModifyWriteRule{{
				FamilyName:      "cf",
				ColumnQualifier: []byte("col"),
				Rule:            &btpb.ReadModifyWriteRule_IncrementAmount{1},
			}},
		}
	}
	for i := 0; i < 100; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for ctx.Err() == nil {
				req := &btpb.MutateRowRequest{
					TableName: name,
					RowKey:    []byte(fmt.Sprint(rand.Intn(100))),
					Mutations: ms(),
				}
				s.MutateRow(ctx, req)
			}
		}()
		wg.Add(1)
		go func() {
			defer wg.Done()
			for ctx.Err() == nil {
				_, _ = s.ReadModifyWriteRow(ctx, rmw())
			}
		}()

		wg.Add(1)
		go func() {
			defer wg.Done()
			tbl.gc()
		}()
	}
	done := make(chan struct{})
	go func() {
		wg.Wait()
		close(done)
	}()
	select {
	case <-done:
	case <-time.After(1 * time.Second):
		t.Error("Concurrent mutations and GCs haven't completed after 1s")
	}
}

func TestCreateTableWithFamily(t *testing.T) {
	// The Go client currently doesn't support creating a table with column families
	// in one operation, but it is allowed by the API. This must still be supported by
	// the fake server, so this test lives here instead of in the main bigtable
	// integration test.
	s := &server{
		tables: make(map[string]*table),
	}
	ctx := context.Background()
	newTbl := btapb.Table{
		ColumnFamilies: map[string]*btapb.ColumnFamily{
			"cf1": {GcRule: &btapb.GcRule{Rule: &btapb.GcRule_MaxNumVersions{123}}},
			"cf2": {GcRule: &btapb.GcRule{Rule: &btapb.GcRule_MaxNumVersions{456}}},
		},
	}
	cTbl, err := s.CreateTable(ctx, &btapb.CreateTableRequest{Parent: "cluster", TableId: "t", Table: &newTbl})
	if err != nil {
		t.Fatalf("Creating table: %v", err)
	}
	tbl, err := s.GetTable(ctx, &btapb.GetTableRequest{Name: cTbl.Name})
	if err != nil {
		t.Fatalf("Getting table: %v", err)
	}
	cf := tbl.ColumnFamilies["cf1"]
	if cf == nil {
		t.Fatalf("Missing col family cf1")
	}
	if got, want := cf.GcRule.GetMaxNumVersions(), int32(123); got != want {
		t.Errorf("Invalid MaxNumVersions: wanted:%d, got:%d", want, got)
	}
	cf = tbl.ColumnFamilies["cf2"]
	if cf == nil {
		t.Fatalf("Missing col family cf2")
	}
	if got, want := cf.GcRule.GetMaxNumVersions(), int32(456); got != want {
		t.Errorf("Invalid MaxNumVersions: wanted:%d, got:%d", want, got)
	}
}

type MockSampleRowKeysServer struct {
	responses []*btpb.SampleRowKeysResponse
	grpc.ServerStream
}

func (s *MockSampleRowKeysServer) Send(resp *btpb.SampleRowKeysResponse) error {
	s.responses = append(s.responses, resp)
	return nil
}

func TestSampleRowKeys(t *testing.T) {
	s := &server{
		tables: make(map[string]*table),
	}
	ctx := context.Background()
	newTbl := btapb.Table{
		ColumnFamilies: map[string]*btapb.ColumnFamily{
			"cf": {GcRule: &btapb.GcRule{Rule: &btapb.GcRule_MaxNumVersions{1}}},
		},
	}
	tbl, err := s.CreateTable(ctx, &btapb.CreateTableRequest{Parent: "cluster", TableId: "t", Table: &newTbl})
	if err != nil {
		t.Fatalf("Creating table: %v", err)
	}

	// Populate the table.
	val := []byte("value")
	rowCount := 1000
	for i := 0; i < rowCount; i++ {
		req := &btpb.MutateRowRequest{
			TableName: tbl.Name,
			RowKey:    []byte("row-" + strconv.Itoa(i)),
			Mutations: []*btpb.Mutation{{
				Mutation: &btpb.Mutation_SetCell_{&btpb.Mutation_SetCell{
					FamilyName:      "cf",
					ColumnQualifier: []byte("col"),
					TimestampMicros: 0,
					Value:           val,
				}},
			}},
		}
		if _, err := s.MutateRow(ctx, req); err != nil {
			t.Fatalf("Populating table: %v", err)
		}
	}

	mock := &MockSampleRowKeysServer{}
	if err := s.SampleRowKeys(&btpb.SampleRowKeysRequest{TableName: tbl.Name}, mock); err != nil {
		t.Errorf("SampleRowKeys error: %v", err)
	}
	if len(mock.responses) == 0 {
		t.Fatal("Response count: got 0, want > 0")
	}
	// Make sure the offset of the final response is the offset of the final row.
	got := mock.responses[len(mock.responses)-1].OffsetBytes
	want := int64((rowCount - 1) * len(val))
	if got != want {
		t.Errorf("Invalid offset: got %d, want %d", got, want)
	}
}

func TestDropRowRange(t *testing.T) {
	s := &server{
		tables: make(map[string]*table),
	}
	ctx := context.Background()
	newTbl := btapb.Table{
		ColumnFamilies: map[string]*btapb.ColumnFamily{
			"cf": {GcRule: &btapb.GcRule{Rule: &btapb.GcRule_MaxNumVersions{1}}},
		},
	}
	tblInfo, err := s.CreateTable(ctx, &btapb.CreateTableRequest{Parent: "cluster", TableId: "t", Table: &newTbl})
	if err != nil {
		t.Fatalf("Creating table: %v", err)
	}

	tbl := s.tables[tblInfo.Name]

	// Populate the table.
	prefixes := []string{"AAA", "BBB", "CCC", "DDD"}
	count := 3
	doWrite := func() {
		for _, prefix := range prefixes {
			for i := 0; i < count; i++ {
				req := &btpb.MutateRowRequest{
					TableName: tblInfo.Name,
					RowKey:    []byte(prefix + strconv.Itoa(i)),
					Mutations: []*btpb.Mutation{{
						Mutation: &btpb.Mutation_SetCell_{&btpb.Mutation_SetCell{
							FamilyName:      "cf",
							ColumnQualifier: []byte("col"),
							TimestampMicros: 0,
							Value:           []byte{},
						}},
					}},
				}
				if _, err := s.MutateRow(ctx, req); err != nil {
					t.Fatalf("Populating table: %v", err)
				}
			}
		}
	}

	doWrite()
	tblSize := len(tbl.rows)
	req := &btapb.DropRowRangeRequest{
		Name:   tblInfo.Name,
		Target: &btapb.DropRowRangeRequest_RowKeyPrefix{[]byte("AAA")},
	}
	if _, err = s.DropRowRange(ctx, req); err != nil {
		t.Fatalf("Dropping first range: %v", err)
	}
	got, want := len(tbl.rows), tblSize-count
	if got != want {
		t.Errorf("Row count after first drop: got %d (%v), want %d", got, tbl.rows, want)
	}

	req = &btapb.DropRowRangeRequest{
		Name:   tblInfo.Name,
		Target: &btapb.DropRowRangeRequest_RowKeyPrefix{[]byte("DDD")},
	}
	if _, err = s.DropRowRange(ctx, req); err != nil {
		t.Fatalf("Dropping second range: %v", err)
	}
	got, want = len(tbl.rows), tblSize-(2*count)
	if got != want {
		t.Errorf("Row count after second drop: got %d (%v), want %d", got, tbl.rows, want)
	}

	req = &btapb.DropRowRangeRequest{
		Name:   tblInfo.Name,
		Target: &btapb.DropRowRangeRequest_RowKeyPrefix{[]byte("XXX")},
	}
	if _, err = s.DropRowRange(ctx, req); err != nil {
		t.Fatalf("Dropping invalid range: %v", err)
	}
	got, want = len(tbl.rows), tblSize-(2*count)
	if got != want {
		t.Errorf("Row count after invalid drop: got %d (%v), want %d", got, tbl.rows, want)
	}

	req = &btapb.DropRowRangeRequest{
		Name:   tblInfo.Name,
		Target: &btapb.DropRowRangeRequest_DeleteAllDataFromTable{true},
	}
	if _, err = s.DropRowRange(ctx, req); err != nil {
		t.Fatalf("Dropping all data: %v", err)
	}
	got, want = len(tbl.rows), 0
	if got != want {
		t.Errorf("Row count after drop all: got %d, want %d", got, want)
	}

	// Test that we can write rows, delete some and then write them again.
	count = 1
	doWrite()

	req = &btapb.DropRowRangeRequest{
		Name:   tblInfo.Name,
		Target: &btapb.DropRowRangeRequest_DeleteAllDataFromTable{true},
	}
	if _, err = s.DropRowRange(ctx, req); err != nil {
		t.Fatalf("Dropping all data: %v", err)
	}
	got, want = len(tbl.rows), 0
	if got != want {
		t.Errorf("Row count after drop all: got %d, want %d", got, want)
	}

	doWrite()
	got, want = len(tbl.rows), len(prefixes)
	if got != want {
		t.Errorf("Row count after rewrite: got %d, want %d", got, want)
	}

	req = &btapb.DropRowRangeRequest{
		Name:   tblInfo.Name,
		Target: &btapb.DropRowRangeRequest_RowKeyPrefix{[]byte("BBB")},
	}
	if _, err = s.DropRowRange(ctx, req); err != nil {
		t.Fatalf("Dropping range: %v", err)
	}
	doWrite()
	got, want = len(tbl.rows), len(prefixes)
	if got != want {
		t.Errorf("Row count after drop range: got %d, want %d", got, want)
	}
}

type MockReadRowsServer struct {
	responses []*btpb.ReadRowsResponse
	grpc.ServerStream
}

func (s *MockReadRowsServer) Send(resp *btpb.ReadRowsResponse) error {
	s.responses = append(s.responses, resp)
	return nil
}

func TestReadRowsOrder(t *testing.T) {
	s := &server{
		tables: make(map[string]*table),
	}
	ctx := context.Background()
	newTbl := btapb.Table{
		ColumnFamilies: map[string]*btapb.ColumnFamily{
			"cf0": {GcRule: &btapb.GcRule{Rule: &btapb.GcRule_MaxNumVersions{1}}},
		},
	}
	tblInfo, err := s.CreateTable(ctx, &btapb.CreateTableRequest{Parent: "cluster", TableId: "t", Table: &newTbl})
	if err != nil {
		t.Fatalf("Creating table: %v", err)
	}
	count := 3
	mcf := func(i int) *btapb.ModifyColumnFamiliesRequest {
		return &btapb.ModifyColumnFamiliesRequest{
			Name: tblInfo.Name,
			Modifications: []*btapb.ModifyColumnFamiliesRequest_Modification{{
				Id:  "cf" + strconv.Itoa(i),
				Mod: &btapb.ModifyColumnFamiliesRequest_Modification_Create{&btapb.ColumnFamily{}},
			}},
		}
	}
	for i := 1; i <= count; i++ {
		_, err = s.ModifyColumnFamilies(ctx, mcf(i))
		if err != nil {
			t.Fatal(err)
		}
	}
	// Populate the table.
	for fc := 0; fc < count; fc++ {
		for cc := count; cc > 0; cc-- {
			for tc := 0; tc < count; tc++ {
				req := &btpb.MutateRowRequest{
					TableName: tblInfo.Name,
					RowKey:    []byte("row"),
					Mutations: []*btpb.Mutation{{
						Mutation: &btpb.Mutation_SetCell_{&btpb.Mutation_SetCell{
							FamilyName:      "cf" + strconv.Itoa(fc),
							ColumnQualifier: []byte("col" + strconv.Itoa(cc)),
							TimestampMicros: int64((tc + 1) * 1000),
							Value:           []byte{},
						}},
					}},
				}
				if _, err := s.MutateRow(ctx, req); err != nil {
					t.Fatalf("Populating table: %v", err)
				}
			}
		}
	}
	req := &btpb.ReadRowsRequest{
		TableName: tblInfo.Name,
		Rows:      &btpb.RowSet{RowKeys: [][]byte{[]byte("row")}},
	}
	mock := &MockReadRowsServer{}
	if err = s.ReadRows(req, mock); err != nil {
		t.Errorf("ReadRows error: %v", err)
	}
	if len(mock.responses) == 0 {
		t.Fatal("Response count: got 0, want > 0")
	}
	if len(mock.responses[0].Chunks) != 27 {
		t.Fatalf("Chunk count: got %d, want 27", len(mock.responses[0].Chunks))
	}
	testOrder := func(ms *MockReadRowsServer) {
		var prevFam, prevCol string
		var prevTime int64
		for _, cc := range ms.responses[0].Chunks {
			if prevFam == "" {
				prevFam = cc.FamilyName.Value
				prevCol = string(cc.Qualifier.Value)
				prevTime = cc.TimestampMicros
				continue
			}
			if cc.FamilyName.Value < prevFam {
				t.Errorf("Family order is not correct: got %s < %s", cc.FamilyName.Value, prevFam)
			} else if cc.FamilyName.Value == prevFam {
				if string(cc.Qualifier.Value) < prevCol {
					t.Errorf("Column order is not correct: got %s < %s", string(cc.Qualifier.Value), prevCol)
				} else if string(cc.Qualifier.Value) == prevCol {
					if cc.TimestampMicros > prevTime {
						t.Errorf("Cell order is not correct: got %d > %d", cc.TimestampMicros, prevTime)
					}
				}
			}
			prevFam = cc.FamilyName.Value
			prevCol = string(cc.Qualifier.Value)
			prevTime = cc.TimestampMicros
		}
	}
	testOrder(mock)

	// Read with interleave filter.
	inter := &btpb.RowFilter_Interleave{}
	fnr := &btpb.RowFilter{Filter: &btpb.RowFilter_FamilyNameRegexFilter{"1"}}
	cqr := &btpb.RowFilter{Filter: &btpb.RowFilter_ColumnQualifierRegexFilter{[]byte("2")}}
	inter.Filters = append(inter.Filters, fnr, cqr)
	req = &btpb.ReadRowsRequest{
		TableName: tblInfo.Name,
		Rows:      &btpb.RowSet{RowKeys: [][]byte{[]byte("row")}},
		Filter: &btpb.RowFilter{
			Filter: &btpb.RowFilter_Interleave_{inter},
		},
	}
	mock = &MockReadRowsServer{}
	if err = s.ReadRows(req, mock); err != nil {
		t.Errorf("ReadRows error: %v", err)
	}
	if len(mock.responses) == 0 {
		t.Fatal("Response count: got 0, want > 0")
	}
	if len(mock.responses[0].Chunks) != 18 {
		t.Fatalf("Chunk count: got %d, want 18", len(mock.responses[0].Chunks))
	}
	testOrder(mock)

	// Check order after ReadModifyWriteRow.
	rmw := func(i int) *btpb.ReadModifyWriteRowRequest {
		return &btpb.ReadModifyWriteRowRequest{
			TableName: tblInfo.Name,
			RowKey:    []byte("row"),
			Rules: []*btpb.ReadModifyWriteRule{{
				FamilyName:      "cf3",
				ColumnQualifier: []byte("col" + strconv.Itoa(i)),
				Rule:            &btpb.ReadModifyWriteRule_IncrementAmount{1},
			}},
		}
	}
	for i := count; i > 0; i-- {
		s.ReadModifyWriteRow(ctx, rmw(i))
	}
	req = &btpb.ReadRowsRequest{
		TableName: tblInfo.Name,
		Rows:      &btpb.RowSet{RowKeys: [][]byte{[]byte("row")}},
	}
	mock = &MockReadRowsServer{}
	if err = s.ReadRows(req, mock); err != nil {
		t.Errorf("ReadRows error: %v", err)
	}
	if len(mock.responses) == 0 {
		t.Fatal("Response count: got 0, want > 0")
	}
	if len(mock.responses[0].Chunks) != 30 {
		t.Fatalf("Chunk count: got %d, want 30", len(mock.responses[0].Chunks))
	}
	testOrder(mock)
}
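
MockSampleRowKeysServer and MockReadRowsServer above both rely on the same in-process trick for testing a server-streaming gRPC handler: embed the grpc.ServerStream interface to satisfy the generated stream type, and override only Send so responses are captured instead of written to a connection. A generic sketch of the pattern; FooResponse and MockFooServer are hypothetical stand-ins for a generated proto message and its stream type:

package example

import "google.golang.org/grpc"

// FooResponse stands in for a generated proto response message.
type FooResponse struct{ Payload string }

// MockFooServer satisfies a generated Foo_BarServer-style interface.
// The embedded grpc.ServerStream is a nil interface value, so a handler
// that calls anything other than Send will panic, which keeps tests honest.
type MockFooServer struct {
	responses []*FooResponse
	grpc.ServerStream
}

// Send records the response instead of streaming it over the network.
func (s *MockFooServer) Send(resp *FooResponse) error {
	s.responses = append(s.responses, resp)
	return nil
}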
789 vendor/cloud.google.com/go/bigtable/cmd/cbt/cbt.go generated vendored Normal file
@@ -0,0 +1,789 @@
/*
Copyright 2015 Google Inc. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package main

// Command docs are in cbtdoc.go.

import (
	"bytes"
	"flag"
	"fmt"
	"go/format"
	"io"
	"log"
	"os"
	"regexp"
	"sort"
	"strconv"
	"strings"
	"text/tabwriter"
	"text/template"
	"time"

	"cloud.google.com/go/bigtable"
	"cloud.google.com/go/bigtable/internal/cbtconfig"
	"golang.org/x/net/context"
	"google.golang.org/api/option"
	"google.golang.org/grpc"
)

var (
	oFlag = flag.String("o", "", "if set, redirect stdout to this file")

	config              *cbtconfig.Config
	client              *bigtable.Client
	adminClient         *bigtable.AdminClient
	instanceAdminClient *bigtable.InstanceAdminClient
)

func getCredentialOpts(opts []option.ClientOption) []option.ClientOption {
	if ts := config.TokenSource; ts != nil {
		opts = append(opts, option.WithTokenSource(ts))
	}
	if tlsCreds := config.TLSCreds; tlsCreds != nil {
		opts = append(opts, option.WithGRPCDialOption(grpc.WithTransportCredentials(tlsCreds)))
	}
	return opts
}

func getClient() *bigtable.Client {
	if client == nil {
		var opts []option.ClientOption
		if ep := config.DataEndpoint; ep != "" {
			opts = append(opts, option.WithEndpoint(ep))
		}
		opts = getCredentialOpts(opts)
		var err error
		client, err = bigtable.NewClient(context.Background(), config.Project, config.Instance, opts...)
		if err != nil {
			log.Fatalf("Making bigtable.Client: %v", err)
		}
	}
	return client
}

func getAdminClient() *bigtable.AdminClient {
	if adminClient == nil {
		var opts []option.ClientOption
		if ep := config.AdminEndpoint; ep != "" {
			opts = append(opts, option.WithEndpoint(ep))
		}
		opts = getCredentialOpts(opts)
		var err error
		adminClient, err = bigtable.NewAdminClient(context.Background(), config.Project, config.Instance, opts...)
		if err != nil {
			log.Fatalf("Making bigtable.AdminClient: %v", err)
		}
	}
	return adminClient
}

func getInstanceAdminClient() *bigtable.InstanceAdminClient {
	if instanceAdminClient == nil {
		var opts []option.ClientOption
		if ep := config.AdminEndpoint; ep != "" {
			opts = append(opts, option.WithEndpoint(ep))
		}
		opts = getCredentialOpts(opts)
		var err error
		instanceAdminClient, err = bigtable.NewInstanceAdminClient(context.Background(), config.Project, opts...)
		if err != nil {
			log.Fatalf("Making bigtable.InstanceAdminClient: %v", err)
		}
	}
	return instanceAdminClient
}

func main() {
	var err error
	config, err = cbtconfig.Load()
	if err != nil {
		log.Fatal(err)
	}
	config.RegisterFlags()

	flag.Usage = func() { usage(os.Stderr) }
	flag.Parse()
	if flag.NArg() == 0 {
		usage(os.Stderr)
		os.Exit(1)
	}

	if *oFlag != "" {
		f, err := os.Create(*oFlag)
		if err != nil {
			log.Fatal(err)
		}
		defer func() {
			if err := f.Close(); err != nil {
				log.Fatal(err)
			}
		}()
		os.Stdout = f
	}

	ctx := context.Background()
	for _, cmd := range commands {
		if cmd.Name == flag.Arg(0) {
			if err := config.CheckFlags(cmd.Required); err != nil {
				log.Fatal(err)
			}
			cmd.do(ctx, flag.Args()[1:]...)
			return
		}
	}
	log.Fatalf("Unknown command %q", flag.Arg(0))
}

func usage(w io.Writer) {
	fmt.Fprintf(w, "Usage: %s [flags] <command> ...\n", os.Args[0])
	flag.CommandLine.SetOutput(w)
	flag.CommandLine.PrintDefaults()
	fmt.Fprintf(w, "\n%s", cmdSummary)
}

var cmdSummary string // generated in init, below

func init() {
	var buf bytes.Buffer
	tw := tabwriter.NewWriter(&buf, 10, 8, 4, '\t', 0)
	for _, cmd := range commands {
		fmt.Fprintf(tw, "cbt %s\t%s\n", cmd.Name, cmd.Desc)
	}
	tw.Flush()
	buf.WriteString(configHelp)
	cmdSummary = buf.String()
}

var configHelp = `
For convenience, values of the -project, -instance, -creds,
-admin-endpoint and -data-endpoint flags may be specified in
` + cbtconfig.Filename() + ` in this format:
	project = my-project-123
	instance = my-instance
	creds = path-to-account-key.json
	admin-endpoint = hostname:port
	data-endpoint = hostname:port
All values are optional, and all will be overridden by flags.
`

var commands = []struct {
	Name, Desc string
	do         func(context.Context, ...string)
	Usage      string
	Required   cbtconfig.RequiredFlags
}{
	{
		Name:     "count",
		Desc:     "Count rows in a table",
		do:       doCount,
		Usage:    "cbt count <table>",
		Required: cbtconfig.ProjectAndInstanceRequired,
	},
	{
		Name:     "createfamily",
		Desc:     "Create a column family",
		do:       doCreateFamily,
		Usage:    "cbt createfamily <table> <family>",
		Required: cbtconfig.ProjectAndInstanceRequired,
	},
	{
		Name: "createtable",
		Desc: "Create a table",
		do:   doCreateTable,
		Usage: "cbt createtable <table> [initial_splits...]\n" +
			"  initial_splits=row A row key to be used to initially split the table " +
			"into multiple tablets. Can be repeated to create multiple splits.",
		Required: cbtconfig.ProjectAndInstanceRequired,
	},
	{
		Name:     "deletefamily",
		Desc:     "Delete a column family",
		do:       doDeleteFamily,
		Usage:    "cbt deletefamily <table> <family>",
		Required: cbtconfig.ProjectAndInstanceRequired,
	},
	{
		Name:     "deleterow",
		Desc:     "Delete a row",
		do:       doDeleteRow,
		Usage:    "cbt deleterow <table> <row>",
		Required: cbtconfig.ProjectAndInstanceRequired,
	},
	{
		Name:     "deletetable",
		Desc:     "Delete a table",
		do:       doDeleteTable,
		Usage:    "cbt deletetable <table>",
		Required: cbtconfig.ProjectAndInstanceRequired,
	},
	{
		Name:     "doc",
		Desc:     "Print godoc-suitable documentation for cbt",
		do:       doDoc,
		Usage:    "cbt doc",
		Required: cbtconfig.NoneRequired,
	},
	{
		Name:     "help",
		Desc:     "Print help text",
		do:       doHelp,
		Usage:    "cbt help [command]",
		Required: cbtconfig.NoneRequired,
	},
	{
		Name:     "listinstances",
		Desc:     "List instances in a project",
		do:       doListInstances,
		Usage:    "cbt listinstances",
		Required: cbtconfig.ProjectRequired,
	},
	{
		Name:     "lookup",
		Desc:     "Read from a single row",
		do:       doLookup,
		Usage:    "cbt lookup <table> <row>",
		Required: cbtconfig.ProjectAndInstanceRequired,
	},
	{
		Name: "ls",
		Desc: "List tables and column families",
		do:   doLS,
		Usage: "cbt ls List tables\n" +
			"cbt ls <table> List column families in <table>",
		Required: cbtconfig.ProjectAndInstanceRequired,
	},
	{
		Name:     "mddoc",
		Desc:     "Print documentation for cbt in Markdown format",
		do:       doMDDoc,
		Usage:    "cbt mddoc",
		Required: cbtconfig.NoneRequired,
	},
	{
		Name: "read",
		Desc: "Read rows",
		do:   doRead,
		Usage: "cbt read <table> [start=<row>] [end=<row>] [prefix=<prefix>] [count=<n>]\n" +
			"  start=<row> Start reading at this row\n" +
			"  end=<row> Stop reading before this row\n" +
			"  prefix=<prefix> Read rows with this prefix\n" +
			"  count=<n> Read only this many rows\n",
		Required: cbtconfig.ProjectAndInstanceRequired,
	},
	{
		Name: "set",
		Desc: "Set value of a cell",
		do:   doSet,
		Usage: "cbt set <table> <row> family:column=val[@ts] ...\n" +
			"  family:column=val[@ts] may be repeated to set multiple cells.\n" +
			"\n" +
			"  ts is an optional integer timestamp.\n" +
			"  If it cannot be parsed, the `@ts` part will be\n" +
			"  interpreted as part of the value.",
		Required: cbtconfig.ProjectAndInstanceRequired,
	},
	{
		Name: "setgcpolicy",
		Desc: "Set the GC policy for a column family",
		do:   doSetGCPolicy,
		Usage: "cbt setgcpolicy <table> <family> ( maxage=<d> | maxversions=<n> )\n" +
			"\n" +
			`  maxage=<d>		Maximum timestamp age to preserve (e.g. "1h", "4d")` + "\n" +
			"  maxversions=<n>	Maximum number of versions to preserve",
		Required: cbtconfig.ProjectAndInstanceRequired,
	},
}

func doCount(ctx context.Context, args ...string) {
	if len(args) != 1 {
		log.Fatal("usage: cbt count <table>")
	}
	tbl := getClient().Open(args[0])

	n := 0
	err := tbl.ReadRows(ctx, bigtable.InfiniteRange(""), func(_ bigtable.Row) bool {
		n++
		return true
	}, bigtable.RowFilter(bigtable.StripValueFilter()))
	if err != nil {
		log.Fatalf("Reading rows: %v", err)
	}
	fmt.Println(n)
}

func doCreateFamily(ctx context.Context, args ...string) {
	if len(args) != 2 {
		log.Fatal("usage: cbt createfamily <table> <family>")
	}
	err := getAdminClient().CreateColumnFamily(ctx, args[0], args[1])
	if err != nil {
		log.Fatalf("Creating column family: %v", err)
	}
}

func doCreateTable(ctx context.Context, args ...string) {
	if len(args) < 1 {
		log.Fatal("usage: cbt createtable <table> [initial_splits...]")
	}
	var err error
	if len(args) > 1 {
		splits := args[1:]
		err = getAdminClient().CreatePresplitTable(ctx, args[0], splits)
	} else {
		err = getAdminClient().CreateTable(ctx, args[0])
	}
	if err != nil {
		log.Fatalf("Creating table: %v", err)
	}
}

func doDeleteFamily(ctx context.Context, args ...string) {
	if len(args) != 2 {
		log.Fatal("usage: cbt deletefamily <table> <family>")
	}
	err := getAdminClient().DeleteColumnFamily(ctx, args[0], args[1])
	if err != nil {
		log.Fatalf("Deleting column family: %v", err)
	}
}

func doDeleteRow(ctx context.Context, args ...string) {
	if len(args) != 2 {
		log.Fatal("usage: cbt deleterow <table> <row>")
	}
	tbl := getClient().Open(args[0])
	mut := bigtable.NewMutation()
	mut.DeleteRow()
	if err := tbl.Apply(ctx, args[1], mut); err != nil {
		log.Fatalf("Deleting row: %v", err)
	}
}

func doDeleteTable(ctx context.Context, args ...string) {
	if len(args) != 1 {
		log.Fatalf("Can't do `cbt deletetable %s`", args)
	}
	err := getAdminClient().DeleteTable(ctx, args[0])
	if err != nil {
		log.Fatalf("Deleting table: %v", err)
	}
}

// to break circular dependencies
var (
	doDocFn   func(ctx context.Context, args ...string)
	doHelpFn  func(ctx context.Context, args ...string)
	doMDDocFn func(ctx context.Context, args ...string)
)

func init() {
	doDocFn = doDocReal
	doHelpFn = doHelpReal
	doMDDocFn = doMDDocReal
}

func doDoc(ctx context.Context, args ...string)   { doDocFn(ctx, args...) }
func doHelp(ctx context.Context, args ...string)  { doHelpFn(ctx, args...) }
func doMDDoc(ctx context.Context, args ...string) { doMDDocFn(ctx, args...) }

func docFlags() []*flag.Flag {
	// Only include specific flags, in a specific order.
	var flags []*flag.Flag
	for _, name := range []string{"project", "instance", "creds"} {
		f := flag.Lookup(name)
		if f == nil {
			log.Fatalf("Flag not linked: -%s", name)
		}
		flags = append(flags, f)
	}
	return flags
}

func doDocReal(ctx context.Context, args ...string) {
	data := map[string]interface{}{
		"Commands": commands,
		"Flags":    docFlags(),
	}
	var buf bytes.Buffer
	if err := docTemplate.Execute(&buf, data); err != nil {
		log.Fatalf("Bad doc template: %v", err)
	}
	out, err := format.Source(buf.Bytes())
	if err != nil {
		log.Fatalf("Bad doc output: %v", err)
	}
	os.Stdout.Write(out)
}

func indentLines(s, ind string) string {
	ss := strings.Split(s, "\n")
	for i, p := range ss {
		ss[i] = ind + p
	}
	return strings.Join(ss, "\n")
}

var docTemplate = template.Must(template.New("doc").Funcs(template.FuncMap{
	"indent": indentLines,
}).
	Parse(`
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// DO NOT EDIT. THIS IS AUTOMATICALLY GENERATED.
// Run "go generate" to regenerate.
//go:generate go run cbt.go -o cbtdoc.go doc

/*
Cbt is a tool for doing basic interactions with Cloud Bigtable.

Usage:

	cbt [options] command [arguments]

The commands are:
{{range .Commands}}
	{{printf "%-25s %s" .Name .Desc}}{{end}}

Use "cbt help <command>" for more information about a command.

The options are:
{{range .Flags}}
	-{{.Name}} string
		{{.Usage}}{{end}}

{{range .Commands}}
{{.Desc}}

Usage:
{{indent .Usage "\t"}}



{{end}}
*/
package main
`))

func doHelpReal(ctx context.Context, args ...string) {
	if len(args) == 0 {
		usage(os.Stdout)
		return
	}
	for _, cmd := range commands {
		if cmd.Name == args[0] {
			fmt.Println(cmd.Usage)
			return
		}
	}
	log.Fatalf("Don't know command %q", args[0])
}

func doListInstances(ctx context.Context, args ...string) {
	if len(args) != 0 {
		log.Fatalf("usage: cbt listinstances")
	}
	is, err := getInstanceAdminClient().Instances(ctx)
	if err != nil {
		log.Fatalf("Getting list of instances: %v", err)
	}
	tw := tabwriter.NewWriter(os.Stdout, 10, 8, 4, '\t', 0)
	fmt.Fprintf(tw, "Instance Name\tInfo\n")
	fmt.Fprintf(tw, "-------------\t----\n")
	for _, i := range is {
		fmt.Fprintf(tw, "%s\t%s\n", i.Name, i.DisplayName)
	}
	tw.Flush()
}

func doLookup(ctx context.Context, args ...string) {
	if len(args) != 2 {
		log.Fatalf("usage: cbt lookup <table> <row>")
	}
	table, row := args[0], args[1]
	tbl := getClient().Open(table)
	r, err := tbl.ReadRow(ctx, row)
	if err != nil {
		log.Fatalf("Reading row: %v", err)
	}
	printRow(r)
}

func printRow(r bigtable.Row) {
	fmt.Println(strings.Repeat("-", 40))
	fmt.Println(r.Key())

	var fams []string
	for fam := range r {
		fams = append(fams, fam)
	}
	sort.Strings(fams)
	for _, fam := range fams {
		ris := r[fam]
		sort.Sort(byColumn(ris))
		for _, ri := range ris {
			ts := time.Unix(0, int64(ri.Timestamp)*1e3)
			fmt.Printf("  %-40s @ %s\n", ri.Column, ts.Format("2006/01/02-15:04:05.000000"))
			fmt.Printf("    %q\n", ri.Value)
		}
	}
}

type byColumn []bigtable.ReadItem

func (b byColumn) Len() int           { return len(b) }
func (b byColumn) Swap(i, j int)      { b[i], b[j] = b[j], b[i] }
func (b byColumn) Less(i, j int) bool { return b[i].Column < b[j].Column }

func doLS(ctx context.Context, args ...string) {
	switch len(args) {
	default:
		log.Fatalf("Can't do `cbt ls %s`", args)
	case 0:
		tables, err := getAdminClient().Tables(ctx)
		if err != nil {
			log.Fatalf("Getting list of tables: %v", err)
		}
		sort.Strings(tables)
		for _, table := range tables {
			fmt.Println(table)
		}
	case 1:
		table := args[0]
		ti, err := getAdminClient().TableInfo(ctx, table)
		if err != nil {
			log.Fatalf("Getting table info: %v", err)
		}
		sort.Strings(ti.Families)
		for _, fam := range ti.Families {
			fmt.Println(fam)
		}
	}
}

func doMDDocReal(ctx context.Context, args ...string) {
	data := map[string]interface{}{
		"Commands": commands,
		"Flags":    docFlags(),
	}
	var buf bytes.Buffer
	if err := mddocTemplate.Execute(&buf, data); err != nil {
		log.Fatalf("Bad mddoc template: %v", err)
	}
	io.Copy(os.Stdout, &buf)
}

var mddocTemplate = template.Must(template.New("mddoc").Funcs(template.FuncMap{
	"indent": indentLines,
}).
	Parse(`
Cbt is a tool for doing basic interactions with Cloud Bigtable.

Usage:

	cbt [options] command [arguments]

The commands are:
{{range .Commands}}
	{{printf "%-25s %s" .Name .Desc}}{{end}}

Use "cbt help <command>" for more information about a command.

The options are:
{{range .Flags}}
	-{{.Name}} string
		{{.Usage}}{{end}}

{{range .Commands}}
## {{.Desc}}

{{indent .Usage "\t"}}



{{end}}
`))

func doRead(ctx context.Context, args ...string) {
	if len(args) < 1 {
		log.Fatalf("usage: cbt read <table> [args ...]")
	}
	tbl := getClient().Open(args[0])

	parsed := make(map[string]string)
	for _, arg := range args[1:] {
		i := strings.Index(arg, "=")
		if i < 0 {
			log.Fatalf("Bad arg %q", arg)
		}
		key, val := arg[:i], arg[i+1:]
		switch key {
		default:
			log.Fatalf("Unknown arg key %q", key)
		case "limit":
			// Be nicer; we used to support this, but renamed it to "end".
			log.Fatalf("Unknown arg key %q; did you mean %q?", key, "end")
		case "start", "end", "prefix", "count":
			parsed[key] = val
		}
	}
	if (parsed["start"] != "" || parsed["end"] != "") && parsed["prefix"] != "" {
		log.Fatal(`"start"/"end" may not be mixed with "prefix"`)
	}

	var rr bigtable.RowRange
	if start, end := parsed["start"], parsed["end"]; end != "" {
		rr = bigtable.NewRange(start, end)
	} else if start != "" {
		rr = bigtable.InfiniteRange(start)
	}
	if prefix := parsed["prefix"]; prefix != "" {
		rr = bigtable.PrefixRange(prefix)
	}

	var opts []bigtable.ReadOption
	if count := parsed["count"]; count != "" {
		n, err := strconv.ParseInt(count, 0, 64)
		if err != nil {
			log.Fatalf("Bad count %q: %v", count, err)
		}
		opts = append(opts, bigtable.LimitRows(n))
	}

	// TODO(dsymonds): Support filters.
	err := tbl.ReadRows(ctx, rr, func(r bigtable.Row) bool {
		printRow(r)
		return true
	}, opts...)
	if err != nil {
		log.Fatalf("Reading rows: %v", err)
	}
}

var setArg = regexp.MustCompile(`([^:]+):([^=]*)=(.*)`)

func doSet(ctx context.Context, args ...string) {
	if len(args) < 3 {
		log.Fatalf("usage: cbt set <table> <row> family:[column]=val[@ts] ...")
	}
	tbl := getClient().Open(args[0])
	row := args[1]
	mut := bigtable.NewMutation()
	for _, arg := range args[2:] {
		m := setArg.FindStringSubmatch(arg)
		if m == nil {
			log.Fatalf("Bad set arg %q", arg)
		}
		val := m[3]
		ts := bigtable.Now()
		if i := strings.LastIndex(val, "@"); i >= 0 {
			// Try parsing a timestamp.
			n, err := strconv.ParseInt(val[i+1:], 0, 64)
			if err == nil {
				val = val[:i]
				ts = bigtable.Timestamp(n)
			}
		}
		mut.Set(m[1], m[2], ts, []byte(val))
	}
	if err := tbl.Apply(ctx, row, mut); err != nil {
		log.Fatalf("Applying mutation: %v", err)
	}
}

func doSetGCPolicy(ctx context.Context, args ...string) {
	if len(args) < 3 {
		log.Fatalf("usage: cbt setgcpolicy <table> <family> ( maxage=<d> | maxversions=<n> )")
	}
	table := args[0]
	fam := args[1]

	var pol bigtable.GCPolicy
	switch p := args[2]; {
	case strings.HasPrefix(p, "maxage="):
		d, err := parseDuration(p[7:])
		if err != nil {
			log.Fatal(err)
		}
		pol = bigtable.MaxAgePolicy(d)
	case strings.HasPrefix(p, "maxversions="):
		n, err := strconv.ParseUint(p[12:], 10, 16)
		if err != nil {
			log.Fatal(err)
		}
		pol = bigtable.MaxVersionsPolicy(int(n))
	default:
		log.Fatalf("Bad GC policy %q", p)
	}
	if err := getAdminClient().SetGCPolicy(ctx, table, fam, pol); err != nil {
		log.Fatalf("Setting GC policy: %v", err)
	}
}

// parseDuration parses a duration string.
// It is similar to Go's time.ParseDuration, except with a different set of supported units,
// and only simple formats supported.
func parseDuration(s string) (time.Duration, error) {
	// [0-9]+[a-z]+

	// Split [0-9]+ from [a-z]+.
	i := 0
	for ; i < len(s); i++ {
		c := s[i]
		if c < '0' || c > '9' {
			break
		}
	}
	ds, u := s[:i], s[i:]
	if ds == "" || u == "" {
		return 0, fmt.Errorf("invalid duration %q", s)
	}
	// Parse them.
	d, err := strconv.ParseUint(ds, 10, 32)
	if err != nil {
		return 0, fmt.Errorf("invalid duration %q: %v", s, err)
	}
	unit, ok := unitMap[u]
	if !ok {
		return 0, fmt.Errorf("unknown unit %q in duration %q", u, s)
	}
	if d > uint64((1<<63-1)/unit) {
		// overflow
		return 0, fmt.Errorf("invalid duration %q overflows", s)
	}
	return time.Duration(d) * unit, nil
}

var unitMap = map[string]time.Duration{
	"ms": time.Millisecond,
	"s":  time.Second,
	"m":  time.Minute,
	"h":  time.Hour,
	"d":  24 * time.Hour,
}
59 vendor/cloud.google.com/go/bigtable/cmd/cbt/cbt_test.go generated vendored Normal file
@@ -0,0 +1,59 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package main

import (
	"testing"
	"time"
)

func TestParseDuration(t *testing.T) {
	tests := []struct {
		in string
		// out or fail are mutually exclusive
		out  time.Duration
		fail bool
	}{
		{in: "10ms", out: 10 * time.Millisecond},
		{in: "3s", out: 3 * time.Second},
		{in: "60m", out: 60 * time.Minute},
		{in: "12h", out: 12 * time.Hour},
		{in: "7d", out: 168 * time.Hour},

		{in: "", fail: true},
		{in: "0", fail: true},
		{in: "7ns", fail: true},
		{in: "14mo", fail: true},
		{in: "3.5h", fail: true},
		{in: "106752d", fail: true}, // overflow
	}
	for _, tc := range tests {
		got, err := parseDuration(tc.in)
		if !tc.fail && err != nil {
			t.Errorf("parseDuration(%q) unexpectedly failed: %v", tc.in, err)
			continue
		}
		if tc.fail && err == nil {
			t.Errorf("parseDuration(%q) did not fail", tc.in)
			continue
		}
		if tc.fail {
			continue
		}
		if got != tc.out {
			t.Errorf("parseDuration(%q) = %v, want %v", tc.in, got, tc.out)
		}
	}
}
191 vendor/cloud.google.com/go/bigtable/cmd/cbt/cbtdoc.go generated vendored Normal file
@@ -0,0 +1,191 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// DO NOT EDIT. THIS IS AUTOMATICALLY GENERATED.
// Run "go generate" to regenerate.
//go:generate go run cbt.go -o cbtdoc.go doc

/*
Cbt is a tool for doing basic interactions with Cloud Bigtable.

Usage:

	cbt [options] command [arguments]

The commands are:

	count                     Count rows in a table
	createfamily              Create a column family
	createtable               Create a table
	deletefamily              Delete a column family
	deleterow                 Delete a row
	deletetable               Delete a table
	doc                       Print godoc-suitable documentation for cbt
	help                      Print help text
	listinstances             List instances in a project
	lookup                    Read from a single row
	ls                        List tables and column families
	mddoc                     Print documentation for cbt in Markdown format
	read                      Read rows
	set                       Set value of a cell
	setgcpolicy               Set the GC policy for a column family

Use "cbt help <command>" for more information about a command.

The options are:

	-project string
		project ID
	-instance string
		Cloud Bigtable instance
	-creds string
		if set, use application credentials in this file


Count rows in a table

Usage:
	cbt count <table>


Create a column family

Usage:
	cbt createfamily <table> <family>


Create a table

Usage:
	cbt createtable <table>


Delete a column family

Usage:
	cbt deletefamily <table> <family>


Delete a row

Usage:
	cbt deleterow <table> <row>


Delete a table

Usage:
	cbt deletetable <table>


Print godoc-suitable documentation for cbt

Usage:
	cbt doc


Print help text

Usage:
	cbt help [command]


List instances in a project

Usage:
	cbt listinstances


Read from a single row

Usage:
	cbt lookup <table> <row>


List tables and column families

Usage:
	cbt ls            List tables
	cbt ls <table>    List column families in <table>


Print documentation for cbt in Markdown format

Usage:
	cbt mddoc


Read rows

Usage:
	cbt read <table> [start=<row>] [end=<row>] [prefix=<prefix>] [count=<n>]
	  start=<row>      Start reading at this row
	  end=<row>        Stop reading before this row
	  prefix=<prefix>  Read rows with this prefix
	  count=<n>        Read only this many rows


Set value of a cell

Usage:
	cbt set <table> <row> family:column=val[@ts] ...
	  family:column=val[@ts] may be repeated to set multiple cells.

	  ts is an optional integer timestamp.
	  If it cannot be parsed, the `@ts` part will be
	  interpreted as part of the value.


Set the GC policy for a column family

Usage:
	cbt setgcpolicy <table> <family> ( maxage=<d> | maxversions=<n> )

	  maxage=<d>       Maximum timestamp age to preserve (e.g. "1h", "4d")
	  maxversions=<n>  Maximum number of versions to preserve

*/
package main
44 vendor/cloud.google.com/go/bigtable/cmd/emulator/cbtemulator.go generated vendored Normal file
@@ -0,0 +1,44 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

/*
cbtemulator launches the in-memory Cloud Bigtable server on the given address.
*/
package main

import (
	"flag"
	"fmt"
	"log"

	"cloud.google.com/go/bigtable/bttest"
	"google.golang.org/grpc"
)

var (
	host = flag.String("host", "localhost", "the address to bind to on the local machine")
	port = flag.Int("port", 9000, "the port number to bind to on the local machine")
)

func main() {
	grpc.EnableTracing = false
	flag.Parse()
	srv, err := bttest.NewServer(fmt.Sprintf("%s:%d", *host, *port))
	if err != nil {
		log.Fatalf("failed to start emulator: %v", err)
	}

	fmt.Printf("Cloud Bigtable emulator running on %s\n", srv.Addr)
	select {}
}
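
cbtemulator only prints the address it serves on; a client reaches it over a plain gRPC connection, just as ExampleNewServer above does with an in-process bttest server. A minimal sketch, assuming the emulator is running with its default flags (localhost:9000); the project and instance names are arbitrary, since the emulator does not validate them:

package example

import (
	"log"

	"cloud.google.com/go/bigtable"
	"golang.org/x/net/context"
	"google.golang.org/api/option"
	"google.golang.org/grpc"
)

// connectToEmulator dials the local emulator without TLS or credentials
// and returns a data client bound to placeholder project/instance names.
func connectToEmulator(ctx context.Context) *bigtable.Client {
	conn, err := grpc.Dial("localhost:9000", grpc.WithInsecure())
	if err != nil {
		log.Fatalf("dialing emulator: %v", err)
	}
	client, err := bigtable.NewClient(ctx, "proj", "instance", option.WithGRPCConn(conn))
	if err != nil {
		log.Fatalf("creating client: %v", err)
	}
	return client
}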
186 vendor/cloud.google.com/go/bigtable/cmd/loadtest/loadtest.go generated vendored Normal file
@@ -0,0 +1,186 @@
/*
Copyright 2015 Google Inc. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

/*
Loadtest does some load testing through the Go client library for Cloud Bigtable.
*/
package main

import (
	"bytes"
	"flag"
	"fmt"
	"log"
	"math/rand"
	"os"
	"sync"
	"sync/atomic"
	"time"

	"cloud.google.com/go/bigtable"
	"cloud.google.com/go/bigtable/internal/cbtconfig"
	"cloud.google.com/go/bigtable/internal/stat"
	"golang.org/x/net/context"
	"google.golang.org/api/option"
)

var (
	runFor       = flag.Duration("run_for", 5*time.Second, "how long to run the load test for")
	scratchTable = flag.String("scratch_table", "loadtest-scratch", "name of table to use; should not already exist")
	csvOutput    = flag.String("csv_output", "",
		"output path for statistics in .csv format. If this file already exists it will be overwritten.")
	poolSize = flag.Int("pool_size", 1, "size of the gRPC connection pool to use for the data client")
	reqCount = flag.Int("req_count", 100, "number of concurrent requests")

	config      *cbtconfig.Config
	client      *bigtable.Client
	adminClient *bigtable.AdminClient
)

func main() {
	var err error
	config, err = cbtconfig.Load()
	if err != nil {
		log.Fatal(err)
	}
	config.RegisterFlags()

	flag.Parse()
	if err := config.CheckFlags(cbtconfig.ProjectAndInstanceRequired); err != nil {
		log.Fatal(err)
	}
	if config.Creds != "" {
		os.Setenv("GOOGLE_APPLICATION_CREDENTIALS", config.Creds)
	}
	if flag.NArg() != 0 {
		flag.Usage()
		os.Exit(1)
	}

	var options []option.ClientOption
	if *poolSize > 1 {
		options = append(options, option.WithGRPCConnectionPool(*poolSize))
	}

	var csvFile *os.File
	if *csvOutput != "" {
		csvFile, err = os.Create(*csvOutput)
		if err != nil {
			log.Fatalf("creating csv output file: %v", err)
		}
		defer csvFile.Close()
		log.Printf("Writing statistics to %q ...", *csvOutput)
	}

	log.Printf("Dialing connections...")
	client, err = bigtable.NewClient(context.Background(), config.Project, config.Instance, options...)
	if err != nil {
		log.Fatalf("Making bigtable.Client: %v", err)
	}
	defer client.Close()
	adminClient, err = bigtable.NewAdminClient(context.Background(), config.Project, config.Instance)
	if err != nil {
		log.Fatalf("Making bigtable.AdminClient: %v", err)
	}
	defer adminClient.Close()

	// Create a scratch table.
	log.Printf("Setting up scratch table...")
	if err := adminClient.CreateTable(context.Background(), *scratchTable); err != nil {
		log.Fatalf("Making scratch table %q: %v", *scratchTable, err)
	}
	if err := adminClient.CreateColumnFamily(context.Background(), *scratchTable, "f"); err != nil {
		log.Fatalf("Making scratch table column family: %v", err)
	}
	// Upon a successful run, delete the table. Don't bother checking for errors.
	defer adminClient.DeleteTable(context.Background(), *scratchTable)

	log.Printf("Starting load test... (run for %v)", *runFor)
	tbl := client.Open(*scratchTable)
	sem := make(chan int, *reqCount) // limit the number of requests happening at once
	var reads, writes stats
	stopTime := time.Now().Add(*runFor)
	var wg sync.WaitGroup
	for time.Now().Before(stopTime) {
		sem <- 1
		wg.Add(1)
		go func() {
			defer wg.Done()
			defer func() { <-sem }()

			ok := true
			opStart := time.Now()
			var stats *stats
			defer func() {
				stats.Record(ok, time.Since(opStart))
			}()

			row := fmt.Sprintf("row%d", rand.Intn(100)) // operate on 1 of 100 rows

			switch rand.Intn(10) {
			default:
				// read
				stats = &reads
				_, err := tbl.ReadRow(context.Background(), row, bigtable.RowFilter(bigtable.LatestNFilter(1)))
				if err != nil {
					log.Printf("Error doing read: %v", err)
					ok = false
				}
			case 0, 1, 2, 3, 4:
				// write
				stats = &writes
				mut := bigtable.NewMutation()
				mut.Set("f", "col", bigtable.Now(), bytes.Repeat([]byte("0"), 1<<10)) // 1 KB write
				if err := tbl.Apply(context.Background(), row, mut); err != nil {
					log.Printf("Error doing mutation: %v", err)
					ok = false
				}
			}
		}()
	}
	wg.Wait()

	readsAgg := stat.NewAggregate("reads", reads.ds, reads.tries-reads.ok)
	writesAgg := stat.NewAggregate("writes", writes.ds, writes.tries-writes.ok)
	log.Printf("Reads (%d ok / %d tries):\n%v", reads.ok, reads.tries, readsAgg)
	log.Printf("Writes (%d ok / %d tries):\n%v", writes.ok, writes.tries, writesAgg)

	if csvFile != nil {
		stat.WriteCSV([]*stat.Aggregate{readsAgg, writesAgg}, csvFile)
	}
}

var allStats int64 // atomic

type stats struct {
	mu        sync.Mutex
	tries, ok int
	ds        []time.Duration
}

func (s *stats) Record(ok bool, d time.Duration) {
	s.mu.Lock()
	s.tries++
	if ok {
		s.ok++
	}
	s.ds = append(s.ds, d)
	s.mu.Unlock()

	if n := atomic.AddInt64(&allStats, 1); n%1000 == 0 {
		log.Printf("Progress: done %d ops", n)
	}
}
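
An invocation sketch (project and instance values are placeholders; -project
and -instance come from cbtconfig.RegisterFlags, the rest are defined above):

	loadtest -project my-project -instance my-instance -run_for 30s -req_count 50 -csv_output stats.csv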
155 vendor/cloud.google.com/go/bigtable/cmd/scantest/scantest.go generated vendored Normal file
@@ -0,0 +1,155 @@
/*
Copyright 2016 Google Inc. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

/*
Scantest does scan-related load testing against Cloud Bigtable. The logic here
mimics a similar test written using the Java client.
*/
package main

import (
	"bytes"
	"flag"
	"fmt"
	"log"
	"math/rand"
	"os"
	"sync"
	"sync/atomic"
	"text/tabwriter"
	"time"

	"cloud.google.com/go/bigtable"
	"cloud.google.com/go/bigtable/internal/cbtconfig"
	"cloud.google.com/go/bigtable/internal/stat"
	"golang.org/x/net/context"
)

var (
	runFor   = flag.Duration("run_for", 5*time.Second, "how long to run the load test for")
	numScans = flag.Int("concurrent_scans", 1, "number of concurrent scans")
	rowLimit = flag.Int("row_limit", 10000, "max number of records per scan")

	config *cbtconfig.Config
	client *bigtable.Client
)

func main() {
	flag.Usage = func() {
		fmt.Printf("Usage: scantest [options] <table_name>\n\n")
		flag.PrintDefaults()
	}

	var err error
	config, err = cbtconfig.Load()
	if err != nil {
		log.Fatal(err)
	}
	config.RegisterFlags()

	flag.Parse()
	if err := config.CheckFlags(cbtconfig.ProjectAndInstanceRequired); err != nil {
		log.Fatal(err)
	}
	if config.Creds != "" {
		os.Setenv("GOOGLE_APPLICATION_CREDENTIALS", config.Creds)
	}
	if flag.NArg() != 1 {
		flag.Usage()
		os.Exit(1)
	}

	table := flag.Arg(0)

	log.Printf("Dialing connections...")
	client, err = bigtable.NewClient(context.Background(), config.Project, config.Instance)
	if err != nil {
		log.Fatalf("Making bigtable.Client: %v", err)
	}
	defer client.Close()

	log.Printf("Starting scan test... (run for %v)", *runFor)
	tbl := client.Open(table)
	sem := make(chan int, *numScans) // limit the number of requests happening at once
	var scans stats

	stopTime := time.Now().Add(*runFor)
	var wg sync.WaitGroup
	for time.Now().Before(stopTime) {
		sem <- 1
		wg.Add(1)
		go func() {
			defer wg.Done()
			defer func() { <-sem }()

			ok := true
			opStart := time.Now()
			defer func() {
				scans.Record(ok, time.Since(opStart))
			}()

			// Start at a random row key
			key := fmt.Sprintf("user%d", rand.Int63())
			limit := bigtable.LimitRows(int64(*rowLimit))
			noop := func(bigtable.Row) bool { return true }
			if err := tbl.ReadRows(context.Background(), bigtable.NewRange(key, ""), noop, limit); err != nil {
				log.Printf("Error during scan: %v", err)
				ok = false
			}
		}()
	}
	wg.Wait()

	agg := stat.NewAggregate("scans", scans.ds, scans.tries-scans.ok)
	log.Printf("Scans (%d ok / %d tries):\nscan times:\n%v\nthroughput (rows/second):\n%v",
		scans.ok, scans.tries, agg, throughputString(agg))
}

func throughputString(agg *stat.Aggregate) string {
	var buf bytes.Buffer
	tw := tabwriter.NewWriter(&buf, 0, 0, 1, ' ', 0) // one-space padding
	rowLimitF := float64(*rowLimit)
	fmt.Fprintf(
		tw,
		"min:\t%.2f\nmedian:\t%.2f\nmax:\t%.2f\n",
		rowLimitF/agg.Max.Seconds(),
		rowLimitF/agg.Median.Seconds(),
		rowLimitF/agg.Min.Seconds())
	tw.Flush()
	return buf.String()
}

var allStats int64 // atomic

type stats struct {
	mu        sync.Mutex
	tries, ok int
	ds        []time.Duration
}

func (s *stats) Record(ok bool, d time.Duration) {
	s.mu.Lock()
	s.tries++
	if ok {
		s.ok++
	}
	s.ds = append(s.ds, d)
	s.mu.Unlock()

	if n := atomic.AddInt64(&allStats, 1); n%1000 == 0 {
		log.Printf("Progress: done %d ops", n)
	}
}
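
An invocation sketch (placeholder values again; the one positional argument
names the table to scan):

	scantest -project my-project -instance my-instance -concurrent_scans 4 -row_limit 1000 my-table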
125 vendor/cloud.google.com/go/bigtable/doc.go generated vendored Normal file
@@ -0,0 +1,125 @@
/*
Copyright 2015 Google Inc. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

/*
Package bigtable is an API to Google Cloud Bigtable.

See https://cloud.google.com/bigtable/docs/ for general product documentation.

Setup and Credentials

Use NewClient or NewAdminClient to create a client that can be used to access
the data or admin APIs respectively. Both require credentials that have permission
to access the Cloud Bigtable API.

If your program is run on Google App Engine or Google Compute Engine, using the Application Default Credentials
(https://developers.google.com/accounts/docs/application-default-credentials)
is the simplest option. Those credentials will be used by default when NewClient or NewAdminClient are called.

To use alternate credentials, pass them to NewClient or NewAdminClient using option.WithTokenSource.
For instance, you can use service account credentials by visiting
https://cloud.google.com/console/project/MYPROJECT/apiui/credential,
creating a new OAuth "Client ID", storing the JSON key somewhere accessible, and writing
	jsonKey, err := ioutil.ReadFile(pathToKeyFile)
	...
	config, err := google.JWTConfigFromJSON(jsonKey, bigtable.Scope) // or bigtable.AdminScope, etc.
	...
	client, err := bigtable.NewClient(ctx, project, instance, option.WithTokenSource(config.TokenSource(ctx)))
	...
Here, `google` means the golang.org/x/oauth2/google package
and `option` means the google.golang.org/api/option package.

Reading

The principal way to read from a Bigtable is to use the ReadRows method on *Table.
A RowRange specifies a contiguous portion of a table. A Filter may be provided through
RowFilter to limit or transform the data that is returned.
	tbl := client.Open("mytable")
	...
	// Read all the rows starting with "com.google.",
	// but only fetch the columns in the "links" family.
	rr := bigtable.PrefixRange("com.google.")
	err := tbl.ReadRows(ctx, rr, func(r Row) bool {
		// do something with r
		return true // keep going
	}, bigtable.RowFilter(bigtable.FamilyFilter("links")))
	...

To read a single row, use the ReadRow helper method.
	r, err := tbl.ReadRow(ctx, "com.google.cloud") // "com.google.cloud" is the entire row key
	...

Writing

This API exposes two distinct forms of writing to a Bigtable: a Mutation and a ReadModifyWrite.
The former expresses idempotent operations.
The latter expresses non-idempotent operations and returns the new values of updated cells.
These operations are performed by creating a Mutation or ReadModifyWrite (with NewMutation or NewReadModifyWrite),
building up one or more operations on that, and then using the Apply or ApplyReadModifyWrite
methods on a Table.

For instance, to set a couple of cells in a table,
	tbl := client.Open("mytable")
	mut := bigtable.NewMutation()
	mut.Set("links", "maps.google.com", bigtable.Now(), []byte("1"))
	mut.Set("links", "golang.org", bigtable.Now(), []byte("1"))
	err := tbl.Apply(ctx, "com.google.cloud", mut)
	...

To increment an encoded value in one cell,
	tbl := client.Open("mytable")
	rmw := bigtable.NewReadModifyWrite()
	rmw.Increment("links", "golang.org", 12) // add 12 to the cell in column "links:golang.org"
	r, err := tbl.ApplyReadModifyWrite(ctx, "com.google.cloud", rmw)
	...

Retries

If a read or write operation encounters a transient error it will be retried until a successful
response, an unretryable error or the context deadline is reached. Non-idempotent writes (where
the timestamp is set to ServerTime) will not be retried. In the case of ReadRows, retried calls
will not re-scan rows that have already been processed.
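
For example, a minimal sketch (the deadline value is illustrative) that bounds
all retries of a write with a context deadline:
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	err := tbl.Apply(ctx, "com.google.cloud", mut) // retried on transient errors until the deadline expires
	...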

Authentication

See examples of authorization and authentication at
https://godoc.org/cloud.google.com/go#pkg-examples.
*/
package bigtable // import "cloud.google.com/go/bigtable"

// Scope constants for authentication credentials.
// These should be used when using credential creation functions such as oauth.NewServiceAccountFromFile.
const (
	// Scope is the OAuth scope for Cloud Bigtable data operations.
	Scope = "https://www.googleapis.com/auth/bigtable.data"
	// ReadonlyScope is the OAuth scope for Cloud Bigtable read-only data operations.
	ReadonlyScope = "https://www.googleapis.com/auth/bigtable.readonly"

	// AdminScope is the OAuth scope for Cloud Bigtable table admin operations.
	AdminScope = "https://www.googleapis.com/auth/bigtable.admin.table"

	// InstanceAdminScope is the OAuth scope for Cloud Bigtable instance (and cluster) admin operations.
	InstanceAdminScope = "https://www.googleapis.com/auth/bigtable.admin.cluster"
)

// clientUserAgent identifies the version of this package.
// It should be bumped upon significant changes only.
const clientUserAgent = "cbt-go/20160628"

// resourcePrefixHeader is the name of the metadata header used to indicate
// the resource being operated on.
const resourcePrefixHeader = "google-cloud-resource-prefix"
203 vendor/cloud.google.com/go/bigtable/export_test.go generated vendored Normal file
@@ -0,0 +1,203 @@
/*
Copyright 2016 Google Inc. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package bigtable

import (
	"errors"
	"flag"
	"fmt"
	"strings"
	"time"

	"cloud.google.com/go/bigtable/bttest"
	"golang.org/x/net/context"
	"google.golang.org/api/option"
	"google.golang.org/grpc"
)

var legacyUseProd string
var integrationConfig IntegrationTestConfig

func init() {
	c := &integrationConfig

	flag.BoolVar(&c.UseProd, "it.use-prod", false, "Use remote bigtable instead of local emulator")
	flag.StringVar(&c.AdminEndpoint, "it.admin-endpoint", "", "Admin api host and port")
	flag.StringVar(&c.DataEndpoint, "it.data-endpoint", "", "Data api host and port")
	flag.StringVar(&c.Project, "it.project", "", "Project to use for integration test")
	flag.StringVar(&c.Instance, "it.instance", "", "Bigtable instance to use")
	flag.StringVar(&c.Cluster, "it.cluster", "", "Bigtable cluster to use")
	flag.StringVar(&c.Table, "it.table", "", "Bigtable table to create")

	// Backwards compat
	flag.StringVar(&legacyUseProd, "use_prod", "", `DEPRECATED: if set to "proj,instance,table", run integration test against production`)
}

// IntegrationTestConfig contains parameters to pick and set up an IntegrationEnv for testing
type IntegrationTestConfig struct {
	UseProd       bool
	AdminEndpoint string
	DataEndpoint  string
	Project       string
	Instance      string
	Cluster       string
	Table         string
}

// IntegrationEnv represents a testing environment.
// The environment can be implemented using production or an emulator
type IntegrationEnv interface {
	Config() IntegrationTestConfig
	NewAdminClient() (*AdminClient, error)
	NewClient() (*Client, error)
	Close()
}

// NewIntegrationEnv creates a new environment based on the command line args
func NewIntegrationEnv() (IntegrationEnv, error) {
	c := integrationConfig

	if legacyUseProd != "" {
		fmt.Println("WARNING: using legacy commandline arg -use_prod, please switch to -it.*")
		parts := strings.SplitN(legacyUseProd, ",", 3)
		c.UseProd = true
		c.Project = parts[0]
		c.Instance = parts[1]
		c.Table = parts[2]
	}

	if integrationConfig.UseProd {
		return NewProdEnv(c)
	}
	return NewEmulatedEnv(c)
}

// EmulatedEnv encapsulates the state of an emulator
type EmulatedEnv struct {
	config IntegrationTestConfig
	server *bttest.Server
}

// NewEmulatedEnv builds and starts the emulator-based environment
func NewEmulatedEnv(config IntegrationTestConfig) (*EmulatedEnv, error) {
	srv, err := bttest.NewServer("127.0.0.1:0")
	if err != nil {
		return nil, err
	}

	if config.Project == "" {
		config.Project = "project"
	}
	if config.Instance == "" {
		config.Instance = "instance"
	}
	if config.Table == "" {
		config.Table = "mytable"
	}
	config.AdminEndpoint = srv.Addr
	config.DataEndpoint = srv.Addr

	env := &EmulatedEnv{
		config: config,
		server: srv,
	}
	return env, nil
}

// Close stops & cleans up the emulator
func (e *EmulatedEnv) Close() {
	e.server.Close()
}

// Config gets the config used to build this environment
func (e *EmulatedEnv) Config() IntegrationTestConfig {
	return e.config
}

// NewAdminClient builds a new connected admin client for this environment
func (e *EmulatedEnv) NewAdminClient() (*AdminClient, error) {
	timeout := 20 * time.Second
	ctx, _ := context.WithTimeout(context.Background(), timeout)
	conn, err := grpc.Dial(e.server.Addr, grpc.WithInsecure())
	if err != nil {
		return nil, err
	}
	return NewAdminClient(ctx, e.config.Project, e.config.Instance, option.WithGRPCConn(conn))
}

// NewClient builds a new connected data client for this environment
func (e *EmulatedEnv) NewClient() (*Client, error) {
	timeout := 20 * time.Second
	ctx, _ := context.WithTimeout(context.Background(), timeout)
	conn, err := grpc.Dial(e.server.Addr, grpc.WithInsecure())
	if err != nil {
		return nil, err
	}
	return NewClient(ctx, e.config.Project, e.config.Instance, option.WithGRPCConn(conn))
}

// ProdEnv encapsulates the state necessary to connect to the external Bigtable service
type ProdEnv struct {
	config IntegrationTestConfig
}

// NewProdEnv builds the environment representation
func NewProdEnv(config IntegrationTestConfig) (*ProdEnv, error) {
	if config.Project == "" {
		return nil, errors.New("Project not set")
	}
	if config.Instance == "" {
		return nil, errors.New("Instance not set")
	}
	if config.Table == "" {
		return nil, errors.New("Table not set")
	}

	return &ProdEnv{config}, nil
}

// Close is a no-op for production environments
func (e *ProdEnv) Close() {}

// Config gets the config used to build this environment
func (e *ProdEnv) Config() IntegrationTestConfig {
	return e.config
}

// NewAdminClient builds a new connected admin client for this environment
func (e *ProdEnv) NewAdminClient() (*AdminClient, error) {
	timeout := 20 * time.Second
	ctx, _ := context.WithTimeout(context.Background(), timeout)
	var clientOpts []option.ClientOption
	if endpoint := e.config.AdminEndpoint; endpoint != "" {
		clientOpts = append(clientOpts, option.WithEndpoint(endpoint))
	}
	return NewAdminClient(ctx, e.config.Project, e.config.Instance, clientOpts...)
}

// NewClient builds a connected data client for this environment
func (e *ProdEnv) NewClient() (*Client, error) {
	timeout := 20 * time.Second
	ctx, _ := context.WithTimeout(context.Background(), timeout)
	var clientOpts []option.ClientOption
	if endpoint := e.config.DataEndpoint; endpoint != "" {
		clientOpts = append(clientOpts, option.WithEndpoint(endpoint))
	}
	return NewClient(ctx, e.config.Project, e.config.Instance, clientOpts...)
}
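
One plausible way to drive these flags when running the package's tests
against a real instance (values are placeholders; -args forwards the
remaining flags to the test binary):

	go test cloud.google.com/go/bigtable -args -it.use-prod -it.project=my-project -it.instance=my-instance -it.table=scratch-table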
288 vendor/cloud.google.com/go/bigtable/filter.go generated vendored Normal file
@@ -0,0 +1,288 @@
/*
Copyright 2015 Google Inc. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package bigtable

import (
	"fmt"
	"strings"
	"time"

	btpb "google.golang.org/genproto/googleapis/bigtable/v2"
)

// A Filter represents a row filter.
type Filter interface {
	String() string
	proto() *btpb.RowFilter
}

// ChainFilters returns a filter that applies a sequence of filters.
func ChainFilters(sub ...Filter) Filter { return chainFilter{sub} }

type chainFilter struct {
	sub []Filter
}

func (cf chainFilter) String() string {
	var ss []string
	for _, sf := range cf.sub {
		ss = append(ss, sf.String())
	}
	return "(" + strings.Join(ss, " | ") + ")"
}

func (cf chainFilter) proto() *btpb.RowFilter {
	chain := &btpb.RowFilter_Chain{}
	for _, sf := range cf.sub {
		chain.Filters = append(chain.Filters, sf.proto())
	}
	return &btpb.RowFilter{
		Filter: &btpb.RowFilter_Chain_{chain},
	}
}

// InterleaveFilters returns a filter that applies a set of filters in parallel
// and interleaves the results.
func InterleaveFilters(sub ...Filter) Filter { return interleaveFilter{sub} }

type interleaveFilter struct {
	sub []Filter
}

func (ilf interleaveFilter) String() string {
	var ss []string
	for _, sf := range ilf.sub {
		ss = append(ss, sf.String())
	}
	return "(" + strings.Join(ss, " + ") + ")"
}

func (ilf interleaveFilter) proto() *btpb.RowFilter {
	inter := &btpb.RowFilter_Interleave{}
	for _, sf := range ilf.sub {
		inter.Filters = append(inter.Filters, sf.proto())
	}
	return &btpb.RowFilter{
		Filter: &btpb.RowFilter_Interleave_{inter},
	}
}

// RowKeyFilter returns a filter that matches cells from rows whose
// key matches the provided RE2 pattern.
// See https://github.com/google/re2/wiki/Syntax for the accepted syntax.
func RowKeyFilter(pattern string) Filter { return rowKeyFilter(pattern) }

type rowKeyFilter string

func (rkf rowKeyFilter) String() string { return fmt.Sprintf("row(%s)", string(rkf)) }

func (rkf rowKeyFilter) proto() *btpb.RowFilter {
	return &btpb.RowFilter{Filter: &btpb.RowFilter_RowKeyRegexFilter{[]byte(rkf)}}
}

// FamilyFilter returns a filter that matches cells whose family name
// matches the provided RE2 pattern.
// See https://github.com/google/re2/wiki/Syntax for the accepted syntax.
func FamilyFilter(pattern string) Filter { return familyFilter(pattern) }

type familyFilter string

func (ff familyFilter) String() string { return fmt.Sprintf("col(%s:)", string(ff)) }

func (ff familyFilter) proto() *btpb.RowFilter {
	return &btpb.RowFilter{Filter: &btpb.RowFilter_FamilyNameRegexFilter{string(ff)}}
}

// ColumnFilter returns a filter that matches cells whose column name
// matches the provided RE2 pattern.
// See https://github.com/google/re2/wiki/Syntax for the accepted syntax.
func ColumnFilter(pattern string) Filter { return columnFilter(pattern) }

type columnFilter string

func (cf columnFilter) String() string { return fmt.Sprintf("col(.*:%s)", string(cf)) }

func (cf columnFilter) proto() *btpb.RowFilter {
	return &btpb.RowFilter{Filter: &btpb.RowFilter_ColumnQualifierRegexFilter{[]byte(cf)}}
}

// ValueFilter returns a filter that matches cells whose value
// matches the provided RE2 pattern.
// See https://github.com/google/re2/wiki/Syntax for the accepted syntax.
func ValueFilter(pattern string) Filter { return valueFilter(pattern) }

type valueFilter string

func (vf valueFilter) String() string { return fmt.Sprintf("value_match(%s)", string(vf)) }

func (vf valueFilter) proto() *btpb.RowFilter {
	return &btpb.RowFilter{Filter: &btpb.RowFilter_ValueRegexFilter{[]byte(vf)}}
}

// LatestNFilter returns a filter that matches the most recent N cells in each column.
func LatestNFilter(n int) Filter { return latestNFilter(n) }

type latestNFilter int32

func (lnf latestNFilter) String() string { return fmt.Sprintf("col(*,%d)", lnf) }

func (lnf latestNFilter) proto() *btpb.RowFilter {
	return &btpb.RowFilter{Filter: &btpb.RowFilter_CellsPerColumnLimitFilter{int32(lnf)}}
}

// StripValueFilter returns a filter that replaces each value with the empty string.
func StripValueFilter() Filter { return stripValueFilter{} }

type stripValueFilter struct{}

func (stripValueFilter) String() string { return "strip_value()" }
func (stripValueFilter) proto() *btpb.RowFilter {
	return &btpb.RowFilter{Filter: &btpb.RowFilter_StripValueTransformer{true}}
}

// TimestampRangeFilter returns a filter that matches any rows whose timestamp is within the given time bounds. A zero
// time means no bound.
// The timestamp will be truncated to millisecond granularity.
func TimestampRangeFilter(startTime time.Time, endTime time.Time) Filter {
	trf := timestampRangeFilter{}
	if !startTime.IsZero() {
		trf.startTime = Time(startTime)
	}
	if !endTime.IsZero() {
		trf.endTime = Time(endTime)
	}
	return trf
}

// TimestampRangeFilterMicros returns a filter that matches any rows whose timestamp is within the given time bounds,
// specified in units of microseconds since 1 January 1970. A zero value for the end time is interpreted as no bound.
// The timestamp will be truncated to millisecond granularity.
func TimestampRangeFilterMicros(startTime Timestamp, endTime Timestamp) Filter {
	return timestampRangeFilter{startTime, endTime}
}

type timestampRangeFilter struct {
	startTime Timestamp
	endTime   Timestamp
}

func (trf timestampRangeFilter) String() string {
	return fmt.Sprintf("timestamp_range(%s,%s)", trf.startTime, trf.endTime)
}

func (trf timestampRangeFilter) proto() *btpb.RowFilter {
	return &btpb.RowFilter{
		Filter: &btpb.RowFilter_TimestampRangeFilter{
			&btpb.TimestampRange{
				int64(trf.startTime.TruncateToMilliseconds()),
				int64(trf.endTime.TruncateToMilliseconds()),
			},
		}}
}

// ColumnRangeFilter returns a filter that matches a contiguous range of columns within a single
// family, as specified by an inclusive start qualifier and exclusive end qualifier.
func ColumnRangeFilter(family, start, end string) Filter {
	return columnRangeFilter{family, start, end}
}

type columnRangeFilter struct {
	family string
	start  string
	end    string
}

func (crf columnRangeFilter) String() string {
	return fmt.Sprintf("columnRangeFilter(%s,%s,%s)", crf.family, crf.start, crf.end)
}

func (crf columnRangeFilter) proto() *btpb.RowFilter {
	r := &btpb.ColumnRange{FamilyName: crf.family}
	if crf.start != "" {
		r.StartQualifier = &btpb.ColumnRange_StartQualifierClosed{[]byte(crf.start)}
	}
	if crf.end != "" {
		r.EndQualifier = &btpb.ColumnRange_EndQualifierOpen{[]byte(crf.end)}
	}
	return &btpb.RowFilter{&btpb.RowFilter_ColumnRangeFilter{r}}
}

// ValueRangeFilter returns a filter that matches cells with values that fall within
// the given range, as specified by an inclusive start value and exclusive end value.
func ValueRangeFilter(start, end []byte) Filter {
	return valueRangeFilter{start, end}
}

type valueRangeFilter struct {
	start []byte
	end   []byte
}

func (vrf valueRangeFilter) String() string {
	return fmt.Sprintf("valueRangeFilter(%s,%s)", vrf.start, vrf.end)
}

func (vrf valueRangeFilter) proto() *btpb.RowFilter {
	r := &btpb.ValueRange{}
	if vrf.start != nil {
		r.StartValue = &btpb.ValueRange_StartValueClosed{vrf.start}
	}
	if vrf.end != nil {
		r.EndValue = &btpb.ValueRange_EndValueOpen{vrf.end}
	}
	return &btpb.RowFilter{&btpb.RowFilter_ValueRangeFilter{r}}
}

// ConditionFilter returns a filter that evaluates to one of two possible filters depending
// on whether or not the given predicate filter matches at least one cell.
// If the matched filter is nil then no results will be returned.
// IMPORTANT NOTE: The predicate filter does not execute atomically with the
// true and false filters, which may lead to inconsistent or unexpected
// results. Additionally, condition filters have poor performance, especially
// when filters are set for the false condition.
func ConditionFilter(predicateFilter, trueFilter, falseFilter Filter) Filter {
	return conditionFilter{predicateFilter, trueFilter, falseFilter}
}

type conditionFilter struct {
	predicateFilter Filter
	trueFilter      Filter
	falseFilter     Filter
}

func (cf conditionFilter) String() string {
	return fmt.Sprintf("conditionFilter(%s,%s,%s)", cf.predicateFilter, cf.trueFilter, cf.falseFilter)
}

func (cf conditionFilter) proto() *btpb.RowFilter {
	var tf *btpb.RowFilter
	var ff *btpb.RowFilter
	if cf.trueFilter != nil {
		tf = cf.trueFilter.proto()
	}
	if cf.falseFilter != nil {
		ff = cf.falseFilter.proto()
	}
	return &btpb.RowFilter{
		&btpb.RowFilter_Condition_{&btpb.RowFilter_Condition{
			cf.predicateFilter.proto(),
			tf,
			ff,
		}}}
}

// TODO(dsymonds): More filters: sampling
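
A minimal composition sketch (ctx, tbl, and the family name are assumed, as in
the doc.go examples): chain a family filter with a latest-version filter so
each column returns only its newest cell:

	f := bigtable.ChainFilters(
		bigtable.FamilyFilter("links"),
		bigtable.LatestNFilter(1),
	)
	err := tbl.ReadRows(ctx, bigtable.PrefixRange("com."), func(r bigtable.Row) bool {
		return true // process r here
	}, bigtable.RowFilter(f))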
131 vendor/cloud.google.com/go/bigtable/gc.go generated vendored Normal file
@@ -0,0 +1,131 @@
/*
Copyright 2015 Google Inc. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package bigtable

import (
	"fmt"
	"strings"
	"time"

	durpb "github.com/golang/protobuf/ptypes/duration"
	bttdpb "google.golang.org/genproto/googleapis/bigtable/admin/v2"
)

// A GCPolicy represents a rule that determines which cells are eligible for garbage collection.
type GCPolicy interface {
	String() string
	proto() *bttdpb.GcRule
}

// IntersectionPolicy returns a GC policy that only applies when all its sub-policies apply.
func IntersectionPolicy(sub ...GCPolicy) GCPolicy { return intersectionPolicy{sub} }

type intersectionPolicy struct {
	sub []GCPolicy
}

func (ip intersectionPolicy) String() string {
	var ss []string
	for _, sp := range ip.sub {
		ss = append(ss, sp.String())
	}
	return "(" + strings.Join(ss, " && ") + ")"
}

func (ip intersectionPolicy) proto() *bttdpb.GcRule {
	inter := &bttdpb.GcRule_Intersection{}
	for _, sp := range ip.sub {
		inter.Rules = append(inter.Rules, sp.proto())
	}
	return &bttdpb.GcRule{
		Rule: &bttdpb.GcRule_Intersection_{inter},
	}
}

// UnionPolicy returns a GC policy that applies when any of its sub-policies apply.
func UnionPolicy(sub ...GCPolicy) GCPolicy { return unionPolicy{sub} }

type unionPolicy struct {
	sub []GCPolicy
}

func (up unionPolicy) String() string {
	var ss []string
	for _, sp := range up.sub {
		ss = append(ss, sp.String())
	}
	return "(" + strings.Join(ss, " || ") + ")"
}

func (up unionPolicy) proto() *bttdpb.GcRule {
	union := &bttdpb.GcRule_Union{}
	for _, sp := range up.sub {
		union.Rules = append(union.Rules, sp.proto())
	}
	return &bttdpb.GcRule{
		Rule: &bttdpb.GcRule_Union_{union},
	}
}

// MaxVersionsPolicy returns a GC policy that applies to all versions of a cell
// except for the most recent n.
func MaxVersionsPolicy(n int) GCPolicy { return maxVersionsPolicy(n) }

type maxVersionsPolicy int

func (mvp maxVersionsPolicy) String() string { return fmt.Sprintf("versions() > %d", int(mvp)) }

func (mvp maxVersionsPolicy) proto() *bttdpb.GcRule {
	return &bttdpb.GcRule{Rule: &bttdpb.GcRule_MaxNumVersions{int32(mvp)}}
}

// MaxAgePolicy returns a GC policy that applies to all cells
// older than the given age.
func MaxAgePolicy(d time.Duration) GCPolicy { return maxAgePolicy(d) }

type maxAgePolicy time.Duration

var units = []struct {
	d      time.Duration
	suffix string
}{
	{24 * time.Hour, "d"},
	{time.Hour, "h"},
	{time.Minute, "m"},
}

func (ma maxAgePolicy) String() string {
	d := time.Duration(ma)
	for _, u := range units {
		if d%u.d == 0 {
			return fmt.Sprintf("age() > %d%s", d/u.d, u.suffix)
		}
	}
	return fmt.Sprintf("age() > %d", d/time.Microsecond)
}

func (ma maxAgePolicy) proto() *bttdpb.GcRule {
	// This doesn't handle overflows, etc.
	// Fix this if people care about GC policies over 290 years.
	ns := time.Duration(ma).Nanoseconds()
	return &bttdpb.GcRule{
		Rule: &bttdpb.GcRule_MaxAge{&durpb.Duration{
			Seconds: ns / 1e9,
			Nanos:   int32(ns % 1e9),
		}},
	}
}
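
A composition sketch (table and family names are placeholders, and applying
the policy through an AdminClient.SetGCPolicy call is assumed from the admin
API rather than shown in this file): collect cells once they are older than
seven days or beyond the three newest versions:

	policy := bigtable.UnionPolicy(
		bigtable.MaxAgePolicy(7*24*time.Hour),
		bigtable.MaxVersionsPolicy(3),
	)
	err := adminClient.SetGCPolicy(ctx, "my-table", "cf1", policy)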
246 vendor/cloud.google.com/go/bigtable/internal/cbtconfig/cbtconfig.go generated vendored Normal file
@@ -0,0 +1,246 @@
/*
Copyright 2015 Google Inc. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Package cbtconfig encapsulates common code for reading configuration from .cbtrc and gcloud.
package cbtconfig

import (
	"bufio"
	"bytes"
	"crypto/tls"
	"crypto/x509"
	"encoding/json"
	"flag"
	"fmt"
	"io/ioutil"
	"log"
	"os"
	"os/exec"
	"path/filepath"
	"runtime"
	"strings"
	"time"

	"golang.org/x/oauth2"
	"google.golang.org/grpc/credentials"
)

// Config represents a configuration.
type Config struct {
	Project, Instance string                           // required
	Creds             string                           // optional
	AdminEndpoint     string                           // optional
	DataEndpoint      string                           // optional
	CertFile          string                           // optional
	TokenSource       oauth2.TokenSource               // derived
	TLSCreds          credentials.TransportCredentials // derived
}

type RequiredFlags uint

const NoneRequired RequiredFlags = 0
const (
	ProjectRequired RequiredFlags = 1 << iota
	InstanceRequired
)
const ProjectAndInstanceRequired RequiredFlags = ProjectRequired | InstanceRequired

// RegisterFlags registers a set of standard flags for this config.
// It should be called before flag.Parse.
func (c *Config) RegisterFlags() {
	flag.StringVar(&c.Project, "project", c.Project, "project ID, if unset uses gcloud configured project")
	flag.StringVar(&c.Instance, "instance", c.Instance, "Cloud Bigtable instance")
	flag.StringVar(&c.Creds, "creds", c.Creds, "if set, use application credentials in this file")
	flag.StringVar(&c.AdminEndpoint, "admin-endpoint", c.AdminEndpoint, "Override the admin api endpoint")
	flag.StringVar(&c.DataEndpoint, "data-endpoint", c.DataEndpoint, "Override the data api endpoint")
	flag.StringVar(&c.CertFile, "cert-file", c.CertFile, "Override the TLS certificates file")
}

// CheckFlags checks that the required config values are set.
func (c *Config) CheckFlags(required RequiredFlags) error {
	var missing []string
	if c.CertFile != "" {
		b, err := ioutil.ReadFile(c.CertFile)
		if err != nil {
			return fmt.Errorf("Failed to load certificates from %s: %v", c.CertFile, err)
		}

		cp := x509.NewCertPool()
		if !cp.AppendCertsFromPEM(b) {
			return fmt.Errorf("Failed to append certificates from %s", c.CertFile)
		}

		c.TLSCreds = credentials.NewTLS(&tls.Config{RootCAs: cp})
	}
	if required != NoneRequired {
		c.SetFromGcloud()
	}
	if required&ProjectRequired != 0 && c.Project == "" {
		missing = append(missing, "-project")
	}
	if required&InstanceRequired != 0 && c.Instance == "" {
		missing = append(missing, "-instance")
	}
	if len(missing) > 0 {
		return fmt.Errorf("Missing %s", strings.Join(missing, " and "))
	}
	return nil
}

// Filename returns the filename consulted for standard configuration.
func Filename() string {
	// TODO(dsymonds): Might need tweaking for Windows.
	return filepath.Join(os.Getenv("HOME"), ".cbtrc")
}

// Load loads a .cbtrc file.
// If the file is not present, an empty config is returned.
func Load() (*Config, error) {
	filename := Filename()
	data, err := ioutil.ReadFile(filename)
	if err != nil {
		// silent fail if the file isn't there
		if os.IsNotExist(err) {
			return &Config{}, nil
		}
		return nil, fmt.Errorf("Reading %s: %v", filename, err)
	}
	c := new(Config)
	s := bufio.NewScanner(bytes.NewReader(data))
	for s.Scan() {
		line := s.Text()
		i := strings.Index(line, "=")
		if i < 0 {
			return nil, fmt.Errorf("Bad line in %s: %q", filename, line)
		}
		key, val := strings.TrimSpace(line[:i]), strings.TrimSpace(line[i+1:])
		switch key {
		default:
			return nil, fmt.Errorf("Unknown key in %s: %q", filename, key)
		case "project":
			c.Project = val
		case "instance":
			c.Instance = val
		case "creds":
			c.Creds = val
		case "admin-endpoint":
			c.AdminEndpoint = val
		case "data-endpoint":
			c.DataEndpoint = val
		}
	}
	return c, s.Err()
}

type GcloudCredential struct {
	AccessToken string    `json:"access_token"`
	Expiry      time.Time `json:"token_expiry"`
}

func (cred *GcloudCredential) Token() *oauth2.Token {
	return &oauth2.Token{AccessToken: cred.AccessToken, TokenType: "Bearer", Expiry: cred.Expiry}
}

type GcloudConfig struct {
	Configuration struct {
		Properties struct {
			Core struct {
				Project string `json:"project"`
			} `json:"core"`
		} `json:"properties"`
	} `json:"configuration"`
	Credential GcloudCredential `json:"credential"`
}

type GcloudCmdTokenSource struct {
	Command string
	Args    []string
}

// Token implements the oauth2.TokenSource interface
func (g *GcloudCmdTokenSource) Token() (*oauth2.Token, error) {
	gcloudConfig, err := LoadGcloudConfig(g.Command, g.Args)
	if err != nil {
		return nil, err
	}
	return gcloudConfig.Credential.Token(), nil
}

// LoadGcloudConfig retrieves the gcloud configuration values we need via the
// 'config-helper' command
func LoadGcloudConfig(gcloudCmd string, gcloudCmdArgs []string) (*GcloudConfig, error) {
	out, err := exec.Command(gcloudCmd, gcloudCmdArgs...).Output()
	if err != nil {
		return nil, fmt.Errorf("Could not retrieve gcloud configuration")
	}

	var gcloudConfig GcloudConfig
	if err := json.Unmarshal(out, &gcloudConfig); err != nil {
		return nil, fmt.Errorf("Could not parse gcloud configuration")
	}

	return &gcloudConfig, nil
}

// SetFromGcloud retrieves and sets any missing config values from the gcloud
// configuration if possible
func (c *Config) SetFromGcloud() error {
	if c.Creds == "" {
		c.Creds = os.Getenv("GOOGLE_APPLICATION_CREDENTIALS")
		if c.Creds == "" {
			log.Printf("-creds flag unset, will use gcloud credential")
		}
	} else {
		os.Setenv("GOOGLE_APPLICATION_CREDENTIALS", c.Creds)
	}

	if c.Project == "" {
		log.Printf("-project flag unset, will use gcloud active project")
	}

	if c.Creds != "" && c.Project != "" {
		return nil
	}

	gcloudCmd := "gcloud"
	if runtime.GOOS == "windows" {
		gcloudCmd = gcloudCmd + ".cmd"
	}

	gcloudCmdArgs := []string{"config", "config-helper",
		"--format=json(configuration.properties.core.project,credential)"}

	gcloudConfig, err := LoadGcloudConfig(gcloudCmd, gcloudCmdArgs)
	if err != nil {
		return err
	}

	if c.Project == "" && gcloudConfig.Configuration.Properties.Core.Project != "" {
		log.Printf("gcloud active project is \"%s\"",
			gcloudConfig.Configuration.Properties.Core.Project)
		c.Project = gcloudConfig.Configuration.Properties.Core.Project
	}

	if c.Creds == "" {
		c.TokenSource = oauth2.ReuseTokenSource(
			gcloudConfig.Credential.Token(),
			&GcloudCmdTokenSource{Command: gcloudCmd, Args: gcloudCmdArgs})
	}

	return nil
}
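
A sketch of the .cbtrc format Load accepts, one key = value pair per line
(values are placeholders; any other key is rejected by the switch above):

	project = my-project
	instance = my-instance
	creds = /path/to/service-account.json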
106 vendor/cloud.google.com/go/bigtable/internal/gax/call_option.go generated vendored Normal file
@@ -0,0 +1,106 @@
/*
Copyright 2016 Google Inc. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// This is a snapshot from github.com/googleapis/gax-go with minor modifications.
package gax

import (
	"time"

	"google.golang.org/grpc/codes"
)

type CallOption interface {
	Resolve(*CallSettings)
}

type callOptions []CallOption

func (opts callOptions) Resolve(s *CallSettings) *CallSettings {
	for _, opt := range opts {
		opt.Resolve(s)
	}
	return s
}

// Encapsulates the call settings for a particular API call.
type CallSettings struct {
	Timeout       time.Duration
	RetrySettings RetrySettings
}

// Per-call configurable settings for retrying upon transient failure.
type RetrySettings struct {
	RetryCodes      map[codes.Code]bool
	BackoffSettings BackoffSettings
}

// Parameters to the exponential backoff algorithm for retrying.
type BackoffSettings struct {
	DelayTimeoutSettings MultipliableDuration
	RPCTimeoutSettings   MultipliableDuration
}

type MultipliableDuration struct {
	Initial    time.Duration
	Max        time.Duration
	Multiplier float64
}

func (w CallSettings) Resolve(s *CallSettings) {
	s.Timeout = w.Timeout
	s.RetrySettings = w.RetrySettings

	s.RetrySettings.RetryCodes = make(map[codes.Code]bool, len(w.RetrySettings.RetryCodes))
	for key, value := range w.RetrySettings.RetryCodes {
		s.RetrySettings.RetryCodes[key] = value
	}
}

type withRetryCodes []codes.Code

func (w withRetryCodes) Resolve(s *CallSettings) {
	s.RetrySettings.RetryCodes = make(map[codes.Code]bool)
	for _, code := range w {
		s.RetrySettings.RetryCodes[code] = true
	}
}

// WithRetryCodes sets a list of Google API canonical error codes upon which a
// retry should be attempted.
func WithRetryCodes(retryCodes []codes.Code) CallOption {
	return withRetryCodes(retryCodes)
}

type withDelayTimeoutSettings MultipliableDuration

func (w withDelayTimeoutSettings) Resolve(s *CallSettings) {
	s.RetrySettings.BackoffSettings.DelayTimeoutSettings = MultipliableDuration(w)
}

// WithDelayTimeoutSettings specifies:
// - The initial delay time, in milliseconds, between the completion of
//   the first failed request and the initiation of the first retrying
//   request.
// - The multiplier by which to increase the delay time between the
//   completion of failed requests, and the initiation of the subsequent
//   retrying request.
// - The maximum delay time, in milliseconds, between requests. When this
//   value is reached, `RetryDelayMultiplier` will no longer be used to
//   increase delay time.
func WithDelayTimeoutSettings(initial time.Duration, max time.Duration, multiplier float64) CallOption {
	return withDelayTimeoutSettings(MultipliableDuration{initial, max, multiplier})
}
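
A settings sketch mirroring invoke_test.go below (the durations and codes are
illustrative, and doRPC is a placeholder for the call to retry); Invoke is
defined in invoke.go:

	opts := []CallOption{
		WithRetryCodes([]codes.Code{codes.Unavailable, codes.DeadlineExceeded}),
		WithDelayTimeoutSettings(10*time.Millisecond, 200*time.Millisecond, 1.5),
	}
	err := Invoke(ctx, func(ctx context.Context) error {
		return doRPC(ctx) // retried with randomized exponential backoff
	}, opts...)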
84 vendor/cloud.google.com/go/bigtable/internal/gax/invoke.go generated vendored Normal file
@@ -0,0 +1,84 @@
/*
Copyright 2015 Google Inc. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// This is a snapshot from github.com/googleapis/gax-go with minor modifications.
package gax

import (
	"log"
	"math/rand"
	"os"
	"time"

	"golang.org/x/net/context"
	"google.golang.org/grpc"
	"google.golang.org/grpc/codes"
)

var logger *log.Logger = log.New(os.Stderr, "", log.LstdFlags)

// A user defined call stub.
type APICall func(context.Context) error

// scaleDuration returns the product of a and mult.
func scaleDuration(a time.Duration, mult float64) time.Duration {
	ns := float64(a) * mult
	return time.Duration(ns)
}

// invokeWithRetry calls stub using an exponential backoff retry mechanism
// based on the values provided in callSettings.
func invokeWithRetry(ctx context.Context, stub APICall, callSettings CallSettings) error {
	retrySettings := callSettings.RetrySettings
	backoffSettings := callSettings.RetrySettings.BackoffSettings
	delay := backoffSettings.DelayTimeoutSettings.Initial
	for {
		// If the deadline is exceeded...
		if ctx.Err() != nil {
			return ctx.Err()
		}
		err := stub(ctx)
		code := grpc.Code(err)
		if code == codes.OK {
			return nil
		}

		if !retrySettings.RetryCodes[code] {
			return err
		}

		// Sleep a random amount up to the current delay
		d := time.Duration(rand.Int63n(int64(delay)))
		delayCtx, _ := context.WithTimeout(ctx, delay)
		logger.Printf("Retryable error: %v, retrying in %v", err, d)
		<-delayCtx.Done()

		delay = scaleDuration(delay, backoffSettings.DelayTimeoutSettings.Multiplier)
		if delay > backoffSettings.DelayTimeoutSettings.Max {
			delay = backoffSettings.DelayTimeoutSettings.Max
		}
	}
}

// Invoke calls stub with a child of context modified by the specified options.
func Invoke(ctx context.Context, stub APICall, opts ...CallOption) error {
	settings := &CallSettings{}
	callOptions(opts).Resolve(settings)
	if len(settings.RetrySettings.RetryCodes) > 0 {
		return invokeWithRetry(ctx, stub, *settings)
	}
	return stub(ctx)
}
49 vendor/cloud.google.com/go/bigtable/internal/gax/invoke_test.go generated vendored Normal file
@@ -0,0 +1,49 @@
/*
Copyright 2015 Google Inc. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package gax

import (
	"testing"
	"time"

	"golang.org/x/net/context"
	"google.golang.org/grpc"
	"google.golang.org/grpc/codes"
)

func TestRandomizedDelays(t *testing.T) {
	max := 200 * time.Millisecond
	settings := []CallOption{
		WithRetryCodes([]codes.Code{codes.Unavailable, codes.DeadlineExceeded}),
		WithDelayTimeoutSettings(10*time.Millisecond, max, 1.5),
	}

	deadline := time.Now().Add(1 * time.Second)
	ctx, _ := context.WithDeadline(context.Background(), deadline)
	var invokeTime time.Time
	Invoke(ctx, func(childCtx context.Context) error {
		// Keep failing, make sure we never slept more than max (plus a fudge factor)
		if !invokeTime.IsZero() {
			if got, want := time.Since(invokeTime), max; got > (want + 20*time.Millisecond) {
				t.Fatalf("Slept too long. Got: %v, want: %v", got, max)
			}
		}
		invokeTime = time.Now()
		// Workaround for `go vet`: https://github.com/grpc/grpc-go/issues/90
		errf := grpc.Errorf
		return errf(codes.Unavailable, "")
	}, settings...)
}
|
48 vendor/cloud.google.com/go/bigtable/internal/option/option.go generated vendored Normal file
@@ -0,0 +1,48 @@
/*
Copyright 2015 Google Inc. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Package option contains common code for dealing with client options.
package option

import (
	"fmt"
	"os"

	"google.golang.org/api/option"
	"google.golang.org/grpc"
)

// DefaultClientOptions returns the default client options to use for the
// client's gRPC connection.
func DefaultClientOptions(endpoint, scope, userAgent string) ([]option.ClientOption, error) {
	var o []option.ClientOption
	// Check the environment variables for the bigtable emulator.
	// Dial it directly and don't pass any credentials.
	if addr := os.Getenv("BIGTABLE_EMULATOR_HOST"); addr != "" {
		conn, err := grpc.Dial(addr, grpc.WithInsecure())
		if err != nil {
			return nil, fmt.Errorf("emulator grpc.Dial: %v", err)
		}
		o = []option.ClientOption{option.WithGRPCConn(conn)}
	} else {
		o = []option.ClientOption{
			option.WithEndpoint(endpoint),
			option.WithScopes(scope),
			option.WithUserAgent(userAgent),
		}
	}
	return o, nil
}
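A hedged usage sketch of the emulator switch above: with BIGTABLE_EMULATOR_HOST set, the returned options carry an insecure connection to the emulator; otherwise they name the real endpoint. The endpoint, scope URL, address, and user agent below are illustrative values, not constants defined by this package:

	os.Setenv("BIGTABLE_EMULATOR_HOST", "127.0.0.1:8086") // hypothetical emulator address
	opts, err := DefaultClientOptions(
		"bigtable.googleapis.com:443",
		"https://www.googleapis.com/auth/bigtable.data",
		"my-agent/0.1",
	)
	if err != nil {
		log.Fatal(err)
	}
	_ = opts // hand these to the client constructor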
144 vendor/cloud.google.com/go/bigtable/internal/stat/stats.go generated vendored Normal file
@@ -0,0 +1,144 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package stat

import (
	"bytes"
	"encoding/csv"
	"fmt"
	"io"
	"math"
	"sort"
	"strconv"
	"text/tabwriter"
	"time"
)

type byDuration []time.Duration

func (data byDuration) Len() int           { return len(data) }
func (data byDuration) Swap(i, j int)      { data[i], data[j] = data[j], data[i] }
func (data byDuration) Less(i, j int) bool { return data[i] < data[j] }

// quantile returns a value representing the kth of q quantiles.
// May alter the order of data.
func quantile(data []time.Duration, k, q int) (quantile time.Duration, ok bool) {
	if len(data) < 1 {
		return 0, false
	}
	if k > q {
		return 0, false
	}
	if k < 0 || q < 1 {
		return 0, false
	}

	sort.Sort(byDuration(data))

	if k == 0 {
		return data[0], true
	}
	if k == q {
		return data[len(data)-1], true
	}

	bucketSize := float64(len(data)-1) / float64(q)
	i := float64(k) * bucketSize

	lower := int(math.Trunc(i))
	var upper int
	if i > float64(lower) && lower+1 < len(data) {
		// If the quantile lies between two elements
		upper = lower + 1
	} else {
		upper = lower
	}
	weightUpper := i - float64(lower)
	weightLower := 1 - weightUpper
	return time.Duration(weightLower*float64(data[lower]) + weightUpper*float64(data[upper])), true
}

type Aggregate struct {
	Name               string
	Count, Errors      int
	Min, Median, Max   time.Duration
	P75, P90, P95, P99 time.Duration // percentiles
}

// NewAggregate constructs an aggregate from latencies. Returns nil if latencies does not contain aggregateable data.
func NewAggregate(name string, latencies []time.Duration, errorCount int) *Aggregate {
	agg := Aggregate{Name: name, Count: len(latencies), Errors: errorCount}

	if len(latencies) == 0 {
		return nil
	}
	var ok bool
	if agg.Min, ok = quantile(latencies, 0, 2); !ok {
		return nil
	}
	if agg.Median, ok = quantile(latencies, 1, 2); !ok {
		return nil
	}
	if agg.Max, ok = quantile(latencies, 2, 2); !ok {
		return nil
	}
	if agg.P75, ok = quantile(latencies, 75, 100); !ok {
		return nil
	}
	if agg.P90, ok = quantile(latencies, 90, 100); !ok {
		return nil
	}
	if agg.P95, ok = quantile(latencies, 95, 100); !ok {
		return nil
	}
	if agg.P99, ok = quantile(latencies, 99, 100); !ok {
		return nil
	}
	return &agg
}

func (agg *Aggregate) String() string {
	if agg == nil {
		return "no data"
	}
	var buf bytes.Buffer
	tw := tabwriter.NewWriter(&buf, 0, 0, 1, ' ', 0) // one-space padding
	fmt.Fprintf(tw, "min:\t%v\nmedian:\t%v\nmax:\t%v\n95th percentile:\t%v\n99th percentile:\t%v\n",
		agg.Min, agg.Median, agg.Max, agg.P95, agg.P99)
	tw.Flush()
	return buf.String()
}

// WriteCSV writes a csv file to the given Writer,
// with a header row and one row per aggregate.
func WriteCSV(aggs []*Aggregate, iow io.Writer) error {
	w := csv.NewWriter(iow)
	defer w.Flush()
	err := w.Write([]string{"name", "count", "errors", "min", "median", "max", "p75", "p90", "p95", "p99"})
	if err != nil {
		return err
	}
	for _, agg := range aggs {
		err = w.Write([]string{
			agg.Name, strconv.Itoa(agg.Count), strconv.Itoa(agg.Errors),
			agg.Min.String(), agg.Median.String(), agg.Max.String(),
			agg.P75.String(), agg.P90.String(), agg.P95.String(), agg.P99.String(),
		})
		if err != nil {
			return err
		}
	}
	return nil
}
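A hedged sketch of the intended call pattern for this helper package; the sample values are invented for illustration, and "fmt" and "os" are assumed to be imported:

	// Summarize a latency sample: NewAggregate computes min/median/max and
	// the listed percentiles; WriteCSV emits a header plus one row per aggregate.
	latencies := []time.Duration{3 * time.Millisecond, 5 * time.Millisecond, 8 * time.Millisecond}
	if agg := NewAggregate("reads", latencies, 0); agg != nil {
		fmt.Println(agg)
		_ = WriteCSV([]*Aggregate{agg}, os.Stdout)
	}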
250 vendor/cloud.google.com/go/bigtable/reader.go generated vendored Normal file
@@ -0,0 +1,250 @@
/*
Copyright 2016 Google Inc. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package bigtable

import (
	"bytes"
	"fmt"

	btpb "google.golang.org/genproto/googleapis/bigtable/v2"
)

// A Row is returned by ReadRows. The map is keyed by column family (the prefix
// of the column name before the colon). The values are the returned ReadItems
// for that column family in the order returned by Read.
type Row map[string][]ReadItem

// Key returns the row's key, or "" if the row is empty.
func (r Row) Key() string {
	for _, items := range r {
		if len(items) > 0 {
			return items[0].Row
		}
	}
	return ""
}

// A ReadItem is returned by Read. A ReadItem contains data from a specific row and column.
type ReadItem struct {
	Row, Column string
	Timestamp   Timestamp
	Value       []byte
}

// The current state of the read rows state machine.
type rrState int64

const (
	newRow rrState = iota
	rowInProgress
	cellInProgress
)

// chunkReader handles cell chunks from the read rows response and combines
// them into full Rows.
type chunkReader struct {
	state   rrState
	curKey  []byte
	curFam  string
	curQual []byte
	curTS   int64
	curVal  []byte
	curRow  Row
	lastKey string
}

// newChunkReader returns a new chunkReader for handling read rows responses.
func newChunkReader() *chunkReader {
	return &chunkReader{state: newRow}
}

// Process takes a cell chunk and returns a new Row if the given chunk
// completes a Row, or nil otherwise.
func (cr *chunkReader) Process(cc *btpb.ReadRowsResponse_CellChunk) (Row, error) {
	var row Row
	switch cr.state {
	case newRow:
		if err := cr.validateNewRow(cc); err != nil {
			return nil, err
		}

		cr.curRow = make(Row)
		cr.curKey = cc.RowKey
		cr.curFam = cc.FamilyName.Value
		cr.curQual = cc.Qualifier.Value
		cr.curTS = cc.TimestampMicros
		row = cr.handleCellValue(cc)

	case rowInProgress:
		if err := cr.validateRowInProgress(cc); err != nil {
			return nil, err
		}

		if cc.GetResetRow() {
			cr.resetToNewRow()
			return nil, nil
		}

		if cc.FamilyName != nil {
			cr.curFam = cc.FamilyName.Value
		}
		if cc.Qualifier != nil {
			cr.curQual = cc.Qualifier.Value
		}
		cr.curTS = cc.TimestampMicros
		row = cr.handleCellValue(cc)

	case cellInProgress:
		if err := cr.validateCellInProgress(cc); err != nil {
			return nil, err
		}
		if cc.GetResetRow() {
			cr.resetToNewRow()
			return nil, nil
		}
		row = cr.handleCellValue(cc)
	}

	return row, nil
}

// Close must be called after all cell chunks from the response
// have been processed. An error will be returned if the reader is
// in an invalid state, in which case the error should be propagated to the caller.
func (cr *chunkReader) Close() error {
	if cr.state != newRow {
		return fmt.Errorf("invalid state for end of stream %q", cr.state)
	}
	return nil
}

// handleCellValue returns a Row if the cell value includes a commit, otherwise nil.
func (cr *chunkReader) handleCellValue(cc *btpb.ReadRowsResponse_CellChunk) Row {
	if cc.ValueSize > 0 {
		// ValueSize is specified so expect a split value of ValueSize bytes
		if cr.curVal == nil {
			cr.curVal = make([]byte, 0, cc.ValueSize)
		}
		cr.curVal = append(cr.curVal, cc.Value...)
		cr.state = cellInProgress
	} else {
		// This cell is either the complete value or the last chunk of a split
		if cr.curVal == nil {
			cr.curVal = cc.Value
		} else {
			cr.curVal = append(cr.curVal, cc.Value...)
		}
		cr.finishCell()

		if cc.GetCommitRow() {
			return cr.commitRow()
		} else {
			cr.state = rowInProgress
		}
	}

	return nil
}

func (cr *chunkReader) finishCell() {
	ri := ReadItem{
		Row:       string(cr.curKey),
		Column:    fmt.Sprintf("%s:%s", cr.curFam, cr.curQual),
		Timestamp: Timestamp(cr.curTS),
		Value:     cr.curVal,
	}
	cr.curRow[cr.curFam] = append(cr.curRow[cr.curFam], ri)
	cr.curVal = nil
}

func (cr *chunkReader) commitRow() Row {
	row := cr.curRow
	cr.lastKey = cr.curRow.Key()
	cr.resetToNewRow()
	return row
}

func (cr *chunkReader) resetToNewRow() {
	cr.curKey = nil
	cr.curFam = ""
	cr.curQual = nil
	cr.curVal = nil
	cr.curRow = nil
	cr.curTS = 0
	cr.state = newRow
}

func (cr *chunkReader) validateNewRow(cc *btpb.ReadRowsResponse_CellChunk) error {
	if cc.GetResetRow() {
		return fmt.Errorf("reset_row not allowed between rows")
	}
	if cc.RowKey == nil || cc.FamilyName == nil || cc.Qualifier == nil {
		return fmt.Errorf("missing key field for new row %v", cc)
	}
	if cr.lastKey != "" && cr.lastKey >= string(cc.RowKey) {
		return fmt.Errorf("out of order row key: %q, %q", cr.lastKey, string(cc.RowKey))
	}
	return nil
}

func (cr *chunkReader) validateRowInProgress(cc *btpb.ReadRowsResponse_CellChunk) error {
	if err := cr.validateRowStatus(cc); err != nil {
		return err
	}
	if cc.RowKey != nil && !bytes.Equal(cc.RowKey, cr.curKey) {
		return fmt.Errorf("received new row key %q during existing row %q", cc.RowKey, cr.curKey)
	}
	if cc.FamilyName != nil && cc.Qualifier == nil {
		return fmt.Errorf("family name %q specified without a qualifier", cc.FamilyName)
	}
	return nil
}

func (cr *chunkReader) validateCellInProgress(cc *btpb.ReadRowsResponse_CellChunk) error {
	if err := cr.validateRowStatus(cc); err != nil {
		return err
	}
	if cr.curVal == nil {
		return fmt.Errorf("no cached cell while CELL_IN_PROGRESS %v", cc)
	}
	if cc.GetResetRow() == false && cr.isAnyKeyPresent(cc) {
		return fmt.Errorf("cell key components found while CELL_IN_PROGRESS %v", cc)
	}
	return nil
}

func (cr *chunkReader) isAnyKeyPresent(cc *btpb.ReadRowsResponse_CellChunk) bool {
	return cc.RowKey != nil ||
		cc.FamilyName != nil ||
		cc.Qualifier != nil ||
		cc.TimestampMicros != 0
}

// Validate a RowStatus, commit or reset, if present.
func (cr *chunkReader) validateRowStatus(cc *btpb.ReadRowsResponse_CellChunk) error {
	// Resets can't be specified with any other part of a cell
	if cc.GetResetRow() && (cr.isAnyKeyPresent(cc) ||
		cc.Value != nil ||
		cc.ValueSize != 0 ||
		cc.Labels != nil) {
		return fmt.Errorf("reset must not be specified with other fields %v", cc)
	}
	if cc.GetCommitRow() && cc.ValueSize > 0 {
		return fmt.Errorf("commit row found in between chunks in a cell")
	}
	return nil
}
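A minimal sketch of how a caller is expected to drive the state machine above; stream and handle are hypothetical stand-ins for a ReadRows stream client and a row callback, and "io" is assumed to be imported:

	cr := newChunkReader()
	for {
		res, err := stream.Recv() // *btpb.ReadRowsResponse
		if err == io.EOF {
			break
		}
		if err != nil {
			return err
		}
		for _, cc := range res.Chunks {
			row, err := cr.Process(cc)
			if err != nil {
				return err // malformed chunk sequence
			}
			if row != nil {
				handle(row) // a complete row was assembled
			}
		}
	}
	return cr.Close() // errors if the stream ended mid-row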
343 vendor/cloud.google.com/go/bigtable/reader_test.go generated vendored Normal file
@@ -0,0 +1,343 @@
/*
Copyright 2016 Google Inc. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package bigtable

import (
	"encoding/json"
	"fmt"
	"io/ioutil"
	"reflect"
	"strings"
	"testing"

	"github.com/golang/protobuf/proto"
	"github.com/golang/protobuf/ptypes/wrappers"
	btspb "google.golang.org/genproto/googleapis/bigtable/v2"
)

// Indicates that a field in the proto should be omitted, rather than included
// as a wrapped empty string.
const nilStr = "<>"

func TestSingleCell(t *testing.T) {
	cr := newChunkReader()

	// All in one cell
	row, err := cr.Process(cc("rk", "fm", "col", 1, "value", 0, true))
	if err != nil {
		t.Fatalf("Processing chunk: %v", err)
	}
	if row == nil {
		t.Fatalf("Missing row")
	}
	if len(row["fm"]) != 1 {
		t.Fatalf("Family name length mismatch %d, %d", 1, len(row["fm"]))
	}
	want := []ReadItem{ri("rk", "fm", "col", 1, "value")}
	if !reflect.DeepEqual(row["fm"], want) {
		t.Fatalf("Incorrect ReadItem: got: %v\nwant: %v\n", row["fm"], want)
	}
	if err := cr.Close(); err != nil {
		t.Fatalf("Close: %v", err)
	}
}

func TestMultipleCells(t *testing.T) {
	cr := newChunkReader()

	cr.Process(cc("rs", "fm1", "col1", 0, "val1", 0, false))
	cr.Process(cc("rs", "fm1", "col1", 1, "val2", 0, false))
	cr.Process(cc("rs", "fm1", "col2", 0, "val3", 0, false))
	cr.Process(cc("rs", "fm2", "col1", 0, "val4", 0, false))
	row, err := cr.Process(cc("rs", "fm2", "col2", 1, "extralongval5", 0, true))
	if err != nil {
		t.Fatalf("Processing chunk: %v", err)
	}
	if row == nil {
		t.Fatalf("Missing row")
	}

	want := []ReadItem{
		ri("rs", "fm1", "col1", 0, "val1"),
		ri("rs", "fm1", "col1", 1, "val2"),
		ri("rs", "fm1", "col2", 0, "val3"),
	}
	if !reflect.DeepEqual(row["fm1"], want) {
		t.Fatalf("Incorrect ReadItem: got: %v\nwant: %v\n", row["fm1"], want)
	}
	want = []ReadItem{
		ri("rs", "fm2", "col1", 0, "val4"),
		ri("rs", "fm2", "col2", 1, "extralongval5"),
	}
	if !reflect.DeepEqual(row["fm2"], want) {
		t.Fatalf("Incorrect ReadItem: got: %v\nwant: %v\n", row["fm2"], want)
	}
	if err := cr.Close(); err != nil {
		t.Fatalf("Close: %v", err)
	}
}

func TestSplitCells(t *testing.T) {
	cr := newChunkReader()

	cr.Process(cc("rs", "fm1", "col1", 0, "hello ", 11, false))
	cr.Process(ccData("world", 0, false))
	row, err := cr.Process(cc("rs", "fm1", "col2", 0, "val2", 0, true))
	if err != nil {
		t.Fatalf("Processing chunk: %v", err)
	}
	if row == nil {
		t.Fatalf("Missing row")
	}

	want := []ReadItem{
		ri("rs", "fm1", "col1", 0, "hello world"),
		ri("rs", "fm1", "col2", 0, "val2"),
	}
	if !reflect.DeepEqual(row["fm1"], want) {
		t.Fatalf("Incorrect ReadItem: got: %v\nwant: %v\n", row["fm1"], want)
	}
	if err := cr.Close(); err != nil {
		t.Fatalf("Close: %v", err)
	}
}

func TestMultipleRows(t *testing.T) {
	cr := newChunkReader()

	row, err := cr.Process(cc("rs1", "fm1", "col1", 1, "val1", 0, true))
	if err != nil {
		t.Fatalf("Processing chunk: %v", err)
	}
	want := []ReadItem{ri("rs1", "fm1", "col1", 1, "val1")}
	if !reflect.DeepEqual(row["fm1"], want) {
		t.Fatalf("Incorrect ReadItem: got: %v\nwant: %v\n", row["fm1"], want)
	}

	row, err = cr.Process(cc("rs2", "fm2", "col2", 2, "val2", 0, true))
	if err != nil {
		t.Fatalf("Processing chunk: %v", err)
	}
	want = []ReadItem{ri("rs2", "fm2", "col2", 2, "val2")}
	if !reflect.DeepEqual(row["fm2"], want) {
		t.Fatalf("Incorrect ReadItem: got: %v\nwant: %v\n", row["fm2"], want)
	}

	if err := cr.Close(); err != nil {
		t.Fatalf("Close: %v", err)
	}
}

func TestBlankQualifier(t *testing.T) {
	cr := newChunkReader()

	row, err := cr.Process(cc("rs1", "fm1", "", 1, "val1", 0, true))
	if err != nil {
		t.Fatalf("Processing chunk: %v", err)
	}
	want := []ReadItem{ri("rs1", "fm1", "", 1, "val1")}
	if !reflect.DeepEqual(row["fm1"], want) {
		t.Fatalf("Incorrect ReadItem: got: %v\nwant: %v\n", row["fm1"], want)
	}

	row, err = cr.Process(cc("rs2", "fm2", "col2", 2, "val2", 0, true))
	if err != nil {
		t.Fatalf("Processing chunk: %v", err)
	}
	want = []ReadItem{ri("rs2", "fm2", "col2", 2, "val2")}
	if !reflect.DeepEqual(row["fm2"], want) {
		t.Fatalf("Incorrect ReadItem: got: %v\nwant: %v\n", row["fm2"], want)
	}

	if err := cr.Close(); err != nil {
		t.Fatalf("Close: %v", err)
	}
}

func TestReset(t *testing.T) {
	cr := newChunkReader()

	cr.Process(cc("rs", "fm1", "col1", 0, "val1", 0, false))
	cr.Process(cc("rs", "fm1", "col1", 1, "val2", 0, false))
	cr.Process(cc("rs", "fm1", "col2", 0, "val3", 0, false))
	cr.Process(ccReset())
	row, _ := cr.Process(cc("rs1", "fm1", "col1", 1, "val1", 0, true))
	want := []ReadItem{ri("rs1", "fm1", "col1", 1, "val1")}
	if !reflect.DeepEqual(row["fm1"], want) {
		t.Fatalf("Reset: got: %v\nwant: %v\n", row["fm1"], want)
	}
	if err := cr.Close(); err != nil {
		t.Fatalf("Close: %v", err)
	}
}

func TestNewFamEmptyQualifier(t *testing.T) {
	cr := newChunkReader()

	cr.Process(cc("rs", "fm1", "col1", 0, "val1", 0, false))
	_, err := cr.Process(cc(nilStr, "fm2", nilStr, 0, "val2", 0, true))
	if err == nil {
		t.Fatalf("Expected error on second chunk with no qualifier set")
	}
}

// The read rows acceptance test reads a json file specifying a number of tests,
// each consisting of one or more cell chunk text protos and one or more resulting
// cells or errors.
type AcceptanceTest struct {
	Tests []TestCase `json:"tests"`
}

type TestCase struct {
	Name    string       `json:"name"`
	Chunks  []string     `json:"chunks"`
	Results []TestResult `json:"results"`
}

type TestResult struct {
	RK    string `json:"rk"`
	FM    string `json:"fm"`
	Qual  string `json:"qual"`
	TS    int64  `json:"ts"`
	Value string `json:"value"`
	Error bool   `json:"error"` // If true, expect an error. Ignore any other field.
}

func TestAcceptance(t *testing.T) {
	testJson, err := ioutil.ReadFile("./testdata/read-rows-acceptance-test.json")
	if err != nil {
		t.Fatalf("could not open acceptance test file %v", err)
	}

	var accTest AcceptanceTest
	err = json.Unmarshal(testJson, &accTest)
	if err != nil {
		t.Fatalf("could not parse acceptance test file: %v", err)
	}

	for _, test := range accTest.Tests {
		runTestCase(t, test)
	}
}

func runTestCase(t *testing.T, test TestCase) {
	// Increment an index into the result array as we get results
	cr := newChunkReader()
	var results []TestResult
	var seenErr bool
	for _, chunkText := range test.Chunks {
		// Parse and pass each cell chunk to the ChunkReader
		cc := &btspb.ReadRowsResponse_CellChunk{}
		err := proto.UnmarshalText(chunkText, cc)
		if err != nil {
			t.Errorf("[%s] failed to unmarshal text proto: %s\n%s", test.Name, chunkText, err)
			return
		}
		row, err := cr.Process(cc)
		if err != nil {
			results = append(results, TestResult{Error: true})
			seenErr = true
			break
		} else {
			// Turn the Row into TestResults
			for fm, ris := range row {
				for _, ri := range ris {
					tr := TestResult{
						RK:    ri.Row,
						FM:    fm,
						Qual:  strings.Split(ri.Column, ":")[1],
						TS:    int64(ri.Timestamp),
						Value: string(ri.Value),
					}
					results = append(results, tr)
				}
			}
		}
	}

	// Only Close if we don't have an error yet; otherwise an error from Close is expected.
	if !seenErr {
		err := cr.Close()
		if err != nil {
			results = append(results, TestResult{Error: true})
		}
	}

	got := toSet(results)
	want := toSet(test.Results)
	if !reflect.DeepEqual(got, want) {
		t.Fatalf("[%s]: got: %v\nwant: %v\n", test.Name, got, want)
	}
}

func toSet(res []TestResult) map[TestResult]bool {
	set := make(map[TestResult]bool)
	for _, tr := range res {
		set[tr] = true
	}
	return set
}

// ri returns a ReadItem for the given components
func ri(rk string, fm string, qual string, ts int64, val string) ReadItem {
	return ReadItem{Row: rk, Column: fmt.Sprintf("%s:%s", fm, qual), Value: []byte(val), Timestamp: Timestamp(ts)}
}

// cc returns a CellChunk proto
func cc(rk string, fm string, qual string, ts int64, val string, size int32, commit bool) *btspb.ReadRowsResponse_CellChunk {
	// The components of the cell key are wrapped and can be null or empty
	var rkWrapper []byte
	if rk == nilStr {
		rkWrapper = nil
	} else {
		rkWrapper = []byte(rk)
	}

	var fmWrapper *wrappers.StringValue
	if fm != nilStr {
		fmWrapper = &wrappers.StringValue{Value: fm}
	} else {
		fmWrapper = nil
	}

	var qualWrapper *wrappers.BytesValue
	if qual != nilStr {
		qualWrapper = &wrappers.BytesValue{Value: []byte(qual)}
	} else {
		qualWrapper = nil
	}

	return &btspb.ReadRowsResponse_CellChunk{
		RowKey:          rkWrapper,
		FamilyName:      fmWrapper,
		Qualifier:       qualWrapper,
		TimestampMicros: ts,
		Value:           []byte(val),
		ValueSize:       size,
		RowStatus:       &btspb.ReadRowsResponse_CellChunk_CommitRow{CommitRow: commit}}
}

// ccData returns a CellChunk with only a value and size
func ccData(val string, size int32, commit bool) *btspb.ReadRowsResponse_CellChunk {
	return cc(nilStr, nilStr, nilStr, 0, val, size, commit)
}

// ccReset returns a CellChunk with ResetRow set to true
func ccReset() *btspb.ReadRowsResponse_CellChunk {
	return &btspb.ReadRowsResponse_CellChunk{
		RowStatus: &btspb.ReadRowsResponse_CellChunk_ResetRow{ResetRow: true}}
}
362 vendor/cloud.google.com/go/bigtable/retry_test.go generated vendored Normal file
@@ -0,0 +1,362 @@
/*
Copyright 2016 Google Inc. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package bigtable

import (
	"reflect"
	"strings"
	"testing"
	"time"

	"cloud.google.com/go/bigtable/bttest"
	"github.com/golang/protobuf/ptypes/wrappers"
	"golang.org/x/net/context"
	"google.golang.org/api/option"
	btpb "google.golang.org/genproto/googleapis/bigtable/v2"
	rpcpb "google.golang.org/genproto/googleapis/rpc/status"
	"google.golang.org/grpc"
	"google.golang.org/grpc/codes"
)

func setupFakeServer(opt ...grpc.ServerOption) (tbl *Table, cleanup func(), err error) {
	srv, err := bttest.NewServer("127.0.0.1:0", opt...)
	if err != nil {
		return nil, nil, err
	}
	conn, err := grpc.Dial(srv.Addr, grpc.WithInsecure())
	if err != nil {
		return nil, nil, err
	}

	client, err := NewClient(context.Background(), "client", "instance", option.WithGRPCConn(conn))
	if err != nil {
		return nil, nil, err
	}

	adminClient, err := NewAdminClient(context.Background(), "client", "instance", option.WithGRPCConn(conn))
	if err != nil {
		return nil, nil, err
	}
	if err := adminClient.CreateTable(context.Background(), "table"); err != nil {
		return nil, nil, err
	}
	if err := adminClient.CreateColumnFamily(context.Background(), "table", "cf"); err != nil {
		return nil, nil, err
	}
	t := client.Open("table")

	cleanupFunc := func() {
		adminClient.Close()
		client.Close()
		srv.Close()
	}
	return t, cleanupFunc, nil
}

func TestRetryApply(t *testing.T) {
	ctx := context.Background()

	errCount := 0
	code := codes.Unavailable // Will be retried
	// Intercept requests and return an error or defer to the underlying handler
	errInjector := func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
		if strings.HasSuffix(info.FullMethod, "MutateRow") && errCount < 3 {
			errCount++
			return nil, grpc.Errorf(code, "")
		}
		return handler(ctx, req)
	}
	tbl, cleanup, err := setupFakeServer(grpc.UnaryInterceptor(errInjector))
	defer cleanup()
	if err != nil {
		t.Fatalf("fake server setup: %v", err)
	}

	mut := NewMutation()
	mut.Set("cf", "col", 1, []byte("val"))
	if err := tbl.Apply(ctx, "row1", mut); err != nil {
		t.Errorf("applying single mutation with retries: %v", err)
	}
	row, err := tbl.ReadRow(ctx, "row1")
	if err != nil {
		t.Errorf("reading single value with retries: %v", err)
	}
	if row == nil {
		t.Errorf("applying single mutation with retries: could not read back row")
	}

	code = codes.FailedPrecondition // Won't be retried
	errCount = 0
	if err := tbl.Apply(ctx, "row", mut); err == nil {
		t.Errorf("applying single mutation with no retries: no error")
	}

	// Check and mutate
	mutTrue := NewMutation()
	mutTrue.DeleteRow()
	mutFalse := NewMutation()
	mutFalse.Set("cf", "col", 1, []byte("val"))
	condMut := NewCondMutation(ValueFilter("."), mutTrue, mutFalse)

	errCount = 0
	code = codes.Unavailable // Will be retried
	if err := tbl.Apply(ctx, "row1", condMut); err != nil {
		t.Errorf("conditionally mutating row with retries: %v", err)
	}
	row, err = tbl.ReadRow(ctx, "row1") // row1 already in the table
	if err != nil {
		t.Errorf("reading single value after conditional mutation: %v", err)
	}
	if row != nil {
		t.Errorf("reading single value after conditional mutation: row not deleted")
	}

	errCount = 0
	code = codes.FailedPrecondition // Won't be retried
	if err := tbl.Apply(ctx, "row", condMut); err == nil {
		t.Errorf("conditionally mutating row with no retries: no error")
	}
}

func TestRetryApplyBulk(t *testing.T) {
	ctx := context.Background()

	// Intercept requests and delegate to an interceptor defined by the test case
	errCount := 0
	var f func(grpc.ServerStream) error
	errInjector := func(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
		if strings.HasSuffix(info.FullMethod, "MutateRows") {
			return f(ss)
		}
		return handler(ctx, ss)
	}

	tbl, cleanup, err := setupFakeServer(grpc.StreamInterceptor(errInjector))
	defer cleanup()
	if err != nil {
		t.Fatalf("fake server setup: %v", err)
	}

	errCount = 0
	// Test overall request failure and retries
	f = func(ss grpc.ServerStream) error {
		if errCount < 3 {
			errCount++
			return grpc.Errorf(codes.Aborted, "")
		}
		return nil
	}
	mut := NewMutation()
	mut.Set("cf", "col", 1, []byte{})
	errors, err := tbl.ApplyBulk(ctx, []string{"row2"}, []*Mutation{mut})
	if errors != nil || err != nil {
		t.Errorf("bulk with request failure: got: %v, %v, want: nil", errors, err)
	}

	// Test failures and retries in one request
	errCount = 0
	m1 := NewMutation()
	m1.Set("cf", "col", 1, []byte{})
	m2 := NewMutation()
	m2.Set("cf", "col2", 1, []byte{})
	m3 := NewMutation()
	m3.Set("cf", "col3", 1, []byte{})
	f = func(ss grpc.ServerStream) error {
		var err error
		req := new(btpb.MutateRowsRequest)
		ss.RecvMsg(req)
		switch errCount {
		case 0:
			// Retryable request failure
			err = grpc.Errorf(codes.Unavailable, "")
		case 1:
			// Two mutations fail
			writeMutateRowsResponse(ss, codes.Unavailable, codes.OK, codes.Aborted)
			err = nil
		case 2:
			// Two failures were retried. One will succeed.
			if want, got := 2, len(req.Entries); want != got {
				t.Errorf("2 bulk retries, got: %d, want %d", got, want)
			}
			writeMutateRowsResponse(ss, codes.OK, codes.Aborted)
			err = nil
		case 3:
			// One failure was retried and will succeed.
			if want, got := 1, len(req.Entries); want != got {
				t.Errorf("1 bulk retry, got: %d, want %d", got, want)
			}
			writeMutateRowsResponse(ss, codes.OK)
			err = nil
		}
		errCount++
		return err
	}
	errors, err = tbl.ApplyBulk(ctx, []string{"row1", "row2", "row3"}, []*Mutation{m1, m2, m3})
	if errors != nil || err != nil {
		t.Errorf("bulk with retries: got: %v, %v, want: nil", errors, err)
	}

	// Test unretryable errors
	niMut := NewMutation()
	niMut.Set("cf", "col", ServerTime, []byte{}) // Non-idempotent
	errCount = 0
	f = func(ss grpc.ServerStream) error {
		var err error
		req := new(btpb.MutateRowsRequest)
		ss.RecvMsg(req)
		switch errCount {
		case 0:
			// Give non-idempotent mutation a retryable error code.
			// Nothing should be retried.
			writeMutateRowsResponse(ss, codes.FailedPrecondition, codes.Aborted)
			err = nil
		case 1:
			t.Errorf("unretryable errors: got one retry, want no retries")
		}
		errCount++
		return err
	}
	errors, err = tbl.ApplyBulk(ctx, []string{"row1", "row2"}, []*Mutation{m1, niMut})
	if err != nil {
		t.Errorf("unretryable errors: request failed %v", err)
	}
	want := []error{
		grpc.Errorf(codes.FailedPrecondition, ""),
		grpc.Errorf(codes.Aborted, ""),
	}
	if !reflect.DeepEqual(want, errors) {
		t.Errorf("unretryable errors: got: %v, want: %v", errors, want)
	}

	// Test individual errors and a deadline exceeded
	f = func(ss grpc.ServerStream) error {
		writeMutateRowsResponse(ss, codes.FailedPrecondition, codes.OK, codes.Aborted)
		return nil
	}
	ctx, _ = context.WithTimeout(ctx, 100*time.Millisecond)
	errors, err = tbl.ApplyBulk(ctx, []string{"row1", "row2", "row3"}, []*Mutation{m1, m2, m3})
	wantErr := context.DeadlineExceeded
	if wantErr != err {
		t.Errorf("deadline exceeded error: got: %v, want: %v", err, wantErr)
	}
	if errors != nil {
		t.Errorf("deadline exceeded errors: got: %v, want: nil", err)
	}
}

func writeMutateRowsResponse(ss grpc.ServerStream, codes ...codes.Code) error {
	res := &btpb.MutateRowsResponse{Entries: make([]*btpb.MutateRowsResponse_Entry, len(codes))}
	for i, code := range codes {
		res.Entries[i] = &btpb.MutateRowsResponse_Entry{
			Index:  int64(i),
			Status: &rpcpb.Status{Code: int32(code), Message: ""},
		}
	}
	return ss.SendMsg(res)
}

func TestRetainRowsAfter(t *testing.T) {
	prevRowRange := NewRange("a", "z")
	prevRowKey := "m"
	want := NewRange("m\x00", "z")
	got := prevRowRange.retainRowsAfter(prevRowKey)
	if !reflect.DeepEqual(want, got) {
		t.Errorf("range retry: got %v, want %v", got, want)
	}

	prevRowList := RowList{"a", "b", "c", "d", "e", "f"}
	prevRowKey = "b"
	wantList := RowList{"c", "d", "e", "f"}
	got = prevRowList.retainRowsAfter(prevRowKey)
	if !reflect.DeepEqual(wantList, got) {
		t.Errorf("list retry: got %v, want %v", got, wantList)
	}
}

func TestRetryReadRows(t *testing.T) {
	ctx := context.Background()

	// Intercept requests and delegate to an interceptor defined by the test case
	errCount := 0
	var f func(grpc.ServerStream) error
	errInjector := func(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
		if strings.HasSuffix(info.FullMethod, "ReadRows") {
			return f(ss)
		}
		return handler(ctx, ss)
	}

	tbl, cleanup, err := setupFakeServer(grpc.StreamInterceptor(errInjector))
	defer cleanup()
	if err != nil {
		t.Fatalf("fake server setup: %v", err)
	}

	errCount = 0
	// Test overall request failure and retries
	f = func(ss grpc.ServerStream) error {
		var err error
		req := new(btpb.ReadRowsRequest)
		ss.RecvMsg(req)
		switch errCount {
		case 0:
			// Retryable request failure
			err = grpc.Errorf(codes.Unavailable, "")
		case 1:
			// Write two rows then error
			if want, got := "a", string(req.Rows.RowRanges[0].GetStartKeyClosed()); want != got {
				t.Errorf("first retry, no data received yet: got %q, want %q", got, want)
			}
			writeReadRowsResponse(ss, "a", "b")
			err = grpc.Errorf(codes.Unavailable, "")
		case 2:
			// Retryable request failure
			if want, got := "b\x00", string(req.Rows.RowRanges[0].GetStartKeyClosed()); want != got {
				t.Errorf("2 range retries: got %q, want %q", got, want)
			}
			err = grpc.Errorf(codes.Unavailable, "")
		case 3:
			// Write two more rows
			writeReadRowsResponse(ss, "c", "d")
			err = nil
		}
		errCount++
		return err
	}

	var got []string
	tbl.ReadRows(ctx, NewRange("a", "z"), func(r Row) bool {
		got = append(got, r.Key())
		return true
	})
	want := []string{"a", "b", "c", "d"}
	if !reflect.DeepEqual(got, want) {
		t.Errorf("retry range integration: got %v, want %v", got, want)
	}
}

func writeReadRowsResponse(ss grpc.ServerStream, rowKeys ...string) error {
	var chunks []*btpb.ReadRowsResponse_CellChunk
	for _, key := range rowKeys {
		chunks = append(chunks, &btpb.ReadRowsResponse_CellChunk{
			RowKey:     []byte(key),
			FamilyName: &wrappers.StringValue{Value: "fm"},
			Qualifier:  &wrappers.BytesValue{Value: []byte("col")},
			RowStatus:  &btpb.ReadRowsResponse_CellChunk_CommitRow{CommitRow: true},
		})
	}
	return ss.SendMsg(&btpb.ReadRowsResponse{Chunks: chunks})
}
1178 vendor/cloud.google.com/go/bigtable/testdata/read-rows-acceptance-test.json generated vendored Normal file
File diff suppressed because it is too large
277 vendor/cloud.google.com/go/civil/civil.go generated vendored Normal file
@@ -0,0 +1,277 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package civil implements types for civil time, a time-zone-independent
// representation of time that follows the rules of the proleptic
// Gregorian calendar with exactly 24-hour days, 60-minute hours, and 60-second
// minutes.
//
// Because they lack location information, these types do not represent unique
// moments or intervals of time. Use time.Time for that purpose.
package civil

import (
	"fmt"
	"time"
)

// A Date represents a date (year, month, day).
//
// This type does not include location information, and therefore does not
// describe a unique 24-hour timespan.
type Date struct {
	Year  int        // Year (e.g., 2014).
	Month time.Month // Month of the year (January = 1, ...).
	Day   int        // Day of the month, starting at 1.
}

// DateOf returns the Date in which a time occurs in that time's location.
func DateOf(t time.Time) Date {
	var d Date
	d.Year, d.Month, d.Day = t.Date()
	return d
}

// ParseDate parses a string in RFC3339 full-date format and returns the date value it represents.
func ParseDate(s string) (Date, error) {
	t, err := time.Parse("2006-01-02", s)
	if err != nil {
		return Date{}, err
	}
	return DateOf(t), nil
}

// String returns the date in RFC3339 full-date format.
func (d Date) String() string {
	return fmt.Sprintf("%04d-%02d-%02d", d.Year, d.Month, d.Day)
}

// IsValid reports whether the date is valid.
func (d Date) IsValid() bool {
	return DateOf(d.In(time.UTC)) == d
}

// In returns the time corresponding to time 00:00:00 of the date in the location.
//
// In is always consistent with time.Date, even when time.Date returns a time
// on a different day. For example, if loc is America/Indiana/Vincennes, then both
//     time.Date(1955, time.May, 1, 0, 0, 0, 0, loc)
// and
//     civil.Date{Year: 1955, Month: time.May, Day: 1}.In(loc)
// return 23:00:00 on April 30, 1955.
//
// In panics if loc is nil.
func (d Date) In(loc *time.Location) time.Time {
	return time.Date(d.Year, d.Month, d.Day, 0, 0, 0, 0, loc)
}

// AddDays returns the date that is n days in the future.
// n can also be negative to go into the past.
func (d Date) AddDays(n int) Date {
	return DateOf(d.In(time.UTC).AddDate(0, 0, n))
}

// DaysSince returns the signed number of days between the date and s, not including the end day.
// This is the inverse operation to AddDays.
func (d Date) DaysSince(s Date) (days int) {
	// We convert to Unix time so we do not have to worry about leap seconds:
	// Unix time increases by exactly 86400 seconds per day.
	deltaUnix := d.In(time.UTC).Unix() - s.In(time.UTC).Unix()
	return int(deltaUnix / 86400)
}

// Before reports whether d1 occurs before d2.
func (d1 Date) Before(d2 Date) bool {
	if d1.Year != d2.Year {
		return d1.Year < d2.Year
	}
	if d1.Month != d2.Month {
		return d1.Month < d2.Month
	}
	return d1.Day < d2.Day
}

// After reports whether d1 occurs after d2.
func (d1 Date) After(d2 Date) bool {
	return d2.Before(d1)
}

// MarshalText implements the encoding.TextMarshaler interface.
// The output is the result of d.String().
func (d Date) MarshalText() ([]byte, error) {
	return []byte(d.String()), nil
}

// UnmarshalText implements the encoding.TextUnmarshaler interface.
// The date is expected to be a string in a format accepted by ParseDate.
func (d *Date) UnmarshalText(data []byte) error {
	var err error
	*d, err = ParseDate(string(data))
	return err
}

// A Time represents a time with nanosecond precision.
//
// This type does not include location information, and therefore does not
// describe a unique moment in time.
//
// This type exists to represent the TIME type in storage-based APIs like BigQuery.
// Most operations on Times are unlikely to be meaningful. Prefer the DateTime type.
type Time struct {
	Hour       int // The hour of the day in 24-hour format; range [0-23]
	Minute     int // The minute of the hour; range [0-59]
	Second     int // The second of the minute; range [0-59]
	Nanosecond int // The nanosecond of the second; range [0-999999999]
}

// TimeOf returns the Time representing the time of day in which a time occurs
// in that time's location. It ignores the date.
func TimeOf(t time.Time) Time {
	var tm Time
	tm.Hour, tm.Minute, tm.Second = t.Clock()
	tm.Nanosecond = t.Nanosecond()
	return tm
}

// ParseTime parses a string and returns the time value it represents.
// ParseTime accepts an extended form of the RFC3339 partial-time format. After
// the HH:MM:SS part of the string, an optional fractional part may appear,
// consisting of a decimal point followed by one to nine decimal digits.
// (RFC3339 admits only one digit after the decimal point).
func ParseTime(s string) (Time, error) {
	t, err := time.Parse("15:04:05.999999999", s)
	if err != nil {
		return Time{}, err
	}
	return TimeOf(t), nil
}

// String returns the date in the format described in ParseTime. If Nanoseconds
// is zero, no fractional part will be generated. Otherwise, the result will
// end with a fractional part consisting of a decimal point and nine digits.
func (t Time) String() string {
	s := fmt.Sprintf("%02d:%02d:%02d", t.Hour, t.Minute, t.Second)
	if t.Nanosecond == 0 {
		return s
	}
	return s + fmt.Sprintf(".%09d", t.Nanosecond)
}

// IsValid reports whether the time is valid.
func (t Time) IsValid() bool {
	// Construct a non-zero time.
	tm := time.Date(2, 2, 2, t.Hour, t.Minute, t.Second, t.Nanosecond, time.UTC)
	return TimeOf(tm) == t
}

// MarshalText implements the encoding.TextMarshaler interface.
// The output is the result of t.String().
func (t Time) MarshalText() ([]byte, error) {
	return []byte(t.String()), nil
}

// UnmarshalText implements the encoding.TextUnmarshaler interface.
// The time is expected to be a string in a format accepted by ParseTime.
func (t *Time) UnmarshalText(data []byte) error {
	var err error
	*t, err = ParseTime(string(data))
	return err
}

// A DateTime represents a date and time.
//
// This type does not include location information, and therefore does not
// describe a unique moment in time.
type DateTime struct {
	Date Date
	Time Time
}

// Note: We deliberately do not embed Date into DateTime, to avoid promoting AddDays and Sub.

// DateTimeOf returns the DateTime in which a time occurs in that time's location.
func DateTimeOf(t time.Time) DateTime {
	return DateTime{
		Date: DateOf(t),
		Time: TimeOf(t),
	}
}

// ParseDateTime parses a string and returns the DateTime it represents.
// ParseDateTime accepts a variant of the RFC3339 date-time format that omits
// the time offset but includes an optional fractional time, as described in
// ParseTime. Informally, the accepted format is
//     YYYY-MM-DDTHH:MM:SS[.FFFFFFFFF]
// where the 'T' may be a lower-case 't'.
func ParseDateTime(s string) (DateTime, error) {
	t, err := time.Parse("2006-01-02T15:04:05.999999999", s)
	if err != nil {
		t, err = time.Parse("2006-01-02t15:04:05.999999999", s)
		if err != nil {
			return DateTime{}, err
		}
	}
	return DateTimeOf(t), nil
}

// String returns the date in the format described in ParseDate.
func (dt DateTime) String() string {
	return dt.Date.String() + "T" + dt.Time.String()
}

// IsValid reports whether the datetime is valid.
func (dt DateTime) IsValid() bool {
	return dt.Date.IsValid() && dt.Time.IsValid()
}

// In returns the time corresponding to the DateTime in the given location.
//
// If the time is missing or ambiguous at the location, In returns the same
// result as time.Date. For example, if loc is America/Indiana/Vincennes, then
// both
//     time.Date(1955, time.May, 1, 0, 30, 0, 0, loc)
// and
//     civil.DateTime{
//         civil.Date{Year: 1955, Month: time.May, Day: 1}},
//         civil.Time{Minute: 30}}.In(loc)
// return 23:30:00 on April 30, 1955.
//
// In panics if loc is nil.
func (dt DateTime) In(loc *time.Location) time.Time {
	return time.Date(dt.Date.Year, dt.Date.Month, dt.Date.Day, dt.Time.Hour, dt.Time.Minute, dt.Time.Second, dt.Time.Nanosecond, loc)
}

// Before reports whether dt1 occurs before dt2.
func (dt1 DateTime) Before(dt2 DateTime) bool {
	return dt1.In(time.UTC).Before(dt2.In(time.UTC))
}

// After reports whether dt1 occurs after dt2.
func (dt1 DateTime) After(dt2 DateTime) bool {
	return dt2.Before(dt1)
}

// MarshalText implements the encoding.TextMarshaler interface.
// The output is the result of dt.String().
func (dt DateTime) MarshalText() ([]byte, error) {
	return []byte(dt.String()), nil
}

// UnmarshalText implements the encoding.TextUnmarshaler interface.
// The datetime is expected to be a string in a format accepted by ParseDateTime.
func (dt *DateTime) UnmarshalText(data []byte) error {
	var err error
	*dt, err = ParseDateTime(string(data))
	return err
}
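A short, hedged sketch of the calendar arithmetic this package provides; the dates are illustrative, and "fmt" and "log" are assumed to be imported:

	// Round-trip a civil date and step across a month boundary.
	d, err := ParseDate("2017-02-28")
	if err != nil {
		log.Fatal(err)
	}
	next := d.AddDays(1)                 // 2017-03-01 (2017 is not a leap year)
	fmt.Println(next, next.DaysSince(d)) // "2017-03-01 1"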
441 vendor/cloud.google.com/go/civil/civil_test.go generated vendored Normal file
@@ -0,0 +1,441 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package civil

import (
	"encoding/json"
	"reflect"
	"testing"
	"time"
)

func TestDates(t *testing.T) {
	for _, test := range []struct {
		date     Date
		loc      *time.Location
		wantStr  string
		wantTime time.Time
	}{
		{
			date:     Date{2014, 7, 29},
			loc:      time.Local,
			wantStr:  "2014-07-29",
			wantTime: time.Date(2014, time.July, 29, 0, 0, 0, 0, time.Local),
		},
		{
			date:     DateOf(time.Date(2014, 8, 20, 15, 8, 43, 1, time.Local)),
			loc:      time.UTC,
			wantStr:  "2014-08-20",
			wantTime: time.Date(2014, 8, 20, 0, 0, 0, 0, time.UTC),
		},
		{
			date:     DateOf(time.Date(999, time.January, 26, 0, 0, 0, 0, time.Local)),
			loc:      time.UTC,
			wantStr:  "0999-01-26",
			wantTime: time.Date(999, 1, 26, 0, 0, 0, 0, time.UTC),
		},
	} {
		if got := test.date.String(); got != test.wantStr {
			t.Errorf("%#v.String() = %q, want %q", test.date, got, test.wantStr)
		}
		if got := test.date.In(test.loc); !got.Equal(test.wantTime) {
			t.Errorf("%#v.In(%v) = %v, want %v", test.date, test.loc, got, test.wantTime)
		}
	}
}

func TestDateIsValid(t *testing.T) {
	for _, test := range []struct {
		date Date
		want bool
	}{
		{Date{2014, 7, 29}, true},
		{Date{2000, 2, 29}, true},
		{Date{10000, 12, 31}, true},
		{Date{1, 1, 1}, true},
		{Date{0, 1, 1}, true},  // year zero is OK
		{Date{-1, 1, 1}, true}, // negative year is OK
		{Date{1, 0, 1}, false},
		{Date{1, 1, 0}, false},
		{Date{2016, 1, 32}, false},
		{Date{2016, 13, 1}, false},
		{Date{1, -1, 1}, false},
		{Date{1, 1, -1}, false},
	} {
		got := test.date.IsValid()
		if got != test.want {
			t.Errorf("%#v: got %t, want %t", test.date, got, test.want)
		}
	}
}

func TestParseDate(t *testing.T) {
	for _, test := range []struct {
		str  string
		want Date // if empty, expect an error
	}{
		{"2016-01-02", Date{2016, 1, 2}},
		{"2016-12-31", Date{2016, 12, 31}},
		{"0003-02-04", Date{3, 2, 4}},
		{"999-01-26", Date{}},
		{"", Date{}},
		{"2016-01-02x", Date{}},
	} {
		got, err := ParseDate(test.str)
		if got != test.want {
			t.Errorf("ParseDate(%q) = %+v, want %+v", test.str, got, test.want)
		}
		if err != nil && test.want != (Date{}) {
			t.Errorf("Unexpected error %v from ParseDate(%q)", err, test.str)
		}
	}
}

func TestDateArithmetic(t *testing.T) {
	for _, test := range []struct {
		desc  string
		start Date
		end   Date
		days  int
	}{
		{
			desc:  "zero days noop",
			start: Date{2014, 5, 9},
			end:   Date{2014, 5, 9},
			days:  0,
		},
		{
			desc:  "crossing a year boundary",
			start: Date{2014, 12, 31},
			end:   Date{2015, 1, 1},
			days:  1,
		},
		{
			desc:  "negative number of days",
			start: Date{2015, 1, 1},
			end:   Date{2014, 12, 31},
			days:  -1,
		},
		{
			desc:  "full leap year",
			start: Date{2004, 1, 1},
			end:   Date{2005, 1, 1},
			days:  366,
		},
		{
			desc:  "full non-leap year",
			start: Date{2001, 1, 1},
			end:   Date{2002, 1, 1},
			days:  365,
		},
		{
			desc:  "crossing a leap second",
			start: Date{1972, 6, 30},
			end:   Date{1972, 7, 1},
			days:  1,
		},
		{
			desc:  "dates before the unix epoch",
			start: Date{101, 1, 1},
			end:   Date{102, 1, 1},
			days:  365,
		},
	} {
		if got := test.start.AddDays(test.days); got != test.end {
			t.Errorf("[%s] %#v.AddDays(%v) = %#v, want %#v", test.desc, test.start, test.days, got, test.end)
		}
		if got := test.end.DaysSince(test.start); got != test.days {
			t.Errorf("[%s] %#v.Sub(%#v) = %v, want %v", test.desc, test.end, test.start, got, test.days)
		}
	}
}

func TestDateBefore(t *testing.T) {
	for _, test := range []struct {
		d1, d2 Date
		want   bool
	}{
		{Date{2016, 12, 31}, Date{2017, 1, 1}, true},
		{Date{2016, 1, 1}, Date{2016, 1, 1}, false},
		{Date{2016, 12, 30}, Date{2016, 12, 31}, true},
	} {
		if got := test.d1.Before(test.d2); got != test.want {
			t.Errorf("%v.Before(%v): got %t, want %t", test.d1, test.d2, got, test.want)
		}
	}
}

func TestDateAfter(t *testing.T) {
	for _, test := range []struct {
		d1, d2 Date
		want   bool
	}{
		{Date{2016, 12, 31}, Date{2017, 1, 1}, false},
		{Date{2016, 1, 1}, Date{2016, 1, 1}, false},
		{Date{2016, 12, 30}, Date{2016, 12, 31}, false},
	} {
		if got := test.d1.After(test.d2); got != test.want {
			t.Errorf("%v.After(%v): got %t, want %t", test.d1, test.d2, got, test.want)
		}
	}
}

func TestTimeToString(t *testing.T) {
	for _, test := range []struct {
		str       string
		time      Time
		roundTrip bool // ParseTime(str).String() == str?
	}{
		{"13:26:33", Time{13, 26, 33, 0}, true},
		{"01:02:03.000023456", Time{1, 2, 3, 23456}, true},
		{"00:00:00.000000001", Time{0, 0, 0, 1}, true},
		{"13:26:03.1", Time{13, 26, 3, 100000000}, false},
		{"13:26:33.0000003", Time{13, 26, 33, 300}, false},
	} {
		gotTime, err := ParseTime(test.str)
		if err != nil {
			t.Errorf("ParseTime(%q): got error: %v", test.str, err)
			continue
		}
		if gotTime != test.time {
			t.Errorf("ParseTime(%q) = %+v, want %+v", test.str, gotTime, test.time)
		}
		if test.roundTrip {
			gotStr := test.time.String()
			if gotStr != test.str {
				t.Errorf("%#v.String() = %q, want %q", test.time, gotStr, test.str)
			}
		}
	}
}

func TestTimeOf(t *testing.T) {
	for _, test := range []struct {
		time time.Time
		want Time
	}{
		{time.Date(2014, 8, 20, 15, 8, 43, 1, time.Local), Time{15, 8, 43, 1}},
		{time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC), Time{0, 0, 0, 0}},
	} {
		if got := TimeOf(test.time); got != test.want {
			t.Errorf("TimeOf(%v) = %+v, want %+v", test.time, got, test.want)
		}
	}
}

func TestTimeIsValid(t *testing.T) {
	for _, test := range []struct {
		time Time
		want bool
	}{
		{Time{0, 0, 0, 0}, true},
		{Time{23, 0, 0, 0}, true},
		{Time{23, 59, 59, 999999999}, true},
		{Time{24, 59, 59, 999999999}, false},
		{Time{23, 60, 59, 999999999}, false},
		{Time{23, 59, 60, 999999999}, false},
		{Time{23, 59, 59, 1000000000}, false},
		{Time{-1, 0, 0, 0}, false},
		{Time{0, -1, 0, 0}, false},
		{Time{0, 0, -1, 0}, false},
		{Time{0, 0, 0, -1}, false},
	} {
		got := test.time.IsValid()
		if got != test.want {
			t.Errorf("%#v: got %t, want %t", test.time, got, test.want)
		}
	}
}

func TestDateTimeToString(t *testing.T) {
	for _, test := range []struct {
		str       string
		dateTime  DateTime
		roundTrip bool // ParseDateTime(str).String() == str?
	}{
		{"2016-03-22T13:26:33", DateTime{Date{2016, 03, 22}, Time{13, 26, 33, 0}}, true},
|
||||
{"2016-03-22T13:26:33.000000600", DateTime{Date{2016, 03, 22}, Time{13, 26, 33, 600}}, true},
|
||||
{"2016-03-22t13:26:33", DateTime{Date{2016, 03, 22}, Time{13, 26, 33, 0}}, false},
|
||||
} {
|
||||
gotDateTime, err := ParseDateTime(test.str)
|
||||
if err != nil {
|
||||
t.Errorf("ParseDateTime(%q): got error: %v", test.str, err)
|
||||
continue
|
||||
}
|
||||
if gotDateTime != test.dateTime {
|
||||
t.Errorf("ParseDateTime(%q) = %+v, want %+v", test.str, gotDateTime, test.dateTime)
|
||||
}
|
||||
if test.roundTrip {
|
||||
gotStr := test.dateTime.String()
|
||||
if gotStr != test.str {
|
||||
t.Errorf("%#v.String() = %q, want %q", test.dateTime, gotStr, test.str)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestParseDateTimeErrors(t *testing.T) {
|
||||
for _, str := range []string{
|
||||
"",
|
||||
"2016-03-22", // just a date
|
||||
"13:26:33", // just a time
|
||||
"2016-03-22 13:26:33", // wrong separating character
|
||||
"2016-03-22T13:26:33x", // extra at end
|
||||
} {
|
||||
if _, err := ParseDateTime(str); err == nil {
|
||||
t.Errorf("ParseDateTime(%q) succeeded, want error", str)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestDateTimeOf(t *testing.T) {
|
||||
for _, test := range []struct {
|
||||
time time.Time
|
||||
want DateTime
|
||||
}{
|
||||
{time.Date(2014, 8, 20, 15, 8, 43, 1, time.Local),
|
||||
DateTime{Date{2014, 8, 20}, Time{15, 8, 43, 1}}},
|
||||
{time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC),
|
||||
DateTime{Date{1, 1, 1}, Time{0, 0, 0, 0}}},
|
||||
} {
|
||||
if got := DateTimeOf(test.time); got != test.want {
|
||||
t.Errorf("DateTimeOf(%v) = %+v, want %+v", test.time, got, test.want)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestDateTimeIsValid(t *testing.T) {
|
||||
// No need to be exhaustive here; it's just Date.IsValid && Time.IsValid.
|
||||
for _, test := range []struct {
|
||||
dt DateTime
|
||||
want bool
|
||||
}{
|
||||
{DateTime{Date{2016, 3, 20}, Time{0, 0, 0, 0}}, true},
|
||||
{DateTime{Date{2016, -3, 20}, Time{0, 0, 0, 0}}, false},
|
||||
{DateTime{Date{2016, 3, 20}, Time{24, 0, 0, 0}}, false},
|
||||
} {
|
||||
got := test.dt.IsValid()
|
||||
if got != test.want {
|
||||
t.Errorf("%#v: got %t, want %t", test.dt, got, test.want)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestDateTimeIn(t *testing.T) {
|
||||
dt := DateTime{Date{2016, 1, 2}, Time{3, 4, 5, 6}}
|
||||
got := dt.In(time.UTC)
|
||||
want := time.Date(2016, 1, 2, 3, 4, 5, 6, time.UTC)
|
||||
if !got.Equal(want) {
|
||||
t.Errorf("got %v, want %v", got, want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestDateTimeBefore(t *testing.T) {
|
||||
d1 := Date{2016, 12, 31}
|
||||
d2 := Date{2017, 1, 1}
|
||||
t1 := Time{5, 6, 7, 8}
|
||||
t2 := Time{5, 6, 7, 9}
|
||||
for _, test := range []struct {
|
||||
dt1, dt2 DateTime
|
||||
want bool
|
||||
}{
|
||||
{DateTime{d1, t1}, DateTime{d2, t1}, true},
|
||||
{DateTime{d1, t1}, DateTime{d1, t2}, true},
|
||||
{DateTime{d2, t1}, DateTime{d1, t1}, false},
|
||||
{DateTime{d2, t1}, DateTime{d2, t1}, false},
|
||||
} {
|
||||
if got := test.dt1.Before(test.dt2); got != test.want {
|
||||
t.Errorf("%v.Before(%v): got %t, want %t", test.dt1, test.dt2, got, test.want)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestDateTimeAfter(t *testing.T) {
|
||||
d1 := Date{2016, 12, 31}
|
||||
d2 := Date{2017, 1, 1}
|
||||
t1 := Time{5, 6, 7, 8}
|
||||
t2 := Time{5, 6, 7, 9}
|
||||
for _, test := range []struct {
|
||||
dt1, dt2 DateTime
|
||||
want bool
|
||||
}{
|
||||
{DateTime{d1, t1}, DateTime{d2, t1}, false},
|
||||
{DateTime{d1, t1}, DateTime{d1, t2}, false},
|
||||
{DateTime{d2, t1}, DateTime{d1, t1}, true},
|
||||
{DateTime{d2, t1}, DateTime{d2, t1}, false},
|
||||
} {
|
||||
if got := test.dt1.After(test.dt2); got != test.want {
|
||||
t.Errorf("%v.After(%v): got %t, want %t", test.dt1, test.dt2, got, test.want)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestMarshalJSON(t *testing.T) {
|
||||
for _, test := range []struct {
|
||||
value interface{}
|
||||
want string
|
||||
}{
|
||||
{Date{1987, 4, 15}, `"1987-04-15"`},
|
||||
{Time{18, 54, 2, 0}, `"18:54:02"`},
|
||||
{DateTime{Date{1987, 4, 15}, Time{18, 54, 2, 0}}, `"1987-04-15T18:54:02"`},
|
||||
} {
|
||||
bgot, err := json.Marshal(test.value)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if got := string(bgot); got != test.want {
|
||||
t.Errorf("%#v: got %s, want %s", test.value, got, test.want)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestUnmarshalJSON(t *testing.T) {
|
||||
var d Date
|
||||
var tm Time
|
||||
var dt DateTime
|
||||
for _, test := range []struct {
|
||||
data string
|
||||
ptr interface{}
|
||||
want interface{}
|
||||
}{
|
||||
{`"1987-04-15"`, &d, &Date{1987, 4, 15}},
|
||||
{`"1987-04-\u0031\u0035"`, &d, &Date{1987, 4, 15}},
|
||||
{`"18:54:02"`, &tm, &Time{18, 54, 2, 0}},
|
||||
{`"1987-04-15T18:54:02"`, &dt, &DateTime{Date{1987, 4, 15}, Time{18, 54, 2, 0}}},
|
||||
} {
|
||||
if err := json.Unmarshal([]byte(test.data), test.ptr); err != nil {
|
||||
t.Fatalf("%s: %v", test.data, err)
|
||||
}
|
||||
if !reflect.DeepEqual(test.ptr, test.want) {
|
||||
t.Errorf("%s: got %#v, want %#v", test.data, test.ptr, test.want)
|
||||
}
|
||||
}
|
||||
|
||||
for _, bad := range []string{"", `""`, `"bad"`, `"1987-04-15x"`,
|
||||
`19870415`, // a JSON number
|
||||
`11987-04-15x`, // not a JSON string
|
||||
|
||||
} {
|
||||
if json.Unmarshal([]byte(bad), &d) == nil {
|
||||
t.Errorf("%q, Date: got nil, want error", bad)
|
||||
}
|
||||
if json.Unmarshal([]byte(bad), &tm) == nil {
|
||||
t.Errorf("%q, Time: got nil, want error", bad)
|
||||
}
|
||||
if json.Unmarshal([]byte(bad), &dt) == nil {
|
||||
t.Errorf("%q, DateTime: got nil, want error", bad)
|
||||
}
|
||||
}
|
||||
}
|
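The tests above pin down the Date/Time/DateTime behavior. For orientation, a minimal usage sketch follows; the import path is an assumption (this hunk never names the package's path), and only DateOf, AddDays, and DaysSince, whose signatures are visible in the tests, are exercised.

package main

import (
	"fmt"
	"time"

	civil "cloud.google.com/go/civil" // assumed import path, not stated in this diff
)

func main() {
	today := civil.DateOf(time.Now()) // truncate a time.Time to its calendar date
	due := today.AddDays(30)
	fmt.Println(due, due.DaysSince(today)) // e.g. "2016-04-21 30"
}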
20 vendor/cloud.google.com/go/cloud.go generated vendored Normal file
@@ -0,0 +1,20 @@
// Copyright 2014 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package cloud is the root of the packages used to access Google Cloud
// Services. See https://godoc.org/cloud.google.com/go for a full list
// of sub-packages.
//
// This package documents how to authorize and authenticate the sub packages.
package cloud // import "cloud.google.com/go"
450 vendor/cloud.google.com/go/cmd/go-cloud-debug-agent/debuglet.go generated vendored Normal file
@@ -0,0 +1,450 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// +build linux

package main

import (
	"encoding/json"
	"flag"
	"fmt"
	"io/ioutil"
	"log"
	"math/rand"
	"os"
	"sync"
	"time"

	"cloud.google.com/go/cmd/go-cloud-debug-agent/internal/breakpoints"
	debuglet "cloud.google.com/go/cmd/go-cloud-debug-agent/internal/controller"
	"cloud.google.com/go/cmd/go-cloud-debug-agent/internal/valuecollector"
	"cloud.google.com/go/compute/metadata"
	"golang.org/x/debug"
	"golang.org/x/debug/local"
	"golang.org/x/net/context"
	"golang.org/x/oauth2"
	"golang.org/x/oauth2/google"
	cd "google.golang.org/api/clouddebugger/v2"
)

var (
	appModule          = flag.String("appmodule", "", "Optional application module name.")
	appVersion         = flag.String("appversion", "", "Optional application module version name.")
	sourceContextFile  = flag.String("sourcecontext", "", "File containing JSON-encoded source context.")
	verbose            = flag.Bool("v", false, "Output verbose log messages.")
	projectNumber      = flag.String("projectnumber", "", "Project number."+
		" If this is not set, it is read from the GCP metadata server.")
	projectID = flag.String("projectid", "", "Project ID."+
		" If this is not set, it is read from the GCP metadata server.")
	serviceAccountFile = flag.String("serviceaccountfile", "", "File containing JSON service account credentials.")
)

const (
	maxCapturedStackFrames = 50
	maxCapturedVariables   = 1000
)

func main() {
	flag.Usage = usage
	flag.Parse()
	args := flag.Args()
	if len(args) == 0 {
		// The user needs to supply the name of the executable to run.
		flag.Usage()
		return
	}
	if *projectNumber == "" {
		var err error
		*projectNumber, err = metadata.NumericProjectID()
		if err != nil {
			log.Print("Debuglet initialization: ", err)
		}
	}
	if *projectID == "" {
		var err error
		*projectID, err = metadata.ProjectID()
		if err != nil {
			log.Print("Debuglet initialization: ", err)
		}
	}
	sourceContexts, err := readSourceContextFile(*sourceContextFile)
	if err != nil {
		log.Print("Reading source context file: ", err)
	}
	var ts oauth2.TokenSource
	ctx := context.Background()
	if *serviceAccountFile != "" {
		if ts, err = serviceAcctTokenSource(ctx, *serviceAccountFile, cd.CloudDebuggerScope); err != nil {
			log.Fatalf("Error getting credentials from file %s: %v", *serviceAccountFile, err)
		}
	} else if ts, err = google.DefaultTokenSource(ctx, cd.CloudDebuggerScope); err != nil {
		log.Print("Error getting application default credentials for Cloud Debugger:", err)
		os.Exit(103)
	}
	c, err := debuglet.NewController(ctx, debuglet.Options{
		ProjectNumber:  *projectNumber,
		ProjectID:      *projectID,
		AppModule:      *appModule,
		AppVersion:     *appVersion,
		SourceContexts: sourceContexts,
		Verbose:        *verbose,
		TokenSource:    ts,
	})
	if err != nil {
		log.Fatal("Error connecting to Cloud Debugger: ", err)
	}
	prog, err := local.New(args[0])
	if err != nil {
		log.Fatal("Error loading program: ", err)
	}
	// Load the program, but don't actually start it running yet.
	if _, err = prog.Run(args[1:]...); err != nil {
		log.Fatal("Error loading program: ", err)
	}
	bs := breakpoints.NewBreakpointStore(prog)

	// Seed the random number generator.
	rand.Seed(time.Now().UnixNano())

	// Now we want to do two things: run the user's program, and start sending
	// List requests periodically to the Debuglet Controller to get breakpoints
	// to set.
	//
	// We want to give the Debuglet Controller a chance to give us breakpoints
	// before we start the program, otherwise we would miss any breakpoint
	// triggers that occur during program startup -- for example, a breakpoint on
	// the first line of main. But if the Debuglet Controller is not responding or
	// is returning errors, we don't want to delay starting the program
	// indefinitely.
	//
	// We pass a channel to breakpointListLoop, which will close it when the first
	// List call finishes. Then we wait until either the channel is closed or a
	// 5-second timer has finished before starting the program.
	ch := make(chan bool)
	// Start a goroutine that sends List requests to the Debuglet Controller, and
	// sets any breakpoints it gets back.
	go breakpointListLoop(ctx, c, bs, ch)
	// Wait until 5 seconds have passed or breakpointListLoop has closed ch.
	select {
	case <-time.After(5 * time.Second):
	case <-ch:
	}
	// Run the debuggee.
	programLoop(ctx, c, bs, prog)
}

// usage prints a usage message to stderr and exits.
func usage() {
	me := "a.out"
	if len(os.Args) >= 1 {
		me = os.Args[0]
	}
	fmt.Fprintf(os.Stderr, "Usage of %s:\n", me)
	fmt.Fprintf(os.Stderr, "\t%s [flags...] -- <program name> args...\n", me)
	fmt.Fprintf(os.Stderr, "Flags:\n")
	flag.PrintDefaults()
	fmt.Fprintf(os.Stderr,
		"See https://cloud.google.com/tools/cloud-debugger/setting-up-on-compute-engine for more information.\n")
	os.Exit(2)
}

// readSourceContextFile reads a JSON-encoded source context from the given file.
// It returns a non-empty slice on success.
func readSourceContextFile(filename string) ([]*cd.SourceContext, error) {
	if filename == "" {
		return nil, nil
	}
	scJSON, err := ioutil.ReadFile(filename)
	if err != nil {
		return nil, fmt.Errorf("reading file %q: %v", filename, err)
	}
	var sc cd.SourceContext
	if err = json.Unmarshal(scJSON, &sc); err != nil {
		return nil, fmt.Errorf("parsing file %q: %v", filename, err)
	}
	return []*cd.SourceContext{&sc}, nil
}

// breakpointListLoop repeatedly calls the Debuglet Controller's List RPC, and
// passes the results to the BreakpointStore so it can set and unset breakpoints
// in the program.
//
// After the first List call finishes, ch is closed.
func breakpointListLoop(ctx context.Context, c *debuglet.Controller, bs *breakpoints.BreakpointStore, first chan bool) {
	const (
		avgTimeBetweenCalls = time.Second
		errorDelay          = 5 * time.Second
	)

	// randomDuration returns a random duration with expected value avg.
	randomDuration := func(avg time.Duration) time.Duration {
		return time.Duration(rand.Int63n(int64(2*avg + 1)))
	}

	var consecutiveFailures uint

	for {
		callStart := time.Now()
		resp, err := c.List(ctx)
		if err != nil && err != debuglet.ErrListUnchanged {
			log.Printf("Debuglet controller server error: %v", err)
		}
		if err == nil {
			bs.ProcessBreakpointList(resp.Breakpoints)
		}

		if first != nil {
			// We've finished one call to List and set any breakpoints we received.
			close(first)
			first = nil
		}

		// Asynchronously send updates for any breakpoints that caused an error when
		// the BreakpointStore tried to process them. We don't wait for the update
		// to finish before the program can exit, as we do for normal updates.
		errorBps := bs.ErrorBreakpoints()
		for _, bp := range errorBps {
			go func(bp *cd.Breakpoint) {
				if err := c.Update(ctx, bp.Id, bp); err != nil {
					log.Printf("Failed to send breakpoint update for %s: %s", bp.Id, err)
				}
			}(bp)
		}

		// Make the next call not too soon after the one we just did.
		delay := randomDuration(avgTimeBetweenCalls)

		// If the call returned an error other than ErrListUnchanged, wait longer.
		if err != nil && err != debuglet.ErrListUnchanged {
			// Wait twice as long after each consecutive failure, to a maximum of 16x.
			delay += randomDuration(errorDelay * (1 << consecutiveFailures))
			if consecutiveFailures < 4 {
				consecutiveFailures++
			}
		} else {
			consecutiveFailures = 0
		}

		// Sleep until we reach time callStart+delay. If we've already passed that
		// time, time.Sleep will return immediately -- this should be the common
		// case, since the server will delay responding to List for a while when
		// there are no changes to report.
		time.Sleep(callStart.Add(delay).Sub(time.Now()))
	}
}

// programLoop runs the program being debugged to completion. When a breakpoint's
// conditions are satisfied, it sends an Update RPC to the Debuglet Controller.
// The function returns when the program exits and all Update RPCs have finished.
func programLoop(ctx context.Context, c *debuglet.Controller, bs *breakpoints.BreakpointStore, prog debug.Program) {
	var wg sync.WaitGroup
	for {
		// Run the program until it hits a breakpoint or exits.
		status, err := prog.Resume()
		if err != nil {
			break
		}

		// Get the breakpoints at this address whose conditions were satisfied,
		// and remove the ones that aren't logpoints.
		bps := bs.BreakpointsAtPC(status.PC)
		bps = bpsWithConditionSatisfied(bps, prog)
		for _, bp := range bps {
			if bp.Action != "LOG" {
				bs.RemoveBreakpoint(bp)
			}
		}

		if len(bps) == 0 {
			continue
		}

		// Evaluate expressions and get the stack.
		vc := valuecollector.NewCollector(prog, maxCapturedVariables)
		needStackFrames := false
		for _, bp := range bps {
			// If evaluating bp's condition didn't return an error, evaluate bp's
			// expressions, and later get the stack frames.
			if bp.Status == nil {
				bp.EvaluatedExpressions = expressionValues(bp.Expressions, prog, vc)
				needStackFrames = true
			}
		}
		var (
			stack                    []*cd.StackFrame
			stackFramesStatusMessage *cd.StatusMessage
		)
		if needStackFrames {
			stack, stackFramesStatusMessage = stackFrames(prog, vc)
		}

		// Read variable values from the program.
		variableTable := vc.ReadValues()

		// Start a goroutine to send updates to the Debuglet Controller or write
		// to logs, concurrently with resuming the program.
		// TODO: retry Update on failure.
		for _, bp := range bps {
			wg.Add(1)
			switch bp.Action {
			case "LOG":
				go func(format string, evaluatedExpressions []*cd.Variable) {
					s := valuecollector.LogString(format, evaluatedExpressions, variableTable)
					log.Print(s)
					wg.Done()
				}(bp.LogMessageFormat, bp.EvaluatedExpressions)
				bp.Status = nil
				bp.EvaluatedExpressions = nil
			default:
				go func(bp *cd.Breakpoint) {
					defer wg.Done()
					bp.IsFinalState = true
					if bp.Status == nil {
						// If evaluating bp's condition didn't return an error, include the
						// stack frames, variable table, and any status message produced when
						// getting the stack frames.
						bp.StackFrames = stack
						bp.VariableTable = variableTable
						bp.Status = stackFramesStatusMessage
					}
					if err := c.Update(ctx, bp.Id, bp); err != nil {
						log.Printf("Failed to send breakpoint update for %s: %s", bp.Id, err)
					}
				}(bp)
			}
		}
	}

	// Wait for all updates to finish before returning.
	wg.Wait()
}

// bpsWithConditionSatisfied returns the breakpoints whose conditions are true
// (or that do not have a condition.)
func bpsWithConditionSatisfied(bpsIn []*cd.Breakpoint, prog debug.Program) []*cd.Breakpoint {
	var bpsOut []*cd.Breakpoint
	for _, bp := range bpsIn {
		cond, err := condTruth(bp.Condition, prog)
		if err != nil {
			bp.Status = errorStatusMessage(err.Error(), refersToBreakpointCondition)
			// Include bp in the list to be updated when there's an error, so that
			// the user gets a response.
			bpsOut = append(bpsOut, bp)
		} else if cond {
			bpsOut = append(bpsOut, bp)
		}
	}
	return bpsOut
}

// condTruth evaluates a condition.
func condTruth(condition string, prog debug.Program) (bool, error) {
	if condition == "" {
		// A condition wasn't set.
		return true, nil
	}
	val, err := prog.Evaluate(condition)
	if err != nil {
		return false, err
	}
	if v, ok := val.(bool); !ok {
		return false, fmt.Errorf("condition expression has type %T, should be bool", val)
	} else {
		return v, nil
	}
}

// expressionValues evaluates a slice of expressions and returns a []*cd.Variable
// containing the results.
// If the result of an expression evaluation refers to values from the program's
// memory (e.g., the expression evaluates to a slice) a corresponding variable is
// added to the value collector, to be read later.
func expressionValues(expressions []string, prog debug.Program, vc *valuecollector.Collector) []*cd.Variable {
	evaluatedExpressions := make([]*cd.Variable, len(expressions))
	for i, exp := range expressions {
		ee := &cd.Variable{Name: exp}
		evaluatedExpressions[i] = ee
		if val, err := prog.Evaluate(exp); err != nil {
			ee.Status = errorStatusMessage(err.Error(), refersToBreakpointExpression)
		} else {
			vc.FillValue(val, ee)
		}
	}
	return evaluatedExpressions
}

// stackFrames returns a stack trace for the program. It passes references to
// function parameters and local variables to the value collector, so it can read
// their values later.
func stackFrames(prog debug.Program, vc *valuecollector.Collector) ([]*cd.StackFrame, *cd.StatusMessage) {
	frames, err := prog.Frames(maxCapturedStackFrames)
	if err != nil {
		return nil, errorStatusMessage("Error getting stack: "+err.Error(), refersToUnspecified)
	}
	stackFrames := make([]*cd.StackFrame, len(frames))
	for i, f := range frames {
		frame := &cd.StackFrame{}
		frame.Function = f.Function
		for _, v := range f.Params {
			frame.Arguments = append(frame.Arguments, vc.AddVariable(debug.LocalVar(v)))
		}
		for _, v := range f.Vars {
			frame.Locals = append(frame.Locals, vc.AddVariable(v))
		}
		frame.Location = &cd.SourceLocation{
			Path: f.File,
			Line: int64(f.Line),
		}
		stackFrames[i] = frame
	}
	return stackFrames, nil
}

// errorStatusMessage returns a *cd.StatusMessage indicating an error,
// with the given message and refersTo field.
func errorStatusMessage(msg string, refersTo int) *cd.StatusMessage {
	return &cd.StatusMessage{
		Description: &cd.FormatMessage{Format: "$0", Parameters: []string{msg}},
		IsError:     true,
		RefersTo:    refersToString[refersTo],
	}
}

const (
	// RefersTo values for cd.StatusMessage.
	refersToUnspecified = iota
	refersToBreakpointCondition
	refersToBreakpointExpression
)

// refersToString contains the strings for each refersTo value.
// See the definition of StatusMessage in the v2/clouddebugger package.
var refersToString = map[int]string{
	refersToUnspecified:          "UNSPECIFIED",
	refersToBreakpointCondition:  "BREAKPOINT_CONDITION",
	refersToBreakpointExpression: "BREAKPOINT_EXPRESSION",
}

func serviceAcctTokenSource(ctx context.Context, filename string, scope ...string) (oauth2.TokenSource, error) {
	data, err := ioutil.ReadFile(filename)
	if err != nil {
		return nil, fmt.Errorf("cannot read service account file: %v", err)
	}
	cfg, err := google.JWTConfigFromJSON(data, scope...)
	if err != nil {
		return nil, fmt.Errorf("google.JWTConfigFromJSON: %v", err)
	}
	return cfg.TokenSource(ctx), nil
}
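The retry policy buried in breakpointListLoop is easier to see in isolation. The following is a self-contained sketch of just that delay computation, under the same constants; the names nextDelay and randomDuration here are local to the sketch, not exported by the agent.

package main

import (
	"fmt"
	"math/rand"
	"time"
)

const (
	avgTimeBetweenCalls = time.Second
	errorDelay          = 5 * time.Second
)

// randomDuration returns a random duration with expected value avg,
// mirroring the helper inside breakpointListLoop.
func randomDuration(avg time.Duration) time.Duration {
	return time.Duration(rand.Int63n(int64(2*avg + 1)))
}

// nextDelay computes the pause before the next List call and the updated
// failure count: each consecutive failure doubles the error component,
// capped at 16x errorDelay.
func nextDelay(failed bool, consecutiveFailures uint) (time.Duration, uint) {
	delay := randomDuration(avgTimeBetweenCalls)
	if !failed {
		return delay, 0
	}
	delay += randomDuration(errorDelay * (1 << consecutiveFailures))
	if consecutiveFailures < 4 {
		consecutiveFailures++
	}
	return delay, consecutiveFailures
}

func main() {
	var d time.Duration
	var failures uint
	for i := 0; i < 6; i++ {
		d, failures = nextDelay(true, failures)
		fmt.Println(d) // error component averages 5s, 10s, 20s, 40s, 80s, then stays at 80s
	}
}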
174 vendor/cloud.google.com/go/cmd/go-cloud-debug-agent/internal/breakpoints/breakpoints.go generated vendored Normal file
@@ -0,0 +1,174 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package breakpoints handles breakpoint requests we get from the user through
// the Debuglet Controller, and manages corresponding breakpoints set in the code.
package breakpoints

import (
	"log"
	"sync"

	"golang.org/x/debug"
	cd "google.golang.org/api/clouddebugger/v2"
)

// BreakpointStore stores the set of breakpoints for a program.
type BreakpointStore struct {
	mu sync.Mutex
	// prog is the program being debugged.
	prog debug.Program
	// idToBreakpoint is a map from breakpoint identifier to *cd.Breakpoint. The
	// map value is nil if the breakpoint is inactive. A breakpoint is active if:
	// - We received it from the Debuglet Controller, and it was active at the time;
	// - We were able to set code breakpoints for it;
	// - We have not reached any of those code breakpoints while satisfying the
	//   breakpoint's conditions, or the breakpoint has action LOG; and
	// - The Debuglet Controller hasn't informed us the breakpoint has become inactive.
	idToBreakpoint map[string]*cd.Breakpoint
	// pcToBps and bpToPCs store the many-to-many relationship between breakpoints we
	// received from the Debuglet Controller and the code breakpoints we set for them.
	pcToBps map[uint64][]*cd.Breakpoint
	bpToPCs map[*cd.Breakpoint][]uint64
	// errors contains any breakpoints which couldn't be set because they caused an
	// error. These are retrieved with ErrorBreakpoints, and the caller is
	// expected to handle sending updates for them.
	errors []*cd.Breakpoint
}

// NewBreakpointStore returns a BreakpointStore for the given program.
func NewBreakpointStore(prog debug.Program) *BreakpointStore {
	return &BreakpointStore{
		idToBreakpoint: make(map[string]*cd.Breakpoint),
		pcToBps:        make(map[uint64][]*cd.Breakpoint),
		bpToPCs:        make(map[*cd.Breakpoint][]uint64),
		prog:           prog,
	}
}

// ProcessBreakpointList applies updates received from the Debuglet Controller through a List call.
func (bs *BreakpointStore) ProcessBreakpointList(bps []*cd.Breakpoint) {
	bs.mu.Lock()
	defer bs.mu.Unlock()
	for _, bp := range bps {
		if storedBp, ok := bs.idToBreakpoint[bp.Id]; ok {
			if storedBp != nil && bp.IsFinalState {
				// IsFinalState indicates that the breakpoint has been made inactive.
				bs.removeBreakpointLocked(storedBp)
			}
		} else {
			if bp.IsFinalState {
				// The controller is notifying us that the breakpoint is no longer active,
				// but we didn't know about it anyway.
				continue
			}
			if bp.Action != "" && bp.Action != "CAPTURE" && bp.Action != "LOG" {
				bp.IsFinalState = true
				bp.Status = &cd.StatusMessage{
					Description: &cd.FormatMessage{Format: "Action is not supported"},
					IsError:     true,
				}
				bs.errors = append(bs.errors, bp)
				// Note in idToBreakpoint that we've already seen this breakpoint, so that we
				// don't try to report it as an error multiple times.
				bs.idToBreakpoint[bp.Id] = nil
				continue
			}
			pcs, err := bs.prog.BreakpointAtLine(bp.Location.Path, uint64(bp.Location.Line))
			if err != nil {
				log.Printf("error setting breakpoint at %s:%d: %v", bp.Location.Path, bp.Location.Line, err)
			}
			if len(pcs) == 0 {
				// We can't find a PC for this breakpoint's source line, so don't make it active.
				// TODO: we could snap the line to a location where we can break, or report an error to the user.
				bs.idToBreakpoint[bp.Id] = nil
			} else {
				bs.idToBreakpoint[bp.Id] = bp
				for _, pc := range pcs {
					bs.pcToBps[pc] = append(bs.pcToBps[pc], bp)
				}
				bs.bpToPCs[bp] = pcs
			}
		}
	}
}

// ErrorBreakpoints returns a slice of Breakpoints that caused errors when the
// BreakpointStore tried to process them, and resets the list of such
// breakpoints.
// The caller is expected to send updates to the server to indicate the errors.
func (bs *BreakpointStore) ErrorBreakpoints() []*cd.Breakpoint {
	bs.mu.Lock()
	defer bs.mu.Unlock()
	bps := bs.errors
	bs.errors = nil
	return bps
}

// BreakpointsAtPC returns all the breakpoints for which we set a code
// breakpoint at the given address.
func (bs *BreakpointStore) BreakpointsAtPC(pc uint64) []*cd.Breakpoint {
	bs.mu.Lock()
	defer bs.mu.Unlock()
	return bs.pcToBps[pc]
}

// RemoveBreakpoint makes the given breakpoint inactive.
// This is called when either the debugged program hits the breakpoint, or the Debuglet
// Controller informs us that the breakpoint is now inactive.
func (bs *BreakpointStore) RemoveBreakpoint(bp *cd.Breakpoint) {
	bs.mu.Lock()
	bs.removeBreakpointLocked(bp)
	bs.mu.Unlock()
}

func (bs *BreakpointStore) removeBreakpointLocked(bp *cd.Breakpoint) {
	// Set the ID's corresponding breakpoint to nil, so that we won't activate it
	// if we see it again.
	// TODO: we could delete it after a few seconds.
	bs.idToBreakpoint[bp.Id] = nil

	// Delete bp from the list of cd breakpoints at each of its corresponding
	// code breakpoint locations, and delete any code breakpoints which no longer
	// have a corresponding cd breakpoint.
	var codeBreakpointsToDelete []uint64
	for _, pc := range bs.bpToPCs[bp] {
		bps := remove(bs.pcToBps[pc], bp)
		if len(bps) == 0 {
			// bp was the last breakpoint set at this PC, so delete the code breakpoint.
			codeBreakpointsToDelete = append(codeBreakpointsToDelete, pc)
			delete(bs.pcToBps, pc)
		} else {
			bs.pcToBps[pc] = bps
		}
	}
	if len(codeBreakpointsToDelete) > 0 {
		bs.prog.DeleteBreakpoints(codeBreakpointsToDelete)
	}
	delete(bs.bpToPCs, bp)
}

// remove updates rs by removing r, then returns rs.
// The mutex in the BreakpointStore which contains rs should be held.
func remove(rs []*cd.Breakpoint, r *cd.Breakpoint) []*cd.Breakpoint {
	for i := range rs {
		if rs[i] == r {
			rs[i] = rs[len(rs)-1]
			rs = rs[0 : len(rs)-1]
			return rs
		}
	}
	// We shouldn't reach here.
	return rs
}
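The remove helper above uses swap-with-last deletion: O(1) per removal, at the cost of not preserving slice order. A standalone illustration over ints (names local to the sketch):

package main

import "fmt"

// removeInt deletes the first occurrence of r from rs by moving the last
// element into its slot and shrinking the slice by one.
func removeInt(rs []int, r int) []int {
	for i := range rs {
		if rs[i] == r {
			rs[i] = rs[len(rs)-1]
			return rs[:len(rs)-1]
		}
	}
	return rs // r not present; the store treats this as "shouldn't happen"
}

func main() {
	fmt.Println(removeInt([]int{1, 2, 3, 4}, 2)) // [1 4 3]: order is not preserved
}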
168 vendor/cloud.google.com/go/cmd/go-cloud-debug-agent/internal/breakpoints/breakpoints_test.go generated vendored Normal file
@@ -0,0 +1,168 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package breakpoints

import (
	"reflect"
	"testing"

	"golang.org/x/debug"
	cd "google.golang.org/api/clouddebugger/v2"
)

var (
	testPC1     uint64 = 0x1234
	testPC2     uint64 = 0x5678
	testPC3     uint64 = 0x3333
	testFile           = "foo.go"
	testLine    uint64 = 42
	testLine2   uint64 = 99
	testLogPC   uint64 = 0x9abc
	testLogLine uint64 = 43
	testBadPC   uint64 = 0xdef0
	testBadLine uint64 = 44
	testBP             = &cd.Breakpoint{
		Action:       "CAPTURE",
		Id:           "TestBreakpoint",
		IsFinalState: false,
		Location:     &cd.SourceLocation{Path: testFile, Line: int64(testLine)},
	}
	testBP2 = &cd.Breakpoint{
		Action:       "CAPTURE",
		Id:           "TestBreakpoint2",
		IsFinalState: false,
		Location:     &cd.SourceLocation{Path: testFile, Line: int64(testLine2)},
	}
	testLogBP = &cd.Breakpoint{
		Action:       "LOG",
		Id:           "TestLogBreakpoint",
		IsFinalState: false,
		Location:     &cd.SourceLocation{Path: testFile, Line: int64(testLogLine)},
	}
	testBadBP = &cd.Breakpoint{
		Action:       "BEEP",
		Id:           "TestBadBreakpoint",
		IsFinalState: false,
		Location:     &cd.SourceLocation{Path: testFile, Line: int64(testBadLine)},
	}
)

func TestBreakpointStore(t *testing.T) {
	p := &Program{breakpointPCs: make(map[uint64]bool)}
	bs := NewBreakpointStore(p)
	checkPCs := func(expected map[uint64]bool) {
		if !reflect.DeepEqual(p.breakpointPCs, expected) {
			t.Errorf("got breakpoint map %v want %v", p.breakpointPCs, expected)
		}
	}
	bs.ProcessBreakpointList([]*cd.Breakpoint{testBP, testBP2, testLogBP, testBadBP})
	checkPCs(map[uint64]bool{
		testPC1:   true,
		testPC2:   true,
		testPC3:   true,
		testLogPC: true,
	})
	for _, test := range []struct {
		pc       uint64
		expected []*cd.Breakpoint
	}{
		{testPC1, []*cd.Breakpoint{testBP}},
		{testPC2, []*cd.Breakpoint{testBP}},
		{testPC3, []*cd.Breakpoint{testBP2}},
		{testLogPC, []*cd.Breakpoint{testLogBP}},
	} {
		if bps := bs.BreakpointsAtPC(test.pc); !reflect.DeepEqual(bps, test.expected) {
			t.Errorf("BreakpointsAtPC(%x): got %v want %v", test.pc, bps, test.expected)
		}
	}
	testBP2.IsFinalState = true
	bs.ProcessBreakpointList([]*cd.Breakpoint{testBP, testBP2, testLogBP, testBadBP})
	checkPCs(map[uint64]bool{
		testPC1:   true,
		testPC2:   true,
		testPC3:   false,
		testLogPC: true,
	})
	bs.RemoveBreakpoint(testBP)
	checkPCs(map[uint64]bool{
		testPC1:   false,
		testPC2:   false,
		testPC3:   false,
		testLogPC: true,
	})
	for _, pc := range []uint64{testPC1, testPC2, testPC3} {
		if bps := bs.BreakpointsAtPC(pc); len(bps) != 0 {
			t.Errorf("BreakpointsAtPC(%x): got %v want []", pc, bps)
		}
	}
	// bs.ErrorBreakpoints should return testBadBP.
	errorBps := bs.ErrorBreakpoints()
	if len(errorBps) != 1 {
		t.Errorf("ErrorBreakpoints: got %d want 1", len(errorBps))
	} else {
		bp := errorBps[0]
		if bp.Id != testBadBP.Id {
			t.Errorf("ErrorBreakpoints: got id %q want %q", bp.Id, testBadBP.Id)
		}
		if bp.Status == nil || !bp.Status.IsError {
			t.Errorf("ErrorBreakpoints: got %v, want error", bp.Status)
		}
	}
	// The error should have been removed by the last call to bs.ErrorBreakpoints.
	errorBps = bs.ErrorBreakpoints()
	if len(errorBps) != 0 {
		t.Errorf("ErrorBreakpoints: got %d want 0", len(errorBps))
	}
	// Even if testBadBP is sent in a new list, it should not be returned again.
	bs.ProcessBreakpointList([]*cd.Breakpoint{testBadBP})
	errorBps = bs.ErrorBreakpoints()
	if len(errorBps) != 0 {
		t.Errorf("ErrorBreakpoints: got %d want 0", len(errorBps))
	}
}

// Program implements the similarly-named interface in x/debug.
// BreakpointStore should only call its BreakpointAtLine and DeleteBreakpoints methods.
type Program struct {
	debug.Program
	// breakpointPCs contains the state of code breakpoints -- true if the
	// breakpoint is currently set, false if it has been deleted.
	breakpointPCs map[uint64]bool
}

func (p *Program) BreakpointAtLine(file string, line uint64) ([]uint64, error) {
	var pcs []uint64
	switch {
	case file == testFile && line == testLine:
		pcs = []uint64{testPC1, testPC2}
	case file == testFile && line == testLine2:
		pcs = []uint64{testPC3}
	case file == testFile && line == testLogLine:
		pcs = []uint64{testLogPC}
	default:
		pcs = []uint64{0xbad}
	}
	for _, pc := range pcs {
		p.breakpointPCs[pc] = true
	}
	return pcs, nil
}

func (p *Program) DeleteBreakpoints(pcs []uint64) error {
	for _, pc := range pcs {
		p.breakpointPCs[pc] = false
	}
	return nil
}
279 vendor/cloud.google.com/go/cmd/go-cloud-debug-agent/internal/controller/client.go generated vendored Normal file
@@ -0,0 +1,279 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package controller is a library for interacting with the Google Cloud Debugger's Debuglet Controller service.
package controller

import (
	"crypto/sha256"
	"encoding/json"
	"errors"
	"fmt"
	"log"
	"sync"

	"golang.org/x/net/context"
	"golang.org/x/oauth2"
	cd "google.golang.org/api/clouddebugger/v2"
	"google.golang.org/api/googleapi"
	"google.golang.org/api/option"
	"google.golang.org/api/transport"
)

const (
	// agentVersionString identifies the agent to the service.
	agentVersionString = "google.com/go-gcp/v0.2"
	// initWaitToken is the wait token sent in the first List request to a server.
	initWaitToken = "init"
)

var (
	// ErrListUnchanged is returned by List if the server time limit is reached
	// before the list of breakpoints changes.
	ErrListUnchanged = errors.New("breakpoint list unchanged")
	// ErrDebuggeeDisabled is returned by List or Update if the server has disabled
	// this Debuggee. The caller can retry later.
	ErrDebuggeeDisabled = errors.New("debuglet disabled by server")
)

// Controller manages a connection to the Debuglet Controller service.
type Controller struct {
	s serviceInterface
	// waitToken is sent with List requests so the server knows which set of
	// breakpoints this client has already seen. Each successful List request
	// returns a new waitToken to send in the next request.
	waitToken string
	// verbose determines whether to do some logging
	verbose bool
	// options, uniquifier and description are used in register.
	options     Options
	uniquifier  string
	description string
	// mu protects debuggeeID
	mu sync.Mutex
	// debuggeeID is returned from the server on registration, and is passed back
	// to the server in List and Update requests.
	debuggeeID string
}

// Options controls how the Debuglet Controller client identifies itself to the server.
// See https://cloud.google.com/storage/docs/projects and
// https://cloud.google.com/tools/cloud-debugger/setting-up-on-compute-engine
// for further documentation of these parameters.
type Options struct {
	ProjectNumber  string              // GCP Project Number.
	ProjectID      string              // GCP Project ID.
	AppModule      string              // Module name for the debugged program.
	AppVersion     string              // Version number for this module.
	SourceContexts []*cd.SourceContext // Description of source.
	Verbose        bool
	TokenSource    oauth2.TokenSource // Source of Credentials used for Stackdriver Debugger.
}

type serviceInterface interface {
	Register(ctx context.Context, req *cd.RegisterDebuggeeRequest) (*cd.RegisterDebuggeeResponse, error)
	Update(ctx context.Context, debuggeeID, breakpointID string, req *cd.UpdateActiveBreakpointRequest) (*cd.UpdateActiveBreakpointResponse, error)
	List(ctx context.Context, debuggeeID, waitToken string) (*cd.ListActiveBreakpointsResponse, error)
}

var newService = func(ctx context.Context, tokenSource oauth2.TokenSource) (serviceInterface, error) {
	httpClient, endpoint, err := transport.NewHTTPClient(ctx, option.WithTokenSource(tokenSource))
	if err != nil {
		return nil, err
	}
	s, err := cd.New(httpClient)
	if err != nil {
		return nil, err
	}
	if endpoint != "" {
		s.BasePath = endpoint
	}
	return &service{s: s}, nil
}

type service struct {
	s *cd.Service
}

func (s service) Register(ctx context.Context, req *cd.RegisterDebuggeeRequest) (*cd.RegisterDebuggeeResponse, error) {
	call := cd.NewControllerDebuggeesService(s.s).Register(req)
	return call.Context(ctx).Do()
}

func (s service) Update(ctx context.Context, debuggeeID, breakpointID string, req *cd.UpdateActiveBreakpointRequest) (*cd.UpdateActiveBreakpointResponse, error) {
	call := cd.NewControllerDebuggeesBreakpointsService(s.s).Update(debuggeeID, breakpointID, req)
	return call.Context(ctx).Do()
}

func (s service) List(ctx context.Context, debuggeeID, waitToken string) (*cd.ListActiveBreakpointsResponse, error) {
	call := cd.NewControllerDebuggeesBreakpointsService(s.s).List(debuggeeID)
	call.WaitToken(waitToken)
	return call.Context(ctx).Do()
}

// NewController connects to the Debuglet Controller server using the given options,
// and returns a Controller for that connection.
// Google Application Default Credentials are used to connect to the Debuglet Controller;
// see https://developers.google.com/identity/protocols/application-default-credentials
func NewController(ctx context.Context, o Options) (*Controller, error) {
	// We build a JSON encoding of o.SourceContexts so we can hash it.
	scJSON, err := json.Marshal(o.SourceContexts)
	if err != nil {
		scJSON = nil
		o.SourceContexts = nil
	}

	// Compute a uniquifier string by hashing the project number, app module name,
	// app module version, debuglet version, and source context.
	// The choice of hash function is arbitrary.
	h := sha256.Sum256([]byte(fmt.Sprintf("%d %s %d %s %d %s %d %s %d %s",
		len(o.ProjectNumber), o.ProjectNumber,
		len(o.AppModule), o.AppModule,
		len(o.AppVersion), o.AppVersion,
		len(agentVersionString), agentVersionString,
		len(scJSON), scJSON)))
	uniquifier := fmt.Sprintf("%X", h[0:16]) // 32 hex characters

	description := o.ProjectID
	if o.AppModule != "" {
		description += "-" + o.AppModule
	}
	if o.AppVersion != "" {
		description += "-" + o.AppVersion
	}

	s, err := newService(ctx, o.TokenSource)
	if err != nil {
		return nil, err
	}

	// Construct client.
	c := &Controller{
		s:           s,
		waitToken:   initWaitToken,
		verbose:     o.Verbose,
		options:     o,
		uniquifier:  uniquifier,
		description: description,
	}

	return c, nil
}

func (c *Controller) getDebuggeeID(ctx context.Context) (string, error) {
	c.mu.Lock()
	defer c.mu.Unlock()
	if c.debuggeeID != "" {
		return c.debuggeeID, nil
	}
	// The debuglet hasn't been registered yet, or it is disabled and we should try registering again.
	if err := c.register(ctx); err != nil {
		return "", err
	}
	return c.debuggeeID, nil
}

// List retrieves the current list of breakpoints from the server.
// If the set of breakpoints on the server is the same as the one returned in
// the previous call to List, the server can delay responding until it changes,
// and return an error instead if no change occurs before a time limit the
// server sets. List can't be called concurrently with itself.
func (c *Controller) List(ctx context.Context) (*cd.ListActiveBreakpointsResponse, error) {
	id, err := c.getDebuggeeID(ctx)
	if err != nil {
		return nil, err
	}
	resp, err := c.s.List(ctx, id, c.waitToken)
	if err != nil {
		if isAbortedError(err) {
			return nil, ErrListUnchanged
		}
		// For other errors, the protocol requires that we attempt to re-register.
		c.mu.Lock()
		defer c.mu.Unlock()
		if regError := c.register(ctx); regError != nil {
			return nil, regError
		}
		return nil, err
	}
	if resp == nil {
		return nil, errors.New("no response")
	}
	if c.verbose {
		log.Printf("List response: %v", resp)
	}
	c.waitToken = resp.NextWaitToken
	return resp, nil
}

// isAbortedError tests if err is a *googleapi.Error, that it contains one error
// in Errors, and that that error's Reason is "aborted".
func isAbortedError(err error) bool {
	e, _ := err.(*googleapi.Error)
	if e == nil {
		return false
	}
	if len(e.Errors) != 1 {
		return false
	}
	return e.Errors[0].Reason == "aborted"
}

// Update reports information to the server about a breakpoint that was hit.
// Update can be called concurrently with List and Update.
func (c *Controller) Update(ctx context.Context, breakpointID string, bp *cd.Breakpoint) error {
	req := &cd.UpdateActiveBreakpointRequest{Breakpoint: bp}
	if c.verbose {
		log.Printf("sending update for %s: %v", breakpointID, req)
	}
	id, err := c.getDebuggeeID(ctx)
	if err != nil {
		return err
	}
	_, err = c.s.Update(ctx, id, breakpointID, req)
	return err
}

// register calls the Debuglet Controller Register method, and sets c.debuggeeID.
// c.mu should be locked while calling this function. List and Update can't
// make progress until it returns.
func (c *Controller) register(ctx context.Context) error {
	req := cd.RegisterDebuggeeRequest{
		Debuggee: &cd.Debuggee{
			AgentVersion:   agentVersionString,
			Description:    c.description,
			Project:        c.options.ProjectNumber,
			SourceContexts: c.options.SourceContexts,
			Uniquifier:     c.uniquifier,
		},
	}
	resp, err := c.s.Register(ctx, &req)
	if err != nil {
		return err
	}
	if resp == nil {
		return errors.New("register: no response")
	}
	if resp.Debuggee.IsDisabled {
		// Setting c.debuggeeID to empty makes sure future List and Update calls
		// will call register first.
		c.debuggeeID = ""
	} else {
		c.debuggeeID = resp.Debuggee.Id
	}
	if c.debuggeeID == "" {
		return ErrDebuggeeDisabled
	}
	return nil
}
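One subtlety in the client above is the wait-token handshake: every List response carries NextWaitToken, which must be echoed in the following request, starting from initWaitToken. The following self-contained sketch shows that threading against a fake in-memory service; all names here are local to the sketch, not part of the controller package.

package main

import "fmt"

// listResp mimics the shape of a ListActiveBreakpoints response: a token to
// echo in the next request, alongside whatever payload changed.
type listResp struct {
	nextWaitToken string
}

// fakeList stands in for the Debuglet Controller service.
func fakeList(waitToken string) listResp {
	return listResp{nextWaitToken: waitToken + "+"}
}

func main() {
	token := "init" // the first request ever carries initWaitToken
	for i := 0; i < 3; i++ {
		resp := fakeList(token)
		fmt.Printf("sent %q, next %q\n", token, resp.nextWaitToken)
		token = resp.nextWaitToken // thread the server's token into the next call
	}
}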
218
vendor/cloud.google.com/go/cmd/go-cloud-debug-agent/internal/controller/client_test.go
generated
vendored
Normal file
218
vendor/cloud.google.com/go/cmd/go-cloud-debug-agent/internal/controller/client_test.go
generated
vendored
Normal file
|
@ -0,0 +1,218 @@
|
|||
// Copyright 2016 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package controller

import (
	"testing"

	"golang.org/x/net/context"
	"golang.org/x/oauth2"

	cd "google.golang.org/api/clouddebugger/v2"
	"google.golang.org/api/googleapi"
)

const (
	testDebuggeeID   = "d12345"
	testBreakpointID = "bp12345"
)

var (
	// The sequence of wait tokens in List requests and responses.
	expectedWaitToken = []string{"init", "token1", "token2", "token1", "token1"}
	// The set of breakpoints returned from each List call.
	expectedBreakpoints = [][]*cd.Breakpoint{
		nil,
		{
			&cd.Breakpoint{
				Id:           testBreakpointID,
				IsFinalState: false,
				Location:     &cd.SourceLocation{Line: 42, Path: "foo.go"},
			},
		},
		nil,
	}
	abortedError error = &googleapi.Error{
		Code:    409,
		Message: "Conflict",
		Body: `{
 "error": {
  "errors": [
   {
    "domain": "global",
    "reason": "aborted",
    "message": "Conflict"
   }
  ],
  "code": 409,
  "message": "Conflict"
 }
}`,
		Errors: []googleapi.ErrorItem{
			{Reason: "aborted", Message: "Conflict"},
		},
	}
	backendError error = &googleapi.Error{
		Code:    503,
		Message: "Backend Error",
		Body: `{
 "error": {
  "errors": [
   {
    "domain": "global",
    "reason": "backendError",
    "message": "Backend Error"
   }
  ],
  "code": 503,
  "message": "Backend Error"
 }
}`,
		Errors: []googleapi.ErrorItem{
			{Reason: "backendError", Message: "Backend Error"},
		},
	}
)

type mockService struct {
	t                 *testing.T
	listCallsSeen     int
	registerCallsSeen int
}

func (s *mockService) Register(ctx context.Context, req *cd.RegisterDebuggeeRequest) (*cd.RegisterDebuggeeResponse, error) {
	s.registerCallsSeen++
	if req.Debuggee == nil {
		s.t.Errorf("missing debuggee")
		return nil, nil
	}
	if req.Debuggee.AgentVersion == "" {
		s.t.Errorf("missing agent version")
	}
	if req.Debuggee.Description == "" {
		s.t.Errorf("missing debuglet description")
	}
	if req.Debuggee.Project == "" {
		s.t.Errorf("missing project id")
	}
	if req.Debuggee.Uniquifier == "" {
		s.t.Errorf("missing uniquifier")
	}
	return &cd.RegisterDebuggeeResponse{
		Debuggee: &cd.Debuggee{Id: testDebuggeeID},
	}, nil
}

func (s *mockService) Update(ctx context.Context, id, breakpointID string, req *cd.UpdateActiveBreakpointRequest) (*cd.UpdateActiveBreakpointResponse, error) {
	if id != testDebuggeeID {
		s.t.Errorf("got debuggee ID %s want %s", id, testDebuggeeID)
	}
	if breakpointID != testBreakpointID {
		s.t.Errorf("got breakpoint ID %s want %s", breakpointID, testBreakpointID)
	}
	if !req.Breakpoint.IsFinalState {
		s.t.Errorf("got IsFinalState = false, want true")
	}
	return nil, nil
}

func (s *mockService) List(ctx context.Context, id, waitToken string) (*cd.ListActiveBreakpointsResponse, error) {
	if id != testDebuggeeID {
		s.t.Errorf("got debuggee ID %s want %s", id, testDebuggeeID)
	}
	if waitToken != expectedWaitToken[s.listCallsSeen] {
		s.t.Errorf("got wait token %s want %s", waitToken, expectedWaitToken[s.listCallsSeen])
	}
	s.listCallsSeen++
	if s.listCallsSeen == 4 {
		return nil, backendError
	}
	if s.listCallsSeen == 5 {
		return nil, abortedError
	}
	resp := &cd.ListActiveBreakpointsResponse{
		Breakpoints:   expectedBreakpoints[s.listCallsSeen-1],
		NextWaitToken: expectedWaitToken[s.listCallsSeen],
	}
	return resp, nil
}

func TestDebugletControllerClientLibrary(t *testing.T) {
	var (
		m    *mockService
		c    *Controller
		list *cd.ListActiveBreakpointsResponse
		err  error
	)
	m = &mockService{t: t}
	newService = func(context.Context, oauth2.TokenSource) (serviceInterface, error) { return m, nil }
	opts := Options{
		ProjectNumber: "5",
		ProjectID:     "p1",
		AppModule:     "mod1",
		AppVersion:    "v1",
	}
	ctx := context.Background()
	if c, err = NewController(ctx, opts); err != nil {
		t.Fatal("Initializing Controller client:", err)
	}
	if list, err = c.List(ctx); err != nil {
		t.Fatal("List:", err)
	}
	if m.registerCallsSeen != 1 {
		t.Errorf("saw %d Register calls, want 1", m.registerCallsSeen)
	}
	if list, err = c.List(ctx); err != nil {
		t.Fatal("List:", err)
	}
	if len(list.Breakpoints) != 1 {
		t.Fatalf("got %d breakpoints, want 1", len(list.Breakpoints))
	}
	if err = c.Update(ctx, list.Breakpoints[0].Id, &cd.Breakpoint{Id: testBreakpointID, IsFinalState: true}); err != nil {
		t.Fatal("Update:", err)
	}
	if list, err = c.List(ctx); err != nil {
		t.Fatal("List:", err)
	}
	if m.registerCallsSeen != 1 {
		t.Errorf("saw %d Register calls, want 1", m.registerCallsSeen)
	}
	// The next List call produces an error that should cause a Register call.
	if list, err = c.List(ctx); err == nil {
		t.Fatal("List should have returned an error")
	}
	if m.registerCallsSeen != 2 {
		t.Errorf("saw %d Register calls, want 2", m.registerCallsSeen)
	}
	// The next List call produces an error that should not cause a Register call.
	if list, err = c.List(ctx); err == nil {
		t.Fatal("List should have returned an error")
	}
	if m.registerCallsSeen != 2 {
		t.Errorf("saw %d Register calls, want 2", m.registerCallsSeen)
	}
	if m.listCallsSeen != 5 {
		t.Errorf("saw %d list calls, want 5", m.listCallsSeen)
	}
}

func TestIsAbortedError(t *testing.T) {
	if !isAbortedError(abortedError) {
		t.Errorf("isAborted(%+v): got false, want true", abortedError)
	}
	if isAbortedError(backendError) {
		t.Errorf("isAborted(%+v): got true, want false", backendError)
	}
}
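The tests above pin down the Controller contract: Register runs lazily on the first List, a backend error on List triggers re-registration while an aborted wait-token conflict does not, and Update finalizes a breakpoint. A minimal polling-loop sketch built on only the identifiers the tests exercise (Controller, List, Update, isAbortedError); the actual breakpoint handling is elided and the retry policy shown is an assumption, not the vendored agent's code:

// Sketch only: drive Controller.List in a loop and finalize each
// returned breakpoint via Update.
func pollBreakpoints(ctx context.Context, c *Controller) error {
	for {
		resp, err := c.List(ctx) // long-polls using the server's wait token
		if err != nil {
			if isAbortedError(err) {
				continue // wait-token conflict: just retry
			}
			return err // assumption: surface other errors to the caller
		}
		for _, bp := range resp.Breakpoints {
			// A real agent would set the breakpoint and collect values
			// here before marking it final.
			bp.IsFinalState = true
			if err := c.Update(ctx, bp.Id, bp); err != nil {
				return err
			}
		}
	}
}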
460 vendor/cloud.google.com/go/cmd/go-cloud-debug-agent/internal/valuecollector/valuecollector.go generated vendored Normal file
@@ -0,0 +1,460 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package valuecollector is used to collect the values of variables in a program.
package valuecollector

import (
	"bytes"
	"fmt"
	"strconv"
	"strings"

	"golang.org/x/debug"
	cd "google.golang.org/api/clouddebugger/v2"
)

const (
	maxArrayLength = 50
	maxMapLength   = 20
)

// Collector is given references to variables from a program being debugged
// using AddVariable. Then when ReadValues is called, the Collector will fetch
// the values of those variables. Any variables referred to by those values
// will also be fetched; e.g. the targets of pointers, members of structs,
// elements of slices, etc. This continues iteratively, building a graph of
// values, until all the reachable values are fetched, or a size limit is
// reached.
//
// Variables are passed to the Collector as debug.Var, which is used by x/debug
// to represent references to variables. Values are returned as cd.Variable,
// which is used by the Debuglet Controller to represent the graph of values.
//
// For example, if the program has a struct variable:
//
//	foo := SomeStruct{a:42, b:"xyz"}
//
// and we call AddVariable with a reference to foo, we will get back a result
// like:
//
//	cd.Variable{Name:"foo", VarTableIndex:10}
//
// which denotes a variable named "foo" which will have its value stored in
// element 10 of the table that will later be returned by ReadValues. That
// element might be:
//
//	out[10] = &cd.Variable{Members:{{Name:"a", VarTableIndex:11},{Name:"b", VarTableIndex:12}}}
//
// which denotes a struct with two members a and b, whose values are in elements
// 11 and 12 of the output table:
//
//	out[11] = &cd.Variable{Value:"42"}
//	out[12] = &cd.Variable{Value:"xyz"}
type Collector struct {
	// prog is the program being debugged.
	prog debug.Program
	// limit is the maximum size of the output slice of values.
	limit int
	// index is a map from references (variables and map elements) to their
	// locations in the table.
	index map[reference]int
	// table contains the references, including those given to the
	// Collector directly and those the Collector itself found.
	// If VarTableIndex is set to 0 in a cd.Variable, it is ignored, so the first entry
	// of table can't be used. On initialization we put a dummy value there.
	table []reference
}

// reference represents a value which is in the queue to be read by the
// collector. It is either a debug.Var, or a mapElement.
type reference interface{}

// mapElement represents an element of a map in the debugged program's memory.
type mapElement struct {
	debug.Map
	index uint64
}

// NewCollector returns a Collector for the given program and size limit.
// The limit is the maximum size of the slice of values returned by ReadValues.
func NewCollector(prog debug.Program, limit int) *Collector {
	return &Collector{
		prog:  prog,
		limit: limit,
		index: make(map[reference]int),
		table: []reference{debug.Var{}},
	}
}

// AddVariable adds another variable to be collected.
// The Collector doesn't get the value immediately; it returns a cd.Variable
// that contains an index into the table which will later be returned by
// ReadValues.
func (c *Collector) AddVariable(lv debug.LocalVar) *cd.Variable {
	ret := &cd.Variable{Name: lv.Name}
	if index, ok := c.add(lv.Var); !ok {
		// If the add call failed, it's because we reached the size limit.
		// The Debuglet Controller's convention is to pass it a "Not Captured" error
		// in this case.
		ret.Status = statusMessage(messageNotCaptured, true, refersToVariableName)
	} else {
		ret.VarTableIndex = int64(index)
	}
	return ret
}

// add adds a reference to the set of values to be read from the
// program. It returns the index in the output table that will contain the
// corresponding value. It fails if the table has reached the size limit.
// It deduplicates references, so the index may be the same as one that was
// returned from an earlier add call.
func (c *Collector) add(r reference) (outputIndex int, ok bool) {
	if i, ok := c.index[r]; ok {
		return i, true
	}
	i := len(c.table)
	if i >= c.limit {
		return 0, false
	}
	c.index[r] = i
	c.table = append(c.table, r)
	return i, true
}

func addMember(v *cd.Variable, name string) *cd.Variable {
	v2 := &cd.Variable{Name: name}
	v.Members = append(v.Members, v2)
	return v2
}

// ReadValues fetches values of the variables that were passed to the Collector
// with AddVariable. The values of any new variables found are also fetched,
// e.g. the targets of pointers or the members of structs, until we reach the
// size limit or we run out of values to fetch.
// The results are output as a []*cd.Variable, which is the type we need to send
// to the Debuglet Controller after we trigger a breakpoint.
func (c *Collector) ReadValues() (out []*cd.Variable) {
	for i := 0; i < len(c.table); i++ {
		// Create a new cd.Variable for this value, and append it to the output.
		dcv := new(cd.Variable)
		out = append(out, dcv)
		if i == 0 {
			// The first element is unused.
			continue
		}
		switch x := c.table[i].(type) {
		case mapElement:
			key, value, err := c.prog.MapElement(x.Map, x.index)
			if err != nil {
				dcv.Status = statusMessage(err.Error(), true, refersToVariableValue)
				continue
			}
			// Add a member for the key.
			member := addMember(dcv, "key")
			if index, ok := c.add(key); !ok {
				// The table is full.
				member.Status = statusMessage(messageNotCaptured, true, refersToVariableName)
				continue
			} else {
				member.VarTableIndex = int64(index)
			}
			// Add a member for the value.
			member = addMember(dcv, "value")
			if index, ok := c.add(value); !ok {
				// The table is full.
				member.Status = statusMessage(messageNotCaptured, true, refersToVariableName)
			} else {
				member.VarTableIndex = int64(index)
			}
		case debug.Var:
			if v, err := c.prog.Value(x); err != nil {
				dcv.Status = statusMessage(err.Error(), true, refersToVariableValue)
			} else {
				c.FillValue(v, dcv)
			}
		}
	}
	return out
}

// indexable is an interface for arrays, slices and channels.
type indexable interface {
	Len() uint64
	Element(uint64) debug.Var
}

// channel implements indexable.
type channel struct {
	debug.Channel
}

func (c channel) Len() uint64 {
	return c.Length
}

var (
	_ indexable = debug.Array{}
	_ indexable = debug.Slice{}
	_ indexable = channel{}
)

// FillValue copies a value into a cd.Variable. Any variables referred to by
// that value, e.g. struct members and pointer targets, are added to the
// collector's queue, to be fetched later by ReadValues.
func (c *Collector) FillValue(v debug.Value, dcv *cd.Variable) {
	if c, ok := v.(debug.Channel); ok {
		// Convert to channel, which implements indexable.
		v = channel{c}
	}
	// Fill in dcv in a manner depending on the type of the value we got.
	switch val := v.(type) {
	case int8, int16, int32, int64, bool, uint8, uint16, uint32, uint64, float32, float64, complex64, complex128:
		// For simple types, we just print the value to dcv.Value.
		dcv.Value = fmt.Sprint(val)
	case string:
		// Put double quotes around strings.
		dcv.Value = strconv.Quote(val)
	case debug.String:
		if uint64(len(val.String)) < val.Length {
			// This string value was truncated.
			dcv.Value = strconv.Quote(val.String + "...")
		} else {
			dcv.Value = strconv.Quote(val.String)
		}
	case debug.Struct:
		// For structs, we add an entry to dcv.Members for each field in the
		// struct.
		// Each member will contain the name of the field, and the index in the
		// output table which will contain the value of that field.
		for _, f := range val.Fields {
			member := addMember(dcv, f.Name)
			if index, ok := c.add(f.Var); !ok {
				// The table is full.
				member.Status = statusMessage(messageNotCaptured, true, refersToVariableName)
			} else {
				member.VarTableIndex = int64(index)
			}
		}
	case debug.Map:
		dcv.Value = fmt.Sprintf("len = %d", val.Length)
		for i := uint64(0); i < val.Length; i++ {
			field := addMember(dcv, `⚫`)
			if i == maxMapLength {
				field.Name = "..."
				field.Status = statusMessage(messageTruncated, true, refersToVariableName)
				break
			}
			if index, ok := c.add(mapElement{val, i}); !ok {
				// The value table is full; add a member to contain the error message.
				field.Name = "..."
				field.Status = statusMessage(messageNotCaptured, true, refersToVariableName)
				break
			} else {
				field.VarTableIndex = int64(index)
			}
		}
	case debug.Pointer:
		if val.Address == 0 {
			dcv.Value = "<nil>"
		} else if val.TypeID == 0 {
			// We don't know the type of the pointer, so just output the address as
			// the value.
			dcv.Value = fmt.Sprintf("0x%X", val.Address)
			dcv.Status = statusMessage(messageUnknownPointerType, false, refersToVariableName)
		} else {
			// Adds the pointed-to variable to the table, and links this value to
			// that table entry through VarTableIndex.
			dcv.Value = fmt.Sprintf("0x%X", val.Address)
			target := addMember(dcv, "")
			if index, ok := c.add(debug.Var(val)); !ok {
				target.Status = statusMessage(messageNotCaptured, true, refersToVariableName)
			} else {
				target.VarTableIndex = int64(index)
			}
		}
	case indexable:
		// Arrays, slices and channels.
		dcv.Value = "len = " + fmt.Sprint(val.Len())
		for j := uint64(0); j < val.Len(); j++ {
			field := addMember(dcv, fmt.Sprint(`[`, j, `]`))
			if j == maxArrayLength {
				field.Name = "..."
				field.Status = statusMessage(messageTruncated, true, refersToVariableName)
				break
			}
			vr := val.Element(j)
			if index, ok := c.add(vr); !ok {
				// The value table is full; add a member to contain the error message.
				field.Name = "..."
				field.Status = statusMessage(messageNotCaptured, true, refersToVariableName)
				break
			} else {
				// Add a member with the index as the name.
				field.VarTableIndex = int64(index)
			}
		}
	default:
		dcv.Status = statusMessage(messageUnknownType, false, refersToVariableName)
	}
}

// statusMessage returns a *cd.StatusMessage with the given message, IsError
// field and refersTo field.
func statusMessage(msg string, isError bool, refersTo int) *cd.StatusMessage {
	return &cd.StatusMessage{
		Description: &cd.FormatMessage{Format: "$0", Parameters: []string{msg}},
		IsError:     isError,
		RefersTo:    refersToString[refersTo],
	}
}

// LogString produces a string for a logpoint, substituting in variable values
// using evaluatedExpressions and varTable.
func LogString(s string, evaluatedExpressions []*cd.Variable, varTable []*cd.Variable) string {
	var buf bytes.Buffer
	fmt.Fprintf(&buf, "LOGPOINT: ")
	seen := make(map[*cd.Variable]bool)
	for i := 0; i < len(s); {
		if s[i] == '$' {
			i++
			if num, n, ok := parseToken(s[i:], len(evaluatedExpressions)-1); ok {
				// This token is one of $0, $1, etc. Write the corresponding expression.
				writeExpression(&buf, evaluatedExpressions[num], false, varTable, seen)
				i += n
			} else {
				// Something else, like $$.
				buf.WriteByte(s[i])
				i++
			}
		} else {
			buf.WriteByte(s[i])
			i++
		}
	}
	return buf.String()
}

func parseToken(s string, max int) (num int, bytesRead int, ok bool) {
	var i int
	for i < len(s) && s[i] >= '0' && s[i] <= '9' {
		i++
	}
	num, err := strconv.Atoi(s[:i])
	return num, i, err == nil && num <= max
}

// writeExpression recursively writes variables to buf, in a format suitable
// for logging. If printName is true, writes the name of the variable.
func writeExpression(buf *bytes.Buffer, v *cd.Variable, printName bool, varTable []*cd.Variable, seen map[*cd.Variable]bool) {
	if v == nil {
		// Shouldn't happen.
		return
	}
	name, value, status, members := v.Name, v.Value, v.Status, v.Members

	// If v.VarTableIndex is not zero, it refers to an element of varTable.
	// We merge its fields with the fields we got from v.
	var other *cd.Variable
	if idx := int(v.VarTableIndex); idx > 0 && idx < len(varTable) {
		other = varTable[idx]
	}
	if other != nil {
		if name == "" {
			name = other.Name
		}
		if value == "" {
			value = other.Value
		}
		if status == nil {
			status = other.Status
		}
		if len(members) == 0 {
			members = other.Members
		}
	}
	if printName && name != "" {
		buf.WriteString(name)
		buf.WriteByte(':')
	}

	// If we have seen this value before, write "..." rather than repeating it.
	if seen[v] {
		buf.WriteString("...")
		return
	}
	seen[v] = true
	if other != nil {
		if seen[other] {
			buf.WriteString("...")
			return
		}
		seen[other] = true
	}

	if value != "" && !strings.HasPrefix(value, "len = ") {
		// A plain value.
		buf.WriteString(value)
	} else if status != nil && status.Description != nil {
		// An error.
		for _, p := range status.Description.Parameters {
			buf.WriteByte('(')
			buf.WriteString(p)
			buf.WriteByte(')')
		}
	} else if name == `⚫` {
		// A map element.
		first := true
		for _, member := range members {
			if first {
				first = false
			} else {
				buf.WriteByte(':')
			}
			writeExpression(buf, member, false, varTable, seen)
		}
	} else {
		// A map, array, slice, channel, or struct.
		isStruct := value == ""
		first := true
		buf.WriteByte('{')
		for _, member := range members {
			if first {
				first = false
			} else {
				buf.WriteString(", ")
			}
			writeExpression(buf, member, isStruct, varTable, seen)
		}
		buf.WriteByte('}')
	}
}

const (
	// Error messages for cd.StatusMessage
	messageNotCaptured        = "Not captured"
	messageTruncated          = "Truncated"
	messageUnknownPointerType = "Unknown pointer type"
	messageUnknownType        = "Unknown type"
	// RefersTo values for cd.StatusMessage.
	refersToVariableName = iota
	refersToVariableValue
)

// refersToString contains the strings for each refersTo value.
// See the definition of StatusMessage in the v2/clouddebugger package.
var refersToString = map[int]string{
	refersToVariableName:  "VARIABLE_NAME",
	refersToVariableValue: "VARIABLE_VALUE",
}
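Taken together, NewCollector, AddVariable, ReadValues, and LogString support a collect-then-render flow. A minimal sketch, assuming a debug.Program (prog) and a debug.LocalVar (lv) obtained from x/debug; the table limit of 26 mirrors the test file that follows:

// Sketch only: collect the value graph for one local variable and
// render a logpoint message that refers to it as $0.
func collectAndLog(prog debug.Program, lv debug.LocalVar) string {
	c := NewCollector(prog, 26) // value table capped at 26 entries
	expr := c.AddVariable(lv)   // cd.Variable whose VarTableIndex points into the table
	varTable := c.ReadValues()  // fetch values, following pointers, members, elements
	return LogString("x = $0", []*cd.Variable{expr}, varTable)
}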
418 vendor/cloud.google.com/go/cmd/go-cloud-debug-agent/internal/valuecollector/valuecollector_test.go generated vendored Normal file
@@ -0,0 +1,418 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package valuecollector

import (
	"fmt"
	"reflect"
	"testing"

	"golang.org/x/debug"
	cd "google.golang.org/api/clouddebugger/v2"
)

const (
	// Some arbitrary type IDs for the test, for use in debug.Var's TypeID field.
	// A TypeID of 0 means the type is unknown, so we start at 1.
	int16Type = iota + 1
	stringType
	structType
	pointerType
	arrayType
	int32Type
	debugStringType
	mapType
	channelType
	sliceType
)

func TestValueCollector(t *testing.T) {
	// Construct the collector.
	c := NewCollector(&Program{}, 26)
	// Add some variables of various types, whose values we want the collector to read.
	variablesToAdd := []debug.LocalVar{
		{Name: "a", Var: debug.Var{int16Type, 0x1}},
		{Name: "b", Var: debug.Var{stringType, 0x2}},
		{Name: "c", Var: debug.Var{structType, 0x3}},
		{Name: "d", Var: debug.Var{pointerType, 0x4}},
		{Name: "e", Var: debug.Var{arrayType, 0x5}},
		{Name: "f", Var: debug.Var{debugStringType, 0x6}},
		{Name: "g", Var: debug.Var{mapType, 0x7}},
		{Name: "h", Var: debug.Var{channelType, 0x8}},
		{Name: "i", Var: debug.Var{sliceType, 0x9}},
	}
	expectedResults := []*cd.Variable{
		&cd.Variable{Name: "a", VarTableIndex: 1},
		&cd.Variable{Name: "b", VarTableIndex: 2},
		&cd.Variable{Name: "c", VarTableIndex: 3},
		&cd.Variable{Name: "d", VarTableIndex: 4},
		&cd.Variable{Name: "e", VarTableIndex: 5},
		&cd.Variable{Name: "f", VarTableIndex: 6},
		&cd.Variable{Name: "g", VarTableIndex: 7},
		&cd.Variable{Name: "h", VarTableIndex: 8},
		&cd.Variable{Name: "i", VarTableIndex: 9},
	}
	for i, v := range variablesToAdd {
		added := c.AddVariable(v)
		if !reflect.DeepEqual(added, expectedResults[i]) {
			t.Errorf("AddVariable: got %+v want %+v", *added, *expectedResults[i])
		}
	}
	// Read the values, compare the output to what we expect.
	v := c.ReadValues()
	expectedValues := []*cd.Variable{
		&cd.Variable{},
		&cd.Variable{Value: "1"},
		&cd.Variable{Value: `"hello"`},
		&cd.Variable{
			Members: []*cd.Variable{
				&cd.Variable{Name: "x", VarTableIndex: 1},
				&cd.Variable{Name: "y", VarTableIndex: 2},
			},
		},
		&cd.Variable{
			Members: []*cd.Variable{
				&cd.Variable{VarTableIndex: 1},
			},
			Value: "0x1",
		},
		&cd.Variable{
			Members: []*cd.Variable{
				&cd.Variable{Name: "[0]", VarTableIndex: 10},
				&cd.Variable{Name: "[1]", VarTableIndex: 11},
				&cd.Variable{Name: "[2]", VarTableIndex: 12},
				&cd.Variable{Name: "[3]", VarTableIndex: 13},
			},
			Value: "len = 4",
		},
		&cd.Variable{Value: `"world"`},
		&cd.Variable{
			Members: []*cd.Variable{
				&cd.Variable{Name: "⚫", VarTableIndex: 14},
				&cd.Variable{Name: "⚫", VarTableIndex: 15},
				&cd.Variable{Name: "⚫", VarTableIndex: 16},
			},
			Value: "len = 3",
		},
		&cd.Variable{
			Members: []*cd.Variable{
				&cd.Variable{Name: "[0]", VarTableIndex: 17},
				&cd.Variable{Name: "[1]", VarTableIndex: 18},
			},
			Value: "len = 2",
		},
		&cd.Variable{
			Members: []*cd.Variable{
				&cd.Variable{Name: "[0]", VarTableIndex: 19},
				&cd.Variable{Name: "[1]", VarTableIndex: 20},
			},
			Value: "len = 2",
		},
		&cd.Variable{Value: "100"},
		&cd.Variable{Value: "104"},
		&cd.Variable{Value: "108"},
		&cd.Variable{Value: "112"},
		&cd.Variable{
			Members: []*cd.Variable{
				&cd.Variable{Name: "key", VarTableIndex: 21},
				&cd.Variable{Name: "value", VarTableIndex: 22},
			},
		},
		&cd.Variable{
			Members: []*cd.Variable{
				&cd.Variable{Name: "key", VarTableIndex: 23},
				&cd.Variable{Name: "value", VarTableIndex: 24},
			},
		},
		&cd.Variable{
			Members: []*cd.Variable{
				&cd.Variable{Name: "key", VarTableIndex: 25},
				&cd.Variable{
					Name: "value",
					Status: &cd.StatusMessage{
						Description: &cd.FormatMessage{
							Format:     "$0",
							Parameters: []string{"Not captured"},
						},
						IsError:  true,
						RefersTo: "VARIABLE_NAME",
					},
				},
			},
		},
		&cd.Variable{Value: "246"},
		&cd.Variable{Value: "210"},
		&cd.Variable{Value: "300"},
		&cd.Variable{Value: "304"},
		&cd.Variable{Value: "400"},
		&cd.Variable{Value: "404"},
		&cd.Variable{Value: "1400"},
		&cd.Variable{Value: "1404"},
		&cd.Variable{Value: "2400"},
	}
	if !reflect.DeepEqual(v, expectedValues) {
		t.Errorf("ReadValues: got %v want %v", v, expectedValues)
		// Do element-by-element comparisons, for more useful error messages.
		for i := range v {
			if i < len(expectedValues) && !reflect.DeepEqual(v[i], expectedValues[i]) {
				t.Errorf("element %d: got %+v want %+v", i, *v[i], *expectedValues[i])
			}
		}
	}
}

// Program implements the similarly-named interface in x/debug.
// ValueCollector should only call its Value and MapElement methods.
type Program struct {
	debug.Program
}

func (p *Program) Value(v debug.Var) (debug.Value, error) {
	// We determine what to return using v.TypeID.
	switch v.TypeID {
	case int16Type:
		// We use the address as the value, so that we're testing whether the right
		// address was calculated.
		return int16(v.Address), nil
	case stringType:
		// A string.
		return "hello", nil
	case structType:
		// A struct with two elements.
		return debug.Struct{
			Fields: []debug.StructField{
				{
					Name: "x",
					Var:  debug.Var{int16Type, 0x1},
				},
				{
					Name: "y",
					Var:  debug.Var{stringType, 0x2},
				},
			},
		}, nil
	case pointerType:
		// A pointer to the first variable above.
		return debug.Pointer{int16Type, 0x1}, nil
	case arrayType:
		// An array of 4 32-bit-wide elements.
		return debug.Array{
			ElementTypeID: int32Type,
			Address:       0x64,
			Length:        4,
			StrideBits:    32,
		}, nil
	case debugStringType:
		return debug.String{
			Length: 5,
			String: "world",
		}, nil
	case mapType:
		return debug.Map{
			TypeID:  99,
			Address: 0x100,
			Length:  3,
		}, nil
	case channelType:
		return debug.Channel{
			ElementTypeID: int32Type,
			Address:       200,
			Buffer:        210,
			Length:        2,
			Capacity:      10,
			Stride:        4,
			BufferStart:   9,
		}, nil
	case sliceType:
		// A slice of 2 32-bit-wide elements.
		return debug.Slice{
			Array: debug.Array{
				ElementTypeID: int32Type,
				Address:       300,
				Length:        2,
				StrideBits:    32,
			},
			Capacity: 50,
		}, nil
	case int32Type:
		// We use the address as the value, so that we're testing whether the right
		// address was calculated.
		return int32(v.Address), nil
	}
	return nil, fmt.Errorf("unexpected Value request")
}

func (p *Program) MapElement(m debug.Map, index uint64) (debug.Var, debug.Var, error) {
	return debug.Var{TypeID: int16Type, Address: 1000*index + 400},
		debug.Var{TypeID: int32Type, Address: 1000*index + 404},
		nil
}

func TestLogString(t *testing.T) {
	bp := cd.Breakpoint{
		Action:           "LOG",
		LogMessageFormat: "$0 hello, $$7world! $1 $2 $3 $4 $5$6 $7 $8",
		EvaluatedExpressions: []*cd.Variable{
			&cd.Variable{Name: "a", VarTableIndex: 1},
			&cd.Variable{Name: "b", VarTableIndex: 2},
			&cd.Variable{Name: "c", VarTableIndex: 3},
			&cd.Variable{Name: "d", VarTableIndex: 4},
			&cd.Variable{Name: "e", VarTableIndex: 5},
			&cd.Variable{Name: "f", VarTableIndex: 6},
			&cd.Variable{Name: "g", VarTableIndex: 7},
			&cd.Variable{Name: "h", VarTableIndex: 8},
			&cd.Variable{Name: "i", VarTableIndex: 9},
		},
	}
	varTable := []*cd.Variable{
		&cd.Variable{},
		&cd.Variable{Value: "1"},
		&cd.Variable{Value: `"hello"`},
		&cd.Variable{
			Members: []*cd.Variable{
				&cd.Variable{Name: "x", Value: "1"},
				&cd.Variable{Name: "y", Value: `"hello"`},
				&cd.Variable{Name: "z", VarTableIndex: 3},
			},
		},
		&cd.Variable{
			Members: []*cd.Variable{
				&cd.Variable{VarTableIndex: 1},
			},
			Value: "0x1",
		},
		&cd.Variable{
			Members: []*cd.Variable{
				&cd.Variable{Name: "[0]", VarTableIndex: 10},
				&cd.Variable{Name: "[1]", VarTableIndex: 11},
				&cd.Variable{Name: "[2]", VarTableIndex: 12},
				&cd.Variable{Name: "[3]", VarTableIndex: 13},
			},
			Value: "len = 4",
		},
		&cd.Variable{Value: `"world"`},
		&cd.Variable{
			Members: []*cd.Variable{
				&cd.Variable{Name: "⚫", VarTableIndex: 14},
				&cd.Variable{Name: "⚫", VarTableIndex: 15},
				&cd.Variable{Name: "⚫", VarTableIndex: 16},
			},
			Value: "len = 3",
		},
		&cd.Variable{
			Members: []*cd.Variable{
				&cd.Variable{Name: "[0]", VarTableIndex: 17},
				&cd.Variable{Name: "[1]", VarTableIndex: 18},
			},
			Value: "len = 2",
		},
		&cd.Variable{
			Members: []*cd.Variable{
				&cd.Variable{Name: "[0]", VarTableIndex: 19},
				&cd.Variable{Name: "[1]", VarTableIndex: 20},
			},
			Value: "len = 2",
		},
		&cd.Variable{Value: "100"},
		&cd.Variable{Value: "104"},
		&cd.Variable{Value: "108"},
		&cd.Variable{Value: "112"},
		&cd.Variable{
			Members: []*cd.Variable{
				&cd.Variable{Name: "key", VarTableIndex: 21},
				&cd.Variable{Name: "value", VarTableIndex: 22},
			},
		},
		&cd.Variable{
			Members: []*cd.Variable{
				&cd.Variable{Name: "key", VarTableIndex: 23},
				&cd.Variable{Name: "value", VarTableIndex: 24},
			},
		},
		&cd.Variable{
			Members: []*cd.Variable{
				&cd.Variable{Name: "key", VarTableIndex: 25},
				&cd.Variable{
					Name: "value",
					Status: &cd.StatusMessage{
						Description: &cd.FormatMessage{
							Format:     "$0",
							Parameters: []string{"Not captured"},
						},
						IsError:  true,
						RefersTo: "VARIABLE_NAME",
					},
				},
			},
		},
		&cd.Variable{Value: "246"},
		&cd.Variable{Value: "210"},
		&cd.Variable{Value: "300"},
		&cd.Variable{Value: "304"},
		&cd.Variable{Value: "400"},
		&cd.Variable{Value: "404"},
		&cd.Variable{Value: "1400"},
		&cd.Variable{Value: "1404"},
		&cd.Variable{Value: "2400"},
	}
	s := LogString(bp.LogMessageFormat, bp.EvaluatedExpressions, varTable)
	expected := `LOGPOINT: 1 hello, $7world! "hello" {x:1, y:"hello", z:...} ` +
		`0x1 {100, 104, 108, 112} "world"{400:404, 1400:1404, 2400:(Not captured)} ` +
		`{246, 210} {300, 304}`
	if s != expected {
		t.Errorf("LogString: got %q want %q", s, expected)
	}
}

func TestParseToken(t *testing.T) {
	for _, c := range []struct {
		s   string
		max int
		num int
		n   int
		ok  bool
	}{
		{"", 0, 0, 0, false},
		{".", 0, 0, 0, false},
		{"0", 0, 0, 1, true},
		{"0", 1, 0, 1, true},
		{"00", 0, 0, 2, true},
		{"1.", 1, 1, 1, true},
		{"1.", 0, 0, 0, false},
		{"10", 10, 10, 2, true},
		{"10..", 10, 10, 2, true},
		{"10", 11, 10, 2, true},
		{"10..", 11, 10, 2, true},
		{"10", 9, 0, 0, false},
		{"10..", 9, 0, 0, false},
		{" 10", 10, 0, 0, false},
		{"010", 10, 10, 3, true},
		{"123456789", 123456789, 123456789, 9, true},
		{"123456789", 123456788, 0, 0, false},
		{"123456789123456789123456789", 999999999, 0, 0, false},
	} {
		num, n, ok := parseToken(c.s, c.max)
		if ok != c.ok {
			t.Errorf("parseToken(%q, %d): got ok=%t want ok=%t", c.s, c.max, ok, c.ok)
			continue
		}
		if !ok {
			continue
		}
		if num != c.num || n != c.n {
			t.Errorf("parseToken(%q, %d): got %d,%d,%t want %d,%d,%t", c.s, c.max, num, n, ok, c.num, c.n, c.ok)
		}
	}
}
438 vendor/cloud.google.com/go/compute/metadata/metadata.go generated vendored Normal file
@@ -0,0 +1,438 @@
// Copyright 2014 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package metadata provides access to Google Compute Engine (GCE)
// metadata and API service accounts.
//
// This package is a wrapper around the GCE metadata service,
// as documented at https://developers.google.com/compute/docs/metadata.
package metadata // import "cloud.google.com/go/compute/metadata"

import (
	"encoding/json"
	"fmt"
	"io/ioutil"
	"net"
	"net/http"
	"net/url"
	"os"
	"runtime"
	"strings"
	"sync"
	"time"

	"golang.org/x/net/context"
	"golang.org/x/net/context/ctxhttp"

	"cloud.google.com/go/internal"
)

const (
	// metadataIP is the documented metadata server IP address.
	metadataIP = "169.254.169.254"

	// metadataHostEnv is the environment variable specifying the
	// GCE metadata hostname. If empty, the default value of
	// metadataIP ("169.254.169.254") is used instead.
	// This variable name is not defined by any spec, as far as
	// I know; it was made up for the Go package.
	metadataHostEnv = "GCE_METADATA_HOST"
)

type cachedValue struct {
	k    string
	trim bool
	mu   sync.Mutex
	v    string
}

var (
	projID  = &cachedValue{k: "project/project-id", trim: true}
	projNum = &cachedValue{k: "project/numeric-project-id", trim: true}
	instID  = &cachedValue{k: "instance/id", trim: true}
)

var (
	metaClient = &http.Client{
		Transport: &internal.Transport{
			Base: &http.Transport{
				Dial: (&net.Dialer{
					Timeout:   2 * time.Second,
					KeepAlive: 30 * time.Second,
				}).Dial,
				ResponseHeaderTimeout: 2 * time.Second,
			},
		},
	}
	subscribeClient = &http.Client{
		Transport: &internal.Transport{
			Base: &http.Transport{
				Dial: (&net.Dialer{
					Timeout:   2 * time.Second,
					KeepAlive: 30 * time.Second,
				}).Dial,
			},
		},
	}
)

// NotDefinedError is returned when requested metadata is not defined.
//
// The underlying string is the suffix after "/computeMetadata/v1/".
//
// This error is not returned if the value is defined to be the empty
// string.
type NotDefinedError string

func (suffix NotDefinedError) Error() string {
	return fmt.Sprintf("metadata: GCE metadata %q not defined", string(suffix))
}

// Get returns a value from the metadata service.
// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/".
//
// If the GCE_METADATA_HOST environment variable is not defined, a default of
// 169.254.169.254 will be used instead.
//
// If the requested metadata is not defined, the returned error will
// be of type NotDefinedError.
func Get(suffix string) (string, error) {
	val, _, err := getETag(metaClient, suffix)
	return val, err
}

// getETag returns a value from the metadata service as well as the associated
// ETag using the provided client. This func is otherwise equivalent to Get.
func getETag(client *http.Client, suffix string) (value, etag string, err error) {
	// Using a fixed IP makes it very difficult to spoof the metadata service in
	// a container, which is an important use-case for local testing of cloud
	// deployments. To enable spoofing of the metadata service, the environment
	// variable GCE_METADATA_HOST is first inspected to decide where metadata
	// requests shall go.
	host := os.Getenv(metadataHostEnv)
	if host == "" {
		// Using 169.254.169.254 instead of "metadata" here because Go
		// binaries built with the "netgo" tag and without cgo won't
		// know the search suffix for "metadata" is
		// ".google.internal", and this IP address is documented as
		// being stable anyway.
		host = metadataIP
	}
	url := "http://" + host + "/computeMetadata/v1/" + suffix
	req, _ := http.NewRequest("GET", url, nil)
	req.Header.Set("Metadata-Flavor", "Google")
	res, err := client.Do(req)
	if err != nil {
		return "", "", err
	}
	defer res.Body.Close()
	if res.StatusCode == http.StatusNotFound {
		return "", "", NotDefinedError(suffix)
	}
	if res.StatusCode != 200 {
		return "", "", fmt.Errorf("status code %d trying to fetch %s", res.StatusCode, url)
	}
	all, err := ioutil.ReadAll(res.Body)
	if err != nil {
		return "", "", err
	}
	return string(all), res.Header.Get("Etag"), nil
}

func getTrimmed(suffix string) (s string, err error) {
	s, err = Get(suffix)
	s = strings.TrimSpace(s)
	return
}

func (c *cachedValue) get() (v string, err error) {
	defer c.mu.Unlock()
	c.mu.Lock()
	if c.v != "" {
		return c.v, nil
	}
	if c.trim {
		v, err = getTrimmed(c.k)
	} else {
		v, err = Get(c.k)
	}
	if err == nil {
		c.v = v
	}
	return
}

var (
	onGCEOnce sync.Once
	onGCE     bool
)

// OnGCE reports whether this process is running on Google Compute Engine.
func OnGCE() bool {
	onGCEOnce.Do(initOnGCE)
	return onGCE
}

func initOnGCE() {
	onGCE = testOnGCE()
}

func testOnGCE() bool {
	// The user explicitly said they're on GCE, so trust them.
	if os.Getenv(metadataHostEnv) != "" {
		return true
	}

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	resc := make(chan bool, 2)

	// Try two strategies in parallel.
	// See https://github.com/GoogleCloudPlatform/google-cloud-go/issues/194
	go func() {
		res, err := ctxhttp.Get(ctx, metaClient, "http://"+metadataIP)
		if err != nil {
			resc <- false
			return
		}
		defer res.Body.Close()
		resc <- res.Header.Get("Metadata-Flavor") == "Google"
	}()

	go func() {
		addrs, err := net.LookupHost("metadata.google.internal")
		if err != nil || len(addrs) == 0 {
			resc <- false
			return
		}
		resc <- strsContains(addrs, metadataIP)
	}()

	tryHarder := systemInfoSuggestsGCE()
	if tryHarder {
		res := <-resc
		if res {
			// The first strategy succeeded, so let's use it.
			return true
		}
		// Wait for either the DNS or metadata server probe to
		// contradict the other one and say we are running on
		// GCE. Give it a lot of time to do so, since the system
		// info already suggests we're running on a GCE BIOS.
		timer := time.NewTimer(5 * time.Second)
		defer timer.Stop()
		select {
		case res = <-resc:
			return res
		case <-timer.C:
			// Too slow. Who knows what this system is.
			return false
		}
	}

	// There's no hint from the system info that we're running on
	// GCE, so use the first probe's result as truth, whether it's
	// true or false. The goal here is to optimize for speed for
	// users who are NOT running on GCE. We can't assume that
	// either a DNS lookup or an HTTP request to a blackholed IP
	// address is fast. Worst case this should return when the
	// metaClient's Transport.ResponseHeaderTimeout or
	// Transport.Dial.Timeout fires (in two seconds).
	return <-resc
}

// systemInfoSuggestsGCE reports whether the local system (without
// doing network requests) suggests that we're running on GCE. If this
// returns true, testOnGCE tries a bit harder to reach its metadata
// server.
func systemInfoSuggestsGCE() bool {
	if runtime.GOOS != "linux" {
		// We don't have any non-Linux clues available, at least yet.
		return false
	}
	slurp, _ := ioutil.ReadFile("/sys/class/dmi/id/product_name")
	name := strings.TrimSpace(string(slurp))
	return name == "Google" || name == "Google Compute Engine"
}

// Subscribe subscribes to a value from the metadata service.
// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/".
// The suffix may contain query parameters.
//
// Subscribe calls fn with the latest metadata value indicated by the provided
// suffix. If the metadata value is deleted, fn is called with the empty string
// and ok false. Subscribe blocks until fn returns a non-nil error or the value
// is deleted. Subscribe returns the error value returned from the last call to
// fn, which may be nil when ok == false.
func Subscribe(suffix string, fn func(v string, ok bool) error) error {
	const failedSubscribeSleep = time.Second * 5

	// First check to see if the metadata value exists at all.
	val, lastETag, err := getETag(subscribeClient, suffix)
	if err != nil {
		return err
	}

	if err := fn(val, true); err != nil {
		return err
	}

	ok := true
	if strings.ContainsRune(suffix, '?') {
		suffix += "&wait_for_change=true&last_etag="
	} else {
		suffix += "?wait_for_change=true&last_etag="
	}
	for {
		val, etag, err := getETag(subscribeClient, suffix+url.QueryEscape(lastETag))
		if err != nil {
			if _, deleted := err.(NotDefinedError); !deleted {
				time.Sleep(failedSubscribeSleep)
				continue // Retry on other errors.
			}
			ok = false
		}
		lastETag = etag

		if err := fn(val, ok); err != nil || !ok {
			return err
		}
	}
}

// ProjectID returns the current instance's project ID string.
func ProjectID() (string, error) { return projID.get() }

// NumericProjectID returns the current instance's numeric project ID.
func NumericProjectID() (string, error) { return projNum.get() }

// InternalIP returns the instance's primary internal IP address.
func InternalIP() (string, error) {
	return getTrimmed("instance/network-interfaces/0/ip")
}

// ExternalIP returns the instance's primary external (public) IP address.
func ExternalIP() (string, error) {
	return getTrimmed("instance/network-interfaces/0/access-configs/0/external-ip")
}

// Hostname returns the instance's hostname. This will be of the form
// "<instanceID>.c.<projID>.internal".
func Hostname() (string, error) {
	return getTrimmed("instance/hostname")
}

// InstanceTags returns the list of user-defined instance tags,
// assigned when initially creating a GCE instance.
func InstanceTags() ([]string, error) {
	var s []string
	j, err := Get("instance/tags")
	if err != nil {
		return nil, err
	}
	if err := json.NewDecoder(strings.NewReader(j)).Decode(&s); err != nil {
		return nil, err
	}
	return s, nil
}

// InstanceID returns the current VM's numeric instance ID.
func InstanceID() (string, error) {
	return instID.get()
}

// InstanceName returns the current VM's instance ID string.
func InstanceName() (string, error) {
	host, err := Hostname()
	if err != nil {
		return "", err
	}
	return strings.Split(host, ".")[0], nil
}

// Zone returns the current VM's zone, such as "us-central1-b".
func Zone() (string, error) {
	zone, err := getTrimmed("instance/zone")
	// zone is of the form "projects/<projNum>/zones/<zoneName>".
	if err != nil {
		return "", err
	}
	return zone[strings.LastIndex(zone, "/")+1:], nil
}

// InstanceAttributes returns the list of user-defined attributes,
// assigned when initially creating a GCE VM instance. The value of an
// attribute can be obtained with InstanceAttributeValue.
func InstanceAttributes() ([]string, error) { return lines("instance/attributes/") }

// ProjectAttributes returns the list of user-defined attributes
// applying to the project as a whole, not just this VM. The value of
// an attribute can be obtained with ProjectAttributeValue.
func ProjectAttributes() ([]string, error) { return lines("project/attributes/") }

func lines(suffix string) ([]string, error) {
	j, err := Get(suffix)
	if err != nil {
		return nil, err
	}
	s := strings.Split(strings.TrimSpace(j), "\n")
	for i := range s {
		s[i] = strings.TrimSpace(s[i])
	}
	return s, nil
}

// InstanceAttributeValue returns the value of the provided VM
// instance attribute.
//
// If the requested attribute is not defined, the returned error will
// be of type NotDefinedError.
//
// InstanceAttributeValue may return ("", nil) if the attribute was
// defined to be the empty string.
func InstanceAttributeValue(attr string) (string, error) {
	return Get("instance/attributes/" + attr)
}

// ProjectAttributeValue returns the value of the provided
// project attribute.
//
// If the requested attribute is not defined, the returned error will
// be of type NotDefinedError.
//
// ProjectAttributeValue may return ("", nil) if the attribute was
// defined to be the empty string.
func ProjectAttributeValue(attr string) (string, error) {
	return Get("project/attributes/" + attr)
}

// Scopes returns the service account scopes for the given account.
// The account may be empty or the string "default" to use the instance's
// main account.
func Scopes(serviceAccount string) ([]string, error) {
	if serviceAccount == "" {
		serviceAccount = "default"
	}
	return lines("instance/service-accounts/" + serviceAccount + "/scopes")
}

func strsContains(ss []string, s string) bool {
	for _, v := range ss {
		if v == s {
			return true
		}
	}
	return false
}
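A minimal usage sketch for this package, assuming a binary importing cloud.google.com/go/compute/metadata; the instance attribute name "my-config" is hypothetical:

package main

import (
	"fmt"
	"log"

	"cloud.google.com/go/compute/metadata"
)

func main() {
	// Guard metadata calls with OnGCE before reading values.
	if !metadata.OnGCE() {
		log.Fatal("not running on GCE")
	}
	proj, err := metadata.ProjectID() // cached after the first fetch
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("project:", proj)
	// Blocks, invoking the callback whenever the (hypothetical)
	// "my-config" instance attribute changes; ok is false once deleted.
	err = metadata.Subscribe("instance/attributes/my-config", func(v string, ok bool) error {
		fmt.Printf("my-config = %q (ok=%v)\n", v, ok)
		return nil
	})
	if err != nil {
		log.Fatal(err)
	}
}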
48 vendor/cloud.google.com/go/compute/metadata/metadata_test.go generated vendored Normal file
@@ -0,0 +1,48 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package metadata

import (
	"os"
	"sync"
	"testing"
)

func TestOnGCE_Stress(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping in -short mode")
	}
	var last bool
	for i := 0; i < 100; i++ {
		onGCEOnce = sync.Once{}

		now := OnGCE()
		if i > 0 && now != last {
			t.Errorf("%d. changed from %v to %v", i, last, now)
		}
		last = now
	}
	t.Logf("OnGCE() = %v", last)
}

func TestOnGCE_Force(t *testing.T) {
	onGCEOnce = sync.Once{}
	old := os.Getenv(metadataHostEnv)
	defer os.Setenv(metadataHostEnv, old)
	os.Setenv(metadataHostEnv, "127.0.0.1")
	if !OnGCE() {
		t.Error("OnGCE() = false; want true")
	}
}
278 vendor/cloud.google.com/go/container/container.go generated vendored Normal file
@@ -0,0 +1,278 @@
// Copyright 2014 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package container contains a Google Container Engine client.
//
// For more information about the API,
// see https://cloud.google.com/container-engine/docs
//
// Authentication
//
// See examples of authorization and authentication at
// https://godoc.org/cloud.google.com/go#pkg-examples.
package container // import "cloud.google.com/go/container"

import (
	"errors"
	"fmt"
	"time"

	"golang.org/x/net/context"
	raw "google.golang.org/api/container/v1"
	"google.golang.org/api/option"
	"google.golang.org/api/transport"
)

type Type string

const (
	TypeCreate = Type("createCluster")
	TypeDelete = Type("deleteCluster")
)

type Status string

const (
	StatusDone         = Status("done")
	StatusPending      = Status("pending")
	StatusRunning      = Status("running")
	StatusError        = Status("error")
	StatusProvisioning = Status("provisioning")
	StatusStopping     = Status("stopping")
)

const prodAddr = "https://container.googleapis.com/"
const userAgent = "gcloud-golang-container/20151008"

// Client is a Google Container Engine client, which may be used to manage
// clusters with a project. It must be constructed via NewClient.
type Client struct {
	projectID string
	svc       *raw.Service
}

// NewClient creates a new Google Container Engine client.
func NewClient(ctx context.Context, projectID string, opts ...option.ClientOption) (*Client, error) {
	o := []option.ClientOption{
		option.WithEndpoint(prodAddr),
		option.WithScopes(raw.CloudPlatformScope),
		option.WithUserAgent(userAgent),
	}
	o = append(o, opts...)
	httpClient, endpoint, err := transport.NewHTTPClient(ctx, o...)
	if err != nil {
		return nil, fmt.Errorf("dialing: %v", err)
	}

	svc, err := raw.New(httpClient)
	if err != nil {
		return nil, fmt.Errorf("constructing container client: %v", err)
	}
	svc.BasePath = endpoint

	c := &Client{
		projectID: projectID,
		svc:       svc,
	}

	return c, nil
}

// Resource is a Google Container Engine cluster resource.
type Resource struct {
	// Name is the name of this cluster. The name must be unique
	// within this project and zone, and can be up to 40 characters.
	Name string

	// Description is the description of the cluster. Optional.
	Description string

	// Zone is the Google Compute Engine zone in which the cluster resides.
	Zone string

	// Status is the current status of the cluster. It could either be
	// StatusError, StatusProvisioning, StatusRunning or StatusStopping.
	Status Status

	// Num is the number of the nodes in this cluster resource.
	Num int64

	// APIVersion is the version of the Kubernetes master and kubelets running
	// in this cluster. Allowed value is 0.4.2, or leave blank to
	// pick up the latest stable release.
	APIVersion string

	// Endpoint is the IP address of this cluster's Kubernetes master.
	// The endpoint can be accessed at https://username:password@endpoint/.
	// See Username and Password fields for the username and password information.
	Endpoint string

	// Username is the username to use when accessing the Kubernetes master endpoint.
	Username string

	// Password is the password to use when accessing the Kubernetes master endpoint.
	Password string

	// ContainerIPv4CIDR is the IP addresses of the container pods in
	// this cluster, in CIDR notation (e.g. 1.2.3.4/29).
	ContainerIPv4CIDR string

	// ServicesIPv4CIDR is the IP addresses of the Kubernetes services in this
	// cluster, in CIDR notation (e.g. 1.2.3.4/29). Service addresses are
	// always in the 10.0.0.0/16 range.
	ServicesIPv4CIDR string

	// MachineType is a Google Compute Engine machine type (e.g. n1-standard-1).
	// If none set, the default type is used while creating a new cluster.
	MachineType string

	// This field is ignored. It was removed from the underlying container API in v1.
	SourceImage string

	// Created is the creation time of this cluster.
	Created time.Time
}

func resourceFromRaw(c *raw.Cluster) *Resource {
	if c == nil {
		return nil
	}
	r := &Resource{
		Name:              c.Name,
		Description:       c.Description,
		Zone:              c.Zone,
		Status:            Status(c.Status),
		Num:               c.InitialNodeCount,
		APIVersion:        c.InitialClusterVersion,
		Endpoint:          c.Endpoint,
		Username:          c.MasterAuth.Username,
		Password:          c.MasterAuth.Password,
		ContainerIPv4CIDR: c.ClusterIpv4Cidr,
		ServicesIPv4CIDR:  c.ServicesIpv4Cidr,
		MachineType:       c.NodeConfig.MachineType,
	}
	r.Created, _ = time.Parse(time.RFC3339, c.CreateTime)
	return r
}

func resourcesFromRaw(c []*raw.Cluster) []*Resource {
	r := make([]*Resource, len(c))
	for i, val := range c {
		r[i] = resourceFromRaw(val)
	}
	return r
}

// Op represents a Google Container Engine API operation.
type Op struct {
	// Name is the name of the operation.
	Name string

	// Zone is the Google Compute Engine zone.
	Zone string

	// This field is ignored. It was removed from the underlying container API in v1.
	TargetURL string

	// Type is the operation type. It could either be TypeCreate or TypeDelete.
	Type Type

	// Status is the current status of this operation. It could be either
	// OpDone or OpPending.
	Status Status
}

func opFromRaw(o *raw.Operation) *Op {
	if o == nil {
		return nil
	}
	return &Op{
		Name:   o.Name,
		Zone:   o.Zone,
		Type:   Type(o.OperationType),
		Status: Status(o.Status),
	}
}

func opsFromRaw(o []*raw.Operation) []*Op {
	ops := make([]*Op, len(o))
	for i, val := range o {
		ops[i] = opFromRaw(val)
	}
	return ops
}

// Clusters returns a list of cluster resources from the specified zone.
// If no zone is specified, it returns all clusters under the user's project.
func (c *Client) Clusters(ctx context.Context, zone string) ([]*Resource, error) {
	if zone == "" {
		zone = "-"
	}
	resp, err := c.svc.Projects.Zones.Clusters.List(c.projectID, zone).Do()
	if err != nil {
		return nil, err
	}
	return resourcesFromRaw(resp.Clusters), nil
}

// Cluster returns metadata about the specified cluster.
func (c *Client) Cluster(ctx context.Context, zone, name string) (*Resource, error) {
	resp, err := c.svc.Projects.Zones.Clusters.Get(c.projectID, zone, name).Do()
	if err != nil {
		return nil, err
	}
	return resourceFromRaw(resp), nil
}

// CreateCluster creates a new cluster with the provided metadata
// in the specified zone.
func (c *Client) CreateCluster(ctx context.Context, zone string, resource *Resource) (*Resource, error) {
	panic("not implemented")
}

// DeleteCluster deletes a cluster.
func (c *Client) DeleteCluster(ctx context.Context, zone, name string) error {
	_, err := c.svc.Projects.Zones.Clusters.Delete(c.projectID, zone, name).Do()
	return err
}

// Operations returns a list of operations from the specified zone.
// If no zone is specified, it returns all of the operations running
// under the user's project.
func (c *Client) Operations(ctx context.Context, zone string) ([]*Op, error) {
	if zone == "" {
		resp, err := c.svc.Projects.Zones.Operations.List(c.projectID, "-").Do()
		if err != nil {
			return nil, err
		}
		return opsFromRaw(resp.Operations), nil
	}
	resp, err := c.svc.Projects.Zones.Operations.List(c.projectID, zone).Do()
	if err != nil {
		return nil, err
	}
	return opsFromRaw(resp.Operations), nil
}

// Operation returns an operation.
func (c *Client) Operation(ctx context.Context, zone, name string) (*Op, error) {
	resp, err := c.svc.Projects.Zones.Operations.Get(c.projectID, zone, name).Do()
	if err != nil {
		return nil, err
	}
	if resp.StatusMessage != "" {
		return nil, errors.New(resp.StatusMessage)
	}
	return opFromRaw(resp), nil
}
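A minimal usage sketch for the client above (not part of the diff): it assumes
the vendored import path cloud.google.com/go/container and a hypothetical
project ID, and lists clusters across all zones.

	package main

	import (
		"fmt"
		"log"

		"cloud.google.com/go/container"
		"golang.org/x/net/context"
	)

	func main() {
		ctx := context.Background()
		// NewClient picks up Application Default Credentials unless other
		// client options are supplied.
		c, err := container.NewClient(ctx, "my-project")
		if err != nil {
			log.Fatal(err)
		}
		// An empty zone is treated as "-", i.e. clusters from all zones.
		clusters, err := c.Clusters(ctx, "")
		if err != nil {
			log.Fatal(err)
		}
		for _, cl := range clusters {
			fmt.Println(cl.Name, cl.Zone, cl.Status)
		}
	}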
600 vendor/cloud.google.com/go/datastore/datastore.go generated vendored Normal file
@@ -0,0 +1,600 @@
// Copyright 2014 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package datastore

import (
	"errors"
	"fmt"
	"log"
	"os"
	"reflect"

	"cloud.google.com/go/internal/version"

	"github.com/golang/protobuf/proto"
	"golang.org/x/net/context"
	"google.golang.org/api/option"
	"google.golang.org/api/transport"
	pb "google.golang.org/genproto/googleapis/datastore/v1"
	"google.golang.org/grpc"
	"google.golang.org/grpc/metadata"
)

const (
	prodAddr  = "datastore.googleapis.com:443"
	userAgent = "gcloud-golang-datastore/20160401"
)

// ScopeDatastore grants permissions to view and/or manage datastore entities
const ScopeDatastore = "https://www.googleapis.com/auth/datastore"

// resourcePrefixHeader is the name of the metadata header used to indicate
// the resource being operated on.
const resourcePrefixHeader = "google-cloud-resource-prefix"

// protoClient is an interface for *transport.ProtoClient to support injecting
// fake clients in tests.
type protoClient interface {
	Call(context.Context, string, proto.Message, proto.Message) error
}

// datastoreClient is a wrapper for the pb.DatastoreClient that includes gRPC
// metadata to be sent in each request for server-side traffic management.
type datastoreClient struct {
	c  pb.DatastoreClient
	md metadata.MD
}

func newDatastoreClient(conn *grpc.ClientConn, projectID string) pb.DatastoreClient {
	return &datastoreClient{
		c: pb.NewDatastoreClient(conn),
		md: metadata.Pairs(
			resourcePrefixHeader, "projects/"+projectID,
			"x-goog-api-client", fmt.Sprintf("gl-go/%s gccl/%s grpc/", version.Go(), version.Repo)),
	}
}

func (dc *datastoreClient) Lookup(ctx context.Context, in *pb.LookupRequest, opts ...grpc.CallOption) (*pb.LookupResponse, error) {
	return dc.c.Lookup(metadata.NewContext(ctx, dc.md), in, opts...)
}

func (dc *datastoreClient) RunQuery(ctx context.Context, in *pb.RunQueryRequest, opts ...grpc.CallOption) (*pb.RunQueryResponse, error) {
	return dc.c.RunQuery(metadata.NewContext(ctx, dc.md), in, opts...)
}

func (dc *datastoreClient) BeginTransaction(ctx context.Context, in *pb.BeginTransactionRequest, opts ...grpc.CallOption) (*pb.BeginTransactionResponse, error) {
	return dc.c.BeginTransaction(metadata.NewContext(ctx, dc.md), in, opts...)
}

func (dc *datastoreClient) Commit(ctx context.Context, in *pb.CommitRequest, opts ...grpc.CallOption) (*pb.CommitResponse, error) {
	return dc.c.Commit(metadata.NewContext(ctx, dc.md), in, opts...)
}

func (dc *datastoreClient) Rollback(ctx context.Context, in *pb.RollbackRequest, opts ...grpc.CallOption) (*pb.RollbackResponse, error) {
	return dc.c.Rollback(metadata.NewContext(ctx, dc.md), in, opts...)
}

func (dc *datastoreClient) AllocateIds(ctx context.Context, in *pb.AllocateIdsRequest, opts ...grpc.CallOption) (*pb.AllocateIdsResponse, error) {
	return dc.c.AllocateIds(metadata.NewContext(ctx, dc.md), in, opts...)
}
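A short sketch of what the wrapper above adds (an illustration, assuming the
same vendored metadata package): every outgoing call is issued on a context
carrying the resource-prefix header, so the backend can attribute traffic to
the project.

	// Equivalent metadata for project "my-project":
	md := metadata.Pairs(resourcePrefixHeader, "projects/my-project")
	ctx := metadata.NewContext(context.Background(), md)
	_ = ctx // each wrapped RPC runs on a context like this one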
// Client is a client for reading and writing data in a datastore dataset.
type Client struct {
	conn     *grpc.ClientConn
	client   pb.DatastoreClient
	endpoint string
	dataset  string // Called dataset by the datastore API, synonym for project ID.
}

// NewClient creates a new Client for a given dataset.
// If the project ID is empty, it is derived from the DATASTORE_PROJECT_ID environment variable.
// If the DATASTORE_EMULATOR_HOST environment variable is set, client will use its value
// to connect to a locally-running datastore emulator.
func NewClient(ctx context.Context, projectID string, opts ...option.ClientOption) (*Client, error) {
	var o []option.ClientOption
	// Environment variables for gcd emulator:
	// https://cloud.google.com/datastore/docs/tools/datastore-emulator
	// If the emulator is available, dial it directly (and don't pass any credentials).
	if addr := os.Getenv("DATASTORE_EMULATOR_HOST"); addr != "" {
		conn, err := grpc.Dial(addr, grpc.WithInsecure())
		if err != nil {
			return nil, fmt.Errorf("grpc.Dial: %v", err)
		}
		o = []option.ClientOption{option.WithGRPCConn(conn)}
	} else {
		o = []option.ClientOption{
			option.WithEndpoint(prodAddr),
			option.WithScopes(ScopeDatastore),
			option.WithUserAgent(userAgent),
		}
	}
	// Warn if we see the legacy emulator environment variables.
	if os.Getenv("DATASTORE_HOST") != "" && os.Getenv("DATASTORE_EMULATOR_HOST") == "" {
		log.Print("WARNING: legacy environment variable DATASTORE_HOST is ignored. Use DATASTORE_EMULATOR_HOST instead.")
	}
	if os.Getenv("DATASTORE_DATASET") != "" && os.Getenv("DATASTORE_PROJECT_ID") == "" {
		log.Print("WARNING: legacy environment variable DATASTORE_DATASET is ignored. Use DATASTORE_PROJECT_ID instead.")
	}
	if projectID == "" {
		projectID = os.Getenv("DATASTORE_PROJECT_ID")
	}
	if projectID == "" {
		return nil, errors.New("datastore: missing project/dataset id")
	}
	o = append(o, opts...)
	conn, err := transport.DialGRPC(ctx, o...)
	if err != nil {
		return nil, fmt.Errorf("dialing: %v", err)
	}
	return &Client{
		conn:    conn,
		client:  newDatastoreClient(conn, projectID),
		dataset: projectID,
	}, nil
}

var (
	// ErrInvalidEntityType is returned when functions like Get or Next are
	// passed a dst or src argument of invalid type.
	ErrInvalidEntityType = errors.New("datastore: invalid entity type")
	// ErrInvalidKey is returned when an invalid key is presented.
	ErrInvalidKey = errors.New("datastore: invalid key")
	// ErrNoSuchEntity is returned when no entity was found for a given key.
	ErrNoSuchEntity = errors.New("datastore: no such entity")
)

type multiArgType int

const (
	multiArgTypeInvalid multiArgType = iota
	multiArgTypePropertyLoadSaver
	multiArgTypeStruct
	multiArgTypeStructPtr
	multiArgTypeInterface
)

// ErrFieldMismatch is returned when a field is to be loaded into a different
// type than the one it was stored from, or when a field is missing or
// unexported in the destination struct.
// StructType is the type of the struct pointed to by the destination argument
// passed to Get or to Iterator.Next.
type ErrFieldMismatch struct {
	StructType reflect.Type
	FieldName  string
	Reason     string
}

func (e *ErrFieldMismatch) Error() string {
	return fmt.Sprintf("datastore: cannot load field %q into a %q: %s",
		e.FieldName, e.StructType, e.Reason)
}

// GeoPoint represents a location as latitude/longitude in degrees.
type GeoPoint struct {
	Lat, Lng float64
}

// Valid returns whether a GeoPoint is within [-90, 90] latitude and [-180, 180] longitude.
func (g GeoPoint) Valid() bool {
	return -90 <= g.Lat && g.Lat <= 90 && -180 <= g.Lng && g.Lng <= 180
}

func keyToProto(k *Key) *pb.Key {
	if k == nil {
		return nil
	}

	// TODO(jbd): Eliminate unrequired allocations.
	var path []*pb.Key_PathElement
	for {
		el := &pb.Key_PathElement{Kind: k.Kind}
		if k.ID != 0 {
			el.IdType = &pb.Key_PathElement_Id{k.ID}
		} else if k.Name != "" {
			el.IdType = &pb.Key_PathElement_Name{k.Name}
		}
		path = append([]*pb.Key_PathElement{el}, path...)
		if k.Parent == nil {
			break
		}
		k = k.Parent
	}
	key := &pb.Key{Path: path}
	if k.Namespace != "" {
		key.PartitionId = &pb.PartitionId{
			NamespaceId: k.Namespace,
		}
	}
	return key
}

// protoToKey decodes a protocol buffer representation of a key into an
// equivalent *Key object. If the key is invalid, protoToKey will return the
// invalid key along with ErrInvalidKey.
func protoToKey(p *pb.Key) (*Key, error) {
	var key *Key
	var namespace string
	if partition := p.PartitionId; partition != nil {
		namespace = partition.NamespaceId
	}
	for _, el := range p.Path {
		key = &Key{
			Namespace: namespace,
			Kind:      el.Kind,
			ID:        el.GetId(),
			Name:      el.GetName(),
			Parent:    key,
		}
	}
	if !key.valid() { // Also detects key == nil.
		return key, ErrInvalidKey
	}
	return key, nil
}
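For orientation, a package-internal sketch of the round trip between the two
helpers above; the key literal is hypothetical.

	// keyToProto and protoToKey are inverses for valid keys.
	k := &Key{Kind: "Article", Name: "a1", Parent: &Key{Kind: "Author", ID: 7}}
	p := keyToProto(k)
	k2, err := protoToKey(p)
	if err != nil {
		// A nil or incomplete key would come back with ErrInvalidKey.
	}
	_ = k2 // equal to k, element by element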

// multiKeyToProto is a batch version of keyToProto.
func multiKeyToProto(keys []*Key) []*pb.Key {
	ret := make([]*pb.Key, len(keys))
	for i, k := range keys {
		ret[i] = keyToProto(k)
	}
	return ret
}

// multiProtoToKey is a batch version of protoToKey.
func multiProtoToKey(keys []*pb.Key) ([]*Key, error) {
	hasErr := false
	ret := make([]*Key, len(keys))
	err := make(MultiError, len(keys))
	for i, k := range keys {
		ret[i], err[i] = protoToKey(k)
		if err[i] != nil {
			hasErr = true
		}
	}
	if hasErr {
		return nil, err
	}
	return ret, nil
}

// multiValid is a batch version of Key.valid. It returns an error, not a
// []bool.
func multiValid(key []*Key) error {
	invalid := false
	for _, k := range key {
		if !k.valid() {
			invalid = true
			break
		}
	}
	if !invalid {
		return nil
	}
	err := make(MultiError, len(key))
	for i, k := range key {
		if !k.valid() {
			err[i] = ErrInvalidKey
		}
	}
	return err
}

// checkMultiArg checks that v has type []S, []*S, []I, or []P, for some struct
// type S, for some interface type I, or some non-interface non-pointer type P
// such that P or *P implements PropertyLoadSaver.
//
// It returns what category the slice's elements are, and the reflect.Type
// that represents S, I or P.
//
// As a special case, PropertyList is an invalid type for v.
//
// TODO(djd): multiArg is very confusing. Fold this logic into the
// relevant Put/Get methods to make the logic less opaque.
func checkMultiArg(v reflect.Value) (m multiArgType, elemType reflect.Type) {
	if v.Kind() != reflect.Slice {
		return multiArgTypeInvalid, nil
	}
	if v.Type() == typeOfPropertyList {
		return multiArgTypeInvalid, nil
	}
	elemType = v.Type().Elem()
	if reflect.PtrTo(elemType).Implements(typeOfPropertyLoadSaver) {
		return multiArgTypePropertyLoadSaver, elemType
	}
	switch elemType.Kind() {
	case reflect.Struct:
		return multiArgTypeStruct, elemType
	case reflect.Interface:
		return multiArgTypeInterface, elemType
	case reflect.Ptr:
		elemType = elemType.Elem()
		if elemType.Kind() == reflect.Struct {
			return multiArgTypeStructPtr, elemType
		}
	}
	return multiArgTypeInvalid, nil
}
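An illustrative, package-internal sketch of the categories reported by
checkMultiArg; the struct type S is hypothetical.

	type S struct{ A int }
	m, _ := checkMultiArg(reflect.ValueOf([]S(nil)))          // multiArgTypeStruct
	m, _ = checkMultiArg(reflect.ValueOf([]*S(nil)))          // multiArgTypeStructPtr
	m, _ = checkMultiArg(reflect.ValueOf([]interface{}(nil))) // multiArgTypeInterface
	m, _ = checkMultiArg(reflect.ValueOf(PropertyList(nil)))  // multiArgTypeInvalid (special case)
	_ = m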

// Close closes the Client.
func (c *Client) Close() error {
	return c.conn.Close()
}

// Get loads the entity stored for key into dst, which must be a struct pointer
// or implement PropertyLoadSaver. If there is no such entity for the key, Get
// returns ErrNoSuchEntity.
//
// The values of dst's unmatched struct fields are not modified, and matching
// slice-typed fields are not reset before appending to them. In particular, it
// is recommended to pass a pointer to a zero valued struct on each Get call.
//
// ErrFieldMismatch is returned when a field is to be loaded into a different
// type than the one it was stored from, or when a field is missing or
// unexported in the destination struct. ErrFieldMismatch is only returned if
// dst is a struct pointer.
func (c *Client) Get(ctx context.Context, key *Key, dst interface{}) error {
	if dst == nil { // get catches nil interfaces; we need to catch nil ptr here
		return ErrInvalidEntityType
	}
	err := c.get(ctx, []*Key{key}, []interface{}{dst}, nil)
	if me, ok := err.(MultiError); ok {
		return me[0]
	}
	return err
}

// GetMulti is a batch version of Get.
//
// dst must be a []S, []*S, []I or []P, for some struct type S, some interface
// type I, or some non-interface non-pointer type P such that P or *P
// implements PropertyLoadSaver. If an []I, each element must be a valid dst
// for Get: it must be a struct pointer or implement PropertyLoadSaver.
//
// As a special case, PropertyList is an invalid type for dst, even though a
// PropertyList is a slice of structs. It is treated as invalid to avoid being
// mistakenly passed when []PropertyList was intended.
func (c *Client) GetMulti(ctx context.Context, keys []*Key, dst interface{}) error {
	return c.get(ctx, keys, dst, nil)
}

func (c *Client) get(ctx context.Context, keys []*Key, dst interface{}, opts *pb.ReadOptions) error {
	v := reflect.ValueOf(dst)
	multiArgType, _ := checkMultiArg(v)

	// Sanity checks
	if multiArgType == multiArgTypeInvalid {
		return errors.New("datastore: dst has invalid type")
	}
	if len(keys) != v.Len() {
		return errors.New("datastore: keys and dst slices have different length")
	}
	if len(keys) == 0 {
		return nil
	}

	// Go through keys, validate them, serialize them, and create a map from each key to its index.
	multiErr, any := make(MultiError, len(keys)), false
	keyMap := make(map[string]int)
	pbKeys := make([]*pb.Key, len(keys))
	for i, k := range keys {
		if !k.valid() {
			multiErr[i] = ErrInvalidKey
			any = true
		} else {
			keyMap[k.String()] = i
			pbKeys[i] = keyToProto(k)
		}
	}
	if any {
		return multiErr
	}
	req := &pb.LookupRequest{
		ProjectId:   c.dataset,
		Keys:        pbKeys,
		ReadOptions: opts,
	}
	resp, err := c.client.Lookup(ctx, req)
	if err != nil {
		return err
	}
	found := resp.Found
	missing := resp.Missing
	// Upper bound 100 iterations to prevent infinite loop.
	// We choose 100 iterations somewhat logically:
	// Max number of Entities you can request from Datastore is 1,000.
	// Max size for a Datastore Entity is 1 MiB.
	// Max request size is 10 MiB, so we assume max response size is also 10 MiB.
	// 1,000 / 10 = 100.
	// Note that if ctx has a deadline, the deadline will probably
	// be hit before we reach 100 iterations.
	for i := 0; len(resp.Deferred) > 0 && i < 100; i++ {
		req.Keys = resp.Deferred
		resp, err = c.client.Lookup(ctx, req)
		if err != nil {
			return err
		}
		found = append(found, resp.Found...)
		missing = append(missing, resp.Missing...)
	}
	if len(keys) != len(found)+len(missing) {
		return errors.New("datastore: internal error: server returned the wrong number of entities")
	}
	for _, e := range found {
		k, err := protoToKey(e.Entity.Key)
		if err != nil {
			return errors.New("datastore: internal error: server returned an invalid key")
		}
		index := keyMap[k.String()]
		elem := v.Index(index)
		if multiArgType == multiArgTypePropertyLoadSaver || multiArgType == multiArgTypeStruct {
			elem = elem.Addr()
		}
		if multiArgType == multiArgTypeStructPtr && elem.IsNil() {
			elem.Set(reflect.New(elem.Type().Elem()))
		}
		if err := loadEntityProto(elem.Interface(), e.Entity); err != nil {
			multiErr[index] = err
			any = true
		}
	}
	for _, e := range missing {
		k, err := protoToKey(e.Entity.Key)
		if err != nil {
			return errors.New("datastore: internal error: server returned an invalid key")
		}
		multiErr[keyMap[k.String()]] = ErrNoSuchEntity
		any = true
	}
	if any {
		return multiErr
	}
	return nil
}

// Put saves the entity src into the datastore with key k. src must be a struct
// pointer or implement PropertyLoadSaver; if a struct pointer then any
// unexported fields of that struct will be skipped. If k is an incomplete key,
// the returned key will be a unique key generated by the datastore.
func (c *Client) Put(ctx context.Context, key *Key, src interface{}) (*Key, error) {
	k, err := c.PutMulti(ctx, []*Key{key}, []interface{}{src})
	if err != nil {
		if me, ok := err.(MultiError); ok {
			return nil, me[0]
		}
		return nil, err
	}
	return k[0], nil
}

// PutMulti is a batch version of Put.
//
// src must satisfy the same conditions as the dst argument to GetMulti.
func (c *Client) PutMulti(ctx context.Context, keys []*Key, src interface{}) ([]*Key, error) {
	mutations, err := putMutations(keys, src)
	if err != nil {
		return nil, err
	}

	// Make the request.
	req := &pb.CommitRequest{
		ProjectId: c.dataset,
		Mutations: mutations,
		Mode:      pb.CommitRequest_NON_TRANSACTIONAL,
	}
	resp, err := c.client.Commit(ctx, req)
	if err != nil {
		return nil, err
	}

	// Copy any newly minted keys into the returned keys.
	ret := make([]*Key, len(keys))
	for i, key := range keys {
		if key.Incomplete() {
			// This key is in the mutation results.
			ret[i], err = protoToKey(resp.MutationResults[i].Key)
			if err != nil {
				return nil, errors.New("datastore: internal error: server returned an invalid key")
			}
		} else {
			ret[i] = key
		}
	}
	return ret, nil
}

func putMutations(keys []*Key, src interface{}) ([]*pb.Mutation, error) {
	v := reflect.ValueOf(src)
	multiArgType, _ := checkMultiArg(v)
	if multiArgType == multiArgTypeInvalid {
		return nil, errors.New("datastore: src has invalid type")
	}
	if len(keys) != v.Len() {
		return nil, errors.New("datastore: key and src slices have different length")
	}
	if len(keys) == 0 {
		return nil, nil
	}
	if err := multiValid(keys); err != nil {
		return nil, err
	}
	mutations := make([]*pb.Mutation, 0, len(keys))
	multiErr := make(MultiError, len(keys))
	hasErr := false
	for i, k := range keys {
		elem := v.Index(i)
		// Two cases where we need to take the address:
		// 1) multiArgTypePropertyLoadSaver => &elem implements PLS
		// 2) multiArgTypeStruct => saveEntity needs *struct
		if multiArgType == multiArgTypePropertyLoadSaver || multiArgType == multiArgTypeStruct {
			elem = elem.Addr()
		}
		p, err := saveEntity(k, elem.Interface())
		if err != nil {
			multiErr[i] = err
			hasErr = true
		}
		var mut *pb.Mutation
		if k.Incomplete() {
			mut = &pb.Mutation{Operation: &pb.Mutation_Insert{p}}
		} else {
			mut = &pb.Mutation{Operation: &pb.Mutation_Upsert{p}}
		}
		mutations = append(mutations, mut)
	}
	if hasErr {
		return nil, multiErr
	}
	return mutations, nil
}
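The mutation choice above is worth spelling out; a sketch using this package's
exported key constructors, with hypothetical kind and name.

	// Incomplete keys become Inserts (the server assigns the ID);
	// complete keys become Upserts (create or overwrite).
	k1 := IncompleteKey("Post", nil) // -> pb.Mutation_Insert
	k2 := NameKey("Post", "p1", nil) // -> pb.Mutation_Upsert
	_, _ = k1, k2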

// Delete deletes the entity for the given key.
func (c *Client) Delete(ctx context.Context, key *Key) error {
	err := c.DeleteMulti(ctx, []*Key{key})
	if me, ok := err.(MultiError); ok {
		return me[0]
	}
	return err
}

// DeleteMulti is a batch version of Delete.
func (c *Client) DeleteMulti(ctx context.Context, keys []*Key) error {
	mutations, err := deleteMutations(keys)
	if err != nil {
		return err
	}

	req := &pb.CommitRequest{
		ProjectId: c.dataset,
		Mutations: mutations,
		Mode:      pb.CommitRequest_NON_TRANSACTIONAL,
	}
	_, err = c.client.Commit(ctx, req)
	return err
}

func deleteMutations(keys []*Key) ([]*pb.Mutation, error) {
	mutations := make([]*pb.Mutation, 0, len(keys))
	for _, k := range keys {
		if k.Incomplete() {
			return nil, fmt.Errorf("datastore: can't delete the incomplete key: %v", k)
		}
		mutations = append(mutations, &pb.Mutation{
			Operation: &pb.Mutation_Delete{keyToProto(k)},
		})
	}
	return mutations, nil
}
2776 vendor/cloud.google.com/go/datastore/datastore_test.go generated vendored Normal file
File diff suppressed because it is too large.
420 vendor/cloud.google.com/go/datastore/doc.go generated vendored Normal file
@@ -0,0 +1,420 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

/*
Package datastore provides a client for Google Cloud Datastore.

Note: This package is in beta. Some backwards-incompatible changes may occur.


Basic Operations

Entities are the unit of storage and are associated with a key. A key
consists of an optional parent key, a string application ID, a string kind
(also known as an entity type), and either a StringID or an IntID. A
StringID is also known as an entity name or key name.

It is valid to create a key with a zero StringID and a zero IntID; this is
called an incomplete key, and does not refer to any saved entity. Putting an
entity into the datastore under an incomplete key will cause a unique key
to be generated for that entity, with a non-zero IntID.

An entity's contents are a mapping from case-sensitive field names to values.
Valid value types are:
  - signed integers (int, int8, int16, int32 and int64),
  - bool,
  - string,
  - float32 and float64,
  - []byte (up to 1 megabyte in length),
  - any type whose underlying type is one of the above predeclared types,
  - *Key,
  - GeoPoint,
  - time.Time (stored with microsecond precision),
  - structs whose fields are all valid value types,
  - pointers to structs whose fields are all valid value types,
  - slices of any of the above.

Slices of structs are valid, as are structs that contain slices.

The Get and Put functions load and save an entity's contents. An entity's
contents are typically represented by a struct pointer.

Example code:

	type Entity struct {
		Value string
	}

	func main() {
		ctx := context.Background()

		// Create a datastore client. In a typical application, you would create
		// a single client which is reused for every datastore operation.
		dsClient, err := datastore.NewClient(ctx, "my-project")
		if err != nil {
			// Handle error.
		}

		k := datastore.NameKey("Entity", "stringID", nil)
		e := new(Entity)
		if err := dsClient.Get(ctx, k, e); err != nil {
			// Handle error.
		}

		old := e.Value
		e.Value = "Hello World!"

		if _, err := dsClient.Put(ctx, k, e); err != nil {
			// Handle error.
		}

		fmt.Printf("Updated value from %q to %q\n", old, e.Value)
	}

GetMulti, PutMulti and DeleteMulti are batch versions of the Get, Put and
Delete functions. They take a []*Key instead of a *Key, and may return a
datastore.MultiError when encountering partial failure.
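A sketch of inspecting such a MultiError, assuming a client, ctx and the Post
type used by other examples in this package:

	keys := []*datastore.Key{
		datastore.NameKey("Post", "p1", nil),
		datastore.NameKey("Post", "p2", nil),
	}
	posts := make([]Post, 2)
	if err := client.GetMulti(ctx, keys, posts); err != nil {
		if me, ok := err.(datastore.MultiError); ok {
			for i, e := range me {
				if e == datastore.ErrNoSuchEntity {
					// keys[i] had no stored entity; posts[i] is untouched.
					_ = i
				}
			}
		} else {
			// A non-batch failure, e.g. the RPC itself failed.
		}
	}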


Properties

An entity's contents can be represented by a variety of types. These are
typically struct pointers, but can also be any type that implements the
PropertyLoadSaver interface. If using a struct pointer, you do not have to
explicitly implement the PropertyLoadSaver interface; the datastore will
automatically convert via reflection. If a struct pointer does implement that
interface then those methods will be used in preference to the default
behavior for struct pointers. Struct pointers are more strongly typed and are
easier to use; PropertyLoadSavers are more flexible.

The actual types passed do not have to match between Get and Put calls or even
across different calls to datastore. It is valid to put a *PropertyList and
get that same entity as a *myStruct, or put a *myStruct0 and get a *myStruct1.
Conceptually, any entity is saved as a sequence of properties, and is loaded
into the destination value on a property-by-property basis. When loading into
a struct pointer, an entity that cannot be completely represented (such as a
missing field) will result in an ErrFieldMismatch error but it is up to the
caller whether this error is fatal, recoverable or ignorable.

By default, for struct pointers, all properties are potentially indexed, and
the property name is the same as the field name (and hence must start with an
upper case letter).

Fields may have a `datastore:"name,options"` tag. The tag name is the
property name, which must be one or more valid Go identifiers joined by ".",
but may start with a lower case letter. An empty tag name means to just use the
field name. A "-" tag name means that the datastore will ignore that field.

The only valid options are "omitempty", "noindex" and "flatten".

If the options include "omitempty" and the value of the field is empty, then the field will be omitted on Save.
The empty values are false, 0, any nil interface value, and any array, slice, map, or string of length zero.
Struct field values will never be empty.

If options include "noindex" then the field will not be indexed. All fields are indexed
by default. Strings or byte slices longer than 1500 bytes cannot be indexed;
fields used to store long strings and byte slices must be tagged with "noindex"
or they will cause Put operations to fail.

For a nested struct field, the options may also include "flatten". This indicates
that the immediate fields and any nested substruct fields of the nested struct should be
flattened. See below for examples.

To use multiple options together, separate them by a comma.
The order does not matter.

If there are no options, the comma may be omitted.

Example code:

	// A and B are renamed to a and b.
	// A, C and J are not indexed.
	// D's tag is equivalent to having no tag at all (E).
	// I is ignored entirely by the datastore.
	// J has tag information for both the datastore and json packages.
	type TaggedStruct struct {
		A int `datastore:"a,noindex"`
		B int `datastore:"b"`
		C int `datastore:",noindex"`
		D int `datastore:""`
		E int
		I int `datastore:"-"`
		J int `datastore:",noindex" json:"j"`
	}


Key Field

If the struct contains a *datastore.Key field tagged with the name "__key__",
its value will be ignored on Put. When reading the Entity back into the Go struct,
the field will be populated with the *datastore.Key value used to query for
the Entity.

Example code:

	type MyEntity struct {
		A int
		K *datastore.Key `datastore:"__key__"`
	}

	k := datastore.NameKey("Entity", "stringID", nil)
	e := MyEntity{A: 12}
	k, err := dsClient.Put(ctx, k, &e)
	if err != nil {
		// Handle error.
	}

	var entities []MyEntity
	q := datastore.NewQuery("Entity").Filter("A =", 12).Limit(1)
	_, err = dsClient.GetAll(ctx, q, &entities)
	if err != nil {
		// Handle error.
	}

	log.Println(entities[0])
	// Prints {12 /Entity,stringID}


Structured Properties

If the struct pointed to contains other structs, then the nested or embedded
structs are themselves saved as Entity values. For example, given these definitions:

	type Inner struct {
		W int32
		X string
	}

	type Outer struct {
		I Inner
	}

then an Outer would have one property, Inner, encoded as an Entity value.

If an outer struct is tagged "noindex" then all of its implicit flattened
fields are effectively "noindex".

If the Inner struct contains a *Key field with the name "__key__", like so:

	type Inner struct {
		W int32
		X string
		K *datastore.Key `datastore:"__key__"`
	}

	type Outer struct {
		I Inner
	}

then the value of K will be used as the Key for Inner, represented
as an Entity value in datastore.

If any nested struct fields should be flattened, instead of encoded as
Entity values, the nested struct field should be tagged with the "flatten"
option. For example, given the following:

	type Inner1 struct {
		W int32
		X string
	}

	type Inner2 struct {
		Y float64
	}

	type Inner3 struct {
		Z bool
	}

	type Inner4 struct {
		WW int
	}

	type Inner5 struct {
		X Inner4
	}

	type Outer struct {
		A int16
		I []Inner1 `datastore:",flatten"`
		J Inner2   `datastore:",flatten"`
		K Inner5   `datastore:",flatten"`
		Inner3     `datastore:",flatten"`
	}

an Outer's properties would be equivalent to those of:

	type OuterEquivalent struct {
		A          int16
		IDotW      []int32  `datastore:"I.W"`
		IDotX      []string `datastore:"I.X"`
		JDotY      float64  `datastore:"J.Y"`
		KDotXDotWW int      `datastore:"K.X.WW"`
		Z          bool
	}

Note that the "flatten" option cannot be used for Entity value fields.
The server will reject any dotted field names for an Entity value.


The PropertyLoadSaver Interface

An entity's contents can also be represented by any type that implements the
PropertyLoadSaver interface. This type may be a struct pointer, but it does
not have to be. The datastore package will call Load when getting the entity's
contents, and Save when putting the entity's contents.
Possible uses include deriving non-stored fields, verifying fields, or indexing
a field only if its value is positive.

Example code:

	type CustomPropsExample struct {
		I, J int
		// Sum is not stored, but should always be equal to I + J.
		Sum int `datastore:"-"`
	}

	func (x *CustomPropsExample) Load(ps []datastore.Property) error {
		// Load I and J as usual.
		if err := datastore.LoadStruct(x, ps); err != nil {
			return err
		}
		// Derive the Sum field.
		x.Sum = x.I + x.J
		return nil
	}

	func (x *CustomPropsExample) Save() ([]datastore.Property, error) {
		// Validate the Sum field.
		if x.Sum != x.I+x.J {
			return nil, errors.New("CustomPropsExample has inconsistent sum")
		}
		// Save I and J as usual. The code below is equivalent to calling
		// "return datastore.SaveStruct(x)", but is done manually for
		// demonstration purposes.
		return []datastore.Property{
			{
				Name:  "I",
				Value: int64(x.I),
			},
			{
				Name:  "J",
				Value: int64(x.J),
			},
		}, nil
	}

The *PropertyList type implements PropertyLoadSaver, and can therefore hold an
arbitrary entity's contents.


Queries

Queries retrieve entities based on their properties or key's ancestry. Running
a query yields an iterator of results: either keys or (key, entity) pairs.
Queries are re-usable and it is safe to call Query.Run from concurrent
goroutines. Iterators are not safe for concurrent use.

Queries are immutable, and are either created by calling NewQuery, or derived
from an existing query by calling a method like Filter or Order that returns a
new query value. A query is typically constructed by calling NewQuery followed
by a chain of zero or more such methods. These methods are:
  - Ancestor and Filter constrain the entities returned by running a query.
  - Order affects the order in which they are returned.
  - Project constrains the fields returned.
  - Distinct de-duplicates projected entities.
  - KeysOnly makes the iterator return only keys, not (key, entity) pairs.
  - Start, End, Offset and Limit define which sub-sequence of matching entities
    to return. Start and End take cursors, Offset and Limit take integers. Start
    and Offset affect the first result, End and Limit affect the last result.
    If both Start and Offset are set, then the offset is relative to Start.
    If both End and Limit are set, then the earliest constraint wins. Limit is
    relative to Start+Offset, not relative to End. As a special case, a
    negative limit means unlimited.

Example code:

	type Widget struct {
		Description string
		Price       int
	}

	func printWidgets(ctx context.Context, client *datastore.Client) {
		q := datastore.NewQuery("Widget").
			Filter("Price <", 1000).
			Order("-Price")
		for t := client.Run(ctx, q); ; {
			var x Widget
			key, err := t.Next(&x)
			if err == iterator.Done {
				break
			}
			if err != nil {
				// Handle error.
			}
			fmt.Printf("Key=%v\nWidget=%#v\n\n", key, x)
		}
	}


Transactions

Client.RunInTransaction runs a function in a transaction.

Example code:

	type Counter struct {
		Count int
	}

	func incCount(ctx context.Context, client *datastore.Client) {
		var count int
		key := datastore.NameKey("Counter", "singleton", nil)
		_, err := client.RunInTransaction(ctx, func(tx *datastore.Transaction) error {
			var x Counter
			if err := tx.Get(key, &x); err != nil && err != datastore.ErrNoSuchEntity {
				return err
			}
			x.Count++
			if _, err := tx.Put(key, &x); err != nil {
				return err
			}
			count = x.Count
			return nil
		})
		if err != nil {
			// Handle error.
		}
		// The value of count is only valid once the transaction is successful
		// (RunInTransaction has returned nil).
		fmt.Printf("Count=%d\n", count)
	}


Google Cloud Datastore Emulator

This package supports the Cloud Datastore emulator, which is useful for testing and
development. Environment variables are used to indicate that datastore traffic should be
directed to the emulator instead of the production Datastore service.

To install and set up the emulator and its environment variables, see the documentation
at https://cloud.google.com/datastore/docs/tools/datastore-emulator.
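Example code (a sketch; the host/port is whatever your emulator instance
prints on startup):

	// Typically exported in the shell before the program starts:
	//   export DATASTORE_EMULATOR_HOST=localhost:8081
	// os.Setenv is used here purely for illustration.
	os.Setenv("DATASTORE_EMULATOR_HOST", "localhost:8081")
	client, err := datastore.NewClient(ctx, "my-project")
	if err != nil {
		// Handle error.
	}
	_ = client // Traffic now goes to the emulator; no credentials are needed.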

Authentication

See examples of authorization and authentication at
https://godoc.org/cloud.google.com/go#pkg-examples.
*/
package datastore // import "cloud.google.com/go/datastore"
47 vendor/cloud.google.com/go/datastore/errors.go generated vendored Normal file
@@ -0,0 +1,47 @@
// Copyright 2014 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// This file provides error functions for common API failure modes.

package datastore

import (
	"fmt"
)

// MultiError is returned by batch operations when there are errors with
// particular elements. Errors will be in a one-to-one correspondence with
// the input elements; successful elements will have a nil entry.
type MultiError []error

func (m MultiError) Error() string {
	s, n := "", 0
	for _, e := range m {
		if e != nil {
			if n == 0 {
				s = e.Error()
			}
			n++
		}
	}
	switch n {
	case 0:
		return "(0 errors)"
	case 1:
		return s
	case 2:
		return s + " (and 1 other error)"
	}
	return fmt.Sprintf("%s (and %d other errors)", s, n-1)
}
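A sketch of the string this method produces, assuming the usual errors and fmt
imports on the caller's side:

	me := datastore.MultiError{
		nil, // the first element succeeded
		errors.New("boom"),
		errors.New("bang"),
	}
	fmt.Println(me.Error()) // prints: boom (and 1 other error)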
545 vendor/cloud.google.com/go/datastore/example_test.go generated vendored Normal file
@@ -0,0 +1,545 @@
// Copyright 2014 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package datastore_test

import (
	"fmt"
	"log"
	"time"

	"cloud.google.com/go/datastore"
	"golang.org/x/net/context"
	"google.golang.org/api/iterator"
)

func ExampleNewClient() {
	ctx := context.Background()
	client, err := datastore.NewClient(ctx, "project-id")
	if err != nil {
		// TODO: Handle error.
	}
	_ = client // TODO: Use client.
}

func ExampleClient_Get() {
	ctx := context.Background()
	client, err := datastore.NewClient(ctx, "project-id")
	if err != nil {
		// TODO: Handle error.
	}

	type Article struct {
		Title       string
		Description string
		Body        string `datastore:",noindex"`
		Author      *datastore.Key
		PublishedAt time.Time
	}
	key := datastore.NameKey("Article", "articled1", nil)
	article := &Article{}
	if err := client.Get(ctx, key, article); err != nil {
		// TODO: Handle error.
	}
}

func ExampleClient_Put() {
	ctx := context.Background()
	client, err := datastore.NewClient(ctx, "project-id")
	if err != nil {
		// TODO: Handle error.
	}

	type Article struct {
		Title       string
		Description string
		Body        string `datastore:",noindex"`
		Author      *datastore.Key
		PublishedAt time.Time
	}
	newKey := datastore.IncompleteKey("Article", nil)
	_, err = client.Put(ctx, newKey, &Article{
		Title:       "The title of the article",
		Description: "The description of the article...",
		Body:        "...",
		Author:      datastore.NameKey("Author", "jbd", nil),
		PublishedAt: time.Now(),
	})
	if err != nil {
		// TODO: Handle error.
	}
}

func ExampleClient_Put_flatten() {
	ctx := context.Background()
	client, err := datastore.NewClient(ctx, "project-id")
	if err != nil {
		log.Fatal(err)
	}

	type Animal struct {
		Name  string
		Type  string
		Breed string
	}

	type Human struct {
		Name   string
		Height int
		Pet    Animal `datastore:",flatten"`
	}

	newKey := datastore.IncompleteKey("Human", nil)
	_, err = client.Put(ctx, newKey, &Human{
		Name:   "Susan",
		Height: 67,
		Pet: Animal{
			Name:  "Fluffy",
			Type:  "Cat",
			Breed: "Sphynx",
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}

func ExampleClient_Delete() {
	ctx := context.Background()
	client, err := datastore.NewClient(ctx, "project-id")
	if err != nil {
		// TODO: Handle error.
	}

	key := datastore.NameKey("Article", "articled1", nil)
	if err := client.Delete(ctx, key); err != nil {
		// TODO: Handle error.
	}
}

func ExampleClient_DeleteMulti() {
	ctx := context.Background()
	client, err := datastore.NewClient(ctx, "project-id")
	if err != nil {
		// TODO: Handle error.
	}
	var keys []*datastore.Key
	for i := 1; i <= 10; i++ {
		keys = append(keys, datastore.IDKey("Article", int64(i), nil))
	}
	if err := client.DeleteMulti(ctx, keys); err != nil {
		// TODO: Handle error.
	}
}

type Post struct {
	Title       string
	PublishedAt time.Time
	Comments    int
}

func ExampleClient_GetMulti() {
	ctx := context.Background()
	client, err := datastore.NewClient(ctx, "project-id")
	if err != nil {
		// TODO: Handle error.
	}

	keys := []*datastore.Key{
		datastore.NameKey("Post", "post1", nil),
		datastore.NameKey("Post", "post2", nil),
		datastore.NameKey("Post", "post3", nil),
	}
	posts := make([]Post, 3)
	if err := client.GetMulti(ctx, keys, posts); err != nil {
		// TODO: Handle error.
	}
}

func ExampleClient_PutMulti_slice() {
	ctx := context.Background()
	client, err := datastore.NewClient(ctx, "project-id")
	if err != nil {
		// TODO: Handle error.
	}

	keys := []*datastore.Key{
		datastore.NameKey("Post", "post1", nil),
		datastore.NameKey("Post", "post2", nil),
	}

	// PutMulti with a Post slice.
	posts := []*Post{
		{Title: "Post 1", PublishedAt: time.Now()},
		{Title: "Post 2", PublishedAt: time.Now()},
	}
	if _, err := client.PutMulti(ctx, keys, posts); err != nil {
		// TODO: Handle error.
	}
}

func ExampleClient_PutMulti_interfaceSlice() {
	ctx := context.Background()
	client, err := datastore.NewClient(ctx, "project-id")
	if err != nil {
		// TODO: Handle error.
	}

	keys := []*datastore.Key{
		datastore.NameKey("Post", "post1", nil),
		datastore.NameKey("Post", "post2", nil),
	}

	// PutMulti with an empty interface slice.
	posts := []interface{}{
		&Post{Title: "Post 1", PublishedAt: time.Now()},
		&Post{Title: "Post 2", PublishedAt: time.Now()},
	}
	if _, err := client.PutMulti(ctx, keys, posts); err != nil {
		// TODO: Handle error.
	}
}

func ExampleNewQuery() {
	// Query for Post entities.
	q := datastore.NewQuery("Post")
	_ = q // TODO: Use the query with Client.Run.
}

func ExampleNewQuery_options() {
	// Query to order the posts by the number of comments they have received.
	q := datastore.NewQuery("Post").Order("-Comments")
	// Start listing from an offset and limit the results.
	q = q.Offset(20).Limit(10)
	_ = q // TODO: Use the query.
}

func ExampleClient_Count() {
	ctx := context.Background()
	client, err := datastore.NewClient(ctx, "project-id")
	if err != nil {
		// TODO: Handle error.
	}
	// Count the number of Post entities.
q := datastore.NewQuery("Post")
|
||||
n, err := client.Count(ctx, q)
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
fmt.Printf("There are %d posts.", n)
|
||||
}
|
||||
|
||||
func ExampleClient_Run() {
|
||||
ctx := context.Background()
|
||||
client, err := datastore.NewClient(ctx, "project-id")
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
// List the posts published since yesterday.
|
||||
yesterday := time.Now().Add(-24 * time.Hour)
|
||||
q := datastore.NewQuery("Post").Filter("PublishedAt >", yesterday)
|
||||
it := client.Run(ctx, q)
|
||||
_ = it // TODO: iterate using Next.
|
||||
}
|
||||
|
||||
func ExampleClient_NewTransaction() {
|
||||
ctx := context.Background()
|
||||
client, err := datastore.NewClient(ctx, "project-id")
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
const retries = 3
|
||||
|
||||
// Increment a counter.
|
||||
// See https://cloud.google.com/appengine/articles/sharding_counters for
|
||||
// a more scalable solution.
|
||||
type Counter struct {
|
||||
Count int
|
||||
}
|
||||
|
||||
key := datastore.NameKey("counter", "CounterA", nil)
|
||||
var tx *datastore.Transaction
|
||||
for i := 0; i < retries; i++ {
|
||||
tx, err = client.NewTransaction(ctx)
|
||||
if err != nil {
|
||||
break
|
||||
}
|
||||
|
||||
var c Counter
|
||||
if err = tx.Get(key, &c); err != nil && err != datastore.ErrNoSuchEntity {
|
||||
break
|
||||
}
|
||||
c.Count++
|
||||
if _, err = tx.Put(key, &c); err != nil {
|
||||
break
|
||||
}
|
||||
|
||||
// Attempt to commit the transaction. If there's a conflict, try again.
|
||||
if _, err = tx.Commit(); err != datastore.ErrConcurrentTransaction {
|
||||
break
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
}
|
||||
|
||||
func ExampleClient_RunInTransaction() {
|
||||
ctx := context.Background()
|
||||
client, err := datastore.NewClient(ctx, "project-id")
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
|
||||
// Increment a counter.
|
||||
// See https://cloud.google.com/appengine/articles/sharding_counters for
|
||||
// a more scalable solution.
|
||||
type Counter struct {
|
||||
Count int
|
||||
}
|
||||
|
||||
var count int
|
||||
key := datastore.NameKey("Counter", "singleton", nil)
|
||||
_, err = client.RunInTransaction(ctx, func(tx *datastore.Transaction) error {
|
||||
var x Counter
|
||||
if err := tx.Get(key, &x); err != nil && err != datastore.ErrNoSuchEntity {
|
||||
return err
|
||||
}
|
||||
x.Count++
|
||||
if _, err := tx.Put(key, &x); err != nil {
|
||||
return err
|
||||
}
|
||||
count = x.Count
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
// The value of count is only valid once the transaction is successful
|
||||
// (RunInTransaction has returned nil).
|
||||
fmt.Printf("Count=%d\n", count)
|
||||
}
|
||||
|
||||
func ExampleClient_AllocateIDs() {
|
||||
ctx := context.Background()
|
||||
client, err := datastore.NewClient(ctx, "project-id")
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
var keys []*datastore.Key
|
||||
for i := 0; i < 10; i++ {
|
||||
keys = append(keys, datastore.IncompleteKey("Article", nil))
|
||||
}
|
||||
keys, err = client.AllocateIDs(ctx, keys)
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
_ = keys // TODO: Use keys.
|
||||
}
|
||||
|
||||
func ExampleKey_Encode() {
|
||||
key := datastore.IDKey("Article", 1, nil)
|
||||
encoded := key.Encode()
|
||||
fmt.Println(encoded)
|
||||
// Output: EgsKB0FydGljbGUQAQ
|
||||
}
|
||||
|
||||
func ExampleDecodeKey() {
|
||||
const encoded = "EgsKB0FydGljbGUQAQ"
|
||||
key, err := datastore.DecodeKey(encoded)
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
fmt.Println(key)
|
||||
// Output: /Article,1
|
||||
}
|
||||
|
||||
func ExampleIDKey() {
|
||||
// Key with numeric ID.
|
||||
k := datastore.IDKey("Article", 1, nil)
|
||||
_ = k // TODO: Use key.
|
||||
}
|
||||
|
||||
func ExampleNameKey() {
|
||||
// Key with string ID.
|
||||
k := datastore.NameKey("Article", "article8", nil)
|
||||
_ = k // TODO: Use key.
|
||||
}
|
||||
|
||||
func ExampleIncompleteKey() {
|
||||
k := datastore.IncompleteKey("Article", nil)
|
||||
_ = k // TODO: Use incomplete key.
|
||||
}
|
||||
|
||||
func ExampleClient_GetAll() {
|
||||
ctx := context.Background()
|
||||
client, err := datastore.NewClient(ctx, "project-id")
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
var posts []*Post
|
||||
keys, err := client.GetAll(ctx, datastore.NewQuery("Post"), &posts)
|
||||
for i, key := range keys {
|
||||
fmt.Println(key)
|
||||
fmt.Println(posts[i])
|
||||
}
|
||||
}
|
||||
|
||||
func ExampleCommit_Key() {
	ctx := context.Background()
	client, err := datastore.NewClient(ctx, "")
	if err != nil {
		// TODO: Handle error.
	}
	var pk1, pk2 *datastore.PendingKey
	// Create two posts in a single transaction.
	commit, err := client.RunInTransaction(ctx, func(tx *datastore.Transaction) error {
		var err error
		pk1, err = tx.Put(datastore.IncompleteKey("Post", nil), &Post{Title: "Post 1", PublishedAt: time.Now()})
		if err != nil {
			return err
		}
		pk2, err = tx.Put(datastore.IncompleteKey("Post", nil), &Post{Title: "Post 2", PublishedAt: time.Now()})
		if err != nil {
			return err
		}
		return nil
	})
	if err != nil {
		// TODO: Handle error.
	}
	// Now pk1, pk2 are valid PendingKeys. Let's convert them into real keys
	// using the Commit object.
	k1 := commit.Key(pk1)
	k2 := commit.Key(pk2)
	fmt.Println(k1, k2)
}

func ExampleIterator_Next() {
	ctx := context.Background()
	client, err := datastore.NewClient(ctx, "project-id")
	if err != nil {
		// TODO: Handle error.
	}
	it := client.Run(ctx, datastore.NewQuery("Post"))
	for {
		var p Post
		key, err := it.Next(&p)
		if err == iterator.Done {
			break
		}
		if err != nil {
			// TODO: Handle error.
		}
		fmt.Println(key, p)
	}
}

func ExampleIterator_Cursor() {
	ctx := context.Background()
	client, err := datastore.NewClient(ctx, "project-id")
	if err != nil {
		// TODO: Handle error.
	}
	it := client.Run(ctx, datastore.NewQuery("Post"))
	for {
		var p Post
		_, err := it.Next(&p)
		if err == iterator.Done {
			break
		}
		if err != nil {
			// TODO: Handle error.
		}
		fmt.Println(p)
		cursor, err := it.Cursor()
		if err != nil {
			// TODO: Handle error.
		}
		// When printed, a cursor will display as a string that can be passed
		// to datastore.NewCursor.
		fmt.Printf("to resume with this post, use cursor %s\n", cursor)
	}
}

func ExampleDecodeCursor() {
	// See Query.Start for a fuller example of DecodeCursor.
	// getCursor represents a function that returns a cursor from a previous
	// iteration in string form.
	cursorString := getCursor()
	cursor, err := datastore.DecodeCursor(cursorString)
	if err != nil {
		// TODO: Handle error.
	}
	_ = cursor // TODO: Use the cursor with Query.Start or Query.End.
}

func getCursor() string { return "" }

func ExampleQuery_Start() {
	// This example demonstrates how to use cursors and Query.Start
	// to resume an iteration.
	ctx := context.Background()
	client, err := datastore.NewClient(ctx, "project-id")
	if err != nil {
		// TODO: Handle error.
	}
	// getCursor represents a function that returns a cursor from a previous
	// iteration in string form.
	cursorString := getCursor()
	cursor, err := datastore.DecodeCursor(cursorString)
	if err != nil {
		// TODO: Handle error.
	}
	it := client.Run(ctx, datastore.NewQuery("Post").Start(cursor))
	_ = it // TODO: Use iterator.
}

func ExampleLoadStruct() {
	type Player struct {
		User  string
		Score int
	}
	// Normally LoadStruct would only be used inside a custom implementation of
	// PropertyLoadSaver; this is for illustrative purposes only.
	props := []datastore.Property{
		{Name: "User", Value: "Alice"},
		{Name: "Score", Value: int64(97)},
	}

	var p Player
	if err := datastore.LoadStruct(&p, props); err != nil {
		// TODO: Handle error.
	}
	fmt.Println(p)
	// Output: {Alice 97}
}

func ExampleSaveStruct() {
	type Player struct {
		User  string
		Score int
	}

	p := &Player{
		User:  "Alice",
		Score: 97,
	}
	props, err := datastore.SaveStruct(p)
	if err != nil {
		// TODO: Handle error.
	}
	fmt.Println(props)
	// TODO(jba): make this output stable: Output: [{User Alice false} {Score 97 false}]
}

1040 vendor/cloud.google.com/go/datastore/integration_test.go generated vendored Normal file
File diff suppressed because it is too large

280 vendor/cloud.google.com/go/datastore/key.go generated vendored Normal file
@@ -0,0 +1,280 @@
// Copyright 2014 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package datastore

import (
	"bytes"
	"encoding/base64"
	"encoding/gob"
	"errors"
	"strconv"
	"strings"

	"github.com/golang/protobuf/proto"
	"golang.org/x/net/context"
	pb "google.golang.org/genproto/googleapis/datastore/v1"
)

// Key represents the datastore key for a stored entity.
type Key struct {
	// Kind cannot be empty.
	Kind string
	// Either ID or Name must be zero for the Key to be valid.
	// If both are zero, the Key is incomplete.
	ID   int64
	Name string
	// Parent must either be a complete Key or nil.
	Parent *Key

	// Namespace provides the ability to partition your data for multiple
	// tenants. In most cases, it is not necessary to specify a namespace.
	// See docs on datastore multitenancy for details:
	// https://cloud.google.com/datastore/docs/concepts/multitenancy
	Namespace string
}

// Incomplete reports whether the key does not refer to a stored entity.
func (k *Key) Incomplete() bool {
	return k.Name == "" && k.ID == 0
}

// valid returns whether the key is valid.
func (k *Key) valid() bool {
	if k == nil {
		return false
	}
	for ; k != nil; k = k.Parent {
		if k.Kind == "" {
			return false
		}
		if k.Name != "" && k.ID != 0 {
			return false
		}
		if k.Parent != nil {
			if k.Parent.Incomplete() {
				return false
			}
			if k.Parent.Namespace != k.Namespace {
				return false
			}
		}
	}
	return true
}

// Equal reports whether two keys are equal. Two keys are equal if they are
// both nil, or if their kinds, IDs, names, namespaces and parents are equal.
func (k *Key) Equal(o *Key) bool {
	for {
		if k == nil || o == nil {
			return k == o // if either is nil, both must be nil
		}
		if k.Namespace != o.Namespace || k.Name != o.Name || k.ID != o.ID || k.Kind != o.Kind {
			return false
		}
		if k.Parent == nil && o.Parent == nil {
			return true
		}
		k = k.Parent
		o = o.Parent
	}
}

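// Illustrative sketch (editor's addition, not part of the vendored file):
// how ancestor paths interact with Incomplete and Equal. Uses only the
// constructors defined later in this file.
func exampleKeySemanticsSketch() {
	parent := NameKey("Author", "alice", nil)
	post := IDKey("Post", 42, parent)
	_ = post.Incomplete()                          // false: the key has an ID
	_ = IncompleteKey("Post", parent).Incomplete() // true: neither ID nor Name is set
	// Equal walks the whole ancestor chain, so a structurally identical
	// key built independently compares as equal.
	_ = post.Equal(IDKey("Post", 42, NameKey("Author", "alice", nil))) // true
}
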
// marshal marshals the key's string representation to the buffer.
func (k *Key) marshal(b *bytes.Buffer) {
	if k.Parent != nil {
		k.Parent.marshal(b)
	}
	b.WriteByte('/')
	b.WriteString(k.Kind)
	b.WriteByte(',')
	if k.Name != "" {
		b.WriteString(k.Name)
	} else {
		b.WriteString(strconv.FormatInt(k.ID, 10))
	}
}

// String returns a string representation of the key.
func (k *Key) String() string {
	if k == nil {
		return ""
	}
	b := bytes.NewBuffer(make([]byte, 0, 512))
	k.marshal(b)
	return b.String()
}

// Note: Fields not renamed compared to appengine gobKey struct.
// This ensures gobs created by appengine can be read here, and vice versa.
type gobKey struct {
	Kind      string
	StringID  string
	IntID     int64
	Parent    *gobKey
	AppID     string
	Namespace string
}

func keyToGobKey(k *Key) *gobKey {
	if k == nil {
		return nil
	}
	return &gobKey{
		Kind:      k.Kind,
		StringID:  k.Name,
		IntID:     k.ID,
		Parent:    keyToGobKey(k.Parent),
		Namespace: k.Namespace,
	}
}

func gobKeyToKey(gk *gobKey) *Key {
	if gk == nil {
		return nil
	}
	return &Key{
		Kind:      gk.Kind,
		Name:      gk.StringID,
		ID:        gk.IntID,
		Parent:    gobKeyToKey(gk.Parent),
		Namespace: gk.Namespace,
	}
}

// GobEncode marshals the key into a sequence of bytes
// using an encoding/gob.Encoder.
func (k *Key) GobEncode() ([]byte, error) {
	buf := new(bytes.Buffer)
	if err := gob.NewEncoder(buf).Encode(keyToGobKey(k)); err != nil {
		return nil, err
	}
	return buf.Bytes(), nil
}

// GobDecode unmarshals a sequence of bytes using an encoding/gob.Decoder.
func (k *Key) GobDecode(buf []byte) error {
	gk := new(gobKey)
	if err := gob.NewDecoder(bytes.NewBuffer(buf)).Decode(gk); err != nil {
		return err
	}
	*k = *gobKeyToKey(gk)
	return nil
}

// MarshalJSON marshals the key into JSON.
func (k *Key) MarshalJSON() ([]byte, error) {
	return []byte(`"` + k.Encode() + `"`), nil
}

// UnmarshalJSON unmarshals a key JSON object into a Key.
func (k *Key) UnmarshalJSON(buf []byte) error {
	if len(buf) < 2 || buf[0] != '"' || buf[len(buf)-1] != '"' {
		return errors.New("datastore: bad JSON key")
	}
	k2, err := DecodeKey(string(buf[1 : len(buf)-1]))
	if err != nil {
		return err
	}
	*k = *k2
	return nil
}

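// Illustrative sketch (editor's addition, not part of the vendored file):
// GobEncode/GobDecode above give Keys a stable wire form via the gobKey
// shim, so a Key round-trips through encoding/gob unchanged.
func exampleKeyGobRoundTrip() error {
	k := IDKey("Article", 1, nil)
	var buf bytes.Buffer
	if err := gob.NewEncoder(&buf).Encode(k); err != nil {
		return err
	}
	k2 := new(Key)
	if err := gob.NewDecoder(&buf).Decode(k2); err != nil {
		return err
	}
	if !k.Equal(k2) {
		return errors.New("gob round trip mismatch")
	}
	return nil
}
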
// Encode returns an opaque representation of the key
// suitable for use in HTML and URLs.
// This is compatible with the Python and Java runtimes.
func (k *Key) Encode() string {
	pKey := keyToProto(k)

	b, err := proto.Marshal(pKey)
	if err != nil {
		panic(err)
	}

	// Trailing padding is stripped.
	return strings.TrimRight(base64.URLEncoding.EncodeToString(b), "=")
}

// DecodeKey decodes a key from the opaque representation returned by Encode.
func DecodeKey(encoded string) (*Key, error) {
	// Re-add padding.
	if m := len(encoded) % 4; m != 0 {
		encoded += strings.Repeat("=", 4-m)
	}

	b, err := base64.URLEncoding.DecodeString(encoded)
	if err != nil {
		return nil, err
	}

	pKey := new(pb.Key)
	if err := proto.Unmarshal(b, pKey); err != nil {
		return nil, err
	}
	return protoToKey(pKey)
}

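// Illustrative sketch (editor's addition, not part of the vendored file):
// Encode strips the trailing base64 '=' padding and DecodeKey re-adds it,
// so both padded and unpadded encodings decode to the same key.
func exampleEncodePaddingSketch() bool {
	k := NameKey("Kind", "name", nil)
	enc := k.Encode() // web-safe base64, no trailing '='
	padded := enc
	if m := len(padded) % 4; m != 0 {
		padded += strings.Repeat("=", 4-m) // mirrors DecodeKey's fix-up
	}
	k1, err1 := DecodeKey(enc)
	k2, err2 := DecodeKey(padded)
	return err1 == nil && err2 == nil && k1.Equal(k2) // true
}
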
// AllocateIDs accepts a slice of incomplete keys and returns a
// slice of complete keys that are guaranteed to be valid in the datastore.
func (c *Client) AllocateIDs(ctx context.Context, keys []*Key) ([]*Key, error) {
	if keys == nil {
		return nil, nil
	}

	req := &pb.AllocateIdsRequest{
		ProjectId: c.dataset,
		Keys:      multiKeyToProto(keys),
	}
	resp, err := c.client.AllocateIds(ctx, req)
	if err != nil {
		return nil, err
	}

	return multiProtoToKey(resp.Keys)
}

// IncompleteKey creates a new incomplete key.
// The supplied kind cannot be empty.
// The namespace of the new key is empty.
func IncompleteKey(kind string, parent *Key) *Key {
	return &Key{
		Kind:   kind,
		Parent: parent,
	}
}

// NameKey creates a new key with a name.
// The supplied kind cannot be empty.
// The supplied parent must either be a complete key or nil.
// The namespace of the new key is empty.
func NameKey(kind, name string, parent *Key) *Key {
	return &Key{
		Kind:   kind,
		Name:   name,
		Parent: parent,
	}
}

// IDKey creates a new key with an ID.
// The supplied kind cannot be empty.
// The supplied parent must either be a complete key or nil.
// The namespace of the new key is empty.
func IDKey(kind string, id int64, parent *Key) *Key {
	return &Key{
		Kind:   kind,
		ID:     id,
		Parent: parent,
	}
}

210 vendor/cloud.google.com/go/datastore/key_test.go generated vendored Normal file
@@ -0,0 +1,210 @@
// Copyright 2014 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package datastore

import (
	"bytes"
	"encoding/gob"
	"encoding/json"
	"testing"
)

func TestEqual(t *testing.T) {
	testCases := []struct {
		x, y  *Key
		equal bool
	}{
		{
			x:     nil,
			y:     nil,
			equal: true,
		},
		{
			x:     &Key{Kind: "kindA"},
			y:     &Key{Kind: "kindA"},
			equal: true,
		},
		{
			x:     &Key{Kind: "kindA", Name: "nameA"},
			y:     &Key{Kind: "kindA", Name: "nameA"},
			equal: true,
		},
		{
			x:     &Key{Kind: "kindA", Name: "nameA", Namespace: "gopherspace"},
			y:     &Key{Kind: "kindA", Name: "nameA", Namespace: "gopherspace"},
			equal: true,
		},
		{
			x:     &Key{Kind: "kindA", ID: 1337, Parent: &Key{Kind: "kindX", Name: "nameX"}},
			y:     &Key{Kind: "kindA", ID: 1337, Parent: &Key{Kind: "kindX", Name: "nameX"}},
			equal: true,
		},
		{
			x:     &Key{Kind: "kindA", Name: "nameA"},
			y:     &Key{Kind: "kindB", Name: "nameA"},
			equal: false,
		},
		{
			x:     &Key{Kind: "kindA", Name: "nameA"},
			y:     &Key{Kind: "kindA", Name: "nameB"},
			equal: false,
		},
		{
			x:     &Key{Kind: "kindA", Name: "nameA"},
			y:     &Key{Kind: "kindA", ID: 1337},
			equal: false,
		},
		{
			x:     &Key{Kind: "kindA", Name: "nameA"},
			y:     &Key{Kind: "kindA", Name: "nameA", Namespace: "gopherspace"},
			equal: false,
		},
		{
			x:     &Key{Kind: "kindA", ID: 1337, Parent: &Key{Kind: "kindX", Name: "nameX"}},
			y:     &Key{Kind: "kindA", ID: 1337, Parent: &Key{Kind: "kindY", Name: "nameX"}},
			equal: false,
		},
		{
			x:     &Key{Kind: "kindA", ID: 1337, Parent: &Key{Kind: "kindX", Name: "nameX"}},
			y:     &Key{Kind: "kindA", ID: 1337},
			equal: false,
		},
	}

	for _, tt := range testCases {
		if got := tt.x.Equal(tt.y); got != tt.equal {
			t.Errorf("Equal(%v, %v) = %t; want %t", tt.x, tt.y, got, tt.equal)
		}
		if got := tt.y.Equal(tt.x); got != tt.equal {
			t.Errorf("Equal(%v, %v) = %t; want %t", tt.y, tt.x, got, tt.equal)
		}
	}
}

func TestEncoding(t *testing.T) {
	testCases := []struct {
		k     *Key
		valid bool
	}{
		{
			k:     nil,
			valid: false,
		},
		{
			k:     &Key{},
			valid: false,
		},
		{
			k:     &Key{Kind: "kindA"},
			valid: true,
		},
		{
			k:     &Key{Kind: "kindA", Namespace: "gopherspace"},
			valid: true,
		},
		{
			k:     &Key{Kind: "kindA", Name: "nameA"},
			valid: true,
		},
		{
			k:     &Key{Kind: "kindA", ID: 1337},
			valid: true,
		},
		{
			k:     &Key{Kind: "kindA", Name: "nameA", ID: 1337},
			valid: false,
		},
		{
			k:     &Key{Kind: "kindA", Parent: &Key{Kind: "kindB", Name: "nameB"}},
			valid: true,
		},
		{
			k:     &Key{Kind: "kindA", Parent: &Key{Kind: "kindB"}},
			valid: false,
		},
		{
			k:     &Key{Kind: "kindA", Parent: &Key{Kind: "kindB", Name: "nameB", Namespace: "gopherspace"}},
			valid: false,
		},
	}

	for _, tt := range testCases {
		if got := tt.k.valid(); got != tt.valid {
			t.Errorf("valid(%v) = %t; want %t", tt.k, got, tt.valid)
		}

		// Check encoding/decoding for valid keys.
		if !tt.valid {
			continue
		}
		enc := tt.k.Encode()
		dec, err := DecodeKey(enc)
		if err != nil {
			t.Errorf("DecodeKey(%q) from %v: %v", enc, tt.k, err)
			continue
		}
		if !tt.k.Equal(dec) {
			t.Logf("Proto: %s", keyToProto(tt.k))
			t.Errorf("Decoded key %v not equal to %v", dec, tt.k)
		}

		b, err := json.Marshal(tt.k)
		if err != nil {
			t.Errorf("json.Marshal(%v): %v", tt.k, err)
			continue
		}
		key := &Key{}
		if err := json.Unmarshal(b, key); err != nil {
			t.Errorf("json.Unmarshal(%s) for key %v: %v", b, tt.k, err)
			continue
		}
		if !tt.k.Equal(key) {
			t.Errorf("JSON decoded key %v not equal to %v", dec, tt.k)
		}

		buf := &bytes.Buffer{}
		gobEnc := gob.NewEncoder(buf)
		if err := gobEnc.Encode(tt.k); err != nil {
			t.Errorf("gobEnc.Encode(%v): %v", tt.k, err)
			continue
		}
		gobDec := gob.NewDecoder(buf)
		key = &Key{}
		if err := gobDec.Decode(key); err != nil {
			t.Errorf("gobDec.Decode() for key %v: %v", tt.k, err)
		}
		if !tt.k.Equal(key) {
			t.Errorf("gob decoded key %v not equal to %v", dec, tt.k)
		}
	}
}

func TestInvalidKeyDecode(t *testing.T) {
	// Check that decoding an invalid key returns an err and doesn't panic.
	enc := NameKey("Kind", "Foo", nil).Encode()

	invalid := []string{
		"",
		"Laboratorio",
		enc + "Junk",
		enc[:len(enc)-4],
	}
	for _, enc := range invalid {
		key, err := DecodeKey(enc)
		if err == nil || key != nil {
			t.Errorf("DecodeKey(%q) = %v, %v; want nil, error", enc, key, err)
		}
	}
}

430 vendor/cloud.google.com/go/datastore/load.go generated vendored Normal file
@@ -0,0 +1,430 @@
// Copyright 2014 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package datastore

import (
	"fmt"
	"reflect"
	"strings"
	"time"

	"cloud.google.com/go/internal/fields"
	pb "google.golang.org/genproto/googleapis/datastore/v1"
)

var (
	typeOfByteSlice = reflect.TypeOf([]byte(nil))
	typeOfTime      = reflect.TypeOf(time.Time{})
	typeOfGeoPoint  = reflect.TypeOf(GeoPoint{})
	typeOfKeyPtr    = reflect.TypeOf(&Key{})
	typeOfEntityPtr = reflect.TypeOf(&Entity{})
)

// typeMismatchReason returns a string explaining why the property p could not
// be stored in an entity field of type v.Type().
func typeMismatchReason(p Property, v reflect.Value) string {
	entityType := "empty"
	switch p.Value.(type) {
	case int64:
		entityType = "int"
	case bool:
		entityType = "bool"
	case string:
		entityType = "string"
	case float64:
		entityType = "float"
	case *Key:
		entityType = "*datastore.Key"
	case *Entity:
		entityType = "*datastore.Entity"
	case GeoPoint:
		entityType = "GeoPoint"
	case time.Time:
		entityType = "time.Time"
	case []byte:
		entityType = "[]byte"
	}

	return fmt.Sprintf("type mismatch: %s versus %v", entityType, v.Type())
}

type propertyLoader struct {
	// m holds the number of times a substruct field like "Foo.Bar.Baz" has
	// been seen so far. The map is constructed lazily.
	m map[string]int
}

func (l *propertyLoader) load(codec fields.List, structValue reflect.Value, p Property, prev map[string]struct{}) string {
	sl, ok := p.Value.([]interface{})
	if !ok {
		return l.loadOneElement(codec, structValue, p, prev)
	}
	for _, val := range sl {
		p.Value = val
		if errStr := l.loadOneElement(codec, structValue, p, prev); errStr != "" {
			return errStr
		}
	}
	return ""
}

// loadOneElement loads the value of Property p into structValue based on the provided
// codec. codec is used to find the field in structValue into which p should be loaded.
// prev is the set of property names already seen for structValue.
func (l *propertyLoader) loadOneElement(codec fields.List, structValue reflect.Value, p Property, prev map[string]struct{}) string {
	var sliceOk bool
	var sliceIndex int
	var v reflect.Value

	name := p.Name
	fieldNames := strings.Split(name, ".")

	for len(fieldNames) > 0 {
		var field *fields.Field

		// Start by trying to find a field with name. If none found,
		// cut off the last field (delimited by ".") and find its parent
		// in the codec.
		// e.g. for name "A.B.C.D", split off "A.B.C" and try to
		// find a field in the codec with this name.
		// Loop again with "A.B", etc.
		for i := len(fieldNames); i > 0; i-- {
			parent := strings.Join(fieldNames[:i], ".")
			field = codec.Match(parent)
			if field != nil {
				fieldNames = fieldNames[i:]
				break
			}
		}

		// If we never found a matching field in the codec, return an
		// error message.
		if field == nil {
			return "no such struct field"
		}

		v = initField(structValue, field.Index)
		if !v.IsValid() {
			return "no such struct field"
		}
		if !v.CanSet() {
			return "cannot set struct field"
		}

		var err error
		if field.Type.Kind() == reflect.Struct {
			codec, err = structCache.Fields(field.Type)
			if err != nil {
				return err.Error()
			}
			structValue = v
		}

		// If the element is a slice, we need to accommodate it.
		if v.Kind() == reflect.Slice && v.Type() != typeOfByteSlice {
			if l.m == nil {
				l.m = make(map[string]int)
			}
			sliceIndex = l.m[p.Name]
			l.m[p.Name] = sliceIndex + 1
			for v.Len() <= sliceIndex {
				v.Set(reflect.Append(v, reflect.New(v.Type().Elem()).Elem()))
			}
			structValue = v.Index(sliceIndex)
			if structValue.Type().Kind() == reflect.Struct {
				codec, err = structCache.Fields(structValue.Type())
				if err != nil {
					return err.Error()
				}
			}
			sliceOk = true
		}
	}

	var slice reflect.Value
	if v.Kind() == reflect.Slice && v.Type().Elem().Kind() != reflect.Uint8 {
		slice = v
		v = reflect.New(v.Type().Elem()).Elem()
	} else if _, ok := prev[p.Name]; ok && !sliceOk {
		// Zero out the field that was set previously; the property turns out
		// to be multiple-valued, and we don't know what to do with it.
		v.Set(reflect.Zero(v.Type()))
		return "multiple-valued property requires a slice field type"
	}

	prev[p.Name] = struct{}{}

	if errReason := setVal(v, p); errReason != "" {
		// Set the slice back to its zero value.
		if slice.IsValid() {
			slice.Set(reflect.Zero(slice.Type()))
		}
		return errReason
	}

	if slice.IsValid() {
		slice.Index(sliceIndex).Set(v)
	}

	return ""
}

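// Illustrative sketch (editor's addition, not part of the vendored file):
// loadOneElement resolves a dotted property name longest-prefix-first
// against the codec, so a legacy flattened property like "A.I" lands in
// the nested field of the destination struct.
func exampleDottedLoadSketch() error {
	type Inner struct{ I int64 }
	type Outer struct{ A Inner }
	var o Outer
	// After a successful load, o.A.I == 7.
	return LoadStruct(&o, []Property{{Name: "A.I", Value: int64(7)}})
}
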
// setVal sets 'v' to the value of the Property 'p'.
func setVal(v reflect.Value, p Property) string {
	pValue := p.Value

	switch v.Kind() {
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		x, ok := pValue.(int64)
		if !ok && pValue != nil {
			return typeMismatchReason(p, v)
		}
		if v.OverflowInt(x) {
			return fmt.Sprintf("value %v overflows struct field of type %v", x, v.Type())
		}
		v.SetInt(x)
	case reflect.Bool:
		x, ok := pValue.(bool)
		if !ok && pValue != nil {
			return typeMismatchReason(p, v)
		}
		v.SetBool(x)
	case reflect.String:
		x, ok := pValue.(string)
		if !ok && pValue != nil {
			return typeMismatchReason(p, v)
		}
		v.SetString(x)
	case reflect.Float32, reflect.Float64:
		x, ok := pValue.(float64)
		if !ok && pValue != nil {
			return typeMismatchReason(p, v)
		}
		if v.OverflowFloat(x) {
			return fmt.Sprintf("value %v overflows struct field of type %v", x, v.Type())
		}
		v.SetFloat(x)
	case reflect.Ptr:
		// v must be either a pointer to a Key or Entity.
		if v.Type() != typeOfKeyPtr && v.Type().Elem().Kind() != reflect.Struct {
			return typeMismatchReason(p, v)
		}

		if pValue == nil {
			// If v is populated already, set it to nil.
			if !v.IsNil() {
				v.Set(reflect.New(v.Type()).Elem())
			}
			return ""
		}

		switch x := pValue.(type) {
		case *Key:
			if _, ok := v.Interface().(*Key); !ok {
				return typeMismatchReason(p, v)
			}
			v.Set(reflect.ValueOf(x))
		case *Entity:
			if v.IsNil() {
				v.Set(reflect.New(v.Type().Elem()))
			}
			err := loadEntity(v.Interface(), x)
			if err != nil {
				return err.Error()
			}

		default:
			return typeMismatchReason(p, v)
		}
	case reflect.Struct:
		switch v.Type() {
		case typeOfTime:
			x, ok := pValue.(time.Time)
			if !ok && pValue != nil {
				return typeMismatchReason(p, v)
			}
			v.Set(reflect.ValueOf(x))
		case typeOfGeoPoint:
			x, ok := pValue.(GeoPoint)
			if !ok && pValue != nil {
				return typeMismatchReason(p, v)
			}
			v.Set(reflect.ValueOf(x))
		default:
			ent, ok := pValue.(*Entity)
			if !ok {
				return typeMismatchReason(p, v)
			}

			// Check if v implements PropertyLoadSaver.
			if _, ok := v.Interface().(PropertyLoadSaver); ok {
				return fmt.Sprintf("datastore: PropertyLoadSaver methods must be implemented on a pointer to %T.", v.Interface())
			}

			err := loadEntity(v.Addr().Interface(), ent)
			if err != nil {
				return err.Error()
			}
		}
	case reflect.Slice:
		x, ok := pValue.([]byte)
		if !ok && pValue != nil {
			return typeMismatchReason(p, v)
		}
		if v.Type().Elem().Kind() != reflect.Uint8 {
			return typeMismatchReason(p, v)
		}
		v.SetBytes(x)
	default:
		return typeMismatchReason(p, v)
	}
	return ""
}

// initField is similar to reflect's Value.FieldByIndex, in that it
// returns the nested struct field corresponding to index, but it
// initialises any nil pointers encountered when traversing the structure.
func initField(val reflect.Value, index []int) reflect.Value {
	for _, i := range index[:len(index)-1] {
		val = val.Field(i)
		if val.Kind() == reflect.Ptr {
			if val.IsNil() {
				val.Set(reflect.New(val.Type().Elem()))
			}
			val = val.Elem()
		}
	}
	return val.Field(index[len(index)-1])
}

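// Illustrative sketch (editor's addition, not part of the vendored file):
// unlike reflect.Value.FieldByIndex, initField allocates any nil pointers
// on the way down, so loading into pointer-to-struct fields cannot panic.
func exampleInitFieldSketch() int {
	type Inner struct{ X int }
	type Outer struct{ P *Inner }
	o := &Outer{} // o.P is nil
	v := reflect.ValueOf(o).Elem()
	initField(v, []int{0, 0}).SetInt(5) // allocates o.P, then sets o.P.X
	return o.P.X                        // 5
}
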
// loadEntityProto loads an EntityProto into PropertyLoadSaver or struct pointer.
func loadEntityProto(dst interface{}, src *pb.Entity) error {
	ent, err := protoToEntity(src)
	if err != nil {
		return err
	}
	return loadEntity(dst, ent)
}

func loadEntity(dst interface{}, ent *Entity) error {
	if pls, ok := dst.(PropertyLoadSaver); ok {
		return pls.Load(ent.Properties)
	}
	return loadEntityToStruct(dst, ent)
}

func loadEntityToStruct(dst interface{}, ent *Entity) error {
	pls, err := newStructPLS(dst)
	if err != nil {
		return err
	}
	// Load properties.
	err = pls.Load(ent.Properties)
	if err != nil {
		return err
	}
	// Load key.
	keyField := pls.codec.Match(keyFieldName)
	if keyField != nil && ent.Key != nil {
		pls.v.FieldByIndex(keyField.Index).Set(reflect.ValueOf(ent.Key))
	}

	return nil
}

func (s structPLS) Load(props []Property) error {
	var fieldName, errReason string
	var l propertyLoader

	prev := make(map[string]struct{})
	for _, p := range props {
		if errStr := l.load(s.codec, s.v, p, prev); errStr != "" {
			// We don't return early, as we try to load as many properties as possible.
			// It is valid to load an entity into a struct that cannot fully represent it.
			// That case returns an error, but the caller is free to ignore it.
			fieldName, errReason = p.Name, errStr
		}
	}
	if errReason != "" {
		return &ErrFieldMismatch{
			StructType: s.v.Type(),
			FieldName:  fieldName,
			Reason:     errReason,
		}
	}
	return nil
}

func protoToEntity(src *pb.Entity) (*Entity, error) {
	props := make([]Property, 0, len(src.Properties))
	for name, val := range src.Properties {
		v, err := propToValue(val)
		if err != nil {
			return nil, err
		}
		props = append(props, Property{
			Name:    name,
			Value:   v,
			NoIndex: val.ExcludeFromIndexes,
		})
	}
	var key *Key
	if src.Key != nil {
		// Ignore any error, since nested entity values
		// are allowed to have an invalid key.
		key, _ = protoToKey(src.Key)
	}

	return &Entity{key, props}, nil
}

// propToValue returns a Go value that represents the PropertyValue. For
// example, a TimestampValue becomes a time.Time.
func propToValue(v *pb.Value) (interface{}, error) {
	switch v := v.ValueType.(type) {
	case *pb.Value_NullValue:
		return nil, nil
	case *pb.Value_BooleanValue:
		return v.BooleanValue, nil
	case *pb.Value_IntegerValue:
		return v.IntegerValue, nil
	case *pb.Value_DoubleValue:
		return v.DoubleValue, nil
	case *pb.Value_TimestampValue:
		return time.Unix(v.TimestampValue.Seconds, int64(v.TimestampValue.Nanos)), nil
	case *pb.Value_KeyValue:
		return protoToKey(v.KeyValue)
	case *pb.Value_StringValue:
		return v.StringValue, nil
	case *pb.Value_BlobValue:
		return []byte(v.BlobValue), nil
	case *pb.Value_GeoPointValue:
		return GeoPoint{Lat: v.GeoPointValue.Latitude, Lng: v.GeoPointValue.Longitude}, nil
	case *pb.Value_EntityValue:
		return protoToEntity(v.EntityValue)
	case *pb.Value_ArrayValue:
		arr := make([]interface{}, 0, len(v.ArrayValue.Values))
		for _, v := range v.ArrayValue.Values {
			vv, err := propToValue(v)
			if err != nil {
				return nil, err
			}
			arr = append(arr, vv)
		}
		return arr, nil
	default:
		return nil, nil
	}
}

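// Illustrative sketch (editor's addition, not part of the vendored file):
// an ArrayValue is converted element by element, so it surfaces to callers
// as []interface{}{int64(1), "two"} here.
func exampleArrayValueSketch() (interface{}, error) {
	return propToValue(&pb.Value{ValueType: &pb.Value_ArrayValue{
		&pb.ArrayValue{Values: []*pb.Value{
			{ValueType: &pb.Value_IntegerValue{1}},
			{ValueType: &pb.Value_StringValue{"two"}},
		}},
	}})
}
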
510 vendor/cloud.google.com/go/datastore/load_test.go generated vendored Normal file
@@ -0,0 +1,510 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package datastore

import (
	"reflect"
	"testing"

	pb "google.golang.org/genproto/googleapis/datastore/v1"
)

type Simple struct {
	I int64
}

type SimpleWithTag struct {
	I int64 `datastore:"II"`
}

type NestedSimpleWithTag struct {
	A SimpleWithTag `datastore:"AA"`
}

type NestedSliceOfSimple struct {
	A []Simple
}

type SimpleTwoFields struct {
	S  string
	SS string
}

type NestedSimpleAnonymous struct {
	Simple
	X string
}

type NestedSimple struct {
	A Simple
	I int
}

type NestedSimple1 struct {
	A Simple
	X string
}

type NestedSimple2X struct {
	AA NestedSimple
	A  SimpleTwoFields
	S  string
}

type BDotB struct {
	B string `datastore:"B.B"`
}

type ABDotB struct {
	A BDotB
}

type MultiAnonymous struct {
	Simple
	SimpleTwoFields
	X string
}

func TestLoadEntityNestedLegacy(t *testing.T) {
	testCases := []struct {
		desc string
		src  *pb.Entity
		want interface{}
	}{
		{
			"nested",
			&pb.Entity{
				Key: keyToProto(testKey0),
				Properties: map[string]*pb.Value{
					"X":   {ValueType: &pb.Value_StringValue{"two"}},
					"A.I": {ValueType: &pb.Value_IntegerValue{2}},
				},
			},
			&NestedSimple1{
				A: Simple{I: 2},
				X: "two",
			},
		},
		{
			"nested with tag",
			&pb.Entity{
				Key: keyToProto(testKey0),
				Properties: map[string]*pb.Value{
					"AA.II": {ValueType: &pb.Value_IntegerValue{2}},
				},
			},
			&NestedSimpleWithTag{
				A: SimpleWithTag{I: 2},
			},
		},
		{
			"nested with anonymous struct field",
			&pb.Entity{
				Key: keyToProto(testKey0),
				Properties: map[string]*pb.Value{
					"X": {ValueType: &pb.Value_StringValue{"two"}},
					"I": {ValueType: &pb.Value_IntegerValue{2}},
				},
			},
			&NestedSimpleAnonymous{
				Simple: Simple{I: 2},
				X:      "two",
			},
		},
		{
			"nested with dotted field tag",
			&pb.Entity{
				Key: keyToProto(testKey0),
				Properties: map[string]*pb.Value{
					"A.B.B": {ValueType: &pb.Value_StringValue{"bb"}},
				},
			},
			&ABDotB{
				A: BDotB{
					B: "bb",
				},
			},
		},
		{
			"nested with multiple anonymous fields",
			&pb.Entity{
				Key: keyToProto(testKey0),
				Properties: map[string]*pb.Value{
					"I":  {ValueType: &pb.Value_IntegerValue{3}},
					"S":  {ValueType: &pb.Value_StringValue{"S"}},
					"SS": {ValueType: &pb.Value_StringValue{"s"}},
					"X":  {ValueType: &pb.Value_StringValue{"s"}},
				},
			},
			&MultiAnonymous{
				Simple:          Simple{I: 3},
				SimpleTwoFields: SimpleTwoFields{S: "S", SS: "s"},
				X:               "s",
			},
		},
	}

	for _, tc := range testCases {
		dst := reflect.New(reflect.TypeOf(tc.want).Elem()).Interface()
		err := loadEntityProto(dst, tc.src)
		if err != nil {
			t.Errorf("loadEntityProto: %s: %v", tc.desc, err)
			continue
		}

		if !reflect.DeepEqual(tc.want, dst) {
			t.Errorf("%s: compare:\ngot: %#v\nwant: %#v", tc.desc, dst, tc.want)
		}
	}
}

type WithKey struct {
	X string
	I int
	K *Key `datastore:"__key__"`
}

type NestedWithKey struct {
	Y string
	N WithKey
}

var (
	incompleteKey = newKey("", nil)
	invalidKey    = newKey("s", incompleteKey)
)

func TestLoadEntityNested(t *testing.T) {
	testCases := []struct {
		desc string
		src  *pb.Entity
		want interface{}
	}{
		{
			"nested basic",
			&pb.Entity{
				Properties: map[string]*pb.Value{
					"A": {ValueType: &pb.Value_EntityValue{
						&pb.Entity{
							Properties: map[string]*pb.Value{
								"I": {ValueType: &pb.Value_IntegerValue{3}},
							},
						},
					}},
					"I": {ValueType: &pb.Value_IntegerValue{10}},
				},
			},
			&NestedSimple{
				A: Simple{I: 3},
				I: 10,
			},
		},
		{
			"nested with struct tags",
			&pb.Entity{
				Properties: map[string]*pb.Value{
					"AA": {ValueType: &pb.Value_EntityValue{
						&pb.Entity{
							Properties: map[string]*pb.Value{
								"II": {ValueType: &pb.Value_IntegerValue{1}},
							},
						},
					}},
				},
			},
			&NestedSimpleWithTag{
				A: SimpleWithTag{I: 1},
			},
		},
		{
			"nested 2x",
			&pb.Entity{
				Properties: map[string]*pb.Value{
					"AA": {ValueType: &pb.Value_EntityValue{
						&pb.Entity{
							Properties: map[string]*pb.Value{
								"A": {ValueType: &pb.Value_EntityValue{
									&pb.Entity{
										Properties: map[string]*pb.Value{
											"I": {ValueType: &pb.Value_IntegerValue{3}},
										},
									},
								}},
								"I": {ValueType: &pb.Value_IntegerValue{1}},
							},
						},
					}},
					"A": {ValueType: &pb.Value_EntityValue{
						&pb.Entity{
							Properties: map[string]*pb.Value{
								"S":  {ValueType: &pb.Value_StringValue{"S"}},
								"SS": {ValueType: &pb.Value_StringValue{"s"}},
							},
						},
					}},
					"S": {ValueType: &pb.Value_StringValue{"SS"}},
				},
			},
			&NestedSimple2X{
				AA: NestedSimple{
					A: Simple{I: 3},
					I: 1,
				},
				A: SimpleTwoFields{S: "S", SS: "s"},
				S: "SS",
			},
		},
		{
			"nested anonymous",
			&pb.Entity{
				Properties: map[string]*pb.Value{
					"I": {ValueType: &pb.Value_IntegerValue{3}},
					"X": {ValueType: &pb.Value_StringValue{"SomeX"}},
				},
			},
			&NestedSimpleAnonymous{
				Simple: Simple{I: 3},
				X:      "SomeX",
			},
		},
		{
			"nested simple with slice",
			&pb.Entity{
				Properties: map[string]*pb.Value{
					"A": {ValueType: &pb.Value_ArrayValue{
						&pb.ArrayValue{
							[]*pb.Value{
								{ValueType: &pb.Value_EntityValue{
									&pb.Entity{
										Properties: map[string]*pb.Value{
											"I": {ValueType: &pb.Value_IntegerValue{3}},
										},
									},
								}},
								{ValueType: &pb.Value_EntityValue{
									&pb.Entity{
										Properties: map[string]*pb.Value{
											"I": {ValueType: &pb.Value_IntegerValue{4}},
										},
									},
								}},
							},
						},
					}},
				},
			},

			&NestedSliceOfSimple{
				A: []Simple{Simple{I: 3}, Simple{I: 4}},
			},
		},
		{
			"nested with multiple anonymous fields",
			&pb.Entity{
				Properties: map[string]*pb.Value{
					"I":  {ValueType: &pb.Value_IntegerValue{3}},
					"S":  {ValueType: &pb.Value_StringValue{"S"}},
					"SS": {ValueType: &pb.Value_StringValue{"s"}},
					"X":  {ValueType: &pb.Value_StringValue{"ss"}},
				},
			},
			&MultiAnonymous{
				Simple:          Simple{I: 3},
				SimpleTwoFields: SimpleTwoFields{S: "S", SS: "s"},
				X:               "ss",
			},
		},
		{
			"nested with dotted field tag",
			&pb.Entity{
				Properties: map[string]*pb.Value{
					"A": {ValueType: &pb.Value_EntityValue{
						&pb.Entity{
							Properties: map[string]*pb.Value{
								"B.B": {ValueType: &pb.Value_StringValue{"bb"}},
							},
						},
					}},
				},
			},
			&ABDotB{
				A: BDotB{
					B: "bb",
				},
			},
		},
		{
			"nested entity with key",
			&pb.Entity{
				Key: keyToProto(testKey0),
				Properties: map[string]*pb.Value{
					"Y": {ValueType: &pb.Value_StringValue{"yyy"}},
					"N": {ValueType: &pb.Value_EntityValue{
						&pb.Entity{
							Key: keyToProto(testKey1a),
							Properties: map[string]*pb.Value{
								"X": {ValueType: &pb.Value_StringValue{"two"}},
								"I": {ValueType: &pb.Value_IntegerValue{2}},
							},
						},
					}},
				},
			},
			&NestedWithKey{
				Y: "yyy",
				N: WithKey{
					X: "two",
					I: 2,
					K: testKey1a,
				},
			},
		},
		{
			"nested entity with invalid key",
			&pb.Entity{
				Key: keyToProto(testKey0),
				Properties: map[string]*pb.Value{
					"Y": {ValueType: &pb.Value_StringValue{"yyy"}},
					"N": {ValueType: &pb.Value_EntityValue{
						&pb.Entity{
							Key: keyToProto(invalidKey),
							Properties: map[string]*pb.Value{
								"X": {ValueType: &pb.Value_StringValue{"two"}},
								"I": {ValueType: &pb.Value_IntegerValue{2}},
							},
						},
					}},
				},
			},
			&NestedWithKey{
				Y: "yyy",
				N: WithKey{
					X: "two",
					I: 2,
					K: invalidKey,
				},
			},
		},
	}

	for _, tc := range testCases {
		dst := reflect.New(reflect.TypeOf(tc.want).Elem()).Interface()
		err := loadEntityProto(dst, tc.src)
		if err != nil {
			t.Errorf("loadEntityProto: %s: %v", tc.desc, err)
			continue
		}

		if !reflect.DeepEqual(tc.want, dst) {
			t.Errorf("%s: compare:\ngot: %#v\nwant: %#v", tc.desc, dst, tc.want)
		}
	}
}

type NestedStructPtrs struct {
	*SimpleTwoFields
	Nest      *SimpleTwoFields
	TwiceNest *NestedSimple2
	I         int
}

type NestedSimple2 struct {
	A *Simple
	I int
}

func TestAlreadyPopulatedDst(t *testing.T) {
	testCases := []struct {
		desc string
		src  *pb.Entity
		dst  interface{}
		want interface{}
	}{
		{
			"simple already populated, nil properties",
			&pb.Entity{
				Key: keyToProto(testKey0),
				Properties: map[string]*pb.Value{
					"I": {ValueType: &pb.Value_NullValue{}},
				},
			},
			&Simple{
				I: 12,
			},
			&Simple{},
		},
		{
			"nested structs already populated",
			&pb.Entity{
				Key: keyToProto(testKey0),
				Properties: map[string]*pb.Value{
					"SS": {ValueType: &pb.Value_StringValue{"world"}},
				},
			},
			&SimpleTwoFields{S: "hello" /* SS: "" */},
			&SimpleTwoFields{S: "hello", SS: "world"},
		},
		{
			"nested structs already populated, pValues nil",
			&pb.Entity{
				Key: keyToProto(testKey0),
				Properties: map[string]*pb.Value{
					"S":    {ValueType: &pb.Value_NullValue{}},
					"SS":   {ValueType: &pb.Value_StringValue{"ss hello"}},
					"Nest": {ValueType: &pb.Value_NullValue{}},
					"TwiceNest": {ValueType: &pb.Value_EntityValue{
						&pb.Entity{
							Properties: map[string]*pb.Value{
								"A": {ValueType: &pb.Value_NullValue{}},
								"I": {ValueType: &pb.Value_IntegerValue{2}},
							},
						},
					}},
					"I": {ValueType: &pb.Value_IntegerValue{5}},
				},
			},
			&NestedStructPtrs{
				&SimpleTwoFields{S: "hello" /* SS: "" */},
				&SimpleTwoFields{ /* S: "" */ SS: "twice hello"},
				&NestedSimple2{
					A: &Simple{I: 2},
					/* I: 0 */
				},
				0,
			},
			&NestedStructPtrs{
				&SimpleTwoFields{ /* S: "" */ SS: "ss hello"},
				nil,
				&NestedSimple2{
					/* A: nil, */
					I: 2,
				},
				5,
			},
		},
	}

	for _, tc := range testCases {
		err := loadEntityProto(tc.dst, tc.src)
		if err != nil {
			t.Errorf("loadEntityProto: %s: %v", tc.desc, err)
			continue
		}

		if !reflect.DeepEqual(tc.want, tc.dst) {
			t.Errorf("%s: compare:\ngot: %#v\nwant: %#v", tc.desc, tc.dst, tc.want)
		}
	}
}

279 vendor/cloud.google.com/go/datastore/prop.go generated vendored Normal file
@@ -0,0 +1,279 @@
// Copyright 2014 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package datastore

import (
	"fmt"
	"reflect"
	"strings"
	"unicode"

	"cloud.google.com/go/internal/fields"
)

// Entities with more than this many indexed properties will not be saved.
const maxIndexedProperties = 20000

// []byte fields more than 1 megabyte long will not be loaded or saved.
const maxBlobLen = 1 << 20

// Property is a name/value pair plus some metadata. A datastore entity's
// contents are loaded and saved as a sequence of Properties. Each property
// name must be unique within an entity.
type Property struct {
	// Name is the property name.
	Name string
	// Value is the property value. The valid types are:
	//	- int64
	//	- bool
	//	- string
	//	- float64
	//	- *Key
	//	- time.Time
	//	- GeoPoint
	//	- []byte (up to 1 megabyte in length)
	//	- *Entity (representing a nested struct)
	// Value can also be:
	//	- []interface{} where each element is one of the above types
	// This set is smaller than the set of valid struct field types that the
	// datastore can load and save. A Value's type must be explicitly on
	// the list above; it is not sufficient for the underlying type to be
	// on that list. For example, a Value of "type myInt64 int64" is
	// invalid. Smaller-width integers and floats are also invalid. Again,
	// this is more restrictive than the set of valid struct field types.
	//
	// A Value will have an opaque type when loading entities from an index,
	// such as via a projection query. Load entities into a struct instead
	// of a PropertyLoadSaver when using a projection query.
	//
	// A Value may also be the nil interface value; this is equivalent to
	// Python's None but not directly representable by a Go struct. Loading
	// a nil-valued property into a struct will set that field to the zero
	// value.
	Value interface{}
	// NoIndex is whether the datastore cannot index this property.
	// If NoIndex is set to false, []byte and string values are limited to
	// 1500 bytes.
	NoIndex bool
}

// An Entity is the value type for a nested struct.
// This type is only used for a Property's Value.
type Entity struct {
	Key        *Key
	Properties []Property
}

// PropertyLoadSaver can be converted from and to a slice of Properties.
type PropertyLoadSaver interface {
	Load([]Property) error
	Save() ([]Property, error)
}

// PropertyList converts a []Property to implement PropertyLoadSaver.
type PropertyList []Property

var (
	typeOfPropertyLoadSaver = reflect.TypeOf((*PropertyLoadSaver)(nil)).Elem()
	typeOfPropertyList      = reflect.TypeOf(PropertyList(nil))
)

// Load loads all of the provided properties into l.
// It does not first reset *l to an empty slice.
func (l *PropertyList) Load(p []Property) error {
	*l = append(*l, p...)
	return nil
}

// Save saves all of l's properties as a slice of Properties.
func (l *PropertyList) Save() ([]Property, error) {
	return *l, nil
}

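// Illustrative sketch (editor's addition, not part of the vendored file):
// PropertyList satisfies PropertyLoadSaver, so an entity can be read or
// written without declaring a struct for it.
func examplePropertyListSketch() ([]Property, error) {
	var pl PropertyList
	if err := pl.Load([]Property{
		{Name: "Title", Value: "hello"},
		{Name: "Draft", Value: true, NoIndex: true},
	}); err != nil {
		return nil, err
	}
	return pl.Save() // returns both properties unchanged
}
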
// validPropertyName returns whether name consists of one or more valid Go
// identifiers joined by ".".
func validPropertyName(name string) bool {
	if name == "" {
		return false
	}
	for _, s := range strings.Split(name, ".") {
		if s == "" {
			return false
		}
		first := true
		for _, c := range s {
			if first {
				first = false
				if c != '_' && !unicode.IsLetter(c) {
					return false
				}
			} else {
				if c != '_' && !unicode.IsLetter(c) && !unicode.IsDigit(c) {
					return false
				}
			}
		}
	}
	return true
}

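// Illustrative sketch (editor's addition, not part of the vendored file):
// names are one or more Go identifiers joined by dots.
func exampleValidNamesSketch() {
	_ = validPropertyName("A")     // true
	_ = validPropertyName("A.B.C") // true
	_ = validPropertyName("1A")    // false: segment must start with a letter or '_'
	_ = validPropertyName("A..B")  // false: empty segment
	_ = validPropertyName("")      // false
}
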
// parseTag interprets datastore struct field tags.
func parseTag(t reflect.StructTag) (name string, keep bool, other interface{}, err error) {
	s := t.Get("datastore")
	parts := strings.Split(s, ",")
	if parts[0] == "-" && len(parts) == 1 {
		return "", false, nil, nil
	}
	if parts[0] != "" && !validPropertyName(parts[0]) {
		err = fmt.Errorf("datastore: struct tag has invalid property name: %q", parts[0])
		return "", false, nil, err
	}

	var opts saveOpts
	if len(parts) > 1 {
		for _, p := range parts[1:] {
			switch p {
			case "flatten":
				opts.flatten = true
			case "omitempty":
				opts.omitEmpty = true
			case "noindex":
				opts.noIndex = true
			default:
				err = fmt.Errorf("datastore: struct tag has invalid option: %q", p)
				return "", false, nil, err
			}
		}
		other = opts
	}
	return parts[0], true, other, nil
}

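// Illustrative sketch (editor's addition, not part of the vendored file):
// the tag forms parseTag accepts on struct fields.
func exampleTagFormsSketch() {
	type T struct {
		A int `datastore:"a"`                  // rename the property to "a"
		B int `datastore:",noindex"`           // keep the field name, skip indexing
		C int `datastore:"-"`                  // ignore the field entirely
		D int `datastore:",omitempty,flatten"` // options may be combined
	}
	_ = T{}
}
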
func validateType(t reflect.Type) error {
	if t.Kind() != reflect.Struct {
		return fmt.Errorf("datastore: validate called with non-struct type %s", t)
	}

	return validateChildType(t, "", false, false, map[reflect.Type]bool{})
}

// validateChildType is a recursion helper func for validateType.
func validateChildType(t reflect.Type, fieldName string, flatten, prevSlice bool, prevTypes map[reflect.Type]bool) error {
	if prevTypes[t] {
		return nil
	}
	prevTypes[t] = true

	switch t.Kind() {
	case reflect.Slice:
		if flatten && prevSlice {
			return fmt.Errorf("datastore: flattening nested structs leads to a slice of slices: field %q", fieldName)
		}
		return validateChildType(t.Elem(), fieldName, flatten, true, prevTypes)
	case reflect.Struct:
		if t == typeOfTime || t == typeOfGeoPoint {
			return nil
		}

		for i := 0; i < t.NumField(); i++ {
			f := t.Field(i)

			// If a named field is unexported, ignore it. An anonymous
			// unexported field is processed, because it may contain
			// exported fields, which are visible.
			exported := (f.PkgPath == "")
			if !exported && !f.Anonymous {
				continue
			}

			_, keep, other, err := parseTag(f.Tag)
			// Handle error from parseTag now instead of later (in cache.Fields call).
			if err != nil {
				return err
			}
			if !keep {
				continue
			}
			if other != nil {
				opts := other.(saveOpts)
				flatten = flatten || opts.flatten
			}
			if err := validateChildType(f.Type, f.Name, flatten, prevSlice, prevTypes); err != nil {
				return err
			}
		}
	case reflect.Ptr:
		if t == typeOfKeyPtr {
			return nil
		}
		return validateChildType(t.Elem(), fieldName, flatten, prevSlice, prevTypes)
	}
	return nil
}

// isLeafType determines whether or not a type is a 'leaf type'
// and should not be recursed into, but considered one field.
func isLeafType(t reflect.Type) bool {
	return t == typeOfTime || t == typeOfGeoPoint
}

// structCache collects the structs whose fields have already been calculated.
var structCache = fields.NewCache(parseTag, validateType, isLeafType)

// structPLS adapts a struct to be a PropertyLoadSaver.
type structPLS struct {
	v     reflect.Value
	codec fields.List
}

// newStructPLS returns a structPLS, which implements the
// PropertyLoadSaver interface, for the struct pointer p.
func newStructPLS(p interface{}) (*structPLS, error) {
	v := reflect.ValueOf(p)
	if v.Kind() != reflect.Ptr || v.Elem().Kind() != reflect.Struct {
		return nil, ErrInvalidEntityType
	}
	v = v.Elem()
	f, err := structCache.Fields(v.Type())
	if err != nil {
		return nil, err
	}
	return &structPLS{v, f}, nil
}

// LoadStruct loads the properties from p to dst.
// dst must be a struct pointer.
//
// The values of dst's unmatched struct fields are not modified,
// and matching slice-typed fields are not reset before appending to
// them. In particular, it is recommended to pass a pointer to a zero
// valued struct on each LoadStruct call.
func LoadStruct(dst interface{}, p []Property) error {
	x, err := newStructPLS(dst)
	if err != nil {
		return err
	}
	return x.Load(p)
}

// SaveStruct returns the properties from src as a slice of Properties.
// src must be a struct pointer.
func SaveStruct(src interface{}) ([]Property, error) {
	x, err := newStructPLS(src)
	if err != nil {
		return nil, err
	}
	return x.Save()
}

Some files were not shown because too many files have changed in this diff