From bf51655a7bff737be5e58942f9c0239f52435d0f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jacek=20J=2E=20=C5=81akis?= Date: Wed, 8 Feb 2017 14:57:52 +0100 Subject: [PATCH 01/10] vendor: Update vendoring for the exec client and server implementations MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Jacek J. Łakis Signed-off-by: Samuel Ortiz --- lock.json | 417 +- vendor/cloud.google.com/go/.travis.yml | 16 + vendor/cloud.google.com/go/AUTHORS | 15 + vendor/cloud.google.com/go/CONTRIBUTING.md | 132 + vendor/cloud.google.com/go/CONTRIBUTORS | 37 + vendor/cloud.google.com/go/LICENSE | 202 + vendor/cloud.google.com/go/README.md | 528 + vendor/cloud.google.com/go/appveyor.yml | 32 + .../cloud.google.com/go/authexample_test.go | 49 + .../cloud.google.com/go/bigquery/bigquery.go | 76 + vendor/cloud.google.com/go/bigquery/copy.go | 74 + .../cloud.google.com/go/bigquery/copy_test.go | 136 + .../go/bigquery/create_table_test.go | 103 + .../cloud.google.com/go/bigquery/dataset.go | 188 + .../go/bigquery/dataset_test.go | 156 + vendor/cloud.google.com/go/bigquery/doc.go | 295 + vendor/cloud.google.com/go/bigquery/error.go | 82 + .../go/bigquery/error_test.go | 109 + .../go/bigquery/examples_test.go | 652 + .../cloud.google.com/go/bigquery/extract.go | 76 + .../go/bigquery/extract_test.go | 102 + vendor/cloud.google.com/go/bigquery/file.go | 172 + .../cloud.google.com/go/bigquery/file_test.go | 90 + vendor/cloud.google.com/go/bigquery/gcs.go | 68 + .../go/bigquery/integration_test.go | 754 + .../cloud.google.com/go/bigquery/iterator.go | 158 + .../go/bigquery/iterator_test.go | 413 + vendor/cloud.google.com/go/bigquery/job.go | 133 + vendor/cloud.google.com/go/bigquery/load.go | 86 + .../cloud.google.com/go/bigquery/load_test.go | 229 + vendor/cloud.google.com/go/bigquery/params.go | 265 + .../go/bigquery/params_test.go | 262 + vendor/cloud.google.com/go/bigquery/query.go | 196 + .../go/bigquery/query_test.go | 305 + 
vendor/cloud.google.com/go/bigquery/read.go | 64 + .../cloud.google.com/go/bigquery/read_test.go | 303 + vendor/cloud.google.com/go/bigquery/schema.go | 312 + .../go/bigquery/schema_test.go | 792 + .../cloud.google.com/go/bigquery/service.go | 623 + vendor/cloud.google.com/go/bigquery/table.go | 224 + .../cloud.google.com/go/bigquery/uploader.go | 162 + .../go/bigquery/uploader_test.go | 285 + .../go/bigquery/utils_test.go | 47 + vendor/cloud.google.com/go/bigquery/value.go | 637 + .../go/bigquery/value_test.go | 885 + vendor/cloud.google.com/go/bigtable/admin.go | 335 + .../go/bigtable/admin_test.go | 91 + .../cloud.google.com/go/bigtable/bigtable.go | 735 + .../go/bigtable/bigtable_test.go | 854 + .../go/bigtable/bttest/example_test.go | 83 + .../go/bigtable/bttest/inmem.go | 1230 + .../go/bigtable/bttest/inmem_test.go | 517 + .../go/bigtable/cmd/cbt/cbt.go | 789 + .../go/bigtable/cmd/cbt/cbt_test.go | 59 + .../go/bigtable/cmd/cbt/cbtdoc.go | 191 + .../go/bigtable/cmd/emulator/cbtemulator.go | 44 + .../go/bigtable/cmd/loadtest/loadtest.go | 186 + .../go/bigtable/cmd/scantest/scantest.go | 155 + vendor/cloud.google.com/go/bigtable/doc.go | 125 + .../go/bigtable/export_test.go | 203 + vendor/cloud.google.com/go/bigtable/filter.go | 288 + vendor/cloud.google.com/go/bigtable/gc.go | 131 + .../bigtable/internal/cbtconfig/cbtconfig.go | 246 + .../go/bigtable/internal/gax/call_option.go | 106 + .../go/bigtable/internal/gax/invoke.go | 84 + .../go/bigtable/internal/gax/invoke_test.go | 49 + .../go/bigtable/internal/option/option.go | 48 + .../go/bigtable/internal/stat/stats.go | 144 + vendor/cloud.google.com/go/bigtable/reader.go | 250 + .../go/bigtable/reader_test.go | 343 + .../go/bigtable/retry_test.go | 362 + .../testdata/read-rows-acceptance-test.json | 1178 + vendor/cloud.google.com/go/civil/civil.go | 277 + .../cloud.google.com/go/civil/civil_test.go | 441 + vendor/cloud.google.com/go/cloud.go | 20 + .../go/cmd/go-cloud-debug-agent/debuglet.go | 450 + 
.../internal/breakpoints/breakpoints.go | 174 + .../internal/breakpoints/breakpoints_test.go | 168 + .../internal/controller/client.go | 279 + .../internal/controller/client_test.go | 218 + .../internal/valuecollector/valuecollector.go | 460 + .../valuecollector/valuecollector_test.go | 418 + .../go/compute/metadata/metadata.go | 438 + .../go/compute/metadata/metadata_test.go | 48 + .../go/container/container.go | 278 + .../go/datastore/datastore.go | 600 + .../go/datastore/datastore_test.go | 2776 + vendor/cloud.google.com/go/datastore/doc.go | 420 + .../cloud.google.com/go/datastore/errors.go | 47 + .../go/datastore/example_test.go | 545 + .../go/datastore/integration_test.go | 1040 + vendor/cloud.google.com/go/datastore/key.go | 280 + .../cloud.google.com/go/datastore/key_test.go | 210 + vendor/cloud.google.com/go/datastore/load.go | 430 + .../go/datastore/load_test.go | 510 + vendor/cloud.google.com/go/datastore/prop.go | 279 + vendor/cloud.google.com/go/datastore/query.go | 773 + .../go/datastore/query_test.go | 536 + vendor/cloud.google.com/go/datastore/save.go | 383 + .../go/datastore/save_test.go | 194 + .../go/datastore/testdata/index.yaml | 41 + vendor/cloud.google.com/go/datastore/time.go | 36 + .../go/datastore/time_test.go | 75 + .../go/datastore/transaction.go | 310 + .../go/debugger/apiv2/controller2_client.go | 226 + .../apiv2/controller2_client_example_test.go | 87 + .../go/debugger/apiv2/debugger2_client.go | 219 + .../apiv2/debugger2_client_example_test.go | 121 + .../cloud.google.com/go/debugger/apiv2/doc.go | 36 + .../go/debugger/apiv2/mock_test.go | 641 + .../go/errorreporting/apiv1beta1/doc.go | 35 + .../apiv1beta1/error_group_client.go | 166 + .../error_group_client_example_test.go | 69 + .../apiv1beta1/error_stats_client.go | 306 + .../error_stats_client_example_test.go | 95 + .../go/errorreporting/apiv1beta1/mock_test.go | 547 + .../apiv1beta1/report_errors_client.go | 153 + .../report_errors_client_example_test.go | 51 + 
.../go/errors/error_logging_test.go | 202 + vendor/cloud.google.com/go/errors/errors.go | 422 + .../cloud.google.com/go/errors/errors_test.go | 206 + .../cloud.google.com/go/errors/stack_test.go | 118 + .../go/examples/bigquery/concat_table/main.go | 93 + .../go/examples/bigquery/load/main.go | 95 + .../go/examples/bigquery/query/main.go | 98 + .../go/examples/bigquery/read/main.go | 142 + .../go/examples/bigtable/helloworld/README.md | 46 + .../go/examples/bigtable/helloworld/main.go | 157 + .../go/examples/bigtable/search/search.go | 453 + .../examples/bigtable/usercounter/README.md | 29 + .../go/examples/bigtable/usercounter/app.yaml | 11 + .../go/examples/bigtable/usercounter/main.go | 180 + .../go/examples/storage/appengine/app.go | 428 + .../go/examples/storage/appengine/app.yaml | 8 + .../go/examples/storage/appenginevm/app.go | 432 + .../go/examples/storage/appenginevm/app.yaml | 10 + .../go/iam/admin/apiv1/doc.go | 35 + .../go/iam/admin/apiv1/iam_client.go | 490 + .../admin/apiv1/iam_client_example_test.go | 250 + .../go/iam/admin/apiv1/mock_test.go | 1055 + .../go/iam/admin/apiv1/policy_methods.go | 52 + vendor/cloud.google.com/go/iam/iam.go | 199 + vendor/cloud.google.com/go/iam/iam_test.go | 86 + .../go/internal/atomiccache/atomiccache.go | 58 + .../internal/atomiccache/atomiccache_test.go | 46 + vendor/cloud.google.com/go/internal/cloud.go | 64 + .../go/internal/fields/fields.go | 444 + .../go/internal/fields/fields_test.go | 561 + .../go/internal/fields/fold.go | 156 + .../go/internal/fields/fold_test.go | 129 + .../go/internal/kokoro/build.sh | 41 + .../go/internal/kokoro/kokoro-key.json.enc | Bin 0 -> 2448 bytes .../go/internal/optional/optional.go | 94 + .../go/internal/optional/optional_test.go | 64 + .../go/internal/pretty/diff.go | 78 + .../go/internal/pretty/diff_test.go | 50 + .../go/internal/pretty/pretty.go | 241 + .../go/internal/pretty/pretty_test.go | 105 + vendor/cloud.google.com/go/internal/retry.go | 55 + .../go/internal/retry_test.go 
| 64 + .../go/internal/testutil/context.go | 67 + .../go/internal/testutil/server.go | 73 + .../go/internal/testutil/server_test.go | 35 + .../go/internal/version/update_version.sh | 6 + .../go/internal/version/version.go | 49 + .../go/internal/version/version_test.go | 47 + vendor/cloud.google.com/go/key.json.enc | Bin 0 -> 1248 bytes .../cloud.google.com/go/language/apiv1/doc.go | 35 + .../go/language/apiv1/language_client.go | 188 + .../apiv1/language_client_example_test.go | 105 + .../go/language/apiv1/mock_test.go | 363 + vendor/cloud.google.com/go/license_test.go | 70 + .../go/logging/apiv2/README.md | 11 + .../go/logging/apiv2/config_client.go | 315 + .../apiv2/config_client_example_test.go | 125 + .../cloud.google.com/go/logging/apiv2/doc.go | 36 + .../go/logging/apiv2/logging_client.go | 443 + .../apiv2/logging_client_example_test.go | 133 + .../go/logging/apiv2/metrics_client.go | 302 + .../apiv2/metrics_client_example_test.go | 125 + .../go/logging/apiv2/mock_test.go | 1179 + vendor/cloud.google.com/go/logging/doc.go | 90 + .../go/logging/examples_test.go | 125 + .../go/logging/internal/common.go | 30 + .../go/logging/internal/testing/fake.go | 408 + .../go/logging/internal/testing/fake_test.go | 110 + .../go/logging/internal/testing/unique.go | 73 + .../logging/internal/testing/unique_test.go | 72 + .../logadmin/example_entry_iterator_test.go | 66 + .../logadmin/example_metric_iterator_test.go | 52 + .../logging/logadmin/example_paging_test.go | 92 + .../example_resource_iterator_test.go | 52 + .../logadmin/example_sink_iterator_test.go | 52 + .../go/logging/logadmin/examples_test.go | 161 + .../go/logging/logadmin/logadmin.go | 347 + .../go/logging/logadmin/logadmin_test.go | 274 + .../go/logging/logadmin/metrics.go | 154 + .../go/logging/logadmin/metrics_test.go | 141 + .../go/logging/logadmin/resources.go | 74 + .../go/logging/logadmin/resources_test.go | 46 + .../go/logging/logadmin/sinks.go | 169 + .../go/logging/logadmin/sinks_test.go | 218 + 
vendor/cloud.google.com/go/logging/logging.go | 678 + .../go/logging/logging_test.go | 509 + .../go/logging/logging_unexported_test.go | 197 + .../go/longrunning/example_test.go | 116 + .../go/longrunning/longrunning.go | 163 + .../go/longrunning/longrunning_test.go | 216 + .../go/monitoring/apiv3/doc.go | 35 + .../go/monitoring/apiv3/group_client.go | 392 + .../apiv3/group_client_example_test.go | 147 + .../go/monitoring/apiv3/metric_client.go | 492 + .../apiv3/metric_client_example_test.go | 185 + .../go/monitoring/apiv3/mock_test.go | 1133 + vendor/cloud.google.com/go/old-news.md | 312 + vendor/cloud.google.com/go/pubsub/acker.go | 159 + .../cloud.google.com/go/pubsub/acker_test.go | 262 + .../go/pubsub/apiv1/README.md | 9 + .../cloud.google.com/go/pubsub/apiv1/doc.go | 36 + .../go/pubsub/apiv1/mock_test.go | 1202 + .../go/pubsub/apiv1/publisher_client.go | 394 + .../apiv1/publisher_client_example_test.go | 181 + .../go/pubsub/apiv1/subscriber_client.go | 409 + .../apiv1/subscriber_client_example_test.go | 243 + vendor/cloud.google.com/go/pubsub/doc.go | 120 + .../go/pubsub/endtoend_test.go | 324 + .../example_subscription_iterator_test.go | 54 + .../go/pubsub/example_test.go | 299 + .../go/pubsub/example_topic_iterator_test.go | 53 + .../cloud.google.com/go/pubsub/fake_test.go | 148 + .../go/pubsub/integration_test.go | 232 + vendor/cloud.google.com/go/pubsub/iterator.go | 527 + .../go/pubsub/iterator_test.go | 324 + .../cloud.google.com/go/pubsub/keepalive.go | 182 + .../go/pubsub/keepalive_test.go | 319 + vendor/cloud.google.com/go/pubsub/message.go | 84 + vendor/cloud.google.com/go/pubsub/pubsub.go | 136 + vendor/cloud.google.com/go/pubsub/puller.go | 115 + .../cloud.google.com/go/pubsub/puller_test.go | 154 + vendor/cloud.google.com/go/pubsub/service.go | 485 + .../go/pubsub/service_test.go | 68 + .../go/pubsub/streaming_pull_test.go | 277 + .../go/pubsub/subscription.go | 265 + .../go/pubsub/subscription_test.go | 151 + 
vendor/cloud.google.com/go/pubsub/topic.go | 132 + .../cloud.google.com/go/pubsub/topic_test.go | 127 + .../cloud.google.com/go/pubsub/utils_test.go | 63 + .../database/apiv1/database_admin_client.go | 533 + .../database_admin_client_example_test.go | 204 + .../go/spanner/admin/database/apiv1/doc.go | 32 + .../spanner/admin/database/apiv1/mock_test.go | 740 + .../go/spanner/admin/instance/apiv1/doc.go | 32 + .../instance/apiv1/instance_admin_client.go | 649 + .../instance_admin_client_example_test.go | 230 + .../spanner/admin/instance/apiv1/mock_test.go | 853 + vendor/cloud.google.com/go/spanner/backoff.go | 58 + .../go/spanner/backoff_test.go | 62 + vendor/cloud.google.com/go/spanner/client.go | 302 + .../go/spanner/client_test.go | 43 + vendor/cloud.google.com/go/spanner/doc.go | 319 + vendor/cloud.google.com/go/spanner/errors.go | 105 + .../go/spanner/examples_test.go | 420 + .../spanner/internal/testutil/mockclient.go | 355 + .../spanner/internal/testutil/mockserver.go | 255 + vendor/cloud.google.com/go/spanner/key.go | 321 + .../cloud.google.com/go/spanner/key_test.go | 253 + vendor/cloud.google.com/go/spanner/keyset.go | 108 + .../go/spanner/keyset_test.go | 119 + .../cloud.google.com/go/spanner/mutation.go | 422 + .../go/spanner/mutation_test.go | 545 + .../cloud.google.com/go/spanner/protoutils.go | 113 + vendor/cloud.google.com/go/spanner/read.go | 679 + .../cloud.google.com/go/spanner/read_test.go | 1727 + vendor/cloud.google.com/go/spanner/retry.go | 189 + .../cloud.google.com/go/spanner/retry_test.go | 106 + vendor/cloud.google.com/go/spanner/row.go | 307 + .../cloud.google.com/go/spanner/row_test.go | 1775 + vendor/cloud.google.com/go/spanner/session.go | 965 + .../go/spanner/session_test.go | 792 + .../go/spanner/spanner_test.go | 955 + .../cloud.google.com/go/spanner/statement.go | 78 + .../go/spanner/statement_test.go | 64 + .../go/spanner/timestampbound.go | 245 + .../go/spanner/timestampbound_test.go | 208 + .../go/spanner/transaction.go | 821 + 
vendor/cloud.google.com/go/spanner/value.go | 1244 + .../cloud.google.com/go/spanner/value_test.go | 611 + .../go/speech/apiv1beta1/doc.go | 33 + .../go/speech/apiv1beta1/mock_test.go | 378 + .../go/speech/apiv1beta1/speech_client.go | 255 + .../apiv1beta1/speech_client_example_test.go | 110 + vendor/cloud.google.com/go/storage/acl.go | 223 + vendor/cloud.google.com/go/storage/bucket.go | 331 + vendor/cloud.google.com/go/storage/copy.go | 190 + vendor/cloud.google.com/go/storage/doc.go | 161 + .../go/storage/example_test.go | 501 + .../go/storage/integration_test.go | 1147 + vendor/cloud.google.com/go/storage/invoke.go | 43 + .../go/storage/invoke_test.go | 55 + vendor/cloud.google.com/go/storage/reader.go | 57 + vendor/cloud.google.com/go/storage/storage.go | 1083 + .../go/storage/storage_test.go | 683 + .../go/storage/testdata/dummy_pem | 39 + .../go/storage/testdata/dummy_rsa | 27 + vendor/cloud.google.com/go/storage/writer.go | 150 + .../go/storage/writer_test.go | 92 + vendor/cloud.google.com/go/trace/apiv1/doc.go | 38 + .../go/trace/apiv1/mock_test.go | 299 + .../go/trace/apiv1/trace_client.go | 235 + .../trace/apiv1/trace_client_example_test.go | 89 + vendor/cloud.google.com/go/trace/sampling.go | 117 + vendor/cloud.google.com/go/trace/trace.go | 811 + .../cloud.google.com/go/trace/trace_test.go | 849 + .../go/translate/internal/translate/v2/README | 12 + .../translate/internal/translate/v2/regen.sh | 29 + .../translate/v2/translate-nov2016-api.json | 285 + .../translate/v2/translate-nov2016-gen.go | 790 + .../go/translate/translate.go | 230 + .../go/translate/translate_test.go | 342 + .../cloud.google.com/go/vision/annotations.go | 689 + .../go/vision/apiv1/README.md | 9 + .../cloud.google.com/go/vision/apiv1/doc.go | 37 + .../go/vision/apiv1/image_annotator_client.go | 134 + .../image_annotator_client_example_test.go | 51 + .../go/vision/apiv1/mock_test.go | 149 + vendor/cloud.google.com/go/vision/doc.go | 102 + .../go/vision/examples_test.go | 99 + 
vendor/cloud.google.com/go/vision/face.go | 172 + vendor/cloud.google.com/go/vision/geometry.go | 36 + vendor/cloud.google.com/go/vision/image.go | 91 + .../cloud.google.com/go/vision/image_test.go | 41 + vendor/cloud.google.com/go/vision/latlng.go | 58 + .../go/vision/testdata/README.md | 16 + .../go/vision/testdata/cat.jpg | Bin 0 -> 122667 bytes .../go/vision/testdata/eiffel-tower.jpg | Bin 0 -> 6832 bytes .../go/vision/testdata/face.jpg | Bin 0 -> 68133 bytes .../go/vision/testdata/faulkner.jpg | Bin 0 -> 167040 bytes .../go/vision/testdata/google.png | Bin 0 -> 5969 bytes .../go/vision/testdata/mountain.jpg | Bin 0 -> 41852 bytes .../go/vision/testdata/no-text.jpg | Bin 0 -> 12377 bytes vendor/cloud.google.com/go/vision/vision.go | 357 + .../cloud.google.com/go/vision/vision_test.go | 283 + vendor/github.com/Azure/go-ansiterm/LICENSE | 21 + vendor/github.com/Azure/go-ansiterm/README.md | 12 + .../github.com/Azure/go-ansiterm/constants.go | 188 + .../github.com/Azure/go-ansiterm/context.go | 7 + .../Azure/go-ansiterm/csi_entry_state.go | 49 + .../Azure/go-ansiterm/csi_param_state.go | 38 + .../go-ansiterm/escape_intermediate_state.go | 36 + .../Azure/go-ansiterm/escape_state.go | 47 + .../Azure/go-ansiterm/event_handler.go | 90 + .../Azure/go-ansiterm/ground_state.go | 24 + .../Azure/go-ansiterm/osc_string_state.go | 31 + vendor/github.com/Azure/go-ansiterm/parser.go | 136 + .../go-ansiterm/parser_action_helpers.go | 103 + .../Azure/go-ansiterm/parser_actions.go | 122 + .../Azure/go-ansiterm/parser_test.go | 141 + .../go-ansiterm/parser_test_helpers_test.go | 114 + .../go-ansiterm/parser_test_utilities_test.go | 66 + vendor/github.com/Azure/go-ansiterm/states.go | 71 + .../go-ansiterm/test_event_handler_test.go | 173 + .../github.com/Azure/go-ansiterm/utilities.go | 21 + .../Azure/go-ansiterm/winterm/ansi.go | 182 + .../Azure/go-ansiterm/winterm/api.go | 322 + .../go-ansiterm/winterm/attr_translation.go | 100 + .../go-ansiterm/winterm/cursor_helpers.go | 101 + 
.../go-ansiterm/winterm/erase_helpers.go | 84 + .../go-ansiterm/winterm/scroll_helper.go | 118 + .../Azure/go-ansiterm/winterm/utilities.go | 9 + .../go-ansiterm/winterm/win_event_handler.go | 726 + .../github.com/PuerkitoBio/purell/.gitignore | 5 + .../github.com/PuerkitoBio/purell/.travis.yml | 7 + vendor/github.com/PuerkitoBio/purell/LICENSE | 12 + .../github.com/PuerkitoBio/purell/README.md | 187 + .../PuerkitoBio/purell/bench_test.go | 57 + .../PuerkitoBio/purell/benchmarks/v0.1.0 | 9 + .../PuerkitoBio/purell/example_test.go | 35 + .../github.com/PuerkitoBio/purell/purell.go | 379 + .../PuerkitoBio/purell/purell_test.go | 768 + .../PuerkitoBio/purell/urlnorm_test.go | 53 + .../github.com/PuerkitoBio/urlesc/.travis.yml | 11 + vendor/github.com/PuerkitoBio/urlesc/LICENSE | 27 + .../github.com/PuerkitoBio/urlesc/README.md | 16 + .../github.com/PuerkitoBio/urlesc/urlesc.go | 180 + .../PuerkitoBio/urlesc/urlesc_test.go | 641 + vendor/github.com/coreos/go-oidc/.gitignore | 2 + vendor/github.com/coreos/go-oidc/.travis.yml | 16 + .../github.com/coreos/go-oidc/CONTRIBUTING.md | 71 + vendor/github.com/coreos/go-oidc/DCO | 36 + vendor/github.com/coreos/go-oidc/LICENSE | 202 + vendor/github.com/coreos/go-oidc/MAINTAINERS | 3 + vendor/github.com/coreos/go-oidc/NOTICE | 5 + vendor/github.com/coreos/go-oidc/README.md | 72 + .../coreos/go-oidc/example/README.md | 21 + .../coreos/go-oidc/example/idtoken/app.go | 93 + .../coreos/go-oidc/example/nonce/app.go | 105 + .../coreos/go-oidc/example/userinfo/app.go | 76 + vendor/github.com/coreos/go-oidc/gen.go | 150 + .../github.com/coreos/go-oidc/http/client.go | 7 + vendor/github.com/coreos/go-oidc/http/doc.go | 2 + vendor/github.com/coreos/go-oidc/http/http.go | 156 + .../coreos/go-oidc/http/http_test.go | 367 + vendor/github.com/coreos/go-oidc/http/url.go | 29 + .../coreos/go-oidc/http/url_test.go | 49 + vendor/github.com/coreos/go-oidc/jose.go | 20 + .../github.com/coreos/go-oidc/jose/claims.go | 126 + 
.../coreos/go-oidc/jose/claims_test.go | 328 + vendor/github.com/coreos/go-oidc/jose/doc.go | 2 + vendor/github.com/coreos/go-oidc/jose/jose.go | 112 + vendor/github.com/coreos/go-oidc/jose/jwk.go | 135 + .../coreos/go-oidc/jose/jwk_test.go | 64 + vendor/github.com/coreos/go-oidc/jose/jws.go | 51 + .../coreos/go-oidc/jose/jws_test.go | 74 + vendor/github.com/coreos/go-oidc/jose/jwt.go | 82 + .../coreos/go-oidc/jose/jwt_test.go | 94 + vendor/github.com/coreos/go-oidc/jose/sig.go | 24 + .../github.com/coreos/go-oidc/jose/sig_rsa.go | 67 + vendor/github.com/coreos/go-oidc/jose_test.go | 405 + vendor/github.com/coreos/go-oidc/jwks.go | 199 + vendor/github.com/coreos/go-oidc/jwks_test.go | 99 + vendor/github.com/coreos/go-oidc/key/doc.go | 2 + vendor/github.com/coreos/go-oidc/key/key.go | 153 + .../github.com/coreos/go-oidc/key/key_test.go | 103 + .../github.com/coreos/go-oidc/key/manager.go | 99 + .../coreos/go-oidc/key/manager_test.go | 225 + vendor/github.com/coreos/go-oidc/key/repo.go | 55 + .../github.com/coreos/go-oidc/key/rotate.go | 159 + .../coreos/go-oidc/key/rotate_test.go | 311 + vendor/github.com/coreos/go-oidc/key/sync.go | 91 + .../coreos/go-oidc/key/sync_test.go | 214 + .../github.com/coreos/go-oidc/oauth2/doc.go | 2 + .../github.com/coreos/go-oidc/oauth2/error.go | 29 + .../coreos/go-oidc/oauth2/oauth2.go | 416 + .../coreos/go-oidc/oauth2/oauth2_test.go | 518 + vendor/github.com/coreos/go-oidc/oidc.go | 319 + .../github.com/coreos/go-oidc/oidc/client.go | 846 + .../coreos/go-oidc/oidc/client_race_test.go | 81 + .../coreos/go-oidc/oidc/client_test.go | 654 + vendor/github.com/coreos/go-oidc/oidc/doc.go | 2 + .../coreos/go-oidc/oidc/identity.go | 44 + .../coreos/go-oidc/oidc/identity_test.go | 113 + .../coreos/go-oidc/oidc/interface.go | 3 + vendor/github.com/coreos/go-oidc/oidc/key.go | 67 + .../coreos/go-oidc/oidc/provider.go | 687 + .../coreos/go-oidc/oidc/provider_test.go | 974 + .../coreos/go-oidc/oidc/transport.go | 88 + 
.../coreos/go-oidc/oidc/transport_test.go | 176 + vendor/github.com/coreos/go-oidc/oidc/util.go | 109 + .../coreos/go-oidc/oidc/util_test.go | 110 + .../coreos/go-oidc/oidc/verification.go | 190 + .../coreos/go-oidc/oidc/verification_test.go | 380 + vendor/github.com/coreos/go-oidc/test | 15 + vendor/github.com/coreos/go-oidc/verify.go | 242 + .../github.com/coreos/go-oidc/verify_test.go | 319 + vendor/github.com/coreos/pkg/.gitignore | 27 + vendor/github.com/coreos/pkg/.travis.yml | 8 + vendor/github.com/coreos/pkg/CONTRIBUTING.md | 71 + vendor/github.com/coreos/pkg/DCO | 36 + vendor/github.com/coreos/pkg/LICENSE | 202 + vendor/github.com/coreos/pkg/MAINTAINERS | 1 + vendor/github.com/coreos/pkg/NOTICE | 5 + vendor/github.com/coreos/pkg/README.md | 4 + vendor/github.com/coreos/pkg/build | 3 + .../github.com/coreos/pkg/capnslog/README.md | 39 + .../pkg/capnslog/example/hello_dolly.go | 57 + .../coreos/pkg/capnslog/formatters.go | 157 + .../coreos/pkg/capnslog/glog_formatter.go | 96 + vendor/github.com/coreos/pkg/capnslog/init.go | 49 + .../coreos/pkg/capnslog/init_windows.go | 25 + .../coreos/pkg/capnslog/journald_formatter.go | 68 + .../coreos/pkg/capnslog/log_hijack.go | 39 + .../github.com/coreos/pkg/capnslog/logmap.go | 240 + .../coreos/pkg/capnslog/pkg_logger.go | 177 + .../coreos/pkg/capnslog/syslog_formatter.go | 65 + .../github.com/coreos/pkg/cryptoutil/aes.go | 94 + .../coreos/pkg/cryptoutil/aes_test.go | 93 + vendor/github.com/coreos/pkg/dlopen/dlopen.go | 82 + .../coreos/pkg/dlopen/dlopen_example.go | 56 + .../coreos/pkg/dlopen/dlopen_test.go | 63 + vendor/github.com/coreos/pkg/flagutil/env.go | 33 + .../coreos/pkg/flagutil/env_file.go | 77 + .../coreos/pkg/flagutil/env_test.go | 64 + .../coreos/pkg/flagutil/file_env_test.go | 107 + .../github.com/coreos/pkg/flagutil/types.go | 44 + .../coreos/pkg/flagutil/types_test.go | 57 + vendor/github.com/coreos/pkg/health/README.md | 11 + vendor/github.com/coreos/pkg/health/health.go | 127 + 
.../coreos/pkg/health/health_test.go | 198 + .../github.com/coreos/pkg/httputil/README.md | 13 + .../github.com/coreos/pkg/httputil/cookie.go | 21 + .../coreos/pkg/httputil/cookie_test.go | 51 + vendor/github.com/coreos/pkg/httputil/json.go | 27 + .../coreos/pkg/httputil/json_test.go | 56 + .../coreos/pkg/k8s-tlsutil/k8s-tlsutil.go | 140 + .../coreos/pkg/multierror/multierror.go | 32 + .../coreos/pkg/multierror/multierror_test.go | 59 + vendor/github.com/coreos/pkg/netutil/proxy.go | 48 + vendor/github.com/coreos/pkg/netutil/url.go | 17 + .../github.com/coreos/pkg/netutil/url_test.go | 86 + .../coreos/pkg/progressutil/iocopy.go | 189 + .../coreos/pkg/progressutil/iocopy_test.go | 140 + .../coreos/pkg/progressutil/progressbar.go | 263 + .../pkg/progressutil/progressbar_test.go | 116 + vendor/github.com/coreos/pkg/test | 56 + .../github.com/coreos/pkg/timeutil/backoff.go | 15 + .../coreos/pkg/timeutil/backoff_test.go | 52 + vendor/github.com/coreos/pkg/yamlutil/yaml.go | 55 + .../coreos/pkg/yamlutil/yaml_test.go | 80 + vendor/github.com/davecgh/go-spew/.gitignore | 22 + vendor/github.com/davecgh/go-spew/.travis.yml | 14 + vendor/github.com/davecgh/go-spew/LICENSE | 15 + vendor/github.com/davecgh/go-spew/README.md | 205 + .../github.com/davecgh/go-spew/cov_report.sh | 22 + .../github.com/davecgh/go-spew/spew/bypass.go | 152 + .../davecgh/go-spew/spew/bypasssafe.go | 38 + .../github.com/davecgh/go-spew/spew/common.go | 341 + .../davecgh/go-spew/spew/common_test.go | 298 + .../github.com/davecgh/go-spew/spew/config.go | 306 + vendor/github.com/davecgh/go-spew/spew/doc.go | 211 + .../github.com/davecgh/go-spew/spew/dump.go | 509 + .../davecgh/go-spew/spew/dump_test.go | 1042 + .../davecgh/go-spew/spew/dumpcgo_test.go | 99 + .../davecgh/go-spew/spew/dumpnocgo_test.go | 26 + .../davecgh/go-spew/spew/example_test.go | 226 + .../github.com/davecgh/go-spew/spew/format.go | 419 + .../davecgh/go-spew/spew/format_test.go | 1558 + .../davecgh/go-spew/spew/internal_test.go | 87 + 
.../go-spew/spew/internalunsafe_test.go | 102 + .../github.com/davecgh/go-spew/spew/spew.go | 148 + .../davecgh/go-spew/spew/spew_test.go | 320 + .../davecgh/go-spew/spew/testdata/dumpcgo.go | 82 + .../davecgh/go-spew/test_coverage.txt | 61 + .../docker/spdystream/CONTRIBUTING.md | 13 + vendor/github.com/docker/spdystream/LICENSE | 191 + .../github.com/docker/spdystream/LICENSE.docs | 425 + .../github.com/docker/spdystream/MAINTAINERS | 28 + vendor/github.com/docker/spdystream/README.md | 77 + .../docker/spdystream/connection.go | 959 + .../github.com/docker/spdystream/handlers.go | 38 + .../github.com/docker/spdystream/priority.go | 98 + .../docker/spdystream/priority_test.go | 108 + .../docker/spdystream/spdy/dictionary.go | 187 + .../github.com/docker/spdystream/spdy/read.go | 348 + .../docker/spdystream/spdy/spdy_test.go | 644 + .../docker/spdystream/spdy/types.go | 275 + .../docker/spdystream/spdy/write.go | 318 + .../docker/spdystream/spdy_bench_test.go | 113 + .../github.com/docker/spdystream/spdy_test.go | 1171 + vendor/github.com/docker/spdystream/stream.go | 327 + vendor/github.com/docker/spdystream/utils.go | 16 + .../docker/spdystream/ws/connection.go | 65 + .../docker/spdystream/ws/ws_test.go | 175 + .../github.com/emicklei/go-restful/.gitignore | 70 + .../github.com/emicklei/go-restful/CHANGES.md | 171 + vendor/github.com/emicklei/go-restful/LICENSE | 22 + .../github.com/emicklei/go-restful/README.md | 74 + vendor/github.com/emicklei/go-restful/Srcfile | 1 + .../emicklei/go-restful/bench_curly_test.go | 51 + .../emicklei/go-restful/bench_test.go | 43 + .../emicklei/go-restful/bench_test.sh | 10 + .../emicklei/go-restful/compress.go | 123 + .../emicklei/go-restful/compress_test.go | 127 + .../emicklei/go-restful/compressor_cache.go | 103 + .../emicklei/go-restful/compressor_pools.go | 91 + .../emicklei/go-restful/compressors.go | 53 + .../emicklei/go-restful/constants.go | 30 + .../emicklei/go-restful/container.go | 361 + 
.../emicklei/go-restful/container_test.go | 83 + .../emicklei/go-restful/cors_filter.go | 202 + .../emicklei/go-restful/cors_filter_test.go | 129 + .../emicklei/go-restful/coverage.sh | 2 + .../github.com/emicklei/go-restful/curly.go | 164 + .../emicklei/go-restful/curly_route.go | 52 + .../emicklei/go-restful/curly_test.go | 231 + vendor/github.com/emicklei/go-restful/doc.go | 185 + .../emicklei/go-restful/doc_examples_test.go | 41 + .../emicklei/go-restful/entity_accessors.go | 163 + .../go-restful/entity_accessors_test.go | 69 + .../emicklei/go-restful/examples/.goconvey | 1 + .../examples/google_app_engine/.goconvey | 1 + .../examples/google_app_engine/app.yaml | 20 + .../google_app_engine/datastore/.goconvey | 1 + .../google_app_engine/datastore/app.yaml | 18 + .../google_app_engine/datastore/main.go | 266 + .../restful-appstats-integration.go | 12 + .../google_app_engine/restful-user-service.go | 161 + .../emicklei/go-restful/examples/home.html | 7 + .../examples/msgpack/msgpack_entity.go | 34 + .../examples/msgpack/msgpack_entity_test.go | 160 + .../examples/restful-CORS-filter.go | 68 + .../examples/restful-NCSA-logging.go | 54 + .../examples/restful-basic-authentication.go | 35 + .../examples/restful-cpuprofiler-service.go | 65 + .../examples/restful-curly-router.go | 107 + .../examples/restful-curly-router_test.go | 149 + .../examples/restful-encoding-filter.go | 61 + .../go-restful/examples/restful-filters.go | 114 + .../examples/restful-form-handling.go | 62 + .../examples/restful-hello-world.go | 22 + .../examples/restful-html-template.go | 35 + .../examples/restful-multi-containers.go | 43 + .../examples/restful-no-cache-filter.go | 24 + .../examples/restful-options-filter.go | 51 + .../go-restful/examples/restful-path-tail.go | 26 + .../examples/restful-pre-post-filters.go | 98 + .../examples/restful-resource-functions.go | 63 + .../go-restful/examples/restful-route_test.go | 39 + .../examples/restful-routefunction_test.go | 29 + 
.../examples/restful-serve-static.go | 47 + .../go-restful/examples/restful-swagger.go | 61 + .../examples/restful-user-resource.go | 152 + .../examples/restful-user-service.go | 137 + .../github.com/emicklei/go-restful/filter.go | 35 + .../emicklei/go-restful/filter_test.go | 141 + .../github.com/emicklei/go-restful/install.sh | 10 + .../github.com/emicklei/go-restful/jsr311.go | 248 + .../emicklei/go-restful/jsr311_test.go | 212 + .../github.com/emicklei/go-restful/log/log.go | 31 + .../github.com/emicklei/go-restful/logger.go | 32 + vendor/github.com/emicklei/go-restful/mime.go | 45 + .../emicklei/go-restful/mime_test.go | 17 + .../emicklei/go-restful/options_filter.go | 26 + .../go-restful/options_filter_test.go | 34 + .../emicklei/go-restful/parameter.go | 114 + .../emicklei/go-restful/path_expression.go | 69 + .../go-restful/path_expression_test.go | 37 + .../github.com/emicklei/go-restful/request.go | 136 + .../emicklei/go-restful/request_test.go | 204 + .../emicklei/go-restful/response.go | 235 + .../emicklei/go-restful/response_test.go | 213 + .../github.com/emicklei/go-restful/route.go | 183 + .../emicklei/go-restful/route_builder.go | 240 + .../emicklei/go-restful/route_builder_test.go | 58 + .../emicklei/go-restful/route_test.go | 127 + .../github.com/emicklei/go-restful/router.go | 18 + .../emicklei/go-restful/service_error.go | 23 + .../emicklei/go-restful/swagger/CHANGES.md | 43 + .../emicklei/go-restful/swagger/README.md | 76 + .../swagger/api_declaration_list.go | 64 + .../emicklei/go-restful/swagger/config.go | 46 + .../go-restful/swagger/model_builder.go | 467 + .../go-restful/swagger/model_builder_test.go | 1283 + .../emicklei/go-restful/swagger/model_list.go | 86 + .../go-restful/swagger/model_list_test.go | 48 + .../go-restful/swagger/model_property_ext.go | 81 + .../swagger/model_property_ext_test.go | 70 + .../go-restful/swagger/model_property_list.go | 87 + .../swagger/model_property_list_test.go | 47 + 
.../go-restful/swagger/ordered_route_map.go | 36 + .../swagger/ordered_route_map_test.go | 29 + .../swagger/postbuild_model_test.go | 42 + .../emicklei/go-restful/swagger/swagger.go | 185 + .../go-restful/swagger/swagger_builder.go | 21 + .../go-restful/swagger/swagger_test.go | 284 + .../go-restful/swagger/swagger_webservice.go | 440 + .../go-restful/swagger/test_package/struct.go | 5 + .../emicklei/go-restful/swagger/utils_test.go | 86 + .../emicklei/go-restful/tracer_test.go | 18 + .../emicklei/go-restful/web_service.go | 268 + .../go-restful/web_service_container.go | 39 + .../emicklei/go-restful/web_service_test.go | 320 + .../go-openapi/jsonpointer/.editorconfig | 26 + .../jsonpointer/.github/CONTRIBUTING.md | 117 + .../go-openapi/jsonpointer/.gitignore | 1 + .../go-openapi/jsonpointer/.travis.yml | 13 + .../go-openapi/jsonpointer/CODE_OF_CONDUCT.md | 74 + .../github.com/go-openapi/jsonpointer/LICENSE | 202 + .../go-openapi/jsonpointer/README.md | 15 + .../go-openapi/jsonpointer/pointer.go | 390 + .../go-openapi/jsonpointer/pointer_test.go | 573 + .../jsonreference/.github/CONTRIBUTING.md | 117 + .../go-openapi/jsonreference/.gitignore | 1 + .../go-openapi/jsonreference/.travis.yml | 14 + .../jsonreference/CODE_OF_CONDUCT.md | 74 + .../go-openapi/jsonreference/LICENSE | 202 + .../go-openapi/jsonreference/README.md | 15 + .../go-openapi/jsonreference/reference.go | 156 + .../jsonreference/reference_test.go | 420 + .../github.com/go-openapi/spec/.editorconfig | 26 + .../go-openapi/spec/.github/CONTRIBUTING.md | 117 + vendor/github.com/go-openapi/spec/.gitignore | 2 + vendor/github.com/go-openapi/spec/.travis.yml | 16 + .../go-openapi/spec/CODE_OF_CONDUCT.md | 74 + vendor/github.com/go-openapi/spec/LICENSE | 202 + vendor/github.com/go-openapi/spec/README.md | 5 + .../github.com/go-openapi/spec/auth_test.go | 128 + vendor/github.com/go-openapi/spec/bindata.go | 260 + .../go-openapi/spec/contact_info.go | 24 + .../go-openapi/spec/contact_info_test.go | 37 + 
vendor/github.com/go-openapi/spec/expander.go | 857 + .../go-openapi/spec/expander_test.go | 1133 + .../go-openapi/spec/external_docs.go | 24 + .../go-openapi/spec/external_docs_test.go | 29 + .../fixtures/expansion/all-the-things.json | 254 + .../spec/fixtures/expansion/circularRefs.json | 54 + .../spec/fixtures/expansion/circularSpec.json | 1 + .../spec/fixtures/expansion/circularSpec.yaml | 67 + .../spec/fixtures/expansion/clickmeter.json | 1 + .../spec/fixtures/expansion/clickmeter.yaml | 6461 ++ .../spec/fixtures/expansion/invalid-refs.json | 85 + .../spec/fixtures/expansion/overflow.json | 124 + .../spec/fixtures/expansion/params.json | 25 + .../spec/fixtures/expansion/schemas1.json | 127 + .../spec/fixtures/expansion/schemas2.json | 161 + .../spec/fixtures/local_expansion/item.json | 18 + .../spec/fixtures/local_expansion/spec.json | 46 + .../spec/fixtures/specs/deeper/arrayProp.json | 6 + .../fixtures/specs/deeper/stringProp.json | 3 + .../go-openapi/spec/fixtures/specs/refed.json | 224 + .../spec/fixtures/specs/resolution.json | 14 + .../spec/fixtures/specs/resolution2.json | 9 + vendor/github.com/go-openapi/spec/header.go | 195 + .../github.com/go-openapi/spec/header_test.go | 90 + vendor/github.com/go-openapi/spec/info.go | 168 + .../github.com/go-openapi/spec/info_test.go | 65 + vendor/github.com/go-openapi/spec/items.go | 219 + .../github.com/go-openapi/spec/items_test.go | 81 + vendor/github.com/go-openapi/spec/license.go | 23 + .../go-openapi/spec/license_test.go | 28 + .../github.com/go-openapi/spec/operation.go | 233 + .../go-openapi/spec/operation_test.go | 85 + .../github.com/go-openapi/spec/parameter.go | 301 + .../go-openapi/spec/parameters_test.go | 156 + .../github.com/go-openapi/spec/path_item.go | 90 + .../go-openapi/spec/path_item_test.go | 81 + vendor/github.com/go-openapi/spec/paths.go | 97 + .../github.com/go-openapi/spec/paths_test.go | 43 + .../go-openapi/spec/properties_test.go | 58 + vendor/github.com/go-openapi/spec/ref.go | 171 + 
vendor/github.com/go-openapi/spec/response.go | 134 + .../go-openapi/spec/response_test.go | 53 + .../github.com/go-openapi/spec/responses.go | 122 + vendor/github.com/go-openapi/spec/schema.go | 628 + .../github.com/go-openapi/spec/schema_test.go | 205 + .../spec/schemas/jsonschema-draft-04.json | 150 + .../go-openapi/spec/schemas/v2/README.md | 5 + .../go-openapi/spec/schemas/v2/schema.json | 1607 + .../go-openapi/spec/security_scheme.go | 142 + vendor/github.com/go-openapi/spec/spec.go | 86 + .../go-openapi/spec/structs_test.go | 110 + vendor/github.com/go-openapi/spec/swagger.go | 317 + .../go-openapi/spec/swagger_test.go | 365 + vendor/github.com/go-openapi/spec/tag.go | 73 + .../github.com/go-openapi/spec/xml_object.go | 68 + .../go-openapi/spec/xml_object_test.go | 65 + .../github.com/go-openapi/swag/.editorconfig | 26 + .../go-openapi/swag/.github/CONTRIBUTING.md | 117 + vendor/github.com/go-openapi/swag/.gitignore | 1 + vendor/github.com/go-openapi/swag/.travis.yml | 13 + .../go-openapi/swag/CODE_OF_CONDUCT.md | 74 + vendor/github.com/go-openapi/swag/LICENSE | 202 + vendor/github.com/go-openapi/swag/README.md | 12 + vendor/github.com/go-openapi/swag/convert.go | 188 + .../go-openapi/swag/convert_test.go | 215 + .../go-openapi/swag/convert_types.go | 595 + .../go-openapi/swag/convert_types_test.go | 579 + vendor/github.com/go-openapi/swag/json.go | 291 + .../github.com/go-openapi/swag/json_test.go | 163 + vendor/github.com/go-openapi/swag/loading.go | 74 + .../go-openapi/swag/loading_test.go | 47 + vendor/github.com/go-openapi/swag/net.go | 24 + vendor/github.com/go-openapi/swag/net_test.go | 30 + vendor/github.com/go-openapi/swag/path.go | 56 + .../github.com/go-openapi/swag/path_test.go | 118 + vendor/github.com/go-openapi/swag/util.go | 336 + .../github.com/go-openapi/swag/util_test.go | 277 + vendor/github.com/golang/glog/LICENSE | 191 + vendor/github.com/golang/glog/README | 44 + vendor/github.com/golang/glog/glog.go | 1180 + 
vendor/github.com/golang/glog/glog_file.go | 124 + vendor/github.com/golang/glog/glog_test.go | 415 + .../github.com/golang/groupcache/.gitignore | 1 + vendor/github.com/golang/groupcache/LICENSE | 191 + vendor/github.com/golang/groupcache/README.md | 73 + .../github.com/golang/groupcache/byteview.go | 175 + .../golang/groupcache/byteview_test.go | 147 + .../consistenthash/consistenthash.go | 81 + .../consistenthash/consistenthash_test.go | 110 + .../golang/groupcache/groupcache.go | 491 + .../golang/groupcache/groupcache_test.go | 456 + .../groupcache/groupcachepb/groupcache.pb.go | 65 + .../groupcache/groupcachepb/groupcache.proto | 34 + vendor/github.com/golang/groupcache/http.go | 227 + .../github.com/golang/groupcache/http_test.go | 166 + .../github.com/golang/groupcache/lru/lru.go | 133 + .../golang/groupcache/lru/lru_test.go | 73 + vendor/github.com/golang/groupcache/peers.go | 85 + .../groupcache/singleflight/singleflight.go | 64 + .../singleflight/singleflight_test.go | 85 + vendor/github.com/golang/groupcache/sinks.go | 322 + .../golang/groupcache/testpb/test.pb.go | 235 + .../golang/groupcache/testpb/test.proto | 63 + vendor/github.com/google/gofuzz/.travis.yml | 13 + .../github.com/google/gofuzz/CONTRIBUTING.md | 67 + vendor/github.com/google/gofuzz/LICENSE | 202 + vendor/github.com/google/gofuzz/README.md | 71 + vendor/github.com/google/gofuzz/doc.go | 18 + .../github.com/google/gofuzz/example_test.go | 225 + vendor/github.com/google/gofuzz/fuzz.go | 453 + vendor/github.com/google/gofuzz/fuzz_test.go | 428 + .../github.com/googleapis/gax-go/.gitignore | 1 + .../github.com/googleapis/gax-go/.travis.yml | 15 + .../googleapis/gax-go/CONTRIBUTING.md | 27 + vendor/github.com/googleapis/gax-go/LICENSE | 27 + vendor/github.com/googleapis/gax-go/README.md | 24 + .../googleapis/gax-go/call_option.go | 149 + .../googleapis/gax-go/call_option_test.go | 88 + vendor/github.com/googleapis/gax-go/gax.go | 40 + vendor/github.com/googleapis/gax-go/header.go | 24 + 
.../googleapis/gax-go/header_test.go | 19 + vendor/github.com/googleapis/gax-go/invoke.go | 90 + .../googleapis/gax-go/invoke_test.go | 156 + .../googleapis/gax-go/path_template.go | 176 + .../googleapis/gax-go/path_template_parser.go | 227 + .../googleapis/gax-go/path_template_test.go | 211 + .../github.com/jonboulle/clockwork/.gitignore | 25 + .../jonboulle/clockwork/.travis.yml | 5 + vendor/github.com/jonboulle/clockwork/LICENSE | 201 + .../github.com/jonboulle/clockwork/README.md | 61 + .../jonboulle/clockwork/clockwork.go | 169 + .../jonboulle/clockwork/clockwork_test.go | 129 + .../jonboulle/clockwork/example_test.go | 49 + vendor/github.com/juju/ratelimit/LICENSE | 191 + vendor/github.com/juju/ratelimit/README.md | 117 + vendor/github.com/juju/ratelimit/ratelimit.go | 245 + .../juju/ratelimit/ratelimit_test.go | 389 + vendor/github.com/juju/ratelimit/reader.go | 51 + vendor/github.com/kr/pty/.gitignore | 4 + vendor/github.com/kr/pty/License | 23 + vendor/github.com/kr/pty/README.md | 36 + vendor/github.com/kr/pty/doc.go | 16 + vendor/github.com/kr/pty/ioctl.go | 13 + vendor/github.com/kr/pty/ioctl_bsd.go | 39 + vendor/github.com/kr/pty/mktypes.bash | 19 + vendor/github.com/kr/pty/pty_darwin.go | 60 + vendor/github.com/kr/pty/pty_dragonfly.go | 76 + vendor/github.com/kr/pty/pty_freebsd.go | 73 + vendor/github.com/kr/pty/pty_linux.go | 46 + vendor/github.com/kr/pty/pty_unsupported.go | 11 + vendor/github.com/kr/pty/run.go | 34 + vendor/github.com/kr/pty/types.go | 10 + vendor/github.com/kr/pty/types_dragonfly.go | 17 + vendor/github.com/kr/pty/types_freebsd.go | 15 + vendor/github.com/kr/pty/util.go | 37 + vendor/github.com/kr/pty/ztypes_386.go | 9 + vendor/github.com/kr/pty/ztypes_amd64.go | 9 + vendor/github.com/kr/pty/ztypes_arm.go | 9 + vendor/github.com/kr/pty/ztypes_arm64.go | 11 + .../kr/pty/ztypes_dragonfly_amd64.go | 14 + .../github.com/kr/pty/ztypes_freebsd_386.go | 13 + .../github.com/kr/pty/ztypes_freebsd_amd64.go | 14 + 
.../github.com/kr/pty/ztypes_freebsd_arm.go | 13 + vendor/github.com/kr/pty/ztypes_mipsx.go | 12 + vendor/github.com/kr/pty/ztypes_ppc64.go | 11 + vendor/github.com/kr/pty/ztypes_ppc64le.go | 11 + vendor/github.com/kr/pty/ztypes_s390x.go | 11 + vendor/github.com/mailru/easyjson/.gitignore | 4 + vendor/github.com/mailru/easyjson/.travis.yml | 8 + vendor/github.com/mailru/easyjson/LICENSE | 7 + vendor/github.com/mailru/easyjson/Makefile | 54 + vendor/github.com/mailru/easyjson/README.md | 193 + .../mailru/easyjson/benchmark/codec_test.go | 279 + .../mailru/easyjson/benchmark/data.go | 148 + .../mailru/easyjson/benchmark/data_codec.go | 6911 ++ .../mailru/easyjson/benchmark/data_ffjson.go | 6723 ++ .../mailru/easyjson/benchmark/data_var.go | 350 + .../mailru/easyjson/benchmark/default_test.go | 118 + .../mailru/easyjson/benchmark/dummy_test.go | 11 + .../easyjson/benchmark/easyjson_test.go | 184 + .../mailru/easyjson/benchmark/example.json | 415 + .../mailru/easyjson/benchmark/ffjson_test.go | 190 + .../mailru/easyjson/benchmark/ujson.sh | 7 + .../mailru/easyjson/bootstrap/bootstrap.go | 180 + .../github.com/mailru/easyjson/buffer/pool.go | 207 + .../mailru/easyjson/buffer/pool_test.go | 79 + .../mailru/easyjson/easyjson/main.go | 99 + .../github.com/mailru/easyjson/gen/decoder.go | 458 + .../github.com/mailru/easyjson/gen/encoder.go | 351 + .../mailru/easyjson/gen/generator.go | 438 + .../mailru/easyjson/gen/generator_test.go | 65 + vendor/github.com/mailru/easyjson/helpers.go | 78 + .../mailru/easyjson/jlexer/error.go | 15 + .../mailru/easyjson/jlexer/lexer.go | 1121 + .../mailru/easyjson/jlexer/lexer_test.go | 251 + .../mailru/easyjson/jwriter/writer.go | 302 + .../mailru/easyjson/opt/gotemplate_Bool.go | 79 + .../mailru/easyjson/opt/gotemplate_Float32.go | 79 + .../mailru/easyjson/opt/gotemplate_Float64.go | 79 + .../mailru/easyjson/opt/gotemplate_Int.go | 79 + .../mailru/easyjson/opt/gotemplate_Int16.go | 79 + .../mailru/easyjson/opt/gotemplate_Int32.go | 79 + 
.../mailru/easyjson/opt/gotemplate_Int64.go | 79 + .../mailru/easyjson/opt/gotemplate_Int8.go | 79 + .../mailru/easyjson/opt/gotemplate_String.go | 79 + .../mailru/easyjson/opt/gotemplate_Uint.go | 79 + .../mailru/easyjson/opt/gotemplate_Uint16.go | 79 + .../mailru/easyjson/opt/gotemplate_Uint32.go | 79 + .../mailru/easyjson/opt/gotemplate_Uint64.go | 79 + .../mailru/easyjson/opt/gotemplate_Uint8.go | 79 + .../mailru/easyjson/opt/optional/opt.go | 80 + vendor/github.com/mailru/easyjson/opt/opts.go | 22 + .../mailru/easyjson/parser/parser.go | 91 + .../mailru/easyjson/parser/parser_unix.go | 33 + .../mailru/easyjson/parser/parser_windows.go | 37 + vendor/github.com/mailru/easyjson/raw.go | 45 + .../mailru/easyjson/tests/basic_test.go | 221 + .../github.com/mailru/easyjson/tests/data.go | 650 + .../mailru/easyjson/tests/errors.go | 23 + .../mailru/easyjson/tests/errors_test.go | 243 + .../mailru/easyjson/tests/named_type.go | 22 + .../mailru/easyjson/tests/nested_easy.go | 25 + .../mailru/easyjson/tests/nothing.go | 3 + .../mailru/easyjson/tests/omitempty.go | 12 + .../mailru/easyjson/tests/required_test.go | 28 + .../github.com/mailru/easyjson/tests/snake.go | 10 + .../mitchellh/go-wordwrap/LICENSE.md | 21 + .../mitchellh/go-wordwrap/README.md | 39 + .../mitchellh/go-wordwrap/wordwrap.go | 73 + .../mitchellh/go-wordwrap/wordwrap_test.go | 85 + .../opencontainers/runc/Godeps/Godeps.json | 87 + .../opencontainers/runc/Godeps/Readme | 5 + .../runc/Godeps/_workspace/.gitignore | 2 + .../src/github.com/Sirupsen/logrus/.gitignore | 1 + .../github.com/Sirupsen/logrus/.travis.yml | 8 + .../github.com/Sirupsen/logrus/CHANGELOG.md | 7 + .../src/github.com/Sirupsen/logrus/LICENSE | 21 + .../src/github.com/Sirupsen/logrus/README.md | 349 + .../src/github.com/Sirupsen/logrus/entry.go | 252 + .../Sirupsen/logrus/examples/basic/basic.go | 50 + .../Sirupsen/logrus/examples/hook/hook.go | 30 + .../github.com/Sirupsen/logrus/exported.go | 188 + 
.../github.com/Sirupsen/logrus/formatter.go | 48 + .../logrus/formatters/logstash/logstash.go | 56 + .../src/github.com/Sirupsen/logrus/hooks.go | 34 + .../logrus/hooks/airbrake/airbrake.go | 54 + .../Sirupsen/logrus/hooks/bugsnag/bugsnag.go | 68 + .../logrus/hooks/papertrail/README.md | 28 + .../logrus/hooks/papertrail/papertrail.go | 55 + .../Sirupsen/logrus/hooks/sentry/README.md | 61 + .../Sirupsen/logrus/hooks/sentry/sentry.go | 100 + .../Sirupsen/logrus/hooks/syslog/README.md | 20 + .../Sirupsen/logrus/hooks/syslog/syslog.go | 59 + .../Sirupsen/logrus/json_formatter.go | 40 + .../src/github.com/Sirupsen/logrus/logger.go | 203 + .../src/github.com/Sirupsen/logrus/logrus.go | 94 + .../Sirupsen/logrus/terminal_darwin.go | 12 + .../Sirupsen/logrus/terminal_freebsd.go | 20 + .../Sirupsen/logrus/terminal_linux.go | 12 + .../Sirupsen/logrus/terminal_notwindows.go | 21 + .../Sirupsen/logrus/terminal_openbsd.go | 7 + .../Sirupsen/logrus/terminal_windows.go | 27 + .../Sirupsen/logrus/text_formatter.go | 149 + .../src/github.com/Sirupsen/logrus/writer.go | 31 + .../src/github.com/coreos/go-systemd/LICENSE | 191 + .../coreos/go-systemd/activation/files.go | 52 + .../coreos/go-systemd/activation/listeners.go | 60 + .../go-systemd/activation/packetconns.go | 37 + .../github.com/coreos/go-systemd/dbus/dbus.go | 213 + .../coreos/go-systemd/dbus/methods.go | 565 + .../coreos/go-systemd/dbus/properties.go | 237 + .../github.com/coreos/go-systemd/dbus/set.go | 47 + .../coreos/go-systemd/dbus/subscription.go | 250 + .../go-systemd/dbus/subscription_set.go | 57 + .../github.com/coreos/go-systemd/util/util.go | 90 + .../coreos/go-systemd/util/util_cgo.go | 174 + .../coreos/go-systemd/util/util_stub.go | 23 + .../src/github.com/coreos/pkg/LICENSE | 202 + .../src/github.com/coreos/pkg/NOTICE | 5 + .../github.com/coreos/pkg/dlopen/dlopen.go | 82 + .../coreos/pkg/dlopen/dlopen_example.go | 56 + .../src/github.com/docker/docker/LICENSE | 191 + .../src/github.com/docker/docker/NOTICE | 
19 + .../docker/docker/contrib/syntax/vim/LICENSE | 22 + .../docker/docs/project/images/red_notice.png | Bin 0 -> 45387 bytes .../docker/docker/pkg/mflag/LICENSE | 27 + .../docker/docker/pkg/mount/flags.go | 69 + .../docker/docker/pkg/mount/flags_freebsd.go | 48 + .../docker/docker/pkg/mount/flags_linux.go | 85 + .../docker/pkg/mount/flags_unsupported.go | 30 + .../docker/docker/pkg/mount/mount.go | 74 + .../docker/pkg/mount/mounter_freebsd.go | 59 + .../docker/docker/pkg/mount/mounter_linux.go | 21 + .../docker/pkg/mount/mounter_unsupported.go | 11 + .../docker/docker/pkg/mount/mountinfo.go | 40 + .../docker/pkg/mount/mountinfo_freebsd.go | 41 + .../docker/pkg/mount/mountinfo_linux.go | 95 + .../docker/pkg/mount/mountinfo_unsupported.go | 12 + .../docker/pkg/mount/sharedsubtree_linux.go | 70 + .../docker/docker/pkg/symlink/LICENSE.APACHE | 191 + .../docker/docker/pkg/symlink/LICENSE.BSD | 27 + .../docker/docker/pkg/symlink/README.md | 5 + .../docker/docker/pkg/symlink/fs.go | 131 + .../docker/docker/pkg/term/tc_linux_cgo.go | 48 + .../docker/docker/pkg/term/tc_other.go | 19 + .../github.com/docker/docker/pkg/term/term.go | 118 + .../docker/docker/pkg/term/term_windows.go | 139 + .../docker/docker/pkg/term/termios_darwin.go | 65 + .../docker/docker/pkg/term/termios_freebsd.go | 65 + .../docker/docker/pkg/term/termios_linux.go | 46 + .../pkg/term/winconsole/console_windows.go | 1053 + .../pkg/term/winconsole/term_emulator.go | 234 + .../src/github.com/docker/go-units/LICENSE | 191 + .../src/github.com/docker/go-units/README.md | 11 + .../github.com/docker/go-units/duration.go | 33 + .../src/github.com/docker/go-units/size.go | 95 + .../github.com/godbus/dbus/CONTRIBUTING.md | 50 + .../src/github.com/godbus/dbus/LICENSE | 25 + .../src/github.com/godbus/dbus/MAINTAINERS | 2 + .../github.com/godbus/dbus/README.markdown | 41 + .../src/github.com/godbus/dbus/auth.go | 253 + .../github.com/godbus/dbus/auth_external.go | 26 + .../src/github.com/godbus/dbus/auth_sha1.go | 
102 + .../src/github.com/godbus/dbus/call.go | 36 + .../src/github.com/godbus/dbus/conn.go | 625 + .../src/github.com/godbus/dbus/conn_darwin.go | 21 + .../src/github.com/godbus/dbus/conn_other.go | 27 + .../src/github.com/godbus/dbus/dbus.go | 258 + .../src/github.com/godbus/dbus/decoder.go | 228 + .../src/github.com/godbus/dbus/doc.go | 63 + .../src/github.com/godbus/dbus/encoder.go | 208 + .../src/github.com/godbus/dbus/export.go | 411 + .../src/github.com/godbus/dbus/homedir.go | 28 + .../github.com/godbus/dbus/homedir_dynamic.go | 15 + .../github.com/godbus/dbus/homedir_static.go | 45 + .../github.com/godbus/dbus/introspect/call.go | 27 + .../godbus/dbus/introspect/introspect.go | 86 + .../godbus/dbus/introspect/introspectable.go | 76 + .../src/github.com/godbus/dbus/message.go | 346 + .../src/github.com/godbus/dbus/object.go | 126 + .../src/github.com/godbus/dbus/prop/prop.go | 264 + .../src/github.com/godbus/dbus/sig.go | 257 + .../godbus/dbus/transport_darwin.go | 6 + .../godbus/dbus/transport_generic.go | 35 + .../github.com/godbus/dbus/transport_unix.go | 196 + .../dbus/transport_unixcred_dragonfly.go | 95 + .../godbus/dbus/transport_unixcred_linux.go | 25 + .../src/github.com/godbus/dbus/variant.go | 139 + .../github.com/godbus/dbus/variant_lexer.go | 284 + .../github.com/godbus/dbus/variant_parser.go | 817 + .../src/github.com/golang/protobuf/LICENSE | 31 + .../github.com/golang/protobuf/proto/Makefile | 43 + .../github.com/golang/protobuf/proto/clone.go | 197 + .../golang/protobuf/proto/decode.go | 821 + .../golang/protobuf/proto/encode.go | 1286 + .../github.com/golang/protobuf/proto/equal.go | 256 + .../golang/protobuf/proto/extensions.go | 362 + .../github.com/golang/protobuf/proto/lib.go | 759 + .../golang/protobuf/proto/message_set.go | 287 + .../golang/protobuf/proto/pointer_reflect.go | 479 + .../golang/protobuf/proto/pointer_unsafe.go | 266 + .../golang/protobuf/proto/properties.go | 737 + .../protobuf/proto/proto3_proto/Makefile | 44 + 
.../protobuf/proto/proto3_proto/proto3.proto | 58 + .../github.com/golang/protobuf/proto/text.go | 789 + .../golang/protobuf/proto/text_parser.go | 757 + .../src/github.com/mrunalp/fileutils/LICENSE | 191 + .../github.com/mrunalp/fileutils/README.md | 5 + .../github.com/mrunalp/fileutils/fileutils.go | 161 + .../github.com/mrunalp/fileutils/idtools.go | 49 + .../opencontainers/runtime-spec/LICENSE | 191 + .../runtime-spec/specs-go/config.go | 535 + .../runtime-spec/specs-go/state.go | 17 + .../runtime-spec/specs-go/version.go | 18 + .../src/github.com/pquerna/ffjson/LICENSE | 202 + .../src/github.com/pquerna/ffjson/NOTICE | 8 + .../pquerna/ffjson/fflib/v1/buffer.go | 414 + .../pquerna/ffjson/fflib/v1/buffer_nopool.go | 11 + .../pquerna/ffjson/fflib/v1/buffer_pool.go | 105 + .../pquerna/ffjson/fflib/v1/bytenum.go | 88 + .../pquerna/ffjson/fflib/v1/decimal.go | 378 + .../pquerna/ffjson/fflib/v1/extfloat.go | 668 + .../pquerna/ffjson/fflib/v1/fold.go | 121 + .../pquerna/ffjson/fflib/v1/ftoa.go | 542 + .../pquerna/ffjson/fflib/v1/internal/atof.go | 936 + .../pquerna/ffjson/fflib/v1/internal/atoi.go | 213 + .../ffjson/fflib/v1/internal/extfloat.go | 668 + .../pquerna/ffjson/fflib/v1/internal/ftoa.go | 475 + .../pquerna/ffjson/fflib/v1/iota.go | 161 + .../pquerna/ffjson/fflib/v1/jsonstring.go | 512 + .../pquerna/ffjson/fflib/v1/lexer.go | 944 + .../pquerna/ffjson/fflib/v1/reader.go | 509 + .../ffjson/fflib/v1/reader_scan_amd64.go | 49 + .../ffjson/fflib/v1/reader_scan_amd64.s | 22 + .../ffjson/fflib/v1/reader_scan_generic.go | 36 + .../seccomp/libseccomp-golang/LICENSE | 22 + .../seccomp/libseccomp-golang/README | 26 + .../seccomp/libseccomp-golang/seccomp.go | 857 + .../libseccomp-golang/seccomp_internal.go | 506 + .../github.com/syndtr/gocapability/LICENSE | 24 + .../gocapability/capability/capability.go | 72 + .../capability/capability_linux.go | 650 + .../capability/capability_noop.go | 19 + .../syndtr/gocapability/capability/enum.go | 268 + 
.../gocapability/capability/enum_gen.go | 129 + .../gocapability/capability/enumgen/gen.go | 92 + .../gocapability/capability/syscall_linux.go | 154 + .../src/github.com/urfave/cli/.gitignore | 2 + .../src/github.com/urfave/cli/.travis.yml | 36 + .../src/github.com/urfave/cli/CHANGELOG.md | 336 + .../src/github.com/urfave/cli/LICENSE | 21 + .../src/github.com/urfave/cli/README.md | 1313 + .../github.com/urfave/cli/altsrc/altsrc.go | 3 + .../src/github.com/urfave/cli/altsrc/flag.go | 263 + .../urfave/cli/altsrc/flag_generated.go | 256 + .../urfave/cli/altsrc/input_source_context.go | 21 + .../urfave/cli/altsrc/map_input_source.go | 248 + .../urfave/cli/altsrc/toml_file_loader.go | 113 + .../urfave/cli/altsrc/yaml_file_loader.go | 84 + .../src/github.com/urfave/cli/app.go | 502 + .../src/github.com/urfave/cli/appveyor.yml | 24 + .../urfave/cli/autocomplete/bash_autocomplete | 14 + .../urfave/cli/autocomplete/zsh_autocomplete | 5 + .../src/github.com/urfave/cli/category.go | 44 + .../src/github.com/urfave/cli/cli.go | 21 + .../src/github.com/urfave/cli/command.go | 284 + .../src/github.com/urfave/cli/context.go | 264 + .../src/github.com/urfave/cli/errors.go | 98 + .../src/github.com/urfave/cli/flag-types.json | 93 + .../src/github.com/urfave/cli/flag.go | 621 + .../github.com/urfave/cli/flag_generated.go | 627 + .../src/github.com/urfave/cli/funcs.go | 28 + .../github.com/urfave/cli/generate-flag-types | 248 + .../src/github.com/urfave/cli/help.go | 267 + .../src/github.com/urfave/cli/runtests | 122 + .../vishvananda/netlink/.travis.yml | 3 + .../github.com/vishvananda/netlink/LICENSE | 192 + .../github.com/vishvananda/netlink/Makefile | 29 + .../github.com/vishvananda/netlink/README.md | 89 + .../github.com/vishvananda/netlink/addr.go | 43 + .../vishvananda/netlink/addr_linux.go | 128 + .../github.com/vishvananda/netlink/filter.go | 55 + .../vishvananda/netlink/filter_linux.go | 191 + .../github.com/vishvananda/netlink/link.go | 223 + 
.../vishvananda/netlink/link_linux.go | 750 + .../github.com/vishvananda/netlink/neigh.go | 22 + .../vishvananda/netlink/neigh_linux.go | 189 + .../github.com/vishvananda/netlink/netlink.go | 39 + .../netlink/netlink_unspecified.go | 143 + .../vishvananda/netlink/nl/addr_linux.go | 47 + .../vishvananda/netlink/nl/link_linux.go | 104 + .../vishvananda/netlink/nl/nl_linux.go | 418 + .../vishvananda/netlink/nl/route_linux.go | 42 + .../vishvananda/netlink/nl/tc_linux.go | 359 + .../vishvananda/netlink/nl/xfrm_linux.go | 258 + .../netlink/nl/xfrm_policy_linux.go | 119 + .../netlink/nl/xfrm_state_linux.go | 221 + .../vishvananda/netlink/protinfo.go | 53 + .../vishvananda/netlink/protinfo_linux.go | 60 + .../github.com/vishvananda/netlink/qdisc.go | 138 + .../vishvananda/netlink/qdisc_linux.go | 263 + .../github.com/vishvananda/netlink/route.go | 35 + .../vishvananda/netlink/route_linux.go | 225 + .../github.com/vishvananda/netlink/xfrm.go | 64 + .../vishvananda/netlink/xfrm_policy.go | 59 + .../vishvananda/netlink/xfrm_policy_linux.go | 127 + .../vishvananda/netlink/xfrm_state.go | 53 + .../vishvananda/netlink/xfrm_state_linux.go | 181 + .../runtime-tools/Godeps/Godeps.json | 54 + .../runtime-tools/Godeps/Readme | 5 + .../Godeps/_workspace/.gitignore | 2 + .../src/github.com/Sirupsen/logrus/.gitignore | 1 + .../github.com/Sirupsen/logrus/.travis.yml | 8 + .../github.com/Sirupsen/logrus/CHANGELOG.md | 29 + .../src/github.com/Sirupsen/logrus/LICENSE | 21 + .../src/github.com/Sirupsen/logrus/README.md | 356 + .../src/github.com/Sirupsen/logrus/entry.go | 255 + .../github.com/Sirupsen/logrus/entry_test.go | 67 + .../Sirupsen/logrus/examples/basic/basic.go | 50 + .../Sirupsen/logrus/examples/hook/hook.go | 30 + .../github.com/Sirupsen/logrus/exported.go | 188 + .../github.com/Sirupsen/logrus/formatter.go | 48 + .../Sirupsen/logrus/formatter_bench_test.go | 98 + .../logrus/formatters/logstash/logstash.go | 56 + .../formatters/logstash/logstash_test.go | 52 + 
.../github.com/Sirupsen/logrus/hook_test.go | 122 + .../src/github.com/Sirupsen/logrus/hooks.go | 34 + .../logrus/hooks/airbrake/airbrake.go | 54 + .../logrus/hooks/airbrake/airbrake_test.go | 133 + .../Sirupsen/logrus/hooks/bugsnag/bugsnag.go | 68 + .../logrus/hooks/bugsnag/bugsnag_test.go | 64 + .../logrus/hooks/papertrail/README.md | 28 + .../logrus/hooks/papertrail/papertrail.go | 55 + .../hooks/papertrail/papertrail_test.go | 26 + .../Sirupsen/logrus/hooks/sentry/README.md | 78 + .../Sirupsen/logrus/hooks/sentry/sentry.go | 131 + .../logrus/hooks/sentry/sentry_test.go | 132 + .../Sirupsen/logrus/hooks/syslog/README.md | 20 + .../Sirupsen/logrus/hooks/syslog/syslog.go | 59 + .../logrus/hooks/syslog/syslog_test.go | 26 + .../Sirupsen/logrus/json_formatter.go | 41 + .../Sirupsen/logrus/json_formatter_test.go | 120 + .../src/github.com/Sirupsen/logrus/logger.go | 206 + .../src/github.com/Sirupsen/logrus/logrus.go | 94 + .../github.com/Sirupsen/logrus/logrus_test.go | 301 + .../Sirupsen/logrus/terminal_bsd.go | 9 + .../Sirupsen/logrus/terminal_linux.go | 12 + .../Sirupsen/logrus/terminal_notwindows.go | 21 + .../Sirupsen/logrus/terminal_windows.go | 27 + .../Sirupsen/logrus/text_formatter.go | 158 + .../Sirupsen/logrus/text_formatter_test.go | 61 + .../src/github.com/Sirupsen/logrus/writer.go | 31 + .../src/github.com/blang/semver/LICENSE | 22 + .../src/github.com/blang/semver/README.md | 191 + .../github.com/blang/semver/examples/main.go | 83 + .../src/github.com/blang/semver/json.go | 23 + .../src/github.com/blang/semver/json_test.go | 45 + .../src/github.com/blang/semver/range.go | 224 + .../src/github.com/blang/semver/range_test.go | 442 + .../src/github.com/blang/semver/semver.go | 395 + .../github.com/blang/semver/semver_test.go | 417 + .../src/github.com/blang/semver/sort.go | 28 + .../src/github.com/blang/semver/sort_test.go | 30 + .../src/github.com/blang/semver/sql.go | 30 + .../src/github.com/blang/semver/sql_test.go | 38 + 
.../src/github.com/hashicorp/errwrap/LICENSE | 354 + .../github.com/hashicorp/errwrap/README.md | 89 + .../github.com/hashicorp/errwrap/errwrap.go | 169 + .../hashicorp/go-multierror/.travis.yml | 12 + .../hashicorp/go-multierror/LICENSE | 353 + .../hashicorp/go-multierror/Makefile | 31 + .../hashicorp/go-multierror/README.md | 97 + .../hashicorp/go-multierror/append.go | 41 + .../hashicorp/go-multierror/flatten.go | 26 + .../hashicorp/go-multierror/format.go | 27 + .../hashicorp/go-multierror/multierror.go | 51 + .../hashicorp/go-multierror/prefix.go | 37 + .../src/github.com/mndrix/tap-go/.gitignore | 3 + .../src/github.com/mndrix/tap-go/LICENSE | 24 + .../src/github.com/mndrix/tap-go/Makefile | 16 + .../src/github.com/mndrix/tap-go/README.md | 7 + .../src/github.com/mndrix/tap-go/tap.go | 130 + .../github.com/mrunalp/fileutils/.gitignore | 1 + .../src/github.com/mrunalp/fileutils/LICENSE | 191 + .../github.com/mrunalp/fileutils/MAINTAINERS | 1 + .../github.com/mrunalp/fileutils/README.md | 5 + .../github.com/mrunalp/fileutils/fileutils.go | 161 + .../github.com/mrunalp/fileutils/idtools.go | 49 + .../opencontainers/runtime-spec/LICENSE | 191 + .../runtime-spec/specs-go/config.go | 553 + .../runtime-spec/specs-go/state.go | 17 + .../runtime-spec/specs-go/version.go | 18 + .../src/github.com/satori/go.uuid/.travis.yml | 21 + .../src/github.com/satori/go.uuid/LICENSE | 20 + .../src/github.com/satori/go.uuid/README.md | 65 + .../src/github.com/satori/go.uuid/uuid.go | 481 + .../gocapability/capability/capability.go | 72 + .../capability/capability_linux.go | 650 + .../capability/capability_noop.go | 19 + .../capability/capability_test.go | 83 + .../syndtr/gocapability/capability/enum.go | 268 + .../gocapability/capability/enum_gen.go | 129 + .../gocapability/capability/enumgen/gen.go | 92 + .../gocapability/capability/syscall_linux.go | 154 + .../src/github.com/urfave/cli/.gitignore | 2 + .../src/github.com/urfave/cli/.travis.yml | 39 + 
.../src/github.com/urfave/cli/CHANGELOG.md | 392 + .../src/github.com/urfave/cli/LICENSE | 21 + .../src/github.com/urfave/cli/README.md | 1364 + .../src/github.com/urfave/cli/app.go | 492 + .../src/github.com/urfave/cli/appveyor.yml | 24 + .../src/github.com/urfave/cli/category.go | 44 + .../src/github.com/urfave/cli/cli.go | 21 + .../src/github.com/urfave/cli/command.go | 286 + .../src/github.com/urfave/cli/context.go | 276 + .../src/github.com/urfave/cli/errors.go | 110 + .../src/github.com/urfave/cli/flag-types.json | 93 + .../src/github.com/urfave/cli/flag.go | 799 + .../github.com/urfave/cli/flag_generated.go | 627 + .../src/github.com/urfave/cli/funcs.go | 28 + .../github.com/urfave/cli/generate-flag-types | 255 + .../src/github.com/urfave/cli/help.go | 294 + .../src/github.com/urfave/cli/runtests | 122 + vendor/github.com/spf13/pflag/.gitignore | 2 + vendor/github.com/spf13/pflag/.travis.yml | 20 + vendor/github.com/spf13/pflag/LICENSE | 28 + vendor/github.com/spf13/pflag/README.md | 277 + vendor/github.com/spf13/pflag/bool.go | 94 + vendor/github.com/spf13/pflag/bool_slice.go | 147 + .../github.com/spf13/pflag/bool_slice_test.go | 215 + vendor/github.com/spf13/pflag/bool_test.go | 179 + vendor/github.com/spf13/pflag/count.go | 94 + vendor/github.com/spf13/pflag/count_test.go | 52 + vendor/github.com/spf13/pflag/duration.go | 86 + vendor/github.com/spf13/pflag/example_test.go | 77 + vendor/github.com/spf13/pflag/export_test.go | 29 + vendor/github.com/spf13/pflag/flag.go | 1063 + vendor/github.com/spf13/pflag/flag_test.go | 1006 + vendor/github.com/spf13/pflag/float32.go | 88 + vendor/github.com/spf13/pflag/float64.go | 84 + vendor/github.com/spf13/pflag/golangflag.go | 101 + .../github.com/spf13/pflag/golangflag_test.go | 39 + vendor/github.com/spf13/pflag/int.go | 84 + vendor/github.com/spf13/pflag/int32.go | 88 + vendor/github.com/spf13/pflag/int64.go | 84 + vendor/github.com/spf13/pflag/int8.go | 88 + vendor/github.com/spf13/pflag/int_slice.go | 128 + 
.../github.com/spf13/pflag/int_slice_test.go | 165 + vendor/github.com/spf13/pflag/ip.go | 94 + vendor/github.com/spf13/pflag/ip_slice.go | 148 + .../github.com/spf13/pflag/ip_slice_test.go | 222 + vendor/github.com/spf13/pflag/ip_test.go | 63 + vendor/github.com/spf13/pflag/ipmask.go | 122 + vendor/github.com/spf13/pflag/ipnet.go | 98 + vendor/github.com/spf13/pflag/ipnet_test.go | 70 + vendor/github.com/spf13/pflag/string.go | 80 + vendor/github.com/spf13/pflag/string_array.go | 103 + .../spf13/pflag/string_array_test.go | 233 + vendor/github.com/spf13/pflag/string_slice.go | 129 + .../spf13/pflag/string_slice_test.go | 253 + vendor/github.com/spf13/pflag/uint.go | 88 + vendor/github.com/spf13/pflag/uint16.go | 88 + vendor/github.com/spf13/pflag/uint32.go | 88 + vendor/github.com/spf13/pflag/uint64.go | 88 + vendor/github.com/spf13/pflag/uint8.go | 88 + vendor/github.com/spf13/pflag/uint_slice.go | 126 + .../github.com/spf13/pflag/uint_slice_test.go | 161 + vendor/github.com/spf13/pflag/verify/all.sh | 69 + vendor/github.com/spf13/pflag/verify/gofmt.sh | 19 + .../github.com/spf13/pflag/verify/golint.sh | 15 + vendor/github.com/ugorji/go/LICENSE | 22 + vendor/github.com/ugorji/go/README.md | 20 + vendor/github.com/ugorji/go/codec/0doc.go | 199 + vendor/github.com/ugorji/go/codec/README.md | 148 + vendor/github.com/ugorji/go/codec/binc.go | 929 + vendor/github.com/ugorji/go/codec/cbor.go | 592 + .../github.com/ugorji/go/codec/cbor_test.go | 205 + .../github.com/ugorji/go/codec/codec_test.go | 1508 + .../ugorji/go/codec/codecgen/README.md | 37 + .../ugorji/go/codec/codecgen/gen.go | 317 + .../github.com/ugorji/go/codec/codecgen/z.go | 3 + .../ugorji/go/codec/codecgen_test.go | 24 + vendor/github.com/ugorji/go/codec/decode.go | 2066 + .../github.com/ugorji/go/codec/decode_go.go | 16 + .../github.com/ugorji/go/codec/decode_go14.go | 14 + vendor/github.com/ugorji/go/codec/encode.go | 1461 + .../ugorji/go/codec/fast-path.generated.go | 39352 ++++++++ 
.../ugorji/go/codec/fast-path.go.tmpl | 527 + .../ugorji/go/codec/fast-path.not.go | 34 + .../ugorji/go/codec/gen-dec-array.go.tmpl | 104 + .../ugorji/go/codec/gen-dec-map.go.tmpl | 58 + .../ugorji/go/codec/gen-helper.generated.go | 243 + .../ugorji/go/codec/gen-helper.go.tmpl | 372 + .../ugorji/go/codec/gen.generated.go | 175 + vendor/github.com/ugorji/go/codec/gen.go | 2014 + vendor/github.com/ugorji/go/codec/gen_15.go | 12 + vendor/github.com/ugorji/go/codec/gen_16.go | 12 + vendor/github.com/ugorji/go/codec/gen_17.go | 10 + vendor/github.com/ugorji/go/codec/helper.go | 1314 + .../ugorji/go/codec/helper_internal.go | 242 + .../ugorji/go/codec/helper_not_unsafe.go | 20 + .../github.com/ugorji/go/codec/helper_test.go | 259 + .../ugorji/go/codec/helper_unsafe.go | 49 + vendor/github.com/ugorji/go/codec/json.go | 1234 + vendor/github.com/ugorji/go/codec/msgpack.go | 852 + vendor/github.com/ugorji/go/codec/noop.go | 213 + vendor/github.com/ugorji/go/codec/prebuild.go | 3 + vendor/github.com/ugorji/go/codec/prebuild.sh | 199 + vendor/github.com/ugorji/go/codec/py_test.go | 30 + vendor/github.com/ugorji/go/codec/rpc.go | 180 + vendor/github.com/ugorji/go/codec/simple.go | 526 + .../ugorji/go/codec/test-cbor-goldens.json | 639 + vendor/github.com/ugorji/go/codec/test.py | 126 + vendor/github.com/ugorji/go/codec/tests.sh | 107 + vendor/github.com/ugorji/go/codec/time.go | 233 + .../github.com/ugorji/go/codec/values_test.go | 203 + vendor/github.com/ugorji/go/msgpack.org.md | 47 + vendor/golang.org/x/oauth2/.travis.yml | 13 + vendor/golang.org/x/oauth2/AUTHORS | 3 + vendor/golang.org/x/oauth2/CONTRIBUTING.md | 31 + vendor/golang.org/x/oauth2/CONTRIBUTORS | 3 + vendor/golang.org/x/oauth2/LICENSE | 27 + vendor/golang.org/x/oauth2/README.md | 74 + vendor/golang.org/x/oauth2/amazon/amazon.go | 16 + .../x/oauth2/bitbucket/bitbucket.go | 16 + .../golang.org/x/oauth2/client_appengine.go | 25 + .../clientcredentials/clientcredentials.go | 104 + .../clientcredentials_test.go | 97 
+ vendor/golang.org/x/oauth2/example_test.go | 71 + .../golang.org/x/oauth2/facebook/facebook.go | 16 + vendor/golang.org/x/oauth2/fitbit/fitbit.go | 16 + .../x/oauth2/foursquare/foursquare.go | 16 + vendor/golang.org/x/oauth2/github/github.go | 16 + .../golang.org/x/oauth2/google/appengine.go | 89 + .../x/oauth2/google/appengine_hook.go | 14 + .../x/oauth2/google/appengineflex_hook.go | 11 + vendor/golang.org/x/oauth2/google/default.go | 130 + .../x/oauth2/google/example_test.go | 150 + vendor/golang.org/x/oauth2/google/google.go | 202 + .../golang.org/x/oauth2/google/google_test.go | 116 + vendor/golang.org/x/oauth2/google/jwt.go | 74 + vendor/golang.org/x/oauth2/google/jwt_test.go | 91 + vendor/golang.org/x/oauth2/google/sdk.go | 172 + vendor/golang.org/x/oauth2/google/sdk_test.go | 46 + .../oauth2/google/testdata/gcloud/credentials | 122 + .../oauth2/google/testdata/gcloud/properties | 2 + vendor/golang.org/x/oauth2/heroku/heroku.go | 16 + vendor/golang.org/x/oauth2/hipchat/hipchat.go | 60 + vendor/golang.org/x/oauth2/internal/oauth2.go | 76 + .../x/oauth2/internal/oauth2_test.go | 62 + vendor/golang.org/x/oauth2/internal/token.go | 247 + .../x/oauth2/internal/token_test.go | 81 + .../golang.org/x/oauth2/internal/transport.go | 69 + .../x/oauth2/internal/transport_test.go | 38 + vendor/golang.org/x/oauth2/jws/jws.go | 182 + vendor/golang.org/x/oauth2/jws/jws_test.go | 46 + .../golang.org/x/oauth2/jwt/example_test.go | 33 + vendor/golang.org/x/oauth2/jwt/jwt.go | 159 + vendor/golang.org/x/oauth2/jwt/jwt_test.go | 190 + .../golang.org/x/oauth2/linkedin/linkedin.go | 16 + .../x/oauth2/mediamath/mediamath.go | 22 + .../x/oauth2/microsoft/microsoft.go | 16 + vendor/golang.org/x/oauth2/oauth2.go | 340 + vendor/golang.org/x/oauth2/oauth2_test.go | 448 + .../x/oauth2/odnoklassniki/odnoklassniki.go | 16 + vendor/golang.org/x/oauth2/paypal/paypal.go | 22 + vendor/golang.org/x/oauth2/slack/slack.go | 16 + vendor/golang.org/x/oauth2/token.go | 158 + 
vendor/golang.org/x/oauth2/token_test.go | 72 + vendor/golang.org/x/oauth2/transport.go | 132 + vendor/golang.org/x/oauth2/transport_test.go | 108 + vendor/golang.org/x/oauth2/uber/uber.go | 16 + vendor/golang.org/x/oauth2/vk/vk.go | 16 + vendor/golang.org/x/oauth2/yandex/yandex.go | 16 + vendor/golang.org/x/text/.gitattributes | 10 + vendor/golang.org/x/text/.gitignore | 3 + vendor/golang.org/x/text/AUTHORS | 3 + vendor/golang.org/x/text/CONTRIBUTING.md | 31 + vendor/golang.org/x/text/CONTRIBUTORS | 3 + vendor/golang.org/x/text/LICENSE | 27 + vendor/golang.org/x/text/PATENTS | 22 + vendor/golang.org/x/text/README | 23 + vendor/golang.org/x/text/cases/cases.go | 162 + vendor/golang.org/x/text/cases/context.go | 376 + .../golang.org/x/text/cases/context_test.go | 438 + .../golang.org/x/text/cases/example_test.go | 53 + vendor/golang.org/x/text/cases/fold.go | 34 + vendor/golang.org/x/text/cases/fold_test.go | 51 + vendor/golang.org/x/text/cases/gen.go | 839 + vendor/golang.org/x/text/cases/gen_trieval.go | 219 + vendor/golang.org/x/text/cases/icu.go | 61 + vendor/golang.org/x/text/cases/icu_test.go | 210 + vendor/golang.org/x/text/cases/info.go | 82 + vendor/golang.org/x/text/cases/map.go | 816 + vendor/golang.org/x/text/cases/map_test.go | 950 + vendor/golang.org/x/text/cases/tables.go | 2211 + vendor/golang.org/x/text/cases/tables_test.go | 1154 + vendor/golang.org/x/text/cases/trieval.go | 215 + vendor/golang.org/x/text/cmd/gotext/doc.go | 35 + .../golang.org/x/text/cmd/gotext/extract.go | 195 + vendor/golang.org/x/text/cmd/gotext/main.go | 356 + .../golang.org/x/text/cmd/gotext/message.go | 127 + vendor/golang.org/x/text/codereview.cfg | 1 + .../x/text/collate/build/builder.go | 703 + .../x/text/collate/build/builder_test.go | 290 + .../x/text/collate/build/colelem.go | 294 + .../x/text/collate/build/colelem_test.go | 215 + .../x/text/collate/build/contract.go | 309 + .../x/text/collate/build/contract_test.go | 266 + .../golang.org/x/text/collate/build/order.go 
| 393 + .../x/text/collate/build/order_test.go | 229 + .../golang.org/x/text/collate/build/table.go | 81 + .../golang.org/x/text/collate/build/trie.go | 290 + .../x/text/collate/build/trie_test.go | 107 + vendor/golang.org/x/text/collate/collate.go | 403 + .../golang.org/x/text/collate/collate_test.go | 482 + .../golang.org/x/text/collate/export_test.go | 51 + vendor/golang.org/x/text/collate/index.go | 32 + .../golang.org/x/text/collate/maketables.go | 553 + vendor/golang.org/x/text/collate/option.go | 239 + .../golang.org/x/text/collate/option_test.go | 209 + vendor/golang.org/x/text/collate/reg_test.go | 230 + vendor/golang.org/x/text/collate/sort.go | 81 + vendor/golang.org/x/text/collate/sort_test.go | 55 + .../golang.org/x/text/collate/table_test.go | 291 + vendor/golang.org/x/text/collate/tables.go | 73789 ++++++++++++++++ .../x/text/collate/tools/colcmp/Makefile | 7 + .../x/text/collate/tools/colcmp/chars.go | 1156 + .../x/text/collate/tools/colcmp/col.go | 97 + .../x/text/collate/tools/colcmp/colcmp.go | 529 + .../x/text/collate/tools/colcmp/darwin.go | 111 + .../x/text/collate/tools/colcmp/gen.go | 183 + .../x/text/collate/tools/colcmp/icu.go | 209 + vendor/golang.org/x/text/currency/common.go | 66 + vendor/golang.org/x/text/currency/currency.go | 185 + .../x/text/currency/currency_test.go | 171 + .../x/text/currency/example_test.go | 27 + vendor/golang.org/x/text/currency/format.go | 215 + .../golang.org/x/text/currency/format_test.go | 70 + vendor/golang.org/x/text/currency/gen.go | 400 + .../golang.org/x/text/currency/gen_common.go | 70 + vendor/golang.org/x/text/currency/query.go | 152 + .../golang.org/x/text/currency/query_test.go | 107 + vendor/golang.org/x/text/currency/tables.go | 2573 + .../golang.org/x/text/currency/tables_test.go | 93 + vendor/golang.org/x/text/doc.go | 13 + .../x/text/encoding/charmap/charmap.go | 209 + .../x/text/encoding/charmap/charmap_test.go | 56 + .../x/text/encoding/charmap/maketables.go | 556 + 
.../x/text/encoding/charmap/tables.go | 7410 ++ vendor/golang.org/x/text/encoding/encoding.go | 335 + .../x/text/encoding/encoding_test.go | 1148 + .../x/text/encoding/example_test.go | 42 + .../x/text/encoding/htmlindex/gen.go | 170 + .../x/text/encoding/htmlindex/htmlindex.go | 86 + .../text/encoding/htmlindex/htmlindex_test.go | 144 + .../x/text/encoding/htmlindex/map.go | 105 + .../x/text/encoding/htmlindex/tables.go | 352 + .../x/text/encoding/ianaindex/example_test.go | 26 + .../x/text/encoding/ianaindex/ianaindex.go | 67 + .../text/encoding/internal/identifier/gen.go | 137 + .../internal/identifier/identifier.go | 81 + .../text/encoding/internal/identifier/mib.go | 1621 + .../x/text/encoding/internal/internal.go | 75 + .../x/text/encoding/japanese/all.go | 12 + .../x/text/encoding/japanese/all_test.go | 80 + .../x/text/encoding/japanese/eucjp.go | 211 + .../x/text/encoding/japanese/iso2022jp.go | 296 + .../x/text/encoding/japanese/maketables.go | 161 + .../x/text/encoding/japanese/shiftjis.go | 189 + .../x/text/encoding/japanese/tables.go | 26971 ++++++ .../x/text/encoding/korean/all_test.go | 47 + .../x/text/encoding/korean/euckr.go | 178 + .../x/text/encoding/korean/maketables.go | 143 + .../x/text/encoding/korean/tables.go | 34152 +++++++ .../x/text/encoding/simplifiedchinese/all.go | 12 + .../encoding/simplifiedchinese/all_test.go | 50 + .../x/text/encoding/simplifiedchinese/gbk.go | 281 + .../encoding/simplifiedchinese/hzgb2312.go | 240 + .../encoding/simplifiedchinese/maketables.go | 161 + .../text/encoding/simplifiedchinese/tables.go | 43999 +++++++++ .../encoding/testdata/candide-gb18030.txt | 510 + .../encoding/testdata/candide-utf-16le.txt | Bin 0 -> 51932 bytes .../encoding/testdata/candide-utf-32be.txt | Bin 0 -> 103864 bytes .../text/encoding/testdata/candide-utf-8.txt | 510 + .../testdata/candide-windows-1252.txt | 510 + .../encoding/testdata/rashomon-euc-jp.txt | 178 + .../testdata/rashomon-iso-2022-jp.txt | 178 + 
.../encoding/testdata/rashomon-shift-jis.txt | 178 + .../text/encoding/testdata/rashomon-utf-8.txt | 178 + ...nzi-bingfa-gb-levels-1-and-2-hz-gb2312.txt | 107 + .../sunzi-bingfa-gb-levels-1-and-2-utf-8.txt | 107 + .../testdata/sunzi-bingfa-simplified-gbk.txt | 107 + .../sunzi-bingfa-simplified-utf-8.txt | 107 + .../sunzi-bingfa-traditional-big5.txt | 106 + .../sunzi-bingfa-traditional-utf-8.txt | 106 + .../testdata/unsu-joh-eun-nal-euc-kr.txt | 175 + .../testdata/unsu-joh-eun-nal-utf-8.txt | 175 + .../encoding/traditionalchinese/all_test.go | 45 + .../text/encoding/traditionalchinese/big5.go | 198 + .../encoding/traditionalchinese/maketables.go | 140 + .../encoding/traditionalchinese/tables.go | 37142 ++++++++ .../x/text/encoding/unicode/override.go | 82 + .../x/text/encoding/unicode/unicode.go | 434 + .../x/text/encoding/unicode/unicode_test.go | 178 + .../x/text/encoding/unicode/utf32/utf32.go | 296 + .../text/encoding/unicode/utf32/utf32_test.go | 210 + vendor/golang.org/x/text/gen.go | 284 + .../x/text/internal/colltab/collate_test.go | 121 + .../x/text/internal/colltab/collelem.go | 371 + .../x/text/internal/colltab/collelem_test.go | 183 + .../x/text/internal/colltab/colltab.go | 105 + .../x/text/internal/colltab/colltab_test.go | 64 + .../x/text/internal/colltab/contract.go | 145 + .../x/text/internal/colltab/contract_test.go | 131 + .../x/text/internal/colltab/iter.go | 178 + .../x/text/internal/colltab/iter_test.go | 63 + .../x/text/internal/colltab/numeric.go | 236 + .../x/text/internal/colltab/numeric_test.go | 159 + .../x/text/internal/colltab/table.go | 275 + .../x/text/internal/colltab/trie.go | 159 + .../x/text/internal/colltab/trie_test.go | 106 + .../x/text/internal/colltab/weighter.go | 31 + .../x/text/internal/colltab/weighter_test.go | 42 + .../golang.org/x/text/internal/export/README | 4 + .../text/internal/export/idna/common_test.go | 55 + .../x/text/internal/export/idna/gen.go | 259 + .../x/text/internal/export/idna/gen_common.go | 59 + 
.../x/text/internal/export/idna/gen_test.go | 82 + .../text/internal/export/idna/gen_trieval.go | 118 + .../x/text/internal/export/idna/idna.go | 519 + .../x/text/internal/export/idna/idna_test.go | 250 + .../x/text/internal/export/idna/punycode.go | 201 + .../internal/export/idna/punycode_test.go | 198 + .../x/text/internal/export/idna/tables.go | 4477 + .../x/text/internal/export/idna/trie.go | 69 + .../x/text/internal/export/idna/trieval.go | 114 + .../x/text/internal/format/format.go | 43 + .../x/text/internal/format/plural/plural.go | 38 + vendor/golang.org/x/text/internal/gen.go | 52 + vendor/golang.org/x/text/internal/gen/code.go | 351 + vendor/golang.org/x/text/internal/gen/gen.go | 281 + vendor/golang.org/x/text/internal/gen_test.go | 38 + vendor/golang.org/x/text/internal/internal.go | 51 + .../x/text/internal/internal_test.go | 38 + vendor/golang.org/x/text/internal/match.go | 67 + .../golang.org/x/text/internal/match_test.go | 56 + .../x/text/internal/number/common.go | 92 + .../x/text/internal/number/data_test.go | 194 + .../x/text/internal/number/decimal.go | 416 + .../x/text/internal/number/extfloat.go | 671 + .../golang.org/x/text/internal/number/ftoa.go | 448 + .../golang.org/x/text/internal/number/gen.go | 466 + .../x/text/internal/number/gen_common.go | 96 + .../x/text/internal/number/gen_plural.go | 471 + .../golang.org/x/text/internal/number/itoa.go | 111 + .../x/text/internal/number/number.go | 145 + .../x/text/internal/number/number_test.go | 90 + .../x/text/internal/number/pattern.go | 386 + .../x/text/internal/number/pattern_test.go | 300 + .../x/text/internal/number/plural.go | 119 + .../x/text/internal/number/plural_test.go | 110 + .../x/text/internal/number/tables.go | 1652 + .../x/text/internal/number/tables_test.go | 125 + .../x/text/internal/stringset/set.go | 86 + .../x/text/internal/stringset/set_test.go | 53 + vendor/golang.org/x/text/internal/tables.go | 116 + vendor/golang.org/x/text/internal/tag/tag.go | 100 + 
.../x/text/internal/tag/tag_test.go | 67 + .../x/text/internal/testtext/codesize.go | 53 + .../x/text/internal/testtext/flag.go | 22 + .../golang.org/x/text/internal/testtext/gc.go | 14 + .../x/text/internal/testtext/gccgo.go | 11 + .../x/text/internal/testtext/go1_6.go | 23 + .../x/text/internal/testtext/go1_7.go | 17 + .../x/text/internal/testtext/text.go | 105 + .../x/text/internal/triegen/compact.go | 58 + .../x/text/internal/triegen/data_test.go | 875 + .../internal/triegen/example_compact_test.go | 71 + .../x/text/internal/triegen/example_test.go | 148 + .../x/text/internal/triegen/gen_test.go | 68 + .../x/text/internal/triegen/print.go | 251 + .../x/text/internal/triegen/triegen.go | 494 + .../x/text/internal/ucd/example_test.go | 81 + vendor/golang.org/x/text/internal/ucd/ucd.go | 376 + .../x/text/internal/ucd/ucd_test.go | 105 + .../internal/utf8internal/utf8internal.go | 87 + vendor/golang.org/x/text/language/Makefile | 16 + vendor/golang.org/x/text/language/common.go | 16 + vendor/golang.org/x/text/language/coverage.go | 197 + .../x/text/language/coverage_test.go | 154 + .../golang.org/x/text/language/data_test.go | 416 + .../x/text/language/display/dict.go | 92 + .../x/text/language/display/dict_test.go | 39 + .../x/text/language/display/display.go | 343 + .../x/text/language/display/display_test.go | 651 + .../x/text/language/display/examples_test.go | 98 + .../x/text/language/display/lookup.go | 251 + .../x/text/language/display/maketables.go | 596 + .../x/text/language/display/tables.go | 50345 +++++++++++ .../x/text/language/examples_test.go | 396 + .../golang.org/x/text/language/gen_common.go | 20 + .../golang.org/x/text/language/gen_index.go | 162 + vendor/golang.org/x/text/language/go1_1.go | 38 + vendor/golang.org/x/text/language/go1_2.go | 11 + .../x/text/language/httpexample_test.go | 48 + vendor/golang.org/x/text/language/index.go | 767 + vendor/golang.org/x/text/language/language.go | 975 + .../x/text/language/language_test.go | 878 + 
vendor/golang.org/x/text/language/lookup.go | 396 + .../golang.org/x/text/language/lookup_test.go | 457 + .../golang.org/x/text/language/maketables.go | 1648 + vendor/golang.org/x/text/language/match.go | 841 + .../golang.org/x/text/language/match_test.go | 409 + vendor/golang.org/x/text/language/parse.go | 859 + .../golang.org/x/text/language/parse_test.go | 517 + vendor/golang.org/x/text/language/tables.go | 3547 + vendor/golang.org/x/text/language/tags.go | 143 + vendor/golang.org/x/text/message/catalog.go | 113 + .../golang.org/x/text/message/catalog_test.go | 98 + vendor/golang.org/x/text/message/message.go | 185 + .../golang.org/x/text/message/message_test.go | 149 + vendor/golang.org/x/text/runes/cond.go | 187 + vendor/golang.org/x/text/runes/cond_test.go | 282 + .../golang.org/x/text/runes/example_test.go | 60 + vendor/golang.org/x/text/runes/runes.go | 355 + vendor/golang.org/x/text/runes/runes_test.go | 664 + vendor/golang.org/x/text/search/index.go | 35 + vendor/golang.org/x/text/search/pattern.go | 155 + .../golang.org/x/text/search/pattern_test.go | 357 + vendor/golang.org/x/text/search/search.go | 237 + vendor/golang.org/x/text/search/tables.go | 12448 +++ .../x/text/secure/bidirule/bench_test.go | 54 + .../x/text/secure/bidirule/bidirule.go | 342 + .../x/text/secure/bidirule/bidirule_test.go | 825 + vendor/golang.org/x/text/secure/doc.go | 6 + .../x/text/secure/precis/benchmark_test.go | 82 + .../golang.org/x/text/secure/precis/class.go | 36 + .../x/text/secure/precis/class_test.go | 50 + .../x/text/secure/precis/context.go | 139 + vendor/golang.org/x/text/secure/precis/doc.go | 14 + .../x/text/secure/precis/enforce_test.go | 320 + vendor/golang.org/x/text/secure/precis/gen.go | 310 + .../x/text/secure/precis/gen_trieval.go | 68 + .../x/text/secure/precis/nickname.go | 70 + .../x/text/secure/precis/options.go | 153 + .../x/text/secure/precis/profile.go | 388 + .../x/text/secure/precis/profile_test.go | 142 + .../x/text/secure/precis/profiles.go | 69 
+ .../golang.org/x/text/secure/precis/tables.go | 3788 + .../x/text/secure/precis/tables_test.go | 69 + .../x/text/secure/precis/transformer.go | 32 + .../x/text/secure/precis/trieval.go | 64 + .../x/text/transform/examples_test.go | 37 + .../golang.org/x/text/transform/transform.go | 705 + .../x/text/transform/transform_test.go | 1317 + vendor/golang.org/x/text/unicode/bidi/bidi.go | 198 + .../golang.org/x/text/unicode/bidi/bracket.go | 335 + vendor/golang.org/x/text/unicode/bidi/core.go | 1058 + .../x/text/unicode/bidi/core_test.go | 224 + vendor/golang.org/x/text/unicode/bidi/gen.go | 133 + .../x/text/unicode/bidi/gen_ranges.go | 57 + .../x/text/unicode/bidi/gen_trieval.go | 64 + vendor/golang.org/x/text/unicode/bidi/prop.go | 206 + .../x/text/unicode/bidi/ranges_test.go | 53 + .../golang.org/x/text/unicode/bidi/tables.go | 1779 + .../x/text/unicode/bidi/tables_test.go | 82 + .../golang.org/x/text/unicode/bidi/trieval.go | 60 + vendor/golang.org/x/text/unicode/cldr/base.go | 100 + vendor/golang.org/x/text/unicode/cldr/cldr.go | 130 + .../x/text/unicode/cldr/cldr_test.go | 27 + .../golang.org/x/text/unicode/cldr/collate.go | 359 + .../x/text/unicode/cldr/collate_test.go | 275 + .../x/text/unicode/cldr/data_test.go | 186 + .../golang.org/x/text/unicode/cldr/decode.go | 171 + .../x/text/unicode/cldr/examples_test.go | 21 + .../golang.org/x/text/unicode/cldr/makexml.go | 400 + .../golang.org/x/text/unicode/cldr/resolve.go | 602 + .../x/text/unicode/cldr/resolve_test.go | 368 + .../golang.org/x/text/unicode/cldr/slice.go | 144 + .../x/text/unicode/cldr/slice_test.go | 175 + vendor/golang.org/x/text/unicode/cldr/xml.go | 1456 + vendor/golang.org/x/text/unicode/doc.go | 8 + .../x/text/unicode/norm/composition.go | 514 + .../x/text/unicode/norm/composition_test.go | 130 + .../x/text/unicode/norm/example_iter_test.go | 82 + .../x/text/unicode/norm/example_test.go | 27 + .../x/text/unicode/norm/forminfo.go | 259 + .../x/text/unicode/norm/forminfo_test.go | 54 + 
.../golang.org/x/text/unicode/norm/input.go | 105 + vendor/golang.org/x/text/unicode/norm/iter.go | 450 + .../x/text/unicode/norm/iter_test.go | 98 + .../x/text/unicode/norm/maketables.go | 978 + .../x/text/unicode/norm/norm_test.go | 14 + .../x/text/unicode/norm/normalize.go | 608 + .../x/text/unicode/norm/normalize_test.go | 1226 + .../x/text/unicode/norm/readwriter.go | 125 + .../x/text/unicode/norm/readwriter_test.go | 56 + .../golang.org/x/text/unicode/norm/tables.go | 7627 ++ .../x/text/unicode/norm/transform.go | 88 + .../x/text/unicode/norm/transform_test.go | 101 + vendor/golang.org/x/text/unicode/norm/trie.go | 54 + .../golang.org/x/text/unicode/norm/triegen.go | 117 + .../x/text/unicode/norm/ucd_test.go | 275 + .../x/text/unicode/rangetable/gen.go | 113 + .../x/text/unicode/rangetable/merge.go | 260 + .../x/text/unicode/rangetable/merge_test.go | 184 + .../x/text/unicode/rangetable/rangetable.go | 70 + .../unicode/rangetable/rangetable_test.go | 55 + .../x/text/unicode/rangetable/tables.go | 5735 ++ .../x/text/unicode/runenames/bits.go | 59 + .../x/text/unicode/runenames/example_test.go | 118 + .../x/text/unicode/runenames/gen.go | 195 + .../x/text/unicode/runenames/gen_bits.go | 63 + .../x/text/unicode/runenames/runenames.go | 48 + .../text/unicode/runenames/runenames_test.go | 46 + .../x/text/unicode/runenames/tables.go | 15514 ++++ vendor/golang.org/x/text/width/common_test.go | 92 + .../golang.org/x/text/width/example_test.go | 52 + vendor/golang.org/x/text/width/gen.go | 115 + vendor/golang.org/x/text/width/gen_common.go | 96 + vendor/golang.org/x/text/width/gen_trieval.go | 34 + vendor/golang.org/x/text/width/kind_string.go | 16 + vendor/golang.org/x/text/width/runes_test.go | 461 + vendor/golang.org/x/text/width/tables.go | 1284 + vendor/golang.org/x/text/width/tables_test.go | 59 + vendor/golang.org/x/text/width/transform.go | 239 + .../golang.org/x/text/width/transform_test.go | 701 + vendor/golang.org/x/text/width/trieval.go | 30 + 
vendor/golang.org/x/text/width/width.go | 206 + .../google.golang.org/appengine/.travis.yml | 18 + vendor/google.golang.org/appengine/LICENSE | 202 + vendor/google.golang.org/appengine/README.md | 73 + .../google.golang.org/appengine/aetest/doc.go | 42 + .../appengine/aetest/instance.go | 51 + .../appengine/aetest/instance_classic.go | 21 + .../appengine/aetest/instance_test.go | 116 + .../appengine/aetest/instance_vm.go | 276 + .../appengine/aetest/user.go | 36 + .../google.golang.org/appengine/appengine.go | 112 + .../appengine/appengine_test.go | 49 + .../appengine/appengine_vm.go | 20 + .../appengine/blobstore/blobstore.go | 276 + .../appengine/blobstore/blobstore_test.go | 183 + .../appengine/blobstore/read.go | 160 + .../appengine/capability/capability.go | 52 + .../appengine/channel/channel.go | 83 + .../appengine/channel/channel_test.go | 21 + .../appengine/cloudsql/cloudsql.go | 62 + .../appengine/cloudsql/cloudsql_classic.go | 17 + .../appengine/cloudsql/cloudsql_vm.go | 16 + .../appengine/cmd/aebundler/aebundler.go | 342 + .../appengine/cmd/aedeploy/aedeploy.go | 268 + .../appengine/cmd/aefix/ae.go | 185 + .../appengine/cmd/aefix/ae_test.go | 144 + .../appengine/cmd/aefix/fix.go | 848 + .../appengine/cmd/aefix/main.go | 258 + .../appengine/cmd/aefix/main_test.go | 129 + .../appengine/cmd/aefix/typecheck.go | 673 + .../appengine/datastore/datastore.go | 406 + .../appengine/datastore/datastore_test.go | 1567 + .../appengine/datastore/doc.go | 351 + .../appengine/datastore/key.go | 309 + .../appengine/datastore/key_test.go | 204 + .../appengine/datastore/load.go | 334 + .../appengine/datastore/metadata.go | 78 + .../appengine/datastore/prop.go | 296 + .../appengine/datastore/prop_test.go | 604 + .../appengine/datastore/query.go | 724 + .../appengine/datastore/query_test.go | 583 + .../appengine/datastore/save.go | 300 + .../appengine/datastore/time_test.go | 65 + .../appengine/datastore/transaction.go | 87 + .../appengine/delay/delay.go | 278 + 
.../appengine/delay/delay_test.go | 375 + .../appengine/demos/guestbook/app.yaml | 14 + .../appengine/demos/guestbook/favicon.ico | Bin 0 -> 1150 bytes .../appengine/demos/guestbook/guestbook.go | 109 + .../appengine/demos/guestbook/index.yaml | 7 + .../demos/guestbook/templates/guestbook.html | 26 + .../appengine/demos/helloworld/app.yaml | 10 + .../appengine/demos/helloworld/favicon.ico | Bin 0 -> 1150 bytes .../appengine/demos/helloworld/helloworld.go | 50 + vendor/google.golang.org/appengine/errors.go | 46 + .../google.golang.org/appengine/file/file.go | 28 + .../google.golang.org/appengine/identity.go | 142 + .../appengine/image/image.go | 67 + .../appengine/internal/aetesting/fake.go | 81 + .../appengine/internal/api.go | 646 + .../appengine/internal/api_classic.go | 159 + .../appengine/internal/api_common.go | 86 + .../appengine/internal/api_race_test.go | 9 + .../appengine/internal/api_test.go | 467 + .../appengine/internal/app_id.go | 28 + .../appengine/internal/app_id_test.go | 34 + .../app_identity/app_identity_service.pb.go | 296 + .../app_identity/app_identity_service.proto | 64 + .../appengine/internal/base/api_base.pb.go | 133 + .../appengine/internal/base/api_base.proto | 33 + .../blobstore/blobstore_service.pb.go | 347 + .../blobstore/blobstore_service.proto | 71 + .../capability/capability_service.pb.go | 125 + .../capability/capability_service.proto | 28 + .../internal/channel/channel_service.pb.go | 154 + .../internal/channel/channel_service.proto | 30 + .../internal/datastore/datastore_v3.pb.go | 2778 + .../internal/datastore/datastore_v3.proto | 541 + .../appengine/internal/identity.go | 14 + .../appengine/internal/identity_classic.go | 27 + .../appengine/internal/identity_vm.go | 97 + .../internal/image/images_service.pb.go | 845 + .../internal/image/images_service.proto | 162 + .../appengine/internal/internal.go | 110 + .../appengine/internal/internal_vm_test.go | 60 + .../appengine/internal/log/log_service.pb.go | 899 + 
.../appengine/internal/log/log_service.proto | 150 + .../internal/mail/mail_service.pb.go | 229 + .../internal/mail/mail_service.proto | 45 + .../appengine/internal/main.go | 15 + .../appengine/internal/main_vm.go | 44 + .../internal/memcache/memcache_service.pb.go | 938 + .../internal/memcache/memcache_service.proto | 165 + .../appengine/internal/metadata.go | 61 + .../internal/modules/modules_service.pb.go | 375 + .../internal/modules/modules_service.proto | 80 + .../appengine/internal/net.go | 56 + .../appengine/internal/net_test.go | 58 + .../appengine/internal/regen.sh | 40 + .../internal/remote_api/remote_api.pb.go | 231 + .../internal/remote_api/remote_api.proto | 44 + .../appengine/internal/search/search.pb.go | 2127 + .../appengine/internal/search/search.proto | 388 + .../internal/socket/socket_service.pb.go | 1858 + .../internal/socket/socket_service.proto | 460 + .../internal/system/system_service.pb.go | 198 + .../internal/system/system_service.proto | 49 + .../taskqueue/taskqueue_service.pb.go | 1888 + .../taskqueue/taskqueue_service.proto | 342 + .../appengine/internal/transaction.go | 107 + .../internal/urlfetch/urlfetch_service.pb.go | 355 + .../internal/urlfetch/urlfetch_service.proto | 64 + .../internal/user/user_service.pb.go | 289 + .../internal/user/user_service.proto | 58 + .../internal/xmpp/xmpp_service.pb.go | 427 + .../internal/xmpp/xmpp_service.proto | 83 + vendor/google.golang.org/appengine/log/api.go | 40 + vendor/google.golang.org/appengine/log/log.go | 323 + .../appengine/log/log_test.go | 112 + .../google.golang.org/appengine/mail/mail.go | 123 + .../appengine/mail/mail_test.go | 65 + .../appengine/memcache/memcache.go | 526 + .../appengine/memcache/memcache_test.go | 263 + .../appengine/module/module.go | 113 + .../appengine/module/module_test.go | 124 + .../google.golang.org/appengine/namespace.go | 25 + .../appengine/namespace_test.go | 39 + .../appengine/remote_api/client.go | 174 + .../appengine/remote_api/client_test.go | 24 + 
.../appengine/remote_api/remote_api.go | 152 + .../appengine/runtime/runtime.go | 148 + .../appengine/runtime/runtime_test.go | 101 + .../google.golang.org/appengine/search/doc.go | 209 + .../appengine/search/field.go | 82 + .../appengine/search/search.go | 1121 + .../appengine/search/search_test.go | 1000 + .../appengine/search/struct.go | 251 + .../appengine/search/struct_test.go | 213 + .../google.golang.org/appengine/socket/doc.go | 10 + .../appengine/socket/socket_classic.go | 290 + .../appengine/socket/socket_vm.go | 64 + .../appengine/taskqueue/taskqueue.go | 496 + .../appengine/taskqueue/taskqueue_test.go | 116 + vendor/google.golang.org/appengine/timeout.go | 20 + .../appengine/urlfetch/urlfetch.go | 210 + .../google.golang.org/appengine/user/oauth.go | 52 + .../google.golang.org/appengine/user/user.go | 84 + .../appengine/user/user_classic.go | 35 + .../appengine/user/user_test.go | 99 + .../appengine/user/user_vm.go | 38 + .../google.golang.org/appengine/xmpp/xmpp.go | 253 + .../appengine/xmpp/xmpp_test.go | 173 + vendor/gopkg.in/inf.v0/LICENSE | 28 + vendor/gopkg.in/inf.v0/benchmark_test.go | 210 + vendor/gopkg.in/inf.v0/dec.go | 615 + vendor/gopkg.in/inf.v0/dec_go1_2_test.go | 33 + vendor/gopkg.in/inf.v0/dec_internal_test.go | 40 + vendor/gopkg.in/inf.v0/dec_test.go | 379 + vendor/gopkg.in/inf.v0/example_test.go | 62 + vendor/gopkg.in/inf.v0/rounder.go | 145 + .../gopkg.in/inf.v0/rounder_example_test.go | 72 + vendor/gopkg.in/inf.v0/rounder_test.go | 109 + vendor/k8s.io/apiserver/.import-restrictions | 10 + vendor/k8s.io/apiserver/LICENSE | 202 + vendor/k8s.io/apiserver/OWNERS | 42 + vendor/k8s.io/apiserver/README.md | 29 + vendor/k8s.io/apiserver/filter-branch-sha | 1 + .../apiserver/hack/sync-from-kubernetes.sh | 61 + vendor/k8s.io/apiserver/kubernetes-sha | 1 + vendor/k8s.io/apiserver/pkg/admission/OWNERS | 10 + .../apiserver/pkg/admission/attributes.go | 85 + .../k8s.io/apiserver/pkg/admission/chain.go | 49 + 
.../apiserver/pkg/admission/chain_test.go | 154 + .../k8s.io/apiserver/pkg/admission/errors.go | 66 + .../k8s.io/apiserver/pkg/admission/handler.go | 85 + .../apiserver/pkg/admission/interfaces.go | 90 + .../k8s.io/apiserver/pkg/admission/plugins.go | 177 + .../k8s.io/apiserver/pkg/apis/example/doc.go | 22 + .../pkg/apis/example/fuzzer/fuzzer.go | 99 + .../pkg/apis/example/install/install.go | 44 + .../apiserver/pkg/apis/example/register.go | 53 + .../apiserver/pkg/apis/example/types.go | 134 + .../pkg/apis/example/v1/conversion.go | 26 + .../apiserver/pkg/apis/example/v1/defaults.go | 26 + .../apiserver/pkg/apis/example/v1/doc.go | 23 + .../pkg/apis/example/v1/generated.pb.go | 1992 + .../pkg/apis/example/v1/generated.proto | 212 + .../apiserver/pkg/apis/example/v1/register.go | 53 + .../pkg/apis/example/v1/types.generated.go | 22 + .../apiserver/pkg/apis/example/v1/types.go | 192 + .../example/v1/types_swagger_doc_generated.go | 17 + .../example/v1/zz_generated.conversion.go | 216 + .../apis/example/v1/zz_generated.deepcopy.go | 150 + .../apis/example/v1/zz_generated.defaults.go | 21 + .../pkg/apis/example/zz_generated.deepcopy.go | 150 + .../authenticator/interfaces.go | 68 + .../authenticatorfactory/delegating.go | 115 + .../authenticatorfactory/loopback.go | 29 + .../authenticatorfactory/requestheader.go | 31 + .../pkg/authentication/group/group_adder.go | 50 + .../authentication/group/group_adder_test.go | 42 + .../request/anonymous/anonymous.go | 36 + .../request/anonymous/anonymous_test.go | 42 + .../request/bearertoken/bearertoken.go | 63 + .../request/bearertoken/bearertoken_test.go | 105 + .../request/headerrequest/requestheader.go | 178 + .../headerrequest/requestheader_test.go | 159 + .../pkg/authentication/request/union/union.go | 72 + .../request/union/unionauth_test.go | 166 + .../pkg/authentication/request/x509/doc.go | 19 + .../request/x509/testdata/client-expired.pem | 11 + .../request/x509/testdata/client-valid.pem | 11 + 
.../request/x509/testdata/client.config.json | 24 + .../request/x509/testdata/client.csr.json | 3 + .../request/x509/testdata/generate.sh | 24 + .../x509/testdata/intermediate.config.json | 18 + .../x509/testdata/intermediate.csr.json | 6 + .../request/x509/testdata/intermediate.pem | 11 + .../request/x509/testdata/root.csr.json | 6 + .../request/x509/testdata/root.pem | 11 + .../pkg/authentication/request/x509/x509.go | 185 + .../authentication/request/x509/x509_test.go | 933 + .../pkg/authentication/serviceaccount/util.go | 73 + .../serviceaccount/util_test.go | 82 + .../token/tokenfile/tokenfile.go | 91 + .../token/tokenfile/tokenfile_test.go | 141 + .../apiserver/pkg/authentication/user/doc.go | 19 + .../apiserver/pkg/authentication/user/user.go | 82 + .../authorization/authorizer/interfaces.go | 140 + .../authorization/authorizerfactory/OWNERS | 4 + .../authorizerfactory/authz_test.go | 56 + .../authorizerfactory/builtin.go | 87 + .../authorizerfactory/delegating.go | 47 + .../pkg/authorization/union/union.go | 57 + .../pkg/authorization/union/union_test.go | 83 + .../pkg/endpoints/handlers/negotiation/doc.go | 18 + .../endpoints/handlers/negotiation/errors.go | 61 + .../handlers/negotiation/negotiate.go | 305 + .../handlers/negotiation/negotiate_test.go | 245 + .../apiserver/pkg/endpoints/metrics/OWNERS | 3 + .../pkg/endpoints/metrics/metrics.go | 245 + .../apiserver/pkg/endpoints/request/OWNERS | 2 + .../pkg/endpoints/request/context.go | 145 + .../pkg/endpoints/request/context_test.go | 134 + .../apiserver/pkg/endpoints/request/doc.go | 20 + .../pkg/endpoints/request/requestcontext.go | 117 + .../pkg/endpoints/request/requestinfo.go | 241 + .../pkg/endpoints/request/requestinfo_test.go | 196 + .../apiserver/pkg/features/kube_features.go | 47 + vendor/k8s.io/apiserver/pkg/server/doc.go | 18 + .../apiserver/pkg/server/filters/OWNERS | 3 + .../apiserver/pkg/server/filters/cors.go | 98 + .../apiserver/pkg/server/filters/cors_test.go | 183 + 
.../apiserver/pkg/server/filters/doc.go | 19 + .../pkg/server/filters/longrunning.go | 40 + .../pkg/server/filters/maxinflight.go | 111 + .../pkg/server/filters/maxinflight_test.go | 240 + .../apiserver/pkg/server/filters/timeout.go | 271 + .../pkg/server/filters/timeout_test.go | 85 + .../apiserver/pkg/server/filters/wrap.go | 76 + .../apiserver/pkg/server/healthz/doc.go | 21 + .../apiserver/pkg/server/healthz/healthz.go | 133 + .../pkg/server/healthz/healthz_test.go | 82 + .../apiserver/pkg/server/httplog/doc.go | 19 + .../apiserver/pkg/server/httplog/log.go | 225 + .../apiserver/pkg/server/httplog/log_test.go | 175 + .../apiserver/pkg/server/options/OWNERS | 14 + .../pkg/server/options/authentication.go | 175 + .../pkg/server/options/authorization.go | 114 + .../apiserver/pkg/server/options/doc.go | 21 + .../apiserver/pkg/server/options/etcd.go | 88 + .../pkg/server/options/server_run_options.go | 204 + .../apiserver/pkg/server/options/serving.go | 220 + .../pkg/server/routes/data/README.md | 12 + .../server/routes/data/swagger/datafile.go | 17087 ++++ vendor/k8s.io/apiserver/pkg/storage/OWNERS | 41 + vendor/k8s.io/apiserver/pkg/storage/cacher.go | 958 + .../pkg/storage/cacher_whitebox_test.go | 56 + vendor/k8s.io/apiserver/pkg/storage/doc.go | 18 + vendor/k8s.io/apiserver/pkg/storage/errors.go | 170 + .../pkg/storage/etcd/metrics/metrics.go | 113 + .../apiserver/pkg/storage/interfaces.go | 181 + .../apiserver/pkg/storage/names/generate.go | 54 + .../pkg/storage/names/generate_test.go | 29 + .../pkg/storage/selection_predicate.go | 90 + .../pkg/storage/selection_predicate_test.go | 119 + .../pkg/storage/storagebackend/OWNERS | 6 + .../pkg/storage/storagebackend/config.go | 48 + .../apiserver/pkg/storage/time_budget.go | 95 + .../apiserver/pkg/storage/time_budget_test.go | 53 + vendor/k8s.io/apiserver/pkg/storage/util.go | 161 + .../k8s.io/apiserver/pkg/storage/util_test.go | 136 + .../apiserver/pkg/storage/watch_cache.go | 468 + 
.../apiserver/pkg/storage/watch_cache_test.go | 367 + .../k8s.io/apiserver/pkg/util/cache/cache.go | 83 + .../apiserver/pkg/util/cache/cache_test.go | 90 + .../pkg/util/cache/lruexpirecache.go | 85 + .../pkg/util/cache/lruexpirecache_test.go | 68 + .../pkg/util/feature/feature_gate.go | 232 + .../pkg/util/feature/feature_gate_test.go | 159 + .../pkg/util/flag/configuration_map.go | 53 + .../k8s.io/apiserver/pkg/util/flag/flags.go | 51 + .../pkg/util/flag/namedcertkey_flag.go | 113 + .../pkg/util/flag/namedcertkey_flag_test.go | 139 + .../apiserver/pkg/util/flag/string_flag.go | 56 + .../apiserver/pkg/util/flag/tristate.go | 83 + .../apiserver/pkg/util/flushwriter/doc.go | 19 + .../apiserver/pkg/util/flushwriter/writer.go | 53 + .../pkg/util/flushwriter/writer_test.go | 86 + .../k8s.io/apiserver/pkg/util/proxy/dial.go | 106 + .../apiserver/pkg/util/proxy/dial_test.go | 171 + vendor/k8s.io/apiserver/pkg/util/proxy/doc.go | 18 + .../apiserver/pkg/util/proxy/transport.go | 241 + .../pkg/util/proxy/transport_test.go | 261 + .../k8s.io/apiserver/pkg/util/trace/trace.go | 72 + vendor/k8s.io/apiserver/pkg/util/trie/trie.go | 79 + .../apiserver/pkg/util/webhook/webhook.go | 105 + .../apiserver/pkg/util/wsstream/conn.go | 349 + .../apiserver/pkg/util/wsstream/conn_test.go | 272 + .../k8s.io/apiserver/pkg/util/wsstream/doc.go | 21 + .../apiserver/pkg/util/wsstream/stream.go | 177 + .../pkg/util/wsstream/stream_test.go | 294 + .../apiserver/plugin/pkg/authenticator/doc.go | 18 + .../pkg/authenticator/password/allow/allow.go | 38 + .../password/allow/allow_test.go | 47 + .../plugin/pkg/authenticator/password/doc.go | 18 + .../authenticator/password/keystone/doc.go | 20 + .../password/keystone/keystone.go | 94 + .../password/passwordfile/passwordfile.go | 90 + .../passwordfile/passwordfile_test.go | 160 + .../request/basicauth/basicauth.go | 43 + .../request/basicauth/basicauth_test.go | 123 + .../authenticator/token/anytoken/anytoken.go | 42 + 
.../token/anytoken/anytoken_test.go | 71 + .../pkg/authenticator/token/oidc/OWNERS | 4 + .../pkg/authenticator/token/oidc/oidc.go | 282 + .../pkg/authenticator/token/oidc/oidc_test.go | 336 + .../token/oidc/testing/provider.go | 200 + .../token/tokentest/tokentest.go | 36 + .../authenticator/token/webhook/certs_test.go | 211 + .../authenticator/token/webhook/webhook.go | 131 + .../token/webhook/webhook_test.go | 564 + .../pkg/authorizer/webhook/certs_test.go | 211 + .../plugin/pkg/authorizer/webhook/gencerts.sh | 102 + .../plugin/pkg/authorizer/webhook/webhook.go | 229 + .../pkg/authorizer/webhook/webhook_test.go | 620 + 2124 files changed, 809703 insertions(+), 5 deletions(-) create mode 100644 vendor/cloud.google.com/go/.travis.yml create mode 100644 vendor/cloud.google.com/go/AUTHORS create mode 100644 vendor/cloud.google.com/go/CONTRIBUTING.md create mode 100644 vendor/cloud.google.com/go/CONTRIBUTORS create mode 100644 vendor/cloud.google.com/go/LICENSE create mode 100644 vendor/cloud.google.com/go/README.md create mode 100644 vendor/cloud.google.com/go/appveyor.yml create mode 100644 vendor/cloud.google.com/go/authexample_test.go create mode 100644 vendor/cloud.google.com/go/bigquery/bigquery.go create mode 100644 vendor/cloud.google.com/go/bigquery/copy.go create mode 100644 vendor/cloud.google.com/go/bigquery/copy_test.go create mode 100644 vendor/cloud.google.com/go/bigquery/create_table_test.go create mode 100644 vendor/cloud.google.com/go/bigquery/dataset.go create mode 100644 vendor/cloud.google.com/go/bigquery/dataset_test.go create mode 100644 vendor/cloud.google.com/go/bigquery/doc.go create mode 100644 vendor/cloud.google.com/go/bigquery/error.go create mode 100644 vendor/cloud.google.com/go/bigquery/error_test.go create mode 100644 vendor/cloud.google.com/go/bigquery/examples_test.go create mode 100644 vendor/cloud.google.com/go/bigquery/extract.go create mode 100644 vendor/cloud.google.com/go/bigquery/extract_test.go create mode 100644 
vendor/cloud.google.com/go/bigquery/file.go create mode 100644 vendor/cloud.google.com/go/bigquery/file_test.go create mode 100644 vendor/cloud.google.com/go/bigquery/gcs.go create mode 100644 vendor/cloud.google.com/go/bigquery/integration_test.go create mode 100644 vendor/cloud.google.com/go/bigquery/iterator.go create mode 100644 vendor/cloud.google.com/go/bigquery/iterator_test.go create mode 100644 vendor/cloud.google.com/go/bigquery/job.go create mode 100644 vendor/cloud.google.com/go/bigquery/load.go create mode 100644 vendor/cloud.google.com/go/bigquery/load_test.go create mode 100644 vendor/cloud.google.com/go/bigquery/params.go create mode 100644 vendor/cloud.google.com/go/bigquery/params_test.go create mode 100644 vendor/cloud.google.com/go/bigquery/query.go create mode 100644 vendor/cloud.google.com/go/bigquery/query_test.go create mode 100644 vendor/cloud.google.com/go/bigquery/read.go create mode 100644 vendor/cloud.google.com/go/bigquery/read_test.go create mode 100644 vendor/cloud.google.com/go/bigquery/schema.go create mode 100644 vendor/cloud.google.com/go/bigquery/schema_test.go create mode 100644 vendor/cloud.google.com/go/bigquery/service.go create mode 100644 vendor/cloud.google.com/go/bigquery/table.go create mode 100644 vendor/cloud.google.com/go/bigquery/uploader.go create mode 100644 vendor/cloud.google.com/go/bigquery/uploader_test.go create mode 100644 vendor/cloud.google.com/go/bigquery/utils_test.go create mode 100644 vendor/cloud.google.com/go/bigquery/value.go create mode 100644 vendor/cloud.google.com/go/bigquery/value_test.go create mode 100644 vendor/cloud.google.com/go/bigtable/admin.go create mode 100644 vendor/cloud.google.com/go/bigtable/admin_test.go create mode 100644 vendor/cloud.google.com/go/bigtable/bigtable.go create mode 100644 vendor/cloud.google.com/go/bigtable/bigtable_test.go create mode 100644 vendor/cloud.google.com/go/bigtable/bttest/example_test.go create mode 100644 
vendor/cloud.google.com/go/bigtable/bttest/inmem.go create mode 100644 vendor/cloud.google.com/go/bigtable/bttest/inmem_test.go create mode 100644 vendor/cloud.google.com/go/bigtable/cmd/cbt/cbt.go create mode 100644 vendor/cloud.google.com/go/bigtable/cmd/cbt/cbt_test.go create mode 100644 vendor/cloud.google.com/go/bigtable/cmd/cbt/cbtdoc.go create mode 100644 vendor/cloud.google.com/go/bigtable/cmd/emulator/cbtemulator.go create mode 100644 vendor/cloud.google.com/go/bigtable/cmd/loadtest/loadtest.go create mode 100644 vendor/cloud.google.com/go/bigtable/cmd/scantest/scantest.go create mode 100644 vendor/cloud.google.com/go/bigtable/doc.go create mode 100644 vendor/cloud.google.com/go/bigtable/export_test.go create mode 100644 vendor/cloud.google.com/go/bigtable/filter.go create mode 100644 vendor/cloud.google.com/go/bigtable/gc.go create mode 100644 vendor/cloud.google.com/go/bigtable/internal/cbtconfig/cbtconfig.go create mode 100644 vendor/cloud.google.com/go/bigtable/internal/gax/call_option.go create mode 100644 vendor/cloud.google.com/go/bigtable/internal/gax/invoke.go create mode 100644 vendor/cloud.google.com/go/bigtable/internal/gax/invoke_test.go create mode 100644 vendor/cloud.google.com/go/bigtable/internal/option/option.go create mode 100644 vendor/cloud.google.com/go/bigtable/internal/stat/stats.go create mode 100644 vendor/cloud.google.com/go/bigtable/reader.go create mode 100644 vendor/cloud.google.com/go/bigtable/reader_test.go create mode 100644 vendor/cloud.google.com/go/bigtable/retry_test.go create mode 100644 vendor/cloud.google.com/go/bigtable/testdata/read-rows-acceptance-test.json create mode 100644 vendor/cloud.google.com/go/civil/civil.go create mode 100644 vendor/cloud.google.com/go/civil/civil_test.go create mode 100644 vendor/cloud.google.com/go/cloud.go create mode 100644 vendor/cloud.google.com/go/cmd/go-cloud-debug-agent/debuglet.go create mode 100644 
vendor/cloud.google.com/go/cmd/go-cloud-debug-agent/internal/breakpoints/breakpoints.go create mode 100644 vendor/cloud.google.com/go/cmd/go-cloud-debug-agent/internal/breakpoints/breakpoints_test.go create mode 100644 vendor/cloud.google.com/go/cmd/go-cloud-debug-agent/internal/controller/client.go create mode 100644 vendor/cloud.google.com/go/cmd/go-cloud-debug-agent/internal/controller/client_test.go create mode 100644 vendor/cloud.google.com/go/cmd/go-cloud-debug-agent/internal/valuecollector/valuecollector.go create mode 100644 vendor/cloud.google.com/go/cmd/go-cloud-debug-agent/internal/valuecollector/valuecollector_test.go create mode 100644 vendor/cloud.google.com/go/compute/metadata/metadata.go create mode 100644 vendor/cloud.google.com/go/compute/metadata/metadata_test.go create mode 100644 vendor/cloud.google.com/go/container/container.go create mode 100644 vendor/cloud.google.com/go/datastore/datastore.go create mode 100644 vendor/cloud.google.com/go/datastore/datastore_test.go create mode 100644 vendor/cloud.google.com/go/datastore/doc.go create mode 100644 vendor/cloud.google.com/go/datastore/errors.go create mode 100644 vendor/cloud.google.com/go/datastore/example_test.go create mode 100644 vendor/cloud.google.com/go/datastore/integration_test.go create mode 100644 vendor/cloud.google.com/go/datastore/key.go create mode 100644 vendor/cloud.google.com/go/datastore/key_test.go create mode 100644 vendor/cloud.google.com/go/datastore/load.go create mode 100644 vendor/cloud.google.com/go/datastore/load_test.go create mode 100644 vendor/cloud.google.com/go/datastore/prop.go create mode 100644 vendor/cloud.google.com/go/datastore/query.go create mode 100644 vendor/cloud.google.com/go/datastore/query_test.go create mode 100644 vendor/cloud.google.com/go/datastore/save.go create mode 100644 vendor/cloud.google.com/go/datastore/save_test.go create mode 100644 vendor/cloud.google.com/go/datastore/testdata/index.yaml create mode 100644 
vendor/cloud.google.com/go/datastore/time.go create mode 100644 vendor/cloud.google.com/go/datastore/time_test.go create mode 100644 vendor/cloud.google.com/go/datastore/transaction.go create mode 100644 vendor/cloud.google.com/go/debugger/apiv2/controller2_client.go create mode 100644 vendor/cloud.google.com/go/debugger/apiv2/controller2_client_example_test.go create mode 100644 vendor/cloud.google.com/go/debugger/apiv2/debugger2_client.go create mode 100644 vendor/cloud.google.com/go/debugger/apiv2/debugger2_client_example_test.go create mode 100644 vendor/cloud.google.com/go/debugger/apiv2/doc.go create mode 100644 vendor/cloud.google.com/go/debugger/apiv2/mock_test.go create mode 100644 vendor/cloud.google.com/go/errorreporting/apiv1beta1/doc.go create mode 100644 vendor/cloud.google.com/go/errorreporting/apiv1beta1/error_group_client.go create mode 100644 vendor/cloud.google.com/go/errorreporting/apiv1beta1/error_group_client_example_test.go create mode 100644 vendor/cloud.google.com/go/errorreporting/apiv1beta1/error_stats_client.go create mode 100644 vendor/cloud.google.com/go/errorreporting/apiv1beta1/error_stats_client_example_test.go create mode 100644 vendor/cloud.google.com/go/errorreporting/apiv1beta1/mock_test.go create mode 100644 vendor/cloud.google.com/go/errorreporting/apiv1beta1/report_errors_client.go create mode 100644 vendor/cloud.google.com/go/errorreporting/apiv1beta1/report_errors_client_example_test.go create mode 100644 vendor/cloud.google.com/go/errors/error_logging_test.go create mode 100644 vendor/cloud.google.com/go/errors/errors.go create mode 100644 vendor/cloud.google.com/go/errors/errors_test.go create mode 100644 vendor/cloud.google.com/go/errors/stack_test.go create mode 100644 vendor/cloud.google.com/go/examples/bigquery/concat_table/main.go create mode 100644 vendor/cloud.google.com/go/examples/bigquery/load/main.go create mode 100644 vendor/cloud.google.com/go/examples/bigquery/query/main.go create mode 100644 
vendor/cloud.google.com/go/examples/bigquery/read/main.go create mode 100644 vendor/cloud.google.com/go/examples/bigtable/helloworld/README.md create mode 100644 vendor/cloud.google.com/go/examples/bigtable/helloworld/main.go create mode 100644 vendor/cloud.google.com/go/examples/bigtable/search/search.go create mode 100644 vendor/cloud.google.com/go/examples/bigtable/usercounter/README.md create mode 100644 vendor/cloud.google.com/go/examples/bigtable/usercounter/app.yaml create mode 100644 vendor/cloud.google.com/go/examples/bigtable/usercounter/main.go create mode 100644 vendor/cloud.google.com/go/examples/storage/appengine/app.go create mode 100644 vendor/cloud.google.com/go/examples/storage/appengine/app.yaml create mode 100644 vendor/cloud.google.com/go/examples/storage/appenginevm/app.go create mode 100644 vendor/cloud.google.com/go/examples/storage/appenginevm/app.yaml create mode 100644 vendor/cloud.google.com/go/iam/admin/apiv1/doc.go create mode 100644 vendor/cloud.google.com/go/iam/admin/apiv1/iam_client.go create mode 100644 vendor/cloud.google.com/go/iam/admin/apiv1/iam_client_example_test.go create mode 100644 vendor/cloud.google.com/go/iam/admin/apiv1/mock_test.go create mode 100644 vendor/cloud.google.com/go/iam/admin/apiv1/policy_methods.go create mode 100644 vendor/cloud.google.com/go/iam/iam.go create mode 100644 vendor/cloud.google.com/go/iam/iam_test.go create mode 100644 vendor/cloud.google.com/go/internal/atomiccache/atomiccache.go create mode 100644 vendor/cloud.google.com/go/internal/atomiccache/atomiccache_test.go create mode 100644 vendor/cloud.google.com/go/internal/cloud.go create mode 100644 vendor/cloud.google.com/go/internal/fields/fields.go create mode 100644 vendor/cloud.google.com/go/internal/fields/fields_test.go create mode 100644 vendor/cloud.google.com/go/internal/fields/fold.go create mode 100644 vendor/cloud.google.com/go/internal/fields/fold_test.go create mode 100755 vendor/cloud.google.com/go/internal/kokoro/build.sh 
create mode 100644 vendor/cloud.google.com/go/internal/kokoro/kokoro-key.json.enc create mode 100644 vendor/cloud.google.com/go/internal/optional/optional.go create mode 100644 vendor/cloud.google.com/go/internal/optional/optional_test.go create mode 100644 vendor/cloud.google.com/go/internal/pretty/diff.go create mode 100644 vendor/cloud.google.com/go/internal/pretty/diff_test.go create mode 100644 vendor/cloud.google.com/go/internal/pretty/pretty.go create mode 100644 vendor/cloud.google.com/go/internal/pretty/pretty_test.go create mode 100644 vendor/cloud.google.com/go/internal/retry.go create mode 100644 vendor/cloud.google.com/go/internal/retry_test.go create mode 100644 vendor/cloud.google.com/go/internal/testutil/context.go create mode 100644 vendor/cloud.google.com/go/internal/testutil/server.go create mode 100644 vendor/cloud.google.com/go/internal/testutil/server_test.go create mode 100755 vendor/cloud.google.com/go/internal/version/update_version.sh create mode 100644 vendor/cloud.google.com/go/internal/version/version.go create mode 100644 vendor/cloud.google.com/go/internal/version/version_test.go create mode 100644 vendor/cloud.google.com/go/key.json.enc create mode 100644 vendor/cloud.google.com/go/language/apiv1/doc.go create mode 100644 vendor/cloud.google.com/go/language/apiv1/language_client.go create mode 100644 vendor/cloud.google.com/go/language/apiv1/language_client_example_test.go create mode 100644 vendor/cloud.google.com/go/language/apiv1/mock_test.go create mode 100644 vendor/cloud.google.com/go/license_test.go create mode 100644 vendor/cloud.google.com/go/logging/apiv2/README.md create mode 100644 vendor/cloud.google.com/go/logging/apiv2/config_client.go create mode 100644 vendor/cloud.google.com/go/logging/apiv2/config_client_example_test.go create mode 100644 vendor/cloud.google.com/go/logging/apiv2/doc.go create mode 100644 vendor/cloud.google.com/go/logging/apiv2/logging_client.go create mode 100644 
vendor/cloud.google.com/go/logging/apiv2/logging_client_example_test.go create mode 100644 vendor/cloud.google.com/go/logging/apiv2/metrics_client.go create mode 100644 vendor/cloud.google.com/go/logging/apiv2/metrics_client_example_test.go create mode 100644 vendor/cloud.google.com/go/logging/apiv2/mock_test.go create mode 100644 vendor/cloud.google.com/go/logging/doc.go create mode 100644 vendor/cloud.google.com/go/logging/examples_test.go create mode 100644 vendor/cloud.google.com/go/logging/internal/common.go create mode 100644 vendor/cloud.google.com/go/logging/internal/testing/fake.go create mode 100644 vendor/cloud.google.com/go/logging/internal/testing/fake_test.go create mode 100644 vendor/cloud.google.com/go/logging/internal/testing/unique.go create mode 100644 vendor/cloud.google.com/go/logging/internal/testing/unique_test.go create mode 100644 vendor/cloud.google.com/go/logging/logadmin/example_entry_iterator_test.go create mode 100644 vendor/cloud.google.com/go/logging/logadmin/example_metric_iterator_test.go create mode 100644 vendor/cloud.google.com/go/logging/logadmin/example_paging_test.go create mode 100644 vendor/cloud.google.com/go/logging/logadmin/example_resource_iterator_test.go create mode 100644 vendor/cloud.google.com/go/logging/logadmin/example_sink_iterator_test.go create mode 100644 vendor/cloud.google.com/go/logging/logadmin/examples_test.go create mode 100644 vendor/cloud.google.com/go/logging/logadmin/logadmin.go create mode 100644 vendor/cloud.google.com/go/logging/logadmin/logadmin_test.go create mode 100644 vendor/cloud.google.com/go/logging/logadmin/metrics.go create mode 100644 vendor/cloud.google.com/go/logging/logadmin/metrics_test.go create mode 100644 vendor/cloud.google.com/go/logging/logadmin/resources.go create mode 100644 vendor/cloud.google.com/go/logging/logadmin/resources_test.go create mode 100644 vendor/cloud.google.com/go/logging/logadmin/sinks.go create mode 100644 
vendor/cloud.google.com/go/logging/logadmin/sinks_test.go create mode 100644 vendor/cloud.google.com/go/logging/logging.go create mode 100644 vendor/cloud.google.com/go/logging/logging_test.go create mode 100644 vendor/cloud.google.com/go/logging/logging_unexported_test.go create mode 100644 vendor/cloud.google.com/go/longrunning/example_test.go create mode 100644 vendor/cloud.google.com/go/longrunning/longrunning.go create mode 100644 vendor/cloud.google.com/go/longrunning/longrunning_test.go create mode 100644 vendor/cloud.google.com/go/monitoring/apiv3/doc.go create mode 100644 vendor/cloud.google.com/go/monitoring/apiv3/group_client.go create mode 100644 vendor/cloud.google.com/go/monitoring/apiv3/group_client_example_test.go create mode 100644 vendor/cloud.google.com/go/monitoring/apiv3/metric_client.go create mode 100644 vendor/cloud.google.com/go/monitoring/apiv3/metric_client_example_test.go create mode 100644 vendor/cloud.google.com/go/monitoring/apiv3/mock_test.go create mode 100644 vendor/cloud.google.com/go/old-news.md create mode 100644 vendor/cloud.google.com/go/pubsub/acker.go create mode 100644 vendor/cloud.google.com/go/pubsub/acker_test.go create mode 100644 vendor/cloud.google.com/go/pubsub/apiv1/README.md create mode 100644 vendor/cloud.google.com/go/pubsub/apiv1/doc.go create mode 100644 vendor/cloud.google.com/go/pubsub/apiv1/mock_test.go create mode 100644 vendor/cloud.google.com/go/pubsub/apiv1/publisher_client.go create mode 100644 vendor/cloud.google.com/go/pubsub/apiv1/publisher_client_example_test.go create mode 100644 vendor/cloud.google.com/go/pubsub/apiv1/subscriber_client.go create mode 100644 vendor/cloud.google.com/go/pubsub/apiv1/subscriber_client_example_test.go create mode 100644 vendor/cloud.google.com/go/pubsub/doc.go create mode 100644 vendor/cloud.google.com/go/pubsub/endtoend_test.go create mode 100644 vendor/cloud.google.com/go/pubsub/example_subscription_iterator_test.go create mode 100644 
vendor/cloud.google.com/go/pubsub/example_test.go create mode 100644 vendor/cloud.google.com/go/pubsub/example_topic_iterator_test.go create mode 100644 vendor/cloud.google.com/go/pubsub/fake_test.go create mode 100644 vendor/cloud.google.com/go/pubsub/integration_test.go create mode 100644 vendor/cloud.google.com/go/pubsub/iterator.go create mode 100644 vendor/cloud.google.com/go/pubsub/iterator_test.go create mode 100644 vendor/cloud.google.com/go/pubsub/keepalive.go create mode 100644 vendor/cloud.google.com/go/pubsub/keepalive_test.go create mode 100644 vendor/cloud.google.com/go/pubsub/message.go create mode 100644 vendor/cloud.google.com/go/pubsub/pubsub.go create mode 100644 vendor/cloud.google.com/go/pubsub/puller.go create mode 100644 vendor/cloud.google.com/go/pubsub/puller_test.go create mode 100644 vendor/cloud.google.com/go/pubsub/service.go create mode 100644 vendor/cloud.google.com/go/pubsub/service_test.go create mode 100644 vendor/cloud.google.com/go/pubsub/streaming_pull_test.go create mode 100644 vendor/cloud.google.com/go/pubsub/subscription.go create mode 100644 vendor/cloud.google.com/go/pubsub/subscription_test.go create mode 100644 vendor/cloud.google.com/go/pubsub/topic.go create mode 100644 vendor/cloud.google.com/go/pubsub/topic_test.go create mode 100644 vendor/cloud.google.com/go/pubsub/utils_test.go create mode 100644 vendor/cloud.google.com/go/spanner/admin/database/apiv1/database_admin_client.go create mode 100644 vendor/cloud.google.com/go/spanner/admin/database/apiv1/database_admin_client_example_test.go create mode 100644 vendor/cloud.google.com/go/spanner/admin/database/apiv1/doc.go create mode 100644 vendor/cloud.google.com/go/spanner/admin/database/apiv1/mock_test.go create mode 100644 vendor/cloud.google.com/go/spanner/admin/instance/apiv1/doc.go create mode 100644 vendor/cloud.google.com/go/spanner/admin/instance/apiv1/instance_admin_client.go create mode 100644 
vendor/cloud.google.com/go/spanner/admin/instance/apiv1/instance_admin_client_example_test.go create mode 100644 vendor/cloud.google.com/go/spanner/admin/instance/apiv1/mock_test.go create mode 100644 vendor/cloud.google.com/go/spanner/backoff.go create mode 100644 vendor/cloud.google.com/go/spanner/backoff_test.go create mode 100644 vendor/cloud.google.com/go/spanner/client.go create mode 100644 vendor/cloud.google.com/go/spanner/client_test.go create mode 100644 vendor/cloud.google.com/go/spanner/doc.go create mode 100644 vendor/cloud.google.com/go/spanner/errors.go create mode 100644 vendor/cloud.google.com/go/spanner/examples_test.go create mode 100644 vendor/cloud.google.com/go/spanner/internal/testutil/mockclient.go create mode 100644 vendor/cloud.google.com/go/spanner/internal/testutil/mockserver.go create mode 100644 vendor/cloud.google.com/go/spanner/key.go create mode 100644 vendor/cloud.google.com/go/spanner/key_test.go create mode 100644 vendor/cloud.google.com/go/spanner/keyset.go create mode 100644 vendor/cloud.google.com/go/spanner/keyset_test.go create mode 100644 vendor/cloud.google.com/go/spanner/mutation.go create mode 100644 vendor/cloud.google.com/go/spanner/mutation_test.go create mode 100644 vendor/cloud.google.com/go/spanner/protoutils.go create mode 100644 vendor/cloud.google.com/go/spanner/read.go create mode 100644 vendor/cloud.google.com/go/spanner/read_test.go create mode 100644 vendor/cloud.google.com/go/spanner/retry.go create mode 100644 vendor/cloud.google.com/go/spanner/retry_test.go create mode 100644 vendor/cloud.google.com/go/spanner/row.go create mode 100644 vendor/cloud.google.com/go/spanner/row_test.go create mode 100644 vendor/cloud.google.com/go/spanner/session.go create mode 100644 vendor/cloud.google.com/go/spanner/session_test.go create mode 100644 vendor/cloud.google.com/go/spanner/spanner_test.go create mode 100644 vendor/cloud.google.com/go/spanner/statement.go create mode 100644 
vendor/cloud.google.com/go/spanner/statement_test.go create mode 100644 vendor/cloud.google.com/go/spanner/timestampbound.go create mode 100644 vendor/cloud.google.com/go/spanner/timestampbound_test.go create mode 100644 vendor/cloud.google.com/go/spanner/transaction.go create mode 100644 vendor/cloud.google.com/go/spanner/value.go create mode 100644 vendor/cloud.google.com/go/spanner/value_test.go create mode 100644 vendor/cloud.google.com/go/speech/apiv1beta1/doc.go create mode 100644 vendor/cloud.google.com/go/speech/apiv1beta1/mock_test.go create mode 100644 vendor/cloud.google.com/go/speech/apiv1beta1/speech_client.go create mode 100644 vendor/cloud.google.com/go/speech/apiv1beta1/speech_client_example_test.go create mode 100644 vendor/cloud.google.com/go/storage/acl.go create mode 100644 vendor/cloud.google.com/go/storage/bucket.go create mode 100644 vendor/cloud.google.com/go/storage/copy.go create mode 100644 vendor/cloud.google.com/go/storage/doc.go create mode 100644 vendor/cloud.google.com/go/storage/example_test.go create mode 100644 vendor/cloud.google.com/go/storage/integration_test.go create mode 100644 vendor/cloud.google.com/go/storage/invoke.go create mode 100644 vendor/cloud.google.com/go/storage/invoke_test.go create mode 100644 vendor/cloud.google.com/go/storage/reader.go create mode 100644 vendor/cloud.google.com/go/storage/storage.go create mode 100644 vendor/cloud.google.com/go/storage/storage_test.go create mode 100644 vendor/cloud.google.com/go/storage/testdata/dummy_pem create mode 100644 vendor/cloud.google.com/go/storage/testdata/dummy_rsa create mode 100644 vendor/cloud.google.com/go/storage/writer.go create mode 100644 vendor/cloud.google.com/go/storage/writer_test.go create mode 100644 vendor/cloud.google.com/go/trace/apiv1/doc.go create mode 100644 vendor/cloud.google.com/go/trace/apiv1/mock_test.go create mode 100644 vendor/cloud.google.com/go/trace/apiv1/trace_client.go create mode 100644 
vendor/cloud.google.com/go/trace/apiv1/trace_client_example_test.go create mode 100644 vendor/cloud.google.com/go/trace/sampling.go create mode 100644 vendor/cloud.google.com/go/trace/trace.go create mode 100644 vendor/cloud.google.com/go/trace/trace_test.go create mode 100644 vendor/cloud.google.com/go/translate/internal/translate/v2/README create mode 100755 vendor/cloud.google.com/go/translate/internal/translate/v2/regen.sh create mode 100644 vendor/cloud.google.com/go/translate/internal/translate/v2/translate-nov2016-api.json create mode 100644 vendor/cloud.google.com/go/translate/internal/translate/v2/translate-nov2016-gen.go create mode 100644 vendor/cloud.google.com/go/translate/translate.go create mode 100644 vendor/cloud.google.com/go/translate/translate_test.go create mode 100644 vendor/cloud.google.com/go/vision/annotations.go create mode 100644 vendor/cloud.google.com/go/vision/apiv1/README.md create mode 100644 vendor/cloud.google.com/go/vision/apiv1/doc.go create mode 100644 vendor/cloud.google.com/go/vision/apiv1/image_annotator_client.go create mode 100644 vendor/cloud.google.com/go/vision/apiv1/image_annotator_client_example_test.go create mode 100644 vendor/cloud.google.com/go/vision/apiv1/mock_test.go create mode 100644 vendor/cloud.google.com/go/vision/doc.go create mode 100644 vendor/cloud.google.com/go/vision/examples_test.go create mode 100644 vendor/cloud.google.com/go/vision/face.go create mode 100644 vendor/cloud.google.com/go/vision/geometry.go create mode 100644 vendor/cloud.google.com/go/vision/image.go create mode 100644 vendor/cloud.google.com/go/vision/image_test.go create mode 100644 vendor/cloud.google.com/go/vision/latlng.go create mode 100644 vendor/cloud.google.com/go/vision/testdata/README.md create mode 100644 vendor/cloud.google.com/go/vision/testdata/cat.jpg create mode 100644 vendor/cloud.google.com/go/vision/testdata/eiffel-tower.jpg create mode 100644 vendor/cloud.google.com/go/vision/testdata/face.jpg create mode 100644 
vendor/cloud.google.com/go/vision/testdata/faulkner.jpg create mode 100644 vendor/cloud.google.com/go/vision/testdata/google.png create mode 100644 vendor/cloud.google.com/go/vision/testdata/mountain.jpg create mode 100644 vendor/cloud.google.com/go/vision/testdata/no-text.jpg create mode 100644 vendor/cloud.google.com/go/vision/vision.go create mode 100644 vendor/cloud.google.com/go/vision/vision_test.go create mode 100644 vendor/github.com/Azure/go-ansiterm/LICENSE create mode 100644 vendor/github.com/Azure/go-ansiterm/README.md create mode 100644 vendor/github.com/Azure/go-ansiterm/constants.go create mode 100644 vendor/github.com/Azure/go-ansiterm/context.go create mode 100644 vendor/github.com/Azure/go-ansiterm/csi_entry_state.go create mode 100644 vendor/github.com/Azure/go-ansiterm/csi_param_state.go create mode 100644 vendor/github.com/Azure/go-ansiterm/escape_intermediate_state.go create mode 100644 vendor/github.com/Azure/go-ansiterm/escape_state.go create mode 100644 vendor/github.com/Azure/go-ansiterm/event_handler.go create mode 100644 vendor/github.com/Azure/go-ansiterm/ground_state.go create mode 100644 vendor/github.com/Azure/go-ansiterm/osc_string_state.go create mode 100644 vendor/github.com/Azure/go-ansiterm/parser.go create mode 100644 vendor/github.com/Azure/go-ansiterm/parser_action_helpers.go create mode 100644 vendor/github.com/Azure/go-ansiterm/parser_actions.go create mode 100644 vendor/github.com/Azure/go-ansiterm/parser_test.go create mode 100644 vendor/github.com/Azure/go-ansiterm/parser_test_helpers_test.go create mode 100644 vendor/github.com/Azure/go-ansiterm/parser_test_utilities_test.go create mode 100644 vendor/github.com/Azure/go-ansiterm/states.go create mode 100644 vendor/github.com/Azure/go-ansiterm/test_event_handler_test.go create mode 100644 vendor/github.com/Azure/go-ansiterm/utilities.go create mode 100644 vendor/github.com/Azure/go-ansiterm/winterm/ansi.go create mode 100644 
vendor/github.com/Azure/go-ansiterm/winterm/api.go create mode 100644 vendor/github.com/Azure/go-ansiterm/winterm/attr_translation.go create mode 100644 vendor/github.com/Azure/go-ansiterm/winterm/cursor_helpers.go create mode 100644 vendor/github.com/Azure/go-ansiterm/winterm/erase_helpers.go create mode 100644 vendor/github.com/Azure/go-ansiterm/winterm/scroll_helper.go create mode 100644 vendor/github.com/Azure/go-ansiterm/winterm/utilities.go create mode 100644 vendor/github.com/Azure/go-ansiterm/winterm/win_event_handler.go create mode 100644 vendor/github.com/PuerkitoBio/purell/.gitignore create mode 100644 vendor/github.com/PuerkitoBio/purell/.travis.yml create mode 100644 vendor/github.com/PuerkitoBio/purell/LICENSE create mode 100644 vendor/github.com/PuerkitoBio/purell/README.md create mode 100644 vendor/github.com/PuerkitoBio/purell/bench_test.go create mode 100644 vendor/github.com/PuerkitoBio/purell/benchmarks/v0.1.0 create mode 100644 vendor/github.com/PuerkitoBio/purell/example_test.go create mode 100644 vendor/github.com/PuerkitoBio/purell/purell.go create mode 100644 vendor/github.com/PuerkitoBio/purell/purell_test.go create mode 100644 vendor/github.com/PuerkitoBio/purell/urlnorm_test.go create mode 100644 vendor/github.com/PuerkitoBio/urlesc/.travis.yml create mode 100644 vendor/github.com/PuerkitoBio/urlesc/LICENSE create mode 100644 vendor/github.com/PuerkitoBio/urlesc/README.md create mode 100644 vendor/github.com/PuerkitoBio/urlesc/urlesc.go create mode 100644 vendor/github.com/PuerkitoBio/urlesc/urlesc_test.go create mode 100644 vendor/github.com/coreos/go-oidc/.gitignore create mode 100644 vendor/github.com/coreos/go-oidc/.travis.yml create mode 100644 vendor/github.com/coreos/go-oidc/CONTRIBUTING.md create mode 100644 vendor/github.com/coreos/go-oidc/DCO create mode 100644 vendor/github.com/coreos/go-oidc/LICENSE create mode 100644 vendor/github.com/coreos/go-oidc/MAINTAINERS create mode 100644 vendor/github.com/coreos/go-oidc/NOTICE 
create mode 100644 vendor/github.com/coreos/go-oidc/README.md create mode 100644 vendor/github.com/coreos/go-oidc/example/README.md create mode 100644 vendor/github.com/coreos/go-oidc/example/idtoken/app.go create mode 100644 vendor/github.com/coreos/go-oidc/example/nonce/app.go create mode 100644 vendor/github.com/coreos/go-oidc/example/userinfo/app.go create mode 100644 vendor/github.com/coreos/go-oidc/gen.go create mode 100644 vendor/github.com/coreos/go-oidc/http/client.go create mode 100644 vendor/github.com/coreos/go-oidc/http/doc.go create mode 100644 vendor/github.com/coreos/go-oidc/http/http.go create mode 100644 vendor/github.com/coreos/go-oidc/http/http_test.go create mode 100644 vendor/github.com/coreos/go-oidc/http/url.go create mode 100644 vendor/github.com/coreos/go-oidc/http/url_test.go create mode 100644 vendor/github.com/coreos/go-oidc/jose.go create mode 100644 vendor/github.com/coreos/go-oidc/jose/claims.go create mode 100644 vendor/github.com/coreos/go-oidc/jose/claims_test.go create mode 100644 vendor/github.com/coreos/go-oidc/jose/doc.go create mode 100644 vendor/github.com/coreos/go-oidc/jose/jose.go create mode 100644 vendor/github.com/coreos/go-oidc/jose/jwk.go create mode 100644 vendor/github.com/coreos/go-oidc/jose/jwk_test.go create mode 100644 vendor/github.com/coreos/go-oidc/jose/jws.go create mode 100644 vendor/github.com/coreos/go-oidc/jose/jws_test.go create mode 100644 vendor/github.com/coreos/go-oidc/jose/jwt.go create mode 100644 vendor/github.com/coreos/go-oidc/jose/jwt_test.go create mode 100755 vendor/github.com/coreos/go-oidc/jose/sig.go create mode 100755 vendor/github.com/coreos/go-oidc/jose/sig_rsa.go create mode 100644 vendor/github.com/coreos/go-oidc/jose_test.go create mode 100644 vendor/github.com/coreos/go-oidc/jwks.go create mode 100644 vendor/github.com/coreos/go-oidc/jwks_test.go create mode 100644 vendor/github.com/coreos/go-oidc/key/doc.go create mode 100644 vendor/github.com/coreos/go-oidc/key/key.go create 
mode 100644 vendor/github.com/coreos/go-oidc/key/key_test.go create mode 100644 vendor/github.com/coreos/go-oidc/key/manager.go create mode 100644 vendor/github.com/coreos/go-oidc/key/manager_test.go create mode 100644 vendor/github.com/coreos/go-oidc/key/repo.go create mode 100644 vendor/github.com/coreos/go-oidc/key/rotate.go create mode 100644 vendor/github.com/coreos/go-oidc/key/rotate_test.go create mode 100644 vendor/github.com/coreos/go-oidc/key/sync.go create mode 100644 vendor/github.com/coreos/go-oidc/key/sync_test.go create mode 100644 vendor/github.com/coreos/go-oidc/oauth2/doc.go create mode 100644 vendor/github.com/coreos/go-oidc/oauth2/error.go create mode 100644 vendor/github.com/coreos/go-oidc/oauth2/oauth2.go create mode 100644 vendor/github.com/coreos/go-oidc/oauth2/oauth2_test.go create mode 100644 vendor/github.com/coreos/go-oidc/oidc.go create mode 100644 vendor/github.com/coreos/go-oidc/oidc/client.go create mode 100644 vendor/github.com/coreos/go-oidc/oidc/client_race_test.go create mode 100644 vendor/github.com/coreos/go-oidc/oidc/client_test.go create mode 100644 vendor/github.com/coreos/go-oidc/oidc/doc.go create mode 100644 vendor/github.com/coreos/go-oidc/oidc/identity.go create mode 100644 vendor/github.com/coreos/go-oidc/oidc/identity_test.go create mode 100644 vendor/github.com/coreos/go-oidc/oidc/interface.go create mode 100755 vendor/github.com/coreos/go-oidc/oidc/key.go create mode 100644 vendor/github.com/coreos/go-oidc/oidc/provider.go create mode 100644 vendor/github.com/coreos/go-oidc/oidc/provider_test.go create mode 100644 vendor/github.com/coreos/go-oidc/oidc/transport.go create mode 100644 vendor/github.com/coreos/go-oidc/oidc/transport_test.go create mode 100644 vendor/github.com/coreos/go-oidc/oidc/util.go create mode 100644 vendor/github.com/coreos/go-oidc/oidc/util_test.go create mode 100644 vendor/github.com/coreos/go-oidc/oidc/verification.go create mode 100644 
vendor/github.com/coreos/go-oidc/oidc/verification_test.go create mode 100755 vendor/github.com/coreos/go-oidc/test create mode 100644 vendor/github.com/coreos/go-oidc/verify.go create mode 100644 vendor/github.com/coreos/go-oidc/verify_test.go create mode 100644 vendor/github.com/coreos/pkg/.gitignore create mode 100644 vendor/github.com/coreos/pkg/.travis.yml create mode 100644 vendor/github.com/coreos/pkg/CONTRIBUTING.md create mode 100644 vendor/github.com/coreos/pkg/DCO create mode 100644 vendor/github.com/coreos/pkg/LICENSE create mode 100644 vendor/github.com/coreos/pkg/MAINTAINERS create mode 100644 vendor/github.com/coreos/pkg/NOTICE create mode 100644 vendor/github.com/coreos/pkg/README.md create mode 100755 vendor/github.com/coreos/pkg/build create mode 100644 vendor/github.com/coreos/pkg/capnslog/README.md create mode 100644 vendor/github.com/coreos/pkg/capnslog/example/hello_dolly.go create mode 100644 vendor/github.com/coreos/pkg/capnslog/formatters.go create mode 100644 vendor/github.com/coreos/pkg/capnslog/glog_formatter.go create mode 100644 vendor/github.com/coreos/pkg/capnslog/init.go create mode 100644 vendor/github.com/coreos/pkg/capnslog/init_windows.go create mode 100644 vendor/github.com/coreos/pkg/capnslog/journald_formatter.go create mode 100644 vendor/github.com/coreos/pkg/capnslog/log_hijack.go create mode 100644 vendor/github.com/coreos/pkg/capnslog/logmap.go create mode 100644 vendor/github.com/coreos/pkg/capnslog/pkg_logger.go create mode 100644 vendor/github.com/coreos/pkg/capnslog/syslog_formatter.go create mode 100644 vendor/github.com/coreos/pkg/cryptoutil/aes.go create mode 100644 vendor/github.com/coreos/pkg/cryptoutil/aes_test.go create mode 100644 vendor/github.com/coreos/pkg/dlopen/dlopen.go create mode 100644 vendor/github.com/coreos/pkg/dlopen/dlopen_example.go create mode 100644 vendor/github.com/coreos/pkg/dlopen/dlopen_test.go create mode 100644 vendor/github.com/coreos/pkg/flagutil/env.go create mode 100644 
vendor/github.com/coreos/pkg/flagutil/env_file.go create mode 100644 vendor/github.com/coreos/pkg/flagutil/env_test.go create mode 100644 vendor/github.com/coreos/pkg/flagutil/file_env_test.go create mode 100644 vendor/github.com/coreos/pkg/flagutil/types.go create mode 100644 vendor/github.com/coreos/pkg/flagutil/types_test.go create mode 100644 vendor/github.com/coreos/pkg/health/README.md create mode 100644 vendor/github.com/coreos/pkg/health/health.go create mode 100644 vendor/github.com/coreos/pkg/health/health_test.go create mode 100644 vendor/github.com/coreos/pkg/httputil/README.md create mode 100644 vendor/github.com/coreos/pkg/httputil/cookie.go create mode 100644 vendor/github.com/coreos/pkg/httputil/cookie_test.go create mode 100644 vendor/github.com/coreos/pkg/httputil/json.go create mode 100644 vendor/github.com/coreos/pkg/httputil/json_test.go create mode 100644 vendor/github.com/coreos/pkg/k8s-tlsutil/k8s-tlsutil.go create mode 100644 vendor/github.com/coreos/pkg/multierror/multierror.go create mode 100644 vendor/github.com/coreos/pkg/multierror/multierror_test.go create mode 100644 vendor/github.com/coreos/pkg/netutil/proxy.go create mode 100644 vendor/github.com/coreos/pkg/netutil/url.go create mode 100644 vendor/github.com/coreos/pkg/netutil/url_test.go create mode 100644 vendor/github.com/coreos/pkg/progressutil/iocopy.go create mode 100644 vendor/github.com/coreos/pkg/progressutil/iocopy_test.go create mode 100644 vendor/github.com/coreos/pkg/progressutil/progressbar.go create mode 100644 vendor/github.com/coreos/pkg/progressutil/progressbar_test.go create mode 100755 vendor/github.com/coreos/pkg/test create mode 100644 vendor/github.com/coreos/pkg/timeutil/backoff.go create mode 100644 vendor/github.com/coreos/pkg/timeutil/backoff_test.go create mode 100644 vendor/github.com/coreos/pkg/yamlutil/yaml.go create mode 100644 vendor/github.com/coreos/pkg/yamlutil/yaml_test.go create mode 100644 vendor/github.com/davecgh/go-spew/.gitignore create 
mode 100644 vendor/github.com/davecgh/go-spew/.travis.yml create mode 100644 vendor/github.com/davecgh/go-spew/LICENSE create mode 100644 vendor/github.com/davecgh/go-spew/README.md create mode 100644 vendor/github.com/davecgh/go-spew/cov_report.sh create mode 100644 vendor/github.com/davecgh/go-spew/spew/bypass.go create mode 100644 vendor/github.com/davecgh/go-spew/spew/bypasssafe.go create mode 100644 vendor/github.com/davecgh/go-spew/spew/common.go create mode 100644 vendor/github.com/davecgh/go-spew/spew/common_test.go create mode 100644 vendor/github.com/davecgh/go-spew/spew/config.go create mode 100644 vendor/github.com/davecgh/go-spew/spew/doc.go create mode 100644 vendor/github.com/davecgh/go-spew/spew/dump.go create mode 100644 vendor/github.com/davecgh/go-spew/spew/dump_test.go create mode 100644 vendor/github.com/davecgh/go-spew/spew/dumpcgo_test.go create mode 100644 vendor/github.com/davecgh/go-spew/spew/dumpnocgo_test.go create mode 100644 vendor/github.com/davecgh/go-spew/spew/example_test.go create mode 100644 vendor/github.com/davecgh/go-spew/spew/format.go create mode 100644 vendor/github.com/davecgh/go-spew/spew/format_test.go create mode 100644 vendor/github.com/davecgh/go-spew/spew/internal_test.go create mode 100644 vendor/github.com/davecgh/go-spew/spew/internalunsafe_test.go create mode 100644 vendor/github.com/davecgh/go-spew/spew/spew.go create mode 100644 vendor/github.com/davecgh/go-spew/spew/spew_test.go create mode 100644 vendor/github.com/davecgh/go-spew/spew/testdata/dumpcgo.go create mode 100644 vendor/github.com/davecgh/go-spew/test_coverage.txt create mode 100644 vendor/github.com/docker/spdystream/CONTRIBUTING.md create mode 100644 vendor/github.com/docker/spdystream/LICENSE create mode 100644 vendor/github.com/docker/spdystream/LICENSE.docs create mode 100644 vendor/github.com/docker/spdystream/MAINTAINERS create mode 100644 vendor/github.com/docker/spdystream/README.md create mode 100644 
vendor/github.com/docker/spdystream/connection.go create mode 100644 vendor/github.com/docker/spdystream/handlers.go create mode 100644 vendor/github.com/docker/spdystream/priority.go create mode 100644 vendor/github.com/docker/spdystream/priority_test.go create mode 100644 vendor/github.com/docker/spdystream/spdy/dictionary.go create mode 100644 vendor/github.com/docker/spdystream/spdy/read.go create mode 100644 vendor/github.com/docker/spdystream/spdy/spdy_test.go create mode 100644 vendor/github.com/docker/spdystream/spdy/types.go create mode 100644 vendor/github.com/docker/spdystream/spdy/write.go create mode 100644 vendor/github.com/docker/spdystream/spdy_bench_test.go create mode 100644 vendor/github.com/docker/spdystream/spdy_test.go create mode 100644 vendor/github.com/docker/spdystream/stream.go create mode 100644 vendor/github.com/docker/spdystream/utils.go create mode 100644 vendor/github.com/docker/spdystream/ws/connection.go create mode 100644 vendor/github.com/docker/spdystream/ws/ws_test.go create mode 100644 vendor/github.com/emicklei/go-restful/.gitignore create mode 100644 vendor/github.com/emicklei/go-restful/CHANGES.md create mode 100644 vendor/github.com/emicklei/go-restful/LICENSE create mode 100644 vendor/github.com/emicklei/go-restful/README.md create mode 100644 vendor/github.com/emicklei/go-restful/Srcfile create mode 100644 vendor/github.com/emicklei/go-restful/bench_curly_test.go create mode 100644 vendor/github.com/emicklei/go-restful/bench_test.go create mode 100644 vendor/github.com/emicklei/go-restful/bench_test.sh create mode 100644 vendor/github.com/emicklei/go-restful/compress.go create mode 100644 vendor/github.com/emicklei/go-restful/compress_test.go create mode 100644 vendor/github.com/emicklei/go-restful/compressor_cache.go create mode 100644 vendor/github.com/emicklei/go-restful/compressor_pools.go create mode 100644 vendor/github.com/emicklei/go-restful/compressors.go create mode 100644 
vendor/github.com/emicklei/go-restful/constants.go create mode 100644 vendor/github.com/emicklei/go-restful/container.go create mode 100644 vendor/github.com/emicklei/go-restful/container_test.go create mode 100644 vendor/github.com/emicklei/go-restful/cors_filter.go create mode 100644 vendor/github.com/emicklei/go-restful/cors_filter_test.go create mode 100644 vendor/github.com/emicklei/go-restful/coverage.sh create mode 100644 vendor/github.com/emicklei/go-restful/curly.go create mode 100644 vendor/github.com/emicklei/go-restful/curly_route.go create mode 100644 vendor/github.com/emicklei/go-restful/curly_test.go create mode 100644 vendor/github.com/emicklei/go-restful/doc.go create mode 100644 vendor/github.com/emicklei/go-restful/doc_examples_test.go create mode 100644 vendor/github.com/emicklei/go-restful/entity_accessors.go create mode 100644 vendor/github.com/emicklei/go-restful/entity_accessors_test.go create mode 100644 vendor/github.com/emicklei/go-restful/examples/.goconvey create mode 100644 vendor/github.com/emicklei/go-restful/examples/google_app_engine/.goconvey create mode 100644 vendor/github.com/emicklei/go-restful/examples/google_app_engine/app.yaml create mode 100644 vendor/github.com/emicklei/go-restful/examples/google_app_engine/datastore/.goconvey create mode 100644 vendor/github.com/emicklei/go-restful/examples/google_app_engine/datastore/app.yaml create mode 100644 vendor/github.com/emicklei/go-restful/examples/google_app_engine/datastore/main.go create mode 100644 vendor/github.com/emicklei/go-restful/examples/google_app_engine/restful-appstats-integration.go create mode 100644 vendor/github.com/emicklei/go-restful/examples/google_app_engine/restful-user-service.go create mode 100644 vendor/github.com/emicklei/go-restful/examples/home.html create mode 100644 vendor/github.com/emicklei/go-restful/examples/msgpack/msgpack_entity.go create mode 100644 vendor/github.com/emicklei/go-restful/examples/msgpack/msgpack_entity_test.go create mode 
100644 vendor/github.com/emicklei/go-restful/examples/restful-CORS-filter.go create mode 100644 vendor/github.com/emicklei/go-restful/examples/restful-NCSA-logging.go create mode 100644 vendor/github.com/emicklei/go-restful/examples/restful-basic-authentication.go create mode 100644 vendor/github.com/emicklei/go-restful/examples/restful-cpuprofiler-service.go create mode 100644 vendor/github.com/emicklei/go-restful/examples/restful-curly-router.go create mode 100644 vendor/github.com/emicklei/go-restful/examples/restful-curly-router_test.go create mode 100644 vendor/github.com/emicklei/go-restful/examples/restful-encoding-filter.go create mode 100644 vendor/github.com/emicklei/go-restful/examples/restful-filters.go create mode 100644 vendor/github.com/emicklei/go-restful/examples/restful-form-handling.go create mode 100644 vendor/github.com/emicklei/go-restful/examples/restful-hello-world.go create mode 100644 vendor/github.com/emicklei/go-restful/examples/restful-html-template.go create mode 100644 vendor/github.com/emicklei/go-restful/examples/restful-multi-containers.go create mode 100644 vendor/github.com/emicklei/go-restful/examples/restful-no-cache-filter.go create mode 100644 vendor/github.com/emicklei/go-restful/examples/restful-options-filter.go create mode 100644 vendor/github.com/emicklei/go-restful/examples/restful-path-tail.go create mode 100644 vendor/github.com/emicklei/go-restful/examples/restful-pre-post-filters.go create mode 100644 vendor/github.com/emicklei/go-restful/examples/restful-resource-functions.go create mode 100644 vendor/github.com/emicklei/go-restful/examples/restful-route_test.go create mode 100644 vendor/github.com/emicklei/go-restful/examples/restful-routefunction_test.go create mode 100644 vendor/github.com/emicklei/go-restful/examples/restful-serve-static.go create mode 100644 vendor/github.com/emicklei/go-restful/examples/restful-swagger.go create mode 100644 
vendor/github.com/emicklei/go-restful/examples/restful-user-resource.go create mode 100644 vendor/github.com/emicklei/go-restful/examples/restful-user-service.go create mode 100644 vendor/github.com/emicklei/go-restful/filter.go create mode 100644 vendor/github.com/emicklei/go-restful/filter_test.go create mode 100644 vendor/github.com/emicklei/go-restful/install.sh create mode 100644 vendor/github.com/emicklei/go-restful/jsr311.go create mode 100644 vendor/github.com/emicklei/go-restful/jsr311_test.go create mode 100644 vendor/github.com/emicklei/go-restful/log/log.go create mode 100644 vendor/github.com/emicklei/go-restful/logger.go create mode 100644 vendor/github.com/emicklei/go-restful/mime.go create mode 100644 vendor/github.com/emicklei/go-restful/mime_test.go create mode 100644 vendor/github.com/emicklei/go-restful/options_filter.go create mode 100644 vendor/github.com/emicklei/go-restful/options_filter_test.go create mode 100644 vendor/github.com/emicklei/go-restful/parameter.go create mode 100644 vendor/github.com/emicklei/go-restful/path_expression.go create mode 100644 vendor/github.com/emicklei/go-restful/path_expression_test.go create mode 100644 vendor/github.com/emicklei/go-restful/request.go create mode 100644 vendor/github.com/emicklei/go-restful/request_test.go create mode 100644 vendor/github.com/emicklei/go-restful/response.go create mode 100644 vendor/github.com/emicklei/go-restful/response_test.go create mode 100644 vendor/github.com/emicklei/go-restful/route.go create mode 100644 vendor/github.com/emicklei/go-restful/route_builder.go create mode 100644 vendor/github.com/emicklei/go-restful/route_builder_test.go create mode 100644 vendor/github.com/emicklei/go-restful/route_test.go create mode 100644 vendor/github.com/emicklei/go-restful/router.go create mode 100644 vendor/github.com/emicklei/go-restful/service_error.go create mode 100644 vendor/github.com/emicklei/go-restful/swagger/CHANGES.md create mode 100644 
vendor/github.com/emicklei/go-restful/swagger/README.md create mode 100644 vendor/github.com/emicklei/go-restful/swagger/api_declaration_list.go create mode 100644 vendor/github.com/emicklei/go-restful/swagger/config.go create mode 100644 vendor/github.com/emicklei/go-restful/swagger/model_builder.go create mode 100644 vendor/github.com/emicklei/go-restful/swagger/model_builder_test.go create mode 100644 vendor/github.com/emicklei/go-restful/swagger/model_list.go create mode 100644 vendor/github.com/emicklei/go-restful/swagger/model_list_test.go create mode 100644 vendor/github.com/emicklei/go-restful/swagger/model_property_ext.go create mode 100644 vendor/github.com/emicklei/go-restful/swagger/model_property_ext_test.go create mode 100644 vendor/github.com/emicklei/go-restful/swagger/model_property_list.go create mode 100644 vendor/github.com/emicklei/go-restful/swagger/model_property_list_test.go create mode 100644 vendor/github.com/emicklei/go-restful/swagger/ordered_route_map.go create mode 100644 vendor/github.com/emicklei/go-restful/swagger/ordered_route_map_test.go create mode 100644 vendor/github.com/emicklei/go-restful/swagger/postbuild_model_test.go create mode 100644 vendor/github.com/emicklei/go-restful/swagger/swagger.go create mode 100644 vendor/github.com/emicklei/go-restful/swagger/swagger_builder.go create mode 100644 vendor/github.com/emicklei/go-restful/swagger/swagger_test.go create mode 100644 vendor/github.com/emicklei/go-restful/swagger/swagger_webservice.go create mode 100644 vendor/github.com/emicklei/go-restful/swagger/test_package/struct.go create mode 100644 vendor/github.com/emicklei/go-restful/swagger/utils_test.go create mode 100644 vendor/github.com/emicklei/go-restful/tracer_test.go create mode 100644 vendor/github.com/emicklei/go-restful/web_service.go create mode 100644 vendor/github.com/emicklei/go-restful/web_service_container.go create mode 100644 vendor/github.com/emicklei/go-restful/web_service_test.go create mode 100644 
vendor/github.com/go-openapi/jsonpointer/.editorconfig create mode 100644 vendor/github.com/go-openapi/jsonpointer/.github/CONTRIBUTING.md create mode 100644 vendor/github.com/go-openapi/jsonpointer/.gitignore create mode 100644 vendor/github.com/go-openapi/jsonpointer/.travis.yml create mode 100644 vendor/github.com/go-openapi/jsonpointer/CODE_OF_CONDUCT.md create mode 100644 vendor/github.com/go-openapi/jsonpointer/LICENSE create mode 100644 vendor/github.com/go-openapi/jsonpointer/README.md create mode 100644 vendor/github.com/go-openapi/jsonpointer/pointer.go create mode 100644 vendor/github.com/go-openapi/jsonpointer/pointer_test.go create mode 100644 vendor/github.com/go-openapi/jsonreference/.github/CONTRIBUTING.md create mode 100644 vendor/github.com/go-openapi/jsonreference/.gitignore create mode 100644 vendor/github.com/go-openapi/jsonreference/.travis.yml create mode 100644 vendor/github.com/go-openapi/jsonreference/CODE_OF_CONDUCT.md create mode 100644 vendor/github.com/go-openapi/jsonreference/LICENSE create mode 100644 vendor/github.com/go-openapi/jsonreference/README.md create mode 100644 vendor/github.com/go-openapi/jsonreference/reference.go create mode 100644 vendor/github.com/go-openapi/jsonreference/reference_test.go create mode 100644 vendor/github.com/go-openapi/spec/.editorconfig create mode 100644 vendor/github.com/go-openapi/spec/.github/CONTRIBUTING.md create mode 100644 vendor/github.com/go-openapi/spec/.gitignore create mode 100644 vendor/github.com/go-openapi/spec/.travis.yml create mode 100644 vendor/github.com/go-openapi/spec/CODE_OF_CONDUCT.md create mode 100644 vendor/github.com/go-openapi/spec/LICENSE create mode 100644 vendor/github.com/go-openapi/spec/README.md create mode 100644 vendor/github.com/go-openapi/spec/auth_test.go create mode 100644 vendor/github.com/go-openapi/spec/bindata.go create mode 100644 vendor/github.com/go-openapi/spec/contact_info.go create mode 100644 vendor/github.com/go-openapi/spec/contact_info_test.go 
create mode 100644 vendor/github.com/go-openapi/spec/expander.go create mode 100644 vendor/github.com/go-openapi/spec/expander_test.go create mode 100644 vendor/github.com/go-openapi/spec/external_docs.go create mode 100644 vendor/github.com/go-openapi/spec/external_docs_test.go create mode 100644 vendor/github.com/go-openapi/spec/fixtures/expansion/all-the-things.json create mode 100644 vendor/github.com/go-openapi/spec/fixtures/expansion/circularRefs.json create mode 100644 vendor/github.com/go-openapi/spec/fixtures/expansion/circularSpec.json create mode 100644 vendor/github.com/go-openapi/spec/fixtures/expansion/circularSpec.yaml create mode 100644 vendor/github.com/go-openapi/spec/fixtures/expansion/clickmeter.json create mode 100644 vendor/github.com/go-openapi/spec/fixtures/expansion/clickmeter.yaml create mode 100644 vendor/github.com/go-openapi/spec/fixtures/expansion/invalid-refs.json create mode 100644 vendor/github.com/go-openapi/spec/fixtures/expansion/overflow.json create mode 100644 vendor/github.com/go-openapi/spec/fixtures/expansion/params.json create mode 100644 vendor/github.com/go-openapi/spec/fixtures/expansion/schemas1.json create mode 100644 vendor/github.com/go-openapi/spec/fixtures/expansion/schemas2.json create mode 100644 vendor/github.com/go-openapi/spec/fixtures/local_expansion/item.json create mode 100644 vendor/github.com/go-openapi/spec/fixtures/local_expansion/spec.json create mode 100644 vendor/github.com/go-openapi/spec/fixtures/specs/deeper/arrayProp.json create mode 100644 vendor/github.com/go-openapi/spec/fixtures/specs/deeper/stringProp.json create mode 100644 vendor/github.com/go-openapi/spec/fixtures/specs/refed.json create mode 100644 vendor/github.com/go-openapi/spec/fixtures/specs/resolution.json create mode 100644 vendor/github.com/go-openapi/spec/fixtures/specs/resolution2.json create mode 100644 vendor/github.com/go-openapi/spec/header.go create mode 100644 vendor/github.com/go-openapi/spec/header_test.go create mode 
100644 vendor/github.com/go-openapi/spec/info.go create mode 100644 vendor/github.com/go-openapi/spec/info_test.go create mode 100644 vendor/github.com/go-openapi/spec/items.go create mode 100644 vendor/github.com/go-openapi/spec/items_test.go create mode 100644 vendor/github.com/go-openapi/spec/license.go create mode 100644 vendor/github.com/go-openapi/spec/license_test.go create mode 100644 vendor/github.com/go-openapi/spec/operation.go create mode 100644 vendor/github.com/go-openapi/spec/operation_test.go create mode 100644 vendor/github.com/go-openapi/spec/parameter.go create mode 100644 vendor/github.com/go-openapi/spec/parameters_test.go create mode 100644 vendor/github.com/go-openapi/spec/path_item.go create mode 100644 vendor/github.com/go-openapi/spec/path_item_test.go create mode 100644 vendor/github.com/go-openapi/spec/paths.go create mode 100644 vendor/github.com/go-openapi/spec/paths_test.go create mode 100644 vendor/github.com/go-openapi/spec/properties_test.go create mode 100644 vendor/github.com/go-openapi/spec/ref.go create mode 100644 vendor/github.com/go-openapi/spec/response.go create mode 100644 vendor/github.com/go-openapi/spec/response_test.go create mode 100644 vendor/github.com/go-openapi/spec/responses.go create mode 100644 vendor/github.com/go-openapi/spec/schema.go create mode 100644 vendor/github.com/go-openapi/spec/schema_test.go create mode 100644 vendor/github.com/go-openapi/spec/schemas/jsonschema-draft-04.json create mode 100644 vendor/github.com/go-openapi/spec/schemas/v2/README.md create mode 100644 vendor/github.com/go-openapi/spec/schemas/v2/schema.json create mode 100644 vendor/github.com/go-openapi/spec/security_scheme.go create mode 100644 vendor/github.com/go-openapi/spec/spec.go create mode 100644 vendor/github.com/go-openapi/spec/structs_test.go create mode 100644 vendor/github.com/go-openapi/spec/swagger.go create mode 100644 vendor/github.com/go-openapi/spec/swagger_test.go create mode 100644 
vendor/github.com/go-openapi/spec/tag.go create mode 100644 vendor/github.com/go-openapi/spec/xml_object.go create mode 100644 vendor/github.com/go-openapi/spec/xml_object_test.go create mode 100644 vendor/github.com/go-openapi/swag/.editorconfig create mode 100644 vendor/github.com/go-openapi/swag/.github/CONTRIBUTING.md create mode 100644 vendor/github.com/go-openapi/swag/.gitignore create mode 100644 vendor/github.com/go-openapi/swag/.travis.yml create mode 100644 vendor/github.com/go-openapi/swag/CODE_OF_CONDUCT.md create mode 100644 vendor/github.com/go-openapi/swag/LICENSE create mode 100644 vendor/github.com/go-openapi/swag/README.md create mode 100644 vendor/github.com/go-openapi/swag/convert.go create mode 100644 vendor/github.com/go-openapi/swag/convert_test.go create mode 100644 vendor/github.com/go-openapi/swag/convert_types.go create mode 100644 vendor/github.com/go-openapi/swag/convert_types_test.go create mode 100644 vendor/github.com/go-openapi/swag/json.go create mode 100644 vendor/github.com/go-openapi/swag/json_test.go create mode 100644 vendor/github.com/go-openapi/swag/loading.go create mode 100644 vendor/github.com/go-openapi/swag/loading_test.go create mode 100644 vendor/github.com/go-openapi/swag/net.go create mode 100644 vendor/github.com/go-openapi/swag/net_test.go create mode 100644 vendor/github.com/go-openapi/swag/path.go create mode 100644 vendor/github.com/go-openapi/swag/path_test.go create mode 100644 vendor/github.com/go-openapi/swag/util.go create mode 100644 vendor/github.com/go-openapi/swag/util_test.go create mode 100644 vendor/github.com/golang/glog/LICENSE create mode 100644 vendor/github.com/golang/glog/README create mode 100644 vendor/github.com/golang/glog/glog.go create mode 100644 vendor/github.com/golang/glog/glog_file.go create mode 100644 vendor/github.com/golang/glog/glog_test.go create mode 100644 vendor/github.com/golang/groupcache/.gitignore create mode 100644 vendor/github.com/golang/groupcache/LICENSE create 
mode 100644 vendor/github.com/golang/groupcache/README.md create mode 100644 vendor/github.com/golang/groupcache/byteview.go create mode 100644 vendor/github.com/golang/groupcache/byteview_test.go create mode 100644 vendor/github.com/golang/groupcache/consistenthash/consistenthash.go create mode 100644 vendor/github.com/golang/groupcache/consistenthash/consistenthash_test.go create mode 100644 vendor/github.com/golang/groupcache/groupcache.go create mode 100644 vendor/github.com/golang/groupcache/groupcache_test.go create mode 100644 vendor/github.com/golang/groupcache/groupcachepb/groupcache.pb.go create mode 100644 vendor/github.com/golang/groupcache/groupcachepb/groupcache.proto create mode 100644 vendor/github.com/golang/groupcache/http.go create mode 100644 vendor/github.com/golang/groupcache/http_test.go create mode 100644 vendor/github.com/golang/groupcache/lru/lru.go create mode 100644 vendor/github.com/golang/groupcache/lru/lru_test.go create mode 100644 vendor/github.com/golang/groupcache/peers.go create mode 100644 vendor/github.com/golang/groupcache/singleflight/singleflight.go create mode 100644 vendor/github.com/golang/groupcache/singleflight/singleflight_test.go create mode 100644 vendor/github.com/golang/groupcache/sinks.go create mode 100644 vendor/github.com/golang/groupcache/testpb/test.pb.go create mode 100644 vendor/github.com/golang/groupcache/testpb/test.proto create mode 100644 vendor/github.com/google/gofuzz/.travis.yml create mode 100644 vendor/github.com/google/gofuzz/CONTRIBUTING.md create mode 100644 vendor/github.com/google/gofuzz/LICENSE create mode 100644 vendor/github.com/google/gofuzz/README.md create mode 100644 vendor/github.com/google/gofuzz/doc.go create mode 100644 vendor/github.com/google/gofuzz/example_test.go create mode 100644 vendor/github.com/google/gofuzz/fuzz.go create mode 100644 vendor/github.com/google/gofuzz/fuzz_test.go create mode 100644 vendor/github.com/googleapis/gax-go/.gitignore create mode 100644 
vendor/github.com/googleapis/gax-go/.travis.yml create mode 100644 vendor/github.com/googleapis/gax-go/CONTRIBUTING.md create mode 100644 vendor/github.com/googleapis/gax-go/LICENSE create mode 100644 vendor/github.com/googleapis/gax-go/README.md create mode 100644 vendor/github.com/googleapis/gax-go/call_option.go create mode 100644 vendor/github.com/googleapis/gax-go/call_option_test.go create mode 100644 vendor/github.com/googleapis/gax-go/gax.go create mode 100644 vendor/github.com/googleapis/gax-go/header.go create mode 100644 vendor/github.com/googleapis/gax-go/header_test.go create mode 100644 vendor/github.com/googleapis/gax-go/invoke.go create mode 100644 vendor/github.com/googleapis/gax-go/invoke_test.go create mode 100644 vendor/github.com/googleapis/gax-go/path_template.go create mode 100644 vendor/github.com/googleapis/gax-go/path_template_parser.go create mode 100644 vendor/github.com/googleapis/gax-go/path_template_test.go create mode 100644 vendor/github.com/jonboulle/clockwork/.gitignore create mode 100644 vendor/github.com/jonboulle/clockwork/.travis.yml create mode 100644 vendor/github.com/jonboulle/clockwork/LICENSE create mode 100644 vendor/github.com/jonboulle/clockwork/README.md create mode 100644 vendor/github.com/jonboulle/clockwork/clockwork.go create mode 100644 vendor/github.com/jonboulle/clockwork/clockwork_test.go create mode 100644 vendor/github.com/jonboulle/clockwork/example_test.go create mode 100644 vendor/github.com/juju/ratelimit/LICENSE create mode 100644 vendor/github.com/juju/ratelimit/README.md create mode 100644 vendor/github.com/juju/ratelimit/ratelimit.go create mode 100644 vendor/github.com/juju/ratelimit/ratelimit_test.go create mode 100644 vendor/github.com/juju/ratelimit/reader.go create mode 100644 vendor/github.com/kr/pty/.gitignore create mode 100644 vendor/github.com/kr/pty/License create mode 100644 vendor/github.com/kr/pty/README.md create mode 100644 vendor/github.com/kr/pty/doc.go create mode 100644 
vendor/github.com/kr/pty/ioctl.go create mode 100644 vendor/github.com/kr/pty/ioctl_bsd.go create mode 100755 vendor/github.com/kr/pty/mktypes.bash create mode 100644 vendor/github.com/kr/pty/pty_darwin.go create mode 100644 vendor/github.com/kr/pty/pty_dragonfly.go create mode 100644 vendor/github.com/kr/pty/pty_freebsd.go create mode 100644 vendor/github.com/kr/pty/pty_linux.go create mode 100644 vendor/github.com/kr/pty/pty_unsupported.go create mode 100644 vendor/github.com/kr/pty/run.go create mode 100644 vendor/github.com/kr/pty/types.go create mode 100644 vendor/github.com/kr/pty/types_dragonfly.go create mode 100644 vendor/github.com/kr/pty/types_freebsd.go create mode 100644 vendor/github.com/kr/pty/util.go create mode 100644 vendor/github.com/kr/pty/ztypes_386.go create mode 100644 vendor/github.com/kr/pty/ztypes_amd64.go create mode 100644 vendor/github.com/kr/pty/ztypes_arm.go create mode 100644 vendor/github.com/kr/pty/ztypes_arm64.go create mode 100644 vendor/github.com/kr/pty/ztypes_dragonfly_amd64.go create mode 100644 vendor/github.com/kr/pty/ztypes_freebsd_386.go create mode 100644 vendor/github.com/kr/pty/ztypes_freebsd_amd64.go create mode 100644 vendor/github.com/kr/pty/ztypes_freebsd_arm.go create mode 100644 vendor/github.com/kr/pty/ztypes_mipsx.go create mode 100644 vendor/github.com/kr/pty/ztypes_ppc64.go create mode 100644 vendor/github.com/kr/pty/ztypes_ppc64le.go create mode 100644 vendor/github.com/kr/pty/ztypes_s390x.go create mode 100644 vendor/github.com/mailru/easyjson/.gitignore create mode 100644 vendor/github.com/mailru/easyjson/.travis.yml create mode 100644 vendor/github.com/mailru/easyjson/LICENSE create mode 100644 vendor/github.com/mailru/easyjson/Makefile create mode 100644 vendor/github.com/mailru/easyjson/README.md create mode 100644 vendor/github.com/mailru/easyjson/benchmark/codec_test.go create mode 100644 vendor/github.com/mailru/easyjson/benchmark/data.go create mode 100644 
vendor/github.com/mailru/easyjson/benchmark/data_codec.go create mode 100644 vendor/github.com/mailru/easyjson/benchmark/data_ffjson.go create mode 100644 vendor/github.com/mailru/easyjson/benchmark/data_var.go create mode 100644 vendor/github.com/mailru/easyjson/benchmark/default_test.go create mode 100644 vendor/github.com/mailru/easyjson/benchmark/dummy_test.go create mode 100644 vendor/github.com/mailru/easyjson/benchmark/easyjson_test.go create mode 100644 vendor/github.com/mailru/easyjson/benchmark/example.json create mode 100644 vendor/github.com/mailru/easyjson/benchmark/ffjson_test.go create mode 100755 vendor/github.com/mailru/easyjson/benchmark/ujson.sh create mode 100644 vendor/github.com/mailru/easyjson/bootstrap/bootstrap.go create mode 100644 vendor/github.com/mailru/easyjson/buffer/pool.go create mode 100644 vendor/github.com/mailru/easyjson/buffer/pool_test.go create mode 100644 vendor/github.com/mailru/easyjson/easyjson/main.go create mode 100644 vendor/github.com/mailru/easyjson/gen/decoder.go create mode 100644 vendor/github.com/mailru/easyjson/gen/encoder.go create mode 100644 vendor/github.com/mailru/easyjson/gen/generator.go create mode 100644 vendor/github.com/mailru/easyjson/gen/generator_test.go create mode 100644 vendor/github.com/mailru/easyjson/helpers.go create mode 100644 vendor/github.com/mailru/easyjson/jlexer/error.go create mode 100644 vendor/github.com/mailru/easyjson/jlexer/lexer.go create mode 100644 vendor/github.com/mailru/easyjson/jlexer/lexer_test.go create mode 100644 vendor/github.com/mailru/easyjson/jwriter/writer.go create mode 100644 vendor/github.com/mailru/easyjson/opt/gotemplate_Bool.go create mode 100644 vendor/github.com/mailru/easyjson/opt/gotemplate_Float32.go create mode 100644 vendor/github.com/mailru/easyjson/opt/gotemplate_Float64.go create mode 100644 vendor/github.com/mailru/easyjson/opt/gotemplate_Int.go create mode 100644 vendor/github.com/mailru/easyjson/opt/gotemplate_Int16.go create mode 100644 
vendor/github.com/mailru/easyjson/opt/gotemplate_Int32.go create mode 100644 vendor/github.com/mailru/easyjson/opt/gotemplate_Int64.go create mode 100644 vendor/github.com/mailru/easyjson/opt/gotemplate_Int8.go create mode 100644 vendor/github.com/mailru/easyjson/opt/gotemplate_String.go create mode 100644 vendor/github.com/mailru/easyjson/opt/gotemplate_Uint.go create mode 100644 vendor/github.com/mailru/easyjson/opt/gotemplate_Uint16.go create mode 100644 vendor/github.com/mailru/easyjson/opt/gotemplate_Uint32.go create mode 100644 vendor/github.com/mailru/easyjson/opt/gotemplate_Uint64.go create mode 100644 vendor/github.com/mailru/easyjson/opt/gotemplate_Uint8.go create mode 100644 vendor/github.com/mailru/easyjson/opt/optional/opt.go create mode 100644 vendor/github.com/mailru/easyjson/opt/opts.go create mode 100644 vendor/github.com/mailru/easyjson/parser/parser.go create mode 100644 vendor/github.com/mailru/easyjson/parser/parser_unix.go create mode 100644 vendor/github.com/mailru/easyjson/parser/parser_windows.go create mode 100644 vendor/github.com/mailru/easyjson/raw.go create mode 100644 vendor/github.com/mailru/easyjson/tests/basic_test.go create mode 100644 vendor/github.com/mailru/easyjson/tests/data.go create mode 100644 vendor/github.com/mailru/easyjson/tests/errors.go create mode 100644 vendor/github.com/mailru/easyjson/tests/errors_test.go create mode 100644 vendor/github.com/mailru/easyjson/tests/named_type.go create mode 100644 vendor/github.com/mailru/easyjson/tests/nested_easy.go create mode 100644 vendor/github.com/mailru/easyjson/tests/nothing.go create mode 100644 vendor/github.com/mailru/easyjson/tests/omitempty.go create mode 100644 vendor/github.com/mailru/easyjson/tests/required_test.go create mode 100644 vendor/github.com/mailru/easyjson/tests/snake.go create mode 100644 vendor/github.com/mitchellh/go-wordwrap/LICENSE.md create mode 100644 vendor/github.com/mitchellh/go-wordwrap/README.md create mode 100644 
vendor/github.com/mitchellh/go-wordwrap/wordwrap.go create mode 100644 vendor/github.com/mitchellh/go-wordwrap/wordwrap_test.go create mode 100644 vendor/github.com/opencontainers/runc/Godeps/Godeps.json create mode 100644 vendor/github.com/opencontainers/runc/Godeps/Readme create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/.gitignore create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/Sirupsen/logrus/.gitignore create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/Sirupsen/logrus/.travis.yml create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/Sirupsen/logrus/CHANGELOG.md create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/Sirupsen/logrus/LICENSE create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/Sirupsen/logrus/README.md create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/Sirupsen/logrus/entry.go create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/Sirupsen/logrus/examples/basic/basic.go create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/Sirupsen/logrus/examples/hook/hook.go create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/Sirupsen/logrus/exported.go create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/Sirupsen/logrus/formatter.go create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/Sirupsen/logrus/formatters/logstash/logstash.go create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks.go create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/airbrake/airbrake.go create mode 100644 
vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/bugsnag/bugsnag.go create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/papertrail/README.md create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/papertrail/papertrail.go create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/sentry/README.md create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/sentry/sentry.go create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/syslog/README.md create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/syslog/syslog.go create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/Sirupsen/logrus/json_formatter.go create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/Sirupsen/logrus/logger.go create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/Sirupsen/logrus/logrus.go create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_darwin.go create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_freebsd.go create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_linux.go create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_notwindows.go create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_openbsd.go create mode 100644 
vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_windows.go create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/Sirupsen/logrus/text_formatter.go create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/Sirupsen/logrus/writer.go create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/coreos/go-systemd/LICENSE create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/coreos/go-systemd/activation/files.go create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/coreos/go-systemd/activation/listeners.go create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/coreos/go-systemd/activation/packetconns.go create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/coreos/go-systemd/dbus/dbus.go create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/coreos/go-systemd/dbus/methods.go create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/coreos/go-systemd/dbus/properties.go create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/coreos/go-systemd/dbus/set.go create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/coreos/go-systemd/dbus/subscription.go create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/coreos/go-systemd/dbus/subscription_set.go create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/coreos/go-systemd/util/util.go create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/coreos/go-systemd/util/util_cgo.go create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/coreos/go-systemd/util/util_stub.go create mode 
100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/coreos/pkg/LICENSE create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/coreos/pkg/NOTICE create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/coreos/pkg/dlopen/dlopen.go create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/coreos/pkg/dlopen/dlopen_example.go create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/docker/docker/LICENSE create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/docker/docker/NOTICE create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/docker/docker/contrib/syntax/vim/LICENSE create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/docker/docker/docs/project/images/red_notice.png create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/docker/docker/pkg/mflag/LICENSE create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/flags.go create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/flags_freebsd.go create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/flags_linux.go create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/flags_unsupported.go create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/mount.go create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/mounter_freebsd.go create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/mounter_linux.go create mode 100644 
vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/mounter_unsupported.go create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/mountinfo.go create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/mountinfo_freebsd.go create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/mountinfo_linux.go create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/mountinfo_unsupported.go create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/docker/docker/pkg/mount/sharedsubtree_linux.go create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/docker/docker/pkg/symlink/LICENSE.APACHE create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/docker/docker/pkg/symlink/LICENSE.BSD create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/docker/docker/pkg/symlink/README.md create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/docker/docker/pkg/symlink/fs.go create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/docker/docker/pkg/term/tc_linux_cgo.go create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/docker/docker/pkg/term/tc_other.go create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/docker/docker/pkg/term/term.go create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/docker/docker/pkg/term/term_windows.go create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/docker/docker/pkg/term/termios_darwin.go create mode 100644 
vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/docker/docker/pkg/term/termios_freebsd.go create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/docker/docker/pkg/term/termios_linux.go create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/docker/docker/pkg/term/winconsole/console_windows.go create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/docker/docker/pkg/term/winconsole/term_emulator.go create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/docker/go-units/LICENSE create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/docker/go-units/README.md create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/docker/go-units/duration.go create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/docker/go-units/size.go create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/godbus/dbus/CONTRIBUTING.md create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/godbus/dbus/LICENSE create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/godbus/dbus/MAINTAINERS create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/godbus/dbus/README.markdown create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/godbus/dbus/auth.go create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/godbus/dbus/auth_external.go create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/godbus/dbus/auth_sha1.go create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/godbus/dbus/call.go create mode 100644 
vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/godbus/dbus/conn.go create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/godbus/dbus/conn_darwin.go create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/godbus/dbus/conn_other.go create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/godbus/dbus/dbus.go create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/godbus/dbus/decoder.go create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/godbus/dbus/doc.go create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/godbus/dbus/encoder.go create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/godbus/dbus/export.go create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/godbus/dbus/homedir.go create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/godbus/dbus/homedir_dynamic.go create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/godbus/dbus/homedir_static.go create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/godbus/dbus/introspect/call.go create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/godbus/dbus/introspect/introspect.go create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/godbus/dbus/introspect/introspectable.go create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/godbus/dbus/message.go create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/godbus/dbus/object.go create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/godbus/dbus/prop/prop.go create mode 100644 
vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/godbus/dbus/sig.go create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/godbus/dbus/transport_darwin.go create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/godbus/dbus/transport_generic.go create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/godbus/dbus/transport_unix.go create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/godbus/dbus/transport_unixcred_dragonfly.go create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/godbus/dbus/transport_unixcred_linux.go create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/godbus/dbus/variant.go create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/godbus/dbus/variant_lexer.go create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/godbus/dbus/variant_parser.go create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/golang/protobuf/LICENSE create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/golang/protobuf/proto/Makefile create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/golang/protobuf/proto/clone.go create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/golang/protobuf/proto/decode.go create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/golang/protobuf/proto/encode.go create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/golang/protobuf/proto/equal.go create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/golang/protobuf/proto/extensions.go create mode 100644 
vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/golang/protobuf/proto/lib.go create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/golang/protobuf/proto/message_set.go create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/golang/protobuf/proto/pointer_reflect.go create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/golang/protobuf/proto/pointer_unsafe.go create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/golang/protobuf/proto/properties.go create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/golang/protobuf/proto/proto3_proto/Makefile create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/golang/protobuf/proto/proto3_proto/proto3.proto create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/golang/protobuf/proto/text.go create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/golang/protobuf/proto/text_parser.go create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/mrunalp/fileutils/LICENSE create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/mrunalp/fileutils/README.md create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/mrunalp/fileutils/fileutils.go create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/mrunalp/fileutils/idtools.go create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/opencontainers/runtime-spec/LICENSE create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/opencontainers/runtime-spec/specs-go/config.go create mode 100644 
vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/opencontainers/runtime-spec/specs-go/state.go create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/opencontainers/runtime-spec/specs-go/version.go create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/pquerna/ffjson/LICENSE create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/pquerna/ffjson/NOTICE create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/pquerna/ffjson/fflib/v1/buffer.go create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/pquerna/ffjson/fflib/v1/buffer_nopool.go create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/pquerna/ffjson/fflib/v1/buffer_pool.go create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/pquerna/ffjson/fflib/v1/bytenum.go create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/pquerna/ffjson/fflib/v1/decimal.go create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/pquerna/ffjson/fflib/v1/extfloat.go create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/pquerna/ffjson/fflib/v1/fold.go create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/pquerna/ffjson/fflib/v1/ftoa.go create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/pquerna/ffjson/fflib/v1/internal/atof.go create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/pquerna/ffjson/fflib/v1/internal/atoi.go create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/pquerna/ffjson/fflib/v1/internal/extfloat.go create mode 100644 
vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/pquerna/ffjson/fflib/v1/internal/ftoa.go create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/pquerna/ffjson/fflib/v1/iota.go create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/pquerna/ffjson/fflib/v1/jsonstring.go create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/pquerna/ffjson/fflib/v1/lexer.go create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/pquerna/ffjson/fflib/v1/reader.go create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/pquerna/ffjson/fflib/v1/reader_scan_amd64.go create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/pquerna/ffjson/fflib/v1/reader_scan_amd64.s create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/pquerna/ffjson/fflib/v1/reader_scan_generic.go create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/seccomp/libseccomp-golang/LICENSE create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/seccomp/libseccomp-golang/README create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/seccomp/libseccomp-golang/seccomp.go create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/seccomp/libseccomp-golang/seccomp_internal.go create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/syndtr/gocapability/LICENSE create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/syndtr/gocapability/capability/capability.go create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/syndtr/gocapability/capability/capability_linux.go create mode 100644 
vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/syndtr/gocapability/capability/capability_noop.go create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/syndtr/gocapability/capability/enum.go create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/syndtr/gocapability/capability/enum_gen.go create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/syndtr/gocapability/capability/enumgen/gen.go create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/syndtr/gocapability/capability/syscall_linux.go create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/urfave/cli/.gitignore create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/urfave/cli/.travis.yml create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/urfave/cli/CHANGELOG.md create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/urfave/cli/LICENSE create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/urfave/cli/README.md create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/urfave/cli/altsrc/altsrc.go create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/urfave/cli/altsrc/flag.go create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/urfave/cli/altsrc/flag_generated.go create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/urfave/cli/altsrc/input_source_context.go create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/urfave/cli/altsrc/map_input_source.go create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/urfave/cli/altsrc/toml_file_loader.go create mode 100644 
vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/urfave/cli/altsrc/yaml_file_loader.go create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/urfave/cli/app.go create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/urfave/cli/appveyor.yml create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/urfave/cli/autocomplete/bash_autocomplete create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/urfave/cli/autocomplete/zsh_autocomplete create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/urfave/cli/category.go create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/urfave/cli/cli.go create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/urfave/cli/command.go create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/urfave/cli/context.go create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/urfave/cli/errors.go create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/urfave/cli/flag-types.json create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/urfave/cli/flag.go create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/urfave/cli/flag_generated.go create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/urfave/cli/funcs.go create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/urfave/cli/generate-flag-types create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/urfave/cli/help.go create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/urfave/cli/runtests create mode 100644 
vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/vishvananda/netlink/.travis.yml create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/vishvananda/netlink/LICENSE create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/vishvananda/netlink/Makefile create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/vishvananda/netlink/README.md create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/vishvananda/netlink/addr.go create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/vishvananda/netlink/addr_linux.go create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/vishvananda/netlink/filter.go create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/vishvananda/netlink/filter_linux.go create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/vishvananda/netlink/link.go create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/vishvananda/netlink/link_linux.go create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/vishvananda/netlink/neigh.go create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/vishvananda/netlink/neigh_linux.go create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/vishvananda/netlink/netlink.go create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/vishvananda/netlink/netlink_unspecified.go create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/vishvananda/netlink/nl/addr_linux.go create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/vishvananda/netlink/nl/link_linux.go create mode 100644 
vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/vishvananda/netlink/nl/nl_linux.go create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/vishvananda/netlink/nl/route_linux.go create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/vishvananda/netlink/nl/tc_linux.go create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/vishvananda/netlink/nl/xfrm_linux.go create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/vishvananda/netlink/nl/xfrm_policy_linux.go create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/vishvananda/netlink/nl/xfrm_state_linux.go create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/vishvananda/netlink/protinfo.go create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/vishvananda/netlink/protinfo_linux.go create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/vishvananda/netlink/qdisc.go create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/vishvananda/netlink/qdisc_linux.go create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/vishvananda/netlink/route.go create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/vishvananda/netlink/route_linux.go create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/vishvananda/netlink/xfrm.go create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/vishvananda/netlink/xfrm_policy.go create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/vishvananda/netlink/xfrm_policy_linux.go create mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/vishvananda/netlink/xfrm_state.go create 
mode 100644 vendor/github.com/opencontainers/runc/Godeps/_workspace/src/github.com/vishvananda/netlink/xfrm_state_linux.go create mode 100644 vendor/github.com/opencontainers/runtime-tools/Godeps/Godeps.json create mode 100644 vendor/github.com/opencontainers/runtime-tools/Godeps/Readme create mode 100644 vendor/github.com/opencontainers/runtime-tools/Godeps/_workspace/.gitignore create mode 100644 vendor/github.com/opencontainers/runtime-tools/Godeps/_workspace/src/github.com/Sirupsen/logrus/.gitignore create mode 100644 vendor/github.com/opencontainers/runtime-tools/Godeps/_workspace/src/github.com/Sirupsen/logrus/.travis.yml create mode 100644 vendor/github.com/opencontainers/runtime-tools/Godeps/_workspace/src/github.com/Sirupsen/logrus/CHANGELOG.md create mode 100644 vendor/github.com/opencontainers/runtime-tools/Godeps/_workspace/src/github.com/Sirupsen/logrus/LICENSE create mode 100644 vendor/github.com/opencontainers/runtime-tools/Godeps/_workspace/src/github.com/Sirupsen/logrus/README.md create mode 100644 vendor/github.com/opencontainers/runtime-tools/Godeps/_workspace/src/github.com/Sirupsen/logrus/entry.go create mode 100644 vendor/github.com/opencontainers/runtime-tools/Godeps/_workspace/src/github.com/Sirupsen/logrus/entry_test.go create mode 100644 vendor/github.com/opencontainers/runtime-tools/Godeps/_workspace/src/github.com/Sirupsen/logrus/examples/basic/basic.go create mode 100644 vendor/github.com/opencontainers/runtime-tools/Godeps/_workspace/src/github.com/Sirupsen/logrus/examples/hook/hook.go create mode 100644 vendor/github.com/opencontainers/runtime-tools/Godeps/_workspace/src/github.com/Sirupsen/logrus/exported.go create mode 100644 vendor/github.com/opencontainers/runtime-tools/Godeps/_workspace/src/github.com/Sirupsen/logrus/formatter.go create mode 100644 vendor/github.com/opencontainers/runtime-tools/Godeps/_workspace/src/github.com/Sirupsen/logrus/formatter_bench_test.go create mode 100644 
vendor/github.com/opencontainers/runtime-tools/Godeps/_workspace/src/github.com/Sirupsen/logrus/formatters/logstash/logstash.go create mode 100644 vendor/github.com/opencontainers/runtime-tools/Godeps/_workspace/src/github.com/Sirupsen/logrus/formatters/logstash/logstash_test.go create mode 100644 vendor/github.com/opencontainers/runtime-tools/Godeps/_workspace/src/github.com/Sirupsen/logrus/hook_test.go create mode 100644 vendor/github.com/opencontainers/runtime-tools/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks.go create mode 100644 vendor/github.com/opencontainers/runtime-tools/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/airbrake/airbrake.go create mode 100644 vendor/github.com/opencontainers/runtime-tools/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/airbrake/airbrake_test.go create mode 100644 vendor/github.com/opencontainers/runtime-tools/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/bugsnag/bugsnag.go create mode 100644 vendor/github.com/opencontainers/runtime-tools/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/bugsnag/bugsnag_test.go create mode 100644 vendor/github.com/opencontainers/runtime-tools/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/papertrail/README.md create mode 100644 vendor/github.com/opencontainers/runtime-tools/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/papertrail/papertrail.go create mode 100644 vendor/github.com/opencontainers/runtime-tools/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/papertrail/papertrail_test.go create mode 100644 vendor/github.com/opencontainers/runtime-tools/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/sentry/README.md create mode 100644 vendor/github.com/opencontainers/runtime-tools/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/sentry/sentry.go create mode 100644 vendor/github.com/opencontainers/runtime-tools/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/sentry/sentry_test.go create mode 100644 
vendor/github.com/opencontainers/runtime-tools/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/syslog/README.md create mode 100644 vendor/github.com/opencontainers/runtime-tools/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/syslog/syslog.go create mode 100644 vendor/github.com/opencontainers/runtime-tools/Godeps/_workspace/src/github.com/Sirupsen/logrus/hooks/syslog/syslog_test.go create mode 100644 vendor/github.com/opencontainers/runtime-tools/Godeps/_workspace/src/github.com/Sirupsen/logrus/json_formatter.go create mode 100644 vendor/github.com/opencontainers/runtime-tools/Godeps/_workspace/src/github.com/Sirupsen/logrus/json_formatter_test.go create mode 100644 vendor/github.com/opencontainers/runtime-tools/Godeps/_workspace/src/github.com/Sirupsen/logrus/logger.go create mode 100644 vendor/github.com/opencontainers/runtime-tools/Godeps/_workspace/src/github.com/Sirupsen/logrus/logrus.go create mode 100644 vendor/github.com/opencontainers/runtime-tools/Godeps/_workspace/src/github.com/Sirupsen/logrus/logrus_test.go create mode 100644 vendor/github.com/opencontainers/runtime-tools/Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_bsd.go create mode 100644 vendor/github.com/opencontainers/runtime-tools/Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_linux.go create mode 100644 vendor/github.com/opencontainers/runtime-tools/Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_notwindows.go create mode 100644 vendor/github.com/opencontainers/runtime-tools/Godeps/_workspace/src/github.com/Sirupsen/logrus/terminal_windows.go create mode 100644 vendor/github.com/opencontainers/runtime-tools/Godeps/_workspace/src/github.com/Sirupsen/logrus/text_formatter.go create mode 100644 vendor/github.com/opencontainers/runtime-tools/Godeps/_workspace/src/github.com/Sirupsen/logrus/text_formatter_test.go create mode 100644 vendor/github.com/opencontainers/runtime-tools/Godeps/_workspace/src/github.com/Sirupsen/logrus/writer.go create mode 
100644 vendor/github.com/opencontainers/runtime-tools/Godeps/_workspace/src/github.com/blang/semver/LICENSE create mode 100644 vendor/github.com/opencontainers/runtime-tools/Godeps/_workspace/src/github.com/blang/semver/README.md create mode 100644 vendor/github.com/opencontainers/runtime-tools/Godeps/_workspace/src/github.com/blang/semver/examples/main.go create mode 100644 vendor/github.com/opencontainers/runtime-tools/Godeps/_workspace/src/github.com/blang/semver/json.go create mode 100644 vendor/github.com/opencontainers/runtime-tools/Godeps/_workspace/src/github.com/blang/semver/json_test.go create mode 100644 vendor/github.com/opencontainers/runtime-tools/Godeps/_workspace/src/github.com/blang/semver/range.go create mode 100644 vendor/github.com/opencontainers/runtime-tools/Godeps/_workspace/src/github.com/blang/semver/range_test.go create mode 100644 vendor/github.com/opencontainers/runtime-tools/Godeps/_workspace/src/github.com/blang/semver/semver.go create mode 100644 vendor/github.com/opencontainers/runtime-tools/Godeps/_workspace/src/github.com/blang/semver/semver_test.go create mode 100644 vendor/github.com/opencontainers/runtime-tools/Godeps/_workspace/src/github.com/blang/semver/sort.go create mode 100644 vendor/github.com/opencontainers/runtime-tools/Godeps/_workspace/src/github.com/blang/semver/sort_test.go create mode 100644 vendor/github.com/opencontainers/runtime-tools/Godeps/_workspace/src/github.com/blang/semver/sql.go create mode 100644 vendor/github.com/opencontainers/runtime-tools/Godeps/_workspace/src/github.com/blang/semver/sql_test.go create mode 100644 vendor/github.com/opencontainers/runtime-tools/Godeps/_workspace/src/github.com/hashicorp/errwrap/LICENSE create mode 100644 vendor/github.com/opencontainers/runtime-tools/Godeps/_workspace/src/github.com/hashicorp/errwrap/README.md create mode 100644 vendor/github.com/opencontainers/runtime-tools/Godeps/_workspace/src/github.com/hashicorp/errwrap/errwrap.go create mode 100644 
vendor/github.com/opencontainers/runtime-tools/Godeps/_workspace/src/github.com/hashicorp/go-multierror/.travis.yml create mode 100644 vendor/github.com/opencontainers/runtime-tools/Godeps/_workspace/src/github.com/hashicorp/go-multierror/LICENSE create mode 100644 vendor/github.com/opencontainers/runtime-tools/Godeps/_workspace/src/github.com/hashicorp/go-multierror/Makefile create mode 100644 vendor/github.com/opencontainers/runtime-tools/Godeps/_workspace/src/github.com/hashicorp/go-multierror/README.md create mode 100644 vendor/github.com/opencontainers/runtime-tools/Godeps/_workspace/src/github.com/hashicorp/go-multierror/append.go create mode 100644 vendor/github.com/opencontainers/runtime-tools/Godeps/_workspace/src/github.com/hashicorp/go-multierror/flatten.go create mode 100644 vendor/github.com/opencontainers/runtime-tools/Godeps/_workspace/src/github.com/hashicorp/go-multierror/format.go create mode 100644 vendor/github.com/opencontainers/runtime-tools/Godeps/_workspace/src/github.com/hashicorp/go-multierror/multierror.go create mode 100644 vendor/github.com/opencontainers/runtime-tools/Godeps/_workspace/src/github.com/hashicorp/go-multierror/prefix.go create mode 100644 vendor/github.com/opencontainers/runtime-tools/Godeps/_workspace/src/github.com/mndrix/tap-go/.gitignore create mode 100644 vendor/github.com/opencontainers/runtime-tools/Godeps/_workspace/src/github.com/mndrix/tap-go/LICENSE create mode 100644 vendor/github.com/opencontainers/runtime-tools/Godeps/_workspace/src/github.com/mndrix/tap-go/Makefile create mode 100644 vendor/github.com/opencontainers/runtime-tools/Godeps/_workspace/src/github.com/mndrix/tap-go/README.md create mode 100644 vendor/github.com/opencontainers/runtime-tools/Godeps/_workspace/src/github.com/mndrix/tap-go/tap.go create mode 100644 vendor/github.com/opencontainers/runtime-tools/Godeps/_workspace/src/github.com/mrunalp/fileutils/.gitignore create mode 100644 
vendor/github.com/opencontainers/runtime-tools/Godeps/_workspace/src/github.com/mrunalp/fileutils/LICENSE create mode 100644 vendor/github.com/opencontainers/runtime-tools/Godeps/_workspace/src/github.com/mrunalp/fileutils/MAINTAINERS create mode 100644 vendor/github.com/opencontainers/runtime-tools/Godeps/_workspace/src/github.com/mrunalp/fileutils/README.md create mode 100644 vendor/github.com/opencontainers/runtime-tools/Godeps/_workspace/src/github.com/mrunalp/fileutils/fileutils.go create mode 100644 vendor/github.com/opencontainers/runtime-tools/Godeps/_workspace/src/github.com/mrunalp/fileutils/idtools.go create mode 100644 vendor/github.com/opencontainers/runtime-tools/Godeps/_workspace/src/github.com/opencontainers/runtime-spec/LICENSE create mode 100644 vendor/github.com/opencontainers/runtime-tools/Godeps/_workspace/src/github.com/opencontainers/runtime-spec/specs-go/config.go create mode 100644 vendor/github.com/opencontainers/runtime-tools/Godeps/_workspace/src/github.com/opencontainers/runtime-spec/specs-go/state.go create mode 100644 vendor/github.com/opencontainers/runtime-tools/Godeps/_workspace/src/github.com/opencontainers/runtime-spec/specs-go/version.go create mode 100644 vendor/github.com/opencontainers/runtime-tools/Godeps/_workspace/src/github.com/satori/go.uuid/.travis.yml create mode 100644 vendor/github.com/opencontainers/runtime-tools/Godeps/_workspace/src/github.com/satori/go.uuid/LICENSE create mode 100644 vendor/github.com/opencontainers/runtime-tools/Godeps/_workspace/src/github.com/satori/go.uuid/README.md create mode 100644 vendor/github.com/opencontainers/runtime-tools/Godeps/_workspace/src/github.com/satori/go.uuid/uuid.go create mode 100644 vendor/github.com/opencontainers/runtime-tools/Godeps/_workspace/src/github.com/syndtr/gocapability/capability/capability.go create mode 100644 vendor/github.com/opencontainers/runtime-tools/Godeps/_workspace/src/github.com/syndtr/gocapability/capability/capability_linux.go create mode 100644 
vendor/github.com/opencontainers/runtime-tools/Godeps/_workspace/src/github.com/syndtr/gocapability/capability/capability_noop.go create mode 100644 vendor/github.com/opencontainers/runtime-tools/Godeps/_workspace/src/github.com/syndtr/gocapability/capability/capability_test.go create mode 100644 vendor/github.com/opencontainers/runtime-tools/Godeps/_workspace/src/github.com/syndtr/gocapability/capability/enum.go create mode 100644 vendor/github.com/opencontainers/runtime-tools/Godeps/_workspace/src/github.com/syndtr/gocapability/capability/enum_gen.go create mode 100644 vendor/github.com/opencontainers/runtime-tools/Godeps/_workspace/src/github.com/syndtr/gocapability/capability/enumgen/gen.go create mode 100644 vendor/github.com/opencontainers/runtime-tools/Godeps/_workspace/src/github.com/syndtr/gocapability/capability/syscall_linux.go create mode 100644 vendor/github.com/opencontainers/runtime-tools/Godeps/_workspace/src/github.com/urfave/cli/.gitignore create mode 100644 vendor/github.com/opencontainers/runtime-tools/Godeps/_workspace/src/github.com/urfave/cli/.travis.yml create mode 100644 vendor/github.com/opencontainers/runtime-tools/Godeps/_workspace/src/github.com/urfave/cli/CHANGELOG.md create mode 100644 vendor/github.com/opencontainers/runtime-tools/Godeps/_workspace/src/github.com/urfave/cli/LICENSE create mode 100644 vendor/github.com/opencontainers/runtime-tools/Godeps/_workspace/src/github.com/urfave/cli/README.md create mode 100644 vendor/github.com/opencontainers/runtime-tools/Godeps/_workspace/src/github.com/urfave/cli/app.go create mode 100644 vendor/github.com/opencontainers/runtime-tools/Godeps/_workspace/src/github.com/urfave/cli/appveyor.yml create mode 100644 vendor/github.com/opencontainers/runtime-tools/Godeps/_workspace/src/github.com/urfave/cli/category.go create mode 100644 vendor/github.com/opencontainers/runtime-tools/Godeps/_workspace/src/github.com/urfave/cli/cli.go create mode 100644 
vendor/github.com/opencontainers/runtime-tools/Godeps/_workspace/src/github.com/urfave/cli/command.go create mode 100644 vendor/github.com/opencontainers/runtime-tools/Godeps/_workspace/src/github.com/urfave/cli/context.go create mode 100644 vendor/github.com/opencontainers/runtime-tools/Godeps/_workspace/src/github.com/urfave/cli/errors.go create mode 100644 vendor/github.com/opencontainers/runtime-tools/Godeps/_workspace/src/github.com/urfave/cli/flag-types.json create mode 100644 vendor/github.com/opencontainers/runtime-tools/Godeps/_workspace/src/github.com/urfave/cli/flag.go create mode 100644 vendor/github.com/opencontainers/runtime-tools/Godeps/_workspace/src/github.com/urfave/cli/flag_generated.go create mode 100644 vendor/github.com/opencontainers/runtime-tools/Godeps/_workspace/src/github.com/urfave/cli/funcs.go create mode 100755 vendor/github.com/opencontainers/runtime-tools/Godeps/_workspace/src/github.com/urfave/cli/generate-flag-types create mode 100644 vendor/github.com/opencontainers/runtime-tools/Godeps/_workspace/src/github.com/urfave/cli/help.go create mode 100755 vendor/github.com/opencontainers/runtime-tools/Godeps/_workspace/src/github.com/urfave/cli/runtests create mode 100644 vendor/github.com/spf13/pflag/.gitignore create mode 100644 vendor/github.com/spf13/pflag/.travis.yml create mode 100644 vendor/github.com/spf13/pflag/LICENSE create mode 100644 vendor/github.com/spf13/pflag/README.md create mode 100644 vendor/github.com/spf13/pflag/bool.go create mode 100644 vendor/github.com/spf13/pflag/bool_slice.go create mode 100644 vendor/github.com/spf13/pflag/bool_slice_test.go create mode 100644 vendor/github.com/spf13/pflag/bool_test.go create mode 100644 vendor/github.com/spf13/pflag/count.go create mode 100644 vendor/github.com/spf13/pflag/count_test.go create mode 100644 vendor/github.com/spf13/pflag/duration.go create mode 100644 vendor/github.com/spf13/pflag/example_test.go create mode 100644 vendor/github.com/spf13/pflag/export_test.go 
create mode 100644 vendor/github.com/spf13/pflag/flag.go create mode 100644 vendor/github.com/spf13/pflag/flag_test.go create mode 100644 vendor/github.com/spf13/pflag/float32.go create mode 100644 vendor/github.com/spf13/pflag/float64.go create mode 100644 vendor/github.com/spf13/pflag/golangflag.go create mode 100644 vendor/github.com/spf13/pflag/golangflag_test.go create mode 100644 vendor/github.com/spf13/pflag/int.go create mode 100644 vendor/github.com/spf13/pflag/int32.go create mode 100644 vendor/github.com/spf13/pflag/int64.go create mode 100644 vendor/github.com/spf13/pflag/int8.go create mode 100644 vendor/github.com/spf13/pflag/int_slice.go create mode 100644 vendor/github.com/spf13/pflag/int_slice_test.go create mode 100644 vendor/github.com/spf13/pflag/ip.go create mode 100644 vendor/github.com/spf13/pflag/ip_slice.go create mode 100644 vendor/github.com/spf13/pflag/ip_slice_test.go create mode 100644 vendor/github.com/spf13/pflag/ip_test.go create mode 100644 vendor/github.com/spf13/pflag/ipmask.go create mode 100644 vendor/github.com/spf13/pflag/ipnet.go create mode 100644 vendor/github.com/spf13/pflag/ipnet_test.go create mode 100644 vendor/github.com/spf13/pflag/string.go create mode 100644 vendor/github.com/spf13/pflag/string_array.go create mode 100644 vendor/github.com/spf13/pflag/string_array_test.go create mode 100644 vendor/github.com/spf13/pflag/string_slice.go create mode 100644 vendor/github.com/spf13/pflag/string_slice_test.go create mode 100644 vendor/github.com/spf13/pflag/uint.go create mode 100644 vendor/github.com/spf13/pflag/uint16.go create mode 100644 vendor/github.com/spf13/pflag/uint32.go create mode 100644 vendor/github.com/spf13/pflag/uint64.go create mode 100644 vendor/github.com/spf13/pflag/uint8.go create mode 100644 vendor/github.com/spf13/pflag/uint_slice.go create mode 100644 vendor/github.com/spf13/pflag/uint_slice_test.go create mode 100755 vendor/github.com/spf13/pflag/verify/all.sh create mode 100755 
vendor/github.com/spf13/pflag/verify/gofmt.sh create mode 100755 vendor/github.com/spf13/pflag/verify/golint.sh create mode 100644 vendor/github.com/ugorji/go/LICENSE create mode 100644 vendor/github.com/ugorji/go/README.md create mode 100644 vendor/github.com/ugorji/go/codec/0doc.go create mode 100644 vendor/github.com/ugorji/go/codec/README.md create mode 100644 vendor/github.com/ugorji/go/codec/binc.go create mode 100644 vendor/github.com/ugorji/go/codec/cbor.go create mode 100644 vendor/github.com/ugorji/go/codec/cbor_test.go create mode 100644 vendor/github.com/ugorji/go/codec/codec_test.go create mode 100644 vendor/github.com/ugorji/go/codec/codecgen/README.md create mode 100644 vendor/github.com/ugorji/go/codec/codecgen/gen.go create mode 100644 vendor/github.com/ugorji/go/codec/codecgen/z.go create mode 100644 vendor/github.com/ugorji/go/codec/codecgen_test.go create mode 100644 vendor/github.com/ugorji/go/codec/decode.go create mode 100644 vendor/github.com/ugorji/go/codec/decode_go.go create mode 100644 vendor/github.com/ugorji/go/codec/decode_go14.go create mode 100644 vendor/github.com/ugorji/go/codec/encode.go create mode 100644 vendor/github.com/ugorji/go/codec/fast-path.generated.go create mode 100644 vendor/github.com/ugorji/go/codec/fast-path.go.tmpl create mode 100644 vendor/github.com/ugorji/go/codec/fast-path.not.go create mode 100644 vendor/github.com/ugorji/go/codec/gen-dec-array.go.tmpl create mode 100644 vendor/github.com/ugorji/go/codec/gen-dec-map.go.tmpl create mode 100644 vendor/github.com/ugorji/go/codec/gen-helper.generated.go create mode 100644 vendor/github.com/ugorji/go/codec/gen-helper.go.tmpl create mode 100644 vendor/github.com/ugorji/go/codec/gen.generated.go create mode 100644 vendor/github.com/ugorji/go/codec/gen.go create mode 100644 vendor/github.com/ugorji/go/codec/gen_15.go create mode 100644 vendor/github.com/ugorji/go/codec/gen_16.go create mode 100644 vendor/github.com/ugorji/go/codec/gen_17.go create mode 100644 
vendor/github.com/ugorji/go/codec/helper.go create mode 100644 vendor/github.com/ugorji/go/codec/helper_internal.go create mode 100644 vendor/github.com/ugorji/go/codec/helper_not_unsafe.go create mode 100644 vendor/github.com/ugorji/go/codec/helper_test.go create mode 100644 vendor/github.com/ugorji/go/codec/helper_unsafe.go create mode 100644 vendor/github.com/ugorji/go/codec/json.go create mode 100644 vendor/github.com/ugorji/go/codec/msgpack.go create mode 100644 vendor/github.com/ugorji/go/codec/noop.go create mode 100644 vendor/github.com/ugorji/go/codec/prebuild.go create mode 100755 vendor/github.com/ugorji/go/codec/prebuild.sh create mode 100644 vendor/github.com/ugorji/go/codec/py_test.go create mode 100644 vendor/github.com/ugorji/go/codec/rpc.go create mode 100644 vendor/github.com/ugorji/go/codec/simple.go create mode 100644 vendor/github.com/ugorji/go/codec/test-cbor-goldens.json create mode 100755 vendor/github.com/ugorji/go/codec/test.py create mode 100755 vendor/github.com/ugorji/go/codec/tests.sh create mode 100644 vendor/github.com/ugorji/go/codec/time.go create mode 100644 vendor/github.com/ugorji/go/codec/values_test.go create mode 100644 vendor/github.com/ugorji/go/msgpack.org.md create mode 100644 vendor/golang.org/x/oauth2/.travis.yml create mode 100644 vendor/golang.org/x/oauth2/AUTHORS create mode 100644 vendor/golang.org/x/oauth2/CONTRIBUTING.md create mode 100644 vendor/golang.org/x/oauth2/CONTRIBUTORS create mode 100644 vendor/golang.org/x/oauth2/LICENSE create mode 100644 vendor/golang.org/x/oauth2/README.md create mode 100644 vendor/golang.org/x/oauth2/amazon/amazon.go create mode 100644 vendor/golang.org/x/oauth2/bitbucket/bitbucket.go create mode 100644 vendor/golang.org/x/oauth2/client_appengine.go create mode 100644 vendor/golang.org/x/oauth2/clientcredentials/clientcredentials.go create mode 100644 vendor/golang.org/x/oauth2/clientcredentials/clientcredentials_test.go create mode 100644 vendor/golang.org/x/oauth2/example_test.go 
create mode 100644 vendor/golang.org/x/oauth2/facebook/facebook.go create mode 100644 vendor/golang.org/x/oauth2/fitbit/fitbit.go create mode 100644 vendor/golang.org/x/oauth2/foursquare/foursquare.go create mode 100644 vendor/golang.org/x/oauth2/github/github.go create mode 100644 vendor/golang.org/x/oauth2/google/appengine.go create mode 100644 vendor/golang.org/x/oauth2/google/appengine_hook.go create mode 100644 vendor/golang.org/x/oauth2/google/appengineflex_hook.go create mode 100644 vendor/golang.org/x/oauth2/google/default.go create mode 100644 vendor/golang.org/x/oauth2/google/example_test.go create mode 100644 vendor/golang.org/x/oauth2/google/google.go create mode 100644 vendor/golang.org/x/oauth2/google/google_test.go create mode 100644 vendor/golang.org/x/oauth2/google/jwt.go create mode 100644 vendor/golang.org/x/oauth2/google/jwt_test.go create mode 100644 vendor/golang.org/x/oauth2/google/sdk.go create mode 100644 vendor/golang.org/x/oauth2/google/sdk_test.go create mode 100644 vendor/golang.org/x/oauth2/google/testdata/gcloud/credentials create mode 100644 vendor/golang.org/x/oauth2/google/testdata/gcloud/properties create mode 100644 vendor/golang.org/x/oauth2/heroku/heroku.go create mode 100644 vendor/golang.org/x/oauth2/hipchat/hipchat.go create mode 100644 vendor/golang.org/x/oauth2/internal/oauth2.go create mode 100644 vendor/golang.org/x/oauth2/internal/oauth2_test.go create mode 100644 vendor/golang.org/x/oauth2/internal/token.go create mode 100644 vendor/golang.org/x/oauth2/internal/token_test.go create mode 100644 vendor/golang.org/x/oauth2/internal/transport.go create mode 100644 vendor/golang.org/x/oauth2/internal/transport_test.go create mode 100644 vendor/golang.org/x/oauth2/jws/jws.go create mode 100644 vendor/golang.org/x/oauth2/jws/jws_test.go create mode 100644 vendor/golang.org/x/oauth2/jwt/example_test.go create mode 100644 vendor/golang.org/x/oauth2/jwt/jwt.go create mode 100644 vendor/golang.org/x/oauth2/jwt/jwt_test.go create 
mode 100644 vendor/golang.org/x/oauth2/linkedin/linkedin.go create mode 100644 vendor/golang.org/x/oauth2/mediamath/mediamath.go create mode 100644 vendor/golang.org/x/oauth2/microsoft/microsoft.go create mode 100644 vendor/golang.org/x/oauth2/oauth2.go create mode 100644 vendor/golang.org/x/oauth2/oauth2_test.go create mode 100644 vendor/golang.org/x/oauth2/odnoklassniki/odnoklassniki.go create mode 100644 vendor/golang.org/x/oauth2/paypal/paypal.go create mode 100644 vendor/golang.org/x/oauth2/slack/slack.go create mode 100644 vendor/golang.org/x/oauth2/token.go create mode 100644 vendor/golang.org/x/oauth2/token_test.go create mode 100644 vendor/golang.org/x/oauth2/transport.go create mode 100644 vendor/golang.org/x/oauth2/transport_test.go create mode 100644 vendor/golang.org/x/oauth2/uber/uber.go create mode 100644 vendor/golang.org/x/oauth2/vk/vk.go create mode 100644 vendor/golang.org/x/oauth2/yandex/yandex.go create mode 100644 vendor/golang.org/x/text/.gitattributes create mode 100644 vendor/golang.org/x/text/.gitignore create mode 100644 vendor/golang.org/x/text/AUTHORS create mode 100644 vendor/golang.org/x/text/CONTRIBUTING.md create mode 100644 vendor/golang.org/x/text/CONTRIBUTORS create mode 100644 vendor/golang.org/x/text/LICENSE create mode 100644 vendor/golang.org/x/text/PATENTS create mode 100644 vendor/golang.org/x/text/README create mode 100644 vendor/golang.org/x/text/cases/cases.go create mode 100644 vendor/golang.org/x/text/cases/context.go create mode 100644 vendor/golang.org/x/text/cases/context_test.go create mode 100644 vendor/golang.org/x/text/cases/example_test.go create mode 100644 vendor/golang.org/x/text/cases/fold.go create mode 100644 vendor/golang.org/x/text/cases/fold_test.go create mode 100644 vendor/golang.org/x/text/cases/gen.go create mode 100644 vendor/golang.org/x/text/cases/gen_trieval.go create mode 100644 vendor/golang.org/x/text/cases/icu.go create mode 100644 vendor/golang.org/x/text/cases/icu_test.go create mode 
100644 vendor/golang.org/x/text/cases/info.go create mode 100644 vendor/golang.org/x/text/cases/map.go create mode 100644 vendor/golang.org/x/text/cases/map_test.go create mode 100644 vendor/golang.org/x/text/cases/tables.go create mode 100644 vendor/golang.org/x/text/cases/tables_test.go create mode 100644 vendor/golang.org/x/text/cases/trieval.go create mode 100644 vendor/golang.org/x/text/cmd/gotext/doc.go create mode 100644 vendor/golang.org/x/text/cmd/gotext/extract.go create mode 100644 vendor/golang.org/x/text/cmd/gotext/main.go create mode 100644 vendor/golang.org/x/text/cmd/gotext/message.go create mode 100644 vendor/golang.org/x/text/codereview.cfg create mode 100644 vendor/golang.org/x/text/collate/build/builder.go create mode 100644 vendor/golang.org/x/text/collate/build/builder_test.go create mode 100644 vendor/golang.org/x/text/collate/build/colelem.go create mode 100644 vendor/golang.org/x/text/collate/build/colelem_test.go create mode 100644 vendor/golang.org/x/text/collate/build/contract.go create mode 100644 vendor/golang.org/x/text/collate/build/contract_test.go create mode 100644 vendor/golang.org/x/text/collate/build/order.go create mode 100644 vendor/golang.org/x/text/collate/build/order_test.go create mode 100644 vendor/golang.org/x/text/collate/build/table.go create mode 100644 vendor/golang.org/x/text/collate/build/trie.go create mode 100644 vendor/golang.org/x/text/collate/build/trie_test.go create mode 100644 vendor/golang.org/x/text/collate/collate.go create mode 100644 vendor/golang.org/x/text/collate/collate_test.go create mode 100644 vendor/golang.org/x/text/collate/export_test.go create mode 100644 vendor/golang.org/x/text/collate/index.go create mode 100644 vendor/golang.org/x/text/collate/maketables.go create mode 100644 vendor/golang.org/x/text/collate/option.go create mode 100644 vendor/golang.org/x/text/collate/option_test.go create mode 100644 vendor/golang.org/x/text/collate/reg_test.go create mode 100644 
vendor/golang.org/x/text/collate/sort.go create mode 100644 vendor/golang.org/x/text/collate/sort_test.go create mode 100644 vendor/golang.org/x/text/collate/table_test.go create mode 100644 vendor/golang.org/x/text/collate/tables.go create mode 100644 vendor/golang.org/x/text/collate/tools/colcmp/Makefile create mode 100644 vendor/golang.org/x/text/collate/tools/colcmp/chars.go create mode 100644 vendor/golang.org/x/text/collate/tools/colcmp/col.go create mode 100644 vendor/golang.org/x/text/collate/tools/colcmp/colcmp.go create mode 100644 vendor/golang.org/x/text/collate/tools/colcmp/darwin.go create mode 100644 vendor/golang.org/x/text/collate/tools/colcmp/gen.go create mode 100644 vendor/golang.org/x/text/collate/tools/colcmp/icu.go create mode 100644 vendor/golang.org/x/text/currency/common.go create mode 100644 vendor/golang.org/x/text/currency/currency.go create mode 100644 vendor/golang.org/x/text/currency/currency_test.go create mode 100644 vendor/golang.org/x/text/currency/example_test.go create mode 100644 vendor/golang.org/x/text/currency/format.go create mode 100644 vendor/golang.org/x/text/currency/format_test.go create mode 100644 vendor/golang.org/x/text/currency/gen.go create mode 100644 vendor/golang.org/x/text/currency/gen_common.go create mode 100644 vendor/golang.org/x/text/currency/query.go create mode 100644 vendor/golang.org/x/text/currency/query_test.go create mode 100644 vendor/golang.org/x/text/currency/tables.go create mode 100644 vendor/golang.org/x/text/currency/tables_test.go create mode 100644 vendor/golang.org/x/text/doc.go create mode 100644 vendor/golang.org/x/text/encoding/charmap/charmap.go create mode 100644 vendor/golang.org/x/text/encoding/charmap/charmap_test.go create mode 100644 vendor/golang.org/x/text/encoding/charmap/maketables.go create mode 100644 vendor/golang.org/x/text/encoding/charmap/tables.go create mode 100644 vendor/golang.org/x/text/encoding/encoding.go create mode 100644 
vendor/golang.org/x/text/encoding/encoding_test.go create mode 100644 vendor/golang.org/x/text/encoding/example_test.go create mode 100644 vendor/golang.org/x/text/encoding/htmlindex/gen.go create mode 100644 vendor/golang.org/x/text/encoding/htmlindex/htmlindex.go create mode 100644 vendor/golang.org/x/text/encoding/htmlindex/htmlindex_test.go create mode 100644 vendor/golang.org/x/text/encoding/htmlindex/map.go create mode 100644 vendor/golang.org/x/text/encoding/htmlindex/tables.go create mode 100644 vendor/golang.org/x/text/encoding/ianaindex/example_test.go create mode 100644 vendor/golang.org/x/text/encoding/ianaindex/ianaindex.go create mode 100644 vendor/golang.org/x/text/encoding/internal/identifier/gen.go create mode 100644 vendor/golang.org/x/text/encoding/internal/identifier/identifier.go create mode 100644 vendor/golang.org/x/text/encoding/internal/identifier/mib.go create mode 100644 vendor/golang.org/x/text/encoding/internal/internal.go create mode 100644 vendor/golang.org/x/text/encoding/japanese/all.go create mode 100644 vendor/golang.org/x/text/encoding/japanese/all_test.go create mode 100644 vendor/golang.org/x/text/encoding/japanese/eucjp.go create mode 100644 vendor/golang.org/x/text/encoding/japanese/iso2022jp.go create mode 100644 vendor/golang.org/x/text/encoding/japanese/maketables.go create mode 100644 vendor/golang.org/x/text/encoding/japanese/shiftjis.go create mode 100644 vendor/golang.org/x/text/encoding/japanese/tables.go create mode 100644 vendor/golang.org/x/text/encoding/korean/all_test.go create mode 100644 vendor/golang.org/x/text/encoding/korean/euckr.go create mode 100644 vendor/golang.org/x/text/encoding/korean/maketables.go create mode 100644 vendor/golang.org/x/text/encoding/korean/tables.go create mode 100644 vendor/golang.org/x/text/encoding/simplifiedchinese/all.go create mode 100644 vendor/golang.org/x/text/encoding/simplifiedchinese/all_test.go create mode 100644 
vendor/golang.org/x/text/encoding/simplifiedchinese/gbk.go create mode 100644 vendor/golang.org/x/text/encoding/simplifiedchinese/hzgb2312.go create mode 100644 vendor/golang.org/x/text/encoding/simplifiedchinese/maketables.go create mode 100644 vendor/golang.org/x/text/encoding/simplifiedchinese/tables.go create mode 100644 vendor/golang.org/x/text/encoding/testdata/candide-gb18030.txt create mode 100644 vendor/golang.org/x/text/encoding/testdata/candide-utf-16le.txt create mode 100644 vendor/golang.org/x/text/encoding/testdata/candide-utf-32be.txt create mode 100644 vendor/golang.org/x/text/encoding/testdata/candide-utf-8.txt create mode 100644 vendor/golang.org/x/text/encoding/testdata/candide-windows-1252.txt create mode 100644 vendor/golang.org/x/text/encoding/testdata/rashomon-euc-jp.txt create mode 100644 vendor/golang.org/x/text/encoding/testdata/rashomon-iso-2022-jp.txt create mode 100644 vendor/golang.org/x/text/encoding/testdata/rashomon-shift-jis.txt create mode 100644 vendor/golang.org/x/text/encoding/testdata/rashomon-utf-8.txt create mode 100644 vendor/golang.org/x/text/encoding/testdata/sunzi-bingfa-gb-levels-1-and-2-hz-gb2312.txt create mode 100644 vendor/golang.org/x/text/encoding/testdata/sunzi-bingfa-gb-levels-1-and-2-utf-8.txt create mode 100644 vendor/golang.org/x/text/encoding/testdata/sunzi-bingfa-simplified-gbk.txt create mode 100644 vendor/golang.org/x/text/encoding/testdata/sunzi-bingfa-simplified-utf-8.txt create mode 100644 vendor/golang.org/x/text/encoding/testdata/sunzi-bingfa-traditional-big5.txt create mode 100644 vendor/golang.org/x/text/encoding/testdata/sunzi-bingfa-traditional-utf-8.txt create mode 100644 vendor/golang.org/x/text/encoding/testdata/unsu-joh-eun-nal-euc-kr.txt create mode 100644 vendor/golang.org/x/text/encoding/testdata/unsu-joh-eun-nal-utf-8.txt create mode 100644 vendor/golang.org/x/text/encoding/traditionalchinese/all_test.go create mode 100644 vendor/golang.org/x/text/encoding/traditionalchinese/big5.go 
create mode 100644 vendor/golang.org/x/text/encoding/traditionalchinese/maketables.go create mode 100644 vendor/golang.org/x/text/encoding/traditionalchinese/tables.go create mode 100644 vendor/golang.org/x/text/encoding/unicode/override.go create mode 100644 vendor/golang.org/x/text/encoding/unicode/unicode.go create mode 100644 vendor/golang.org/x/text/encoding/unicode/unicode_test.go create mode 100644 vendor/golang.org/x/text/encoding/unicode/utf32/utf32.go create mode 100644 vendor/golang.org/x/text/encoding/unicode/utf32/utf32_test.go create mode 100644 vendor/golang.org/x/text/gen.go create mode 100644 vendor/golang.org/x/text/internal/colltab/collate_test.go create mode 100644 vendor/golang.org/x/text/internal/colltab/collelem.go create mode 100644 vendor/golang.org/x/text/internal/colltab/collelem_test.go create mode 100644 vendor/golang.org/x/text/internal/colltab/colltab.go create mode 100644 vendor/golang.org/x/text/internal/colltab/colltab_test.go create mode 100644 vendor/golang.org/x/text/internal/colltab/contract.go create mode 100644 vendor/golang.org/x/text/internal/colltab/contract_test.go create mode 100644 vendor/golang.org/x/text/internal/colltab/iter.go create mode 100644 vendor/golang.org/x/text/internal/colltab/iter_test.go create mode 100644 vendor/golang.org/x/text/internal/colltab/numeric.go create mode 100644 vendor/golang.org/x/text/internal/colltab/numeric_test.go create mode 100644 vendor/golang.org/x/text/internal/colltab/table.go create mode 100644 vendor/golang.org/x/text/internal/colltab/trie.go create mode 100644 vendor/golang.org/x/text/internal/colltab/trie_test.go create mode 100644 vendor/golang.org/x/text/internal/colltab/weighter.go create mode 100644 vendor/golang.org/x/text/internal/colltab/weighter_test.go create mode 100644 vendor/golang.org/x/text/internal/export/README create mode 100644 vendor/golang.org/x/text/internal/export/idna/common_test.go create mode 100644 
vendor/golang.org/x/text/internal/export/idna/gen.go create mode 100644 vendor/golang.org/x/text/internal/export/idna/gen_common.go create mode 100644 vendor/golang.org/x/text/internal/export/idna/gen_test.go create mode 100644 vendor/golang.org/x/text/internal/export/idna/gen_trieval.go create mode 100644 vendor/golang.org/x/text/internal/export/idna/idna.go create mode 100644 vendor/golang.org/x/text/internal/export/idna/idna_test.go create mode 100644 vendor/golang.org/x/text/internal/export/idna/punycode.go create mode 100644 vendor/golang.org/x/text/internal/export/idna/punycode_test.go create mode 100644 vendor/golang.org/x/text/internal/export/idna/tables.go create mode 100644 vendor/golang.org/x/text/internal/export/idna/trie.go create mode 100644 vendor/golang.org/x/text/internal/export/idna/trieval.go create mode 100644 vendor/golang.org/x/text/internal/format/format.go create mode 100644 vendor/golang.org/x/text/internal/format/plural/plural.go create mode 100644 vendor/golang.org/x/text/internal/gen.go create mode 100644 vendor/golang.org/x/text/internal/gen/code.go create mode 100644 vendor/golang.org/x/text/internal/gen/gen.go create mode 100644 vendor/golang.org/x/text/internal/gen_test.go create mode 100644 vendor/golang.org/x/text/internal/internal.go create mode 100644 vendor/golang.org/x/text/internal/internal_test.go create mode 100644 vendor/golang.org/x/text/internal/match.go create mode 100644 vendor/golang.org/x/text/internal/match_test.go create mode 100644 vendor/golang.org/x/text/internal/number/common.go create mode 100644 vendor/golang.org/x/text/internal/number/data_test.go create mode 100644 vendor/golang.org/x/text/internal/number/decimal.go create mode 100644 vendor/golang.org/x/text/internal/number/extfloat.go create mode 100644 vendor/golang.org/x/text/internal/number/ftoa.go create mode 100644 vendor/golang.org/x/text/internal/number/gen.go create mode 100644 vendor/golang.org/x/text/internal/number/gen_common.go create mode 
100644 vendor/golang.org/x/text/internal/number/gen_plural.go create mode 100644 vendor/golang.org/x/text/internal/number/itoa.go create mode 100644 vendor/golang.org/x/text/internal/number/number.go create mode 100644 vendor/golang.org/x/text/internal/number/number_test.go create mode 100644 vendor/golang.org/x/text/internal/number/pattern.go create mode 100644 vendor/golang.org/x/text/internal/number/pattern_test.go create mode 100644 vendor/golang.org/x/text/internal/number/plural.go create mode 100644 vendor/golang.org/x/text/internal/number/plural_test.go create mode 100644 vendor/golang.org/x/text/internal/number/tables.go create mode 100644 vendor/golang.org/x/text/internal/number/tables_test.go create mode 100644 vendor/golang.org/x/text/internal/stringset/set.go create mode 100644 vendor/golang.org/x/text/internal/stringset/set_test.go create mode 100644 vendor/golang.org/x/text/internal/tables.go create mode 100644 vendor/golang.org/x/text/internal/tag/tag.go create mode 100644 vendor/golang.org/x/text/internal/tag/tag_test.go create mode 100644 vendor/golang.org/x/text/internal/testtext/codesize.go create mode 100644 vendor/golang.org/x/text/internal/testtext/flag.go create mode 100644 vendor/golang.org/x/text/internal/testtext/gc.go create mode 100644 vendor/golang.org/x/text/internal/testtext/gccgo.go create mode 100644 vendor/golang.org/x/text/internal/testtext/go1_6.go create mode 100644 vendor/golang.org/x/text/internal/testtext/go1_7.go create mode 100644 vendor/golang.org/x/text/internal/testtext/text.go create mode 100644 vendor/golang.org/x/text/internal/triegen/compact.go create mode 100644 vendor/golang.org/x/text/internal/triegen/data_test.go create mode 100644 vendor/golang.org/x/text/internal/triegen/example_compact_test.go create mode 100644 vendor/golang.org/x/text/internal/triegen/example_test.go create mode 100644 vendor/golang.org/x/text/internal/triegen/gen_test.go create mode 100644 vendor/golang.org/x/text/internal/triegen/print.go 
create mode 100644 vendor/golang.org/x/text/internal/triegen/triegen.go create mode 100644 vendor/golang.org/x/text/internal/ucd/example_test.go create mode 100644 vendor/golang.org/x/text/internal/ucd/ucd.go create mode 100644 vendor/golang.org/x/text/internal/ucd/ucd_test.go create mode 100644 vendor/golang.org/x/text/internal/utf8internal/utf8internal.go create mode 100644 vendor/golang.org/x/text/language/Makefile create mode 100644 vendor/golang.org/x/text/language/common.go create mode 100644 vendor/golang.org/x/text/language/coverage.go create mode 100644 vendor/golang.org/x/text/language/coverage_test.go create mode 100644 vendor/golang.org/x/text/language/data_test.go create mode 100644 vendor/golang.org/x/text/language/display/dict.go create mode 100644 vendor/golang.org/x/text/language/display/dict_test.go create mode 100644 vendor/golang.org/x/text/language/display/display.go create mode 100644 vendor/golang.org/x/text/language/display/display_test.go create mode 100644 vendor/golang.org/x/text/language/display/examples_test.go create mode 100644 vendor/golang.org/x/text/language/display/lookup.go create mode 100644 vendor/golang.org/x/text/language/display/maketables.go create mode 100644 vendor/golang.org/x/text/language/display/tables.go create mode 100644 vendor/golang.org/x/text/language/examples_test.go create mode 100644 vendor/golang.org/x/text/language/gen_common.go create mode 100644 vendor/golang.org/x/text/language/gen_index.go create mode 100644 vendor/golang.org/x/text/language/go1_1.go create mode 100644 vendor/golang.org/x/text/language/go1_2.go create mode 100644 vendor/golang.org/x/text/language/httpexample_test.go create mode 100644 vendor/golang.org/x/text/language/index.go create mode 100644 vendor/golang.org/x/text/language/language.go create mode 100644 vendor/golang.org/x/text/language/language_test.go create mode 100644 vendor/golang.org/x/text/language/lookup.go create mode 100644 
vendor/golang.org/x/text/language/lookup_test.go create mode 100644 vendor/golang.org/x/text/language/maketables.go create mode 100644 vendor/golang.org/x/text/language/match.go create mode 100644 vendor/golang.org/x/text/language/match_test.go create mode 100644 vendor/golang.org/x/text/language/parse.go create mode 100644 vendor/golang.org/x/text/language/parse_test.go create mode 100644 vendor/golang.org/x/text/language/tables.go create mode 100644 vendor/golang.org/x/text/language/tags.go create mode 100644 vendor/golang.org/x/text/message/catalog.go create mode 100644 vendor/golang.org/x/text/message/catalog_test.go create mode 100644 vendor/golang.org/x/text/message/message.go create mode 100644 vendor/golang.org/x/text/message/message_test.go create mode 100644 vendor/golang.org/x/text/runes/cond.go create mode 100644 vendor/golang.org/x/text/runes/cond_test.go create mode 100644 vendor/golang.org/x/text/runes/example_test.go create mode 100644 vendor/golang.org/x/text/runes/runes.go create mode 100644 vendor/golang.org/x/text/runes/runes_test.go create mode 100644 vendor/golang.org/x/text/search/index.go create mode 100644 vendor/golang.org/x/text/search/pattern.go create mode 100644 vendor/golang.org/x/text/search/pattern_test.go create mode 100644 vendor/golang.org/x/text/search/search.go create mode 100644 vendor/golang.org/x/text/search/tables.go create mode 100644 vendor/golang.org/x/text/secure/bidirule/bench_test.go create mode 100644 vendor/golang.org/x/text/secure/bidirule/bidirule.go create mode 100644 vendor/golang.org/x/text/secure/bidirule/bidirule_test.go create mode 100644 vendor/golang.org/x/text/secure/doc.go create mode 100644 vendor/golang.org/x/text/secure/precis/benchmark_test.go create mode 100644 vendor/golang.org/x/text/secure/precis/class.go create mode 100644 vendor/golang.org/x/text/secure/precis/class_test.go create mode 100644 vendor/golang.org/x/text/secure/precis/context.go create mode 100644 
vendor/golang.org/x/text/secure/precis/doc.go create mode 100644 vendor/golang.org/x/text/secure/precis/enforce_test.go create mode 100644 vendor/golang.org/x/text/secure/precis/gen.go create mode 100644 vendor/golang.org/x/text/secure/precis/gen_trieval.go create mode 100644 vendor/golang.org/x/text/secure/precis/nickname.go create mode 100644 vendor/golang.org/x/text/secure/precis/options.go create mode 100644 vendor/golang.org/x/text/secure/precis/profile.go create mode 100644 vendor/golang.org/x/text/secure/precis/profile_test.go create mode 100644 vendor/golang.org/x/text/secure/precis/profiles.go create mode 100644 vendor/golang.org/x/text/secure/precis/tables.go create mode 100644 vendor/golang.org/x/text/secure/precis/tables_test.go create mode 100644 vendor/golang.org/x/text/secure/precis/transformer.go create mode 100644 vendor/golang.org/x/text/secure/precis/trieval.go create mode 100644 vendor/golang.org/x/text/transform/examples_test.go create mode 100644 vendor/golang.org/x/text/transform/transform.go create mode 100644 vendor/golang.org/x/text/transform/transform_test.go create mode 100644 vendor/golang.org/x/text/unicode/bidi/bidi.go create mode 100644 vendor/golang.org/x/text/unicode/bidi/bracket.go create mode 100644 vendor/golang.org/x/text/unicode/bidi/core.go create mode 100644 vendor/golang.org/x/text/unicode/bidi/core_test.go create mode 100644 vendor/golang.org/x/text/unicode/bidi/gen.go create mode 100644 vendor/golang.org/x/text/unicode/bidi/gen_ranges.go create mode 100644 vendor/golang.org/x/text/unicode/bidi/gen_trieval.go create mode 100644 vendor/golang.org/x/text/unicode/bidi/prop.go create mode 100644 vendor/golang.org/x/text/unicode/bidi/ranges_test.go create mode 100644 vendor/golang.org/x/text/unicode/bidi/tables.go create mode 100644 vendor/golang.org/x/text/unicode/bidi/tables_test.go create mode 100644 vendor/golang.org/x/text/unicode/bidi/trieval.go create mode 100644 vendor/golang.org/x/text/unicode/cldr/base.go create mode 
100644 vendor/golang.org/x/text/unicode/cldr/cldr.go create mode 100644 vendor/golang.org/x/text/unicode/cldr/cldr_test.go create mode 100644 vendor/golang.org/x/text/unicode/cldr/collate.go create mode 100644 vendor/golang.org/x/text/unicode/cldr/collate_test.go create mode 100644 vendor/golang.org/x/text/unicode/cldr/data_test.go create mode 100644 vendor/golang.org/x/text/unicode/cldr/decode.go create mode 100644 vendor/golang.org/x/text/unicode/cldr/examples_test.go create mode 100644 vendor/golang.org/x/text/unicode/cldr/makexml.go create mode 100644 vendor/golang.org/x/text/unicode/cldr/resolve.go create mode 100644 vendor/golang.org/x/text/unicode/cldr/resolve_test.go create mode 100644 vendor/golang.org/x/text/unicode/cldr/slice.go create mode 100644 vendor/golang.org/x/text/unicode/cldr/slice_test.go create mode 100644 vendor/golang.org/x/text/unicode/cldr/xml.go create mode 100644 vendor/golang.org/x/text/unicode/doc.go create mode 100644 vendor/golang.org/x/text/unicode/norm/composition.go create mode 100644 vendor/golang.org/x/text/unicode/norm/composition_test.go create mode 100644 vendor/golang.org/x/text/unicode/norm/example_iter_test.go create mode 100644 vendor/golang.org/x/text/unicode/norm/example_test.go create mode 100644 vendor/golang.org/x/text/unicode/norm/forminfo.go create mode 100644 vendor/golang.org/x/text/unicode/norm/forminfo_test.go create mode 100644 vendor/golang.org/x/text/unicode/norm/input.go create mode 100644 vendor/golang.org/x/text/unicode/norm/iter.go create mode 100644 vendor/golang.org/x/text/unicode/norm/iter_test.go create mode 100644 vendor/golang.org/x/text/unicode/norm/maketables.go create mode 100644 vendor/golang.org/x/text/unicode/norm/norm_test.go create mode 100644 vendor/golang.org/x/text/unicode/norm/normalize.go create mode 100644 vendor/golang.org/x/text/unicode/norm/normalize_test.go create mode 100644 vendor/golang.org/x/text/unicode/norm/readwriter.go create mode 100644 
vendor/golang.org/x/text/unicode/norm/readwriter_test.go create mode 100644 vendor/golang.org/x/text/unicode/norm/tables.go create mode 100644 vendor/golang.org/x/text/unicode/norm/transform.go create mode 100644 vendor/golang.org/x/text/unicode/norm/transform_test.go create mode 100644 vendor/golang.org/x/text/unicode/norm/trie.go create mode 100644 vendor/golang.org/x/text/unicode/norm/triegen.go create mode 100644 vendor/golang.org/x/text/unicode/norm/ucd_test.go create mode 100644 vendor/golang.org/x/text/unicode/rangetable/gen.go create mode 100644 vendor/golang.org/x/text/unicode/rangetable/merge.go create mode 100644 vendor/golang.org/x/text/unicode/rangetable/merge_test.go create mode 100644 vendor/golang.org/x/text/unicode/rangetable/rangetable.go create mode 100644 vendor/golang.org/x/text/unicode/rangetable/rangetable_test.go create mode 100644 vendor/golang.org/x/text/unicode/rangetable/tables.go create mode 100644 vendor/golang.org/x/text/unicode/runenames/bits.go create mode 100644 vendor/golang.org/x/text/unicode/runenames/example_test.go create mode 100644 vendor/golang.org/x/text/unicode/runenames/gen.go create mode 100644 vendor/golang.org/x/text/unicode/runenames/gen_bits.go create mode 100644 vendor/golang.org/x/text/unicode/runenames/runenames.go create mode 100644 vendor/golang.org/x/text/unicode/runenames/runenames_test.go create mode 100644 vendor/golang.org/x/text/unicode/runenames/tables.go create mode 100644 vendor/golang.org/x/text/width/common_test.go create mode 100644 vendor/golang.org/x/text/width/example_test.go create mode 100644 vendor/golang.org/x/text/width/gen.go create mode 100644 vendor/golang.org/x/text/width/gen_common.go create mode 100644 vendor/golang.org/x/text/width/gen_trieval.go create mode 100644 vendor/golang.org/x/text/width/kind_string.go create mode 100644 vendor/golang.org/x/text/width/runes_test.go create mode 100644 vendor/golang.org/x/text/width/tables.go create mode 100644 
vendor/golang.org/x/text/width/tables_test.go create mode 100644 vendor/golang.org/x/text/width/transform.go create mode 100644 vendor/golang.org/x/text/width/transform_test.go create mode 100644 vendor/golang.org/x/text/width/trieval.go create mode 100644 vendor/golang.org/x/text/width/width.go create mode 100644 vendor/google.golang.org/appengine/.travis.yml create mode 100644 vendor/google.golang.org/appengine/LICENSE create mode 100644 vendor/google.golang.org/appengine/README.md create mode 100644 vendor/google.golang.org/appengine/aetest/doc.go create mode 100644 vendor/google.golang.org/appengine/aetest/instance.go create mode 100644 vendor/google.golang.org/appengine/aetest/instance_classic.go create mode 100644 vendor/google.golang.org/appengine/aetest/instance_test.go create mode 100644 vendor/google.golang.org/appengine/aetest/instance_vm.go create mode 100644 vendor/google.golang.org/appengine/aetest/user.go create mode 100644 vendor/google.golang.org/appengine/appengine.go create mode 100644 vendor/google.golang.org/appengine/appengine_test.go create mode 100644 vendor/google.golang.org/appengine/appengine_vm.go create mode 100644 vendor/google.golang.org/appengine/blobstore/blobstore.go create mode 100644 vendor/google.golang.org/appengine/blobstore/blobstore_test.go create mode 100644 vendor/google.golang.org/appengine/blobstore/read.go create mode 100644 vendor/google.golang.org/appengine/capability/capability.go create mode 100644 vendor/google.golang.org/appengine/channel/channel.go create mode 100644 vendor/google.golang.org/appengine/channel/channel_test.go create mode 100644 vendor/google.golang.org/appengine/cloudsql/cloudsql.go create mode 100644 vendor/google.golang.org/appengine/cloudsql/cloudsql_classic.go create mode 100644 vendor/google.golang.org/appengine/cloudsql/cloudsql_vm.go create mode 100644 vendor/google.golang.org/appengine/cmd/aebundler/aebundler.go create mode 100644 vendor/google.golang.org/appengine/cmd/aedeploy/aedeploy.go 
create mode 100644 vendor/google.golang.org/appengine/cmd/aefix/ae.go create mode 100644 vendor/google.golang.org/appengine/cmd/aefix/ae_test.go create mode 100644 vendor/google.golang.org/appengine/cmd/aefix/fix.go create mode 100644 vendor/google.golang.org/appengine/cmd/aefix/main.go create mode 100644 vendor/google.golang.org/appengine/cmd/aefix/main_test.go create mode 100644 vendor/google.golang.org/appengine/cmd/aefix/typecheck.go create mode 100644 vendor/google.golang.org/appengine/datastore/datastore.go create mode 100644 vendor/google.golang.org/appengine/datastore/datastore_test.go create mode 100644 vendor/google.golang.org/appengine/datastore/doc.go create mode 100644 vendor/google.golang.org/appengine/datastore/key.go create mode 100644 vendor/google.golang.org/appengine/datastore/key_test.go create mode 100644 vendor/google.golang.org/appengine/datastore/load.go create mode 100644 vendor/google.golang.org/appengine/datastore/metadata.go create mode 100644 vendor/google.golang.org/appengine/datastore/prop.go create mode 100644 vendor/google.golang.org/appengine/datastore/prop_test.go create mode 100644 vendor/google.golang.org/appengine/datastore/query.go create mode 100644 vendor/google.golang.org/appengine/datastore/query_test.go create mode 100644 vendor/google.golang.org/appengine/datastore/save.go create mode 100644 vendor/google.golang.org/appengine/datastore/time_test.go create mode 100644 vendor/google.golang.org/appengine/datastore/transaction.go create mode 100644 vendor/google.golang.org/appengine/delay/delay.go create mode 100644 vendor/google.golang.org/appengine/delay/delay_test.go create mode 100644 vendor/google.golang.org/appengine/demos/guestbook/app.yaml create mode 100644 vendor/google.golang.org/appengine/demos/guestbook/favicon.ico create mode 100644 vendor/google.golang.org/appengine/demos/guestbook/guestbook.go create mode 100644 vendor/google.golang.org/appengine/demos/guestbook/index.yaml create mode 100644 
vendor/google.golang.org/appengine/demos/guestbook/templates/guestbook.html create mode 100644 vendor/google.golang.org/appengine/demos/helloworld/app.yaml create mode 100644 vendor/google.golang.org/appengine/demos/helloworld/favicon.ico create mode 100644 vendor/google.golang.org/appengine/demos/helloworld/helloworld.go create mode 100644 vendor/google.golang.org/appengine/errors.go create mode 100644 vendor/google.golang.org/appengine/file/file.go create mode 100644 vendor/google.golang.org/appengine/identity.go create mode 100644 vendor/google.golang.org/appengine/image/image.go create mode 100644 vendor/google.golang.org/appengine/internal/aetesting/fake.go create mode 100644 vendor/google.golang.org/appengine/internal/api.go create mode 100644 vendor/google.golang.org/appengine/internal/api_classic.go create mode 100644 vendor/google.golang.org/appengine/internal/api_common.go create mode 100644 vendor/google.golang.org/appengine/internal/api_race_test.go create mode 100644 vendor/google.golang.org/appengine/internal/api_test.go create mode 100644 vendor/google.golang.org/appengine/internal/app_id.go create mode 100644 vendor/google.golang.org/appengine/internal/app_id_test.go create mode 100644 vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.pb.go create mode 100644 vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.proto create mode 100644 vendor/google.golang.org/appengine/internal/base/api_base.pb.go create mode 100644 vendor/google.golang.org/appengine/internal/base/api_base.proto create mode 100644 vendor/google.golang.org/appengine/internal/blobstore/blobstore_service.pb.go create mode 100644 vendor/google.golang.org/appengine/internal/blobstore/blobstore_service.proto create mode 100644 vendor/google.golang.org/appengine/internal/capability/capability_service.pb.go create mode 100644 vendor/google.golang.org/appengine/internal/capability/capability_service.proto create mode 100644 
vendor/google.golang.org/appengine/internal/channel/channel_service.pb.go create mode 100644 vendor/google.golang.org/appengine/internal/channel/channel_service.proto create mode 100644 vendor/google.golang.org/appengine/internal/datastore/datastore_v3.pb.go create mode 100755 vendor/google.golang.org/appengine/internal/datastore/datastore_v3.proto create mode 100644 vendor/google.golang.org/appengine/internal/identity.go create mode 100644 vendor/google.golang.org/appengine/internal/identity_classic.go create mode 100644 vendor/google.golang.org/appengine/internal/identity_vm.go create mode 100644 vendor/google.golang.org/appengine/internal/image/images_service.pb.go create mode 100644 vendor/google.golang.org/appengine/internal/image/images_service.proto create mode 100644 vendor/google.golang.org/appengine/internal/internal.go create mode 100644 vendor/google.golang.org/appengine/internal/internal_vm_test.go create mode 100644 vendor/google.golang.org/appengine/internal/log/log_service.pb.go create mode 100644 vendor/google.golang.org/appengine/internal/log/log_service.proto create mode 100644 vendor/google.golang.org/appengine/internal/mail/mail_service.pb.go create mode 100644 vendor/google.golang.org/appengine/internal/mail/mail_service.proto create mode 100644 vendor/google.golang.org/appengine/internal/main.go create mode 100644 vendor/google.golang.org/appengine/internal/main_vm.go create mode 100644 vendor/google.golang.org/appengine/internal/memcache/memcache_service.pb.go create mode 100644 vendor/google.golang.org/appengine/internal/memcache/memcache_service.proto create mode 100644 vendor/google.golang.org/appengine/internal/metadata.go create mode 100644 vendor/google.golang.org/appengine/internal/modules/modules_service.pb.go create mode 100644 vendor/google.golang.org/appengine/internal/modules/modules_service.proto create mode 100644 vendor/google.golang.org/appengine/internal/net.go create mode 100644 
vendor/google.golang.org/appengine/internal/net_test.go create mode 100755 vendor/google.golang.org/appengine/internal/regen.sh create mode 100644 vendor/google.golang.org/appengine/internal/remote_api/remote_api.pb.go create mode 100644 vendor/google.golang.org/appengine/internal/remote_api/remote_api.proto create mode 100644 vendor/google.golang.org/appengine/internal/search/search.pb.go create mode 100644 vendor/google.golang.org/appengine/internal/search/search.proto create mode 100644 vendor/google.golang.org/appengine/internal/socket/socket_service.pb.go create mode 100644 vendor/google.golang.org/appengine/internal/socket/socket_service.proto create mode 100644 vendor/google.golang.org/appengine/internal/system/system_service.pb.go create mode 100644 vendor/google.golang.org/appengine/internal/system/system_service.proto create mode 100644 vendor/google.golang.org/appengine/internal/taskqueue/taskqueue_service.pb.go create mode 100644 vendor/google.golang.org/appengine/internal/taskqueue/taskqueue_service.proto create mode 100644 vendor/google.golang.org/appengine/internal/transaction.go create mode 100644 vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.pb.go create mode 100644 vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.proto create mode 100644 vendor/google.golang.org/appengine/internal/user/user_service.pb.go create mode 100644 vendor/google.golang.org/appengine/internal/user/user_service.proto create mode 100644 vendor/google.golang.org/appengine/internal/xmpp/xmpp_service.pb.go create mode 100644 vendor/google.golang.org/appengine/internal/xmpp/xmpp_service.proto create mode 100644 vendor/google.golang.org/appengine/log/api.go create mode 100644 vendor/google.golang.org/appengine/log/log.go create mode 100644 vendor/google.golang.org/appengine/log/log_test.go create mode 100644 vendor/google.golang.org/appengine/mail/mail.go create mode 100644 vendor/google.golang.org/appengine/mail/mail_test.go create 
mode 100644 vendor/google.golang.org/appengine/memcache/memcache.go create mode 100644 vendor/google.golang.org/appengine/memcache/memcache_test.go create mode 100644 vendor/google.golang.org/appengine/module/module.go create mode 100644 vendor/google.golang.org/appengine/module/module_test.go create mode 100644 vendor/google.golang.org/appengine/namespace.go create mode 100644 vendor/google.golang.org/appengine/namespace_test.go create mode 100644 vendor/google.golang.org/appengine/remote_api/client.go create mode 100644 vendor/google.golang.org/appengine/remote_api/client_test.go create mode 100644 vendor/google.golang.org/appengine/remote_api/remote_api.go create mode 100644 vendor/google.golang.org/appengine/runtime/runtime.go create mode 100644 vendor/google.golang.org/appengine/runtime/runtime_test.go create mode 100644 vendor/google.golang.org/appengine/search/doc.go create mode 100644 vendor/google.golang.org/appengine/search/field.go create mode 100644 vendor/google.golang.org/appengine/search/search.go create mode 100644 vendor/google.golang.org/appengine/search/search_test.go create mode 100644 vendor/google.golang.org/appengine/search/struct.go create mode 100644 vendor/google.golang.org/appengine/search/struct_test.go create mode 100644 vendor/google.golang.org/appengine/socket/doc.go create mode 100644 vendor/google.golang.org/appengine/socket/socket_classic.go create mode 100644 vendor/google.golang.org/appengine/socket/socket_vm.go create mode 100644 vendor/google.golang.org/appengine/taskqueue/taskqueue.go create mode 100644 vendor/google.golang.org/appengine/taskqueue/taskqueue_test.go create mode 100644 vendor/google.golang.org/appengine/timeout.go create mode 100644 vendor/google.golang.org/appengine/urlfetch/urlfetch.go create mode 100644 vendor/google.golang.org/appengine/user/oauth.go create mode 100644 vendor/google.golang.org/appengine/user/user.go create mode 100644 vendor/google.golang.org/appengine/user/user_classic.go create mode 100644 
vendor/google.golang.org/appengine/user/user_test.go create mode 100644 vendor/google.golang.org/appengine/user/user_vm.go create mode 100644 vendor/google.golang.org/appengine/xmpp/xmpp.go create mode 100644 vendor/google.golang.org/appengine/xmpp/xmpp_test.go create mode 100644 vendor/gopkg.in/inf.v0/LICENSE create mode 100644 vendor/gopkg.in/inf.v0/benchmark_test.go create mode 100644 vendor/gopkg.in/inf.v0/dec.go create mode 100644 vendor/gopkg.in/inf.v0/dec_go1_2_test.go create mode 100644 vendor/gopkg.in/inf.v0/dec_internal_test.go create mode 100644 vendor/gopkg.in/inf.v0/dec_test.go create mode 100644 vendor/gopkg.in/inf.v0/example_test.go create mode 100644 vendor/gopkg.in/inf.v0/rounder.go create mode 100644 vendor/gopkg.in/inf.v0/rounder_example_test.go create mode 100644 vendor/gopkg.in/inf.v0/rounder_test.go create mode 100644 vendor/k8s.io/apiserver/.import-restrictions create mode 100644 vendor/k8s.io/apiserver/LICENSE create mode 100644 vendor/k8s.io/apiserver/OWNERS create mode 100644 vendor/k8s.io/apiserver/README.md create mode 100644 vendor/k8s.io/apiserver/filter-branch-sha create mode 100755 vendor/k8s.io/apiserver/hack/sync-from-kubernetes.sh create mode 100644 vendor/k8s.io/apiserver/kubernetes-sha create mode 100644 vendor/k8s.io/apiserver/pkg/admission/OWNERS create mode 100644 vendor/k8s.io/apiserver/pkg/admission/attributes.go create mode 100644 vendor/k8s.io/apiserver/pkg/admission/chain.go create mode 100644 vendor/k8s.io/apiserver/pkg/admission/chain_test.go create mode 100644 vendor/k8s.io/apiserver/pkg/admission/errors.go create mode 100644 vendor/k8s.io/apiserver/pkg/admission/handler.go create mode 100644 vendor/k8s.io/apiserver/pkg/admission/interfaces.go create mode 100644 vendor/k8s.io/apiserver/pkg/admission/plugins.go create mode 100644 vendor/k8s.io/apiserver/pkg/apis/example/doc.go create mode 100644 vendor/k8s.io/apiserver/pkg/apis/example/fuzzer/fuzzer.go create mode 100644 
vendor/k8s.io/apiserver/pkg/apis/example/install/install.go create mode 100644 vendor/k8s.io/apiserver/pkg/apis/example/register.go create mode 100644 vendor/k8s.io/apiserver/pkg/apis/example/types.go create mode 100644 vendor/k8s.io/apiserver/pkg/apis/example/v1/conversion.go create mode 100644 vendor/k8s.io/apiserver/pkg/apis/example/v1/defaults.go create mode 100644 vendor/k8s.io/apiserver/pkg/apis/example/v1/doc.go create mode 100644 vendor/k8s.io/apiserver/pkg/apis/example/v1/generated.pb.go create mode 100644 vendor/k8s.io/apiserver/pkg/apis/example/v1/generated.proto create mode 100644 vendor/k8s.io/apiserver/pkg/apis/example/v1/register.go create mode 100644 vendor/k8s.io/apiserver/pkg/apis/example/v1/types.generated.go create mode 100644 vendor/k8s.io/apiserver/pkg/apis/example/v1/types.go create mode 100644 vendor/k8s.io/apiserver/pkg/apis/example/v1/types_swagger_doc_generated.go create mode 100644 vendor/k8s.io/apiserver/pkg/apis/example/v1/zz_generated.conversion.go create mode 100644 vendor/k8s.io/apiserver/pkg/apis/example/v1/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/apiserver/pkg/apis/example/v1/zz_generated.defaults.go create mode 100644 vendor/k8s.io/apiserver/pkg/apis/example/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/apiserver/pkg/authentication/authenticator/interfaces.go create mode 100644 vendor/k8s.io/apiserver/pkg/authentication/authenticatorfactory/delegating.go create mode 100644 vendor/k8s.io/apiserver/pkg/authentication/authenticatorfactory/loopback.go create mode 100644 vendor/k8s.io/apiserver/pkg/authentication/authenticatorfactory/requestheader.go create mode 100644 vendor/k8s.io/apiserver/pkg/authentication/group/group_adder.go create mode 100644 vendor/k8s.io/apiserver/pkg/authentication/group/group_adder_test.go create mode 100644 vendor/k8s.io/apiserver/pkg/authentication/request/anonymous/anonymous.go create mode 100644 vendor/k8s.io/apiserver/pkg/authentication/request/anonymous/anonymous_test.go 
create mode 100644 vendor/k8s.io/apiserver/pkg/authentication/request/bearertoken/bearertoken.go create mode 100644 vendor/k8s.io/apiserver/pkg/authentication/request/bearertoken/bearertoken_test.go create mode 100644 vendor/k8s.io/apiserver/pkg/authentication/request/headerrequest/requestheader.go create mode 100644 vendor/k8s.io/apiserver/pkg/authentication/request/headerrequest/requestheader_test.go create mode 100644 vendor/k8s.io/apiserver/pkg/authentication/request/union/union.go create mode 100644 vendor/k8s.io/apiserver/pkg/authentication/request/union/unionauth_test.go create mode 100644 vendor/k8s.io/apiserver/pkg/authentication/request/x509/doc.go create mode 100644 vendor/k8s.io/apiserver/pkg/authentication/request/x509/testdata/client-expired.pem create mode 100644 vendor/k8s.io/apiserver/pkg/authentication/request/x509/testdata/client-valid.pem create mode 100644 vendor/k8s.io/apiserver/pkg/authentication/request/x509/testdata/client.config.json create mode 100644 vendor/k8s.io/apiserver/pkg/authentication/request/x509/testdata/client.csr.json create mode 100755 vendor/k8s.io/apiserver/pkg/authentication/request/x509/testdata/generate.sh create mode 100644 vendor/k8s.io/apiserver/pkg/authentication/request/x509/testdata/intermediate.config.json create mode 100644 vendor/k8s.io/apiserver/pkg/authentication/request/x509/testdata/intermediate.csr.json create mode 100644 vendor/k8s.io/apiserver/pkg/authentication/request/x509/testdata/intermediate.pem create mode 100644 vendor/k8s.io/apiserver/pkg/authentication/request/x509/testdata/root.csr.json create mode 100644 vendor/k8s.io/apiserver/pkg/authentication/request/x509/testdata/root.pem create mode 100644 vendor/k8s.io/apiserver/pkg/authentication/request/x509/x509.go create mode 100644 vendor/k8s.io/apiserver/pkg/authentication/request/x509/x509_test.go create mode 100644 vendor/k8s.io/apiserver/pkg/authentication/serviceaccount/util.go create mode 100644 
vendor/k8s.io/apiserver/pkg/authentication/serviceaccount/util_test.go create mode 100644 vendor/k8s.io/apiserver/pkg/authentication/token/tokenfile/tokenfile.go create mode 100644 vendor/k8s.io/apiserver/pkg/authentication/token/tokenfile/tokenfile_test.go create mode 100644 vendor/k8s.io/apiserver/pkg/authentication/user/doc.go create mode 100644 vendor/k8s.io/apiserver/pkg/authentication/user/user.go create mode 100644 vendor/k8s.io/apiserver/pkg/authorization/authorizer/interfaces.go create mode 100755 vendor/k8s.io/apiserver/pkg/authorization/authorizerfactory/OWNERS create mode 100644 vendor/k8s.io/apiserver/pkg/authorization/authorizerfactory/authz_test.go create mode 100644 vendor/k8s.io/apiserver/pkg/authorization/authorizerfactory/builtin.go create mode 100644 vendor/k8s.io/apiserver/pkg/authorization/authorizerfactory/delegating.go create mode 100644 vendor/k8s.io/apiserver/pkg/authorization/union/union.go create mode 100644 vendor/k8s.io/apiserver/pkg/authorization/union/union_test.go create mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/handlers/negotiation/doc.go create mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/handlers/negotiation/errors.go create mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/handlers/negotiation/negotiate.go create mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/handlers/negotiation/negotiate_test.go create mode 100755 vendor/k8s.io/apiserver/pkg/endpoints/metrics/OWNERS create mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/metrics/metrics.go create mode 100755 vendor/k8s.io/apiserver/pkg/endpoints/request/OWNERS create mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/request/context.go create mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/request/context_test.go create mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/request/doc.go create mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/request/requestcontext.go create mode 100644 vendor/k8s.io/apiserver/pkg/endpoints/request/requestinfo.go create mode 
100644 vendor/k8s.io/apiserver/pkg/endpoints/request/requestinfo_test.go create mode 100644 vendor/k8s.io/apiserver/pkg/features/kube_features.go create mode 100644 vendor/k8s.io/apiserver/pkg/server/doc.go create mode 100755 vendor/k8s.io/apiserver/pkg/server/filters/OWNERS create mode 100644 vendor/k8s.io/apiserver/pkg/server/filters/cors.go create mode 100644 vendor/k8s.io/apiserver/pkg/server/filters/cors_test.go create mode 100644 vendor/k8s.io/apiserver/pkg/server/filters/doc.go create mode 100644 vendor/k8s.io/apiserver/pkg/server/filters/longrunning.go create mode 100644 vendor/k8s.io/apiserver/pkg/server/filters/maxinflight.go create mode 100644 vendor/k8s.io/apiserver/pkg/server/filters/maxinflight_test.go create mode 100644 vendor/k8s.io/apiserver/pkg/server/filters/timeout.go create mode 100644 vendor/k8s.io/apiserver/pkg/server/filters/timeout_test.go create mode 100644 vendor/k8s.io/apiserver/pkg/server/filters/wrap.go create mode 100644 vendor/k8s.io/apiserver/pkg/server/healthz/doc.go create mode 100644 vendor/k8s.io/apiserver/pkg/server/healthz/healthz.go create mode 100644 vendor/k8s.io/apiserver/pkg/server/healthz/healthz_test.go create mode 100644 vendor/k8s.io/apiserver/pkg/server/httplog/doc.go create mode 100644 vendor/k8s.io/apiserver/pkg/server/httplog/log.go create mode 100644 vendor/k8s.io/apiserver/pkg/server/httplog/log_test.go create mode 100755 vendor/k8s.io/apiserver/pkg/server/options/OWNERS create mode 100644 vendor/k8s.io/apiserver/pkg/server/options/authentication.go create mode 100644 vendor/k8s.io/apiserver/pkg/server/options/authorization.go create mode 100644 vendor/k8s.io/apiserver/pkg/server/options/doc.go create mode 100644 vendor/k8s.io/apiserver/pkg/server/options/etcd.go create mode 100644 vendor/k8s.io/apiserver/pkg/server/options/server_run_options.go create mode 100644 vendor/k8s.io/apiserver/pkg/server/options/serving.go create mode 100644 vendor/k8s.io/apiserver/pkg/server/routes/data/README.md create mode 100644 
vendor/k8s.io/apiserver/pkg/server/routes/data/swagger/datafile.go create mode 100644 vendor/k8s.io/apiserver/pkg/storage/OWNERS create mode 100644 vendor/k8s.io/apiserver/pkg/storage/cacher.go create mode 100644 vendor/k8s.io/apiserver/pkg/storage/cacher_whitebox_test.go create mode 100644 vendor/k8s.io/apiserver/pkg/storage/doc.go create mode 100644 vendor/k8s.io/apiserver/pkg/storage/errors.go create mode 100644 vendor/k8s.io/apiserver/pkg/storage/etcd/metrics/metrics.go create mode 100644 vendor/k8s.io/apiserver/pkg/storage/interfaces.go create mode 100644 vendor/k8s.io/apiserver/pkg/storage/names/generate.go create mode 100644 vendor/k8s.io/apiserver/pkg/storage/names/generate_test.go create mode 100644 vendor/k8s.io/apiserver/pkg/storage/selection_predicate.go create mode 100644 vendor/k8s.io/apiserver/pkg/storage/selection_predicate_test.go create mode 100755 vendor/k8s.io/apiserver/pkg/storage/storagebackend/OWNERS create mode 100644 vendor/k8s.io/apiserver/pkg/storage/storagebackend/config.go create mode 100644 vendor/k8s.io/apiserver/pkg/storage/time_budget.go create mode 100644 vendor/k8s.io/apiserver/pkg/storage/time_budget_test.go create mode 100644 vendor/k8s.io/apiserver/pkg/storage/util.go create mode 100644 vendor/k8s.io/apiserver/pkg/storage/util_test.go create mode 100644 vendor/k8s.io/apiserver/pkg/storage/watch_cache.go create mode 100644 vendor/k8s.io/apiserver/pkg/storage/watch_cache_test.go create mode 100644 vendor/k8s.io/apiserver/pkg/util/cache/cache.go create mode 100644 vendor/k8s.io/apiserver/pkg/util/cache/cache_test.go create mode 100644 vendor/k8s.io/apiserver/pkg/util/cache/lruexpirecache.go create mode 100644 vendor/k8s.io/apiserver/pkg/util/cache/lruexpirecache_test.go create mode 100644 vendor/k8s.io/apiserver/pkg/util/feature/feature_gate.go create mode 100644 vendor/k8s.io/apiserver/pkg/util/feature/feature_gate_test.go create mode 100644 vendor/k8s.io/apiserver/pkg/util/flag/configuration_map.go create mode 100644 
vendor/k8s.io/apiserver/pkg/util/flag/flags.go create mode 100644 vendor/k8s.io/apiserver/pkg/util/flag/namedcertkey_flag.go create mode 100644 vendor/k8s.io/apiserver/pkg/util/flag/namedcertkey_flag_test.go create mode 100644 vendor/k8s.io/apiserver/pkg/util/flag/string_flag.go create mode 100644 vendor/k8s.io/apiserver/pkg/util/flag/tristate.go create mode 100644 vendor/k8s.io/apiserver/pkg/util/flushwriter/doc.go create mode 100644 vendor/k8s.io/apiserver/pkg/util/flushwriter/writer.go create mode 100644 vendor/k8s.io/apiserver/pkg/util/flushwriter/writer_test.go create mode 100644 vendor/k8s.io/apiserver/pkg/util/proxy/dial.go create mode 100644 vendor/k8s.io/apiserver/pkg/util/proxy/dial_test.go create mode 100644 vendor/k8s.io/apiserver/pkg/util/proxy/doc.go create mode 100644 vendor/k8s.io/apiserver/pkg/util/proxy/transport.go create mode 100644 vendor/k8s.io/apiserver/pkg/util/proxy/transport_test.go create mode 100644 vendor/k8s.io/apiserver/pkg/util/trace/trace.go create mode 100644 vendor/k8s.io/apiserver/pkg/util/trie/trie.go create mode 100755 vendor/k8s.io/apiserver/pkg/util/webhook/webhook.go create mode 100644 vendor/k8s.io/apiserver/pkg/util/wsstream/conn.go create mode 100644 vendor/k8s.io/apiserver/pkg/util/wsstream/conn_test.go create mode 100644 vendor/k8s.io/apiserver/pkg/util/wsstream/doc.go create mode 100644 vendor/k8s.io/apiserver/pkg/util/wsstream/stream.go create mode 100644 vendor/k8s.io/apiserver/pkg/util/wsstream/stream_test.go create mode 100644 vendor/k8s.io/apiserver/plugin/pkg/authenticator/doc.go create mode 100644 vendor/k8s.io/apiserver/plugin/pkg/authenticator/password/allow/allow.go create mode 100644 vendor/k8s.io/apiserver/plugin/pkg/authenticator/password/allow/allow_test.go create mode 100644 vendor/k8s.io/apiserver/plugin/pkg/authenticator/password/doc.go create mode 100644 vendor/k8s.io/apiserver/plugin/pkg/authenticator/password/keystone/doc.go create mode 100644 
vendor/k8s.io/apiserver/plugin/pkg/authenticator/password/keystone/keystone.go create mode 100644 vendor/k8s.io/apiserver/plugin/pkg/authenticator/password/passwordfile/passwordfile.go create mode 100644 vendor/k8s.io/apiserver/plugin/pkg/authenticator/password/passwordfile/passwordfile_test.go create mode 100644 vendor/k8s.io/apiserver/plugin/pkg/authenticator/request/basicauth/basicauth.go create mode 100644 vendor/k8s.io/apiserver/plugin/pkg/authenticator/request/basicauth/basicauth_test.go create mode 100644 vendor/k8s.io/apiserver/plugin/pkg/authenticator/token/anytoken/anytoken.go create mode 100644 vendor/k8s.io/apiserver/plugin/pkg/authenticator/token/anytoken/anytoken_test.go create mode 100644 vendor/k8s.io/apiserver/plugin/pkg/authenticator/token/oidc/OWNERS create mode 100644 vendor/k8s.io/apiserver/plugin/pkg/authenticator/token/oidc/oidc.go create mode 100644 vendor/k8s.io/apiserver/plugin/pkg/authenticator/token/oidc/oidc_test.go create mode 100644 vendor/k8s.io/apiserver/plugin/pkg/authenticator/token/oidc/testing/provider.go create mode 100644 vendor/k8s.io/apiserver/plugin/pkg/authenticator/token/tokentest/tokentest.go create mode 100644 vendor/k8s.io/apiserver/plugin/pkg/authenticator/token/webhook/certs_test.go create mode 100644 vendor/k8s.io/apiserver/plugin/pkg/authenticator/token/webhook/webhook.go create mode 100644 vendor/k8s.io/apiserver/plugin/pkg/authenticator/token/webhook/webhook_test.go create mode 100644 vendor/k8s.io/apiserver/plugin/pkg/authorizer/webhook/certs_test.go create mode 100755 vendor/k8s.io/apiserver/plugin/pkg/authorizer/webhook/gencerts.sh create mode 100644 vendor/k8s.io/apiserver/plugin/pkg/authorizer/webhook/webhook.go create mode 100644 vendor/k8s.io/apiserver/plugin/pkg/authorizer/webhook/webhook_test.go diff --git a/lock.json b/lock.json index f2c449d6..49739450 100644 --- a/lock.json +++ b/lock.json @@ -1,6 +1,24 @@ { - "memo": "0d3077faf280e4e13e18e56f085053d4ced593c2fcfcb09d7df1aea8f0bba403", + "memo": 
"e99fe9f7a283d8fb8e0ec8b05fa68d01a7dfa4c7c48b6e85a84986a079685711", "projects": [ + { + "name": "cloud.google.com/go", + "version": "v0.7.0", + "revision": "2e6a95edb1071d750f6d7db777bf66cd2997af6c", + "packages": [ + "compute/metadata", + "internal" + ] + }, + { + "name": "github.com/Azure/go-ansiterm", + "branch": "master", + "revision": "fa152c58bc15761d0200cb75fe958b89a9d4888e", + "packages": [ + ".", + "winterm" + ] + }, { "name": "github.com/BurntSushi/toml", "version": "v0.2.0", @@ -27,6 +45,22 @@ "." ] }, + { + "name": "github.com/PuerkitoBio/purell", + "version": "v1.1.0", + "revision": "0bcb03f4b4d0a9428594752bd2a3b9aa0a9d4bd4", + "packages": [ + "." + ] + }, + { + "name": "github.com/PuerkitoBio/urlesc", + "branch": "master", + "revision": "5bd2802263f21d8788851d5305584c82a5c75d7e", + "packages": [ + "." + ] + }, { "name": "github.com/Sirupsen/logrus", "branch": "master", @@ -119,6 +153,18 @@ "storageversion" ] }, + { + "name": "github.com/coreos/go-oidc", + "branch": "master", + "revision": "be73733bb8cc830d0205609b95d125215f8e9c70", + "packages": [ + "http", + "jose", + "key", + "oauth2", + "oidc" + ] + }, { "name": "github.com/coreos/go-systemd", "version": "v14", @@ -128,6 +174,24 @@ "dbus" ] }, + { + "name": "github.com/coreos/pkg", + "version": "v3", + "revision": "3ac0863d7acf3bc44daf49afef8919af12f704ef", + "packages": [ + "health", + "httputil", + "timeutil" + ] + }, + { + "name": "github.com/davecgh/go-spew", + "version": "v1.1.0", + "revision": "346938d642f2ec3594ed81d874461961cd0faa76", + "packages": [ + "spew" + ] + }, { "name": "github.com/docker/distribution", "branch": "master", @@ -174,6 +238,8 @@ "pkg/stringutils", "pkg/symlink", "pkg/system", + "pkg/term", + "pkg/term/windows", "pkg/tlsconfig", "pkg/truncindex", "utils/templates" @@ -205,6 +271,25 @@ "." 
] }, + { + "name": "github.com/docker/spdystream", + "branch": "master", + "revision": "ed496381df8283605c435b86d4fdd6f4f20b8c6e", + "packages": [ + ".", + "spdy" + ] + }, + { + "name": "github.com/emicklei/go-restful", + "branch": "master", + "revision": "09691a3b6378b740595c1002f40c34dd5f218a22", + "packages": [ + ".", + "log", + "swagger" + ] + }, { "name": "github.com/fsnotify/fsnotify", "branch": "master", @@ -221,6 +306,38 @@ "." ] }, + { + "name": "github.com/go-openapi/jsonpointer", + "branch": "master", + "revision": "779f45308c19820f1a69e9a4cd965f496e0da10f", + "packages": [ + "." + ] + }, + { + "name": "github.com/go-openapi/jsonreference", + "branch": "master", + "revision": "36d33bfe519efae5632669801b180bf1a245da3b", + "packages": [ + "." + ] + }, + { + "name": "github.com/go-openapi/spec", + "branch": "master", + "revision": "02fb9cd3430ed0581e0ceb4804d5d4b3cc702694", + "packages": [ + "." + ] + }, + { + "name": "github.com/go-openapi/swag", + "branch": "master", + "revision": "d5f8ebc3b1c55a4cf6489eeae7354f338cfe299e", + "packages": [ + "." + ] + }, { "name": "github.com/godbus/dbus", "version": "v4.0.0", @@ -240,6 +357,22 @@ "sortkeys" ] }, + { + "name": "github.com/golang/glog", + "branch": "master", + "revision": "23def4e6c14b4da8ac2ed8007337bc5eb5007998", + "packages": [ + "." + ] + }, + { + "name": "github.com/golang/groupcache", + "branch": "master", + "revision": "b710c8433bd175204919eb38776e944233235d03", + "packages": [ + "lru" + ] + }, { "name": "github.com/golang/protobuf", "branch": "master", @@ -248,6 +381,22 @@ "proto" ] }, + { + "name": "github.com/google/gofuzz", + "branch": "master", + "revision": "44d81051d367757e1c7c6a5a86423ece9afcf63c", + "packages": [ + "." + ] + }, + { + "name": "github.com/googleapis/gax-go", + "branch": "master", + "revision": "9af46dd5a1713e8b5cd71106287eba3cefdde50b", + "packages": [ + "." + ] + }, { "name": "github.com/gorilla/context", "version": "v1.1", @@ -272,6 +421,40 @@ "." 
] }, + { + "name": "github.com/jonboulle/clockwork", + "version": "v0.1.0", + "revision": "2eee05ed794112d45db504eb05aa693efd2b8b09", + "packages": [ + "." + ] + }, + { + "name": "github.com/juju/ratelimit", + "branch": "master", + "revision": "acf38b000a03e4ab89e40f20f1e548f4e6ac7f72", + "packages": [ + "." + ] + }, + { + "name": "github.com/kr/pty", + "version": "v1.0.0", + "revision": "2c10821df3c3cf905230d078702dfbe9404c9b23", + "packages": [ + "." + ] + }, + { + "name": "github.com/mailru/easyjson", + "branch": "master", + "revision": "99e922cf9de1bc0ab38310c277cff32c2147e747", + "packages": [ + "buffer", + "jlexer", + "jwriter" + ] + }, { "name": "github.com/mattn/go-runewidth", "version": "v0.0.1", @@ -296,6 +479,14 @@ "." ] }, + { + "name": "github.com/mitchellh/go-wordwrap", + "branch": "master", + "revision": "ad45545899c7b13c020ea92b2072220eefad42b8", + "packages": [ + "." + ] + }, { "name": "github.com/mtrmac/gpgme", "branch": "master", @@ -374,6 +565,14 @@ "." ] }, + { + "name": "github.com/spf13/pflag", + "branch": "master", + "revision": "9ff6c6923cfffbcd502984b8e0c80539a94968b7", + "packages": [ + "." 
+ ] + }, { "name": "github.com/syndtr/gocapability", "branch": "master", @@ -390,6 +589,14 @@ "patricia" ] }, + { + "name": "github.com/ugorji/go", + "branch": "master", + "revision": "d23841a297e5489e787e72fceffabf9d2994b52a", + "packages": [ + "codec" + ] + }, { "name": "github.com/urfave/cli", "version": "v1.19.1", @@ -435,7 +642,20 @@ "internal/timeseries", "lex/httplex", "proxy", - "trace" + "trace", + "websocket" + ] + }, + { + "name": "golang.org/x/oauth2", + "branch": "master", + "revision": "a6bd8cefa1811bd24b86f8902872e4e8225f74c4", + "packages": [ + ".", + "google", + "internal", + "jws", + "jwt" ] }, { @@ -447,6 +667,37 @@ "windows" ] }, + { + "name": "golang.org/x/text", + "branch": "master", + "revision": "dafb3384ad25363d928a9e97ce4ad3a2f0667e34", + "packages": [ + "internal/gen", + "internal/triegen", + "internal/ucd", + "transform", + "unicode/cldr", + "unicode/norm", + "width" + ] + }, + { + "name": "google.golang.org/appengine", + "version": "v1.0.0", + "revision": "150dc57a1b433e64154302bdc40b6bb8aefa313a", + "packages": [ + ".", + "internal", + "internal/app_identity", + "internal/base", + "internal/datastore", + "internal/log", + "internal/modules", + "internal/remote_api", + "internal/urlfetch", + "urlfetch" + ] + }, { "name": "google.golang.org/grpc", "version": "v1.0.1-GA", @@ -471,6 +722,14 @@ "." ] }, + { + "name": "gopkg.in/inf.v0", + "version": "v0.9.0", + "revision": "3887ee99ecf07df5b447e9b00d9c0b2adaa9f3e4", + "packages": [ + "." 
+ ] + }, { "name": "gopkg.in/yaml.v2", "branch": "v2", @@ -484,8 +743,51 @@ "branch": "master", "revision": "21807b270ec15d19215659a5caa08b17f66d6f44", "packages": [ + "pkg/api/errors", + "pkg/api/meta", + "pkg/apimachinery", + "pkg/apimachinery/announced", + "pkg/apimachinery/registered", + "pkg/apis/meta/v1", + "pkg/apis/meta/v1/unstructured", + "pkg/conversion", + "pkg/conversion/queryparams", "pkg/fields", - "pkg/selection" + "pkg/labels", + "pkg/openapi", + "pkg/runtime", + "pkg/runtime/schema", + "pkg/runtime/serializer", + "pkg/runtime/serializer/json", + "pkg/runtime/serializer/protobuf", + "pkg/runtime/serializer/recognizer", + "pkg/runtime/serializer/streaming", + "pkg/runtime/serializer/versioning", + "pkg/selection", + "pkg/types", + "pkg/util/errors", + "pkg/util/framer", + "pkg/util/json", + "pkg/util/net", + "pkg/util/rand", + "pkg/util/runtime", + "pkg/util/sets", + "pkg/util/validation", + "pkg/util/validation/field", + "pkg/util/wait", + "pkg/util/yaml", + "pkg/version", + "pkg/watch", + "third_party/forked/golang/reflect" + ] + }, + { + "name": "k8s.io/apiserver", + "branch": "master", + "revision": "18254ddaaab8024609bdf570493103036d72f86d", + "packages": [ + "pkg/server/httplog", + "pkg/util/wsstream" ] }, { @@ -493,7 +795,31 @@ "branch": "master", "revision": "b766ef93a46ce6dc863462254658ca2861a53314", "packages": [ - "util/homedir" + "pkg/api", + "pkg/api/resource", + "pkg/api/v1", + "pkg/apis/autoscaling", + "pkg/apis/extensions", + "pkg/util", + "pkg/util/intstr", + "pkg/util/labels", + "pkg/util/parsers", + "pkg/version", + "plugin/pkg/client/auth", + "plugin/pkg/client/auth/gcp", + "plugin/pkg/client/auth/oidc", + "rest", + "rest/watch", + "third_party/forked/golang/template", + "tools/clientcmd/api", + "tools/metrics", + "transport", + "util/cert", + "util/clock", + "util/flowcontrol", + "util/homedir", + "util/integer", + "util/jsonpath" ] }, { @@ -501,7 +827,88 @@ "branch": "master", "revision": 
"760d8e98e8f6ad27aaf50b1a030cb9e7b6859aab", "packages": [ - "pkg/kubelet/api/v1alpha1/runtime" + "pkg/api", + "pkg/api/install", + "pkg/api/resource", + "pkg/api/v1", + "pkg/apis/apps", + "pkg/apis/apps/install", + "pkg/apis/apps/v1beta1", + "pkg/apis/authentication", + "pkg/apis/authentication/install", + "pkg/apis/authentication/v1beta1", + "pkg/apis/authorization", + "pkg/apis/authorization/install", + "pkg/apis/authorization/v1beta1", + "pkg/apis/autoscaling", + "pkg/apis/autoscaling/install", + "pkg/apis/autoscaling/v1", + "pkg/apis/batch", + "pkg/apis/batch/install", + "pkg/apis/batch/v1", + "pkg/apis/batch/v2alpha1", + "pkg/apis/certificates", + "pkg/apis/certificates/install", + "pkg/apis/certificates/v1beta1", + "pkg/apis/extensions", + "pkg/apis/extensions/install", + "pkg/apis/extensions/v1beta1", + "pkg/apis/policy", + "pkg/apis/policy/install", + "pkg/apis/policy/v1beta1", + "pkg/apis/rbac", + "pkg/apis/rbac/install", + "pkg/apis/rbac/v1alpha1", + "pkg/apis/rbac/v1beta1", + "pkg/apis/storage", + "pkg/apis/storage/install", + "pkg/apis/storage/v1beta1", + "pkg/client/clientset_generated/clientset", + "pkg/client/clientset_generated/clientset/typed/apps/v1beta1", + "pkg/client/clientset_generated/clientset/typed/authentication/v1beta1", + "pkg/client/clientset_generated/clientset/typed/authorization/v1beta1", + "pkg/client/clientset_generated/clientset/typed/autoscaling/v1", + "pkg/client/clientset_generated/clientset/typed/batch/v1", + "pkg/client/clientset_generated/clientset/typed/batch/v2alpha1", + "pkg/client/clientset_generated/clientset/typed/certificates/v1beta1", + "pkg/client/clientset_generated/clientset/typed/core/v1", + "pkg/client/clientset_generated/clientset/typed/extensions/v1beta1", + "pkg/client/clientset_generated/clientset/typed/policy/v1beta1", + "pkg/client/clientset_generated/clientset/typed/rbac/v1alpha1", + "pkg/client/clientset_generated/clientset/typed/rbac/v1beta1", + 
"pkg/client/clientset_generated/clientset/typed/storage/v1beta1", + "pkg/client/record", + "pkg/client/typed/discovery", + "pkg/client/unversioned/remotecommand", + "pkg/cloudprovider", + "pkg/kubelet/api/v1alpha1/runtime", + "pkg/kubelet/container", + "pkg/kubelet/server/portforward", + "pkg/kubelet/server/remotecommand", + "pkg/kubelet/server/streaming", + "pkg/kubelet/util/format", + "pkg/kubelet/util/ioutils", + "pkg/util", + "pkg/util/chmod", + "pkg/util/chown", + "pkg/util/exec", + "pkg/util/hash", + "pkg/util/httpstream", + "pkg/util/httpstream/spdy", + "pkg/util/interrupt", + "pkg/util/intstr", + "pkg/util/io", + "pkg/util/labels", + "pkg/util/mount", + "pkg/util/parsers", + "pkg/util/strategicpatch", + "pkg/util/term", + "pkg/version", + "pkg/volume", + "pkg/volume/util", + "third_party/forked/golang/expansion", + "third_party/forked/golang/json", + "third_party/forked/golang/netutil" ] } ] diff --git a/vendor/cloud.google.com/go/.travis.yml b/vendor/cloud.google.com/go/.travis.yml new file mode 100644 index 00000000..d9be5f73 --- /dev/null +++ b/vendor/cloud.google.com/go/.travis.yml @@ -0,0 +1,16 @@ +sudo: false +language: go +go: +- 1.6 +- 1.7 +- 1.8 +install: +- go get -v cloud.google.com/go/... +script: +- openssl aes-256-cbc -K $encrypted_912ff8fa81ad_key -iv $encrypted_912ff8fa81ad_iv -in key.json.enc -out key.json -d +- GCLOUD_TESTS_GOLANG_PROJECT_ID="dulcet-port-762" GCLOUD_TESTS_GOLANG_KEY="$(pwd)/key.json" + go test -race -v cloud.google.com/go/... +env: + matrix: + # The GCLOUD_TESTS_API_KEY environment variable. 
+ secure: VdldogUOoubQ60LhuHJ+g/aJoBiujkSkWEWl79Zb8cvQorcQbxISS+JsOOp4QkUOU4WwaHAm8/3pIH1QMWOR6O78DaLmDKi5Q4RpkVdCpUXy+OAfQaZIcBsispMrjxLXnqFjo9ELnrArfjoeCTzaX0QTCfwQwVmigC8rR30JBKI= diff --git a/vendor/cloud.google.com/go/AUTHORS b/vendor/cloud.google.com/go/AUTHORS new file mode 100644 index 00000000..c364af1d --- /dev/null +++ b/vendor/cloud.google.com/go/AUTHORS @@ -0,0 +1,15 @@ +# This is the official list of cloud authors for copyright purposes. +# This file is distinct from the CONTRIBUTORS files. +# See the latter for an explanation. + +# Names should be added to this file as: +# Name or Organization +# The email address is not required for organizations. + +Filippo Valsorda +Google Inc. +Ingo Oeser +Palm Stone Games, Inc. +Paweł Knap +Péter Szilágyi +Tyler Treat diff --git a/vendor/cloud.google.com/go/CONTRIBUTING.md b/vendor/cloud.google.com/go/CONTRIBUTING.md new file mode 100644 index 00000000..2b4bac99 --- /dev/null +++ b/vendor/cloud.google.com/go/CONTRIBUTING.md @@ -0,0 +1,132 @@ +# Contributing + +1. Sign one of the contributor license agreements below. +1. `go get golang.org/x/review/git-codereview` to install the code reviewing tool. + 1. You will need to ensure that your `GOBIN` directory (by default + `$GOPATH/bin`) is in your `PATH` so that git can find the command. +1. Get the cloud package by running `go get -d cloud.google.com/go`. + 1. If you have already checked out the source, make sure that the remote git + origin is https://code.googlesource.com/gocloud: + + git remote set-url origin https://code.googlesource.com/gocloud +1. Make sure your auth is configured correctly by visiting + https://code.googlesource.com, clicking "Generate Password", and following + the directions. +1. Make changes and create a change by running `git codereview change `, +provide a commit message, and use `git codereview mail` to create a Gerrit CL. +1. Keep amending to the change with `git codereview change` and mail as your receive +feedback. 
Each new mailed amendment will create a new patch set for your change in Gerrit. + +## Integration Tests + +In addition to the unit tests, you may run the integration test suite. + +To run the integrations tests, creating and configuration of a project in the +Google Developers Console is required. + +After creating a project, you must [create a service account](https://developers.google.com/identity/protocols/OAuth2ServiceAccount#creatinganaccount). +Ensure the project-level **Owner** [IAM role](console.cloud.google.com/iam-admin/iam/project) +(or **Editor** and **Logs Configuration Writer** roles) are added to the +service account. + +Once you create a project, set the following environment variables to be able to +run the against the actual APIs. + +- **GCLOUD_TESTS_GOLANG_PROJECT_ID**: Developers Console project's ID (e.g. bamboo-shift-455) +- **GCLOUD_TESTS_GOLANG_KEY**: The path to the JSON key file. +- **GCLOUD_TESTS_API_KEY**: Your API key. + +Install the [gcloud command-line tool][gcloudcli] to your machine and use it +to create some resources used in integration tests. + +From the project's root directory: + +``` sh +# Set the default project in your env. +$ gcloud config set project $GCLOUD_TESTS_GOLANG_PROJECT_ID + +# Authenticate the gcloud tool with your account. +$ gcloud auth login + +# Create the indexes used in the datastore integration tests. +$ gcloud preview datastore create-indexes datastore/testdata/index.yaml + +# Create a Google Cloud storage bucket with the same name as your test project, +# and with the Stackdriver Logging service account as owner, for the sink +# integration tests in logging. +$ gsutil mb gs://$GCLOUD_TESTS_GOLANG_PROJECT_ID +$ gsutil acl ch -g cloud-logs@google.com:O gs://$GCLOUD_TESTS_GOLANG_PROJECT_ID + +# Create a Spanner instance for the spanner integration tests. 
+$ gcloud beta spanner instances create go-integration-test --config regional-us-central1 --nodes 1 --description 'Instance for go client test' +# NOTE: Spanner instances are priced by the node-hour, so you may want to delete +# the instance after testing with 'gcloud beta spanner instances delete'. +``` + +Once you've set the environment variables, you can run the integration tests by +running: + +``` sh +$ go test -v cloud.google.com/go/... +``` + +## Contributor License Agreements + +Before we can accept your pull requests you'll need to sign a Contributor +License Agreement (CLA): + +- **If you are an individual writing original source code** and **you own the +- intellectual property**, then you'll need to sign an [individual CLA][indvcla]. +- **If you work for a company that wants to allow you to contribute your work**, +then you'll need to sign a [corporate CLA][corpcla]. + +You can sign these electronically (just scroll to the bottom). After that, +we'll be able to accept your pull requests. + +## Contributor Code of Conduct + +As contributors and maintainers of this project, +and in the interest of fostering an open and welcoming community, +we pledge to respect all people who contribute through reporting issues, +posting feature requests, updating documentation, +submitting pull requests or patches, and other activities. + +We are committed to making participation in this project +a harassment-free experience for everyone, +regardless of level of experience, gender, gender identity and expression, +sexual orientation, disability, personal appearance, +body size, race, ethnicity, age, religion, or nationality. 
+ +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery +* Personal attacks +* Trolling or insulting/derogatory comments +* Public or private harassment +* Publishing other's private information, +such as physical or electronic +addresses, without explicit permission +* Other unethical or unprofessional conduct. + +Project maintainers have the right and responsibility to remove, edit, or reject +comments, commits, code, wiki edits, issues, and other contributions +that are not aligned to this Code of Conduct. +By adopting this Code of Conduct, +project maintainers commit themselves to fairly and consistently +applying these principles to every aspect of managing this project. +Project maintainers who do not follow or enforce the Code of Conduct +may be permanently removed from the project team. + +This code of conduct applies both within project spaces and in public spaces +when an individual is representing the project or its community. + +Instances of abusive, harassing, or otherwise unacceptable behavior +may be reported by opening an issue +or contacting one or more of the project maintainers. + +This Code of Conduct is adapted from the [Contributor Covenant](http://contributor-covenant.org), version 1.2.0, +available at [http://contributor-covenant.org/version/1/2/0/](http://contributor-covenant.org/version/1/2/0/) + +[gcloudcli]: https://developers.google.com/cloud/sdk/gcloud/ +[indvcla]: https://developers.google.com/open-source/cla/individual +[corpcla]: https://developers.google.com/open-source/cla/corporate diff --git a/vendor/cloud.google.com/go/CONTRIBUTORS b/vendor/cloud.google.com/go/CONTRIBUTORS new file mode 100644 index 00000000..d4b376c7 --- /dev/null +++ b/vendor/cloud.google.com/go/CONTRIBUTORS @@ -0,0 +1,37 @@ +# People who have agreed to one of the CLAs and can contribute patches. +# The AUTHORS file lists the copyright holders; this file +# lists people. 
For example, Google employees are listed here +# but not in AUTHORS, because Google holds the copyright. +# +# https://developers.google.com/open-source/cla/individual +# https://developers.google.com/open-source/cla/corporate +# +# Names should be added to this file as: +# Name + +# Keep the list alphabetically sorted. + +Alexis Hunt +Andreas Litt +Andrew Gerrand +Brad Fitzpatrick +Burcu Dogan +Dave Day +David Sansome +David Symonds +Filippo Valsorda +Glenn Lewis +Ingo Oeser +Johan Euphrosine +Jonathan Amsterdam +Luna Duclos +Magnus Hiie +Michael McGreevy +Omar Jarjur +Paweł Knap +Péter Szilágyi +Sarah Adams +Thanatat Tamtan +Toby Burress +Tuo Shan +Tyler Treat diff --git a/vendor/cloud.google.com/go/LICENSE b/vendor/cloud.google.com/go/LICENSE new file mode 100644 index 00000000..a4c5efd8 --- /dev/null +++ b/vendor/cloud.google.com/go/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright 2014 Google Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/cloud.google.com/go/README.md b/vendor/cloud.google.com/go/README.md new file mode 100644 index 00000000..5c188eb2 --- /dev/null +++ b/vendor/cloud.google.com/go/README.md @@ -0,0 +1,528 @@ +# Google Cloud for Go + +[![Build Status](https://travis-ci.org/GoogleCloudPlatform/google-cloud-go.svg?branch=master)](https://travis-ci.org/GoogleCloudPlatform/google-cloud-go) +[![GoDoc](https://godoc.org/cloud.google.com/go?status.svg)](https://godoc.org/cloud.google.com/go) + +``` go +import "cloud.google.com/go" +``` + +Go packages for Google Cloud Platform services. + +To install the packages on your system, + +``` +$ go get -u cloud.google.com/go/... +``` + +**NOTE:** These packages are under development, and may occasionally make +backwards-incompatible changes. + +**NOTE:** Github repo is a mirror of [https://code.googlesource.com/gocloud](https://code.googlesource.com/gocloud). + + * [News](#news) + * [Supported APIs](#supported-apis) + * [Go Versions Supported](#go-versions-supported) + * [Authorization](#authorization) + * [Cloud Datastore](#cloud-datastore-) + * [Cloud Storage](#cloud-storage-) + * [Cloud Pub/Sub](#cloud-pub-sub-) + * [Cloud BigQuery](#cloud-bigquery-) + * [Stackdriver Logging](#stackdriver-logging-) + * [Cloud Spanner](#cloud-spanner-) + + +## News + +_February 14, 2017_ + +Release of a client library for Spanner. 
See +the +[blog post](https://cloudplatform.googleblog.com/2017/02/introducing-Cloud-Spanner-a-global-database-service-for-mission-critical-applications.html). + +Note that although the Spanner service is beta, the Go client library is alpha. + +_December 12, 2016_ + +Beta release of BigQuery, DataStore, Logging and Storage. See the +[blog post](https://cloudplatform.googleblog.com/2016/12/announcing-new-google-cloud-client.html). + +Also, BigQuery now supports structs. Read a row directly into a struct with +`RowIterator.Next`, and upload a row directly from a struct with `Uploader.Put`. +You can also use field tags. See the [package documentation][cloud-bigquery-ref] +for details. + +_December 5, 2016_ + +More changes to BigQuery: + +* The `ValueList` type was removed. It is no longer necessary. Instead of + ```go + var v ValueList + ... it.Next(&v) .. + ``` + use + + ```go + var v []Value + ... it.Next(&v) ... + ``` + +* Previously, repeatedly calling `RowIterator.Next` on the same `[]Value` or + `ValueList` would append to the slice. Now each call resets the size to zero first. + +* Schema inference will infer the SQL type BYTES for a struct field of + type []byte. Previously it inferred STRING. + +* The types `uint`, `uint64` and `uintptr` are no longer supported in schema + inference. BigQuery's integer type is INT64, and those types may hold values + that are not correctly represented in a 64-bit signed integer. + +* The SQL types DATE, TIME and DATETIME are now supported. They correspond to + the `Date`, `Time` and `DateTime` types in the new `cloud.google.com/go/civil` + package. + +_November 17, 2016_ + +Change to BigQuery: values from INTEGER columns will now be returned as int64, +not int. This will avoid errors arising from large values on 32-bit systems. + +_November 8, 2016_ + +New datastore feature: datastore now encodes your nested Go structs as Entity values, +instead of a flattened list of the embedded struct's fields. 
+This means that you may now have twice-nested slices, eg. +```go +type State struct { + Cities []struct{ + Populations []int + } +} +``` + +See [the announcement](https://groups.google.com/forum/#!topic/google-api-go-announce/79jtrdeuJAg) for +more details. + +_November 8, 2016_ + +Breaking changes to datastore: contexts no longer hold namespaces; instead you +must set a key's namespace explicitly. Also, key functions have been changed +and renamed. + +* The WithNamespace function has been removed. To specify a namespace in a Query, use the Query.Namespace method: + ```go + q := datastore.NewQuery("Kind").Namespace("ns") + ``` + +* All the fields of Key are exported. That means you can construct any Key with a struct literal: + ```go + k := &Key{Kind: "Kind", ID: 37, Namespace: "ns"} + ``` + +* As a result of the above, the Key methods Kind, ID, d.Name, Parent, SetParent and Namespace have been removed. + +* `NewIncompleteKey` has been removed, replaced by `IncompleteKey`. Replace + ```go + NewIncompleteKey(ctx, kind, parent) + ``` + with + ```go + IncompleteKey(kind, parent) + ``` + and if you do use namespaces, make sure you set the namespace on the returned key. + +* `NewKey` has been removed, replaced by `NameKey` and `IDKey`. Replace + ```go + NewKey(ctx, kind, name, 0, parent) + NewKey(ctx, kind, "", id, parent) + ``` + with + ```go + NameKey(kind, name, parent) + IDKey(kind, id, parent) + ``` + and if you do use namespaces, make sure you set the namespace on the returned key. + +* The `Done` variable has been removed. Replace `datastore.Done` with `iterator.Done`, from the package `google.golang.org/api/iterator`. + +* The `Client.Close` method will have a return type of error. It will return the result of closing the underlying gRPC connection. + +See [the announcement](https://groups.google.com/forum/#!topic/google-api-go-announce/hqXtM_4Ix-0) for +more details. 
+ +_October 27, 2016_ + +Breaking change to bigquery: `NewGCSReference` is now a function, +not a method on `Client`. + +New bigquery feature: `Table.LoaderFrom` now accepts a `ReaderSource`, enabling +loading data into a table from a file or any `io.Reader`. + +_October 21, 2016_ + +Breaking change to pubsub: removed `pubsub.Done`. + +Use `iterator.Done` instead, where `iterator` is the package +`google.golang.org/api/iterator`. + + +[Older news](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/master/old-news.md) + +## Supported APIs + +Google API | Status | Package +-------------------------------|--------------|----------------------------------------------------------- +[Datastore][cloud-datastore] | beta | [`cloud.google.com/go/datastore`][cloud-datastore-ref] +[Storage][cloud-storage] | beta | [`cloud.google.com/go/storage`][cloud-storage-ref] +[Bigtable][cloud-bigtable] | beta | [`cloud.google.com/go/bigtable`][cloud-bigtable-ref] +[BigQuery][cloud-bigquery] | beta | [`cloud.google.com/go/bigquery`][cloud-bigquery-ref] +[Logging][cloud-logging] | beta | [`cloud.google.com/go/logging`][cloud-logging-ref] +[Pub/Sub][cloud-pubsub] | alpha | [`cloud.google.com/go/pubsub`][cloud-pubsub-ref] +[Vision][cloud-vision] | beta | [`cloud.google.com/go/vision`][cloud-vision-ref] +[Language][cloud-language] | alpha | [`cloud.google.com/go/language/apiv1`][cloud-language-ref] +[Speech][cloud-speech] | alpha | [`cloud.google.com/go/speech/apiv1beta`][cloud-speech-ref] +[Spanner][cloud-spanner] | alpha | [`cloud.google.com/go/spanner`][cloud-spanner-ref] + + +> **Alpha status**: the API is still being actively developed. As a +> result, it might change in backward-incompatible ways and is not recommended +> for production use. +> +> **Beta status**: the API is largely complete, but still has outstanding +> features and bugs to be addressed. There may be minor backwards-incompatible +> changes where necessary. 
+> +> **Stable status**: the API is mature and ready for production use. We will +> continue addressing bugs and feature requests. + +Documentation and examples are available at +https://godoc.org/cloud.google.com/go + +Visit or join the +[google-api-go-announce group](https://groups.google.com/forum/#!forum/google-api-go-announce) +for updates on these packages. + +## Go Versions Supported + +We support the two most recent major versions of Go. If Google App Engine uses +an older version, we support that as well. You can see which versions are +currently supported by looking at the lines following `go:` in +[`.travis.yml`](.travis.yml). + +## Authorization + +By default, each API will use [Google Application Default Credentials][default-creds] +for authorization credentials used in calling the API endpoints. This will allow your +application to run in many environments without requiring explicit configuration. + +```go +client, err := storage.NewClient(ctx) +``` + +To authorize using a +[JSON key file](https://cloud.google.com/iam/docs/managing-service-account-keys), +pass +[`option.WithServiceAccountFile`](https://godoc.org/google.golang.org/api/option#WithServiceAccountFile) +to the `NewClient` function of the desired package. For example: + +```go +client, err := storage.NewClient(ctx, option.WithServiceAccountFile("path/to/keyfile.json")) +``` + +You can exert more control over authorization by using the +[`golang.org/x/oauth2`](https://godoc.org/golang.org/x/oauth2) package to +create an `oauth2.TokenSource`. Then pass +[`option.WithTokenSource`](https://godoc.org/google.golang.org/api/option#WithTokenSource) +to the `NewClient` function: +```go +tokenSource := ... 
+client, err := storage.NewClient(ctx, option.WithTokenSource(tokenSource)) +``` + +## Cloud Datastore [![GoDoc](https://godoc.org/cloud.google.com/go/datastore?status.svg)](https://godoc.org/cloud.google.com/go/datastore) + +- [About Cloud Datastore][cloud-datastore] +- [Activating the API for your project][cloud-datastore-activation] +- [API documentation][cloud-datastore-docs] +- [Go client documentation](https://godoc.org/cloud.google.com/go/datastore) +- [Complete sample program](https://github.com/GoogleCloudPlatform/golang-samples/tree/master/datastore/tasks) + +### Example Usage + +First create a `datastore.Client` to use throughout your application: + +```go +client, err := datastore.NewClient(ctx, "my-project-id") +if err != nil { + log.Fatal(err) +} +``` + +Then use that client to interact with the API: + +```go +type Post struct { + Title string + Body string `datastore:",noindex"` + PublishedAt time.Time +} +keys := []*datastore.Key{ + datastore.NewKey(ctx, "Post", "post1", 0, nil), + datastore.NewKey(ctx, "Post", "post2", 0, nil), +} +posts := []*Post{ + {Title: "Post 1", Body: "...", PublishedAt: time.Now()}, + {Title: "Post 2", Body: "...", PublishedAt: time.Now()}, +} +if _, err := client.PutMulti(ctx, keys, posts); err != nil { + log.Fatal(err) +} +``` + +## Cloud Storage [![GoDoc](https://godoc.org/cloud.google.com/go/storage?status.svg)](https://godoc.org/cloud.google.com/go/storage) + +- [About Cloud Storage][cloud-storage] +- [API documentation][cloud-storage-docs] +- [Go client documentation](https://godoc.org/cloud.google.com/go/storage) +- [Complete sample programs](https://github.com/GoogleCloudPlatform/golang-samples/tree/master/storage) + +### Example Usage + +First create a `storage.Client` to use throughout your application: + +```go +client, err := storage.NewClient(ctx) +if err != nil { + log.Fatal(err) +} +``` + +```go +// Read the object1 from bucket. 
+rc, err := client.Bucket("bucket").Object("object1").NewReader(ctx) +if err != nil { + log.Fatal(err) +} +defer rc.Close() +body, err := ioutil.ReadAll(rc) +if err != nil { + log.Fatal(err) +} +``` + +## Cloud Pub/Sub [![GoDoc](https://godoc.org/cloud.google.com/go/pubsub?status.svg)](https://godoc.org/cloud.google.com/go/pubsub) + +- [About Cloud Pubsub][cloud-pubsub] +- [API documentation][cloud-pubsub-docs] +- [Go client documentation](https://godoc.org/cloud.google.com/go/pubsub) +- [Complete sample programs](https://github.com/GoogleCloudPlatform/golang-samples/tree/master/pubsub) + +### Example Usage + +First create a `pubsub.Client` to use throughout your application: + +```go +client, err := pubsub.NewClient(ctx, "project-id") +if err != nil { + log.Fatal(err) +} +``` + +Then use the client to publish and subscribe: + +```go +// Publish "hello world" on topic1. +topic := client.Topic("topic1") +msgIDs, err := topic.Publish(ctx, &pubsub.Message{ + Data: []byte("hello world"), +}) +if err != nil { + log.Fatal(err) +} + +// Create an iterator to pull messages via subscription1. +it, err := client.Subscription("subscription1").Pull(ctx) +if err != nil { + log.Println(err) +} +defer it.Stop() + +// Consume N messages from the iterator. +for i := 0; i < N; i++ { + msg, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + log.Fatalf("Failed to retrieve message: %v", err) + } + + fmt.Printf("Message %d: %s\n", i, msg.Data) + msg.Done(true) // Acknowledge that we've consumed the message. 
+} +``` + +## Cloud BigQuery [![GoDoc](https://godoc.org/cloud.google.com/go/bigquery?status.svg)](https://godoc.org/cloud.google.com/go/bigquery) + +- [About Cloud BigQuery][cloud-bigquery] +- [API documentation][cloud-bigquery-docs] +- [Go client documentation][cloud-bigquery-ref] +- [Complete sample programs](https://github.com/GoogleCloudPlatform/golang-samples/tree/master/bigquery) + +### Example Usage + +First create a `bigquery.Client` to use throughout your application: +```go +c, err := bigquery.NewClient(ctx, "my-project-ID") +if err != nil { + // TODO: Handle error. +} +``` +Then use that client to interact with the API: +```go +// Construct a query. +q := c.Query(` + SELECT year, SUM(number) + FROM [bigquery-public-data:usa_names.usa_1910_2013] + WHERE name = "William" + GROUP BY year + ORDER BY year +`) +// Execute the query. +it, err := q.Read(ctx) +if err != nil { + // TODO: Handle error. +} +// Iterate through the results. +for { + var values []bigquery.Value + err := it.Next(&values) + if err == iterator.Done { + break + } + if err != nil { + // TODO: Handle error. + } + fmt.Println(values) +} +``` + + +## Stackdriver Logging [![GoDoc](https://godoc.org/cloud.google.com/go/logging?status.svg)](https://godoc.org/cloud.google.com/go/logging) + +- [About Stackdriver Logging][cloud-logging] +- [API documentation][cloud-logging-docs] +- [Go client documentation][cloud-logging-ref] +- [Complete sample programs](https://github.com/GoogleCloudPlatform/golang-samples/tree/master/logging) + +### Example Usage + +First create a `logging.Client` to use throughout your application: + +```go +ctx := context.Background() +client, err := logging.NewClient(ctx, "my-project") +if err != nil { + // TODO: Handle error. +} +``` +Usually, you'll want to add log entries to a buffer to be periodically flushed +(automatically and asynchronously) to the Stackdriver Logging service. 
+```go +logger := client.Logger("my-log") +logger.Log(logging.Entry{Payload: "something happened!"}) +``` +Close your client before your program exits, to flush any buffered log entries. +```go +err = client.Close() +if err != nil { + // TODO: Handle error. +} +``` + + +## Cloud Spanner [![GoDoc](https://godoc.org/cloud.google.com/go/spanner?status.svg)](https://godoc.org/cloud.google.com/go/spanner) + +- [About Cloud Spanner][cloud-spanner] +- [API documentation][cloud-spanner-docs] +- [Go client documentation](https://godoc.org/cloud.google.com/go/spanner) + +### Example Usage + +First create a `spanner.Client` to use throughout your application: + +```go +client, err := spanner.NewClient(ctx, "projects/P/instances/I/databases/D") +if err != nil { + log.Fatal(err) +} +``` + +```go +// Simple Reads And Writes +_, err := client.Apply(ctx, []*spanner.Mutation{ + spanner.Insert("Users", + []string{"name", "email"}, + []interface{}{"alice", "a@example.com"})}) +if err != nil { + log.Fatal(err) +} +row, err := client.Single().ReadRow(ctx, "Users", + spanner.Key{"alice"}, []string{"email"}) +if err != nil { + log.Fatal(err) +} +``` + + +## Contributing + +Contributions are welcome. Please, see the +[CONTRIBUTING](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/master/CONTRIBUTING.md) +document for details. We're using Gerrit for our code reviews. Please don't open pull +requests against this repo, new pull requests will be automatically closed. + +Please note that this project is released with a Contributor Code of Conduct. +By participating in this project you agree to abide by its terms. +See [Contributor Code of Conduct](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/master/CONTRIBUTING.md#contributor-code-of-conduct) +for more information. 
+ +[cloud-datastore]: https://cloud.google.com/datastore/ +[cloud-datastore-ref]: https://godoc.org/cloud.google.com/go/datastore +[cloud-datastore-docs]: https://cloud.google.com/datastore/docs +[cloud-datastore-activation]: https://cloud.google.com/datastore/docs/activate + +[cloud-pubsub]: https://cloud.google.com/pubsub/ +[cloud-pubsub-ref]: https://godoc.org/cloud.google.com/go/pubsub +[cloud-pubsub-docs]: https://cloud.google.com/pubsub/docs + +[cloud-storage]: https://cloud.google.com/storage/ +[cloud-storage-ref]: https://godoc.org/cloud.google.com/go/storage +[cloud-storage-docs]: https://cloud.google.com/storage/docs +[cloud-storage-create-bucket]: https://cloud.google.com/storage/docs/cloud-console#_creatingbuckets + +[cloud-bigtable]: https://cloud.google.com/bigtable/ +[cloud-bigtable-ref]: https://godoc.org/cloud.google.com/go/bigtable + +[cloud-bigquery]: https://cloud.google.com/bigquery/ +[cloud-bigquery-docs]: https://cloud.google.com/bigquery/docs +[cloud-bigquery-ref]: https://godoc.org/cloud.google.com/go/bigquery + +[cloud-logging]: https://cloud.google.com/logging/ +[cloud-logging-docs]: https://cloud.google.com/logging/docs +[cloud-logging-ref]: https://godoc.org/cloud.google.com/go/logging + +[cloud-vision]: https://cloud.google.com/vision/ +[cloud-vision-ref]: https://godoc.org/cloud.google.com/go/vision + +[cloud-language]: https://cloud.google.com/natural-language +[cloud-language-ref]: https://godoc.org/cloud.google.com/go/language/apiv1 + +[cloud-speech]: https://cloud.google.com/speech +[cloud-speech-ref]: https://godoc.org/cloud.google.com/go/speech/apiv1beta1 + +[cloud-spanner]: https://cloud.google.com/spanner/ +[cloud-spanner-ref]: https://godoc.org/cloud.google.com/go/spanner +[cloud-spanner-docs]: https://cloud.google.com/spanner/docs + +[default-creds]: https://developers.google.com/identity/protocols/application-default-credentials diff --git a/vendor/cloud.google.com/go/appveyor.yml b/vendor/cloud.google.com/go/appveyor.yml 
new file mode 100644 index 00000000..e66cd00a --- /dev/null +++ b/vendor/cloud.google.com/go/appveyor.yml @@ -0,0 +1,32 @@ +# This file configures AppVeyor (http://www.appveyor.com), +# a Windows-based CI service similar to Travis. + +# Identifier for this run +version: "{build}" + +# Clone the repo into this path, which conforms to the standard +# Go workspace structure. +clone_folder: c:\gopath\src\cloud.google.com\go + +environment: + GOPATH: c:\gopath + GCLOUD_TESTS_GOLANG_PROJECT_ID: dulcet-port-762 + GCLOUD_TESTS_GOLANG_KEY: c:\gopath\src\cloud.google.com\go\key.json + KEYFILE_CONTENTS: + secure: IvRbDAhM2PIQqzVkjzJ4FjizUvoQ+c3vG/qhJQG+HlZ/L5KEkqLu+x6WjLrExrNMyGku4znB2jmbTrUW3Ob4sGG+R5vvqeQ3YMHCVIkw5CxY+/bUDkW5RZWsVbuCnNa/vKsWmCP+/sZW6ICe29yKJ2ZOb6QaauI4s9R6j+cqBbU9pumMGYFRb0Rw3uUU7DKmVFCy+NjTENZIlDP9rmjANgAzigowJJEb2Tg9sLlQKmQeKiBSRN8lKc5Nq60a+fIzHGKvql4eIitDDDpOpyHv15/Xr1BzFw2yDoiR4X1lng0u7q0X9RgX4VIYa6gT16NXBEmQgbuX8gh7SfPMp9RhiZD9sVUaV+yogEabYpyPnmUURo0hXwkctKaBkQlEmKvjHwF5dvbg8+yqGhwtjAgFNimXG3INrwQsfQsZskkQWanutbJf9xy50GyWWFZZdi0uT4oXP/b5P7aklPXKXsvrJKBh7RjEaqBrhi86IJwOjBspvoR4l2WmcQyxb2xzQS1pjbBJFQfYJJ8+JgsstTL8PBO9d4ybJC0li1Om1qnWxkaewvPxxuoHJ9LpRKof19yRYWBmhTXb2tTASKG/zslvl4fgG4DmQBS93WC7dsiGOhAraGw2eCTgd0lYZOhk1FjWl9TS80aktXxzH/7nTvem5ohm+eDl6O0wnTL4KXjQVNSQ1PyLn4lGRJ5MNGzBTRFWIr2API2rca4Fysyfh/UdmazPGlNbY9JPGqb9+F04QzLfqm+Zz/cHy59E7lOSMBlUI4KD6d6ZNNKNRH+/g9i+fSiyiXKugTfda8KBnWGyPwprxuWGYaiQUGUYOwJY5R6x5c4mjImAB310V+Wo33UbWFJiwxEDsiCNqW1meVkBzt2er26vh4qbgCUIQ3iM3gFPfHgy+QxkmIhic7Q1HYacQElt8AAP41M7cCKWCuZidegP37MBB//mjjiNt047ZSQEvB4tqsX/OvfbByVef+cbtVw9T0yjHvmCdPW1XrhyrCCgclu6oYYdbmc5D7BBDRbjjMWGv6YvceAbfGf6ukdB5PuV+TGEN/FoQ1QTRA6Aqf+3fLMg4mS4oyTfw5xyYNbv3qoyLPrp+BnxI53WB9p0hfMg4n9FD6NntBxjDq+Q3Lk/bjC/Y4MaRWdzbMzF9a0lgGfcw9DURlK5p7uGJC9vg34feNoQprxVEZRQ01cHLeob6eGkYm4HxSRx8JY39Mh+9wzJo+k/aIvFleNC3e35NOrkXr6wb5e42n2DwBdPqdNolTLtLFRglAL1LTpp27UjvjieWJAKfoDTR5CKl01sZqt0wPdLLcvsMj6CiPFmccUIOYeZMe86kLBD61Qa5F1EwkgO3Om2qSjW96FzL4skRc+BmU5RrHlAFSldR1wpUgtkUM
v9vH5Cy+UJdcvpZ8KbmhZ2PsjF7ddJ1ve9RAw3cP325AyIMwZ77Ef1mgTM0NJze6eSW1qKlEsgt1FADPyeUu1NQTA2H2dueMPGlArWTSUgyWR9AdfpqouT7eg0JWI5w+yUZZC+/rPglYbt84oLmYpwuli0z8FyEQRPIc3EtkfWIv/yYgDr2TZ0N2KvGfpi/MAUWgxI1gleC2uKgEOEtuJthd3XZjF2NoE7IBqjQOINybcJOjyeB5vRLDY1FLuxYzdg1y1etkV4XQig/vje + +install: + # Info for debugging. + - echo %PATH% + - go version + - go env + - go get -v -d -t ./... + + +# Provide a build script, or AppVeyor will call msbuild. +build_script: + - go install -v ./... + - echo %KEYFILE_CONTENTS% > %GCLOUD_TESTS_GOLANG_KEY% + +test_script: + - go test -v ./... diff --git a/vendor/cloud.google.com/go/authexample_test.go b/vendor/cloud.google.com/go/authexample_test.go new file mode 100644 index 00000000..fe75467f --- /dev/null +++ b/vendor/cloud.google.com/go/authexample_test.go @@ -0,0 +1,49 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package cloud_test + +import ( + "cloud.google.com/go/datastore" + "golang.org/x/net/context" + "google.golang.org/api/option" +) + +func Example_applicationDefaultCredentials() { + // Google Application Default Credentials is the recommended way to authorize + // and authenticate clients. + // + // See the following link on how to create and obtain Application Default Credentials: + // https://developers.google.com/identity/protocols/application-default-credentials. 
+ client, err := datastore.NewClient(context.Background(), "project-id") + if err != nil { + // TODO: handle error. + } + _ = client // Use the client. +} + +func Example_serviceAccountFile() { + // Use a JSON key file associated with a Google service account to + // authenticate and authorize. Service Account keys can be created and + // downloaded from https://console.developers.google.com/permissions/serviceaccounts. + // + // Note: This example uses the datastore client, but the same steps apply to + // the other client libraries underneath this package. + client, err := datastore.NewClient(context.Background(), + "project-id", option.WithServiceAccountFile("/path/to/service-account-key.json")) + if err != nil { + // TODO: handle error. + } + _ = client // Use the client. +} diff --git a/vendor/cloud.google.com/go/bigquery/bigquery.go b/vendor/cloud.google.com/go/bigquery/bigquery.go new file mode 100644 index 00000000..3df26a4c --- /dev/null +++ b/vendor/cloud.google.com/go/bigquery/bigquery.go @@ -0,0 +1,76 @@ +// Copyright 2015 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package bigquery + +// TODO(mcgreevy): support dry-run mode when creating jobs. 
+ +import ( + "fmt" + + "google.golang.org/api/option" + "google.golang.org/api/transport" + + "golang.org/x/net/context" + bq "google.golang.org/api/bigquery/v2" +) + +const prodAddr = "https://www.googleapis.com/bigquery/v2/" + +// ExternalData is a table which is stored outside of BigQuery. It is implemented by GCSReference. +type ExternalData interface { + externalDataConfig() bq.ExternalDataConfiguration +} + +const Scope = "https://www.googleapis.com/auth/bigquery" +const userAgent = "gcloud-golang-bigquery/20160429" + +// Client may be used to perform BigQuery operations. +type Client struct { + service service + projectID string +} + +// NewClient constructs a new Client which can perform BigQuery operations. +// Operations performed via the client are billed to the specified GCP project. +func NewClient(ctx context.Context, projectID string, opts ...option.ClientOption) (*Client, error) { + o := []option.ClientOption{ + option.WithEndpoint(prodAddr), + option.WithScopes(Scope), + option.WithUserAgent(userAgent), + } + o = append(o, opts...) + httpClient, endpoint, err := transport.NewHTTPClient(ctx, o...) + if err != nil { + return nil, fmt.Errorf("dialing: %v", err) + } + + s, err := newBigqueryService(httpClient, endpoint) + if err != nil { + return nil, fmt.Errorf("constructing bigquery client: %v", err) + } + + c := &Client{ + service: s, + projectID: projectID, + } + return c, nil +} + +// Close closes any resources held by the client. +// Close should be called when the client is no longer needed. +// It need not be called at program exit. +func (c *Client) Close() error { + return nil +} diff --git a/vendor/cloud.google.com/go/bigquery/copy.go b/vendor/cloud.google.com/go/bigquery/copy.go new file mode 100644 index 00000000..640f73c8 --- /dev/null +++ b/vendor/cloud.google.com/go/bigquery/copy.go @@ -0,0 +1,74 @@ +// Copyright 2016 Google Inc. All Rights Reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package bigquery + +import ( + "golang.org/x/net/context" + bq "google.golang.org/api/bigquery/v2" +) + +// CopyConfig holds the configuration for a copy job. +type CopyConfig struct { + // JobID is the ID to use for the copy job. If unset, a job ID will be automatically created. + JobID string + + // Srcs are the tables from which data will be copied. + Srcs []*Table + + // Dst is the table into which the data will be copied. + Dst *Table + + // CreateDisposition specifies the circumstances under which the destination table will be created. + // The default is CreateIfNeeded. + CreateDisposition TableCreateDisposition + + // WriteDisposition specifies how existing data in the destination table is treated. + // The default is WriteAppend. + WriteDisposition TableWriteDisposition +} + +// A Copier copies data into a BigQuery table from one or more BigQuery tables. +type Copier struct { + CopyConfig + c *Client +} + +// CopierFrom returns a Copier which can be used to copy data into a +// BigQuery table from one or more BigQuery tables. +// The returned Copier may optionally be further configured before its Run method is called. +func (t *Table) CopierFrom(srcs ...*Table) *Copier { + return &Copier{ + c: t.c, + CopyConfig: CopyConfig{ + Srcs: srcs, + Dst: t, + }, + } +} + +// Run initiates a copy job. 
+func (c *Copier) Run(ctx context.Context) (*Job, error) { + conf := &bq.JobConfigurationTableCopy{ + CreateDisposition: string(c.CreateDisposition), + WriteDisposition: string(c.WriteDisposition), + DestinationTable: c.Dst.tableRefProto(), + } + for _, t := range c.Srcs { + conf.SourceTables = append(conf.SourceTables, t.tableRefProto()) + } + job := &bq.Job{Configuration: &bq.JobConfiguration{Copy: conf}} + setJobRef(job, c.JobID, c.c.projectID) + return c.c.service.insertJob(ctx, c.c.projectID, &insertJobConf{job: job}) +} diff --git a/vendor/cloud.google.com/go/bigquery/copy_test.go b/vendor/cloud.google.com/go/bigquery/copy_test.go new file mode 100644 index 00000000..f55ce28a --- /dev/null +++ b/vendor/cloud.google.com/go/bigquery/copy_test.go @@ -0,0 +1,136 @@ +// Copyright 2015 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package bigquery + +import ( + "reflect" + "testing" + + "golang.org/x/net/context" + bq "google.golang.org/api/bigquery/v2" +) + +func defaultCopyJob() *bq.Job { + return &bq.Job{ + Configuration: &bq.JobConfiguration{ + Copy: &bq.JobConfigurationTableCopy{ + DestinationTable: &bq.TableReference{ + ProjectId: "d-project-id", + DatasetId: "d-dataset-id", + TableId: "d-table-id", + }, + SourceTables: []*bq.TableReference{ + { + ProjectId: "s-project-id", + DatasetId: "s-dataset-id", + TableId: "s-table-id", + }, + }, + }, + }, + } +} + +func TestCopy(t *testing.T) { + testCases := []struct { + dst *Table + srcs []*Table + config CopyConfig + want *bq.Job + }{ + { + dst: &Table{ + ProjectID: "d-project-id", + DatasetID: "d-dataset-id", + TableID: "d-table-id", + }, + srcs: []*Table{ + { + ProjectID: "s-project-id", + DatasetID: "s-dataset-id", + TableID: "s-table-id", + }, + }, + want: defaultCopyJob(), + }, + { + dst: &Table{ + ProjectID: "d-project-id", + DatasetID: "d-dataset-id", + TableID: "d-table-id", + }, + srcs: []*Table{ + { + ProjectID: "s-project-id", + DatasetID: "s-dataset-id", + TableID: "s-table-id", + }, + }, + config: CopyConfig{ + CreateDisposition: CreateNever, + WriteDisposition: WriteTruncate, + }, + want: func() *bq.Job { + j := defaultCopyJob() + j.Configuration.Copy.CreateDisposition = "CREATE_NEVER" + j.Configuration.Copy.WriteDisposition = "WRITE_TRUNCATE" + return j + }(), + }, + { + dst: &Table{ + ProjectID: "d-project-id", + DatasetID: "d-dataset-id", + TableID: "d-table-id", + }, + srcs: []*Table{ + { + ProjectID: "s-project-id", + DatasetID: "s-dataset-id", + TableID: "s-table-id", + }, + }, + config: CopyConfig{JobID: "job-id"}, + want: func() *bq.Job { + j := defaultCopyJob() + j.JobReference = &bq.JobReference{ + JobId: "job-id", + ProjectId: "client-project-id", + } + return j + }(), + }, + } + + for _, tc := range testCases { + s := &testService{} + c := &Client{ + service: s, + projectID: "client-project-id", + } + tc.dst.c = 
c + copier := tc.dst.CopierFrom(tc.srcs...) + tc.config.Srcs = tc.srcs + tc.config.Dst = tc.dst + copier.CopyConfig = tc.config + if _, err := copier.Run(context.Background()); err != nil { + t.Errorf("err calling Run: %v", err) + continue + } + if !reflect.DeepEqual(s.Job, tc.want) { + t.Errorf("copying: got:\n%v\nwant:\n%v", s.Job, tc.want) + } + } +} diff --git a/vendor/cloud.google.com/go/bigquery/create_table_test.go b/vendor/cloud.google.com/go/bigquery/create_table_test.go new file mode 100644 index 00000000..2a7d9d10 --- /dev/null +++ b/vendor/cloud.google.com/go/bigquery/create_table_test.go @@ -0,0 +1,103 @@ +// Copyright 2015 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package bigquery + +import ( + "reflect" + "testing" + "time" + + "golang.org/x/net/context" + bq "google.golang.org/api/bigquery/v2" +) + +type createTableRecorder struct { + conf *createTableConf + service +} + +func (rec *createTableRecorder) createTable(ctx context.Context, conf *createTableConf) error { + rec.conf = conf + return nil +} + +func TestCreateTableOptions(t *testing.T) { + s := &createTableRecorder{} + c := &Client{ + projectID: "p", + service: s, + } + ds := c.Dataset("d") + table := ds.Table("t") + exp := time.Now() + q := "query" + if err := table.Create(context.Background(), TableExpiration(exp), ViewQuery(q), UseStandardSQL()); err != nil { + t.Fatalf("err calling Table.Create: %v", err) + } + want := createTableConf{ + projectID: "p", + datasetID: "d", + tableID: "t", + expiration: exp, + viewQuery: q, + useStandardSQL: true, + } + if !reflect.DeepEqual(*s.conf, want) { + t.Errorf("createTableConf: got:\n%v\nwant:\n%v", *s.conf, want) + } + + sc := Schema{fieldSchema("desc", "name", "STRING", false, true)} + if err := table.Create(context.Background(), TableExpiration(exp), sc); err != nil { + t.Fatalf("err calling Table.Create: %v", err) + } + want = createTableConf{ + projectID: "p", + datasetID: "d", + tableID: "t", + expiration: exp, + // No need for an elaborate schema, that is tested in schema_test.go. 
+ schema: &bq.TableSchema{ + Fields: []*bq.TableFieldSchema{ + bqTableFieldSchema("desc", "name", "STRING", "REQUIRED"), + }, + }, + } + if !reflect.DeepEqual(*s.conf, want) { + t.Errorf("createTableConf: got:\n%v\nwant:\n%v", *s.conf, want) + } + + partitionCases := []struct { + timePartitioning TimePartitioning + expectedExpiration time.Duration + }{ + {TimePartitioning{}, time.Duration(0)}, + {TimePartitioning{time.Second}, time.Second}, + } + + for _, c := range partitionCases { + if err := table.Create(context.Background(), c.timePartitioning); err != nil { + t.Fatalf("err calling Table.Create: %v", err) + } + want = createTableConf{ + projectID: "p", + datasetID: "d", + tableID: "t", + timePartitioning: &TimePartitioning{c.expectedExpiration}, + } + if !reflect.DeepEqual(*s.conf, want) { + t.Errorf("createTableConf: got:\n%v\nwant:\n%v", *s.conf, want) + } + } +} diff --git a/vendor/cloud.google.com/go/bigquery/dataset.go b/vendor/cloud.google.com/go/bigquery/dataset.go new file mode 100644 index 00000000..80eacf65 --- /dev/null +++ b/vendor/cloud.google.com/go/bigquery/dataset.go @@ -0,0 +1,188 @@ +// Copyright 2015 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package bigquery + +import ( + "time" + + "golang.org/x/net/context" + "google.golang.org/api/iterator" +) + +// Dataset is a reference to a BigQuery dataset. 
+type Dataset struct {
+ ProjectID string
+ DatasetID string
+ c *Client
+}
+
+type DatasetMetadata struct {
+ CreationTime time.Time
+ LastModifiedTime time.Time // When the dataset or any of its tables were modified.
+ DefaultTableExpiration time.Duration
+ Description string // The user-friendly description of this dataset.
+ Name string // The user-friendly name for this dataset.
+ ID string
+ Location string // The geo location of the dataset.
+ Labels map[string]string // User-provided labels.
+ // TODO(jba): access rules
+}
+
+// Dataset creates a handle to a BigQuery dataset in the client's project.
+func (c *Client) Dataset(id string) *Dataset {
+ return c.DatasetInProject(c.projectID, id)
+}
+
+// DatasetInProject creates a handle to a BigQuery dataset in the specified project.
+func (c *Client) DatasetInProject(projectID, datasetID string) *Dataset {
+ return &Dataset{
+ ProjectID: projectID,
+ DatasetID: datasetID,
+ c: c,
+ }
+}
+
+// Create creates a dataset in the BigQuery service. An error will be returned
+// if the dataset already exists.
+func (d *Dataset) Create(ctx context.Context) error {
+ return d.c.service.insertDataset(ctx, d.DatasetID, d.ProjectID)
+}
+
+// Delete deletes the dataset.
+func (d *Dataset) Delete(ctx context.Context) error {
+ return d.c.service.deleteDataset(ctx, d.DatasetID, d.ProjectID)
+}
+
+// Metadata fetches the metadata for the dataset.
+func (d *Dataset) Metadata(ctx context.Context) (*DatasetMetadata, error) {
+ return d.c.service.getDatasetMetadata(ctx, d.ProjectID, d.DatasetID)
+}
+
+// Table creates a handle to a BigQuery table in the dataset.
+// To determine if a table exists, call Table.Metadata.
+// If the table does not already exist, use Table.Create to create it.
+func (d *Dataset) Table(tableID string) *Table {
+ return &Table{ProjectID: d.ProjectID, DatasetID: d.DatasetID, TableID: tableID, c: d.c}
+}
+
+// Tables returns an iterator over the tables in the Dataset.
+func (d *Dataset) Tables(ctx context.Context) *TableIterator { + it := &TableIterator{ + ctx: ctx, + dataset: d, + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo( + it.fetch, + func() int { return len(it.tables) }, + func() interface{} { b := it.tables; it.tables = nil; return b }) + return it +} + +// A TableIterator is an iterator over Tables. +type TableIterator struct { + ctx context.Context + dataset *Dataset + tables []*Table + pageInfo *iterator.PageInfo + nextFunc func() error +} + +// Next returns the next result. Its second return value is Done if there are +// no more results. Once Next returns Done, all subsequent calls will return +// Done. +func (it *TableIterator) Next() (*Table, error) { + if err := it.nextFunc(); err != nil { + return nil, err + } + t := it.tables[0] + it.tables = it.tables[1:] + return t, nil +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *TableIterator) PageInfo() *iterator.PageInfo { return it.pageInfo } + +func (it *TableIterator) fetch(pageSize int, pageToken string) (string, error) { + tables, tok, err := it.dataset.c.service.listTables(it.ctx, it.dataset.ProjectID, it.dataset.DatasetID, pageSize, pageToken) + if err != nil { + return "", err + } + for _, t := range tables { + t.c = it.dataset.c + it.tables = append(it.tables, t) + } + return tok, nil +} + +// Datasets returns an iterator over the datasets in the Client's project. +func (c *Client) Datasets(ctx context.Context) *DatasetIterator { + return c.DatasetsInProject(ctx, c.projectID) +} + +// DatasetsInProject returns an iterator over the datasets in the provided project. 
+func (c *Client) DatasetsInProject(ctx context.Context, projectID string) *DatasetIterator { + it := &DatasetIterator{ + ctx: ctx, + c: c, + projectID: projectID, + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo( + it.fetch, + func() int { return len(it.items) }, + func() interface{} { b := it.items; it.items = nil; return b }) + return it +} + +// DatasetIterator iterates over the datasets in a project. +type DatasetIterator struct { + // ListHidden causes hidden datasets to be listed when set to true. + ListHidden bool + + // Filter restricts the datasets returned by label. The filter syntax is described in + // https://cloud.google.com/bigquery/docs/labeling-datasets#filtering_datasets_using_labels + Filter string + + ctx context.Context + projectID string + c *Client + pageInfo *iterator.PageInfo + nextFunc func() error + items []*Dataset +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *DatasetIterator) PageInfo() *iterator.PageInfo { return it.pageInfo } + +func (it *DatasetIterator) Next() (*Dataset, error) { + if err := it.nextFunc(); err != nil { + return nil, err + } + item := it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *DatasetIterator) fetch(pageSize int, pageToken string) (string, error) { + datasets, nextPageToken, err := it.c.service.listDatasets(it.ctx, it.projectID, + pageSize, pageToken, it.ListHidden, it.Filter) + if err != nil { + return "", err + } + for _, d := range datasets { + d.c = it.c + it.items = append(it.items, d) + } + return nextPageToken, nil +} diff --git a/vendor/cloud.google.com/go/bigquery/dataset_test.go b/vendor/cloud.google.com/go/bigquery/dataset_test.go new file mode 100644 index 00000000..01b8ebdb --- /dev/null +++ b/vendor/cloud.google.com/go/bigquery/dataset_test.go @@ -0,0 +1,156 @@ +// Copyright 2015 Google Inc. All Rights Reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package bigquery + +import ( + "errors" + "strconv" + "testing" + + "golang.org/x/net/context" + itest "google.golang.org/api/iterator/testing" +) + +// readServiceStub services read requests by returning data from an in-memory list of values. +type listTablesServiceStub struct { + expectedProject, expectedDataset string + tables []*Table + service +} + +func (s *listTablesServiceStub) listTables(ctx context.Context, projectID, datasetID string, pageSize int, pageToken string) ([]*Table, string, error) { + if projectID != s.expectedProject { + return nil, "", errors.New("wrong project id") + } + if datasetID != s.expectedDataset { + return nil, "", errors.New("wrong dataset id") + } + const maxPageSize = 2 + if pageSize <= 0 || pageSize > maxPageSize { + pageSize = maxPageSize + } + start := 0 + if pageToken != "" { + var err error + start, err = strconv.Atoi(pageToken) + if err != nil { + return nil, "", err + } + } + end := start + pageSize + if end > len(s.tables) { + end = len(s.tables) + } + nextPageToken := "" + if end < len(s.tables) { + nextPageToken = strconv.Itoa(end) + } + return s.tables[start:end], nextPageToken, nil +} + +func TestTables(t *testing.T) { + t1 := &Table{ProjectID: "p1", DatasetID: "d1", TableID: "t1"} + t2 := &Table{ProjectID: "p1", DatasetID: "d1", TableID: "t2"} + t3 := &Table{ProjectID: "p1", DatasetID: "d1", TableID: "t3"} + allTables := []*Table{t1, t2, t3} + c := &Client{ 
+ service: &listTablesServiceStub{ + expectedProject: "x", + expectedDataset: "y", + tables: allTables, + }, + projectID: "x", + } + msg, ok := itest.TestIterator(allTables, + func() interface{} { return c.Dataset("y").Tables(context.Background()) }, + func(it interface{}) (interface{}, error) { return it.(*TableIterator).Next() }) + if !ok { + t.Error(msg) + } +} + +type listDatasetsFake struct { + service + + projectID string + datasets []*Dataset + hidden map[*Dataset]bool +} + +func (df *listDatasetsFake) listDatasets(_ context.Context, projectID string, pageSize int, pageToken string, listHidden bool, filter string) ([]*Dataset, string, error) { + const maxPageSize = 2 + if pageSize <= 0 || pageSize > maxPageSize { + pageSize = maxPageSize + } + if filter != "" { + return nil, "", errors.New("filter not supported") + } + if projectID != df.projectID { + return nil, "", errors.New("bad project ID") + } + start := 0 + if pageToken != "" { + var err error + start, err = strconv.Atoi(pageToken) + if err != nil { + return nil, "", err + } + } + var ( + i int + result []*Dataset + nextPageToken string + ) + for i = start; len(result) < pageSize && i < len(df.datasets); i++ { + if df.hidden[df.datasets[i]] && !listHidden { + continue + } + result = append(result, df.datasets[i]) + } + if i < len(df.datasets) { + nextPageToken = strconv.Itoa(i) + } + return result, nextPageToken, nil +} + +func TestDatasets(t *testing.T) { + service := &listDatasetsFake{projectID: "p"} + client := &Client{service: service} + datasets := []*Dataset{ + {"p", "a", client}, + {"p", "b", client}, + {"p", "hidden", client}, + {"p", "c", client}, + } + service.datasets = datasets + service.hidden = map[*Dataset]bool{datasets[2]: true} + c := &Client{ + projectID: "p", + service: service, + } + msg, ok := itest.TestIterator(datasets, + func() interface{} { it := c.Datasets(context.Background()); it.ListHidden = true; return it }, + func(it interface{}) (interface{}, error) { return 
it.(*DatasetIterator).Next() }) + if !ok { + t.Fatalf("ListHidden=true: %s", msg) + } + + msg, ok = itest.TestIterator([]*Dataset{datasets[0], datasets[1], datasets[3]}, + func() interface{} { it := c.Datasets(context.Background()); it.ListHidden = false; return it }, + func(it interface{}) (interface{}, error) { return it.(*DatasetIterator).Next() }) + if !ok { + t.Fatalf("ListHidden=false: %s", msg) + } +} diff --git a/vendor/cloud.google.com/go/bigquery/doc.go b/vendor/cloud.google.com/go/bigquery/doc.go new file mode 100644 index 00000000..f644792b --- /dev/null +++ b/vendor/cloud.google.com/go/bigquery/doc.go @@ -0,0 +1,295 @@ +// Copyright 2015 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/* +Package bigquery provides a client for the BigQuery service. + +Note: This package is in beta. Some backwards-incompatible changes may occur. + +The following assumes a basic familiarity with BigQuery concepts. +See https://cloud.google.com/bigquery/docs. + + +Creating a Client + +To start working with this package, create a client: + + ctx := context.Background() + client, err := bigquery.NewClient(ctx, projectID) + if err != nil { + // TODO: Handle error. 
+ }
+
+Querying
+
+To query existing tables, create a Query and call its Read method:
+
+ q := client.Query(`
+ SELECT year, SUM(number) as num
+ FROM [bigquery-public-data:usa_names.usa_1910_2013]
+ WHERE name = "William"
+ GROUP BY year
+ ORDER BY year
+ `)
+ it, err := q.Read(ctx)
+ if err != nil {
+ // TODO: Handle error.
+ }
+
+Then iterate through the resulting rows. You can store a row using
+anything that implements the ValueLoader interface, or with a slice or map of bigquery.Value.
+A slice is simplest:
+
+ for {
+ var values []bigquery.Value
+ err := it.Next(&values)
+ if err == iterator.Done {
+ break
+ }
+ if err != nil {
+ // TODO: Handle error.
+ }
+ fmt.Println(values)
+ }
+
+You can also use a struct whose exported fields match the query:
+
+ type Count struct {
+ Year int
+ Num int
+ }
+ for {
+ var c Count
+ err := it.Next(&c)
+ if err == iterator.Done {
+ break
+ }
+ if err != nil {
+ // TODO: Handle error.
+ }
+ fmt.Println(c)
+ }
+
+You can also start the query running and get the results later.
+Create the query as above, but call Run instead of Read. This returns a Job,
+which represents an asynchronous operation.
+
+ job, err := q.Run(ctx)
+ if err != nil {
+ // TODO: Handle error.
+ }
+
+Get the job's ID, a printable string. You can save this string to retrieve
+the results at a later time, even in another process.
+
+ jobID := job.ID()
+ fmt.Printf("The job ID is %s\n", jobID)
+
+To retrieve the job's results from the ID, first look up the Job:
+
+ job, err = client.JobFromID(ctx, jobID)
+ if err != nil {
+ // TODO: Handle error.
+ }
+
+Use the Job.Read method to obtain an iterator, and loop over the rows.
+Query.Read is just a convenience method that combines Query.Run and Job.Read.
+
+ it, err = job.Read(ctx)
+ if err != nil {
+ // TODO: Handle error.
+ }
+ // Proceed with iteration as above.
+ +Datasets and Tables + +You can refer to datasets in the client's project with the Dataset method, and +in other projects with the DatasetInProject method: + + myDataset := client.Dataset("my_dataset") + yourDataset := client.DatasetInProject("your-project-id", "your_dataset") + +These methods create references to datasets, not the datasets themselves. You can have +a dataset reference even if the dataset doesn't exist yet. Use Dataset.Create to +create a dataset from a reference: + + if err := myDataset.Create(ctx); err != nil { + // TODO: Handle error. + } + +You can refer to tables with Dataset.Table. Like bigquery.Dataset, bigquery.Table is a reference +to an object in BigQuery that may or may not exist. + + table := myDataset.Table("my_table") + +You can create, delete and update the metadata of tables with methods on Table. +Table.Create supports a few options. For instance, you could create a temporary table with: + + err = myDataset.Table("temp").Create(ctx, bigquery.TableExpiration(time.Now().Add(1*time.Hour))) + if err != nil { + // TODO: Handle error. + } + +We'll see how to create a table with a schema in the next section. + +Schemas + +There are two ways to construct schemas with this package. +You can build a schema by hand, like so: + + schema1 := bigquery.Schema{ + &bigquery.FieldSchema{Name: "Name", Required: true, Type: bigquery.StringFieldType}, + &bigquery.FieldSchema{Name: "Grades", Repeated: true, Type: bigquery.IntegerFieldType}, + } + +Or you can infer the schema from a struct: + + type student struct { + Name string + Grades []int + } + schema2, err := bigquery.InferSchema(student{}) + if err != nil { + // TODO: Handle error. + } + // schema1 and schema2 are identical. 
+
+Struct inference supports tags like those of the encoding/json package,
+so you can change names or ignore fields:
+
+ type student2 struct {
+ Name string `bigquery:"full_name"`
+ Grades []int
+ Secret string `bigquery:"-"`
+ }
+ schema3, err := bigquery.InferSchema(student2{})
+ if err != nil {
+ // TODO: Handle error.
+ }
+ // schema3 has fields "full_name" and "Grades".
+
+Having constructed a schema, you can pass it to Table.Create as an option:
+
+ if err := table.Create(ctx, schema1); err != nil {
+ // TODO: Handle error.
+ }
+
+Copying
+
+You can copy one or more tables to another table. Begin by constructing a Copier
+describing the copy. Then set any desired copy options, and finally call Run to get a Job:
+
+ copier := myDataset.Table("dest").CopierFrom(myDataset.Table("src"))
+ copier.WriteDisposition = bigquery.WriteTruncate
+ job, err = copier.Run(ctx)
+ if err != nil {
+ // TODO: Handle error.
+ }
+
+You can chain the call to Run if you don't want to set options:
+
+ job, err = myDataset.Table("dest").CopierFrom(myDataset.Table("src")).Run(ctx)
+ if err != nil {
+ // TODO: Handle error.
+ }
+
+You can wait for your job to complete:
+
+ status, err := job.Wait(ctx)
+ if err != nil {
+ // TODO: Handle error.
+ }
+
+Job.Wait polls with exponential backoff. You can also poll yourself, if you
+wish:
+
+ for {
+ status, err := job.Status(ctx)
+ if err != nil {
+ // TODO: Handle error.
+ }
+ if status.Done() {
+ if status.Err() != nil {
+ log.Fatalf("Job failed with error %v", status.Err())
+ }
+ break
+ }
+ time.Sleep(pollInterval)
+ }
+
+Loading and Uploading
+
+There are two ways to populate a table with this package: load the data from a Google Cloud Storage
+object, or upload rows directly from your program.
+
+For loading, first create a GCSReference, configuring it if desired. Then make a Loader, optionally configure
+it as well, and call its Run method.
+ + gcsRef := bigquery.NewGCSReference("gs://my-bucket/my-object") + gcsRef.AllowJaggedRows = true + loader := myDataset.Table("dest").LoaderFrom(gcsRef) + loader.CreateDisposition = bigquery.CreateNever + job, err = loader.Run(ctx) + // Poll the job for completion if desired, as above. + +To upload, first define a type that implements the ValueSaver interface, which has a single method named Save. +Then create an Uploader, and call its Put method with a slice of values. + + u := table.Uploader() + // Item implements the ValueSaver interface. + items := []*Item{ + {Name: "n1", Size: 32.6, Count: 7}, + {Name: "n2", Size: 4, Count: 2}, + {Name: "n3", Size: 101.5, Count: 1}, + } + if err := u.Put(ctx, items); err != nil { + // TODO: Handle error. + } + +You can also upload a struct that doesn't implement ValueSaver. Use the StructSaver type +to specify the schema and insert ID by hand, or just supply the struct or struct pointer +directly and the schema will be inferred: + + type Item2 struct { + Name string + Size float64 + Count int + } + // Item implements the ValueSaver interface. + items2 := []*Item2{ + {Name: "n1", Size: 32.6, Count: 7}, + {Name: "n2", Size: 4, Count: 2}, + {Name: "n3", Size: 101.5, Count: 1}, + } + if err := u.Put(ctx, items2); err != nil { + // TODO: Handle error. + } + +Extracting + +If you've been following so far, extracting data from a BigQuery table +into a Google Cloud Storage object will feel familiar. First create an +Extractor, then optionally configure it, and lastly call its Run method. + + extractor := table.ExtractorTo(gcsRef) + extractor.DisableHeader = true + job, err = extractor.Run(ctx) + // Poll the job for completion if desired, as above. + +Authentication + +See examples of authorization and authentication at +https://godoc.org/cloud.google.com/go#pkg-examples. 
+*/ +package bigquery // import "cloud.google.com/go/bigquery" diff --git a/vendor/cloud.google.com/go/bigquery/error.go b/vendor/cloud.google.com/go/bigquery/error.go new file mode 100644 index 00000000..b59ac6e6 --- /dev/null +++ b/vendor/cloud.google.com/go/bigquery/error.go @@ -0,0 +1,82 @@ +// Copyright 2015 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package bigquery + +import ( + "fmt" + + bq "google.golang.org/api/bigquery/v2" +) + +// An Error contains detailed information about a failed bigquery operation. +type Error struct { + // Mirrors bq.ErrorProto, but drops DebugInfo + Location, Message, Reason string +} + +func (e Error) Error() string { + return fmt.Sprintf("{Location: %q; Message: %q; Reason: %q}", e.Location, e.Message, e.Reason) +} + +func errorFromErrorProto(ep *bq.ErrorProto) *Error { + if ep == nil { + return nil + } + return &Error{ + Location: ep.Location, + Message: ep.Message, + Reason: ep.Reason, + } +} + +// A MultiError contains multiple related errors. +type MultiError []error + +func (m MultiError) Error() string { + switch len(m) { + case 0: + return "(0 errors)" + case 1: + return m[0].Error() + case 2: + return m[0].Error() + " (and 1 other error)" + } + return fmt.Sprintf("%s (and %d other errors)", m[0].Error(), len(m)-1) +} + +// RowInsertionError contains all errors that occurred when attempting to insert a row. 
+type RowInsertionError struct { + InsertID string // The InsertID associated with the affected row. + RowIndex int // The 0-based index of the affected row in the batch of rows being inserted. + Errors MultiError +} + +func (e *RowInsertionError) Error() string { + errFmt := "insertion of row [insertID: %q; insertIndex: %v] failed with error: %s" + return fmt.Sprintf(errFmt, e.InsertID, e.RowIndex, e.Errors.Error()) +} + +// PutMultiError contains an error for each row which was not successfully inserted +// into a BigQuery table. +type PutMultiError []RowInsertionError + +func (pme PutMultiError) Error() string { + plural := "s" + if len(pme) == 1 { + plural = "" + } + + return fmt.Sprintf("%v row insertion%s failed", len(pme), plural) +} diff --git a/vendor/cloud.google.com/go/bigquery/error_test.go b/vendor/cloud.google.com/go/bigquery/error_test.go new file mode 100644 index 00000000..c0f40bae --- /dev/null +++ b/vendor/cloud.google.com/go/bigquery/error_test.go @@ -0,0 +1,109 @@ +// Copyright 2015 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package bigquery + +import ( + "errors" + "reflect" + "strings" + "testing" + + bq "google.golang.org/api/bigquery/v2" +) + +func rowInsertionError(msg string) RowInsertionError { + return RowInsertionError{Errors: []error{errors.New(msg)}} +} + +func TestPutMultiErrorString(t *testing.T) { + testCases := []struct { + errs PutMultiError + want string + }{ + { + errs: PutMultiError{}, + want: "0 row insertions failed", + }, + { + errs: PutMultiError{rowInsertionError("a")}, + want: "1 row insertion failed", + }, + { + errs: PutMultiError{rowInsertionError("a"), rowInsertionError("b")}, + want: "2 row insertions failed", + }, + } + + for _, tc := range testCases { + if tc.errs.Error() != tc.want { + t.Errorf("PutMultiError string: got:\n%v\nwant:\n%v", tc.errs.Error(), tc.want) + } + } +} + +func TestMultiErrorString(t *testing.T) { + testCases := []struct { + errs MultiError + want string + }{ + { + errs: MultiError{}, + want: "(0 errors)", + }, + { + errs: MultiError{errors.New("a")}, + want: "a", + }, + { + errs: MultiError{errors.New("a"), errors.New("b")}, + want: "a (and 1 other error)", + }, + { + errs: MultiError{errors.New("a"), errors.New("b"), errors.New("c")}, + want: "a (and 2 other errors)", + }, + } + + for _, tc := range testCases { + if tc.errs.Error() != tc.want { + t.Errorf("PutMultiError string: got:\n%v\nwant:\n%v", tc.errs.Error(), tc.want) + } + } +} + +func TestErrorFromErrorProto(t *testing.T) { + for _, test := range []struct { + in *bq.ErrorProto + want *Error + }{ + {nil, nil}, + { + in: &bq.ErrorProto{Location: "L", Message: "M", Reason: "R"}, + want: &Error{Location: "L", Message: "M", Reason: "R"}, + }, + } { + if got := errorFromErrorProto(test.in); !reflect.DeepEqual(got, test.want) { + t.Errorf("%v: got %v, want %v", test.in, got, test.want) + } + } +} + +func TestErrorString(t *testing.T) { + e := &Error{Location: "", Message: "", Reason: ""} + got := e.Error() + if !strings.Contains(got, "") || !strings.Contains(got, "") || 
!strings.Contains(got, "") { + t.Errorf(`got %q, expected to see "", "" and ""`, got) + } +} diff --git a/vendor/cloud.google.com/go/bigquery/examples_test.go b/vendor/cloud.google.com/go/bigquery/examples_test.go new file mode 100644 index 00000000..5ceafff7 --- /dev/null +++ b/vendor/cloud.google.com/go/bigquery/examples_test.go @@ -0,0 +1,652 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package bigquery_test + +import ( + "fmt" + "os" + + "cloud.google.com/go/bigquery" + "golang.org/x/net/context" + "google.golang.org/api/iterator" +) + +func ExampleNewClient() { + ctx := context.Background() + client, err := bigquery.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + _ = client // TODO: Use client. +} + +func ExampleClient_Dataset() { + ctx := context.Background() + client, err := bigquery.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + ds := client.Dataset("my_dataset") + fmt.Println(ds) +} + +func ExampleClient_DatasetInProject() { + ctx := context.Background() + client, err := bigquery.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + ds := client.DatasetInProject("their-project-id", "their-dataset") + fmt.Println(ds) +} + +func ExampleClient_Datasets() { + ctx := context.Background() + client, err := bigquery.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. 
+ } + it := client.Datasets(ctx) + _ = it // TODO: iterate using Next or iterator.Pager. +} + +func ExampleClient_DatasetsInProject() { + ctx := context.Background() + client, err := bigquery.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + it := client.DatasetsInProject(ctx, "their-project-id") + _ = it // TODO: iterate using Next or iterator.Pager. +} + +func getJobID() string { return "" } + +func ExampleClient_JobFromID() { + ctx := context.Background() + client, err := bigquery.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + jobID := getJobID() // Get a job ID using Job.ID, the console or elsewhere. + job, err := client.JobFromID(ctx, jobID) + if err != nil { + // TODO: Handle error. + } + fmt.Println(job) +} + +func ExampleNewGCSReference() { + gcsRef := bigquery.NewGCSReference("gs://my-bucket/my-object") + fmt.Println(gcsRef) +} + +func ExampleClient_Query() { + ctx := context.Background() + client, err := bigquery.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + q := client.Query("select name, num from t1") + q.DefaultProjectID = "project-id" + // TODO: set other options on the Query. + // TODO: Call Query.Run or Query.Read. +} + +func ExampleClient_Query_parameters() { + ctx := context.Background() + client, err := bigquery.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + q := client.Query("select num from t1 where name = @user") + q.Parameters = []bigquery.QueryParameter{ + {Name: "user", Value: "Elizabeth"}, + } + // TODO: set other options on the Query. + // TODO: Call Query.Run or Query.Read. +} + +func ExampleQuery_Read() { + ctx := context.Background() + client, err := bigquery.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + q := client.Query("select name, num from t1") + it, err := q.Read(ctx) + if err != nil { + // TODO: Handle error. + } + _ = it // TODO: iterate using Next or iterator.Pager. 
+} + +func ExampleRowIterator_Next() { + ctx := context.Background() + client, err := bigquery.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + q := client.Query("select name, num from t1") + it, err := q.Read(ctx) + if err != nil { + // TODO: Handle error. + } + for { + var row []bigquery.Value + err := it.Next(&row) + if err == iterator.Done { + break + } + if err != nil { + // TODO: Handle error. + } + fmt.Println(row) + } +} + +func ExampleRowIterator_Next_struct() { + ctx := context.Background() + client, err := bigquery.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + + type score struct { + Name string + Num int + } + + q := client.Query("select name, num from t1") + it, err := q.Read(ctx) + if err != nil { + // TODO: Handle error. + } + for { + var s score + err := it.Next(&s) + if err == iterator.Done { + break + } + if err != nil { + // TODO: Handle error. + } + fmt.Println(s) + } +} + +func ExampleJob_Read() { + ctx := context.Background() + client, err := bigquery.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + q := client.Query("select name, num from t1") + // Call Query.Run to get a Job, then call Read on the job. + // Note: Query.Read is a shorthand for this. + job, err := q.Run(ctx) + if err != nil { + // TODO: Handle error. + } + it, err := job.Read(ctx) + if err != nil { + // TODO: Handle error. + } + _ = it // TODO: iterate using Next or iterator.Pager. +} + +func ExampleJob_Wait() { + ctx := context.Background() + client, err := bigquery.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + ds := client.Dataset("my_dataset") + job, err := ds.Table("t1").CopierFrom(ds.Table("t2")).Run(ctx) + if err != nil { + // TODO: Handle error. + } + status, err := job.Wait(ctx) + if err != nil { + // TODO: Handle error. + } + if status.Err() != nil { + // TODO: Handle error. 
+ } +} + +func ExampleDataset_Create() { + ctx := context.Background() + client, err := bigquery.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + if err := client.Dataset("my_dataset").Create(ctx); err != nil { + // TODO: Handle error. + } +} + +func ExampleDataset_Delete() { + ctx := context.Background() + client, err := bigquery.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + if err := client.Dataset("my_dataset").Delete(ctx); err != nil { + // TODO: Handle error. + } +} + +func ExampleDataset_Metadata() { + ctx := context.Background() + client, err := bigquery.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + md, err := client.Dataset("my_dataset").Metadata(ctx) + if err != nil { + // TODO: Handle error. + } + fmt.Println(md) +} + +func ExampleDataset_Table() { + ctx := context.Background() + client, err := bigquery.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + // Table creates a reference to the table. It does not create the actual + // table in BigQuery; to do so, use Table.Create. + t := client.Dataset("my_dataset").Table("my_table") + fmt.Println(t) +} + +func ExampleDataset_Tables() { + ctx := context.Background() + client, err := bigquery.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + it := client.Dataset("my_dataset").Tables(ctx) + _ = it // TODO: iterate using Next or iterator.Pager. +} + +func ExampleDatasetIterator_Next() { + ctx := context.Background() + client, err := bigquery.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + it := client.Datasets(ctx) + for { + ds, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + // TODO: Handle error. 
+ } + fmt.Println(ds) + } +} + +func ExampleInferSchema() { + type Item struct { + Name string + Size float64 + Count int + } + schema, err := bigquery.InferSchema(Item{}) + if err != nil { + fmt.Println(err) + // TODO: Handle error. + } + for _, fs := range schema { + fmt.Println(fs.Name, fs.Type) + } + // Output: + // Name STRING + // Size FLOAT + // Count INTEGER +} + +func ExampleInferSchema_tags() { + type Item struct { + Name string + Size float64 + Count int `bigquery:"number"` + Secret []byte `bigquery:"-"` + } + schema, err := bigquery.InferSchema(Item{}) + if err != nil { + fmt.Println(err) + // TODO: Handle error. + } + for _, fs := range schema { + fmt.Println(fs.Name, fs.Type) + } + // Output: + // Name STRING + // Size FLOAT + // number INTEGER +} + +func ExampleTable_Create() { + ctx := context.Background() + client, err := bigquery.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + t := client.Dataset("my_dataset").Table("new-table") + if err := t.Create(ctx); err != nil { + // TODO: Handle error. + } +} + +func ExampleTable_Create_schema() { + ctx := context.Background() + // Infer table schema from a Go type. + schema, err := bigquery.InferSchema(Item{}) + if err != nil { + // TODO: Handle error. + } + client, err := bigquery.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + t := client.Dataset("my_dataset").Table("new-table") + if err := t.Create(ctx, schema); err != nil { + // TODO: Handle error. + } +} + +func ExampleTable_Delete() { + ctx := context.Background() + client, err := bigquery.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + if err := client.Dataset("my_dataset").Table("my_table").Delete(ctx); err != nil { + // TODO: Handle error. + } +} + +func ExampleTable_Metadata() { + ctx := context.Background() + client, err := bigquery.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. 
+ } + md, err := client.Dataset("my_dataset").Table("my_table").Metadata(ctx) + if err != nil { + // TODO: Handle error. + } + fmt.Println(md) +} + +func ExampleTable_Uploader() { + ctx := context.Background() + client, err := bigquery.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + u := client.Dataset("my_dataset").Table("my_table").Uploader() + _ = u // TODO: Use u. +} + +func ExampleTable_Uploader_options() { + ctx := context.Background() + client, err := bigquery.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + u := client.Dataset("my_dataset").Table("my_table").Uploader() + u.SkipInvalidRows = true + u.IgnoreUnknownValues = true + _ = u // TODO: Use u. +} + +func ExampleTable_CopierFrom() { + ctx := context.Background() + client, err := bigquery.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + ds := client.Dataset("my_dataset") + c := ds.Table("combined").CopierFrom(ds.Table("t1"), ds.Table("t2")) + c.WriteDisposition = bigquery.WriteTruncate + // TODO: set other options on the Copier. + job, err := c.Run(ctx) + if err != nil { + // TODO: Handle error. + } + status, err := job.Wait(ctx) + if err != nil { + // TODO: Handle error. + } + if status.Err() != nil { + // TODO: Handle error. + } +} + +func ExampleTable_ExtractorTo() { + ctx := context.Background() + client, err := bigquery.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + gcsRef := bigquery.NewGCSReference("gs://my-bucket/my-object") + gcsRef.FieldDelimiter = ":" + // TODO: set other options on the GCSReference. + ds := client.Dataset("my_dataset") + extractor := ds.Table("my_table").ExtractorTo(gcsRef) + extractor.DisableHeader = true + // TODO: set other options on the Extractor. + job, err := extractor.Run(ctx) + if err != nil { + // TODO: Handle error. + } + status, err := job.Wait(ctx) + if err != nil { + // TODO: Handle error. + } + if status.Err() != nil { + // TODO: Handle error. 
+ } +} + +func ExampleTable_LoaderFrom() { + ctx := context.Background() + client, err := bigquery.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + gcsRef := bigquery.NewGCSReference("gs://my-bucket/my-object") + gcsRef.AllowJaggedRows = true + // TODO: set other options on the GCSReference. + ds := client.Dataset("my_dataset") + loader := ds.Table("my_table").LoaderFrom(gcsRef) + loader.CreateDisposition = bigquery.CreateNever + // TODO: set other options on the Loader. + job, err := loader.Run(ctx) + if err != nil { + // TODO: Handle error. + } + status, err := job.Wait(ctx) + if err != nil { + // TODO: Handle error. + } + if status.Err() != nil { + // TODO: Handle error. + } +} + +func ExampleTable_LoaderFrom_reader() { + ctx := context.Background() + client, err := bigquery.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + f, err := os.Open("data.csv") + if err != nil { + // TODO: Handle error. + } + rs := bigquery.NewReaderSource(f) + rs.AllowJaggedRows = true + // TODO: set other options on the GCSReference. + ds := client.Dataset("my_dataset") + loader := ds.Table("my_table").LoaderFrom(rs) + loader.CreateDisposition = bigquery.CreateNever + // TODO: set other options on the Loader. + job, err := loader.Run(ctx) + if err != nil { + // TODO: Handle error. + } + status, err := job.Wait(ctx) + if err != nil { + // TODO: Handle error. + } + if status.Err() != nil { + // TODO: Handle error. + } +} + +func ExampleTable_Read() { + ctx := context.Background() + client, err := bigquery.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + it := client.Dataset("my_dataset").Table("my_table").Read(ctx) + _ = it // TODO: iterate using Next or iterator.Pager. +} + +func ExampleTable_Update() { + ctx := context.Background() + client, err := bigquery.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. 
+ } + t := client.Dataset("my_dataset").Table("my_table") + tm, err := t.Update(ctx, bigquery.TableMetadataToUpdate{ + Description: "my favorite table", + }) + if err != nil { + // TODO: Handle error. + } + fmt.Println(tm) +} + +func ExampleTableIterator_Next() { + ctx := context.Background() + client, err := bigquery.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + it := client.Dataset("my_dataset").Tables(ctx) + for { + t, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + // TODO: Handle error. + } + fmt.Println(t) + } +} + +type Item struct { + Name string + Size float64 + Count int +} + +// Save implements the ValueSaver interface. +func (i *Item) Save() (map[string]bigquery.Value, string, error) { + return map[string]bigquery.Value{ + "Name": i.Name, + "Size": i.Size, + "Count": i.Count, + }, "", nil +} + +func ExampleUploader_Put() { + ctx := context.Background() + client, err := bigquery.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + u := client.Dataset("my_dataset").Table("my_table").Uploader() + // Item implements the ValueSaver interface. + items := []*Item{ + {Name: "n1", Size: 32.6, Count: 7}, + {Name: "n2", Size: 4, Count: 2}, + {Name: "n3", Size: 101.5, Count: 1}, + } + if err := u.Put(ctx, items); err != nil { + // TODO: Handle error. + } +} + +var schema bigquery.Schema + +func ExampleUploader_Put_structSaver() { + ctx := context.Background() + client, err := bigquery.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + u := client.Dataset("my_dataset").Table("my_table").Uploader() + + type score struct { + Name string + Num int + } + + // Assume schema holds the table's schema. 
+ savers := []*bigquery.StructSaver{ + {Struct: score{Name: "n1", Num: 12}, Schema: schema, InsertID: "id1"}, + {Struct: score{Name: "n2", Num: 31}, Schema: schema, InsertID: "id2"}, + {Struct: score{Name: "n3", Num: 7}, Schema: schema, InsertID: "id3"}, + } + if err := u.Put(ctx, savers); err != nil { + // TODO: Handle error. + } +} + +func ExampleUploader_Put_struct() { + ctx := context.Background() + client, err := bigquery.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + u := client.Dataset("my_dataset").Table("my_table").Uploader() + + type score struct { + Name string + Num int + } + scores := []score{ + {Name: "n1", Num: 12}, + {Name: "n2", Num: 31}, + {Name: "n3", Num: 7}, + } + // Schema is inferred from the score type. + if err := u.Put(ctx, scores); err != nil { + // TODO: Handle error. + } +} diff --git a/vendor/cloud.google.com/go/bigquery/extract.go b/vendor/cloud.google.com/go/bigquery/extract.go new file mode 100644 index 00000000..7e850b7f --- /dev/null +++ b/vendor/cloud.google.com/go/bigquery/extract.go @@ -0,0 +1,76 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package bigquery + +import ( + "golang.org/x/net/context" + bq "google.golang.org/api/bigquery/v2" +) + +// ExtractConfig holds the configuration for an extract job. +type ExtractConfig struct { + // JobID is the ID to use for the extract job. If empty, a job ID will be automatically created. 
+ JobID string + + // Src is the table from which data will be extracted. + Src *Table + + // Dst is the destination into which the data will be extracted. + Dst *GCSReference + + // DisableHeader disables the printing of a header row in exported data. + DisableHeader bool +} + +// An Extractor extracts data from a BigQuery table into Google Cloud Storage. +type Extractor struct { + ExtractConfig + c *Client +} + +// ExtractorTo returns an Extractor which can be used to extract data from a +// BigQuery table into Google Cloud Storage. +// The returned Extractor may optionally be further configured before its Run method is called. +func (t *Table) ExtractorTo(dst *GCSReference) *Extractor { + return &Extractor{ + c: t.c, + ExtractConfig: ExtractConfig{ + Src: t, + Dst: dst, + }, + } +} + +// Run initiates an extract job. +func (e *Extractor) Run(ctx context.Context) (*Job, error) { + conf := &bq.JobConfigurationExtract{} + job := &bq.Job{Configuration: &bq.JobConfiguration{Extract: conf}} + + setJobRef(job, e.JobID, e.c.projectID) + + conf.DestinationUris = append([]string{}, e.Dst.uris...) + conf.Compression = string(e.Dst.Compression) + conf.DestinationFormat = string(e.Dst.DestinationFormat) + conf.FieldDelimiter = e.Dst.FieldDelimiter + + conf.SourceTable = e.Src.tableRefProto() + + if e.DisableHeader { + f := false + conf.PrintHeader = &f + } + + return e.c.service.insertJob(ctx, e.c.projectID, &insertJobConf{job: job}) +} diff --git a/vendor/cloud.google.com/go/bigquery/extract_test.go b/vendor/cloud.google.com/go/bigquery/extract_test.go new file mode 100644 index 00000000..8079b182 --- /dev/null +++ b/vendor/cloud.google.com/go/bigquery/extract_test.go @@ -0,0 +1,102 @@ +// Copyright 2015 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package bigquery + +import ( + "reflect" + "testing" + + "golang.org/x/net/context" + + bq "google.golang.org/api/bigquery/v2" +) + +func defaultExtractJob() *bq.Job { + return &bq.Job{ + Configuration: &bq.JobConfiguration{ + Extract: &bq.JobConfigurationExtract{ + SourceTable: &bq.TableReference{ + ProjectId: "project-id", + DatasetId: "dataset-id", + TableId: "table-id", + }, + DestinationUris: []string{"uri"}, + }, + }, + } +} + +func TestExtract(t *testing.T) { + s := &testService{} + c := &Client{ + service: s, + projectID: "project-id", + } + + testCases := []struct { + dst *GCSReference + src *Table + config ExtractConfig + want *bq.Job + }{ + { + dst: defaultGCS(), + src: c.Dataset("dataset-id").Table("table-id"), + want: defaultExtractJob(), + }, + { + dst: defaultGCS(), + src: c.Dataset("dataset-id").Table("table-id"), + config: ExtractConfig{DisableHeader: true}, + want: func() *bq.Job { + j := defaultExtractJob() + f := false + j.Configuration.Extract.PrintHeader = &f + return j + }(), + }, + { + dst: func() *GCSReference { + g := NewGCSReference("uri") + g.Compression = Gzip + g.DestinationFormat = JSON + g.FieldDelimiter = "\t" + return g + }(), + src: c.Dataset("dataset-id").Table("table-id"), + want: func() *bq.Job { + j := defaultExtractJob() + j.Configuration.Extract.Compression = "GZIP" + j.Configuration.Extract.DestinationFormat = "NEWLINE_DELIMITED_JSON" + j.Configuration.Extract.FieldDelimiter = "\t" + return j + }(), + }, + } + + for _, tc := range testCases { + ext := tc.src.ExtractorTo(tc.dst) + tc.config.Src = ext.Src 
+ tc.config.Dst = ext.Dst + ext.ExtractConfig = tc.config + if _, err := ext.Run(context.Background()); err != nil { + t.Errorf("err calling extract: %v", err) + continue + } + if !reflect.DeepEqual(s.Job, tc.want) { + t.Errorf("extracting: got:\n%v\nwant:\n%v", s.Job, tc.want) + } + } +} diff --git a/vendor/cloud.google.com/go/bigquery/file.go b/vendor/cloud.google.com/go/bigquery/file.go new file mode 100644 index 00000000..0bb3f79d --- /dev/null +++ b/vendor/cloud.google.com/go/bigquery/file.go @@ -0,0 +1,172 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package bigquery + +import ( + "io" + + bq "google.golang.org/api/bigquery/v2" +) + +// A ReaderSource is a source for a load operation that gets +// data from an io.Reader. +type ReaderSource struct { + r io.Reader + FileConfig +} + +// NewReaderSource creates a ReaderSource from an io.Reader. You may +// optionally configure properties on the ReaderSource that describe the +// data being read, before passing it to Table.LoaderFrom. +func NewReaderSource(r io.Reader) *ReaderSource { + return &ReaderSource{r: r} +} + +func (r *ReaderSource) populateInsertJobConfForLoad(conf *insertJobConf) { + conf.media = r.r + r.FileConfig.populateLoadConfig(conf.job.Configuration.Load) +} + +// FileConfig contains configuration options that pertain to files, typically +// text files that require interpretation to be used as a BigQuery table. 
A +// file may live in Google Cloud Storage (see GCSReference), or it may be +// loaded into a table via the Table.LoaderFromReader. +type FileConfig struct { + // SourceFormat is the format of the GCS data to be read. + // Allowed values are: CSV, Avro, JSON, DatastoreBackup. The default is CSV. + SourceFormat DataFormat + + // FieldDelimiter is the separator for fields in a CSV file, used when + // reading or exporting data. The default is ",". + FieldDelimiter string + + // The number of rows at the top of a CSV file that BigQuery will skip when + // reading data. + SkipLeadingRows int64 + + // AllowJaggedRows causes missing trailing optional columns to be tolerated + // when reading CSV data. Missing values are treated as nulls. + AllowJaggedRows bool + + // AllowQuotedNewlines sets whether quoted data sections containing + // newlines are allowed when reading CSV data. + AllowQuotedNewlines bool + + // Indicates if we should automatically infer the options and + // schema for CSV and JSON sources. + AutoDetect bool + + // Encoding is the character encoding of data to be read. + Encoding Encoding + + // MaxBadRecords is the maximum number of bad records that will be ignored + // when reading data. + MaxBadRecords int64 + + // IgnoreUnknownValues causes values not matching the schema to be + // tolerated. Unknown values are ignored. For CSV this ignores extra values + // at the end of a line. For JSON this ignores named values that do not + // match any column name. If this field is not set, records containing + // unknown values are treated as bad records. The MaxBadRecords field can + // be used to customize how bad records are handled. + IgnoreUnknownValues bool + + // Schema describes the data. It is required when reading CSV or JSON data, + // unless the data is being loaded into a table that already exists. + Schema Schema + + // Quote is the value used to quote data sections in a CSV file. 
The + // default quotation character is the double quote ("), which is used if + // both Quote and ForceZeroQuote are unset. + // To specify that no character should be interpreted as a quotation + // character, set ForceZeroQuote to true. + // Only used when reading data. + Quote string + ForceZeroQuote bool +} + +// quote returns the CSV quote character, or nil if unset. +func (fc *FileConfig) quote() *string { + if fc.ForceZeroQuote { + quote := "" + return "e + } + if fc.Quote == "" { + return nil + } + return &fc.Quote +} + +func (fc *FileConfig) populateLoadConfig(conf *bq.JobConfigurationLoad) { + conf.SkipLeadingRows = fc.SkipLeadingRows + conf.SourceFormat = string(fc.SourceFormat) + conf.Autodetect = fc.AutoDetect + conf.AllowJaggedRows = fc.AllowJaggedRows + conf.AllowQuotedNewlines = fc.AllowQuotedNewlines + conf.Encoding = string(fc.Encoding) + conf.FieldDelimiter = fc.FieldDelimiter + conf.IgnoreUnknownValues = fc.IgnoreUnknownValues + conf.MaxBadRecords = fc.MaxBadRecords + if fc.Schema != nil { + conf.Schema = fc.Schema.asTableSchema() + } + conf.Quote = fc.quote() +} + +func (fc *FileConfig) populateExternalDataConfig(conf *bq.ExternalDataConfiguration) { + format := fc.SourceFormat + if format == "" { + // Format must be explicitly set for external data sources. + format = CSV + } + // TODO(jba): support AutoDetect. + conf.IgnoreUnknownValues = fc.IgnoreUnknownValues + conf.MaxBadRecords = fc.MaxBadRecords + conf.SourceFormat = string(format) + if fc.Schema != nil { + conf.Schema = fc.Schema.asTableSchema() + } + if format == CSV { + conf.CsvOptions = &bq.CsvOptions{ + AllowJaggedRows: fc.AllowJaggedRows, + AllowQuotedNewlines: fc.AllowQuotedNewlines, + Encoding: string(fc.Encoding), + FieldDelimiter: fc.FieldDelimiter, + SkipLeadingRows: fc.SkipLeadingRows, + Quote: fc.quote(), + } + } +} + +// DataFormat describes the format of BigQuery table data. +type DataFormat string + +// Constants describing the format of BigQuery table data. 
+const ( + CSV DataFormat = "CSV" + Avro DataFormat = "AVRO" + JSON DataFormat = "NEWLINE_DELIMITED_JSON" + DatastoreBackup DataFormat = "DATASTORE_BACKUP" +) + +// Encoding specifies the character encoding of data to be loaded into BigQuery. +// See https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.load.encoding +// for more details about how this is used. +type Encoding string + +const ( + UTF_8 Encoding = "UTF-8" + ISO_8859_1 Encoding = "ISO-8859-1" +) diff --git a/vendor/cloud.google.com/go/bigquery/file_test.go b/vendor/cloud.google.com/go/bigquery/file_test.go new file mode 100644 index 00000000..eb13303a --- /dev/null +++ b/vendor/cloud.google.com/go/bigquery/file_test.go @@ -0,0 +1,90 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package bigquery + +import ( + "reflect" + "testing" + + "cloud.google.com/go/internal/pretty" + bq "google.golang.org/api/bigquery/v2" +) + +func TestQuote(t *testing.T) { + ptr := func(s string) *string { return &s } + + for _, test := range []struct { + quote string + force bool + want *string + }{ + {"", false, nil}, + {"", true, ptr("")}, + {"-", false, ptr("-")}, + {"-", true, ptr("")}, + } { + fc := FileConfig{ + Quote: test.quote, + ForceZeroQuote: test.force, + } + got := fc.quote() + if (got == nil) != (test.want == nil) { + t.Errorf("%+v\ngot %v\nwant %v", test, pretty.Value(got), pretty.Value(test.want)) + } + if got != nil && test.want != nil && *got != *test.want { + t.Errorf("%+v: got %q, want %q", test, *got, *test.want) + } + } +} + +func TestPopulateLoadConfig(t *testing.T) { + hyphen := "-" + fc := FileConfig{ + SourceFormat: CSV, + FieldDelimiter: "\t", + SkipLeadingRows: 8, + AllowJaggedRows: true, + AllowQuotedNewlines: true, + Encoding: UTF_8, + MaxBadRecords: 7, + IgnoreUnknownValues: true, + Schema: Schema{ + stringFieldSchema(), + nestedFieldSchema(), + }, + Quote: hyphen, + } + want := &bq.JobConfigurationLoad{ + SourceFormat: "CSV", + FieldDelimiter: "\t", + SkipLeadingRows: 8, + AllowJaggedRows: true, + AllowQuotedNewlines: true, + Encoding: "UTF-8", + MaxBadRecords: 7, + IgnoreUnknownValues: true, + Schema: &bq.TableSchema{ + Fields: []*bq.TableFieldSchema{ + bqStringFieldSchema(), + bqNestedFieldSchema(), + }}, + Quote: &hyphen, + } + got := &bq.JobConfigurationLoad{} + fc.populateLoadConfig(got) + if !reflect.DeepEqual(got, want) { + t.Errorf("got:\n%v\nwant:\n%v", pretty.Value(got), pretty.Value(want)) + } +} diff --git a/vendor/cloud.google.com/go/bigquery/gcs.go b/vendor/cloud.google.com/go/bigquery/gcs.go new file mode 100644 index 00000000..af34b6b5 --- /dev/null +++ b/vendor/cloud.google.com/go/bigquery/gcs.go @@ -0,0 +1,68 @@ +// Copyright 2015 Google Inc. All Rights Reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package bigquery + +import bq "google.golang.org/api/bigquery/v2" + +// GCSReference is a reference to one or more Google Cloud Storage objects, which together constitute +// an input or output to a BigQuery operation. +type GCSReference struct { + // TODO(jba): Export so that GCSReference can be used to hold data from a Job.get api call and expose it to the user. + uris []string + + FileConfig + + // DestinationFormat is the format to use when writing exported files. + // Allowed values are: CSV, Avro, JSON. The default is CSV. + // CSV is not supported for tables with nested or repeated fields. + DestinationFormat DataFormat + + // Compression specifies the type of compression to apply when writing data + // to Google Cloud Storage, or using this GCSReference as an ExternalData + // source with CSV or JSON SourceFormat. Default is None. + Compression Compression +} + +// NewGCSReference constructs a reference to one or more Google Cloud Storage objects, which together constitute a data source or destination. +// In the simple case, a single URI in the form gs://bucket/object may refer to a single GCS object. +// Data may also be split into mutiple files, if multiple URIs or URIs containing wildcards are provided. +// Each URI may contain one '*' wildcard character, which (if present) must come after the bucket name. 
+// For more information about the treatment of wildcards and multiple URIs, +// see https://cloud.google.com/bigquery/exporting-data-from-bigquery#exportingmultiple +func NewGCSReference(uri ...string) *GCSReference { + return &GCSReference{uris: uri} +} + +// Compression is the type of compression to apply when writing data to Google Cloud Storage. +type Compression string + +const ( + None Compression = "NONE" + Gzip Compression = "GZIP" +) + +func (gcs *GCSReference) populateInsertJobConfForLoad(conf *insertJobConf) { + conf.job.Configuration.Load.SourceUris = gcs.uris + gcs.FileConfig.populateLoadConfig(conf.job.Configuration.Load) +} + +func (gcs *GCSReference) externalDataConfig() bq.ExternalDataConfiguration { + conf := bq.ExternalDataConfiguration{ + Compression: string(gcs.Compression), + SourceUris: append([]string{}, gcs.uris...), + } + gcs.FileConfig.populateExternalDataConfig(&conf) + return conf +} diff --git a/vendor/cloud.google.com/go/bigquery/integration_test.go b/vendor/cloud.google.com/go/bigquery/integration_test.go new file mode 100644 index 00000000..fd215c8b --- /dev/null +++ b/vendor/cloud.google.com/go/bigquery/integration_test.go @@ -0,0 +1,754 @@ +// Copyright 2015 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package bigquery + +import ( + "flag" + "fmt" + "log" + "net/http" + "os" + "reflect" + "sort" + "strings" + "testing" + "time" + + "cloud.google.com/go/civil" + "cloud.google.com/go/internal/pretty" + "cloud.google.com/go/internal/testutil" + "golang.org/x/net/context" + "google.golang.org/api/googleapi" + "google.golang.org/api/iterator" + "google.golang.org/api/option" +) + +var ( + client *Client + dataset *Dataset + schema = Schema{ + {Name: "name", Type: StringFieldType}, + {Name: "num", Type: IntegerFieldType}, + } + fiveMinutesFromNow time.Time +) + +func TestMain(m *testing.M) { + initIntegrationTest() + os.Exit(m.Run()) +} + +func getClient(t *testing.T) *Client { + if client == nil { + t.Skip("Integration tests skipped") + } + return client +} + +// If integration tests will be run, create a unique bucket for them. +func initIntegrationTest() { + flag.Parse() // needed for testing.Short() + if testing.Short() { + return + } + ctx := context.Background() + ts := testutil.TokenSource(ctx, Scope) + if ts == nil { + log.Println("Integration tests skipped. See CONTRIBUTING.md for details") + return + } + projID := testutil.ProjID() + var err error + client, err = NewClient(ctx, projID, option.WithTokenSource(ts)) + if err != nil { + log.Fatalf("NewClient: %v", err) + } + dataset = client.Dataset("bigquery_integration_test") + if err := dataset.Create(ctx); err != nil && !hasStatusCode(err, http.StatusConflict) { // AlreadyExists is 409 + log.Fatalf("creating dataset: %v", err) + } +} + +func TestIntegration_Create(t *testing.T) { + // Check that creating a record field with an empty schema is an error. 
+ if client == nil { + t.Skip("Integration tests skipped") + } + table := dataset.Table("t_bad") + schema := Schema{ + {Name: "rec", Type: RecordFieldType, Schema: Schema{}}, + } + err := table.Create(context.Background(), schema, TableExpiration(time.Now().Add(5*time.Minute))) + if err == nil { + t.Fatal("want error, got nil") + } + if !hasStatusCode(err, http.StatusBadRequest) { + t.Fatalf("want a 400 error, got %v", err) + } +} + +func TestIntegration_CreateView(t *testing.T) { + if client == nil { + t.Skip("Integration tests skipped") + } + ctx := context.Background() + table := newTable(t, schema) + defer table.Delete(ctx) + + // Test that standard SQL views work. + view := dataset.Table("t_view_standardsql") + query := ViewQuery(fmt.Sprintf("SELECT APPROX_COUNT_DISTINCT(name) FROM `%s.%s.%s`", dataset.ProjectID, dataset.DatasetID, table.TableID)) + err := view.Create(context.Background(), UseStandardSQL(), query) + if err != nil { + t.Fatalf("table.create: Did not expect an error, got: %v", err) + } + view.Delete(ctx) +} + +func TestIntegration_TableMetadata(t *testing.T) { + if client == nil { + t.Skip("Integration tests skipped") + } + ctx := context.Background() + table := newTable(t, schema) + defer table.Delete(ctx) + // Check table metadata. + md, err := table.Metadata(ctx) + if err != nil { + t.Fatal(err) + } + // TODO(jba): check md more thorougly. 
+ if got, want := md.ID, fmt.Sprintf("%s:%s.%s", dataset.ProjectID, dataset.DatasetID, table.TableID); got != want { + t.Errorf("metadata.ID: got %q, want %q", got, want) + } + if got, want := md.Type, RegularTable; got != want { + t.Errorf("metadata.Type: got %v, want %v", got, want) + } + if got, want := md.ExpirationTime, fiveMinutesFromNow; !got.Equal(want) { + t.Errorf("metadata.Type: got %v, want %v", got, want) + } + + // Check that timePartitioning is nil by default + if md.TimePartitioning != nil { + t.Errorf("metadata.TimePartitioning: got %v, want %v", md.TimePartitioning, nil) + } + + // Create tables that have time partitioning + partitionCases := []struct { + timePartitioning TimePartitioning + expectedExpiration time.Duration + }{ + {TimePartitioning{}, time.Duration(0)}, + {TimePartitioning{time.Second}, time.Second}, + } + for i, c := range partitionCases { + table := dataset.Table(fmt.Sprintf("t_metadata_partition_%v", i)) + err = table.Create(context.Background(), schema, c.timePartitioning, TableExpiration(time.Now().Add(5*time.Minute))) + if err != nil { + t.Fatal(err) + } + defer table.Delete(ctx) + md, err = table.Metadata(ctx) + if err != nil { + t.Fatal(err) + } + + got := md.TimePartitioning + want := &TimePartitioning{c.expectedExpiration} + if !reflect.DeepEqual(got, want) { + t.Errorf("metadata.TimePartitioning: got %v, want %v", got, want) + } + } +} + +func TestIntegration_DatasetMetadata(t *testing.T) { + if client == nil { + t.Skip("Integration tests skipped") + } + ctx := context.Background() + md, err := dataset.Metadata(ctx) + if err != nil { + t.Fatal(err) + } + if got, want := md.ID, fmt.Sprintf("%s:%s", dataset.ProjectID, dataset.DatasetID); got != want { + t.Errorf("ID: got %q, want %q", got, want) + } + jan2016 := time.Date(2016, 1, 1, 0, 0, 0, 0, time.UTC) + if md.CreationTime.Before(jan2016) { + t.Errorf("CreationTime: got %s, want > 2016-1-1", md.CreationTime) + } + if md.LastModifiedTime.Before(jan2016) { + 
t.Errorf("LastModifiedTime: got %s, want > 2016-1-1", md.LastModifiedTime) + } + + // Verify that we get a NotFound for a nonexistent dataset. + _, err = client.Dataset("does_not_exist").Metadata(ctx) + if err == nil || !hasStatusCode(err, http.StatusNotFound) { + t.Errorf("got %v, want NotFound error", err) + } +} + +func TestIntegration_DatasetDelete(t *testing.T) { + if client == nil { + t.Skip("Integration tests skipped") + } + ctx := context.Background() + ds := client.Dataset("delete_test") + if err := ds.Create(ctx); err != nil && !hasStatusCode(err, http.StatusConflict) { // AlreadyExists is 409 + t.Fatalf("creating dataset %s: %v", ds, err) + } + if err := ds.Delete(ctx); err != nil { + t.Fatalf("deleting dataset %s: %v", ds, err) + } +} + +func TestIntegration_Tables(t *testing.T) { + if client == nil { + t.Skip("Integration tests skipped") + } + ctx := context.Background() + table := newTable(t, schema) + defer table.Delete(ctx) + + // Iterate over tables in the dataset. + it := dataset.Tables(ctx) + var tables []*Table + for { + tbl, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + t.Fatal(err) + } + tables = append(tables, tbl) + } + // Other tests may be running with this dataset, so there might be more + // than just our table in the list. So don't try for an exact match; just + // make sure that our table is there somewhere. + found := false + for _, tbl := range tables { + if reflect.DeepEqual(tbl, table) { + found = true + break + } + } + if !found { + t.Errorf("Tables: got %v\nshould see %v in the list", pretty.Value(tables), pretty.Value(table)) + } +} + +func TestIntegration_UploadAndRead(t *testing.T) { + if client == nil { + t.Skip("Integration tests skipped") + } + ctx := context.Background() + table := newTable(t, schema) + defer table.Delete(ctx) + + // Populate the table. 
+ upl := table.Uploader() + var ( + wantRows [][]Value + saverRows []*ValuesSaver + ) + for i, name := range []string{"a", "b", "c"} { + row := []Value{name, int64(i)} + wantRows = append(wantRows, row) + saverRows = append(saverRows, &ValuesSaver{ + Schema: schema, + InsertID: name, + Row: row, + }) + } + if err := upl.Put(ctx, saverRows); err != nil { + t.Fatal(putError(err)) + } + + // Wait until the data has been uploaded. This can take a few seconds, according + // to https://cloud.google.com/bigquery/streaming-data-into-bigquery. + if err := waitForRow(ctx, table); err != nil { + t.Fatal(err) + } + + // Read the table. + checkRead(t, "upload", table.Read(ctx), wantRows) + + // Query the table. + q := client.Query(fmt.Sprintf("select name, num from %s", table.TableID)) + q.DefaultProjectID = dataset.ProjectID + q.DefaultDatasetID = dataset.DatasetID + + rit, err := q.Read(ctx) + if err != nil { + t.Fatal(err) + } + checkRead(t, "query", rit, wantRows) + + // Query the long way. + job1, err := q.Run(ctx) + if err != nil { + t.Fatal(err) + } + job2, err := client.JobFromID(ctx, job1.ID()) + if err != nil { + t.Fatal(err) + } + rit, err = job2.Read(ctx) + if err != nil { + t.Fatal(err) + } + checkRead(t, "job.Read", rit, wantRows) + + // Test reading directly into a []Value. + valueLists, err := readAll(table.Read(ctx)) + if err != nil { + t.Fatal(err) + } + it := table.Read(ctx) + for i, vl := range valueLists { + var got []Value + if err := it.Next(&got); err != nil { + t.Fatal(err) + } + want := []Value(vl) + if !reflect.DeepEqual(got, want) { + t.Errorf("%d: got %v, want %v", i, got, want) + } + } + + // Test reading into a map. 
+ it = table.Read(ctx) + for _, vl := range valueLists { + var vm map[string]Value + if err := it.Next(&vm); err != nil { + t.Fatal(err) + } + if got, want := len(vm), len(vl); got != want { + t.Fatalf("valueMap len: got %d, want %d", got, want) + } + for i, v := range vl { + if got, want := vm[schema[i].Name], v; got != want { + t.Errorf("%d, name=%s: got %v, want %v", + i, schema[i].Name, got, want) + } + } + } +} + +type TestStruct struct { + Name string + Nums []int + Sub Sub + Subs []*Sub +} + +type Sub struct { + B bool + SubSub SubSub + SubSubs []*SubSub +} + +type SubSub struct{ Count int } + +func TestIntegration_UploadAndReadStructs(t *testing.T) { + if client == nil { + t.Skip("Integration tests skipped") + } + schema, err := InferSchema(TestStruct{}) + if err != nil { + t.Fatal(err) + } + + ctx := context.Background() + table := newTable(t, schema) + defer table.Delete(ctx) + + // Populate the table. + upl := table.Uploader() + want := []*TestStruct{ + {Name: "a", Nums: []int{1, 2}, Sub: Sub{B: true}, Subs: []*Sub{{B: false}, {B: true}}}, + {Name: "b", Nums: []int{1}, Subs: []*Sub{{B: false}, {B: false}, {B: true}}}, + {Name: "c", Sub: Sub{B: true}}, + { + Name: "d", + Sub: Sub{SubSub: SubSub{12}, SubSubs: []*SubSub{{1}, {2}, {3}}}, + Subs: []*Sub{{B: false, SubSub: SubSub{4}}, {B: true, SubSubs: []*SubSub{{5}, {6}}}}, + }, + } + var savers []*StructSaver + for _, s := range want { + savers = append(savers, &StructSaver{Schema: schema, Struct: s}) + } + if err := upl.Put(ctx, savers); err != nil { + t.Fatal(putError(err)) + } + + // Wait until the data has been uploaded. This can take a few seconds, according + // to https://cloud.google.com/bigquery/streaming-data-into-bigquery. + if err := waitForRow(ctx, table); err != nil { + t.Fatal(err) + } + + // Test iteration with structs. 
+ it := table.Read(ctx) + var got []*TestStruct + for { + var g TestStruct + err := it.Next(&g) + if err == iterator.Done { + break + } + if err != nil { + t.Fatal(err) + } + got = append(got, &g) + } + sort.Sort(byName(got)) + + // BigQuery does not elide nils. It reports an error for nil fields. + for i, g := range got { + if i >= len(want) { + t.Errorf("%d: got %v, past end of want", i, pretty.Value(g)) + } else if w := want[i]; !reflect.DeepEqual(g, w) { + t.Errorf("%d: got %v, want %v", i, pretty.Value(g), pretty.Value(w)) + } + } +} + +type byName []*TestStruct + +func (b byName) Len() int { return len(b) } +func (b byName) Swap(i, j int) { b[i], b[j] = b[j], b[i] } +func (b byName) Less(i, j int) bool { return b[i].Name < b[j].Name } + +func TestIntegration_Update(t *testing.T) { + if client == nil { + t.Skip("Integration tests skipped") + } + ctx := context.Background() + table := newTable(t, schema) + defer table.Delete(ctx) + + // Test Update of non-schema fields. + tm, err := table.Metadata(ctx) + if err != nil { + t.Fatal(err) + } + wantDescription := tm.Description + "more" + wantName := tm.Name + "more" + got, err := table.Update(ctx, TableMetadataToUpdate{ + Description: wantDescription, + Name: wantName, + }) + if err != nil { + t.Fatal(err) + } + if got.Description != wantDescription { + t.Errorf("Description: got %q, want %q", got.Description, wantDescription) + } + if got.Name != wantName { + t.Errorf("Name: got %q, want %q", got.Name, wantName) + } + if !reflect.DeepEqual(got.Schema, schema) { + t.Errorf("Schema: got %v, want %v", pretty.Value(got.Schema), pretty.Value(schema)) + } + + // Test schema update. + // Columns can be added. schema2 is the same as schema, except for the + // added column in the middle. 
+ nested := Schema{ + {Name: "nested", Type: BooleanFieldType}, + {Name: "other", Type: StringFieldType}, + } + schema2 := Schema{ + schema[0], + {Name: "rec", Type: RecordFieldType, Schema: nested}, + schema[1], + } + + got, err = table.Update(ctx, TableMetadataToUpdate{Schema: schema2}) + if err != nil { + t.Fatal(err) + } + + // Wherever you add the column, it appears at the end. + schema3 := Schema{schema2[0], schema2[2], schema2[1]} + if !reflect.DeepEqual(got.Schema, schema3) { + t.Errorf("add field:\ngot %v\nwant %v", + pretty.Value(got.Schema), pretty.Value(schema3)) + } + + // Updating with the empty schema succeeds, but is a no-op. + got, err = table.Update(ctx, TableMetadataToUpdate{Schema: Schema{}}) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(got.Schema, schema3) { + t.Errorf("empty schema:\ngot %v\nwant %v", + pretty.Value(got.Schema), pretty.Value(schema3)) + } + + // Error cases. + for _, test := range []struct { + desc string + fields []*FieldSchema + }{ + {"change from optional to required", []*FieldSchema{ + schema3[0], + {Name: "num", Type: IntegerFieldType, Required: true}, + schema3[2], + }}, + {"add a required field", []*FieldSchema{ + schema3[0], schema3[1], schema3[2], + {Name: "req", Type: StringFieldType, Required: true}, + }}, + {"remove a field", []*FieldSchema{schema3[0], schema3[1]}}, + {"remove a nested field", []*FieldSchema{ + schema3[0], schema3[1], + {Name: "rec", Type: RecordFieldType, Schema: Schema{nested[0]}}}}, + {"remove all nested fields", []*FieldSchema{ + schema3[0], schema3[1], + {Name: "rec", Type: RecordFieldType, Schema: Schema{}}}}, + } { + for { + _, err = table.Update(ctx, TableMetadataToUpdate{Schema: Schema(test.fields)}) + if !hasStatusCode(err, 403) { + break + } + // We've hit the rate limit for updates. Wait a bit and retry. 
+ t.Logf("%s: retrying after getting %v", test.desc, err) + time.Sleep(4 * time.Second) + } + if err == nil { + t.Errorf("%s: want error, got nil", test.desc) + } else if !hasStatusCode(err, 400) { + t.Errorf("%s: want 400, got %v", test.desc, err) + } + } +} + +func TestIntegration_Load(t *testing.T) { + if client == nil { + t.Skip("Integration tests skipped") + } + ctx := context.Background() + table := newTable(t, schema) + defer table.Delete(ctx) + + // Load the table from a reader. + r := strings.NewReader("a,0\nb,1\nc,2\n") + wantRows := [][]Value{ + []Value{"a", int64(0)}, + []Value{"b", int64(1)}, + []Value{"c", int64(2)}, + } + rs := NewReaderSource(r) + loader := table.LoaderFrom(rs) + loader.WriteDisposition = WriteTruncate + job, err := loader.Run(ctx) + if err != nil { + t.Fatal(err) + } + if err := wait(ctx, job); err != nil { + t.Fatal(err) + } + checkRead(t, "reader load", table.Read(ctx), wantRows) +} + +func TestIntegration_DML(t *testing.T) { + if client == nil { + t.Skip("Integration tests skipped") + } + ctx := context.Background() + table := newTable(t, schema) + defer table.Delete(ctx) + + // Use DML to insert. 
+ wantRows := [][]Value{ + []Value{"a", int64(0)}, + []Value{"b", int64(1)}, + []Value{"c", int64(2)}, + } + query := fmt.Sprintf("INSERT bigquery_integration_test.%s (name, num) "+ + "VALUES ('a', 0), ('b', 1), ('c', 2)", + table.TableID) + q := client.Query(query) + q.UseStandardSQL = true // necessary for DML + job, err := q.Run(ctx) + if err != nil { + t.Fatal(err) + } + if err := wait(ctx, job); err != nil { + t.Fatal(err) + } + checkRead(t, "INSERT", table.Read(ctx), wantRows) +} + +func TestIntegration_TimeTypes(t *testing.T) { + if client == nil { + t.Skip("Integration tests skipped") + } + ctx := context.Background() + dtSchema := Schema{ + {Name: "d", Type: DateFieldType}, + {Name: "t", Type: TimeFieldType}, + {Name: "dt", Type: DateTimeFieldType}, + {Name: "ts", Type: TimestampFieldType}, + } + table := newTable(t, dtSchema) + defer table.Delete(ctx) + + d := civil.Date{2016, 3, 20} + tm := civil.Time{12, 30, 0, 0} + ts := time.Date(2016, 3, 20, 15, 04, 05, 0, time.UTC) + wantRows := [][]Value{ + []Value{d, tm, civil.DateTime{d, tm}, ts}, + } + upl := table.Uploader() + if err := upl.Put(ctx, []*ValuesSaver{ + {Schema: dtSchema, Row: wantRows[0]}, + }); err != nil { + t.Fatal(putError(err)) + } + if err := waitForRow(ctx, table); err != nil { + t.Fatal(err) + } + + // SQL wants DATETIMEs with a space between date and time, but the service + // returns them in RFC3339 form, with a "T" between. + query := fmt.Sprintf("INSERT bigquery_integration_test.%s (d, t, dt, ts) "+ + "VALUES ('%s', '%s', '%s %s', '%s')", + table.TableID, d, tm, d, tm, ts.Format("2006-01-02 15:04:05")) + q := client.Query(query) + q.UseStandardSQL = true // necessary for DML + job, err := q.Run(ctx) + if err != nil { + t.Fatal(err) + } + if err := wait(ctx, job); err != nil { + t.Fatal(err) + } + wantRows = append(wantRows, wantRows[0]) + checkRead(t, "TimeTypes", table.Read(ctx), wantRows) +} + +// Creates a new, temporary table with a unique name and the given schema. 
+func newTable(t *testing.T, s Schema) *Table { + fiveMinutesFromNow = time.Now().Add(5 * time.Minute).Round(time.Second) + name := fmt.Sprintf("t%d", time.Now().UnixNano()) + table := dataset.Table(name) + err := table.Create(context.Background(), s, TableExpiration(fiveMinutesFromNow)) + if err != nil { + t.Fatal(err) + } + return table +} + +func checkRead(t *testing.T, msg string, it *RowIterator, want [][]Value) { + got, err := readAll(it) + if err != nil { + t.Fatalf("%s: %v", msg, err) + } + if len(got) != len(want) { + t.Errorf("%s: got %d rows, want %d", msg, len(got), len(want)) + } + sort.Sort(byCol0(got)) + for i, r := range got { + gotRow := []Value(r) + wantRow := want[i] + if !reflect.DeepEqual(gotRow, wantRow) { + t.Errorf("%s #%d: got %v, want %v", msg, i, gotRow, wantRow) + } + } +} + +func readAll(it *RowIterator) ([][]Value, error) { + var rows [][]Value + for { + var vals []Value + err := it.Next(&vals) + if err == iterator.Done { + return rows, nil + } + if err != nil { + return nil, err + } + rows = append(rows, vals) + } +} + +type byCol0 [][]Value + +func (b byCol0) Len() int { return len(b) } +func (b byCol0) Swap(i, j int) { b[i], b[j] = b[j], b[i] } +func (b byCol0) Less(i, j int) bool { + switch a := b[i][0].(type) { + case string: + return a < b[j][0].(string) + case civil.Date: + return a.Before(b[j][0].(civil.Date)) + default: + panic("unknown type") + } +} + +func hasStatusCode(err error, code int) bool { + if e, ok := err.(*googleapi.Error); ok && e.Code == code { + return true + } + return false +} + +// wait polls the job until it is complete or an error is returned. +func wait(ctx context.Context, job *Job) error { + status, err := job.Wait(ctx) + if err != nil { + return fmt.Errorf("getting job status: %v", err) + } + if status.Err() != nil { + return fmt.Errorf("job status error: %#v", status.Err()) + } + return nil +} + +// waitForRow polls the table until it contains a row. +// TODO(jba): use internal.Retry. 
+func waitForRow(ctx context.Context, table *Table) error { + for { + it := table.Read(ctx) + var v []Value + err := it.Next(&v) + if err == nil { + return nil + } + if err != iterator.Done { + return err + } + time.Sleep(1 * time.Second) + } +} + +func putError(err error) string { + pme, ok := err.(PutMultiError) + if !ok { + return err.Error() + } + var msgs []string + for _, err := range pme { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "\n") +} diff --git a/vendor/cloud.google.com/go/bigquery/iterator.go b/vendor/cloud.google.com/go/bigquery/iterator.go new file mode 100644 index 00000000..dab3e2f5 --- /dev/null +++ b/vendor/cloud.google.com/go/bigquery/iterator.go @@ -0,0 +1,158 @@ +// Copyright 2015 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package bigquery + +import ( + "fmt" + "reflect" + + "golang.org/x/net/context" + "google.golang.org/api/iterator" +) + +// A pageFetcher returns a page of rows, starting from the row specified by token. 
+type pageFetcher interface { + fetch(ctx context.Context, s service, token string) (*readDataResult, error) + setPaging(*pagingConf) +} + +func newRowIterator(ctx context.Context, s service, pf pageFetcher) *RowIterator { + it := &RowIterator{ + ctx: ctx, + service: s, + pf: pf, + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo( + it.fetch, + func() int { return len(it.rows) }, + func() interface{} { r := it.rows; it.rows = nil; return r }) + return it +} + +// A RowIterator provides access to the result of a BigQuery lookup. +type RowIterator struct { + ctx context.Context + service service + pf pageFetcher + pageInfo *iterator.PageInfo + nextFunc func() error + + // StartIndex can be set before the first call to Next. If PageInfo().Token + // is also set, StartIndex is ignored. + StartIndex uint64 + + rows [][]Value + + schema Schema // populated on first call to fetch + structLoader structLoader // used to populate a pointer to a struct +} + +// Next loads the next row into dst. Its return value is iterator.Done if there +// are no more results. Once Next returns iterator.Done, all subsequent calls +// will return iterator.Done. +// +// dst may implement ValueLoader, or may be a *[]Value, *map[string]Value, or struct pointer. +// +// If dst is a *[]Value, it will be set to to new []Value whose i'th element +// will be populated with the i'th column of the row. +// +// If dst is a *map[string]Value, a new map will be created if dst is nil. Then +// for each schema column name, the map key of that name will be set to the column's +// value. +// +// If dst is pointer to a struct, each column in the schema will be matched +// with an exported field of the struct that has the same name, ignoring case. +// Unmatched schema columns and struct fields will be ignored. +// +// Each BigQuery column type corresponds to one or more Go types; a matching struct +// field must be of the correct type. 
The correspondences are: +// +// STRING string +// BOOL bool +// INTEGER int, int8, int16, int32, int64, uint8, uint16, uint32 +// FLOAT float32, float64 +// BYTES []byte +// TIMESTAMP time.Time +// DATE civil.Date +// TIME civil.Time +// DATETIME civil.DateTime +// +// A repeated field corresponds to a slice or array of the element type. +// A RECORD type (nested schema) corresponds to a nested struct or struct pointer. +// All calls to Next on the same iterator must use the same struct type. +func (it *RowIterator) Next(dst interface{}) error { + var vl ValueLoader + switch dst := dst.(type) { + case ValueLoader: + vl = dst + case *[]Value: + vl = (*valueList)(dst) + case *map[string]Value: + vl = (*valueMap)(dst) + default: + if !isStructPtr(dst) { + return fmt.Errorf("bigquery: cannot convert %T to ValueLoader (need pointer to []Value, map[string]Value, or struct)", dst) + } + } + if err := it.nextFunc(); err != nil { + return err + } + row := it.rows[0] + it.rows = it.rows[1:] + + if vl == nil { + // This can only happen if dst is a pointer to a struct. We couldn't + // set vl above because we need the schema. + if err := it.structLoader.set(dst, it.schema); err != nil { + return err + } + vl = &it.structLoader + } + return vl.Load(row, it.schema) +} + +func isStructPtr(x interface{}) bool { + t := reflect.TypeOf(x) + return t.Kind() == reflect.Ptr && t.Elem().Kind() == reflect.Struct +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. 
+func (it *RowIterator) PageInfo() *iterator.PageInfo { return it.pageInfo } + +func (it *RowIterator) fetch(pageSize int, pageToken string) (string, error) { + pc := &pagingConf{} + if pageSize > 0 { + pc.recordsPerRequest = int64(pageSize) + pc.setRecordsPerRequest = true + } + if pageToken == "" { + pc.startIndex = it.StartIndex + } + it.pf.setPaging(pc) + var res *readDataResult + var err error + for { + res, err = it.pf.fetch(it.ctx, it.service, pageToken) + if err != errIncompleteJob { + break + } + } + if err != nil { + return "", err + } + it.rows = append(it.rows, res.rows...) + it.schema = res.schema + return res.pageToken, nil +} diff --git a/vendor/cloud.google.com/go/bigquery/iterator_test.go b/vendor/cloud.google.com/go/bigquery/iterator_test.go new file mode 100644 index 00000000..e5610231 --- /dev/null +++ b/vendor/cloud.google.com/go/bigquery/iterator_test.go @@ -0,0 +1,413 @@ +// Copyright 2015 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package bigquery + +import ( + "errors" + "fmt" + "reflect" + "testing" + + "golang.org/x/net/context" + "google.golang.org/api/iterator" +) + +type fetchResponse struct { + result *readDataResult // The result to return. + err error // The error to return. +} + +// pageFetcherStub services fetch requests by returning data from an in-memory list of values. 
+type pageFetcherStub struct { + fetchResponses map[string]fetchResponse + + err error +} + +func (pf *pageFetcherStub) fetch(ctx context.Context, s service, token string) (*readDataResult, error) { + call, ok := pf.fetchResponses[token] + if !ok { + pf.err = fmt.Errorf("Unexpected page token: %q", token) + } + return call.result, call.err +} + +func (pf *pageFetcherStub) setPaging(pc *pagingConf) {} + +func TestIterator(t *testing.T) { + var ( + iiSchema = Schema{ + {Type: IntegerFieldType}, + {Type: IntegerFieldType}, + } + siSchema = Schema{ + {Type: StringFieldType}, + {Type: IntegerFieldType}, + } + ) + fetchFailure := errors.New("fetch failure") + + testCases := []struct { + desc string + pageToken string + fetchResponses map[string]fetchResponse + want [][]Value + wantErr error + wantSchema Schema + }{ + { + desc: "Iteration over single empty page", + fetchResponses: map[string]fetchResponse{ + "": { + result: &readDataResult{ + pageToken: "", + rows: [][]Value{}, + schema: Schema{}, + }, + }, + }, + want: [][]Value{}, + wantSchema: Schema{}, + }, + { + desc: "Iteration over single page", + fetchResponses: map[string]fetchResponse{ + "": { + result: &readDataResult{ + pageToken: "", + rows: [][]Value{{1, 2}, {11, 12}}, + schema: iiSchema, + }, + }, + }, + want: [][]Value{{1, 2}, {11, 12}}, + wantSchema: iiSchema, + }, + { + desc: "Iteration over single page with different schema", + fetchResponses: map[string]fetchResponse{ + "": { + result: &readDataResult{ + pageToken: "", + rows: [][]Value{{"1", 2}, {"11", 12}}, + schema: siSchema, + }, + }, + }, + want: [][]Value{{"1", 2}, {"11", 12}}, + wantSchema: siSchema, + }, + { + desc: "Iteration over two pages", + fetchResponses: map[string]fetchResponse{ + "": { + result: &readDataResult{ + pageToken: "a", + rows: [][]Value{{1, 2}, {11, 12}}, + schema: iiSchema, + }, + }, + "a": { + result: &readDataResult{ + pageToken: "", + rows: [][]Value{{101, 102}, {111, 112}}, + schema: iiSchema, + }, + }, + }, + want: 
[][]Value{{1, 2}, {11, 12}, {101, 102}, {111, 112}}, + wantSchema: iiSchema, + }, + { + desc: "Server response includes empty page", + fetchResponses: map[string]fetchResponse{ + "": { + result: &readDataResult{ + pageToken: "a", + rows: [][]Value{{1, 2}, {11, 12}}, + schema: iiSchema, + }, + }, + "a": { + result: &readDataResult{ + pageToken: "b", + rows: [][]Value{}, + schema: iiSchema, + }, + }, + "b": { + result: &readDataResult{ + pageToken: "", + rows: [][]Value{{101, 102}, {111, 112}}, + schema: iiSchema, + }, + }, + }, + want: [][]Value{{1, 2}, {11, 12}, {101, 102}, {111, 112}}, + wantSchema: iiSchema, + }, + { + desc: "Fetch error", + fetchResponses: map[string]fetchResponse{ + "": { + result: &readDataResult{ + pageToken: "a", + rows: [][]Value{{1, 2}, {11, 12}}, + schema: iiSchema, + }, + }, + "a": { + // We returns some data from this fetch, but also an error. + // So the end result should include only data from the previous fetch. + err: fetchFailure, + result: &readDataResult{ + pageToken: "b", + rows: [][]Value{{101, 102}, {111, 112}}, + schema: iiSchema, + }, + }, + }, + want: [][]Value{{1, 2}, {11, 12}}, + wantErr: fetchFailure, + wantSchema: iiSchema, + }, + + { + desc: "Skip over an entire page", + pageToken: "a", + fetchResponses: map[string]fetchResponse{ + "": { + result: &readDataResult{ + pageToken: "a", + rows: [][]Value{{1, 2}, {11, 12}}, + schema: iiSchema, + }, + }, + "a": { + result: &readDataResult{ + pageToken: "", + rows: [][]Value{{101, 102}, {111, 112}}, + schema: iiSchema, + }, + }, + }, + want: [][]Value{{101, 102}, {111, 112}}, + wantSchema: iiSchema, + }, + + { + desc: "Skip beyond all data", + pageToken: "b", + fetchResponses: map[string]fetchResponse{ + "": { + result: &readDataResult{ + pageToken: "a", + rows: [][]Value{{1, 2}, {11, 12}}, + schema: iiSchema, + }, + }, + "a": { + result: &readDataResult{ + pageToken: "b", + rows: [][]Value{{101, 102}, {111, 112}}, + schema: iiSchema, + }, + }, + "b": { + result: 
&readDataResult{}, + }, + }, + // In this test case, Next will return false on its first call, + // so we won't even attempt to call Get. + want: [][]Value{}, + wantSchema: Schema{}, + }, + } + + for _, tc := range testCases { + pf := &pageFetcherStub{ + fetchResponses: tc.fetchResponses, + } + it := newRowIterator(context.Background(), nil, pf) + it.PageInfo().Token = tc.pageToken + values, schema, err := consumeRowIterator(it) + if err != tc.wantErr { + t.Fatalf("%s: got %v, want %v", tc.desc, err, tc.wantErr) + } + if (len(values) != 0 || len(tc.want) != 0) && !reflect.DeepEqual(values, tc.want) { + t.Errorf("%s: values:\ngot: %v\nwant:%v", tc.desc, values, tc.want) + } + if (len(schema) != 0 || len(tc.wantSchema) != 0) && !reflect.DeepEqual(schema, tc.wantSchema) { + t.Errorf("%s: iterator.Schema:\ngot: %v\nwant: %v", tc.desc, schema, tc.wantSchema) + } + } +} + +type valueListWithSchema struct { + vals valueList + schema Schema +} + +func (v *valueListWithSchema) Load(vs []Value, s Schema) error { + v.vals.Load(vs, s) + v.schema = s + return nil +} + +// consumeRowIterator reads the schema and all values from a RowIterator and returns them. 
+func consumeRowIterator(it *RowIterator) ([][]Value, Schema, error) { + var got [][]Value + var schema Schema + for { + var vls valueListWithSchema + err := it.Next(&vls) + if err == iterator.Done { + return got, schema, nil + } + if err != nil { + return got, schema, err + } + got = append(got, vls.vals) + schema = vls.schema + } +} + +type delayedPageFetcher struct { + pageFetcherStub + delayCount int +} + +func (pf *delayedPageFetcher) fetch(ctx context.Context, s service, token string) (*readDataResult, error) { + if pf.delayCount > 0 { + pf.delayCount-- + return nil, errIncompleteJob + } + return pf.pageFetcherStub.fetch(ctx, s, token) +} + +func TestIterateIncompleteJob(t *testing.T) { + want := [][]Value{{1, 2}, {11, 12}, {101, 102}, {111, 112}} + pf := pageFetcherStub{ + fetchResponses: map[string]fetchResponse{ + "": { + result: &readDataResult{ + pageToken: "a", + rows: [][]Value{{1, 2}, {11, 12}}, + }, + }, + "a": { + result: &readDataResult{ + pageToken: "", + rows: [][]Value{{101, 102}, {111, 112}}, + }, + }, + }, + } + dpf := &delayedPageFetcher{ + pageFetcherStub: pf, + delayCount: 1, + } + it := newRowIterator(context.Background(), nil, dpf) + + values, _, err := consumeRowIterator(it) + if err != nil { + t.Fatal(err) + } + + if (len(values) != 0 || len(want) != 0) && !reflect.DeepEqual(values, want) { + t.Errorf("values: got:\n%v\nwant:\n%v", values, want) + } + if dpf.delayCount != 0 { + t.Errorf("delayCount: got: %v, want: 0", dpf.delayCount) + } +} + +func TestNextDuringErrorState(t *testing.T) { + pf := &pageFetcherStub{ + fetchResponses: map[string]fetchResponse{ + "": {err: errors.New("bang")}, + }, + } + it := newRowIterator(context.Background(), nil, pf) + var vals []Value + if err := it.Next(&vals); err == nil { + t.Errorf("Expected error after calling Next") + } + if err := it.Next(&vals); err == nil { + t.Errorf("Expected error calling Next again when iterator has a non-nil error.") + } +} + +func TestNextAfterFinished(t *testing.T) { + 
testCases := []struct { + fetchResponses map[string]fetchResponse + want [][]Value + }{ + { + fetchResponses: map[string]fetchResponse{ + "": { + result: &readDataResult{ + pageToken: "", + rows: [][]Value{{1, 2}, {11, 12}}, + }, + }, + }, + want: [][]Value{{1, 2}, {11, 12}}, + }, + { + fetchResponses: map[string]fetchResponse{ + "": { + result: &readDataResult{ + pageToken: "", + rows: [][]Value{}, + }, + }, + }, + want: [][]Value{}, + }, + } + + for _, tc := range testCases { + pf := &pageFetcherStub{ + fetchResponses: tc.fetchResponses, + } + it := newRowIterator(context.Background(), nil, pf) + + values, _, err := consumeRowIterator(it) + if err != nil { + t.Fatal(err) + } + if (len(values) != 0 || len(tc.want) != 0) && !reflect.DeepEqual(values, tc.want) { + t.Errorf("values: got:\n%v\nwant:\n%v", values, tc.want) + } + // Try calling Next again. + var vals []Value + if err := it.Next(&vals); err != iterator.Done { + t.Errorf("Expected Done calling Next when there are no more values") + } + } +} + +func TestIteratorNextTypes(t *testing.T) { + it := newRowIterator(context.Background(), nil, nil) + for _, v := range []interface{}{3, "s", []int{}, &[]int{}, + map[string]Value{}, &map[string]interface{}{}, + struct{}{}, + } { + if err := it.Next(v); err == nil { + t.Errorf("%v: want error, got nil", v) + } + } +} diff --git a/vendor/cloud.google.com/go/bigquery/job.go b/vendor/cloud.google.com/go/bigquery/job.go new file mode 100644 index 00000000..f8905e17 --- /dev/null +++ b/vendor/cloud.google.com/go/bigquery/job.go @@ -0,0 +1,133 @@ +// Copyright 2015 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package bigquery + +import ( + "cloud.google.com/go/internal" + gax "github.com/googleapis/gax-go" + "golang.org/x/net/context" + bq "google.golang.org/api/bigquery/v2" +) + +// A Job represents an operation which has been submitted to BigQuery for processing. +type Job struct { + service service + projectID string + jobID string + + isQuery bool +} + +// JobFromID creates a Job which refers to an existing BigQuery job. The job +// need not have been created by this package. For example, the job may have +// been created in the BigQuery console. +func (c *Client) JobFromID(ctx context.Context, id string) (*Job, error) { + jobType, err := c.service.getJobType(ctx, c.projectID, id) + if err != nil { + return nil, err + } + + return &Job{ + service: c.service, + projectID: c.projectID, + jobID: id, + isQuery: jobType == queryJobType, + }, nil +} + +func (j *Job) ID() string { + return j.jobID +} + +// State is one of a sequence of states that a Job progresses through as it is processed. +type State int + +const ( + Pending State = iota + Running + Done +) + +// JobStatus contains the current State of a job, and errors encountered while processing that job. +type JobStatus struct { + State State + + err error + + // All errors encountered during the running of the job. + // Not all Errors are fatal, so errors here do not necessarily mean that the job has completed or was unsuccessful. + Errors []*Error +} + +// setJobRef initializes job's JobReference if given a non-empty jobID. +// projectID must be non-empty. 
+func setJobRef(job *bq.Job, jobID, projectID string) { + if jobID == "" { + return + } + // We don't check whether projectID is empty; the server will return an + // error when it encounters the resulting JobReference. + + job.JobReference = &bq.JobReference{ + JobId: jobID, + ProjectId: projectID, + } +} + +// Done reports whether the job has completed. +// After Done returns true, the Err method will return an error if the job completed unsuccessfully. +func (s *JobStatus) Done() bool { + return s.State == Done +} + +// Err returns the error that caused the job to complete unsuccessfully (if any). +func (s *JobStatus) Err() error { + return s.err +} + +// Status returns the current status of the job. It fails if the Status could not be determined. +func (j *Job) Status(ctx context.Context) (*JobStatus, error) { + return j.service.jobStatus(ctx, j.projectID, j.jobID) +} + +// Cancel requests that a job be cancelled. This method returns without waiting for +// cancellation to take effect. To check whether the job has terminated, use Job.Status. +// Cancelled jobs may still incur costs. +func (j *Job) Cancel(ctx context.Context) error { + return j.service.jobCancel(ctx, j.projectID, j.jobID) +} + +// Wait blocks until the job or the context is done. It returns the final status +// of the job. +// If an error occurs while retrieving the status, Wait returns that error. But +// Wait returns nil if the status was retrieved successfully, even if +// status.Err() != nil. So callers must check both errors. See the example. 
+func (j *Job) Wait(ctx context.Context) (*JobStatus, error) { + var js *JobStatus + err := internal.Retry(ctx, gax.Backoff{}, func() (stop bool, err error) { + js, err = j.Status(ctx) + if err != nil { + return true, err + } + if js.Done() { + return true, nil + } + return false, nil + }) + if err != nil { + return nil, err + } + return js, nil +} diff --git a/vendor/cloud.google.com/go/bigquery/load.go b/vendor/cloud.google.com/go/bigquery/load.go new file mode 100644 index 00000000..14b55409 --- /dev/null +++ b/vendor/cloud.google.com/go/bigquery/load.go @@ -0,0 +1,86 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package bigquery + +import ( + "golang.org/x/net/context" + bq "google.golang.org/api/bigquery/v2" +) + +// LoadConfig holds the configuration for a load job. +type LoadConfig struct { + // JobID is the ID to use for the load job. If unset, a job ID will be automatically created. + JobID string + + // Src is the source from which data will be loaded. + Src LoadSource + + // Dst is the table into which the data will be loaded. + Dst *Table + + // CreateDisposition specifies the circumstances under which the destination table will be created. + // The default is CreateIfNeeded. + CreateDisposition TableCreateDisposition + + // WriteDisposition specifies how existing data in the destination table is treated. + // The default is WriteAppend. 
+ WriteDisposition TableWriteDisposition +} + +// A Loader loads data from Google Cloud Storage into a BigQuery table. +type Loader struct { + LoadConfig + c *Client +} + +// A LoadSource represents a source of data that can be loaded into +// a BigQuery table. +// +// This package defines two LoadSources: GCSReference, for Google Cloud Storage +// objects, and ReaderSource, for data read from an io.Reader. +type LoadSource interface { + populateInsertJobConfForLoad(conf *insertJobConf) +} + +// LoaderFrom returns a Loader which can be used to load data into a BigQuery table. +// The returned Loader may optionally be further configured before its Run method is called. +func (t *Table) LoaderFrom(src LoadSource) *Loader { + return &Loader{ + c: t.c, + LoadConfig: LoadConfig{ + Src: src, + Dst: t, + }, + } +} + +// Run initiates a load job. +func (l *Loader) Run(ctx context.Context) (*Job, error) { + job := &bq.Job{ + Configuration: &bq.JobConfiguration{ + Load: &bq.JobConfigurationLoad{ + CreateDisposition: string(l.CreateDisposition), + WriteDisposition: string(l.WriteDisposition), + }, + }, + } + conf := &insertJobConf{job: job} + l.Src.populateInsertJobConfForLoad(conf) + setJobRef(job, l.JobID, l.c.projectID) + + job.Configuration.Load.DestinationTable = l.Dst.tableRefProto() + + return l.c.service.insertJob(ctx, l.c.projectID, conf) +} diff --git a/vendor/cloud.google.com/go/bigquery/load_test.go b/vendor/cloud.google.com/go/bigquery/load_test.go new file mode 100644 index 00000000..acb62bc4 --- /dev/null +++ b/vendor/cloud.google.com/go/bigquery/load_test.go @@ -0,0 +1,229 @@ +// Copyright 2015 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package bigquery + +import ( + "reflect" + "strings" + "testing" + + "golang.org/x/net/context" + + "cloud.google.com/go/internal/pretty" + bq "google.golang.org/api/bigquery/v2" +) + +func defaultLoadJob() *bq.Job { + return &bq.Job{ + Configuration: &bq.JobConfiguration{ + Load: &bq.JobConfigurationLoad{ + DestinationTable: &bq.TableReference{ + ProjectId: "project-id", + DatasetId: "dataset-id", + TableId: "table-id", + }, + SourceUris: []string{"uri"}, + }, + }, + } +} + +func stringFieldSchema() *FieldSchema { + return &FieldSchema{Name: "fieldname", Type: StringFieldType} +} + +func nestedFieldSchema() *FieldSchema { + return &FieldSchema{ + Name: "nested", + Type: RecordFieldType, + Schema: Schema{stringFieldSchema()}, + } +} + +func bqStringFieldSchema() *bq.TableFieldSchema { + return &bq.TableFieldSchema{ + Name: "fieldname", + Type: "STRING", + } +} + +func bqNestedFieldSchema() *bq.TableFieldSchema { + return &bq.TableFieldSchema{ + Name: "nested", + Type: "RECORD", + Fields: []*bq.TableFieldSchema{bqStringFieldSchema()}, + } +} + +func TestLoad(t *testing.T) { + c := &Client{projectID: "project-id"} + + testCases := []struct { + dst *Table + src LoadSource + config LoadConfig + want *bq.Job + }{ + { + dst: c.Dataset("dataset-id").Table("table-id"), + src: NewGCSReference("uri"), + want: defaultLoadJob(), + }, + { + dst: c.Dataset("dataset-id").Table("table-id"), + config: LoadConfig{ + CreateDisposition: CreateNever, + WriteDisposition: WriteTruncate, + JobID: "ajob", + }, + src: NewGCSReference("uri"), + want: func() *bq.Job { + j 
:= defaultLoadJob() + j.Configuration.Load.CreateDisposition = "CREATE_NEVER" + j.Configuration.Load.WriteDisposition = "WRITE_TRUNCATE" + j.JobReference = &bq.JobReference{ + JobId: "ajob", + ProjectId: "project-id", + } + return j + }(), + }, + { + dst: c.Dataset("dataset-id").Table("table-id"), + src: func() *GCSReference { + g := NewGCSReference("uri") + g.MaxBadRecords = 1 + g.AllowJaggedRows = true + g.AllowQuotedNewlines = true + g.IgnoreUnknownValues = true + return g + }(), + want: func() *bq.Job { + j := defaultLoadJob() + j.Configuration.Load.MaxBadRecords = 1 + j.Configuration.Load.AllowJaggedRows = true + j.Configuration.Load.AllowQuotedNewlines = true + j.Configuration.Load.IgnoreUnknownValues = true + return j + }(), + }, + { + dst: c.Dataset("dataset-id").Table("table-id"), + src: func() *GCSReference { + g := NewGCSReference("uri") + g.Schema = Schema{ + stringFieldSchema(), + nestedFieldSchema(), + } + return g + }(), + want: func() *bq.Job { + j := defaultLoadJob() + j.Configuration.Load.Schema = &bq.TableSchema{ + Fields: []*bq.TableFieldSchema{ + bqStringFieldSchema(), + bqNestedFieldSchema(), + }} + return j + }(), + }, + { + dst: c.Dataset("dataset-id").Table("table-id"), + src: func() *GCSReference { + g := NewGCSReference("uri") + g.SkipLeadingRows = 1 + g.SourceFormat = JSON + g.Encoding = UTF_8 + g.FieldDelimiter = "\t" + g.Quote = "-" + return g + }(), + want: func() *bq.Job { + j := defaultLoadJob() + j.Configuration.Load.SkipLeadingRows = 1 + j.Configuration.Load.SourceFormat = "NEWLINE_DELIMITED_JSON" + j.Configuration.Load.Encoding = "UTF-8" + j.Configuration.Load.FieldDelimiter = "\t" + hyphen := "-" + j.Configuration.Load.Quote = &hyphen + return j + }(), + }, + { + dst: c.Dataset("dataset-id").Table("table-id"), + src: NewGCSReference("uri"), + want: func() *bq.Job { + j := defaultLoadJob() + // Quote is left unset in GCSReference, so should be nil here. 
+ j.Configuration.Load.Quote = nil + return j + }(), + }, + { + dst: c.Dataset("dataset-id").Table("table-id"), + src: func() *GCSReference { + g := NewGCSReference("uri") + g.ForceZeroQuote = true + return g + }(), + want: func() *bq.Job { + j := defaultLoadJob() + empty := "" + j.Configuration.Load.Quote = &empty + return j + }(), + }, + { + dst: c.Dataset("dataset-id").Table("table-id"), + src: func() *ReaderSource { + r := NewReaderSource(strings.NewReader("foo")) + r.SkipLeadingRows = 1 + r.SourceFormat = JSON + r.Encoding = UTF_8 + r.FieldDelimiter = "\t" + r.Quote = "-" + return r + }(), + want: func() *bq.Job { + j := defaultLoadJob() + j.Configuration.Load.SourceUris = nil + j.Configuration.Load.SkipLeadingRows = 1 + j.Configuration.Load.SourceFormat = "NEWLINE_DELIMITED_JSON" + j.Configuration.Load.Encoding = "UTF-8" + j.Configuration.Load.FieldDelimiter = "\t" + hyphen := "-" + j.Configuration.Load.Quote = &hyphen + return j + }(), + }, + } + + for i, tc := range testCases { + s := &testService{} + c.service = s + loader := tc.dst.LoaderFrom(tc.src) + tc.config.Src = tc.src + tc.config.Dst = tc.dst + loader.LoadConfig = tc.config + if _, err := loader.Run(context.Background()); err != nil { + t.Errorf("%d: err calling Loader.Run: %v", i, err) + continue + } + if !reflect.DeepEqual(s.Job, tc.want) { + t.Errorf("loading %d: got:\n%v\nwant:\n%v", + i, pretty.Value(s.Job), pretty.Value(tc.want)) + } + } +} diff --git a/vendor/cloud.google.com/go/bigquery/params.go b/vendor/cloud.google.com/go/bigquery/params.go new file mode 100644 index 00000000..2b97f3a2 --- /dev/null +++ b/vendor/cloud.google.com/go/bigquery/params.go @@ -0,0 +1,265 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package bigquery + +import ( + "encoding/base64" + "errors" + "fmt" + "reflect" + "regexp" + "time" + + "cloud.google.com/go/civil" + "cloud.google.com/go/internal/fields" + + bq "google.golang.org/api/bigquery/v2" +) + +var ( + // See https://cloud.google.com/bigquery/docs/reference/standard-sql/data-types#timestamp-type. + timestampFormat = "2006-01-02 15:04:05.999999-07:00" + + // See https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#schema.fields.name + validFieldName = regexp.MustCompile("^[a-zA-Z_][a-zA-Z0-9_]{0,127}$") +) + +func bqTagParser(t reflect.StructTag) (name string, keep bool, other interface{}, err error) { + if s := t.Get("bigquery"); s != "" { + if s == "-" { + return "", false, nil, nil + } + if !validFieldName.MatchString(s) { + return "", false, nil, errInvalidFieldName + } + return s, true, nil, nil + } + return "", true, nil, nil +} + +var fieldCache = fields.NewCache(bqTagParser, nil, nil) + +var ( + int64ParamType = &bq.QueryParameterType{Type: "INT64"} + float64ParamType = &bq.QueryParameterType{Type: "FLOAT64"} + boolParamType = &bq.QueryParameterType{Type: "BOOL"} + stringParamType = &bq.QueryParameterType{Type: "STRING"} + bytesParamType = &bq.QueryParameterType{Type: "BYTES"} + dateParamType = &bq.QueryParameterType{Type: "DATE"} + timeParamType = &bq.QueryParameterType{Type: "TIME"} + dateTimeParamType = &bq.QueryParameterType{Type: "DATETIME"} + timestampParamType = &bq.QueryParameterType{Type: "TIMESTAMP"} +) + +var ( + typeOfDate = reflect.TypeOf(civil.Date{}) + typeOfTime = 
reflect.TypeOf(civil.Time{}) + typeOfDateTime = reflect.TypeOf(civil.DateTime{}) + typeOfGoTime = reflect.TypeOf(time.Time{}) +) + +// A QueryParameter is a parameter to a query. +type QueryParameter struct { + // Name is used for named parameter mode. + // It must match the name in the query case-insensitively. + Name string + + // Value is the value of the parameter. + // The following Go types are supported, with their corresponding + // Bigquery types: + // int, int8, int16, int32, int64, uint8, uint16, uint32: INT64 + // Note that uint, uint64 and uintptr are not supported, because + // they may contain values that cannot fit into a 64-bit signed integer. + // float32, float64: FLOAT64 + // bool: BOOL + // string: STRING + // []byte: BYTES + // time.Time: TIMESTAMP + // Arrays and slices of the above. + // Structs of the above. Only the exported fields are used. + Value interface{} +} + +func (p QueryParameter) toRaw() (*bq.QueryParameter, error) { + pv, err := paramValue(reflect.ValueOf(p.Value)) + if err != nil { + return nil, err + } + pt, err := paramType(reflect.TypeOf(p.Value)) + if err != nil { + return nil, err + } + return &bq.QueryParameter{ + Name: p.Name, + ParameterValue: &pv, + ParameterType: pt, + }, nil +} + +func paramType(t reflect.Type) (*bq.QueryParameterType, error) { + if t == nil { + return nil, errors.New("bigquery: nil parameter") + } + switch t { + case typeOfDate: + return dateParamType, nil + case typeOfTime: + return timeParamType, nil + case typeOfDateTime: + return dateTimeParamType, nil + case typeOfGoTime: + return timestampParamType, nil + } + switch t.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Uint8, reflect.Uint16, reflect.Uint32: + return int64ParamType, nil + + case reflect.Float32, reflect.Float64: + return float64ParamType, nil + + case reflect.Bool: + return boolParamType, nil + + case reflect.String: + return stringParamType, nil + + case reflect.Slice: + if 
t.Elem().Kind() == reflect.Uint8 { + return bytesParamType, nil + } + fallthrough + + case reflect.Array: + et, err := paramType(t.Elem()) + if err != nil { + return nil, err + } + return &bq.QueryParameterType{Type: "ARRAY", ArrayType: et}, nil + + case reflect.Ptr: + if t.Elem().Kind() != reflect.Struct { + break + } + t = t.Elem() + fallthrough + + case reflect.Struct: + var fts []*bq.QueryParameterTypeStructTypes + fields, err := fieldCache.Fields(t) + if err != nil { + return nil, err + } + for _, f := range fields { + pt, err := paramType(f.Type) + if err != nil { + return nil, err + } + fts = append(fts, &bq.QueryParameterTypeStructTypes{ + Name: f.Name, + Type: pt, + }) + } + return &bq.QueryParameterType{Type: "STRUCT", StructTypes: fts}, nil + } + return nil, fmt.Errorf("bigquery: Go type %s cannot be represented as a parameter type", t) +} + +func paramValue(v reflect.Value) (bq.QueryParameterValue, error) { + var res bq.QueryParameterValue + if !v.IsValid() { + return res, errors.New("bigquery: nil parameter") + } + t := v.Type() + switch t { + case typeOfDate: + res.Value = v.Interface().(civil.Date).String() + return res, nil + + case typeOfTime: + // civil.Time has nanosecond resolution, but BigQuery TIME only microsecond. 
+ res.Value = civilTimeParamString(v.Interface().(civil.Time)) + return res, nil + + case typeOfDateTime: + dt := v.Interface().(civil.DateTime) + res.Value = dt.Date.String() + " " + civilTimeParamString(dt.Time) + return res, nil + + case typeOfGoTime: + res.Value = v.Interface().(time.Time).Format(timestampFormat) + return res, nil + } + switch t.Kind() { + case reflect.Slice: + if t.Elem().Kind() == reflect.Uint8 { + res.Value = base64.StdEncoding.EncodeToString(v.Interface().([]byte)) + return res, nil + } + fallthrough + + case reflect.Array: + var vals []*bq.QueryParameterValue + for i := 0; i < v.Len(); i++ { + val, err := paramValue(v.Index(i)) + if err != nil { + return bq.QueryParameterValue{}, err + } + vals = append(vals, &val) + } + return bq.QueryParameterValue{ArrayValues: vals}, nil + + case reflect.Ptr: + if t.Elem().Kind() != reflect.Struct { + return res, fmt.Errorf("bigquery: Go type %s cannot be represented as a parameter value", t) + } + t = t.Elem() + v = v.Elem() + if !v.IsValid() { + // nil pointer becomes empty value + return res, nil + } + fallthrough + + case reflect.Struct: + fields, err := fieldCache.Fields(t) + if err != nil { + return bq.QueryParameterValue{}, err + } + res.StructValues = map[string]bq.QueryParameterValue{} + for _, f := range fields { + fv := v.FieldByIndex(f.Index) + fp, err := paramValue(fv) + if err != nil { + return bq.QueryParameterValue{}, err + } + res.StructValues[f.Name] = fp + } + return res, nil + } + // None of the above: assume a scalar type. (If it's not a valid type, + // paramType will catch the error.) 
+ res.Value = fmt.Sprint(v.Interface()) + return res, nil +} + +func civilTimeParamString(t civil.Time) string { + if t.Nanosecond == 0 { + return t.String() + } else { + micro := (t.Nanosecond + 500) / 1000 // round to nearest microsecond + t.Nanosecond = 0 + return t.String() + fmt.Sprintf(".%06d", micro) + } +} diff --git a/vendor/cloud.google.com/go/bigquery/params_test.go b/vendor/cloud.google.com/go/bigquery/params_test.go new file mode 100644 index 00000000..10da8710 --- /dev/null +++ b/vendor/cloud.google.com/go/bigquery/params_test.go @@ -0,0 +1,262 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package bigquery + +import ( + "errors" + "math" + "reflect" + "testing" + "time" + + "cloud.google.com/go/civil" + "golang.org/x/net/context" + bq "google.golang.org/api/bigquery/v2" +) + +var scalarTests = []struct { + val interface{} + want string +}{ + {int64(0), "0"}, + {3.14, "3.14"}, + {3.14159e-87, "3.14159e-87"}, + {true, "true"}, + {"string", "string"}, + {"\u65e5\u672c\u8a9e\n", "\u65e5\u672c\u8a9e\n"}, + {math.NaN(), "NaN"}, + {[]byte("foo"), "Zm9v"}, // base64 encoding of "foo" + {time.Date(2016, 3, 20, 4, 22, 9, 5000, time.FixedZone("neg1-2", -3720)), + "2016-03-20 04:22:09.000005-01:02"}, + {civil.Date{2016, 3, 20}, "2016-03-20"}, + {civil.Time{4, 5, 6, 789000000}, "04:05:06.789000"}, + {civil.DateTime{civil.Date{2016, 3, 20}, civil.Time{4, 5, 6, 789000000}}, "2016-03-20 04:05:06.789000"}, +} + +type S1 struct { + A int + B *S2 + C bool +} + +type S2 struct { + D string + e int +} + +var s1 = S1{ + A: 1, + B: &S2{D: "s"}, + C: true, +} + +func sval(s string) bq.QueryParameterValue { + return bq.QueryParameterValue{Value: s} +} + +func TestParamValueScalar(t *testing.T) { + for _, test := range scalarTests { + got, err := paramValue(reflect.ValueOf(test.val)) + if err != nil { + t.Errorf("%v: got %v, want nil", test.val, err) + continue + } + want := sval(test.want) + if !reflect.DeepEqual(got, want) { + t.Errorf("%v:\ngot %+v\nwant %+v", test.val, got, want) + } + } +} + +func TestParamValueArray(t *testing.T) { + qpv := bq.QueryParameterValue{ArrayValues: []*bq.QueryParameterValue{ + {Value: "1"}, + {Value: "2"}, + }, + } + for _, test := range []struct { + val interface{} + want bq.QueryParameterValue + }{ + {[]int(nil), bq.QueryParameterValue{}}, + {[]int{}, bq.QueryParameterValue{}}, + {[]int{1, 2}, qpv}, + {[2]int{1, 2}, qpv}, + } { + got, err := paramValue(reflect.ValueOf(test.val)) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(got, test.want) { + t.Errorf("%#v:\ngot %+v\nwant %+v", test.val, got, test.want) + } + } +} + +func 
TestParamValueStruct(t *testing.T) { + got, err := paramValue(reflect.ValueOf(s1)) + if err != nil { + t.Fatal(err) + } + want := bq.QueryParameterValue{ + StructValues: map[string]bq.QueryParameterValue{ + "A": sval("1"), + "B": bq.QueryParameterValue{ + StructValues: map[string]bq.QueryParameterValue{ + "D": sval("s"), + }, + }, + "C": sval("true"), + }, + } + if !reflect.DeepEqual(got, want) { + t.Errorf("got %+v\nwant %+v", got, want) + } +} + +func TestParamValueErrors(t *testing.T) { + // paramValue lets a few invalid types through, but paramType catches them. + // Since we never call one without the other that's fine. + for _, val := range []interface{}{nil, new([]int)} { + _, err := paramValue(reflect.ValueOf(val)) + if err == nil { + t.Errorf("%v (%T): got nil, want error", val, val) + } + } +} + +func TestParamType(t *testing.T) { + for _, test := range []struct { + val interface{} + want *bq.QueryParameterType + }{ + {0, int64ParamType}, + {uint32(32767), int64ParamType}, + {3.14, float64ParamType}, + {float32(3.14), float64ParamType}, + {math.NaN(), float64ParamType}, + {true, boolParamType}, + {"", stringParamType}, + {"string", stringParamType}, + {time.Now(), timestampParamType}, + {[]byte("foo"), bytesParamType}, + {[]int{}, &bq.QueryParameterType{Type: "ARRAY", ArrayType: int64ParamType}}, + {[3]bool{}, &bq.QueryParameterType{Type: "ARRAY", ArrayType: boolParamType}}, + {S1{}, &bq.QueryParameterType{ + Type: "STRUCT", + StructTypes: []*bq.QueryParameterTypeStructTypes{ + {Name: "A", Type: int64ParamType}, + {Name: "B", Type: &bq.QueryParameterType{ + Type: "STRUCT", + StructTypes: []*bq.QueryParameterTypeStructTypes{ + {Name: "D", Type: stringParamType}, + }, + }}, + {Name: "C", Type: boolParamType}, + }, + }}, + } { + got, err := paramType(reflect.TypeOf(test.val)) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(got, test.want) { + t.Errorf("%v (%T): got %v, want %v", test.val, test.val, got, test.want) + } + } +} + +func 
TestParamTypeErrors(t *testing.T) { + for _, val := range []interface{}{ + nil, uint(0), new([]int), make(chan int), + } { + _, err := paramType(reflect.TypeOf(val)) + if err == nil { + t.Errorf("%v (%T): got nil, want error", val, val) + } + } +} + +func TestIntegration_ScalarParam(t *testing.T) { + c := getClient(t) + for _, test := range scalarTests { + got, err := paramRoundTrip(c, test.val) + if err != nil { + t.Fatal(err) + } + if !equal(got, test.val) { + t.Errorf("\ngot %#v (%T)\nwant %#v (%T)", got, got, test.val, test.val) + } + } +} + +func TestIntegration_OtherParam(t *testing.T) { + c := getClient(t) + for _, test := range []struct { + val interface{} + want interface{} + }{ + {[]int(nil), []Value(nil)}, + {[]int{}, []Value(nil)}, + {[]int{1, 2}, []Value{int64(1), int64(2)}}, + {[3]int{1, 2, 3}, []Value{int64(1), int64(2), int64(3)}}, + {S1{}, []Value{int64(0), nil, false}}, + {s1, []Value{int64(1), []Value{"s"}, true}}, + } { + got, err := paramRoundTrip(c, test.val) + if err != nil { + t.Fatal(err) + } + if !equal(got, test.want) { + t.Errorf("\ngot %#v (%T)\nwant %#v (%T)", got, got, test.want, test.want) + } + } +} + +func paramRoundTrip(c *Client, x interface{}) (Value, error) { + q := c.Query("select ?") + q.Parameters = []QueryParameter{{Value: x}} + it, err := q.Read(context.Background()) + if err != nil { + return nil, err + } + var val []Value + err = it.Next(&val) + if err != nil { + return nil, err + } + if len(val) != 1 { + return nil, errors.New("wrong number of values") + } + return val[0], nil +} + +func equal(x1, x2 interface{}) bool { + if reflect.TypeOf(x1) != reflect.TypeOf(x2) { + return false + } + switch x1 := x1.(type) { + case float64: + if math.IsNaN(x1) { + return math.IsNaN(x2.(float64)) + } + return x1 == x2 + case time.Time: + // BigQuery is only accurate to the microsecond. 
+ return x1.Round(time.Microsecond).Equal(x2.(time.Time).Round(time.Microsecond)) + default: + return reflect.DeepEqual(x1, x2) + } +} diff --git a/vendor/cloud.google.com/go/bigquery/query.go b/vendor/cloud.google.com/go/bigquery/query.go new file mode 100644 index 00000000..0d131f30 --- /dev/null +++ b/vendor/cloud.google.com/go/bigquery/query.go @@ -0,0 +1,196 @@ +// Copyright 2015 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package bigquery + +import ( + "golang.org/x/net/context" + bq "google.golang.org/api/bigquery/v2" +) + +// QueryConfig holds the configuration for a query job. +type QueryConfig struct { + // JobID is the ID to use for the query job. If this field is empty, a job ID + // will be automatically created. + JobID string + + // Dst is the table into which the results of the query will be written. + // If this field is nil, a temporary table will be created. + Dst *Table + + // The query to execute. See https://cloud.google.com/bigquery/query-reference for details. + Q string + + // DefaultProjectID and DefaultDatasetID specify the dataset to use for unqualified table names in the query. + // If DefaultProjectID is set, DefaultDatasetID must also be set. + DefaultProjectID string + DefaultDatasetID string + + // TableDefinitions describes data sources outside of BigQuery. + // The map keys may be used as table names in the query string. 
+ TableDefinitions map[string]ExternalData + + // CreateDisposition specifies the circumstances under which the destination table will be created. + // The default is CreateIfNeeded. + CreateDisposition TableCreateDisposition + + // WriteDisposition specifies how existing data in the destination table is treated. + // The default is WriteAppend. + WriteDisposition TableWriteDisposition + + // DisableQueryCache prevents results being fetched from the query cache. + // If this field is false, results are fetched from the cache if they are available. + // The query cache is a best-effort cache that is flushed whenever tables in the query are modified. + // Cached results are only available when TableID is unspecified in the query's destination Table. + // For more information, see https://cloud.google.com/bigquery/querying-data#querycaching + DisableQueryCache bool + + // DisableFlattenedResults prevents results being flattened. + // If this field is false, results from nested and repeated fields are flattened. + // DisableFlattenedResults implies AllowLargeResults + // For more information, see https://cloud.google.com/bigquery/docs/data#nested + DisableFlattenedResults bool + + // AllowLargeResults allows the query to produce arbitrarily large result tables. + // The destination must be a table. + // When using this option, queries will take longer to execute, even if the result set is small. + // For additional limitations, see https://cloud.google.com/bigquery/querying-data#largequeryresults + AllowLargeResults bool + + // Priority specifies the priority with which to schedule the query. + // The default priority is InteractivePriority. + // For more information, see https://cloud.google.com/bigquery/querying-data#batchqueries + Priority QueryPriority + + // MaxBillingTier sets the maximum billing tier for a Query. + // Queries that have resource usage beyond this tier will fail (without + // incurring a charge). 
If this field is zero, the project default will be used. + MaxBillingTier int + + // MaxBytesBilled limits the number of bytes billed for + // this job. Queries that would exceed this limit will fail (without incurring + // a charge). + // If this field is less than 1, the project default will be + // used. + MaxBytesBilled int64 + + // UseStandardSQL causes the query to use standard SQL. + // The default is false (using legacy SQL). + UseStandardSQL bool + + // Parameters is a list of query parameters. The presence of parameters + // implies the use of standard SQL. + // If the query uses positional syntax ("?"), then no parameter may have a name. + // If the query uses named syntax ("@p"), then all parameters must have names. + // It is illegal to mix positional and named syntax. + Parameters []QueryParameter +} + +// QueryPriority specifies a priority with which a query is to be executed. +type QueryPriority string + +const ( + BatchPriority QueryPriority = "BATCH" + InteractivePriority QueryPriority = "INTERACTIVE" +) + +// A Query queries data from a BigQuery table. Use Client.Query to create a Query. +type Query struct { + client *Client + QueryConfig +} + +// Query creates a query with string q. +// The returned Query may optionally be further configured before its Run method is called. +func (c *Client) Query(q string) *Query { + return &Query{ + client: c, + QueryConfig: QueryConfig{Q: q}, + } +} + +// Run initiates a query job. 
+func (q *Query) Run(ctx context.Context) (*Job, error) { + job := &bq.Job{ + Configuration: &bq.JobConfiguration{ + Query: &bq.JobConfigurationQuery{}, + }, + } + setJobRef(job, q.JobID, q.client.projectID) + + if err := q.QueryConfig.populateJobQueryConfig(job.Configuration.Query); err != nil { + return nil, err + } + j, err := q.client.service.insertJob(ctx, q.client.projectID, &insertJobConf{job: job}) + if err != nil { + return nil, err + } + j.isQuery = true + return j, nil +} + +func (q *QueryConfig) populateJobQueryConfig(conf *bq.JobConfigurationQuery) error { + conf.Query = q.Q + + if len(q.TableDefinitions) > 0 { + conf.TableDefinitions = make(map[string]bq.ExternalDataConfiguration) + } + for name, data := range q.TableDefinitions { + conf.TableDefinitions[name] = data.externalDataConfig() + } + + if q.DefaultProjectID != "" || q.DefaultDatasetID != "" { + conf.DefaultDataset = &bq.DatasetReference{ + DatasetId: q.DefaultDatasetID, + ProjectId: q.DefaultProjectID, + } + } + + if tier := int64(q.MaxBillingTier); tier > 0 { + conf.MaximumBillingTier = &tier + } + conf.CreateDisposition = string(q.CreateDisposition) + conf.WriteDisposition = string(q.WriteDisposition) + conf.AllowLargeResults = q.AllowLargeResults + conf.Priority = string(q.Priority) + + f := false + if q.DisableQueryCache { + conf.UseQueryCache = &f + } + if q.DisableFlattenedResults { + conf.FlattenResults = &f + // DisableFlattenResults implies AllowLargeResults. 
+ conf.AllowLargeResults = true + } + if q.MaxBytesBilled >= 1 { + conf.MaximumBytesBilled = q.MaxBytesBilled + } + if q.UseStandardSQL || len(q.Parameters) > 0 { + conf.UseLegacySql = false + conf.ForceSendFields = append(conf.ForceSendFields, "UseLegacySql") + } + + if q.Dst != nil && !q.Dst.implicitTable() { + conf.DestinationTable = q.Dst.tableRefProto() + } + for _, p := range q.Parameters { + qp, err := p.toRaw() + if err != nil { + return err + } + conf.QueryParameters = append(conf.QueryParameters, qp) + } + return nil +} diff --git a/vendor/cloud.google.com/go/bigquery/query_test.go b/vendor/cloud.google.com/go/bigquery/query_test.go new file mode 100644 index 00000000..1715d888 --- /dev/null +++ b/vendor/cloud.google.com/go/bigquery/query_test.go @@ -0,0 +1,305 @@ +// Copyright 2015 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package bigquery + +import ( + "reflect" + "testing" + + "golang.org/x/net/context" + + bq "google.golang.org/api/bigquery/v2" +) + +func defaultQueryJob() *bq.Job { + return &bq.Job{ + Configuration: &bq.JobConfiguration{ + Query: &bq.JobConfigurationQuery{ + DestinationTable: &bq.TableReference{ + ProjectId: "project-id", + DatasetId: "dataset-id", + TableId: "table-id", + }, + Query: "query string", + DefaultDataset: &bq.DatasetReference{ + ProjectId: "def-project-id", + DatasetId: "def-dataset-id", + }, + }, + }, + } +} + +func TestQuery(t *testing.T) { + c := &Client{ + projectID: "project-id", + } + testCases := []struct { + dst *Table + src *QueryConfig + want *bq.Job + }{ + { + dst: c.Dataset("dataset-id").Table("table-id"), + src: defaultQuery, + want: defaultQueryJob(), + }, + { + dst: c.Dataset("dataset-id").Table("table-id"), + src: &QueryConfig{ + Q: "query string", + }, + want: func() *bq.Job { + j := defaultQueryJob() + j.Configuration.Query.DefaultDataset = nil + return j + }(), + }, + { + dst: &Table{}, + src: defaultQuery, + want: func() *bq.Job { + j := defaultQueryJob() + j.Configuration.Query.DestinationTable = nil + return j + }(), + }, + { + dst: c.Dataset("dataset-id").Table("table-id"), + src: &QueryConfig{ + Q: "query string", + TableDefinitions: map[string]ExternalData{ + "atable": func() *GCSReference { + g := NewGCSReference("uri") + g.AllowJaggedRows = true + g.AllowQuotedNewlines = true + g.Compression = Gzip + g.Encoding = UTF_8 + g.FieldDelimiter = ";" + g.IgnoreUnknownValues = true + g.MaxBadRecords = 1 + g.Quote = "'" + g.SkipLeadingRows = 2 + g.Schema = Schema([]*FieldSchema{ + {Name: "name", Type: StringFieldType}, + }) + return g + }(), + }, + }, + want: func() *bq.Job { + j := defaultQueryJob() + j.Configuration.Query.DefaultDataset = nil + td := make(map[string]bq.ExternalDataConfiguration) + quote := "'" + td["atable"] = bq.ExternalDataConfiguration{ + Compression: "GZIP", + IgnoreUnknownValues: true, + MaxBadRecords: 1, 
+ SourceFormat: "CSV", // must be explicitly set. + SourceUris: []string{"uri"}, + CsvOptions: &bq.CsvOptions{ + AllowJaggedRows: true, + AllowQuotedNewlines: true, + Encoding: "UTF-8", + FieldDelimiter: ";", + SkipLeadingRows: 2, + Quote: "e, + }, + Schema: &bq.TableSchema{ + Fields: []*bq.TableFieldSchema{ + {Name: "name", Type: "STRING"}, + }, + }, + } + j.Configuration.Query.TableDefinitions = td + return j + }(), + }, + { + dst: &Table{ + ProjectID: "project-id", + DatasetID: "dataset-id", + TableID: "table-id", + }, + src: &QueryConfig{ + Q: "query string", + DefaultProjectID: "def-project-id", + DefaultDatasetID: "def-dataset-id", + CreateDisposition: CreateNever, + WriteDisposition: WriteTruncate, + }, + want: func() *bq.Job { + j := defaultQueryJob() + j.Configuration.Query.WriteDisposition = "WRITE_TRUNCATE" + j.Configuration.Query.CreateDisposition = "CREATE_NEVER" + return j + }(), + }, + { + dst: c.Dataset("dataset-id").Table("table-id"), + src: &QueryConfig{ + Q: "query string", + DefaultProjectID: "def-project-id", + DefaultDatasetID: "def-dataset-id", + DisableQueryCache: true, + }, + want: func() *bq.Job { + j := defaultQueryJob() + f := false + j.Configuration.Query.UseQueryCache = &f + return j + }(), + }, + { + dst: c.Dataset("dataset-id").Table("table-id"), + src: &QueryConfig{ + Q: "query string", + DefaultProjectID: "def-project-id", + DefaultDatasetID: "def-dataset-id", + AllowLargeResults: true, + }, + want: func() *bq.Job { + j := defaultQueryJob() + j.Configuration.Query.AllowLargeResults = true + return j + }(), + }, + { + dst: c.Dataset("dataset-id").Table("table-id"), + src: &QueryConfig{ + Q: "query string", + DefaultProjectID: "def-project-id", + DefaultDatasetID: "def-dataset-id", + DisableFlattenedResults: true, + }, + want: func() *bq.Job { + j := defaultQueryJob() + f := false + j.Configuration.Query.FlattenResults = &f + j.Configuration.Query.AllowLargeResults = true + return j + }(), + }, + { + dst: 
c.Dataset("dataset-id").Table("table-id"), + src: &QueryConfig{ + Q: "query string", + DefaultProjectID: "def-project-id", + DefaultDatasetID: "def-dataset-id", + Priority: QueryPriority("low"), + }, + want: func() *bq.Job { + j := defaultQueryJob() + j.Configuration.Query.Priority = "low" + return j + }(), + }, + { + dst: c.Dataset("dataset-id").Table("table-id"), + src: &QueryConfig{ + Q: "query string", + DefaultProjectID: "def-project-id", + DefaultDatasetID: "def-dataset-id", + MaxBillingTier: 3, + MaxBytesBilled: 5, + }, + want: func() *bq.Job { + j := defaultQueryJob() + tier := int64(3) + j.Configuration.Query.MaximumBillingTier = &tier + j.Configuration.Query.MaximumBytesBilled = 5 + return j + }(), + }, + { + dst: c.Dataset("dataset-id").Table("table-id"), + src: &QueryConfig{ + Q: "query string", + DefaultProjectID: "def-project-id", + DefaultDatasetID: "def-dataset-id", + MaxBytesBilled: -1, + }, + want: defaultQueryJob(), + }, + { + dst: c.Dataset("dataset-id").Table("table-id"), + src: &QueryConfig{ + Q: "query string", + DefaultProjectID: "def-project-id", + DefaultDatasetID: "def-dataset-id", + UseStandardSQL: true, + }, + want: func() *bq.Job { + j := defaultQueryJob() + j.Configuration.Query.UseLegacySql = false + j.Configuration.Query.ForceSendFields = []string{"UseLegacySql"} + return j + }(), + }, + } + for _, tc := range testCases { + s := &testService{} + c.service = s + query := c.Query("") + query.QueryConfig = *tc.src + query.Dst = tc.dst + if _, err := query.Run(context.Background()); err != nil { + t.Errorf("err calling query: %v", err) + continue + } + if !reflect.DeepEqual(s.Job, tc.want) { + t.Errorf("querying: got:\n%v\nwant:\n%v", s.Job, tc.want) + } + } +} + +func TestConfiguringQuery(t *testing.T) { + s := &testService{} + c := &Client{ + projectID: "project-id", + service: s, + } + + query := c.Query("q") + query.JobID = "ajob" + query.DefaultProjectID = "def-project-id" + query.DefaultDatasetID = "def-dataset-id" + // Note: 
Other configuration fields are tested in other tests above. + // A lot of that can be consolidated once Client.Copy is gone. + + want := &bq.Job{ + Configuration: &bq.JobConfiguration{ + Query: &bq.JobConfigurationQuery{ + Query: "q", + DefaultDataset: &bq.DatasetReference{ + ProjectId: "def-project-id", + DatasetId: "def-dataset-id", + }, + }, + }, + JobReference: &bq.JobReference{ + JobId: "ajob", + ProjectId: "project-id", + }, + } + + if _, err := query.Run(context.Background()); err != nil { + t.Fatalf("err calling Query.Run: %v", err) + } + if !reflect.DeepEqual(s.Job, want) { + t.Errorf("querying: got:\n%v\nwant:\n%v", s.Job, want) + } +} diff --git a/vendor/cloud.google.com/go/bigquery/read.go b/vendor/cloud.google.com/go/bigquery/read.go new file mode 100644 index 00000000..c6a5cc1e --- /dev/null +++ b/vendor/cloud.google.com/go/bigquery/read.go @@ -0,0 +1,64 @@ +// Copyright 2015 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package bigquery + +import ( + "errors" + + "golang.org/x/net/context" +) + +func (conf *readTableConf) fetch(ctx context.Context, s service, token string) (*readDataResult, error) { + return s.readTabledata(ctx, conf, token) +} + +func (conf *readTableConf) setPaging(pc *pagingConf) { conf.paging = *pc } + +// Read fetches the contents of the table. 
+func (t *Table) Read(ctx context.Context) *RowIterator { + return newRowIterator(ctx, t.c.service, &readTableConf{ + projectID: t.ProjectID, + datasetID: t.DatasetID, + tableID: t.TableID, + }) +} + +func (conf *readQueryConf) fetch(ctx context.Context, s service, token string) (*readDataResult, error) { + return s.readQuery(ctx, conf, token) +} + +func (conf *readQueryConf) setPaging(pc *pagingConf) { conf.paging = *pc } + +// Read fetches the results of a query job. +// If j is not a query job, Read returns an error. +func (j *Job) Read(ctx context.Context) (*RowIterator, error) { + if !j.isQuery { + return nil, errors.New("Cannot read from a non-query job") + } + return newRowIterator(ctx, j.service, &readQueryConf{ + projectID: j.projectID, + jobID: j.jobID, + }), nil +} + +// Read submits a query for execution and returns the results via a RowIterator. +// It is a shorthand for Query.Run followed by Job.Read. +func (q *Query) Read(ctx context.Context) (*RowIterator, error) { + job, err := q.Run(ctx) + if err != nil { + return nil, err + } + return job.Read(ctx) +} diff --git a/vendor/cloud.google.com/go/bigquery/read_test.go b/vendor/cloud.google.com/go/bigquery/read_test.go new file mode 100644 index 00000000..498f4405 --- /dev/null +++ b/vendor/cloud.google.com/go/bigquery/read_test.go @@ -0,0 +1,303 @@ +// Copyright 2015 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package bigquery + +import ( + "errors" + "reflect" + "testing" + + "golang.org/x/net/context" + "google.golang.org/api/iterator" +) + +type readTabledataArgs struct { + conf *readTableConf + tok string +} + +type readQueryArgs struct { + conf *readQueryConf + tok string +} + +// readServiceStub services read requests by returning data from an in-memory list of values. +type readServiceStub struct { + // values and pageTokens are used as sources of data to return in response to calls to readTabledata or readQuery. + values [][][]Value // contains pages / rows / columns. + pageTokens map[string]string // maps incoming page token to returned page token. + + // arguments are recorded for later inspection. + readTabledataCalls []readTabledataArgs + readQueryCalls []readQueryArgs + + service +} + +func (s *readServiceStub) readValues(tok string) *readDataResult { + result := &readDataResult{ + pageToken: s.pageTokens[tok], + rows: s.values[0], + } + s.values = s.values[1:] + + return result +} +func (s *readServiceStub) readTabledata(ctx context.Context, conf *readTableConf, token string) (*readDataResult, error) { + s.readTabledataCalls = append(s.readTabledataCalls, readTabledataArgs{conf, token}) + return s.readValues(token), nil +} + +func (s *readServiceStub) readQuery(ctx context.Context, conf *readQueryConf, token string) (*readDataResult, error) { + s.readQueryCalls = append(s.readQueryCalls, readQueryArgs{conf, token}) + return s.readValues(token), nil +} + +func TestRead(t *testing.T) { + // The data for the service stub to return is populated for each test case in the testCases for loop. 
+ ctx := context.Background() + service := &readServiceStub{} + c := &Client{ + projectID: "project-id", + service: service, + } + + queryJob := &Job{ + projectID: "project-id", + jobID: "job-id", + service: service, + isQuery: true, + } + + for _, readFunc := range []func() *RowIterator{ + func() *RowIterator { + return c.Dataset("dataset-id").Table("table-id").Read(ctx) + }, + func() *RowIterator { + it, err := queryJob.Read(ctx) + if err != nil { + t.Fatal(err) + } + return it + }, + } { + testCases := []struct { + data [][][]Value + pageTokens map[string]string + want [][]Value + }{ + { + data: [][][]Value{{{1, 2}, {11, 12}}, {{30, 40}, {31, 41}}}, + pageTokens: map[string]string{"": "a", "a": ""}, + want: [][]Value{{1, 2}, {11, 12}, {30, 40}, {31, 41}}, + }, + { + data: [][][]Value{{{1, 2}, {11, 12}}, {{30, 40}, {31, 41}}}, + pageTokens: map[string]string{"": ""}, // no more pages after first one. + want: [][]Value{{1, 2}, {11, 12}}, + }, + } + for _, tc := range testCases { + service.values = tc.data + service.pageTokens = tc.pageTokens + if got, ok := collectValues(t, readFunc()); ok { + if !reflect.DeepEqual(got, tc.want) { + t.Errorf("reading: got:\n%v\nwant:\n%v", got, tc.want) + } + } + } + } +} + +func collectValues(t *testing.T, it *RowIterator) ([][]Value, bool) { + var got [][]Value + for { + var vals []Value + err := it.Next(&vals) + if err == iterator.Done { + break + } + if err != nil { + t.Errorf("err calling Next: %v", err) + return nil, false + } + got = append(got, vals) + } + return got, true +} + +func TestNoMoreValues(t *testing.T) { + c := &Client{ + projectID: "project-id", + service: &readServiceStub{ + values: [][][]Value{{{1, 2}, {11, 12}}}, + }, + } + it := c.Dataset("dataset-id").Table("table-id").Read(context.Background()) + var vals []Value + // We expect to retrieve two values and then fail on the next attempt. 
+ if err := it.Next(&vals); err != nil { + t.Fatalf("Next: got: %v: want: nil", err) + } + if err := it.Next(&vals); err != nil { + t.Fatalf("Next: got: %v: want: nil", err) + } + if err := it.Next(&vals); err != iterator.Done { + t.Fatalf("Next: got: %v: want: iterator.Done", err) + } +} + +// delayedReadStub simulates reading results from a query that has not yet +// completed. Its readQuery method initially reports that the query job is not +// yet complete. Subsequently, it proxies the request through to another +// service stub. +type delayedReadStub struct { + numDelays int + + readServiceStub +} + +func (s *delayedReadStub) readQuery(ctx context.Context, conf *readQueryConf, token string) (*readDataResult, error) { + if s.numDelays > 0 { + s.numDelays-- + return nil, errIncompleteJob + } + return s.readServiceStub.readQuery(ctx, conf, token) +} + +// TestIncompleteJob tests that an Iterator which reads from a query job will block until the job is complete. +func TestIncompleteJob(t *testing.T) { + service := &delayedReadStub{ + numDelays: 2, + readServiceStub: readServiceStub{ + values: [][][]Value{{{1, 2}}}, + }, + } + queryJob := &Job{ + projectID: "project-id", + jobID: "job-id", + service: service, + isQuery: true, + } + it, err := queryJob.Read(context.Background()) + if err != nil { + t.Fatalf("err calling Read: %v", err) + } + var got []Value + want := []Value{1, 2} + if err := it.Next(&got); err != nil { + t.Fatalf("Next: got: %v: want: nil", err) + } + if service.numDelays != 0 { + t.Errorf("remaining numDelays : got: %v want:0", service.numDelays) + } + if !reflect.DeepEqual(got, want) { + t.Errorf("reading: got:\n%v\nwant:\n%v", got, want) + } +} + +type errorReadService struct { + service +} + +var errBang = errors.New("bang!") + +func (s *errorReadService) readTabledata(ctx context.Context, conf *readTableConf, token string) (*readDataResult, error) { + return nil, errBang +} + +func TestReadError(t *testing.T) { + // test that service read 
errors are propagated back to the caller. + c := &Client{ + projectID: "project-id", + service: &errorReadService{}, + } + it := c.Dataset("dataset-id").Table("table-id").Read(context.Background()) + var vals []Value + if err := it.Next(&vals); err != errBang { + t.Fatalf("Get: got: %v: want: %v", err, errBang) + } +} + +func TestReadTabledataOptions(t *testing.T) { + // test that read options are propagated. + s := &readServiceStub{ + values: [][][]Value{{{1, 2}}}, + } + c := &Client{ + projectID: "project-id", + service: s, + } + it := c.Dataset("dataset-id").Table("table-id").Read(context.Background()) + it.PageInfo().MaxSize = 5 + var vals []Value + if err := it.Next(&vals); err != nil { + t.Fatal(err) + } + want := []readTabledataArgs{{ + conf: &readTableConf{ + projectID: "project-id", + datasetID: "dataset-id", + tableID: "table-id", + paging: pagingConf{ + recordsPerRequest: 5, + setRecordsPerRequest: true, + }, + }, + tok: "", + }} + + if !reflect.DeepEqual(s.readTabledataCalls, want) { + t.Errorf("reading: got:\n%v\nwant:\n%v", s.readTabledataCalls, want) + } +} + +func TestReadQueryOptions(t *testing.T) { + // test that read options are propagated. 
+ s := &readServiceStub{ + values: [][][]Value{{{1, 2}}}, + } + queryJob := &Job{ + projectID: "project-id", + jobID: "job-id", + service: s, + isQuery: true, + } + it, err := queryJob.Read(context.Background()) + if err != nil { + t.Fatalf("err calling Read: %v", err) + } + it.PageInfo().MaxSize = 5 + var vals []Value + if err := it.Next(&vals); err != nil { + t.Fatalf("Next: got: %v: want: nil", err) + } + + want := []readQueryArgs{{ + conf: &readQueryConf{ + projectID: "project-id", + jobID: "job-id", + paging: pagingConf{ + recordsPerRequest: 5, + setRecordsPerRequest: true, + }, + }, + tok: "", + }} + + if !reflect.DeepEqual(s.readQueryCalls, want) { + t.Errorf("reading: got:\n%v\nwant:\n%v", s.readQueryCalls, want) + } +} diff --git a/vendor/cloud.google.com/go/bigquery/schema.go b/vendor/cloud.google.com/go/bigquery/schema.go new file mode 100644 index 00000000..d6d88a5d --- /dev/null +++ b/vendor/cloud.google.com/go/bigquery/schema.go @@ -0,0 +1,312 @@ +// Copyright 2015 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package bigquery + +import ( + "errors" + "fmt" + "reflect" + + "cloud.google.com/go/internal/atomiccache" + + bq "google.golang.org/api/bigquery/v2" +) + +// Schema describes the fields in a table or query result. +type Schema []*FieldSchema + +type FieldSchema struct { + // The field name. 
+ // Must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_), + // and must start with a letter or underscore. + // The maximum length is 128 characters. + Name string + + // A description of the field. The maximum length is 16,384 characters. + Description string + + // Whether the field may contain multiple values. + Repeated bool + // Whether the field is required. Ignored if Repeated is true. + Required bool + + // The field data type. If Type is Record, then this field contains a nested schema, + // which is described by Schema. + Type FieldType + // Describes the nested schema if Type is set to Record. + Schema Schema +} + +func (fs *FieldSchema) asTableFieldSchema() *bq.TableFieldSchema { + tfs := &bq.TableFieldSchema{ + Description: fs.Description, + Name: fs.Name, + Type: string(fs.Type), + } + + if fs.Repeated { + tfs.Mode = "REPEATED" + } else if fs.Required { + tfs.Mode = "REQUIRED" + } // else leave as default, which is interpreted as NULLABLE. + + for _, f := range fs.Schema { + tfs.Fields = append(tfs.Fields, f.asTableFieldSchema()) + } + + return tfs +} + +func (s Schema) asTableSchema() *bq.TableSchema { + var fields []*bq.TableFieldSchema + for _, f := range s { + fields = append(fields, f.asTableFieldSchema()) + } + return &bq.TableSchema{Fields: fields} +} + +// customizeCreateTable allows a Schema to be used directly as an option to CreateTable. 
+func (s Schema) customizeCreateTable(conf *createTableConf) { + conf.schema = s.asTableSchema() +} + +func convertTableFieldSchema(tfs *bq.TableFieldSchema) *FieldSchema { + fs := &FieldSchema{ + Description: tfs.Description, + Name: tfs.Name, + Repeated: tfs.Mode == "REPEATED", + Required: tfs.Mode == "REQUIRED", + Type: FieldType(tfs.Type), + } + + for _, f := range tfs.Fields { + fs.Schema = append(fs.Schema, convertTableFieldSchema(f)) + } + return fs +} + +func convertTableSchema(ts *bq.TableSchema) Schema { + var s Schema + for _, f := range ts.Fields { + s = append(s, convertTableFieldSchema(f)) + } + return s +} + +type FieldType string + +const ( + StringFieldType FieldType = "STRING" + BytesFieldType FieldType = "BYTES" + IntegerFieldType FieldType = "INTEGER" + FloatFieldType FieldType = "FLOAT" + BooleanFieldType FieldType = "BOOLEAN" + TimestampFieldType FieldType = "TIMESTAMP" + RecordFieldType FieldType = "RECORD" + DateFieldType FieldType = "DATE" + TimeFieldType FieldType = "TIME" + DateTimeFieldType FieldType = "DATETIME" +) + +var ( + errNoStruct = errors.New("bigquery: can only infer schema from struct or pointer to struct") + errUnsupportedFieldType = errors.New("bigquery: unsupported type of field in struct") + errInvalidFieldName = errors.New("bigquery: invalid name of field in struct") +) + +var typeOfByteSlice = reflect.TypeOf([]byte{}) + +// InferSchema tries to derive a BigQuery schema from the supplied struct value. +// NOTE: All fields in the returned Schema are configured to be required, +// unless the corresponding field in the supplied struct is a slice or array. +// +// It is considered an error if the struct (including nested structs) contains +// any exported fields that are pointers or one of the following types: +// uint, uint64, uintptr, map, interface, complex64, complex128, func, chan. +// In these cases, an error will be returned. +// Future versions may handle these cases without error. 
+// +// Recursively defined structs are also disallowed. +func InferSchema(st interface{}) (Schema, error) { + return inferSchemaReflectCached(reflect.TypeOf(st)) +} + +var schemaCache atomiccache.Cache + +type cacheVal struct { + schema Schema + err error +} + +func inferSchemaReflectCached(t reflect.Type) (Schema, error) { + cv := schemaCache.Get(t, func() interface{} { + s, err := inferSchemaReflect(t) + return cacheVal{s, err} + }).(cacheVal) + return cv.schema, cv.err +} + +func inferSchemaReflect(t reflect.Type) (Schema, error) { + rec, err := hasRecursiveType(t, nil) + if err != nil { + return nil, err + } + if rec { + return nil, fmt.Errorf("bigquery: schema inference for recursive type %s", t) + } + return inferStruct(t) +} + +func inferStruct(t reflect.Type) (Schema, error) { + switch t.Kind() { + case reflect.Ptr: + if t.Elem().Kind() != reflect.Struct { + return nil, errNoStruct + } + t = t.Elem() + fallthrough + + case reflect.Struct: + return inferFields(t) + default: + return nil, errNoStruct + } +} + +// inferFieldSchema infers the FieldSchema for a Go type +func inferFieldSchema(rt reflect.Type) (*FieldSchema, error) { + switch rt { + case typeOfByteSlice: + return &FieldSchema{Required: true, Type: BytesFieldType}, nil + case typeOfGoTime: + return &FieldSchema{Required: true, Type: TimestampFieldType}, nil + case typeOfDate: + return &FieldSchema{Required: true, Type: DateFieldType}, nil + case typeOfTime: + return &FieldSchema{Required: true, Type: TimeFieldType}, nil + case typeOfDateTime: + return &FieldSchema{Required: true, Type: DateTimeFieldType}, nil + } + if isSupportedIntType(rt) { + return &FieldSchema{Required: true, Type: IntegerFieldType}, nil + } + switch rt.Kind() { + case reflect.Slice, reflect.Array: + et := rt.Elem() + if et != typeOfByteSlice && (et.Kind() == reflect.Slice || et.Kind() == reflect.Array) { + // Multi dimensional slices/arrays are not supported by BigQuery + return nil, errUnsupportedFieldType + } + + f, err := 
inferFieldSchema(et) + if err != nil { + return nil, err + } + f.Repeated = true + f.Required = false + return f, nil + case reflect.Struct, reflect.Ptr: + nested, err := inferStruct(rt) + if err != nil { + return nil, err + } + return &FieldSchema{Required: true, Type: RecordFieldType, Schema: nested}, nil + case reflect.String: + return &FieldSchema{Required: true, Type: StringFieldType}, nil + case reflect.Bool: + return &FieldSchema{Required: true, Type: BooleanFieldType}, nil + case reflect.Float32, reflect.Float64: + return &FieldSchema{Required: true, Type: FloatFieldType}, nil + default: + return nil, errUnsupportedFieldType + } +} + +// inferFields extracts all exported field types from struct type. +func inferFields(rt reflect.Type) (Schema, error) { + var s Schema + fields, err := fieldCache.Fields(rt) + if err != nil { + return nil, err + } + for _, field := range fields { + f, err := inferFieldSchema(field.Type) + if err != nil { + return nil, err + } + f.Name = field.Name + s = append(s, f) + } + return s, nil +} + +// isSupportedIntType reports whether t can be properly represented by the +// BigQuery INTEGER/INT64 type. +func isSupportedIntType(t reflect.Type) bool { + switch t.Kind() { + case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int, + reflect.Uint8, reflect.Uint16, reflect.Uint32: + return true + default: + return false + } +} + +// typeList is a linked list of reflect.Types. +type typeList struct { + t reflect.Type + next *typeList +} + +func (l *typeList) has(t reflect.Type) bool { + for l != nil { + if l.t == t { + return true + } + l = l.next + } + return false +} + +// hasRecursiveType reports whether t or any type inside t refers to itself, directly or indirectly, +// via exported fields. (Schema inference ignores unexported fields.) 
+func hasRecursiveType(t reflect.Type, seen *typeList) (bool, error) { + if t.Kind() == reflect.Ptr { + t = t.Elem() + } + if t.Kind() != reflect.Struct { + return false, nil + } + if seen.has(t) { + return true, nil + } + fields, err := fieldCache.Fields(t) + if err != nil { + return false, err + } + seen = &typeList{t, seen} + // Because seen is a linked list, additions to it from one field's + // recursive call will not affect the value for subsequent fields' calls. + for _, field := range fields { + ok, err := hasRecursiveType(field.Type, seen) + if err != nil { + return false, err + } + if ok { + return true, nil + } + } + return false, nil +} diff --git a/vendor/cloud.google.com/go/bigquery/schema_test.go b/vendor/cloud.google.com/go/bigquery/schema_test.go new file mode 100644 index 00000000..88e3dda9 --- /dev/null +++ b/vendor/cloud.google.com/go/bigquery/schema_test.go @@ -0,0 +1,792 @@ +// Copyright 2015 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package bigquery + +import ( + "fmt" + "reflect" + "testing" + "time" + + "cloud.google.com/go/civil" + "cloud.google.com/go/internal/pretty" + + bq "google.golang.org/api/bigquery/v2" +) + +func (fs *FieldSchema) GoString() string { + if fs == nil { + return "" + } + + return fmt.Sprintf("{Name:%s Description:%s Repeated:%t Required:%t Type:%s Schema:%s}", + fs.Name, + fs.Description, + fs.Repeated, + fs.Required, + fs.Type, + fmt.Sprintf("%#v", fs.Schema), + ) +} + +func bqTableFieldSchema(desc, name, typ, mode string) *bq.TableFieldSchema { + return &bq.TableFieldSchema{ + Description: desc, + Name: name, + Mode: mode, + Type: typ, + } +} + +func fieldSchema(desc, name, typ string, repeated, required bool) *FieldSchema { + return &FieldSchema{ + Description: desc, + Name: name, + Repeated: repeated, + Required: required, + Type: FieldType(typ), + } +} + +func TestSchemaConversion(t *testing.T) { + testCases := []struct { + schema Schema + bqSchema *bq.TableSchema + }{ + { + // required + bqSchema: &bq.TableSchema{ + Fields: []*bq.TableFieldSchema{ + bqTableFieldSchema("desc", "name", "STRING", "REQUIRED"), + }, + }, + schema: Schema{ + fieldSchema("desc", "name", "STRING", false, true), + }, + }, + { + // repeated + bqSchema: &bq.TableSchema{ + Fields: []*bq.TableFieldSchema{ + bqTableFieldSchema("desc", "name", "STRING", "REPEATED"), + }, + }, + schema: Schema{ + fieldSchema("desc", "name", "STRING", true, false), + }, + }, + { + // nullable, string + bqSchema: &bq.TableSchema{ + Fields: []*bq.TableFieldSchema{ + bqTableFieldSchema("desc", "name", "STRING", ""), + }, + }, + schema: Schema{ + fieldSchema("desc", "name", "STRING", false, false), + }, + }, + { + // integer + bqSchema: &bq.TableSchema{ + Fields: []*bq.TableFieldSchema{ + bqTableFieldSchema("desc", "name", "INTEGER", ""), + }, + }, + schema: Schema{ + fieldSchema("desc", "name", "INTEGER", false, false), + }, + }, + { + // float + bqSchema: &bq.TableSchema{ + Fields: []*bq.TableFieldSchema{ + 
bqTableFieldSchema("desc", "name", "FLOAT", ""), + }, + }, + schema: Schema{ + fieldSchema("desc", "name", "FLOAT", false, false), + }, + }, + { + // boolean + bqSchema: &bq.TableSchema{ + Fields: []*bq.TableFieldSchema{ + bqTableFieldSchema("desc", "name", "BOOLEAN", ""), + }, + }, + schema: Schema{ + fieldSchema("desc", "name", "BOOLEAN", false, false), + }, + }, + { + // timestamp + bqSchema: &bq.TableSchema{ + Fields: []*bq.TableFieldSchema{ + bqTableFieldSchema("desc", "name", "TIMESTAMP", ""), + }, + }, + schema: Schema{ + fieldSchema("desc", "name", "TIMESTAMP", false, false), + }, + }, + { + // civil times + bqSchema: &bq.TableSchema{ + Fields: []*bq.TableFieldSchema{ + bqTableFieldSchema("desc", "f1", "TIME", ""), + bqTableFieldSchema("desc", "f2", "DATE", ""), + bqTableFieldSchema("desc", "f3", "DATETIME", ""), + }, + }, + schema: Schema{ + fieldSchema("desc", "f1", "TIME", false, false), + fieldSchema("desc", "f2", "DATE", false, false), + fieldSchema("desc", "f3", "DATETIME", false, false), + }, + }, + { + // nested + bqSchema: &bq.TableSchema{ + Fields: []*bq.TableFieldSchema{ + { + Description: "An outer schema wrapping a nested schema", + Name: "outer", + Mode: "REQUIRED", + Type: "RECORD", + Fields: []*bq.TableFieldSchema{ + bqTableFieldSchema("inner field", "inner", "STRING", ""), + }, + }, + }, + }, + schema: Schema{ + &FieldSchema{ + Description: "An outer schema wrapping a nested schema", + Name: "outer", + Required: true, + Type: "RECORD", + Schema: []*FieldSchema{ + { + Description: "inner field", + Name: "inner", + Type: "STRING", + }, + }, + }, + }, + }, + } + + for _, tc := range testCases { + bqSchema := tc.schema.asTableSchema() + if !reflect.DeepEqual(bqSchema, tc.bqSchema) { + t.Errorf("converting to TableSchema: got:\n%v\nwant:\n%v", + pretty.Value(bqSchema), pretty.Value(tc.bqSchema)) + } + schema := convertTableSchema(tc.bqSchema) + if !reflect.DeepEqual(schema, tc.schema) { + t.Errorf("converting to Schema: got:\n%v\nwant:\n%v", 
schema, tc.schema) + } + } +} + +type allStrings struct { + String string + ByteSlice []byte +} + +type allSignedIntegers struct { + Int64 int64 + Int32 int32 + Int16 int16 + Int8 int8 + Int int +} + +type allUnsignedIntegers struct { + Uint32 uint32 + Uint16 uint16 + Uint8 uint8 +} + +type allFloat struct { + Float64 float64 + Float32 float32 + // NOTE: Complex32 and Complex64 are unsupported by BigQuery +} + +type allBoolean struct { + Bool bool +} + +type allTime struct { + Timestamp time.Time + Time civil.Time + Date civil.Date + DateTime civil.DateTime +} + +func reqField(name, typ string) *FieldSchema { + return &FieldSchema{ + Name: name, + Type: FieldType(typ), + Required: true, + } +} + +func TestSimpleInference(t *testing.T) { + testCases := []struct { + in interface{} + want Schema + }{ + { + in: allSignedIntegers{}, + want: Schema{ + reqField("Int64", "INTEGER"), + reqField("Int32", "INTEGER"), + reqField("Int16", "INTEGER"), + reqField("Int8", "INTEGER"), + reqField("Int", "INTEGER"), + }, + }, + { + in: allUnsignedIntegers{}, + want: Schema{ + reqField("Uint32", "INTEGER"), + reqField("Uint16", "INTEGER"), + reqField("Uint8", "INTEGER"), + }, + }, + { + in: allFloat{}, + want: Schema{ + reqField("Float64", "FLOAT"), + reqField("Float32", "FLOAT"), + }, + }, + { + in: allBoolean{}, + want: Schema{ + reqField("Bool", "BOOLEAN"), + }, + }, + { + in: &allBoolean{}, + want: Schema{ + reqField("Bool", "BOOLEAN"), + }, + }, + { + in: allTime{}, + want: Schema{ + reqField("Timestamp", "TIMESTAMP"), + reqField("Time", "TIME"), + reqField("Date", "DATE"), + reqField("DateTime", "DATETIME"), + }, + }, + { + in: allStrings{}, + want: Schema{ + reqField("String", "STRING"), + reqField("ByteSlice", "BYTES"), + }, + }, + } + for _, tc := range testCases { + got, err := InferSchema(tc.in) + if err != nil { + t.Fatalf("%T: error inferring TableSchema: %v", tc.in, err) + } + if !reflect.DeepEqual(got, tc.want) { + t.Errorf("%T: inferring TableSchema: 
got:\n%#v\nwant:\n%#v", tc.in, + pretty.Value(got), pretty.Value(tc.want)) + } + } +} + +type containsNested struct { + hidden string + NotNested int + Nested struct { + Inside int + } +} + +type containsDoubleNested struct { + NotNested int + Nested struct { + InsideNested struct { + Inside int + } + } +} + +type ptrNested struct { + Ptr *struct{ Inside int } +} + +type dup struct { // more than one field of the same struct type + A, B allBoolean +} + +func TestNestedInference(t *testing.T) { + testCases := []struct { + in interface{} + want Schema + }{ + { + in: containsNested{}, + want: Schema{ + reqField("NotNested", "INTEGER"), + &FieldSchema{ + Name: "Nested", + Required: true, + Type: "RECORD", + Schema: Schema{reqField("Inside", "INTEGER")}, + }, + }, + }, + { + in: containsDoubleNested{}, + want: Schema{ + reqField("NotNested", "INTEGER"), + &FieldSchema{ + Name: "Nested", + Required: true, + Type: "RECORD", + Schema: Schema{ + { + Name: "InsideNested", + Required: true, + Type: "RECORD", + Schema: Schema{reqField("Inside", "INTEGER")}, + }, + }, + }, + }, + }, + { + in: ptrNested{}, + want: Schema{ + &FieldSchema{ + Name: "Ptr", + Required: true, + Type: "RECORD", + Schema: Schema{reqField("Inside", "INTEGER")}, + }, + }, + }, + { + in: dup{}, + want: Schema{ + &FieldSchema{ + Name: "A", + Required: true, + Type: "RECORD", + Schema: Schema{reqField("Bool", "BOOLEAN")}, + }, + &FieldSchema{ + Name: "B", + Required: true, + Type: "RECORD", + Schema: Schema{reqField("Bool", "BOOLEAN")}, + }, + }, + }, + } + + for _, tc := range testCases { + got, err := InferSchema(tc.in) + if err != nil { + t.Fatalf("%T: error inferring TableSchema: %v", tc.in, err) + } + if !reflect.DeepEqual(got, tc.want) { + t.Errorf("%T: inferring TableSchema: got:\n%#v\nwant:\n%#v", tc.in, + pretty.Value(got), pretty.Value(tc.want)) + } + } +} + +type repeated struct { + NotRepeated []byte + RepeatedByteSlice [][]byte + Slice []int + Array [5]bool +} + +type nestedRepeated struct { + 
NotRepeated int + Repeated []struct { + Inside int + } + RepeatedPtr []*struct{ Inside int } +} + +func repField(name, typ string) *FieldSchema { + return &FieldSchema{ + Name: name, + Type: FieldType(typ), + Repeated: true, + } +} + +func TestRepeatedInference(t *testing.T) { + testCases := []struct { + in interface{} + want Schema + }{ + { + in: repeated{}, + want: Schema{ + reqField("NotRepeated", "BYTES"), + repField("RepeatedByteSlice", "BYTES"), + repField("Slice", "INTEGER"), + repField("Array", "BOOLEAN"), + }, + }, + { + in: nestedRepeated{}, + want: Schema{ + reqField("NotRepeated", "INTEGER"), + { + Name: "Repeated", + Repeated: true, + Type: "RECORD", + Schema: Schema{reqField("Inside", "INTEGER")}, + }, + { + Name: "RepeatedPtr", + Repeated: true, + Type: "RECORD", + Schema: Schema{reqField("Inside", "INTEGER")}, + }, + }, + }, + } + + for i, tc := range testCases { + got, err := InferSchema(tc.in) + if err != nil { + t.Fatalf("%d: error inferring TableSchema: %v", i, err) + } + if !reflect.DeepEqual(got, tc.want) { + t.Errorf("%d: inferring TableSchema: got:\n%#v\nwant:\n%#v", i, + pretty.Value(got), pretty.Value(tc.want)) + } + } +} + +type Embedded struct { + Embedded int +} + +type embedded struct { + Embedded2 int +} + +type nestedEmbedded struct { + Embedded + embedded +} + +func TestEmbeddedInference(t *testing.T) { + got, err := InferSchema(nestedEmbedded{}) + if err != nil { + t.Fatal(err) + } + want := Schema{ + reqField("Embedded", "INTEGER"), + reqField("Embedded2", "INTEGER"), + } + if !reflect.DeepEqual(got, want) { + t.Errorf("got %v, want %v", pretty.Value(got), pretty.Value(want)) + } +} + +func TestRecursiveInference(t *testing.T) { + type List struct { + Val int + Next *List + } + + _, err := InferSchema(List{}) + if err == nil { + t.Fatal("got nil, want error") + } +} + +type withTags struct { + NoTag int + ExcludeTag int `bigquery:"-"` + SimpleTag int `bigquery:"simple_tag"` + UnderscoreTag int `bigquery:"_id"` + MixedCase int 
`bigquery:"MIXEDcase"` +} + +type withTagsNested struct { + Nested withTags `bigquery:"nested"` + NestedAnonymous struct { + ExcludeTag int `bigquery:"-"` + Inside int `bigquery:"inside"` + } `bigquery:"anon"` +} + +type withTagsRepeated struct { + Repeated []withTags `bigquery:"repeated"` + RepeatedAnonymous []struct { + ExcludeTag int `bigquery:"-"` + Inside int `bigquery:"inside"` + } `bigquery:"anon"` +} + +type withTagsEmbedded struct { + withTags +} + +var withTagsSchema = Schema{ + reqField("NoTag", "INTEGER"), + reqField("simple_tag", "INTEGER"), + reqField("_id", "INTEGER"), + reqField("MIXEDcase", "INTEGER"), +} + +func TestTagInference(t *testing.T) { + testCases := []struct { + in interface{} + want Schema + }{ + { + in: withTags{}, + want: withTagsSchema, + }, + { + in: withTagsNested{}, + want: Schema{ + &FieldSchema{ + Name: "nested", + Required: true, + Type: "RECORD", + Schema: withTagsSchema, + }, + &FieldSchema{ + Name: "anon", + Required: true, + Type: "RECORD", + Schema: Schema{reqField("inside", "INTEGER")}, + }, + }, + }, + { + in: withTagsRepeated{}, + want: Schema{ + &FieldSchema{ + Name: "repeated", + Repeated: true, + Type: "RECORD", + Schema: withTagsSchema, + }, + &FieldSchema{ + Name: "anon", + Repeated: true, + Type: "RECORD", + Schema: Schema{reqField("inside", "INTEGER")}, + }, + }, + }, + { + in: withTagsEmbedded{}, + want: withTagsSchema, + }, + } + for i, tc := range testCases { + got, err := InferSchema(tc.in) + if err != nil { + t.Fatalf("%d: error inferring TableSchema: %v", i, err) + } + if !reflect.DeepEqual(got, tc.want) { + t.Errorf("%d: inferring TableSchema: got:\n%#v\nwant:\n%#v", i, + pretty.Value(got), pretty.Value(tc.want)) + } + } +} + +func TestTagInferenceErrors(t *testing.T) { + testCases := []struct { + in interface{} + err error + }{ + { + in: struct { + LongTag int `bigquery:"abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxy"` + 
}{}, + err: errInvalidFieldName, + }, + { + in: struct { + UnsupporedStartChar int `bigquery:"øab"` + }{}, + err: errInvalidFieldName, + }, + { + in: struct { + UnsupportedEndChar int `bigquery:"abø"` + }{}, + err: errInvalidFieldName, + }, + { + in: struct { + UnsupportedMiddleChar int `bigquery:"aøb"` + }{}, + err: errInvalidFieldName, + }, + { + in: struct { + StartInt int `bigquery:"1abc"` + }{}, + err: errInvalidFieldName, + }, + { + in: struct { + Hyphens int `bigquery:"a-b"` + }{}, + err: errInvalidFieldName, + }, + { + in: struct { + OmitEmpty int `bigquery:"abc,omitempty"` + }{}, + err: errInvalidFieldName, + }, + } + for i, tc := range testCases { + want := tc.err + _, got := InferSchema(tc.in) + if !reflect.DeepEqual(got, want) { + t.Errorf("%d: inferring TableSchema: got:\n%#v\nwant:\n%#v", i, got, want) + } + } +} + +func TestSchemaErrors(t *testing.T) { + testCases := []struct { + in interface{} + err error + }{ + { + in: []byte{}, + err: errNoStruct, + }, + { + in: new(int), + err: errNoStruct, + }, + { + in: struct{ Uint uint }{}, + err: errUnsupportedFieldType, + }, + { + in: struct{ Uint64 uint64 }{}, + err: errUnsupportedFieldType, + }, + { + in: struct{ Uintptr uintptr }{}, + err: errUnsupportedFieldType, + }, + { + in: struct{ Complex complex64 }{}, + err: errUnsupportedFieldType, + }, + { + in: struct{ Map map[string]int }{}, + err: errUnsupportedFieldType, + }, + { + in: struct{ Chan chan bool }{}, + err: errUnsupportedFieldType, + }, + { + in: struct{ Ptr *int }{}, + err: errNoStruct, + }, + { + in: struct{ Interface interface{} }{}, + err: errUnsupportedFieldType, + }, + { + in: struct{ MultiDimensional [][]int }{}, + err: errUnsupportedFieldType, + }, + { + in: struct{ MultiDimensional [][][]byte }{}, + err: errUnsupportedFieldType, + }, + { + in: struct{ ChanSlice []chan bool }{}, + err: errUnsupportedFieldType, + }, + { + in: struct{ NestedChan struct{ Chan []chan bool } }{}, + err: errUnsupportedFieldType, + }, + } + for _, tc := range 
testCases { + want := tc.err + _, got := InferSchema(tc.in) + if !reflect.DeepEqual(got, want) { + t.Errorf("%#v: got:\n%#v\nwant:\n%#v", tc.in, got, want) + } + } +} + +func TestHasRecursiveType(t *testing.T) { + type ( + nonStruct int + nonRec struct{ A string } + dup struct{ A, B nonRec } + rec struct { + A int + B *rec + } + recUnexported struct { + A int + b *rec + } + hasRec struct { + A int + R *rec + } + ) + for _, test := range []struct { + in interface{} + want bool + }{ + {nonStruct(0), false}, + {nonRec{}, false}, + {dup{}, false}, + {rec{}, true}, + {recUnexported{}, false}, + {hasRec{}, true}, + } { + got, err := hasRecursiveType(reflect.TypeOf(test.in), nil) + if err != nil { + t.Fatal(err) + } + if got != test.want { + t.Errorf("%T: got %t, want %t", test.in, got, test.want) + } + } +} diff --git a/vendor/cloud.google.com/go/bigquery/service.go b/vendor/cloud.google.com/go/bigquery/service.go new file mode 100644 index 00000000..a6b27c1b --- /dev/null +++ b/vendor/cloud.google.com/go/bigquery/service.go @@ -0,0 +1,623 @@ +// Copyright 2015 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package bigquery + +import ( + "errors" + "fmt" + "io" + "net/http" + "sync" + "time" + + "cloud.google.com/go/internal" + gax "github.com/googleapis/gax-go" + + "golang.org/x/net/context" + bq "google.golang.org/api/bigquery/v2" + "google.golang.org/api/googleapi" +) + +// service provides an internal abstraction to isolate the generated +// BigQuery API; most of this package uses this interface instead. +// The single implementation, *bigqueryService, contains all the knowledge +// of the generated BigQuery API. +type service interface { + // Jobs + insertJob(ctx context.Context, projectId string, conf *insertJobConf) (*Job, error) + getJobType(ctx context.Context, projectId, jobID string) (jobType, error) + jobCancel(ctx context.Context, projectId, jobID string) error + jobStatus(ctx context.Context, projectId, jobID string) (*JobStatus, error) + + // Tables + createTable(ctx context.Context, conf *createTableConf) error + getTableMetadata(ctx context.Context, projectID, datasetID, tableID string) (*TableMetadata, error) + deleteTable(ctx context.Context, projectID, datasetID, tableID string) error + + // listTables returns a page of Tables and a next page token. Note: the Tables do not have their c field populated. 
+ listTables(ctx context.Context, projectID, datasetID string, pageSize int, pageToken string) ([]*Table, string, error) + patchTable(ctx context.Context, projectID, datasetID, tableID string, conf *patchTableConf) (*TableMetadata, error) + + // Table data + readTabledata(ctx context.Context, conf *readTableConf, pageToken string) (*readDataResult, error) + insertRows(ctx context.Context, projectID, datasetID, tableID string, rows []*insertionRow, conf *insertRowsConf) error + + // Datasets + insertDataset(ctx context.Context, datasetID, projectID string) error + deleteDataset(ctx context.Context, datasetID, projectID string) error + getDatasetMetadata(ctx context.Context, projectID, datasetID string) (*DatasetMetadata, error) + + // Misc + + // readQuery reads data resulting from a query job. If the job is + // incomplete, an errIncompleteJob is returned. readQuery may be called + // repeatedly to poll for job completion. + readQuery(ctx context.Context, conf *readQueryConf, pageToken string) (*readDataResult, error) + + // listDatasets returns a page of Datasets and a next page token. Note: the Datasets do not have their c field populated. + listDatasets(ctx context.Context, projectID string, maxResults int, pageToken string, all bool, filter string) ([]*Dataset, string, error) +} + +type bigqueryService struct { + s *bq.Service +} + +func newBigqueryService(client *http.Client, endpoint string) (*bigqueryService, error) { + s, err := bq.New(client) + if err != nil { + return nil, fmt.Errorf("constructing bigquery client: %v", err) + } + s.BasePath = endpoint + + return &bigqueryService{s: s}, nil +} + +// getPages calls the supplied getPage function repeatedly until there are no pages left to get. +// token is the token of the initial page to start from. Use an empty string to start from the beginning. 
+func getPages(token string, getPage func(token string) (nextToken string, err error)) error { + for { + var err error + token, err = getPage(token) + if err != nil { + return err + } + if token == "" { + return nil + } + } +} + +type insertJobConf struct { + job *bq.Job + media io.Reader +} + +func (s *bigqueryService) insertJob(ctx context.Context, projectID string, conf *insertJobConf) (*Job, error) { + call := s.s.Jobs.Insert(projectID, conf.job).Context(ctx) + if conf.media != nil { + call.Media(conf.media) + } + res, err := call.Do() + if err != nil { + return nil, err + } + return &Job{service: s, projectID: projectID, jobID: res.JobReference.JobId}, nil +} + +type pagingConf struct { + recordsPerRequest int64 + setRecordsPerRequest bool + + startIndex uint64 +} + +type readTableConf struct { + projectID, datasetID, tableID string + paging pagingConf + schema Schema // lazily initialized when the first page of data is fetched. +} + +type readDataResult struct { + pageToken string + rows [][]Value + totalRows uint64 + schema Schema +} + +type readQueryConf struct { + projectID, jobID string + paging pagingConf +} + +func (s *bigqueryService) readTabledata(ctx context.Context, conf *readTableConf, pageToken string) (*readDataResult, error) { + // Prepare request to fetch one page of table data. + req := s.s.Tabledata.List(conf.projectID, conf.datasetID, conf.tableID) + + if pageToken != "" { + req.PageToken(pageToken) + } else { + req.StartIndex(conf.paging.startIndex) + } + + if conf.paging.setRecordsPerRequest { + req.MaxResults(conf.paging.recordsPerRequest) + } + + // Fetch the table schema in the background, if necessary. + var schemaErr error + var schemaFetch sync.WaitGroup + if conf.schema == nil { + schemaFetch.Add(1) + go func() { + defer schemaFetch.Done() + var t *bq.Table + t, schemaErr = s.s.Tables.Get(conf.projectID, conf.datasetID, conf.tableID). + Fields("schema"). + Context(ctx). 
+ Do() + if schemaErr == nil && t.Schema != nil { + conf.schema = convertTableSchema(t.Schema) + } + }() + } + + res, err := req.Context(ctx).Do() + if err != nil { + return nil, err + } + + schemaFetch.Wait() + if schemaErr != nil { + return nil, schemaErr + } + + result := &readDataResult{ + pageToken: res.PageToken, + totalRows: uint64(res.TotalRows), + schema: conf.schema, + } + result.rows, err = convertRows(res.Rows, conf.schema) + if err != nil { + return nil, err + } + return result, nil +} + +var errIncompleteJob = errors.New("internal error: query results not available because job is not complete") + +// getQueryResultsTimeout controls the maximum duration of a request to the +// BigQuery GetQueryResults endpoint. Setting a long timeout here does not +// cause increased overall latency, as results are returned as soon as they are +// available. +const getQueryResultsTimeout = time.Minute + +func (s *bigqueryService) readQuery(ctx context.Context, conf *readQueryConf, pageToken string) (*readDataResult, error) { + req := s.s.Jobs.GetQueryResults(conf.projectID, conf.jobID). 
+ TimeoutMs(getQueryResultsTimeout.Nanoseconds() / 1e6) + + if pageToken != "" { + req.PageToken(pageToken) + } else { + req.StartIndex(conf.paging.startIndex) + } + + if conf.paging.setRecordsPerRequest { + req.MaxResults(conf.paging.recordsPerRequest) + } + + res, err := req.Context(ctx).Do() + if err != nil { + return nil, err + } + + if !res.JobComplete { + return nil, errIncompleteJob + } + schema := convertTableSchema(res.Schema) + result := &readDataResult{ + pageToken: res.PageToken, + totalRows: res.TotalRows, + schema: schema, + } + result.rows, err = convertRows(res.Rows, schema) + if err != nil { + return nil, err + } + return result, nil +} + +type insertRowsConf struct { + templateSuffix string + ignoreUnknownValues bool + skipInvalidRows bool +} + +func (s *bigqueryService) insertRows(ctx context.Context, projectID, datasetID, tableID string, rows []*insertionRow, conf *insertRowsConf) error { + req := &bq.TableDataInsertAllRequest{ + TemplateSuffix: conf.templateSuffix, + IgnoreUnknownValues: conf.ignoreUnknownValues, + SkipInvalidRows: conf.skipInvalidRows, + } + for _, row := range rows { + m := make(map[string]bq.JsonValue) + for k, v := range row.Row { + m[k] = bq.JsonValue(v) + } + req.Rows = append(req.Rows, &bq.TableDataInsertAllRequestRows{ + InsertId: row.InsertID, + Json: m, + }) + } + var res *bq.TableDataInsertAllResponse + err := runWithRetry(ctx, func() error { + var err error + res, err = s.s.Tabledata.InsertAll(projectID, datasetID, tableID, req).Context(ctx).Do() + return err + }) + if err != nil { + return err + } + if len(res.InsertErrors) == 0 { + return nil + } + + var errs PutMultiError + for _, e := range res.InsertErrors { + if int(e.Index) > len(rows) { + return fmt.Errorf("internal error: unexpected row index: %v", e.Index) + } + rie := RowInsertionError{ + InsertID: rows[e.Index].InsertID, + RowIndex: int(e.Index), + } + for _, errp := range e.Errors { + rie.Errors = append(rie.Errors, errorFromErrorProto(errp)) + } + errs 
= append(errs, rie) + } + return errs +} + +type jobType int + +const ( + copyJobType jobType = iota + extractJobType + loadJobType + queryJobType +) + +func (s *bigqueryService) getJobType(ctx context.Context, projectID, jobID string) (jobType, error) { + res, err := s.s.Jobs.Get(projectID, jobID). + Fields("configuration"). + Context(ctx). + Do() + + if err != nil { + return 0, err + } + + switch { + case res.Configuration.Copy != nil: + return copyJobType, nil + case res.Configuration.Extract != nil: + return extractJobType, nil + case res.Configuration.Load != nil: + return loadJobType, nil + case res.Configuration.Query != nil: + return queryJobType, nil + default: + return 0, errors.New("unknown job type") + } +} + +func (s *bigqueryService) jobCancel(ctx context.Context, projectID, jobID string) error { + // Jobs.Cancel returns a job entity, but the only relevant piece of + // data it may contain (the status of the job) is unreliable. From the + // docs: "This call will return immediately, and the client will need + // to poll for the job status to see if the cancel completed + // successfully". So it would be misleading to return a status. + _, err := s.s.Jobs.Cancel(projectID, jobID). + Fields(). // We don't need any of the response data. + Context(ctx). + Do() + return err +} + +func (s *bigqueryService) jobStatus(ctx context.Context, projectID, jobID string) (*JobStatus, error) { + res, err := s.s.Jobs.Get(projectID, jobID). + Fields("status"). // Only fetch what we need. + Context(ctx). 
+ Do() + if err != nil { + return nil, err + } + return jobStatusFromProto(res.Status) +} + +var stateMap = map[string]State{"PENDING": Pending, "RUNNING": Running, "DONE": Done} + +func jobStatusFromProto(status *bq.JobStatus) (*JobStatus, error) { + state, ok := stateMap[status.State] + if !ok { + return nil, fmt.Errorf("unexpected job state: %v", status.State) + } + + newStatus := &JobStatus{ + State: state, + err: nil, + } + if err := errorFromErrorProto(status.ErrorResult); state == Done && err != nil { + newStatus.err = err + } + + for _, ep := range status.Errors { + newStatus.Errors = append(newStatus.Errors, errorFromErrorProto(ep)) + } + return newStatus, nil +} + +// listTables returns a subset of tables that belong to a dataset, and a token for fetching the next subset. +func (s *bigqueryService) listTables(ctx context.Context, projectID, datasetID string, pageSize int, pageToken string) ([]*Table, string, error) { + var tables []*Table + req := s.s.Tables.List(projectID, datasetID). + PageToken(pageToken). + Context(ctx) + if pageSize > 0 { + req.MaxResults(int64(pageSize)) + } + res, err := req.Do() + if err != nil { + return nil, "", err + } + for _, t := range res.Tables { + tables = append(tables, s.convertListedTable(t)) + } + return tables, res.NextPageToken, nil +} + +type createTableConf struct { + projectID, datasetID, tableID string + expiration time.Time + viewQuery string + schema *bq.TableSchema + useStandardSQL bool + timePartitioning *TimePartitioning +} + +// createTable creates a table in the BigQuery service. +// expiration is an optional time after which the table will be deleted and its storage reclaimed. +// If viewQuery is non-empty, the created table will be of type VIEW. +// Note: expiration can only be set during table creation. +// Note: after table creation, a view can be modified only if its table was initially created with a view. 
+func (s *bigqueryService) createTable(ctx context.Context, conf *createTableConf) error { + table := &bq.Table{ + TableReference: &bq.TableReference{ + ProjectId: conf.projectID, + DatasetId: conf.datasetID, + TableId: conf.tableID, + }, + } + if !conf.expiration.IsZero() { + table.ExpirationTime = conf.expiration.UnixNano() / 1e6 + } + // TODO(jba): make it impossible to provide both a view query and a schema. + if conf.viewQuery != "" { + table.View = &bq.ViewDefinition{ + Query: conf.viewQuery, + } + if conf.useStandardSQL { + table.View.UseLegacySql = false + table.View.ForceSendFields = append(table.View.ForceSendFields, "UseLegacySql") + } + } + if conf.schema != nil { + table.Schema = conf.schema + } + if conf.timePartitioning != nil { + table.TimePartitioning = &bq.TimePartitioning{ + Type: "DAY", + ExpirationMs: int64(conf.timePartitioning.Expiration.Seconds() * 1000), + } + } + + _, err := s.s.Tables.Insert(conf.projectID, conf.datasetID, table).Context(ctx).Do() + return err +} + +func (s *bigqueryService) getTableMetadata(ctx context.Context, projectID, datasetID, tableID string) (*TableMetadata, error) { + table, err := s.s.Tables.Get(projectID, datasetID, tableID).Context(ctx).Do() + if err != nil { + return nil, err + } + return bqTableToMetadata(table), nil +} + +func (s *bigqueryService) deleteTable(ctx context.Context, projectID, datasetID, tableID string) error { + return s.s.Tables.Delete(projectID, datasetID, tableID).Context(ctx).Do() +} + +func bqTableToMetadata(t *bq.Table) *TableMetadata { + md := &TableMetadata{ + Description: t.Description, + Name: t.FriendlyName, + Type: TableType(t.Type), + ID: t.Id, + NumBytes: t.NumBytes, + NumRows: t.NumRows, + ExpirationTime: unixMillisToTime(t.ExpirationTime), + CreationTime: unixMillisToTime(t.CreationTime), + LastModifiedTime: unixMillisToTime(int64(t.LastModifiedTime)), + } + if t.Schema != nil { + md.Schema = convertTableSchema(t.Schema) + } + if t.View != nil { + md.View = t.View.Query + } + 
if t.TimePartitioning != nil { + md.TimePartitioning = &TimePartitioning{time.Duration(t.TimePartitioning.ExpirationMs) * time.Millisecond} + } + + return md +} + +func bqDatasetToMetadata(d *bq.Dataset) *DatasetMetadata { + /// TODO(jba): access + return &DatasetMetadata{ + CreationTime: unixMillisToTime(d.CreationTime), + LastModifiedTime: unixMillisToTime(d.LastModifiedTime), + DefaultTableExpiration: time.Duration(d.DefaultTableExpirationMs) * time.Millisecond, + Description: d.Description, + Name: d.FriendlyName, + ID: d.Id, + Location: d.Location, + Labels: d.Labels, + } +} + +// Convert a number of milliseconds since the Unix epoch to a time.Time. +// Treat an input of zero specially: convert it to the zero time, +// rather than the start of the epoch. +func unixMillisToTime(m int64) time.Time { + if m == 0 { + return time.Time{} + } + return time.Unix(0, m*1e6) +} + +func (s *bigqueryService) convertListedTable(t *bq.TableListTables) *Table { + return &Table{ + ProjectID: t.TableReference.ProjectId, + DatasetID: t.TableReference.DatasetId, + TableID: t.TableReference.TableId, + } +} + +// patchTableConf contains fields to be patched. +type patchTableConf struct { + // These fields are omitted from the patch operation if nil. + Description *string + Name *string + Schema Schema +} + +func (s *bigqueryService) patchTable(ctx context.Context, projectID, datasetID, tableID string, conf *patchTableConf) (*TableMetadata, error) { + t := &bq.Table{} + forceSend := func(field string) { + t.ForceSendFields = append(t.ForceSendFields, field) + } + + if conf.Description != nil { + t.Description = *conf.Description + forceSend("Description") + } + if conf.Name != nil { + t.FriendlyName = *conf.Name + forceSend("FriendlyName") + } + if conf.Schema != nil { + t.Schema = conf.Schema.asTableSchema() + forceSend("Schema") + } + table, err := s.s.Tables.Patch(projectID, datasetID, tableID, t). + Context(ctx). 
+ Do() + if err != nil { + return nil, err + } + return bqTableToMetadata(table), nil +} + +func (s *bigqueryService) insertDataset(ctx context.Context, datasetID, projectID string) error { + ds := &bq.Dataset{ + DatasetReference: &bq.DatasetReference{DatasetId: datasetID}, + } + _, err := s.s.Datasets.Insert(projectID, ds).Context(ctx).Do() + return err +} + +func (s *bigqueryService) deleteDataset(ctx context.Context, datasetID, projectID string) error { + return s.s.Datasets.Delete(projectID, datasetID).Context(ctx).Do() +} + +func (s *bigqueryService) getDatasetMetadata(ctx context.Context, projectID, datasetID string) (*DatasetMetadata, error) { + table, err := s.s.Datasets.Get(projectID, datasetID).Context(ctx).Do() + if err != nil { + return nil, err + } + return bqDatasetToMetadata(table), nil +} + +func (s *bigqueryService) listDatasets(ctx context.Context, projectID string, maxResults int, pageToken string, all bool, filter string) ([]*Dataset, string, error) { + req := s.s.Datasets.List(projectID). + Context(ctx). + PageToken(pageToken). + All(all) + if maxResults > 0 { + req.MaxResults(int64(maxResults)) + } + if filter != "" { + req.Filter(filter) + } + res, err := req.Do() + if err != nil { + return nil, "", err + } + var datasets []*Dataset + for _, d := range res.Datasets { + datasets = append(datasets, s.convertListedDataset(d)) + } + return datasets, res.NextPageToken, nil +} + +func (s *bigqueryService) convertListedDataset(d *bq.DatasetListDatasets) *Dataset { + return &Dataset{ + ProjectID: d.DatasetReference.ProjectId, + DatasetID: d.DatasetReference.DatasetId, + } +} + +// runWithRetry calls the function until it returns nil or a non-retryable error, or +// the context is done. +// See the similar function in ../storage/invoke.go. The main difference is the +// reason for retrying. 
+func runWithRetry(ctx context.Context, call func() error) error { + backoff := gax.Backoff{ + Initial: 2 * time.Second, + Max: 32 * time.Second, + Multiplier: 2, + } + return internal.Retry(ctx, backoff, func() (stop bool, err error) { + err = call() + if err == nil { + return true, nil + } + e, ok := err.(*googleapi.Error) + if !ok { + return true, err + } + var reason string + if len(e.Errors) > 0 { + reason = e.Errors[0].Reason + } + // Retry using the criteria in + // https://cloud.google.com/bigquery/troubleshooting-errors + if reason == "backendError" && (e.Code == 500 || e.Code == 503) { + return false, nil + } + return true, err + }) +} diff --git a/vendor/cloud.google.com/go/bigquery/table.go b/vendor/cloud.google.com/go/bigquery/table.go new file mode 100644 index 00000000..d214ec5e --- /dev/null +++ b/vendor/cloud.google.com/go/bigquery/table.go @@ -0,0 +1,224 @@ +// Copyright 2015 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package bigquery + +import ( + "fmt" + "time" + + "golang.org/x/net/context" + + "cloud.google.com/go/internal/optional" + bq "google.golang.org/api/bigquery/v2" +) + +// A Table is a reference to a BigQuery table. +type Table struct { + // ProjectID, DatasetID and TableID may be omitted if the Table is the destination for a query. + // In this case the result will be stored in an ephemeral table. 
+ ProjectID string + DatasetID string + // TableID must contain only letters (a-z, A-Z), numbers (0-9), or underscores (_). + // The maximum length is 1,024 characters. + TableID string + + c *Client +} + +// TableMetadata contains information about a BigQuery table. +type TableMetadata struct { + Description string // The user-friendly description of this table. + Name string // The user-friendly name for this table. + Schema Schema + View string + + ID string // An opaque ID uniquely identifying the table. + Type TableType + + // The time when this table expires. If not set, the table will persist + // indefinitely. Expired tables will be deleted and their storage reclaimed. + ExpirationTime time.Time + + CreationTime time.Time + LastModifiedTime time.Time + + // The size of the table in bytes. + // This does not include data that is being buffered during a streaming insert. + NumBytes int64 + + // The number of rows of data in this table. + // This does not include data that is being buffered during a streaming insert. + NumRows uint64 + + // The time-based partitioning settings for this table. + TimePartitioning *TimePartitioning +} + +// TableCreateDisposition specifies the circumstances under which destination table will be created. +// Default is CreateIfNeeded. +type TableCreateDisposition string + +const ( + // CreateIfNeeded will create the table if it does not already exist. + // Tables are created atomically on successful completion of a job. + CreateIfNeeded TableCreateDisposition = "CREATE_IF_NEEDED" + + // CreateNever ensures the table must already exist and will not be + // automatically created. + CreateNever TableCreateDisposition = "CREATE_NEVER" +) + +// TableWriteDisposition specifies how existing data in a destination table is treated. +// Default is WriteAppend. +type TableWriteDisposition string + +const ( + // WriteAppend will append to any existing data in the destination table. 
+ // Data is appended atomically on successful completion of a job. + WriteAppend TableWriteDisposition = "WRITE_APPEND" + + // WriteTruncate overrides the existing data in the destination table. + // Data is overwritten atomically on successful completion of a job. + WriteTruncate TableWriteDisposition = "WRITE_TRUNCATE" + + // WriteEmpty fails writes if the destination table already contains data. + WriteEmpty TableWriteDisposition = "WRITE_EMPTY" +) + +// TableType is the type of table. +type TableType string + +const ( + RegularTable TableType = "TABLE" + ViewTable TableType = "VIEW" +) + +func (t *Table) tableRefProto() *bq.TableReference { + return &bq.TableReference{ + ProjectId: t.ProjectID, + DatasetId: t.DatasetID, + TableId: t.TableID, + } +} + +// FullyQualifiedName returns the ID of the table in projectID:datasetID.tableID format. +func (t *Table) FullyQualifiedName() string { + return fmt.Sprintf("%s:%s.%s", t.ProjectID, t.DatasetID, t.TableID) +} + +// implicitTable reports whether Table is an empty placeholder, which signifies that a new table should be created with an auto-generated Table ID. +func (t *Table) implicitTable() bool { + return t.ProjectID == "" && t.DatasetID == "" && t.TableID == "" +} + +// Create creates a table in the BigQuery service. +func (t *Table) Create(ctx context.Context, options ...CreateTableOption) error { + conf := &createTableConf{ + projectID: t.ProjectID, + datasetID: t.DatasetID, + tableID: t.TableID, + } + for _, o := range options { + o.customizeCreateTable(conf) + } + return t.c.service.createTable(ctx, conf) +} + +// Metadata fetches the metadata for the table. +func (t *Table) Metadata(ctx context.Context) (*TableMetadata, error) { + return t.c.service.getTableMetadata(ctx, t.ProjectID, t.DatasetID, t.TableID) +} + +// Delete deletes the table. 
+func (t *Table) Delete(ctx context.Context) error { + return t.c.service.deleteTable(ctx, t.ProjectID, t.DatasetID, t.TableID) +} + +// A CreateTableOption is an optional argument to CreateTable. +type CreateTableOption interface { + customizeCreateTable(*createTableConf) +} + +type tableExpiration time.Time + +// TableExpiration returns a CreateTableOption that will cause the created table to be deleted after the expiration time. +func TableExpiration(exp time.Time) CreateTableOption { return tableExpiration(exp) } + +func (opt tableExpiration) customizeCreateTable(conf *createTableConf) { + conf.expiration = time.Time(opt) +} + +type viewQuery string + +// ViewQuery returns a CreateTableOption that causes the created table to be a virtual table defined by the supplied query. +// For more information see: https://cloud.google.com/bigquery/querying-data#views +func ViewQuery(query string) CreateTableOption { return viewQuery(query) } + +func (opt viewQuery) customizeCreateTable(conf *createTableConf) { + conf.viewQuery = string(opt) +} + +type useStandardSQL struct{} + +// UseStandardSQL returns a CreateTableOption to set the table to use standard SQL. +// The default setting is false (using legacy SQL). +func UseStandardSQL() CreateTableOption { return useStandardSQL{} } + +func (opt useStandardSQL) customizeCreateTable(conf *createTableConf) { + conf.useStandardSQL = true +} + +// TimePartitioning is a CreateTableOption that can be used to set time-based +// date partitioning on a table. +// For more information see: https://cloud.google.com/bigquery/docs/creating-partitioned-tables +type TimePartitioning struct { + // (Optional) The amount of time to keep the storage for a partition. + // If the duration is empty (0), the data in the partitions do not expire. + Expiration time.Duration +} + +func (opt TimePartitioning) customizeCreateTable(conf *createTableConf) { + conf.timePartitioning = &opt +} + +// Update modifies specific Table metadata fields. 
+func (t *Table) Update(ctx context.Context, tm TableMetadataToUpdate) (*TableMetadata, error) { + var conf patchTableConf + if tm.Description != nil { + s := optional.ToString(tm.Description) + conf.Description = &s + } + if tm.Name != nil { + s := optional.ToString(tm.Name) + conf.Name = &s + } + conf.Schema = tm.Schema + return t.c.service.patchTable(ctx, t.ProjectID, t.DatasetID, t.TableID, &conf) +} + +// TableMetadataToUpdate is used when updating a table's metadata. +// Only non-nil fields will be updated. +type TableMetadataToUpdate struct { + // Description is the user-friendly description of this table. + Description optional.String + + // Name is the user-friendly name for this table. + Name optional.String + + // Schema is the table's schema. + // When updating a schema, you can add columns but not remove them. + Schema Schema + // TODO(jba): support updating the view +} diff --git a/vendor/cloud.google.com/go/bigquery/uploader.go b/vendor/cloud.google.com/go/bigquery/uploader.go new file mode 100644 index 00000000..505d4113 --- /dev/null +++ b/vendor/cloud.google.com/go/bigquery/uploader.go @@ -0,0 +1,162 @@ +// Copyright 2015 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package bigquery + +import ( + "fmt" + "reflect" + + "golang.org/x/net/context" +) + +// An Uploader does streaming inserts into a BigQuery table. +// It is safe for concurrent use. 
+type Uploader struct { + t *Table + + // SkipInvalidRows causes rows containing invalid data to be silently + // ignored. The default value is false, which causes the entire request to + // fail if there is an attempt to insert an invalid row. + SkipInvalidRows bool + + // IgnoreUnknownValues causes values not matching the schema to be ignored. + // The default value is false, which causes records containing such values + // to be treated as invalid records. + IgnoreUnknownValues bool + + // A TableTemplateSuffix allows Uploaders to create tables automatically. + // + // Experimental: this option is experimental and may be modified or removed in future versions, + // regardless of any other documented package stability guarantees. + // + // When you specify a suffix, the table you upload data to + // will be used as a template for creating a new table, with the same schema, + // called + . + // + // More information is available at + // https://cloud.google.com/bigquery/streaming-data-into-bigquery#template-tables + TableTemplateSuffix string +} + +// Uploader returns an Uploader that can be used to append rows to t. +// The returned Uploader may optionally be further configured before its Put method is called. +func (t *Table) Uploader() *Uploader { + return &Uploader{t: t} +} + +// Put uploads one or more rows to the BigQuery service. +// +// If src is ValueSaver, then its Save method is called to produce a row for uploading. +// +// If src is a struct or pointer to a struct, then a schema is inferred from it +// and used to create a StructSaver. The InsertID of the StructSaver will be +// empty. +// +// If src is a slice of ValueSavers, structs, or struct pointers, then each +// element of the slice is treated as above, and multiple rows are uploaded. +// +// Put returns a PutMultiError if one or more rows failed to be uploaded. +// The PutMultiError contains a RowInsertionError for each failed row. 
+// +// Put will retry on temporary errors (see +// https://cloud.google.com/bigquery/troubleshooting-errors). This can result +// in duplicate rows if you do not use insert IDs. Also, if the error persists, +// the call will run indefinitely. Pass a context with a timeout to prevent +// hanging calls. +func (u *Uploader) Put(ctx context.Context, src interface{}) error { + savers, err := valueSavers(src) + if err != nil { + return err + } + return u.putMulti(ctx, savers) +} + +func valueSavers(src interface{}) ([]ValueSaver, error) { + saver, ok, err := toValueSaver(src) + if err != nil { + return nil, err + } + if ok { + return []ValueSaver{saver}, nil + } + srcVal := reflect.ValueOf(src) + if srcVal.Kind() != reflect.Slice { + return nil, fmt.Errorf("%T is not a ValueSaver, struct, struct pointer, or slice", src) + + } + var savers []ValueSaver + for i := 0; i < srcVal.Len(); i++ { + s := srcVal.Index(i).Interface() + saver, ok, err := toValueSaver(s) + if err != nil { + return nil, err + } + if !ok { + return nil, fmt.Errorf("src[%d] has type %T, which is not a ValueSaver, struct or struct pointer", i, s) + } + savers = append(savers, saver) + } + return savers, nil +} + +// Make a ValueSaver from x, which must implement ValueSaver already +// or be a struct or pointer to struct. 
+func toValueSaver(x interface{}) (ValueSaver, bool, error) { + if saver, ok := x.(ValueSaver); ok { + return saver, ok, nil + } + v := reflect.ValueOf(x) + // Support Put with []interface{} + if v.Kind() == reflect.Interface { + v = v.Elem() + } + if v.Kind() == reflect.Ptr { + v = v.Elem() + } + if v.Kind() != reflect.Struct { + return nil, false, nil + } + schema, err := inferSchemaReflect(v.Type()) + if err != nil { + return nil, false, err + } + return &StructSaver{Struct: x, Schema: schema}, true, nil +} + +func (u *Uploader) putMulti(ctx context.Context, src []ValueSaver) error { + var rows []*insertionRow + for _, saver := range src { + row, insertID, err := saver.Save() + if err != nil { + return err + } + rows = append(rows, &insertionRow{InsertID: insertID, Row: row}) + } + + return u.t.c.service.insertRows(ctx, u.t.ProjectID, u.t.DatasetID, u.t.TableID, rows, &insertRowsConf{ + skipInvalidRows: u.SkipInvalidRows, + ignoreUnknownValues: u.IgnoreUnknownValues, + templateSuffix: u.TableTemplateSuffix, + }) +} + +// An insertionRow represents a row of data to be inserted into a table. +type insertionRow struct { + // If InsertID is non-empty, BigQuery will use it to de-duplicate insertions of + // this row on a best-effort basis. + InsertID string + // The data to be inserted, represented as a map from field name to Value. + Row map[string]Value +} diff --git a/vendor/cloud.google.com/go/bigquery/uploader_test.go b/vendor/cloud.google.com/go/bigquery/uploader_test.go new file mode 100644 index 00000000..6deb5854 --- /dev/null +++ b/vendor/cloud.google.com/go/bigquery/uploader_test.go @@ -0,0 +1,285 @@ +// Copyright 2015 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package bigquery + +import ( + "reflect" + "testing" + + "cloud.google.com/go/internal/pretty" + + "golang.org/x/net/context" +) + +type testSaver struct { + ir *insertionRow + err error +} + +func (ts testSaver) Save() (map[string]Value, string, error) { + return ts.ir.Row, ts.ir.InsertID, ts.err +} + +func TestRejectsNonValueSavers(t *testing.T) { + client := &Client{projectID: "project-id"} + u := Uploader{t: client.Dataset("dataset-id").Table("table-id")} + + testCases := []struct { + src interface{} + }{ + { + src: 1, + }, + { + src: []int{1, 2}, + }, + { + src: []interface{}{ + testSaver{ir: &insertionRow{"a", map[string]Value{"one": 1}}}, + 1, + }, + }, + } + + for _, tc := range testCases { + if err := u.Put(context.Background(), tc.src); err == nil { + t.Errorf("put value: %v; got nil, want error", tc.src) + } + } +} + +type insertRowsRecorder struct { + rowBatches [][]*insertionRow + service +} + +func (irr *insertRowsRecorder) insertRows(ctx context.Context, projectID, datasetID, tableID string, rows []*insertionRow, conf *insertRowsConf) error { + irr.rowBatches = append(irr.rowBatches, rows) + return nil +} + +func TestInsertsData(t *testing.T) { + testCases := []struct { + data [][]*insertionRow + }{ + { + data: [][]*insertionRow{ + { + &insertionRow{"a", map[string]Value{"one": 1}}, + }, + }, + }, + { + + data: [][]*insertionRow{ + { + &insertionRow{"a", map[string]Value{"one": 1}}, + &insertionRow{"b", map[string]Value{"two": 2}}, + }, + }, + }, + { + + data: [][]*insertionRow{ + { + &insertionRow{"a", map[string]Value{"one": 
1}}, + }, + { + &insertionRow{"b", map[string]Value{"two": 2}}, + }, + }, + }, + { + + data: [][]*insertionRow{ + { + &insertionRow{"a", map[string]Value{"one": 1}}, + &insertionRow{"b", map[string]Value{"two": 2}}, + }, + { + &insertionRow{"c", map[string]Value{"three": 3}}, + &insertionRow{"d", map[string]Value{"four": 4}}, + }, + }, + }, + } + for _, tc := range testCases { + irr := &insertRowsRecorder{} + client := &Client{ + projectID: "project-id", + service: irr, + } + u := client.Dataset("dataset-id").Table("table-id").Uploader() + for _, batch := range tc.data { + if len(batch) == 0 { + continue + } + var toUpload interface{} + if len(batch) == 1 { + toUpload = testSaver{ir: batch[0]} + } else { + savers := []testSaver{} + for _, row := range batch { + savers = append(savers, testSaver{ir: row}) + } + toUpload = savers + } + + err := u.Put(context.Background(), toUpload) + if err != nil { + t.Errorf("expected successful Put of ValueSaver; got: %v", err) + } + } + if got, want := irr.rowBatches, tc.data; !reflect.DeepEqual(got, want) { + t.Errorf("got: %v, want: %v", got, want) + } + } +} + +type uploadOptionRecorder struct { + received *insertRowsConf + service +} + +func (u *uploadOptionRecorder) insertRows(ctx context.Context, projectID, datasetID, tableID string, rows []*insertionRow, conf *insertRowsConf) error { + u.received = conf + return nil +} + +func TestUploadOptionsPropagate(t *testing.T) { + // we don't care for the data in this testcase. 
+ dummyData := testSaver{ir: &insertionRow{}} + recorder := new(uploadOptionRecorder) + c := &Client{service: recorder} + table := &Table{ + ProjectID: "project-id", + DatasetID: "dataset-id", + TableID: "table-id", + c: c, + } + + tests := [...]struct { + ul *Uploader + conf insertRowsConf + }{ + { + // test zero options lead to zero value for insertRowsConf + ul: table.Uploader(), + }, + { + ul: func() *Uploader { + u := table.Uploader() + u.TableTemplateSuffix = "suffix" + return u + }(), + conf: insertRowsConf{ + templateSuffix: "suffix", + }, + }, + { + ul: func() *Uploader { + u := table.Uploader() + u.IgnoreUnknownValues = true + return u + }(), + conf: insertRowsConf{ + ignoreUnknownValues: true, + }, + }, + { + ul: func() *Uploader { + u := table.Uploader() + u.SkipInvalidRows = true + return u + }(), + conf: insertRowsConf{ + skipInvalidRows: true, + }, + }, + { // multiple upload options combine + ul: func() *Uploader { + u := table.Uploader() + u.TableTemplateSuffix = "suffix" + u.IgnoreUnknownValues = true + u.SkipInvalidRows = true + return u + }(), + conf: insertRowsConf{ + templateSuffix: "suffix", + skipInvalidRows: true, + ignoreUnknownValues: true, + }, + }, + } + + for i, tc := range tests { + err := tc.ul.Put(context.Background(), dummyData) + if err != nil { + t.Fatalf("%d: expected successful Put of ValueSaver; got: %v", i, err) + } + + if recorder.received == nil { + t.Fatalf("%d: received no options at all!", i) + } + + want := tc.conf + got := *recorder.received + if got != want { + t.Errorf("%d: got %#v, want %#v, ul=%#v", i, got, want, tc.ul) + } + } +} + +func TestValueSavers(t *testing.T) { + ts := &testSaver{ir: &insertionRow{}} + type T struct{ I int } + schema, err := InferSchema(T{}) + if err != nil { + t.Fatal(err) + } + for _, test := range []struct { + in interface{} + want []ValueSaver + }{ + {ts, []ValueSaver{ts}}, + {T{I: 1}, []ValueSaver{&StructSaver{Schema: schema, Struct: T{I: 1}}}}, + {[]ValueSaver{ts, ts}, 
[]ValueSaver{ts, ts}}, + {[]interface{}{ts, ts}, []ValueSaver{ts, ts}}, + {[]T{{I: 1}, {I: 2}}, []ValueSaver{ + &StructSaver{Schema: schema, Struct: T{I: 1}}, + &StructSaver{Schema: schema, Struct: T{I: 2}}, + }}, + {[]interface{}{T{I: 1}, &T{I: 2}}, []ValueSaver{ + &StructSaver{Schema: schema, Struct: T{I: 1}}, + &StructSaver{Schema: schema, Struct: &T{I: 2}}, + }}, + } { + got, err := valueSavers(test.in) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(got, test.want) { + + t.Errorf("%+v: got %v, want %v", test.in, pretty.Value(got), pretty.Value(test.want)) + } + // Make sure Save is successful. + for i, vs := range got { + _, _, err := vs.Save() + if err != nil { + t.Fatalf("%+v, #%d: got error %v, want nil", test.in, i, err) + } + } + } +} diff --git a/vendor/cloud.google.com/go/bigquery/utils_test.go b/vendor/cloud.google.com/go/bigquery/utils_test.go new file mode 100644 index 00000000..c781f9c3 --- /dev/null +++ b/vendor/cloud.google.com/go/bigquery/utils_test.go @@ -0,0 +1,47 @@ +// Copyright 2015 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package bigquery + +import ( + "golang.org/x/net/context" + bq "google.golang.org/api/bigquery/v2" +) + +func defaultGCS() *GCSReference { + return &GCSReference{ + uris: []string{"uri"}, + } +} + +var defaultQuery = &QueryConfig{ + Q: "query string", + DefaultProjectID: "def-project-id", + DefaultDatasetID: "def-dataset-id", +} + +type testService struct { + *bq.Job + + service +} + +func (s *testService) insertJob(ctx context.Context, projectID string, conf *insertJobConf) (*Job, error) { + s.Job = conf.job + return &Job{}, nil +} + +func (s *testService) jobStatus(ctx context.Context, projectID, jobID string) (*JobStatus, error) { + return &JobStatus{State: Done}, nil +} diff --git a/vendor/cloud.google.com/go/bigquery/value.go b/vendor/cloud.google.com/go/bigquery/value.go new file mode 100644 index 00000000..509853a8 --- /dev/null +++ b/vendor/cloud.google.com/go/bigquery/value.go @@ -0,0 +1,637 @@ +// Copyright 2015 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package bigquery + +import ( + "encoding/base64" + "errors" + "fmt" + "reflect" + "strconv" + "time" + + "cloud.google.com/go/civil" + + bq "google.golang.org/api/bigquery/v2" +) + +// Value stores the contents of a single cell from a BigQuery result. +type Value interface{} + +// ValueLoader stores a slice of Values representing a result row from a Read operation. +// See RowIterator.Next for more information. 
+type ValueLoader interface { + Load(v []Value, s Schema) error +} + +// valueList converts a []Value to implement ValueLoader. +type valueList []Value + +// Load stores a sequence of values in a valueList. +// It resets the slice length to zero, then appends each value to it. +func (vs *valueList) Load(v []Value, _ Schema) error { + *vs = append((*vs)[:0], v...) + return nil +} + +// valueMap converts a map[string]Value to implement ValueLoader. +type valueMap map[string]Value + +// Load stores a sequence of values in a valueMap. +func (vm *valueMap) Load(v []Value, s Schema) error { + if *vm == nil { + *vm = map[string]Value{} + } + loadMap(*vm, v, s) + return nil +} + +func loadMap(m map[string]Value, vals []Value, s Schema) { + for i, f := range s { + val := vals[i] + var v interface{} + switch { + case f.Schema == nil: + v = val + case !f.Repeated: + m2 := map[string]Value{} + loadMap(m2, val.([]Value), f.Schema) + v = m2 + default: // repeated and nested + sval := val.([]Value) + vs := make([]Value, len(sval)) + for j, e := range sval { + m2 := map[string]Value{} + loadMap(m2, e.([]Value), f.Schema) + vs[j] = m2 + } + v = vs + } + m[f.Name] = v + } +} + +type structLoader struct { + typ reflect.Type // type of struct + err error + ops []structLoaderOp + + vstructp reflect.Value // pointer to current struct value; changed by set +} + +// A setFunc is a function that sets a struct field or slice/array +// element to a value. +type setFunc func(v reflect.Value, val interface{}) error + +// A structLoaderOp instructs the loader to set a struct field to a row value. 
+type structLoaderOp struct { + fieldIndex []int + valueIndex int + setFunc setFunc + repeated bool +} + +func setAny(v reflect.Value, x interface{}) error { + v.Set(reflect.ValueOf(x)) + return nil +} + +func setInt(v reflect.Value, x interface{}) error { + xx := x.(int64) + if v.OverflowInt(xx) { + return fmt.Errorf("bigquery: value %v overflows struct field of type %v", xx, v.Type()) + } + v.SetInt(xx) + return nil +} + +func setFloat(v reflect.Value, x interface{}) error { + xx := x.(float64) + if v.OverflowFloat(xx) { + return fmt.Errorf("bigquery: value %v overflows struct field of type %v", xx, v.Type()) + } + v.SetFloat(xx) + return nil +} + +func setBool(v reflect.Value, x interface{}) error { + v.SetBool(x.(bool)) + return nil +} + +func setString(v reflect.Value, x interface{}) error { + v.SetString(x.(string)) + return nil +} + +func setBytes(v reflect.Value, x interface{}) error { + v.SetBytes(x.([]byte)) + return nil +} + +// set remembers a value for the next call to Load. The value must be +// a pointer to a struct. (This is checked in RowIterator.Next.) +func (sl *structLoader) set(structp interface{}, schema Schema) error { + if sl.err != nil { + return sl.err + } + sl.vstructp = reflect.ValueOf(structp) + typ := sl.vstructp.Type().Elem() + if sl.typ == nil { + // First call: remember the type and compile the schema. + sl.typ = typ + ops, err := compileToOps(typ, schema) + if err != nil { + sl.err = err + return err + } + sl.ops = ops + } else if sl.typ != typ { + return fmt.Errorf("bigquery: struct type changed from %s to %s", sl.typ, typ) + } + return nil +} + +// compileToOps produces a sequence of operations that will set the fields of a +// value of structType to the contents of a row with schema. 
+func compileToOps(structType reflect.Type, schema Schema) ([]structLoaderOp, error) { + var ops []structLoaderOp + fields, err := fieldCache.Fields(structType) + if err != nil { + return nil, err + } + for i, schemaField := range schema { + // Look for an exported struct field with the same name as the schema + // field, ignoring case (BigQuery column names are case-insensitive, + // and we want to act like encoding/json anyway). + structField := fields.Match(schemaField.Name) + if structField == nil { + // Ignore schema fields with no corresponding struct field. + continue + } + op := structLoaderOp{ + fieldIndex: structField.Index, + valueIndex: i, + } + t := structField.Type + if schemaField.Repeated { + if t.Kind() != reflect.Slice && t.Kind() != reflect.Array { + return nil, fmt.Errorf("bigquery: repeated schema field %s requires slice or array, but struct field %s has type %s", + schemaField.Name, structField.Name, t) + } + t = t.Elem() + op.repeated = true + } + if schemaField.Type == RecordFieldType { + // Field can be a struct or a pointer to a struct. + if t.Kind() == reflect.Ptr { + t = t.Elem() + } + if t.Kind() != reflect.Struct { + return nil, fmt.Errorf("bigquery: field %s has type %s, expected struct or *struct", + structField.Name, structField.Type) + } + nested, err := compileToOps(t, schemaField.Schema) + if err != nil { + return nil, err + } + op.setFunc = func(v reflect.Value, val interface{}) error { + return setNested(nested, v, val.([]Value)) + } + } else { + op.setFunc = determineSetFunc(t, schemaField.Type) + if op.setFunc == nil { + return nil, fmt.Errorf("bigquery: schema field %s of type %s is not assignable to struct field %s of type %s", + schemaField.Name, schemaField.Type, structField.Name, t) + } + } + ops = append(ops, op) + } + return ops, nil +} + +// determineSetFunc chooses the best function for setting a field of type ftype +// to a value whose schema field type is sftype. 
It returns nil if stype +// is not assignable to ftype. +// determineSetFunc considers only basic types. See compileToOps for +// handling of repetition and nesting. +func determineSetFunc(ftype reflect.Type, stype FieldType) setFunc { + switch stype { + case StringFieldType: + if ftype.Kind() == reflect.String { + return setString + } + + case BytesFieldType: + if ftype == typeOfByteSlice { + return setBytes + } + + case IntegerFieldType: + if isSupportedIntType(ftype) { + return setInt + } + + case FloatFieldType: + switch ftype.Kind() { + case reflect.Float32, reflect.Float64: + return setFloat + } + + case BooleanFieldType: + if ftype.Kind() == reflect.Bool { + return setBool + } + + case TimestampFieldType: + if ftype == typeOfGoTime { + return setAny + } + + case DateFieldType: + if ftype == typeOfDate { + return setAny + } + + case TimeFieldType: + if ftype == typeOfTime { + return setAny + } + + case DateTimeFieldType: + if ftype == typeOfDateTime { + return setAny + } + } + return nil +} + +func (sl *structLoader) Load(values []Value, _ Schema) error { + if sl.err != nil { + return sl.err + } + return runOps(sl.ops, sl.vstructp.Elem(), values) +} + +// runOps executes a sequence of ops, setting the fields of vstruct to the +// supplied values. +func runOps(ops []structLoaderOp, vstruct reflect.Value, values []Value) error { + for _, op := range ops { + field := vstruct.FieldByIndex(op.fieldIndex) + var err error + if op.repeated { + err = setRepeated(field, values[op.valueIndex].([]Value), op.setFunc) + } else { + err = op.setFunc(field, values[op.valueIndex]) + } + if err != nil { + return err + } + } + return nil +} + +func setNested(ops []structLoaderOp, v reflect.Value, vals []Value) error { + // v is either a struct or a pointer to a struct. + if v.Kind() == reflect.Ptr { + // If the pointer is nil, set it to a zero struct value. 
+ if v.IsNil() { + v.Set(reflect.New(v.Type().Elem())) + } + v = v.Elem() + } + return runOps(ops, v, vals) +} + +func setRepeated(field reflect.Value, vslice []Value, setElem setFunc) error { + vlen := len(vslice) + var flen int + switch field.Type().Kind() { + case reflect.Slice: + // Make a slice of the right size, avoiding allocation if possible. + switch { + case field.Len() < vlen: + field.Set(reflect.MakeSlice(field.Type(), vlen, vlen)) + case field.Len() > vlen: + field.SetLen(vlen) + } + flen = vlen + + case reflect.Array: + flen = field.Len() + if flen > vlen { + // Set extra elements to their zero value. + z := reflect.Zero(field.Type().Elem()) + for i := vlen; i < flen; i++ { + field.Index(i).Set(z) + } + } + default: + return fmt.Errorf("bigquery: impossible field type %s", field.Type()) + } + for i, val := range vslice { + if i < flen { // avoid writing past the end of a short array + if err := setElem(field.Index(i), val); err != nil { + return err + } + } + } + return nil +} + +// A ValueSaver returns a row of data to be inserted into a table. +type ValueSaver interface { + // Save returns a row to be inserted into a BigQuery table, represented + // as a map from field name to Value. + // If insertID is non-empty, BigQuery will use it to de-duplicate + // insertions of this row on a best-effort basis. + Save() (row map[string]Value, insertID string, err error) +} + +// ValuesSaver implements ValueSaver for a slice of Values. +type ValuesSaver struct { + Schema Schema + + // If non-empty, BigQuery will use InsertID to de-duplicate insertions + // of this row on a best-effort basis. + InsertID string + + Row []Value +} + +// Save implements ValueSaver. 
+func (vls *ValuesSaver) Save() (map[string]Value, string, error) { + m, err := valuesToMap(vls.Row, vls.Schema) + return m, vls.InsertID, err +} + +func valuesToMap(vs []Value, schema Schema) (map[string]Value, error) { + if len(vs) != len(schema) { + return nil, errors.New("Schema does not match length of row to be inserted") + } + + m := make(map[string]Value) + for i, fieldSchema := range schema { + if fieldSchema.Type != RecordFieldType { + m[fieldSchema.Name] = vs[i] + continue + } + // Nested record, possibly repeated. + vals, ok := vs[i].([]Value) + if !ok { + return nil, errors.New("nested record is not a []Value") + } + if !fieldSchema.Repeated { + value, err := valuesToMap(vals, fieldSchema.Schema) + if err != nil { + return nil, err + } + m[fieldSchema.Name] = value + continue + } + // A repeated nested field is converted into a slice of maps. + var maps []Value + for _, v := range vals { + sv, ok := v.([]Value) + if !ok { + return nil, errors.New("nested record in slice is not a []Value") + } + value, err := valuesToMap(sv, fieldSchema.Schema) + if err != nil { + return nil, err + } + maps = append(maps, value) + } + m[fieldSchema.Name] = maps + } + return m, nil +} + +// StructSaver implements ValueSaver for a struct. +// The struct is converted to a map of values by using the values of struct +// fields corresponding to schema fields. Additional and missing +// fields are ignored, as are nested struct pointers that are nil. +type StructSaver struct { + // Schema determines what fields of the struct are uploaded. It should + // match the table's schema. + Schema Schema + + // If non-empty, BigQuery will use InsertID to de-duplicate insertions + // of this row on a best-effort basis. + InsertID string + + // Struct should be a struct or a pointer to a struct. + Struct interface{} +} + +// Save implements ValueSaver. 
+func (ss *StructSaver) Save() (row map[string]Value, insertID string, err error) { + vstruct := reflect.ValueOf(ss.Struct) + row, err = structToMap(vstruct, ss.Schema) + if err != nil { + return nil, "", err + } + return row, ss.InsertID, nil +} + +func structToMap(vstruct reflect.Value, schema Schema) (map[string]Value, error) { + if vstruct.Kind() == reflect.Ptr { + vstruct = vstruct.Elem() + } + if !vstruct.IsValid() { + return nil, nil + } + m := map[string]Value{} + if vstruct.Kind() != reflect.Struct { + return nil, fmt.Errorf("bigquery: type is %s, need struct or struct pointer", vstruct.Type()) + } + fields, err := fieldCache.Fields(vstruct.Type()) + if err != nil { + return nil, err + } + for _, schemaField := range schema { + // Look for an exported struct field with the same name as the schema + // field, ignoring case. + structField := fields.Match(schemaField.Name) + if structField == nil { + continue + } + val, err := structFieldToUploadValue(vstruct.FieldByIndex(structField.Index), schemaField) + if err != nil { + return nil, err + } + // Add the value to the map, unless it is nil. + if val != nil { + m[schemaField.Name] = val + } + } + return m, nil +} + +// structFieldToUploadValue converts a struct field to a value suitable for ValueSaver.Save, using +// the schemaField as a guide. +// structFieldToUploadValue is careful to return a true nil interface{} when needed, so its +// caller can easily identify a nil value. +func structFieldToUploadValue(vfield reflect.Value, schemaField *FieldSchema) (interface{}, error) { + if schemaField.Repeated && (vfield.Kind() != reflect.Slice && vfield.Kind() != reflect.Array) { + return nil, fmt.Errorf("bigquery: repeated schema field %s requires slice or array, but value has type %s", + schemaField.Name, vfield.Type()) + } + + // A non-nested field can be represented by its Go value. 
+ if schemaField.Type != RecordFieldType { + if !schemaField.Repeated || vfield.Len() > 0 { + return vfield.Interface(), nil + } + // The service treats a null repeated field as an error. Return + // nil to omit the field entirely. + return nil, nil + } + // A non-repeated nested field is converted into a map[string]Value. + if !schemaField.Repeated { + m, err := structToMap(vfield, schemaField.Schema) + if err != nil { + return nil, err + } + if m == nil { + return nil, nil + } + return m, nil + } + // A repeated nested field is converted into a slice of maps. + if vfield.Len() == 0 { + return nil, nil + } + var vals []Value + for i := 0; i < vfield.Len(); i++ { + m, err := structToMap(vfield.Index(i), schemaField.Schema) + if err != nil { + return nil, err + } + vals = append(vals, m) + } + return vals, nil +} + +// convertRows converts a series of TableRows into a series of Value slices. +// schema is used to interpret the data from rows; its length must match the +// length of each row. 
+func convertRows(rows []*bq.TableRow, schema Schema) ([][]Value, error) { + var rs [][]Value + for _, r := range rows { + row, err := convertRow(r, schema) + if err != nil { + return nil, err + } + rs = append(rs, row) + } + return rs, nil +} + +func convertRow(r *bq.TableRow, schema Schema) ([]Value, error) { + if len(schema) != len(r.F) { + return nil, errors.New("schema length does not match row length") + } + var values []Value + for i, cell := range r.F { + fs := schema[i] + v, err := convertValue(cell.V, fs.Type, fs.Schema) + if err != nil { + return nil, err + } + values = append(values, v) + } + return values, nil +} + +func convertValue(val interface{}, typ FieldType, schema Schema) (Value, error) { + switch val := val.(type) { + case nil: + return nil, nil + case []interface{}: + return convertRepeatedRecord(val, typ, schema) + case map[string]interface{}: + return convertNestedRecord(val, schema) + case string: + return convertBasicType(val, typ) + default: + return nil, fmt.Errorf("got value %v; expected a value of type %s", val, typ) + } +} + +func convertRepeatedRecord(vals []interface{}, typ FieldType, schema Schema) (Value, error) { + var values []Value + for _, cell := range vals { + // each cell contains a single entry, keyed by "v" + val := cell.(map[string]interface{})["v"] + v, err := convertValue(val, typ, schema) + if err != nil { + return nil, err + } + values = append(values, v) + } + return values, nil +} + +func convertNestedRecord(val map[string]interface{}, schema Schema) (Value, error) { + // convertNestedRecord is similar to convertRow, as a record has the same structure as a row. + + // Nested records are wrapped in a map with a single key, "f". 
+ record := val["f"].([]interface{}) + if len(record) != len(schema) { + return nil, errors.New("schema length does not match record length") + } + + var values []Value + for i, cell := range record { + // each cell contains a single entry, keyed by "v" + val := cell.(map[string]interface{})["v"] + + fs := schema[i] + v, err := convertValue(val, fs.Type, fs.Schema) + if err != nil { + return nil, err + } + values = append(values, v) + } + return values, nil +} + +// convertBasicType returns val as an interface with a concrete type specified by typ. +func convertBasicType(val string, typ FieldType) (Value, error) { + switch typ { + case StringFieldType: + return val, nil + case BytesFieldType: + return base64.StdEncoding.DecodeString(val) + case IntegerFieldType: + return strconv.ParseInt(val, 10, 64) + case FloatFieldType: + return strconv.ParseFloat(val, 64) + case BooleanFieldType: + return strconv.ParseBool(val) + case TimestampFieldType: + f, err := strconv.ParseFloat(val, 64) + return Value(time.Unix(0, int64(f*1e9)).UTC()), err + case DateFieldType: + return civil.ParseDate(val) + case TimeFieldType: + return civil.ParseTime(val) + case DateTimeFieldType: + return civil.ParseDateTime(val) + default: + return nil, fmt.Errorf("unrecognized type: %s", typ) + } +} diff --git a/vendor/cloud.google.com/go/bigquery/value_test.go b/vendor/cloud.google.com/go/bigquery/value_test.go new file mode 100644 index 00000000..01accd22 --- /dev/null +++ b/vendor/cloud.google.com/go/bigquery/value_test.go @@ -0,0 +1,885 @@ +// Copyright 2015 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package bigquery + +import ( + "encoding/base64" + "fmt" + "math" + "reflect" + "testing" + "time" + + "cloud.google.com/go/civil" + "cloud.google.com/go/internal/pretty" + + bq "google.golang.org/api/bigquery/v2" +) + +func TestConvertBasicValues(t *testing.T) { + schema := []*FieldSchema{ + {Type: StringFieldType}, + {Type: IntegerFieldType}, + {Type: FloatFieldType}, + {Type: BooleanFieldType}, + {Type: BytesFieldType}, + } + row := &bq.TableRow{ + F: []*bq.TableCell{ + {V: "a"}, + {V: "1"}, + {V: "1.2"}, + {V: "true"}, + {V: base64.StdEncoding.EncodeToString([]byte("foo"))}, + }, + } + got, err := convertRow(row, schema) + if err != nil { + t.Fatalf("error converting: %v", err) + } + want := []Value{"a", int64(1), 1.2, true, []byte("foo")} + if !reflect.DeepEqual(got, want) { + t.Errorf("converting basic values: got:\n%v\nwant:\n%v", got, want) + } +} + +func TestConvertTime(t *testing.T) { + // TODO(jba): add tests for civil time types. 
+ schema := []*FieldSchema{ + {Type: TimestampFieldType}, + } + thyme := time.Date(1970, 1, 1, 10, 0, 0, 10, time.UTC) + row := &bq.TableRow{ + F: []*bq.TableCell{ + {V: fmt.Sprintf("%.10f", float64(thyme.UnixNano())/1e9)}, + }, + } + got, err := convertRow(row, schema) + if err != nil { + t.Fatalf("error converting: %v", err) + } + if !got[0].(time.Time).Equal(thyme) { + t.Errorf("converting basic values: got:\n%v\nwant:\n%v", got, thyme) + } + if got[0].(time.Time).Location() != time.UTC { + t.Errorf("expected time zone UTC: got:\n%v", got) + } +} + +func TestConvertNullValues(t *testing.T) { + schema := []*FieldSchema{ + {Type: StringFieldType}, + } + row := &bq.TableRow{ + F: []*bq.TableCell{ + {V: nil}, + }, + } + got, err := convertRow(row, schema) + if err != nil { + t.Fatalf("error converting: %v", err) + } + want := []Value{nil} + if !reflect.DeepEqual(got, want) { + t.Errorf("converting null values: got:\n%v\nwant:\n%v", got, want) + } +} + +func TestBasicRepetition(t *testing.T) { + schema := []*FieldSchema{ + {Type: IntegerFieldType, Repeated: true}, + } + row := &bq.TableRow{ + F: []*bq.TableCell{ + { + V: []interface{}{ + map[string]interface{}{ + "v": "1", + }, + map[string]interface{}{ + "v": "2", + }, + map[string]interface{}{ + "v": "3", + }, + }, + }, + }, + } + got, err := convertRow(row, schema) + if err != nil { + t.Fatalf("error converting: %v", err) + } + want := []Value{[]Value{int64(1), int64(2), int64(3)}} + if !reflect.DeepEqual(got, want) { + t.Errorf("converting basic repeated values: got:\n%v\nwant:\n%v", got, want) + } +} + +func TestNestedRecordContainingRepetition(t *testing.T) { + schema := []*FieldSchema{ + { + Type: RecordFieldType, + Schema: Schema{ + {Type: IntegerFieldType, Repeated: true}, + }, + }, + } + row := &bq.TableRow{ + F: []*bq.TableCell{ + { + V: map[string]interface{}{ + "f": []interface{}{ + map[string]interface{}{ + "v": []interface{}{ + map[string]interface{}{"v": "1"}, + map[string]interface{}{"v": "2"}, + 
map[string]interface{}{"v": "3"}, + }, + }, + }, + }, + }, + }, + } + + got, err := convertRow(row, schema) + if err != nil { + t.Fatalf("error converting: %v", err) + } + want := []Value{[]Value{[]Value{int64(1), int64(2), int64(3)}}} + if !reflect.DeepEqual(got, want) { + t.Errorf("converting basic repeated values: got:\n%v\nwant:\n%v", got, want) + } +} + +func TestRepeatedRecordContainingRepetition(t *testing.T) { + schema := []*FieldSchema{ + { + Type: RecordFieldType, + Repeated: true, + Schema: Schema{ + {Type: IntegerFieldType, Repeated: true}, + }, + }, + } + row := &bq.TableRow{F: []*bq.TableCell{ + { + V: []interface{}{ // repeated records. + map[string]interface{}{ // first record. + "v": map[string]interface{}{ // pointless single-key-map wrapper. + "f": []interface{}{ // list of record fields. + map[string]interface{}{ // only record (repeated ints) + "v": []interface{}{ // pointless wrapper. + map[string]interface{}{ + "v": "1", + }, + map[string]interface{}{ + "v": "2", + }, + map[string]interface{}{ + "v": "3", + }, + }, + }, + }, + }, + }, + map[string]interface{}{ // second record. + "v": map[string]interface{}{ + "f": []interface{}{ + map[string]interface{}{ + "v": []interface{}{ + map[string]interface{}{ + "v": "4", + }, + map[string]interface{}{ + "v": "5", + }, + map[string]interface{}{ + "v": "6", + }, + }, + }, + }, + }, + }, + }, + }, + }} + + got, err := convertRow(row, schema) + if err != nil { + t.Fatalf("error converting: %v", err) + } + want := []Value{ // the row is a list of length 1, containing an entry for the repeated record. + []Value{ // the repeated record is a list of length 2, containing an entry for each repetition. + []Value{ // the record is a list of length 1, containing an entry for the repeated integer field. + []Value{int64(1), int64(2), int64(3)}, // the repeated integer field is a list of length 3. 
+ }, + []Value{ // second record + []Value{int64(4), int64(5), int64(6)}, + }, + }, + } + if !reflect.DeepEqual(got, want) { + t.Errorf("converting repeated records with repeated values: got:\n%v\nwant:\n%v", got, want) + } +} + +func TestRepeatedRecordContainingRecord(t *testing.T) { + schema := []*FieldSchema{ + { + Type: RecordFieldType, + Repeated: true, + Schema: Schema{ + { + Type: StringFieldType, + }, + { + Type: RecordFieldType, + Schema: Schema{ + {Type: IntegerFieldType}, + {Type: StringFieldType}, + }, + }, + }, + }, + } + row := &bq.TableRow{F: []*bq.TableCell{ + { + V: []interface{}{ // repeated records. + map[string]interface{}{ // first record. + "v": map[string]interface{}{ // pointless single-key-map wrapper. + "f": []interface{}{ // list of record fields. + map[string]interface{}{ // first record field (name) + "v": "first repeated record", + }, + map[string]interface{}{ // second record field (nested record). + "v": map[string]interface{}{ // pointless single-key-map wrapper. + "f": []interface{}{ // nested record fields + map[string]interface{}{ + "v": "1", + }, + map[string]interface{}{ + "v": "two", + }, + }, + }, + }, + }, + }, + }, + map[string]interface{}{ // second record. + "v": map[string]interface{}{ + "f": []interface{}{ + map[string]interface{}{ + "v": "second repeated record", + }, + map[string]interface{}{ + "v": map[string]interface{}{ + "f": []interface{}{ + map[string]interface{}{ + "v": "3", + }, + map[string]interface{}{ + "v": "four", + }, + }, + }, + }, + }, + }, + }, + }, + }, + }} + + got, err := convertRow(row, schema) + if err != nil { + t.Fatalf("error converting: %v", err) + } + // TODO: test with flattenresults. + want := []Value{ // the row is a list of length 1, containing an entry for the repeated record. + []Value{ // the repeated record is a list of length 2, containing an entry for each repetition. + []Value{ // record contains a string followed by a nested record. 
+ "first repeated record", + []Value{ + int64(1), + "two", + }, + }, + []Value{ // second record. + "second repeated record", + []Value{ + int64(3), + "four", + }, + }, + }, + } + if !reflect.DeepEqual(got, want) { + t.Errorf("converting repeated records containing record : got:\n%v\nwant:\n%v", got, want) + } +} + +func TestValuesSaverConvertsToMap(t *testing.T) { + testCases := []struct { + vs ValuesSaver + want *insertionRow + }{ + { + vs: ValuesSaver{ + Schema: []*FieldSchema{ + {Name: "intField", Type: IntegerFieldType}, + {Name: "strField", Type: StringFieldType}, + }, + InsertID: "iid", + Row: []Value{1, "a"}, + }, + want: &insertionRow{ + InsertID: "iid", + Row: map[string]Value{"intField": 1, "strField": "a"}, + }, + }, + { + vs: ValuesSaver{ + Schema: []*FieldSchema{ + {Name: "intField", Type: IntegerFieldType}, + { + Name: "recordField", + Type: RecordFieldType, + Schema: []*FieldSchema{ + {Name: "nestedInt", Type: IntegerFieldType, Repeated: true}, + }, + }, + }, + InsertID: "iid", + Row: []Value{1, []Value{[]Value{2, 3}}}, + }, + want: &insertionRow{ + InsertID: "iid", + Row: map[string]Value{ + "intField": 1, + "recordField": map[string]Value{ + "nestedInt": []Value{2, 3}, + }, + }, + }, + }, + { // repeated nested field + vs: ValuesSaver{ + Schema: Schema{ + { + Name: "records", + Type: RecordFieldType, + Schema: Schema{ + {Name: "x", Type: IntegerFieldType}, + {Name: "y", Type: IntegerFieldType}, + }, + Repeated: true, + }, + }, + InsertID: "iid", + Row: []Value{ // a row is a []Value + []Value{ // repeated field's value is a []Value + []Value{1, 2}, // first record of the repeated field + []Value{3, 4}, // second record + }, + }, + }, + want: &insertionRow{ + InsertID: "iid", + Row: map[string]Value{ + "records": []Value{ + map[string]Value{"x": 1, "y": 2}, + map[string]Value{"x": 3, "y": 4}, + }, + }, + }, + }, + } + for _, tc := range testCases { + data, insertID, err := tc.vs.Save() + if err != nil { + t.Errorf("Expected successful save; got: 
%v", err) + } + got := &insertionRow{insertID, data} + if !reflect.DeepEqual(got, tc.want) { + t.Errorf("saving ValuesSaver:\ngot:\n%+v\nwant:\n%+v", got, tc.want) + } + } +} + +func TestStructSaver(t *testing.T) { + schema := Schema{ + {Name: "s", Type: StringFieldType}, + {Name: "r", Type: IntegerFieldType, Repeated: true}, + {Name: "nested", Type: RecordFieldType, Schema: Schema{ + {Name: "b", Type: BooleanFieldType}, + }}, + {Name: "rnested", Type: RecordFieldType, Repeated: true, Schema: Schema{ + {Name: "b", Type: BooleanFieldType}, + }}, + } + + type ( + N struct{ B bool } + T struct { + S string + R []int + Nested *N + Rnested []*N + } + ) + + check := func(msg string, in interface{}, want map[string]Value) { + ss := StructSaver{ + Schema: schema, + InsertID: "iid", + Struct: in, + } + got, gotIID, err := ss.Save() + if err != nil { + t.Fatalf("%s: %v", msg, err) + } + if wantIID := "iid"; gotIID != wantIID { + t.Errorf("%s: InsertID: got %q, want %q", msg, gotIID, wantIID) + } + if !reflect.DeepEqual(got, want) { + t.Errorf("%s:\ngot\n%#v\nwant\n%#v", msg, got, want) + } + } + + in := T{ + S: "x", + R: []int{1, 2}, + Nested: &N{B: true}, + Rnested: []*N{{true}, {false}}, + } + want := map[string]Value{ + "s": "x", + "r": []int{1, 2}, + "nested": map[string]Value{"b": true}, + "rnested": []Value{map[string]Value{"b": true}, map[string]Value{"b": false}}, + } + check("all values", in, want) + check("all values, ptr", &in, want) + check("empty struct", T{}, map[string]Value{"s": ""}) + + // Missing and extra fields ignored. 
+ type T2 struct { + S string + // missing R, Nested, RNested + Extra int + } + check("missing and extra", T2{S: "x"}, map[string]Value{"s": "x"}) + + check("nils in slice", T{Rnested: []*N{{true}, nil, {false}}}, + map[string]Value{ + "s": "", + "rnested": []Value{map[string]Value{"b": true}, map[string]Value(nil), map[string]Value{"b": false}}, + }) +} + +func TestConvertRows(t *testing.T) { + schema := []*FieldSchema{ + {Type: StringFieldType}, + {Type: IntegerFieldType}, + {Type: FloatFieldType}, + {Type: BooleanFieldType}, + } + rows := []*bq.TableRow{ + {F: []*bq.TableCell{ + {V: "a"}, + {V: "1"}, + {V: "1.2"}, + {V: "true"}, + }}, + {F: []*bq.TableCell{ + {V: "b"}, + {V: "2"}, + {V: "2.2"}, + {V: "false"}, + }}, + } + want := [][]Value{ + {"a", int64(1), 1.2, true}, + {"b", int64(2), 2.2, false}, + } + got, err := convertRows(rows, schema) + if err != nil { + t.Fatalf("got %v, want nil", err) + } + if !reflect.DeepEqual(got, want) { + t.Errorf("\ngot %v\nwant %v", got, want) + } +} + +func TestValueList(t *testing.T) { + schema := Schema{ + {Name: "s", Type: StringFieldType}, + {Name: "i", Type: IntegerFieldType}, + {Name: "f", Type: FloatFieldType}, + {Name: "b", Type: BooleanFieldType}, + } + want := []Value{"x", 7, 3.14, true} + var got []Value + vl := (*valueList)(&got) + if err := vl.Load(want, schema); err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(got, want) { + t.Errorf("got %+v, want %+v", got, want) + } + + // Load truncates, not appends. 
+ // https://github.com/GoogleCloudPlatform/google-cloud-go/issues/437 + if err := vl.Load(want, schema); err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(got, want) { + t.Errorf("got %+v, want %+v", got, want) + } +} + +func TestValueMap(t *testing.T) { + ns := Schema{ + {Name: "x", Type: IntegerFieldType}, + {Name: "y", Type: IntegerFieldType}, + } + schema := Schema{ + {Name: "s", Type: StringFieldType}, + {Name: "i", Type: IntegerFieldType}, + {Name: "f", Type: FloatFieldType}, + {Name: "b", Type: BooleanFieldType}, + {Name: "n", Type: RecordFieldType, Schema: ns}, + {Name: "rn", Type: RecordFieldType, Schema: ns, Repeated: true}, + } + in := []Value{"x", 7, 3.14, true, + []Value{1, 2}, + []Value{[]Value{3, 4}, []Value{5, 6}}, + } + var vm valueMap + if err := vm.Load(in, schema); err != nil { + t.Fatal(err) + } + want := map[string]Value{ + "s": "x", + "i": 7, + "f": 3.14, + "b": true, + "n": map[string]Value{"x": 1, "y": 2}, + "rn": []Value{ + map[string]Value{"x": 3, "y": 4}, + map[string]Value{"x": 5, "y": 6}, + }, + } + if !reflect.DeepEqual(vm, valueMap(want)) { + t.Errorf("got\n%+v\nwant\n%+v", vm, want) + } + +} + +var ( + // For testing StructLoader + schema2 = Schema{ + {Name: "s", Type: StringFieldType}, + {Name: "s2", Type: StringFieldType}, + {Name: "by", Type: BytesFieldType}, + {Name: "I", Type: IntegerFieldType}, + {Name: "F", Type: FloatFieldType}, + {Name: "B", Type: BooleanFieldType}, + {Name: "TS", Type: TimestampFieldType}, + {Name: "D", Type: DateFieldType}, + {Name: "T", Type: TimeFieldType}, + {Name: "DT", Type: DateTimeFieldType}, + {Name: "nested", Type: RecordFieldType, Schema: Schema{ + {Name: "nestS", Type: StringFieldType}, + {Name: "nestI", Type: IntegerFieldType}, + }}, + {Name: "t", Type: StringFieldType}, + } + + testTimestamp = time.Date(2016, 11, 5, 7, 50, 22, 8, time.UTC) + testDate = civil.Date{2016, 11, 5} + testTime = civil.Time{7, 50, 22, 8} + testDateTime = civil.DateTime{testDate, testTime} + + testValues = 
[]Value{"x", "y", []byte{1, 2, 3}, int64(7), 3.14, true, + testTimestamp, testDate, testTime, testDateTime, + []Value{"nested", int64(17)}, "z"} +) + +type testStruct1 struct { + B bool + I int + times + S string + S2 String + By []byte + s string + F float64 + Nested nested + Tagged string `bigquery:"t"` +} + +type String string + +type nested struct { + NestS string + NestI int +} + +type times struct { + TS time.Time + T civil.Time + D civil.Date + DT civil.DateTime +} + +func TestStructLoader(t *testing.T) { + var ts1 testStruct1 + if err := load(&ts1, schema2, testValues); err != nil { + t.Fatal(err) + } + // Note: the schema field named "s" gets matched to the exported struct + // field "S", not the unexported "s". + want := &testStruct1{ + B: true, + I: 7, + F: 3.14, + times: times{TS: testTimestamp, T: testTime, D: testDate, DT: testDateTime}, + S: "x", + S2: "y", + By: []byte{1, 2, 3}, + Nested: nested{NestS: "nested", NestI: 17}, + Tagged: "z", + } + if !reflect.DeepEqual(&ts1, want) { + t.Errorf("got %+v, want %+v", pretty.Value(ts1), pretty.Value(*want)) + d, _, err := pretty.Diff(*want, ts1) + if err == nil { + t.Logf("diff:\n%s", d) + } + } + + // Test pointers to nested structs. + type nestedPtr struct{ Nested *nested } + var np nestedPtr + if err := load(&np, schema2, testValues); err != nil { + t.Fatal(err) + } + want2 := &nestedPtr{Nested: &nested{NestS: "nested", NestI: 17}} + if !reflect.DeepEqual(&np, want2) { + t.Errorf("got %+v, want %+v", pretty.Value(np), pretty.Value(*want2)) + } + + // Existing values should be reused. 
+ nst := &nested{NestS: "x", NestI: -10} + np = nestedPtr{Nested: nst} + if err := load(&np, schema2, testValues); err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(&np, want2) { + t.Errorf("got %+v, want %+v", pretty.Value(np), pretty.Value(*want2)) + } + if np.Nested != nst { + t.Error("nested struct pointers not equal") + } +} + +type repStruct struct { + Nums []int + ShortNums [2]int // to test truncation + LongNums [5]int // to test padding with zeroes + Nested []*nested +} + +var ( + repSchema = Schema{ + {Name: "nums", Type: IntegerFieldType, Repeated: true}, + {Name: "shortNums", Type: IntegerFieldType, Repeated: true}, + {Name: "longNums", Type: IntegerFieldType, Repeated: true}, + {Name: "nested", Type: RecordFieldType, Repeated: true, Schema: Schema{ + {Name: "nestS", Type: StringFieldType}, + {Name: "nestI", Type: IntegerFieldType}, + }}, + } + v123 = []Value{int64(1), int64(2), int64(3)} + repValues = []Value{v123, v123, v123, + []Value{ + []Value{"x", int64(1)}, + []Value{"y", int64(2)}, + }, + } +) + +func TestStructLoaderRepeated(t *testing.T) { + var r1 repStruct + if err := load(&r1, repSchema, repValues); err != nil { + t.Fatal(err) + } + want := repStruct{ + Nums: []int{1, 2, 3}, + ShortNums: [...]int{1, 2}, // extra values discarded + LongNums: [...]int{1, 2, 3, 0, 0}, + Nested: []*nested{{"x", 1}, {"y", 2}}, + } + if !reflect.DeepEqual(r1, want) { + t.Errorf("got %+v, want %+v", pretty.Value(r1), pretty.Value(want)) + } + + r2 := repStruct{ + Nums: []int{-1, -2, -3, -4, -5}, // truncated to zero and appended to + LongNums: [...]int{-1, -2, -3, -4, -5}, // unset elements are zeroed + } + if err := load(&r2, repSchema, repValues); err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(r2, want) { + t.Errorf("got %+v, want %+v", pretty.Value(r2), pretty.Value(want)) + } + if got, want := cap(r2.Nums), 5; got != want { + t.Errorf("cap(r2.Nums) = %d, want %d", got, want) + } + + // Short slice case. 
+ r3 := repStruct{Nums: []int{-1}} + if err := load(&r3, repSchema, repValues); err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(r3, want) { + t.Errorf("got %+v, want %+v", pretty.Value(r3), pretty.Value(want)) + } + if got, want := cap(r3.Nums), 3; got != want { + t.Errorf("cap(r3.Nums) = %d, want %d", got, want) + } + +} + +func TestStructLoaderOverflow(t *testing.T) { + type S struct { + I int16 + F float32 + } + schema := Schema{ + {Name: "I", Type: IntegerFieldType}, + {Name: "F", Type: FloatFieldType}, + } + var s S + if err := load(&s, schema, []Value{int64(math.MaxInt16 + 1), 0}); err == nil { + t.Error("int: got nil, want error") + } + if err := load(&s, schema, []Value{int64(0), math.MaxFloat32 * 2}); err == nil { + t.Error("float: got nil, want error") + } +} + +func TestStructLoaderFieldOverlap(t *testing.T) { + // It's OK if the struct has fields that the schema does not, and vice versa. + type S1 struct { + I int + X [][]int // not in the schema; does not even correspond to a valid BigQuery type + // many schema fields missing + } + var s1 S1 + if err := load(&s1, schema2, testValues); err != nil { + t.Fatal(err) + } + want1 := S1{I: 7} + if !reflect.DeepEqual(s1, want1) { + t.Errorf("got %+v, want %+v", pretty.Value(s1), pretty.Value(want1)) + } + + // It's even valid to have no overlapping fields at all. 
+ type S2 struct{ Z int } + + var s2 S2 + if err := load(&s2, schema2, testValues); err != nil { + t.Fatal(err) + } + want2 := S2{} + if !reflect.DeepEqual(s2, want2) { + t.Errorf("got %+v, want %+v", pretty.Value(s2), pretty.Value(want2)) + } +} + +func TestStructLoaderErrors(t *testing.T) { + check := func(sp interface{}) { + var sl structLoader + err := sl.set(sp, schema2) + if err == nil { + t.Errorf("%T: got nil, want error", sp) + } + } + + type bad1 struct{ F int32 } // wrong type for FLOAT column + check(&bad1{}) + + type bad2 struct{ I uint } // unsupported integer type + check(&bad2{}) + + // Using more than one struct type with the same structLoader. + type different struct { + B bool + I int + times + S string + s string + Nums []int + } + + var sl structLoader + if err := sl.set(&testStruct1{}, schema2); err != nil { + t.Fatal(err) + } + err := sl.set(&different{}, schema2) + if err == nil { + t.Error("different struct types: got nil, want error") + } +} + +func load(pval interface{}, schema Schema, vals []Value) error { + var sl structLoader + if err := sl.set(pval, schema); err != nil { + return err + } + return sl.Load(vals, nil) +} + +func BenchmarkStructLoader_NoCompile(b *testing.B) { + benchmarkStructLoader(b, false) +} + +func BenchmarkStructLoader_Compile(b *testing.B) { + benchmarkStructLoader(b, true) +} + +func benchmarkStructLoader(b *testing.B, compile bool) { + var ts1 testStruct1 + for i := 0; i < b.N; i++ { + var sl structLoader + for j := 0; j < 10; j++ { + if err := load(&ts1, schema2, testValues); err != nil { + b.Fatal(err) + } + if !compile { + sl.typ = nil + } + } + } +} diff --git a/vendor/cloud.google.com/go/bigtable/admin.go b/vendor/cloud.google.com/go/bigtable/admin.go new file mode 100644 index 00000000..f65054e0 --- /dev/null +++ b/vendor/cloud.google.com/go/bigtable/admin.go @@ -0,0 +1,335 @@ +/* +Copyright 2015 Google Inc. All Rights Reserved. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package bigtable + +import ( + "fmt" + "regexp" + "strings" + + btopt "cloud.google.com/go/bigtable/internal/option" + "cloud.google.com/go/longrunning" + "golang.org/x/net/context" + "google.golang.org/api/option" + "google.golang.org/api/transport" + btapb "google.golang.org/genproto/googleapis/bigtable/admin/v2" + "google.golang.org/grpc" + "google.golang.org/grpc/metadata" +) + +const adminAddr = "bigtableadmin.googleapis.com:443" + +// AdminClient is a client type for performing admin operations within a specific instance. +type AdminClient struct { + conn *grpc.ClientConn + tClient btapb.BigtableTableAdminClient + + project, instance string + + // Metadata to be sent with each request. + md metadata.MD +} + +// NewAdminClient creates a new AdminClient for a given project and instance. +func NewAdminClient(ctx context.Context, project, instance string, opts ...option.ClientOption) (*AdminClient, error) { + o, err := btopt.DefaultClientOptions(adminAddr, AdminScope, clientUserAgent) + if err != nil { + return nil, err + } + o = append(o, opts...) + conn, err := transport.DialGRPC(ctx, o...) 
+ if err != nil { + return nil, fmt.Errorf("dialing: %v", err) + } + return &AdminClient{ + conn: conn, + tClient: btapb.NewBigtableTableAdminClient(conn), + project: project, + instance: instance, + md: metadata.Pairs(resourcePrefixHeader, fmt.Sprintf("projects/%s/instances/%s", project, instance)), + }, nil +} + +// Close closes the AdminClient. +func (ac *AdminClient) Close() error { + return ac.conn.Close() +} + +func (ac *AdminClient) instancePrefix() string { + return fmt.Sprintf("projects/%s/instances/%s", ac.project, ac.instance) +} + +// Tables returns a list of the tables in the instance. +func (ac *AdminClient) Tables(ctx context.Context) ([]string, error) { + ctx = mergeMetadata(ctx, ac.md) + prefix := ac.instancePrefix() + req := &btapb.ListTablesRequest{ + Parent: prefix, + } + res, err := ac.tClient.ListTables(ctx, req) + if err != nil { + return nil, err + } + names := make([]string, 0, len(res.Tables)) + for _, tbl := range res.Tables { + names = append(names, strings.TrimPrefix(tbl.Name, prefix+"/tables/")) + } + return names, nil +} + +// CreateTable creates a new table in the instance. +// This method may return before the table's creation is complete. +func (ac *AdminClient) CreateTable(ctx context.Context, table string) error { + ctx = mergeMetadata(ctx, ac.md) + prefix := ac.instancePrefix() + req := &btapb.CreateTableRequest{ + Parent: prefix, + TableId: table, + } + _, err := ac.tClient.CreateTable(ctx, req) + return err +} + +// CreatePresplitTable creates a new table in the instance. +// The list of row keys will be used to initially split the table into multiple tablets. +// Given two split keys, "s1" and "s2", three tablets will be created, +// spanning the key ranges: [, s1), [s1, s2), [s2, ). +// This method may return before the table's creation is complete. 
+func (ac *AdminClient) CreatePresplitTable(ctx context.Context, table string, split_keys []string) error { + var req_splits []*btapb.CreateTableRequest_Split + for _, split := range split_keys { + req_splits = append(req_splits, &btapb.CreateTableRequest_Split{[]byte(split)}) + } + ctx = mergeMetadata(ctx, ac.md) + prefix := ac.instancePrefix() + req := &btapb.CreateTableRequest{ + Parent: prefix, + TableId: table, + InitialSplits: req_splits, + } + _, err := ac.tClient.CreateTable(ctx, req) + return err +} + +// CreateColumnFamily creates a new column family in a table. +func (ac *AdminClient) CreateColumnFamily(ctx context.Context, table, family string) error { + // TODO(dsymonds): Permit specifying gcexpr and any other family settings. + ctx = mergeMetadata(ctx, ac.md) + prefix := ac.instancePrefix() + req := &btapb.ModifyColumnFamiliesRequest{ + Name: prefix + "/tables/" + table, + Modifications: []*btapb.ModifyColumnFamiliesRequest_Modification{{ + Id: family, + Mod: &btapb.ModifyColumnFamiliesRequest_Modification_Create{&btapb.ColumnFamily{}}, + }}, + } + _, err := ac.tClient.ModifyColumnFamilies(ctx, req) + return err +} + +// DeleteTable deletes a table and all of its data. +func (ac *AdminClient) DeleteTable(ctx context.Context, table string) error { + ctx = mergeMetadata(ctx, ac.md) + prefix := ac.instancePrefix() + req := &btapb.DeleteTableRequest{ + Name: prefix + "/tables/" + table, + } + _, err := ac.tClient.DeleteTable(ctx, req) + return err +} + +// DeleteColumnFamily deletes a column family in a table and all of its data. 
+func (ac *AdminClient) DeleteColumnFamily(ctx context.Context, table, family string) error { + ctx = mergeMetadata(ctx, ac.md) + prefix := ac.instancePrefix() + req := &btapb.ModifyColumnFamiliesRequest{ + Name: prefix + "/tables/" + table, + Modifications: []*btapb.ModifyColumnFamiliesRequest_Modification{{ + Id: family, + Mod: &btapb.ModifyColumnFamiliesRequest_Modification_Drop{true}, + }}, + } + _, err := ac.tClient.ModifyColumnFamilies(ctx, req) + return err +} + +// TableInfo represents information about a table. +type TableInfo struct { + Families []string +} + +// TableInfo retrieves information about a table. +func (ac *AdminClient) TableInfo(ctx context.Context, table string) (*TableInfo, error) { + ctx = mergeMetadata(ctx, ac.md) + prefix := ac.instancePrefix() + req := &btapb.GetTableRequest{ + Name: prefix + "/tables/" + table, + } + res, err := ac.tClient.GetTable(ctx, req) + if err != nil { + return nil, err + } + ti := &TableInfo{} + for fam := range res.ColumnFamilies { + ti.Families = append(ti.Families, fam) + } + return ti, nil +} + +// SetGCPolicy specifies which cells in a column family should be garbage collected. +// GC executes opportunistically in the background; table reads may return data +// matching the GC policy. +func (ac *AdminClient) SetGCPolicy(ctx context.Context, table, family string, policy GCPolicy) error { + ctx = mergeMetadata(ctx, ac.md) + prefix := ac.instancePrefix() + req := &btapb.ModifyColumnFamiliesRequest{ + Name: prefix + "/tables/" + table, + Modifications: []*btapb.ModifyColumnFamiliesRequest_Modification{{ + Id: family, + Mod: &btapb.ModifyColumnFamiliesRequest_Modification_Update{&btapb.ColumnFamily{GcRule: policy.proto()}}, + }}, + } + _, err := ac.tClient.ModifyColumnFamilies(ctx, req) + return err +} + +const instanceAdminAddr = "bigtableadmin.googleapis.com:443" + +// InstanceAdminClient is a client type for performing admin operations on instances. 
+// These operations can be substantially more dangerous than those provided by AdminClient. +type InstanceAdminClient struct { + conn *grpc.ClientConn + iClient btapb.BigtableInstanceAdminClient + + project string + + // Metadata to be sent with each request. + md metadata.MD +} + +// NewInstanceAdminClient creates a new InstanceAdminClient for a given project. +func NewInstanceAdminClient(ctx context.Context, project string, opts ...option.ClientOption) (*InstanceAdminClient, error) { + o, err := btopt.DefaultClientOptions(instanceAdminAddr, InstanceAdminScope, clientUserAgent) + if err != nil { + return nil, err + } + o = append(o, opts...) + conn, err := transport.DialGRPC(ctx, o...) + if err != nil { + return nil, fmt.Errorf("dialing: %v", err) + } + return &InstanceAdminClient{ + conn: conn, + iClient: btapb.NewBigtableInstanceAdminClient(conn), + + project: project, + md: metadata.Pairs(resourcePrefixHeader, "projects/"+project), + }, nil +} + +// Close closes the InstanceAdminClient. +func (iac *InstanceAdminClient) Close() error { + return iac.conn.Close() +} + +// StorageType is the type of storage used for all tables in an instance +type StorageType int + +const ( + SSD StorageType = iota + HDD +) + +func (st StorageType) proto() btapb.StorageType { + if st == HDD { + return btapb.StorageType_HDD + } + return btapb.StorageType_SSD +} + +// InstanceInfo represents information about an instance +type InstanceInfo struct { + Name string // name of the instance + DisplayName string // display name for UIs +} + +// InstanceConf contains the information necessary to create an Instance +type InstanceConf struct { + InstanceId, DisplayName, ClusterId, Zone string + NumNodes int32 + StorageType StorageType +} + +var instanceNameRegexp = regexp.MustCompile(`^projects/([^/]+)/instances/([a-z][-a-z0-9]*)$`) + +// CreateInstance creates a new instance in the project. +// This method will return when the instance has been created or when an error occurs. 
+func (iac *InstanceAdminClient) CreateInstance(ctx context.Context, conf *InstanceConf) error { + ctx = mergeMetadata(ctx, iac.md) + req := &btapb.CreateInstanceRequest{ + Parent: "projects/" + iac.project, + InstanceId: conf.InstanceId, + Instance: &btapb.Instance{DisplayName: conf.DisplayName}, + Clusters: map[string]*btapb.Cluster{ + conf.ClusterId: { + ServeNodes: conf.NumNodes, + DefaultStorageType: conf.StorageType.proto(), + Location: "projects/" + iac.project + "/locations/" + conf.Zone, + }, + }, + } + + lro, err := iac.iClient.CreateInstance(ctx, req) + if err != nil { + return err + } + resp := btapb.Instance{} + return longrunning.InternalNewOperation(iac.conn, lro).Wait(ctx, &resp) +} + +// DeleteInstance deletes an instance from the project. +func (iac *InstanceAdminClient) DeleteInstance(ctx context.Context, instanceId string) error { + ctx = mergeMetadata(ctx, iac.md) + req := &btapb.DeleteInstanceRequest{"projects/" + iac.project + "/instances/" + instanceId} + _, err := iac.iClient.DeleteInstance(ctx, req) + return err +} + +// Instances returns a list of instances in the project. +func (iac *InstanceAdminClient) Instances(ctx context.Context) ([]*InstanceInfo, error) { + ctx = mergeMetadata(ctx, iac.md) + req := &btapb.ListInstancesRequest{ + Parent: "projects/" + iac.project, + } + res, err := iac.iClient.ListInstances(ctx, req) + if err != nil { + return nil, err + } + + var is []*InstanceInfo + for _, i := range res.Instances { + m := instanceNameRegexp.FindStringSubmatch(i.Name) + if m == nil { + return nil, fmt.Errorf("malformed instance name %q", i.Name) + } + is = append(is, &InstanceInfo{ + Name: m[2], + DisplayName: i.DisplayName, + }) + } + return is, nil +} diff --git a/vendor/cloud.google.com/go/bigtable/admin_test.go b/vendor/cloud.google.com/go/bigtable/admin_test.go new file mode 100644 index 00000000..e025fb59 --- /dev/null +++ b/vendor/cloud.google.com/go/bigtable/admin_test.go @@ -0,0 +1,91 @@ +// Copyright 2015 Google Inc. 
All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package bigtable + +import ( + "sort" + "testing" + "time" + + "golang.org/x/net/context" +) + +func TestAdminIntegration(t *testing.T) { + testEnv, err := NewIntegrationEnv() + if err != nil { + t.Fatalf("IntegrationEnv: %v", err) + } + defer testEnv.Close() + + timeout := 2 * time.Second + if testEnv.Config().UseProd { + timeout = 5 * time.Minute + } + ctx, _ := context.WithTimeout(context.Background(), timeout) + + adminClient, err := testEnv.NewAdminClient() + if err != nil { + t.Fatalf("NewAdminClient: %v", err) + } + defer adminClient.Close() + + list := func() []string { + tbls, err := adminClient.Tables(ctx) + if err != nil { + t.Fatalf("Fetching list of tables: %v", err) + } + sort.Strings(tbls) + return tbls + } + containsAll := func(got, want []string) bool { + gotSet := make(map[string]bool) + + for _, s := range got { + gotSet[s] = true + } + for _, s := range want { + if !gotSet[s] { + return false + } + } + return true + } + + defer adminClient.DeleteTable(ctx, "mytable") + + if err := adminClient.CreateTable(ctx, "mytable"); err != nil { + t.Fatalf("Creating table: %v", err) + } + + defer adminClient.DeleteTable(ctx, "myothertable") + + if err := adminClient.CreateTable(ctx, "myothertable"); err != nil { + t.Fatalf("Creating table: %v", err) + } + + if got, want := list(), []string{"myothertable", "mytable"}; !containsAll(got, want) { + t.Errorf("adminClient.Tables 
returned %#v, want %#v", got, want) + } + if err := adminClient.DeleteTable(ctx, "myothertable"); err != nil { + t.Fatalf("Deleting table: %v", err) + } + tables := list() + if got, want := tables, []string{"mytable"}; !containsAll(got, want) { + t.Errorf("adminClient.Tables returned %#v, want %#v", got, want) + } + if got, unwanted := tables, []string{"myothertable"}; containsAll(got, unwanted) { + t.Errorf("adminClient.Tables return %#v. unwanted %#v", got, unwanted) + } +} diff --git a/vendor/cloud.google.com/go/bigtable/bigtable.go b/vendor/cloud.google.com/go/bigtable/bigtable.go new file mode 100644 index 00000000..692a6a1a --- /dev/null +++ b/vendor/cloud.google.com/go/bigtable/bigtable.go @@ -0,0 +1,735 @@ +/* +Copyright 2015 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package bigtable // import "cloud.google.com/go/bigtable" + +import ( + "errors" + "fmt" + "io" + "strconv" + "time" + + "cloud.google.com/go/bigtable/internal/gax" + btopt "cloud.google.com/go/bigtable/internal/option" + "github.com/golang/protobuf/proto" + "golang.org/x/net/context" + "google.golang.org/api/option" + "google.golang.org/api/transport" + btpb "google.golang.org/genproto/googleapis/bigtable/v2" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" +) + +const prodAddr = "bigtable.googleapis.com:443" + +// Client is a client for reading and writing data to tables in an instance. 
+// +// A Client is safe to use concurrently, except for its Close method. +type Client struct { + conn *grpc.ClientConn + client btpb.BigtableClient + project, instance string +} + +// NewClient creates a new Client for a given project and instance. +func NewClient(ctx context.Context, project, instance string, opts ...option.ClientOption) (*Client, error) { + o, err := btopt.DefaultClientOptions(prodAddr, Scope, clientUserAgent) + if err != nil { + return nil, err + } + // Default to a small connection pool that can be overridden. + o = append(o, option.WithGRPCConnectionPool(4)) + o = append(o, opts...) + conn, err := transport.DialGRPC(ctx, o...) + if err != nil { + return nil, fmt.Errorf("dialing: %v", err) + } + return &Client{ + conn: conn, + client: btpb.NewBigtableClient(conn), + project: project, + instance: instance, + }, nil +} + +// Close closes the Client. +func (c *Client) Close() error { + return c.conn.Close() +} + +var ( + idempotentRetryCodes = []codes.Code{codes.DeadlineExceeded, codes.Unavailable, codes.Aborted,codes.Internal} + isIdempotentRetryCode = make(map[codes.Code]bool) + retryOptions = []gax.CallOption{ + gax.WithDelayTimeoutSettings(100*time.Millisecond, 2000*time.Millisecond, 1.2), + gax.WithRetryCodes(idempotentRetryCodes), + } +) + +func init() { + for _, code := range idempotentRetryCodes { + isIdempotentRetryCode[code] = true + } +} + +func (c *Client) fullTableName(table string) string { + return fmt.Sprintf("projects/%s/instances/%s/tables/%s", c.project, c.instance, table) +} + +// A Table refers to a table. +// +// A Table is safe to use concurrently. +type Table struct { + c *Client + table string + + // Metadata to be sent with each request. + md metadata.MD +} + +// Open opens a table. +func (c *Client) Open(table string) *Table { + return &Table{ + c: c, + table: table, + md: metadata.Pairs(resourcePrefixHeader, c.fullTableName(table)), + } +} + +// TODO(dsymonds): Read method that returns a sequence of ReadItems. 
+ +// ReadRows reads rows from a table. f is called for each row. +// If f returns false, the stream is shut down and ReadRows returns. +// f owns its argument, and f is called serially in order by row key. +// +// By default, the yielded rows will contain all values in all cells. +// Use RowFilter to limit the cells returned. +func (t *Table) ReadRows(ctx context.Context, arg RowSet, f func(Row) bool, opts ...ReadOption) error { + ctx = mergeMetadata(ctx, t.md) + + var prevRowKey string + err := gax.Invoke(ctx, func(ctx context.Context) error { + req := &btpb.ReadRowsRequest{ + TableName: t.c.fullTableName(t.table), + Rows: arg.proto(), + } + for _, opt := range opts { + opt.set(req) + } + ctx, cancel := context.WithCancel(ctx) // for aborting the stream + defer cancel() + + stream, err := t.c.client.ReadRows(ctx, req) + if err != nil { + return err + } + cr := newChunkReader() + for { + res, err := stream.Recv() + if err == io.EOF { + break + } + if err != nil { + // Reset arg for next Invoke call. + arg = arg.retainRowsAfter(prevRowKey) + return err + } + + for _, cc := range res.Chunks { + row, err := cr.Process(cc) + if err != nil { + // No need to prepare for a retry, this is an unretryable error. + return err + } + if row == nil { + continue + } + prevRowKey = row.Key() + if !f(row) { + // Cancel and drain stream. + cancel() + for { + if _, err := stream.Recv(); err != nil { + // The stream has ended. We don't return an error + // because the caller has intentionally interrupted the scan. + return nil + } + } + } + } + if err := cr.Close(); err != nil { + // No need to prepare for a retry, this is an unretryable error. + return err + } + } + return err + }, retryOptions...) + + return err +} + +// ReadRow is a convenience implementation of a single-row reader. +// A missing row will return a zero-length map and a nil error. 
+func (t *Table) ReadRow(ctx context.Context, row string, opts ...ReadOption) (Row, error) { + var r Row + err := t.ReadRows(ctx, SingleRow(row), func(rr Row) bool { + r = rr + return true + }, opts...) + return r, err +} + +// decodeFamilyProto adds the cell data from f to the given row. +func decodeFamilyProto(r Row, row string, f *btpb.Family) { + fam := f.Name // does not have colon + for _, col := range f.Columns { + for _, cell := range col.Cells { + ri := ReadItem{ + Row: row, + Column: fam + ":" + string(col.Qualifier), + Timestamp: Timestamp(cell.TimestampMicros), + Value: cell.Value, + } + r[fam] = append(r[fam], ri) + } + } +} + +// RowSet is a set of rows to be read. It is satisfied by RowList and RowRange. +type RowSet interface { + proto() *btpb.RowSet + + // retainRowsAfter returns a new RowSet that does not include the + // given row key or any row key lexicographically less than it. + retainRowsAfter(lastRowKey string) RowSet +} + +// RowList is a sequence of row keys. +type RowList []string + +func (r RowList) proto() *btpb.RowSet { + keys := make([][]byte, len(r)) + for i, row := range r { + keys[i] = []byte(row) + } + return &btpb.RowSet{RowKeys: keys} +} + +func (r RowList) retainRowsAfter(lastRowKey string) RowSet { + var retryKeys RowList + for _, key := range r { + if key > lastRowKey { + retryKeys = append(retryKeys, key) + } + } + return retryKeys +} + +// A RowRange is a half-open interval [Start, Limit) encompassing +// all the rows with keys at least as large as Start, and less than Limit. +// (Bigtable string comparison is the same as Go's.) +// A RowRange can be unbounded, encompassing all keys at least as large as Start. +type RowRange struct { + start string + limit string +} + +// NewRange returns the new RowRange [begin, end). +func NewRange(begin, end string) RowRange { + return RowRange{ + start: begin, + limit: end, + } +} + +// Unbounded tests whether a RowRange is unbounded. 
+func (r RowRange) Unbounded() bool { + return r.limit == "" +} + +// Contains says whether the RowRange contains the key. +func (r RowRange) Contains(row string) bool { + return r.start <= row && (r.limit == "" || r.limit > row) +} + +// String provides a printable description of a RowRange. +func (r RowRange) String() string { + a := strconv.Quote(r.start) + if r.Unbounded() { + return fmt.Sprintf("[%s,∞)", a) + } + return fmt.Sprintf("[%s,%q)", a, r.limit) +} + +func (r RowRange) proto() *btpb.RowSet { + rr := &btpb.RowRange{ + StartKey: &btpb.RowRange_StartKeyClosed{[]byte(r.start)}, + } + if !r.Unbounded() { + rr.EndKey = &btpb.RowRange_EndKeyOpen{[]byte(r.limit)} + } + return &btpb.RowSet{RowRanges: []*btpb.RowRange{rr}} +} + +func (r RowRange) retainRowsAfter(lastRowKey string) RowSet { + if lastRowKey == "" { + return r + } + // Set the beginning of the range to the row after the last scanned. + start := lastRowKey + "\x00" + if r.Unbounded() { + return InfiniteRange(start) + } + return NewRange(start, r.limit) +} + +// SingleRow returns a RowSet for reading a single row. +func SingleRow(row string) RowSet { + return RowList{row} +} + +// PrefixRange returns a RowRange consisting of all keys starting with the prefix. +func PrefixRange(prefix string) RowRange { + return RowRange{ + start: prefix, + limit: prefixSuccessor(prefix), + } +} + +// InfiniteRange returns the RowRange consisting of all keys at least as +// large as start. +func InfiniteRange(start string) RowRange { + return RowRange{ + start: start, + limit: "", + } +} + +// prefixSuccessor returns the lexically smallest string greater than the +// prefix, if it exists, or "" otherwise. In either case, it is the string +// needed for the Limit of a RowRange. 
+func prefixSuccessor(prefix string) string { + if prefix == "" { + return "" // infinite range + } + n := len(prefix) + for n--; n >= 0 && prefix[n] == '\xff'; n-- { + } + if n == -1 { + return "" + } + ans := []byte(prefix[:n]) + ans = append(ans, prefix[n]+1) + return string(ans) +} + +// A ReadOption is an optional argument to ReadRows. +type ReadOption interface { + set(req *btpb.ReadRowsRequest) +} + +// RowFilter returns a ReadOption that applies f to the contents of read rows. +func RowFilter(f Filter) ReadOption { return rowFilter{f} } + +type rowFilter struct{ f Filter } + +func (rf rowFilter) set(req *btpb.ReadRowsRequest) { req.Filter = rf.f.proto() } + +// LimitRows returns a ReadOption that will limit the number of rows to be read. +func LimitRows(limit int64) ReadOption { return limitRows{limit} } + +type limitRows struct{ limit int64 } + +func (lr limitRows) set(req *btpb.ReadRowsRequest) { req.RowsLimit = lr.limit } + +// mutationsAreRetryable returns true if all mutations are idempotent +// and therefore retryable. A mutation is idempotent iff all cell timestamps +// have an explicit timestamp set and do not rely on the timestamp being set on the server. +func mutationsAreRetryable(muts []*btpb.Mutation) bool { + serverTime := int64(ServerTime) + for _, mut := range muts { + setCell := mut.GetSetCell() + if setCell != nil && setCell.TimestampMicros == serverTime { + return false + } + } + return true +} + +// Apply applies a Mutation to a specific row. 
+func (t *Table) Apply(ctx context.Context, row string, m *Mutation, opts ...ApplyOption) error { + ctx = mergeMetadata(ctx, t.md) + after := func(res proto.Message) { + for _, o := range opts { + o.after(res) + } + } + + var callOptions []gax.CallOption + if m.cond == nil { + req := &btpb.MutateRowRequest{ + TableName: t.c.fullTableName(t.table), + RowKey: []byte(row), + Mutations: m.ops, + } + if mutationsAreRetryable(m.ops) { + callOptions = retryOptions + } + var res *btpb.MutateRowResponse + err := gax.Invoke(ctx, func(ctx context.Context) error { + var err error + res, err = t.c.client.MutateRow(ctx, req) + return err + }, callOptions...) + if err == nil { + after(res) + } + return err + } + + req := &btpb.CheckAndMutateRowRequest{ + TableName: t.c.fullTableName(t.table), + RowKey: []byte(row), + PredicateFilter: m.cond.proto(), + } + if m.mtrue != nil { + req.TrueMutations = m.mtrue.ops + } + if m.mfalse != nil { + req.FalseMutations = m.mfalse.ops + } + if mutationsAreRetryable(req.TrueMutations) && mutationsAreRetryable(req.FalseMutations) { + callOptions = retryOptions + } + var cmRes *btpb.CheckAndMutateRowResponse + err := gax.Invoke(ctx, func(ctx context.Context) error { + var err error + cmRes, err = t.c.client.CheckAndMutateRow(ctx, req) + return err + }, callOptions...) + if err == nil { + after(cmRes) + } + return err +} + +// An ApplyOption is an optional argument to Apply. +type ApplyOption interface { + after(res proto.Message) +} + +type applyAfterFunc func(res proto.Message) + +func (a applyAfterFunc) after(res proto.Message) { a(res) } + +// GetCondMutationResult returns an ApplyOption that reports whether the conditional +// mutation's condition matched. +func GetCondMutationResult(matched *bool) ApplyOption { + return applyAfterFunc(func(res proto.Message) { + if res, ok := res.(*btpb.CheckAndMutateRowResponse); ok { + *matched = res.PredicateMatched + } + }) +} + +// Mutation represents a set of changes for a single row of a table. 
+type Mutation struct { + ops []*btpb.Mutation + + // for conditional mutations + cond Filter + mtrue, mfalse *Mutation +} + +// NewMutation returns a new mutation. +func NewMutation() *Mutation { + return new(Mutation) +} + +// NewCondMutation returns a conditional mutation. +// The given row filter determines which mutation is applied: +// If the filter matches any cell in the row, mtrue is applied; +// otherwise, mfalse is applied. +// Either given mutation may be nil. +func NewCondMutation(cond Filter, mtrue, mfalse *Mutation) *Mutation { + return &Mutation{cond: cond, mtrue: mtrue, mfalse: mfalse} +} + +// Set sets a value in a specified column, with the given timestamp. +// The timestamp will be truncated to millisecond granularity. +// A timestamp of ServerTime means to use the server timestamp. +func (m *Mutation) Set(family, column string, ts Timestamp, value []byte) { + m.ops = append(m.ops, &btpb.Mutation{Mutation: &btpb.Mutation_SetCell_{&btpb.Mutation_SetCell{ + FamilyName: family, + ColumnQualifier: []byte(column), + TimestampMicros: int64(ts.TruncateToMilliseconds()), + Value: value, + }}}) +} + +// DeleteCellsInColumn will delete all the cells whose columns are family:column. +func (m *Mutation) DeleteCellsInColumn(family, column string) { + m.ops = append(m.ops, &btpb.Mutation{Mutation: &btpb.Mutation_DeleteFromColumn_{&btpb.Mutation_DeleteFromColumn{ + FamilyName: family, + ColumnQualifier: []byte(column), + }}}) +} + +// DeleteTimestampRange deletes all cells whose columns are family:column +// and whose timestamps are in the half-open interval [start, end). +// If end is zero, it will be interpreted as infinity. +// The timestamps will be truncated to millisecond granularity. 
+func (m *Mutation) DeleteTimestampRange(family, column string, start, end Timestamp) { + m.ops = append(m.ops, &btpb.Mutation{Mutation: &btpb.Mutation_DeleteFromColumn_{&btpb.Mutation_DeleteFromColumn{ + FamilyName: family, + ColumnQualifier: []byte(column), + TimeRange: &btpb.TimestampRange{ + StartTimestampMicros: int64(start.TruncateToMilliseconds()), + EndTimestampMicros: int64(end.TruncateToMilliseconds()), + }, + }}}) +} + +// DeleteCellsInFamily will delete all the cells whose columns are family:*. +func (m *Mutation) DeleteCellsInFamily(family string) { + m.ops = append(m.ops, &btpb.Mutation{Mutation: &btpb.Mutation_DeleteFromFamily_{&btpb.Mutation_DeleteFromFamily{ + FamilyName: family, + }}}) +} + +// DeleteRow deletes the entire row. +func (m *Mutation) DeleteRow() { + m.ops = append(m.ops, &btpb.Mutation{Mutation: &btpb.Mutation_DeleteFromRow_{&btpb.Mutation_DeleteFromRow{}}}) +} + +// entryErr is a container that combines an entry with the error that was returned for it. +// Err may be nil if no error was returned for the Entry, or if the Entry has not yet been processed. +type entryErr struct { + Entry *btpb.MutateRowsRequest_Entry + Err error +} + +// ApplyBulk applies multiple Mutations. +// Each mutation is individually applied atomically, +// but the set of mutations may be applied in any order. +// +// Two types of failures may occur. If the entire process +// fails, (nil, err) will be returned. If specific mutations +// fail to apply, ([]err, nil) will be returned, and the errors +// will correspond to the relevant rowKeys/muts arguments. +// +// Conditional mutations cannot be applied in bulk and providing one will result in an error. 
+func (t *Table) ApplyBulk(ctx context.Context, rowKeys []string, muts []*Mutation, opts ...ApplyOption) ([]error, error) { + ctx = mergeMetadata(ctx, t.md) + if len(rowKeys) != len(muts) { + return nil, fmt.Errorf("mismatched rowKeys and mutation array lengths: %d, %d", len(rowKeys), len(muts)) + } + + origEntries := make([]*entryErr, len(rowKeys)) + for i, key := range rowKeys { + mut := muts[i] + if mut.cond != nil { + return nil, errors.New("conditional mutations cannot be applied in bulk") + } + origEntries[i] = &entryErr{Entry: &btpb.MutateRowsRequest_Entry{RowKey: []byte(key), Mutations: mut.ops}} + } + + // entries will be reduced after each invocation to just what needs to be retried. + entries := make([]*entryErr, len(rowKeys)) + copy(entries, origEntries) + err := gax.Invoke(ctx, func(ctx context.Context) error { + err := t.doApplyBulk(ctx, entries, opts...) + if err != nil { + // We want to retry the entire request with the current entries + return err + } + entries = t.getApplyBulkRetries(entries) + if len(entries) > 0 && len(idempotentRetryCodes) > 0 { + // We have at least one mutation that needs to be retried. + // Return an arbitrary error that is retryable according to callOptions. + return grpc.Errorf(idempotentRetryCodes[0], "Synthetic error: partial failure of ApplyBulk") + } + return nil + }, retryOptions...) + + if err != nil { + return nil, err + } + + // Accumulate all of the errors into an array to return, interspersed with nils for successful + // entries. The absence of any errors means we should return nil. 
+ var errs []error + var foundErr bool + for _, entry := range origEntries { + if entry.Err != nil { + foundErr = true + } + errs = append(errs, entry.Err) + } + if foundErr { + return errs, nil + } + return nil, nil +} + +// getApplyBulkRetries returns the entries that need to be retried +func (t *Table) getApplyBulkRetries(entries []*entryErr) []*entryErr { + var retryEntries []*entryErr + for _, entry := range entries { + err := entry.Err + if err != nil && isIdempotentRetryCode[grpc.Code(err)] && mutationsAreRetryable(entry.Entry.Mutations) { + // There was an error and the entry is retryable. + retryEntries = append(retryEntries, entry) + } + } + return retryEntries +} + +// doApplyBulk does the work of a single ApplyBulk invocation +func (t *Table) doApplyBulk(ctx context.Context, entryErrs []*entryErr, opts ...ApplyOption) error { + after := func(res proto.Message) { + for _, o := range opts { + o.after(res) + } + } + + entries := make([]*btpb.MutateRowsRequest_Entry, len(entryErrs)) + for i, entryErr := range entryErrs { + entries[i] = entryErr.Entry + } + req := &btpb.MutateRowsRequest{ + TableName: t.c.fullTableName(t.table), + Entries: entries, + } + stream, err := t.c.client.MutateRows(ctx, req) + if err != nil { + return err + } + for { + res, err := stream.Recv() + if err == io.EOF { + break + } + if err != nil { + return err + } + + for i, entry := range res.Entries { + status := entry.Status + if status.Code == int32(codes.OK) { + entryErrs[i].Err = nil + } else { + entryErrs[i].Err = grpc.Errorf(codes.Code(status.Code), status.Message) + } + } + after(res) + } + return nil +} + +// Timestamp is in units of microseconds since 1 January 1970. +type Timestamp int64 + +// ServerTime is a specific Timestamp that may be passed to (*Mutation).Set. +// It indicates that the server's timestamp should be used. +const ServerTime Timestamp = -1 + +// Time converts a time.Time into a Timestamp. 
+func Time(t time.Time) Timestamp { return Timestamp(t.UnixNano() / 1e3) } + +// Now returns the Timestamp representation of the current time on the client. +func Now() Timestamp { return Time(time.Now()) } + +// Time converts a Timestamp into a time.Time. +func (ts Timestamp) Time() time.Time { return time.Unix(0, int64(ts)*1e3) } + +// TruncateToMilliseconds truncates a Timestamp to millisecond granularity, +// which is currently the only granularity supported. +func (ts Timestamp) TruncateToMilliseconds() Timestamp { + if ts == ServerTime { + return ts + } + return ts - ts % 1000 +} + +// ApplyReadModifyWrite applies a ReadModifyWrite to a specific row. +// It returns the newly written cells. +func (t *Table) ApplyReadModifyWrite(ctx context.Context, row string, m *ReadModifyWrite) (Row, error) { + ctx = mergeMetadata(ctx, t.md) + req := &btpb.ReadModifyWriteRowRequest{ + TableName: t.c.fullTableName(t.table), + RowKey: []byte(row), + Rules: m.ops, + } + res, err := t.c.client.ReadModifyWriteRow(ctx, req) + if err != nil { + return nil, err + } + if res.Row == nil { + return nil, errors.New("unable to apply ReadModifyWrite: res.Row=nil") + } + r := make(Row) + for _, fam := range res.Row.Families { // res is *btpb.Row, fam is *btpb.Family + decodeFamilyProto(r, row, fam) + } + return r, nil +} + +// ReadModifyWrite represents a set of operations on a single row of a table. +// It is like Mutation but for non-idempotent changes. +// When applied, these operations operate on the latest values of the row's cells, +// and result in a new value being written to the relevant cell with a timestamp +// that is max(existing timestamp, current server time). +// +// The application of a ReadModifyWrite is atomic; concurrent ReadModifyWrites will +// be executed serially by the server. +type ReadModifyWrite struct { + ops []*btpb.ReadModifyWriteRule +} + +// NewReadModifyWrite returns a new ReadModifyWrite. 
+func NewReadModifyWrite() *ReadModifyWrite { return new(ReadModifyWrite) } + +// AppendValue appends a value to a specific cell's value. +// If the cell is unset, it will be treated as an empty value. +func (m *ReadModifyWrite) AppendValue(family, column string, v []byte) { + m.ops = append(m.ops, &btpb.ReadModifyWriteRule{ + FamilyName: family, + ColumnQualifier: []byte(column), + Rule: &btpb.ReadModifyWriteRule_AppendValue{v}, + }) +} + +// Increment interprets the value in a specific cell as a 64-bit big-endian signed integer, +// and adds a value to it. If the cell is unset, it will be treated as zero. +// If the cell is set and is not an 8-byte value, the entire ApplyReadModifyWrite +// operation will fail. +func (m *ReadModifyWrite) Increment(family, column string, delta int64) { + m.ops = append(m.ops, &btpb.ReadModifyWriteRule{ + FamilyName: family, + ColumnQualifier: []byte(column), + Rule: &btpb.ReadModifyWriteRule_IncrementAmount{delta}, + }) +} + +// mergeMetadata returns a context populated by the existing metadata, if any, +// joined with internal metadata. +func mergeMetadata(ctx context.Context, md metadata.MD) context.Context { + mdCopy, _ := metadata.FromContext(ctx) + return metadata.NewContext(ctx, metadata.Join(mdCopy, md)) +} diff --git a/vendor/cloud.google.com/go/bigtable/bigtable_test.go b/vendor/cloud.google.com/go/bigtable/bigtable_test.go new file mode 100644 index 00000000..2713875a --- /dev/null +++ b/vendor/cloud.google.com/go/bigtable/bigtable_test.go @@ -0,0 +1,854 @@ +/* +Copyright 2015 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package bigtable + +import ( + "fmt" + "math/rand" + "reflect" + "strings" + "sync" + "testing" + "time" + + "golang.org/x/net/context" +) + +func TestPrefix(t *testing.T) { + tests := []struct { + prefix, succ string + }{ + {"", ""}, + {"\xff", ""}, // when used, "" means Infinity + {"x\xff", "y"}, + {"\xfe", "\xff"}, + } + for _, tc := range tests { + got := prefixSuccessor(tc.prefix) + if got != tc.succ { + t.Errorf("prefixSuccessor(%q) = %q, want %s", tc.prefix, got, tc.succ) + continue + } + r := PrefixRange(tc.prefix) + if tc.succ == "" && r.limit != "" { + t.Errorf("PrefixRange(%q) got limit %q", tc.prefix, r.limit) + } + if tc.succ != "" && r.limit != tc.succ { + t.Errorf("PrefixRange(%q) got limit %q, want %q", tc.prefix, r.limit, tc.succ) + } + } +} + +func TestClientIntegration(t *testing.T) { + start := time.Now() + lastCheckpoint := start + checkpoint := func(s string) { + n := time.Now() + t.Logf("[%s] %v since start, %v since last checkpoint", s, n.Sub(start), n.Sub(lastCheckpoint)) + lastCheckpoint = n + } + + testEnv, err := NewIntegrationEnv() + if err != nil { + t.Fatalf("IntegrationEnv: %v", err) + } + + timeout := 30 * time.Second + if testEnv.Config().UseProd { + timeout = 5 * time.Minute + t.Logf("Running test against production") + } else { + t.Logf("bttest.Server running on %s", testEnv.Config().AdminEndpoint) + } + ctx, _ := context.WithTimeout(context.Background(), timeout) + + client, err := testEnv.NewClient() + if err != nil { + t.Fatalf("Client: %v", err) + } + defer client.Close() + checkpoint("dialed Client") + + adminClient, err := 
testEnv.NewAdminClient() + if err != nil { + t.Fatalf("AdminClient: %v", err) + } + defer adminClient.Close() + checkpoint("dialed AdminClient") + + table := testEnv.Config().Table + + // Delete the table at the end of the test. + // Do this even before creating the table so that if this is running + // against production and CreateTable fails there's a chance of cleaning it up. + defer adminClient.DeleteTable(ctx, table) + + if err := adminClient.CreateTable(ctx, table); err != nil { + t.Fatalf("Creating table: %v", err) + } + checkpoint("created table") + if err := adminClient.CreateColumnFamily(ctx, table, "follows"); err != nil { + t.Fatalf("Creating column family: %v", err) + } + checkpoint(`created "follows" column family`) + + tbl := client.Open(table) + + // Insert some data. + initialData := map[string][]string{ + "wmckinley": {"tjefferson"}, + "gwashington": {"jadams"}, + "tjefferson": {"gwashington", "jadams"}, // wmckinley set conditionally below + "jadams": {"gwashington", "tjefferson"}, + } + for row, ss := range initialData { + mut := NewMutation() + for _, name := range ss { + mut.Set("follows", name, 0, []byte("1")) + } + if err := tbl.Apply(ctx, row, mut); err != nil { + t.Errorf("Mutating row %q: %v", row, err) + } + } + checkpoint("inserted initial data") + + // Do a conditional mutation with a complex filter. + mutTrue := NewMutation() + mutTrue.Set("follows", "wmckinley", 0, []byte("1")) + filter := ChainFilters(ColumnFilter("gwash[iz].*"), ValueFilter(".")) + mut := NewCondMutation(filter, mutTrue, nil) + if err := tbl.Apply(ctx, "tjefferson", mut); err != nil { + t.Errorf("Conditionally mutating row: %v", err) + } + // Do a second condition mutation with a filter that does not match, + // and thus no changes should be made. 
+ mutTrue = NewMutation() + mutTrue.DeleteRow() + filter = ColumnFilter("snoop.dogg") + mut = NewCondMutation(filter, mutTrue, nil) + if err := tbl.Apply(ctx, "tjefferson", mut); err != nil { + t.Errorf("Conditionally mutating row: %v", err) + } + checkpoint("did two conditional mutations") + + // Fetch a row. + row, err := tbl.ReadRow(ctx, "jadams") + if err != nil { + t.Fatalf("Reading a row: %v", err) + } + wantRow := Row{ + "follows": []ReadItem{ + {Row: "jadams", Column: "follows:gwashington", Value: []byte("1")}, + {Row: "jadams", Column: "follows:tjefferson", Value: []byte("1")}, + }, + } + if !reflect.DeepEqual(row, wantRow) { + t.Errorf("Read row mismatch.\n got %#v\nwant %#v", row, wantRow) + } + checkpoint("tested ReadRow") + + // Do a bunch of reads with filters. + readTests := []struct { + desc string + rr RowSet + filter Filter // may be nil + limit ReadOption // may be nil + + // We do the read, grab all the cells, turn them into "--", + // and join with a comma. + want string + }{ + { + desc: "read all, unfiltered", + rr: RowRange{}, + want: "gwashington-jadams-1,jadams-gwashington-1,jadams-tjefferson-1,tjefferson-gwashington-1,tjefferson-jadams-1,tjefferson-wmckinley-1,wmckinley-tjefferson-1", + }, + { + desc: "read with InfiniteRange, unfiltered", + rr: InfiniteRange("tjefferson"), + want: "tjefferson-gwashington-1,tjefferson-jadams-1,tjefferson-wmckinley-1,wmckinley-tjefferson-1", + }, + { + desc: "read with NewRange, unfiltered", + rr: NewRange("gargamel", "hubbard"), + want: "gwashington-jadams-1", + }, + { + desc: "read with PrefixRange, unfiltered", + rr: PrefixRange("jad"), + want: "jadams-gwashington-1,jadams-tjefferson-1", + }, + { + desc: "read with SingleRow, unfiltered", + rr: SingleRow("wmckinley"), + want: "wmckinley-tjefferson-1", + }, + { + desc: "read all, with ColumnFilter", + rr: RowRange{}, + filter: ColumnFilter(".*j.*"), // matches "jadams" and "tjefferson" + want: 
"gwashington-jadams-1,jadams-tjefferson-1,tjefferson-jadams-1,wmckinley-tjefferson-1", + }, + { + desc: "read range, with ColumnRangeFilter", + rr: RowRange{}, + filter: ColumnRangeFilter("follows", "h", "k"), + want: "gwashington-jadams-1,tjefferson-jadams-1", + }, + { + desc: "read range from empty, with ColumnRangeFilter", + rr: RowRange{}, + filter: ColumnRangeFilter("follows", "", "u"), + want: "gwashington-jadams-1,jadams-gwashington-1,jadams-tjefferson-1,tjefferson-gwashington-1,tjefferson-jadams-1,wmckinley-tjefferson-1", + }, + { + desc: "read range from start to empty, with ColumnRangeFilter", + rr: RowRange{}, + filter: ColumnRangeFilter("follows", "h", ""), + want: "gwashington-jadams-1,jadams-tjefferson-1,tjefferson-jadams-1,tjefferson-wmckinley-1,wmckinley-tjefferson-1", + }, + { + desc: "read with RowKeyFilter", + rr: RowRange{}, + filter: RowKeyFilter(".*wash.*"), + want: "gwashington-jadams-1", + }, + { + desc: "read with RowKeyFilter, no matches", + rr: RowRange{}, + filter: RowKeyFilter(".*xxx.*"), + want: "", + }, + { + desc: "read with FamilyFilter, no matches", + rr: RowRange{}, + filter: FamilyFilter(".*xxx.*"), + want: "", + }, + { + desc: "read with ColumnFilter + row limit", + rr: RowRange{}, + filter: ColumnFilter(".*j.*"), // matches "jadams" and "tjefferson" + limit: LimitRows(2), + want: "gwashington-jadams-1,jadams-tjefferson-1", + }, + { + desc: "read all, strip values", + rr: RowRange{}, + filter: StripValueFilter(), + want: "gwashington-jadams-,jadams-gwashington-,jadams-tjefferson-,tjefferson-gwashington-,tjefferson-jadams-,tjefferson-wmckinley-,wmckinley-tjefferson-", + }, + { + desc: "read with ColumnFilter + row limit + strip values", + rr: RowRange{}, + filter: ChainFilters(ColumnFilter(".*j.*"), StripValueFilter()), // matches "jadams" and "tjefferson" + limit: LimitRows(2), + want: "gwashington-jadams-,jadams-tjefferson-", + }, + { + desc: "read with condition, strip values on true", + rr: RowRange{}, + filter: 
ConditionFilter(ColumnFilter(".*j.*"), StripValueFilter(), nil), + want: "gwashington-jadams-,jadams-gwashington-,jadams-tjefferson-,tjefferson-gwashington-,tjefferson-jadams-,tjefferson-wmckinley-,wmckinley-tjefferson-", + }, + { + desc: "read with condition, strip values on false", + rr: RowRange{}, + filter: ConditionFilter(ColumnFilter(".*xxx.*"), nil, StripValueFilter()), + want: "gwashington-jadams-,jadams-gwashington-,jadams-tjefferson-,tjefferson-gwashington-,tjefferson-jadams-,tjefferson-wmckinley-,wmckinley-tjefferson-", + }, + { + desc: "read with ValueRangeFilter + row limit", + rr: RowRange{}, + filter: ValueRangeFilter([]byte("1"), []byte("5")), // matches our value of "1" + limit: LimitRows(2), + want: "gwashington-jadams-1,jadams-gwashington-1,jadams-tjefferson-1", + }, + { + desc: "read with ValueRangeFilter, no match on exclusive end", + rr: RowRange{}, + filter: ValueRangeFilter([]byte("0"), []byte("1")), // no match + want: "", + }, + { + desc: "read with ValueRangeFilter, no matches", + rr: RowRange{}, + filter: ValueRangeFilter([]byte("3"), []byte("5")), // matches nothing + want: "", + }, + { + desc: "read with InterleaveFilter, no matches on all filters", + rr: RowRange{}, + filter: InterleaveFilters(ColumnFilter(".*x.*"), ColumnFilter(".*z.*")), + want: "", + }, + { + desc: "read with InterleaveFilter, no duplicate cells", + rr: RowRange{}, + filter: InterleaveFilters(ColumnFilter(".*g.*"), ColumnFilter(".*j.*")), + want: "gwashington-jadams-1,jadams-gwashington-1,jadams-tjefferson-1,tjefferson-gwashington-1,tjefferson-jadams-1,wmckinley-tjefferson-1", + }, + { + desc: "read with InterleaveFilter, with duplicate cells", + rr: RowRange{}, + filter: InterleaveFilters(ColumnFilter(".*g.*"), ColumnFilter(".*g.*")), + want: "jadams-gwashington-1,jadams-gwashington-1,tjefferson-gwashington-1,tjefferson-gwashington-1", + }, + } + for _, tc := range readTests { + var opts []ReadOption + if tc.filter != nil { + opts = append(opts, 
RowFilter(tc.filter)) + } + if tc.limit != nil { + opts = append(opts, tc.limit) + } + var elt []string + err := tbl.ReadRows(context.Background(), tc.rr, func(r Row) bool { + for _, ris := range r { + for _, ri := range ris { + elt = append(elt, formatReadItem(ri)) + } + } + return true + }, opts...) + if err != nil { + t.Errorf("%s: %v", tc.desc, err) + continue + } + if got := strings.Join(elt, ","); got != tc.want { + t.Errorf("%s: wrong reads.\n got %q\nwant %q", tc.desc, got, tc.want) + } + } + // Read a RowList + var elt []string + keys := RowList{"wmckinley", "gwashington", "jadams"} + want := "gwashington-jadams-1,jadams-gwashington-1,jadams-tjefferson-1,wmckinley-tjefferson-1" + err = tbl.ReadRows(ctx, keys, func(r Row) bool { + for _, ris := range r { + for _, ri := range ris { + elt = append(elt, formatReadItem(ri)) + } + } + return true + }) + if err != nil { + t.Errorf("read RowList: %v", err) + } + + if got := strings.Join(elt, ","); got != want { + t.Errorf("bulk read: wrong reads.\n got %q\nwant %q", got, want) + } + checkpoint("tested ReadRows in a few ways") + + // Do a scan and stop part way through. + // Verify that the ReadRows callback doesn't keep running. + stopped := false + err = tbl.ReadRows(ctx, InfiniteRange(""), func(r Row) bool { + if r.Key() < "h" { + return true + } + if !stopped { + stopped = true + return false + } + t.Errorf("ReadRows kept scanning to row %q after being told to stop", r.Key()) + return false + }) + if err != nil { + t.Errorf("Partial ReadRows: %v", err) + } + checkpoint("did partial ReadRows test") + + // Delete a row and check it goes away. 
+ mut = NewMutation() + mut.DeleteRow() + if err := tbl.Apply(ctx, "wmckinley", mut); err != nil { + t.Errorf("Apply DeleteRow: %v", err) + } + row, err = tbl.ReadRow(ctx, "wmckinley") + if err != nil { + t.Fatalf("Reading a row after DeleteRow: %v", err) + } + if len(row) != 0 { + t.Fatalf("Read non-zero row after DeleteRow: %v", row) + } + checkpoint("exercised DeleteRow") + + // Check ReadModifyWrite. + + if err := adminClient.CreateColumnFamily(ctx, table, "counter"); err != nil { + t.Fatalf("Creating column family: %v", err) + } + + appendRMW := func(b []byte) *ReadModifyWrite { + rmw := NewReadModifyWrite() + rmw.AppendValue("counter", "likes", b) + return rmw + } + incRMW := func(n int64) *ReadModifyWrite { + rmw := NewReadModifyWrite() + rmw.Increment("counter", "likes", n) + return rmw + } + rmwSeq := []struct { + desc string + rmw *ReadModifyWrite + want []byte + }{ + { + desc: "append #1", + rmw: appendRMW([]byte{0, 0, 0}), + want: []byte{0, 0, 0}, + }, + { + desc: "append #2", + rmw: appendRMW([]byte{0, 0, 0, 0, 17}), // the remaining 40 bits to make a big-endian 17 + want: []byte{0, 0, 0, 0, 0, 0, 0, 17}, + }, + { + desc: "increment", + rmw: incRMW(8), + want: []byte{0, 0, 0, 0, 0, 0, 0, 25}, + }, + } + for _, step := range rmwSeq { + row, err := tbl.ApplyReadModifyWrite(ctx, "gwashington", step.rmw) + if err != nil { + t.Fatalf("ApplyReadModifyWrite %+v: %v", step.rmw, err) + } + clearTimestamps(row) + wantRow := Row{"counter": []ReadItem{{Row: "gwashington", Column: "counter:likes", Value: step.want}}} + if !reflect.DeepEqual(row, wantRow) { + t.Fatalf("After %s,\n got %v\nwant %v", step.desc, row, wantRow) + } + } + checkpoint("tested ReadModifyWrite") + + // Test arbitrary timestamps more thoroughly. 
+ if err := adminClient.CreateColumnFamily(ctx, table, "ts"); err != nil { + t.Fatalf("Creating column family: %v", err) + } + const numVersions = 4 + mut = NewMutation() + for i := 0; i < numVersions; i++ { + // Timestamps are used in thousands because the server + // only permits that granularity. + mut.Set("ts", "col", Timestamp(i*1000), []byte(fmt.Sprintf("val-%d", i))) + } + if err := tbl.Apply(ctx, "testrow", mut); err != nil { + t.Fatalf("Mutating row: %v", err) + } + r, err := tbl.ReadRow(ctx, "testrow") + if err != nil { + t.Fatalf("Reading row: %v", err) + } + wantRow = Row{"ts": []ReadItem{ + // These should be returned in descending timestamp order. + {Row: "testrow", Column: "ts:col", Timestamp: 3000, Value: []byte("val-3")}, + {Row: "testrow", Column: "ts:col", Timestamp: 2000, Value: []byte("val-2")}, + {Row: "testrow", Column: "ts:col", Timestamp: 1000, Value: []byte("val-1")}, + {Row: "testrow", Column: "ts:col", Timestamp: 0, Value: []byte("val-0")}, + }} + if !reflect.DeepEqual(r, wantRow) { + t.Errorf("Cell with multiple versions,\n got %v\nwant %v", r, wantRow) + } + // Do the same read, but filter to the latest two versions. 
+ r, err = tbl.ReadRow(ctx, "testrow", RowFilter(LatestNFilter(2))) + if err != nil { + t.Fatalf("Reading row: %v", err) + } + wantRow = Row{"ts": []ReadItem{ + {Row: "testrow", Column: "ts:col", Timestamp: 3000, Value: []byte("val-3")}, + {Row: "testrow", Column: "ts:col", Timestamp: 2000, Value: []byte("val-2")}, + }} + if !reflect.DeepEqual(r, wantRow) { + t.Errorf("Cell with multiple versions and LatestNFilter(2),\n got %v\nwant %v", r, wantRow) + } + // Check timestamp range filtering (with truncation) + r, err = tbl.ReadRow(ctx, "testrow", RowFilter(TimestampRangeFilterMicros(1001, 3000))) + if err != nil { + t.Fatalf("Reading row: %v", err) + } + wantRow = Row{"ts": []ReadItem{ + {Row: "testrow", Column: "ts:col", Timestamp: 2000, Value: []byte("val-2")}, + {Row: "testrow", Column: "ts:col", Timestamp: 1000, Value: []byte("val-1")}, + }} + if !reflect.DeepEqual(r, wantRow) { + t.Errorf("Cell with multiple versions and TimestampRangeFilter(1000, 3000),\n got %v\nwant %v", r, wantRow) + } + r, err = tbl.ReadRow(ctx, "testrow", RowFilter(TimestampRangeFilterMicros(1000, 0))) + if err != nil { + t.Fatalf("Reading row: %v", err) + } + wantRow = Row{"ts": []ReadItem{ + {Row: "testrow", Column: "ts:col", Timestamp: 3000, Value: []byte("val-3")}, + {Row: "testrow", Column: "ts:col", Timestamp: 2000, Value: []byte("val-2")}, + {Row: "testrow", Column: "ts:col", Timestamp: 1000, Value: []byte("val-1")}, + }} + if !reflect.DeepEqual(r, wantRow) { + t.Errorf("Cell with multiple versions and TimestampRangeFilter(1000, 0),\n got %v\nwant %v", r, wantRow) + } + // Delete non-existing cells, no such column family in this row + // Should not delete anything + if err := adminClient.CreateColumnFamily(ctx, table, "non-existing"); err != nil { + t.Fatalf("Creating column family: %v", err) + } + mut = NewMutation() + mut.DeleteTimestampRange("non-existing", "col", 2000, 3000) // half-open interval + if err := tbl.Apply(ctx, "testrow", mut); err != nil { + t.Fatalf("Mutating row: 
%v", err) + } + r, err = tbl.ReadRow(ctx, "testrow", RowFilter(LatestNFilter(3))) + if err != nil { + t.Fatalf("Reading row: %v", err) + } + if !reflect.DeepEqual(r, wantRow) { + t.Errorf("Cell was deleted unexpectly,\n got %v\nwant %v", r, wantRow) + } + // Delete non-existing cells, no such column in this column family + // Should not delete anything + mut = NewMutation() + mut.DeleteTimestampRange("ts", "non-existing", 2000, 3000) // half-open interval + if err := tbl.Apply(ctx, "testrow", mut); err != nil { + t.Fatalf("Mutating row: %v", err) + } + r, err = tbl.ReadRow(ctx, "testrow", RowFilter(LatestNFilter(3))) + if err != nil { + t.Fatalf("Reading row: %v", err) + } + if !reflect.DeepEqual(r, wantRow) { + t.Errorf("Cell was deleted unexpectly,\n got %v\nwant %v", r, wantRow) + } + // Delete the cell with timestamp 2000 and repeat the last read, + // checking that we get ts 3000 and ts 1000. + mut = NewMutation() + mut.DeleteTimestampRange("ts", "col", 2001, 3000) // half-open interval + if err := tbl.Apply(ctx, "testrow", mut); err != nil { + t.Fatalf("Mutating row: %v", err) + } + r, err = tbl.ReadRow(ctx, "testrow", RowFilter(LatestNFilter(2))) + if err != nil { + t.Fatalf("Reading row: %v", err) + } + wantRow = Row{"ts": []ReadItem{ + {Row: "testrow", Column: "ts:col", Timestamp: 3000, Value: []byte("val-3")}, + {Row: "testrow", Column: "ts:col", Timestamp: 1000, Value: []byte("val-1")}, + }} + if !reflect.DeepEqual(r, wantRow) { + t.Errorf("Cell with multiple versions and LatestNFilter(2), after deleting timestamp 2000,\n got %v\nwant %v", r, wantRow) + } + checkpoint("tested multiple versions in a cell") + + // Check DeleteCellsInFamily + if err := adminClient.CreateColumnFamily(ctx, table, "status"); err != nil { + t.Fatalf("Creating column family: %v", err) + } + + mut = NewMutation() + mut.Set("status", "start", 0, []byte("1")) + mut.Set("status", "end", 0, []byte("2")) + mut.Set("ts", "col", 0, []byte("3")) + if err := tbl.Apply(ctx, "row1", mut); 
err != nil { + t.Errorf("Mutating row: %v", err) + } + if err := tbl.Apply(ctx, "row2", mut); err != nil { + t.Errorf("Mutating row: %v", err) + } + + mut = NewMutation() + mut.DeleteCellsInFamily("status") + if err := tbl.Apply(ctx, "row1", mut); err != nil { + t.Errorf("Delete cf: %v", err) + } + + // ColumnFamily removed + r, err = tbl.ReadRow(ctx, "row1") + if err != nil { + t.Fatalf("Reading row: %v", err) + } + wantRow = Row{"ts": []ReadItem{ + {Row: "row1", Column: "ts:col", Timestamp: 0, Value: []byte("3")}, + }} + if !reflect.DeepEqual(r, wantRow) { + t.Errorf("column family was not deleted.\n got %v\n want %v", r, wantRow) + } + + // ColumnFamily not removed + r, err = tbl.ReadRow(ctx, "row2") + if err != nil { + t.Fatalf("Reading row: %v", err) + } + wantRow = Row{ + "ts": []ReadItem{ + {Row: "row2", Column: "ts:col", Timestamp: 0, Value: []byte("3")}, + }, + "status": []ReadItem{ + {Row: "row2", Column: "status:end", Timestamp: 0, Value: []byte("2")}, + {Row: "row2", Column: "status:start", Timestamp: 0, Value: []byte("1")}, + }, + } + if !reflect.DeepEqual(r, wantRow) { + t.Errorf("Column family was deleted unexpectly.\n got %v\n want %v", r, wantRow) + } + checkpoint("tested family delete") + + // Check DeleteCellsInColumn + mut = NewMutation() + mut.Set("status", "start", 0, []byte("1")) + mut.Set("status", "middle", 0, []byte("2")) + mut.Set("status", "end", 0, []byte("3")) + if err := tbl.Apply(ctx, "row3", mut); err != nil { + t.Errorf("Mutating row: %v", err) + } + mut = NewMutation() + mut.DeleteCellsInColumn("status", "middle") + if err := tbl.Apply(ctx, "row3", mut); err != nil { + t.Errorf("Delete column: %v", err) + } + r, err = tbl.ReadRow(ctx, "row3") + if err != nil { + t.Fatalf("Reading row: %v", err) + } + wantRow = Row{ + "status": []ReadItem{ + {Row: "row3", Column: "status:end", Timestamp: 0, Value: []byte("3")}, + {Row: "row3", Column: "status:start", Timestamp: 0, Value: []byte("1")}, + }, + } + if !reflect.DeepEqual(r, wantRow) { 
+ t.Errorf("Column was not deleted.\n got %v\n want %v", r, wantRow) + } + mut = NewMutation() + mut.DeleteCellsInColumn("status", "start") + if err := tbl.Apply(ctx, "row3", mut); err != nil { + t.Errorf("Delete column: %v", err) + } + r, err = tbl.ReadRow(ctx, "row3") + if err != nil { + t.Fatalf("Reading row: %v", err) + } + wantRow = Row{ + "status": []ReadItem{ + {Row: "row3", Column: "status:end", Timestamp: 0, Value: []byte("3")}, + }, + } + if !reflect.DeepEqual(r, wantRow) { + t.Errorf("Column was not deleted.\n got %v\n want %v", r, wantRow) + } + mut = NewMutation() + mut.DeleteCellsInColumn("status", "end") + if err := tbl.Apply(ctx, "row3", mut); err != nil { + t.Errorf("Delete column: %v", err) + } + r, err = tbl.ReadRow(ctx, "row3") + if err != nil { + t.Fatalf("Reading row: %v", err) + } + if len(r) != 0 { + t.Errorf("Delete column: got %v, want empty row", r) + } + // Add same cell after delete + mut = NewMutation() + mut.Set("status", "end", 0, []byte("3")) + if err := tbl.Apply(ctx, "row3", mut); err != nil { + t.Errorf("Mutating row: %v", err) + } + r, err = tbl.ReadRow(ctx, "row3") + if err != nil { + t.Fatalf("Reading row: %v", err) + } + if !reflect.DeepEqual(r, wantRow) { + t.Errorf("Column was not deleted correctly.\n got %v\n want %v", r, wantRow) + } + checkpoint("tested column delete") + + // Do highly concurrent reads/writes. + // TODO(dsymonds): Raise this to 1000 when https://github.com/grpc/grpc-go/issues/205 is resolved. + const maxConcurrency = 100 + var wg sync.WaitGroup + for i := 0; i < maxConcurrency; i++ { + wg.Add(1) + go func() { + defer wg.Done() + switch r := rand.Intn(100); { // r ∈ [0,100) + case 0 <= r && r < 30: + // Do a read. + _, err := tbl.ReadRow(ctx, "testrow", RowFilter(LatestNFilter(1))) + if err != nil { + t.Errorf("Concurrent read: %v", err) + } + case 30 <= r && r < 100: + // Do a write. 
+ mut := NewMutation() + mut.Set("ts", "col", 0, []byte("data")) + if err := tbl.Apply(ctx, "testrow", mut); err != nil { + t.Errorf("Concurrent write: %v", err) + } + } + }() + } + wg.Wait() + checkpoint("tested high concurrency") + + // Large reads, writes and scans. + bigBytes := make([]byte, 3<<20) // 3 MB is large, but less than current gRPC max of 4 MB. + nonsense := []byte("lorem ipsum dolor sit amet, ") + fill(bigBytes, nonsense) + mut = NewMutation() + mut.Set("ts", "col", 0, bigBytes) + if err := tbl.Apply(ctx, "bigrow", mut); err != nil { + t.Errorf("Big write: %v", err) + } + r, err = tbl.ReadRow(ctx, "bigrow") + if err != nil { + t.Errorf("Big read: %v", err) + } + wantRow = Row{"ts": []ReadItem{ + {Row: "bigrow", Column: "ts:col", Value: bigBytes}, + }} + if !reflect.DeepEqual(r, wantRow) { + t.Errorf("Big read returned incorrect bytes: %v", r) + } + // Now write 1000 rows, each with 82 KB values, then scan them all. + medBytes := make([]byte, 82<<10) + fill(medBytes, nonsense) + sem := make(chan int, 50) // do up to 50 mutations at a time. + for i := 0; i < 1000; i++ { + mut := NewMutation() + mut.Set("ts", "big-scan", 0, medBytes) + row := fmt.Sprintf("row-%d", i) + wg.Add(1) + go func() { + defer wg.Done() + defer func() { <-sem }() + sem <- 1 + if err := tbl.Apply(ctx, row, mut); err != nil { + t.Errorf("Preparing large scan: %v", err) + } + }() + } + wg.Wait() + n := 0 + err = tbl.ReadRows(ctx, PrefixRange("row-"), func(r Row) bool { + for _, ris := range r { + for _, ri := range ris { + n += len(ri.Value) + } + } + return true + }, RowFilter(ColumnFilter("big-scan"))) + if err != nil { + t.Errorf("Doing large scan: %v", err) + } + if want := 1000 * len(medBytes); n != want { + t.Errorf("Large scan returned %d bytes, want %d", n, want) + } + // Scan a subset of the 1000 rows that we just created, using a LimitRows ReadOption. 
+ rc := 0 + wantRc := 3 + err = tbl.ReadRows(ctx, PrefixRange("row-"), func(r Row) bool { + rc++ + return true + }, LimitRows(int64(wantRc))) + if rc != wantRc { + t.Errorf("Scan with row limit returned %d rows, want %d", rc, wantRc) + } + checkpoint("tested big read/write/scan") + + // Test bulk mutations + if err := adminClient.CreateColumnFamily(ctx, table, "bulk"); err != nil { + t.Fatalf("Creating column family: %v", err) + } + bulkData := map[string][]string{ + "red sox": {"2004", "2007", "2013"}, + "patriots": {"2001", "2003", "2004", "2014"}, + "celtics": {"1981", "1984", "1986", "2008"}, + } + var rowKeys []string + var muts []*Mutation + for row, ss := range bulkData { + mut := NewMutation() + for _, name := range ss { + mut.Set("bulk", name, 0, []byte("1")) + } + rowKeys = append(rowKeys, row) + muts = append(muts, mut) + } + status, err := tbl.ApplyBulk(ctx, rowKeys, muts) + if err != nil { + t.Fatalf("Bulk mutating rows %q: %v", rowKeys, err) + } + if status != nil { + t.Errorf("non-nil errors: %v", err) + } + checkpoint("inserted bulk data") + + // Read each row back + for rowKey, ss := range bulkData { + row, err := tbl.ReadRow(ctx, rowKey) + if err != nil { + t.Fatalf("Reading a bulk row: %v", err) + } + var wantItems []ReadItem + for _, val := range ss { + wantItems = append(wantItems, ReadItem{Row: rowKey, Column: "bulk:" + val, Value: []byte("1")}) + } + wantRow := Row{"bulk": wantItems} + if !reflect.DeepEqual(row, wantRow) { + t.Errorf("Read row mismatch.\n got %#v\nwant %#v", row, wantRow) + } + } + checkpoint("tested reading from bulk insert") + + // Test bulk write errors. + // Note: Setting timestamps as ServerTime makes sure the mutations are not retried on error. 
+ badMut := NewMutation() + badMut.Set("badfamily", "col", ServerTime, nil) + badMut2 := NewMutation() + badMut2.Set("badfamily2", "goodcol", ServerTime, []byte("1")) + status, err = tbl.ApplyBulk(ctx, []string{"badrow", "badrow2"}, []*Mutation{badMut, badMut2}) + if err != nil { + t.Fatalf("Bulk mutating rows %q: %v", rowKeys, err) + } + if status == nil { + t.Errorf("No errors for bad bulk mutation") + } else if status[0] == nil || status[1] == nil { + t.Errorf("No error for bad bulk mutation") + } +} + +func formatReadItem(ri ReadItem) string { + // Use the column qualifier only to make the test data briefer. + col := ri.Column[strings.Index(ri.Column, ":")+1:] + return fmt.Sprintf("%s-%s-%s", ri.Row, col, ri.Value) +} + +func fill(b, sub []byte) { + for len(b) > len(sub) { + n := copy(b, sub) + b = b[n:] + } +} + +func clearTimestamps(r Row) { + for _, ris := range r { + for i := range ris { + ris[i].Timestamp = 0 + } + } +} diff --git a/vendor/cloud.google.com/go/bigtable/bttest/example_test.go b/vendor/cloud.google.com/go/bigtable/bttest/example_test.go new file mode 100644 index 00000000..5cfc370d --- /dev/null +++ b/vendor/cloud.google.com/go/bigtable/bttest/example_test.go @@ -0,0 +1,83 @@ +/* +Copyright 2016 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ +package bttest_test + +import ( + "fmt" + "log" + + "cloud.google.com/go/bigtable" + "cloud.google.com/go/bigtable/bttest" + "golang.org/x/net/context" + "google.golang.org/api/option" + "google.golang.org/grpc" +) + +func ExampleNewServer() { + + srv, err := bttest.NewServer("127.0.0.1:0") + + if err != nil { + log.Fatalln(err) + } + + ctx := context.Background() + + conn, err := grpc.Dial(srv.Addr, grpc.WithInsecure()) + if err != nil { + log.Fatalln(err) + } + + proj, instance := "proj", "instance" + + adminClient, err := bigtable.NewAdminClient(ctx, proj, instance, option.WithGRPCConn(conn)) + if err != nil { + log.Fatalln(err) + } + + if err = adminClient.CreateTable(ctx, "example"); err != nil { + log.Fatalln(err) + } + + if err = adminClient.CreateColumnFamily(ctx, "example", "links"); err != nil { + log.Fatalln(err) + } + + client, err := bigtable.NewClient(ctx, proj, instance, option.WithGRPCConn(conn)) + if err != nil { + log.Fatalln(err) + } + tbl := client.Open("example") + + mut := bigtable.NewMutation() + mut.Set("links", "golang.org", bigtable.Now(), []byte("Gophers!")) + if err = tbl.Apply(ctx, "com.google.cloud", mut); err != nil { + log.Fatalln(err) + } + + if row, err := tbl.ReadRow(ctx, "com.google.cloud"); err != nil { + log.Fatalln(err) + } else { + for _, column := range row["links"] { + fmt.Println(column.Column) + fmt.Println(string(column.Value)) + } + } + + // Output: + // links:golang.org + // Gophers! +} diff --git a/vendor/cloud.google.com/go/bigtable/bttest/inmem.go b/vendor/cloud.google.com/go/bigtable/bttest/inmem.go new file mode 100644 index 00000000..89717db1 --- /dev/null +++ b/vendor/cloud.google.com/go/bigtable/bttest/inmem.go @@ -0,0 +1,1230 @@ +/* +Copyright 2015 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Package bttest contains test helpers for working with the bigtable package. + +To use a Server, create it, and then connect to it with no security: +(The project/instance values are ignored.) + srv, err := bttest.NewServer("127.0.0.1:0") + ... + conn, err := grpc.Dial(srv.Addr, grpc.WithInsecure()) + ... + client, err := bigtable.NewClient(ctx, proj, instance, + option.WithGRPCConn(conn)) + ... +*/ +package bttest // import "cloud.google.com/go/bigtable/bttest" + +import ( + "encoding/binary" + "fmt" + "log" + "math/rand" + "net" + "regexp" + "sort" + "strings" + "sync" + "time" + + "bytes" + emptypb "github.com/golang/protobuf/ptypes/empty" + "github.com/golang/protobuf/ptypes/wrappers" + "golang.org/x/net/context" + btapb "google.golang.org/genproto/googleapis/bigtable/admin/v2" + btpb "google.golang.org/genproto/googleapis/bigtable/v2" + statpb "google.golang.org/genproto/googleapis/rpc/status" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" +) + +// Server is an in-memory Cloud Bigtable fake. +// It is unauthenticated, and only a rough approximation. +type Server struct { + Addr string + + l net.Listener + srv *grpc.Server + s *server +} + +// server is the real implementation of the fake. +// It is a separate and unexported type so the API won't be cluttered with +// methods that are only relevant to the fake's implementation. 
+type server struct { + mu sync.Mutex + tables map[string]*table // keyed by fully qualified name + gcc chan int // set when gcloop starts, closed when server shuts down + + // Any unimplemented methods will cause a panic. + btapb.BigtableTableAdminServer + btpb.BigtableServer +} + +// NewServer creates a new Server. +// The Server will be listening for gRPC connections, without TLS, +// on the provided address. The resolved address is named by the Addr field. +func NewServer(laddr string, opt ...grpc.ServerOption) (*Server, error) { + l, err := net.Listen("tcp", laddr) + if err != nil { + return nil, err + } + + s := &Server{ + Addr: l.Addr().String(), + l: l, + srv: grpc.NewServer(opt...), + s: &server{ + tables: make(map[string]*table), + }, + } + btapb.RegisterBigtableTableAdminServer(s.srv, s.s) + btpb.RegisterBigtableServer(s.srv, s.s) + + go s.srv.Serve(s.l) + + return s, nil +} + +// Close shuts down the server. +func (s *Server) Close() { + s.s.mu.Lock() + if s.s.gcc != nil { + close(s.s.gcc) + } + s.s.mu.Unlock() + + s.srv.Stop() + s.l.Close() +} + +func (s *server) CreateTable(ctx context.Context, req *btapb.CreateTableRequest) (*btapb.Table, error) { + tbl := req.Parent + "/tables/" + req.TableId + + s.mu.Lock() + if _, ok := s.tables[tbl]; ok { + s.mu.Unlock() + return nil, fmt.Errorf("table %q already exists", tbl) + } + s.tables[tbl] = newTable(req) + s.mu.Unlock() + + return &btapb.Table{Name: tbl}, nil +} + +func (s *server) ListTables(ctx context.Context, req *btapb.ListTablesRequest) (*btapb.ListTablesResponse, error) { + res := &btapb.ListTablesResponse{} + prefix := req.Parent + "/tables/" + + s.mu.Lock() + for tbl := range s.tables { + if strings.HasPrefix(tbl, prefix) { + res.Tables = append(res.Tables, &btapb.Table{Name: tbl}) + } + } + s.mu.Unlock() + + return res, nil +} + +func (s *server) GetTable(ctx context.Context, req *btapb.GetTableRequest) (*btapb.Table, error) { + tbl := req.Name + + s.mu.Lock() + tblIns, ok := s.tables[tbl] + 
s.mu.Unlock() + if !ok { + return nil, grpc.Errorf(codes.NotFound, "table %q not found", tbl) + } + + return &btapb.Table{ + Name: tbl, + ColumnFamilies: toColumnFamilies(tblIns.columnFamilies()), + }, nil +} + +func (s *server) DeleteTable(ctx context.Context, req *btapb.DeleteTableRequest) (*emptypb.Empty, error) { + s.mu.Lock() + defer s.mu.Unlock() + if _, ok := s.tables[req.Name]; !ok { + return nil, grpc.Errorf(codes.NotFound, "table %q not found", req.Name) + } + delete(s.tables, req.Name) + return &emptypb.Empty{}, nil +} + +func (s *server) ModifyColumnFamilies(ctx context.Context, req *btapb.ModifyColumnFamiliesRequest) (*btapb.Table, error) { + tblName := req.Name[strings.LastIndex(req.Name, "/")+1:] + + s.mu.Lock() + tbl, ok := s.tables[req.Name] + s.mu.Unlock() + if !ok { + return nil, grpc.Errorf(codes.NotFound, "table %q not found", req.Name) + } + + tbl.mu.Lock() + defer tbl.mu.Unlock() + + for _, mod := range req.Modifications { + if create := mod.GetCreate(); create != nil { + if _, ok := tbl.families[mod.Id]; ok { + return nil, fmt.Errorf("family %q already exists", mod.Id) + } + newcf := &columnFamily{ + name: req.Name + "/columnFamilies/" + mod.Id, + order: tbl.counter, + gcRule: create.GcRule, + } + tbl.counter++ + tbl.families[mod.Id] = newcf + } else if mod.GetDrop() { + if _, ok := tbl.families[mod.Id]; !ok { + return nil, fmt.Errorf("can't delete unknown family %q", mod.Id) + } + delete(tbl.families, mod.Id) + } else if modify := mod.GetUpdate(); modify != nil { + if _, ok := tbl.families[mod.Id]; !ok { + return nil, fmt.Errorf("no such family %q", mod.Id) + } + newcf := &columnFamily{ + name: req.Name + "/columnFamilies/" + mod.Id, + gcRule: modify.GcRule, + } + // assume that we ALWAYS want to replace by the new setting + // we may need partial update through + tbl.families[mod.Id] = newcf + } + } + + s.needGC() + return &btapb.Table{ + Name: tblName, + ColumnFamilies: toColumnFamilies(tbl.families), + }, nil +} + +func (s *server) 
DropRowRange(ctx context.Context, req *btapb.DropRowRangeRequest) (*emptypb.Empty, error) { + s.mu.Lock() + defer s.mu.Unlock() + tbl, ok := s.tables[req.Name] + if !ok { + return nil, grpc.Errorf(codes.NotFound, "table %q not found", req.Name) + } + + if req.GetDeleteAllDataFromTable() { + tbl.rows = nil + tbl.rowIndex = make(map[string]*row) + } else { + // Delete rows by prefix + prefixBytes := req.GetRowKeyPrefix() + if prefixBytes == nil { + return nil, fmt.Errorf("missing row key prefix") + } + prefix := string(prefixBytes) + + start := -1 + end := 0 + for i, row := range tbl.rows { + match := strings.HasPrefix(row.key, prefix) + if match { + // Delete the mapping. Row will be deleted from sorted range below. + delete(tbl.rowIndex, row.key) + } + if match && start == -1 { + start = i + } else if !match && start != -1 { + break + } + end++ + } + if start != -1 { + // Delete the range, using method from https://github.com/golang/go/wiki/SliceTricks + copy(tbl.rows[start:], tbl.rows[end:]) + for k, n := len(tbl.rows)-end+start, len(tbl.rows); k < n; k++ { + tbl.rows[k] = nil + } + tbl.rows = tbl.rows[:len(tbl.rows)-end+start] + } + } + + return &emptypb.Empty{}, nil +} + +func (s *server) ReadRows(req *btpb.ReadRowsRequest, stream btpb.Bigtable_ReadRowsServer) error { + s.mu.Lock() + tbl, ok := s.tables[req.TableName] + s.mu.Unlock() + if !ok { + return grpc.Errorf(codes.NotFound, "table %q not found", req.TableName) + } + + // Rows to read can be specified by a set of row keys and/or a set of row ranges. + // Output is a stream of sorted, de-duped rows. 
+ tbl.mu.RLock() + + rowSet := make(map[string]*row) + if req.Rows != nil { + // Add the explicitly given keys + for _, key := range req.Rows.RowKeys { + start := string(key) + addRows(start, start+"\x00", tbl, rowSet) + } + + // Add keys from row ranges + for _, rr := range req.Rows.RowRanges { + var start, end string + switch sk := rr.StartKey.(type) { + case *btpb.RowRange_StartKeyClosed: + start = string(sk.StartKeyClosed) + case *btpb.RowRange_StartKeyOpen: + start = string(sk.StartKeyOpen) + "\x00" + } + switch ek := rr.EndKey.(type) { + case *btpb.RowRange_EndKeyClosed: + end = string(ek.EndKeyClosed) + "\x00" + case *btpb.RowRange_EndKeyOpen: + end = string(ek.EndKeyOpen) + } + + addRows(start, end, tbl, rowSet) + } + } else { + // Read all rows + addRows("", "", tbl, rowSet) + } + tbl.mu.RUnlock() + + rows := make([]*row, 0, len(rowSet)) + for _, r := range rowSet { + rows = append(rows, r) + } + sort.Sort(byRowKey(rows)) + + limit := int(req.RowsLimit) + count := 0 + for _, r := range rows { + if limit > 0 && count >= limit { + return nil + } + streamed, err := streamRow(stream, r, req.Filter) + if err != nil { + return err + } + if streamed { + count++ + } + } + return nil +} + +func addRows(start, end string, tbl *table, rowSet map[string]*row) { + si, ei := 0, len(tbl.rows) // half-open interval + if start != "" { + si = sort.Search(len(tbl.rows), func(i int) bool { return tbl.rows[i].key >= start }) + } + if end != "" { + ei = sort.Search(len(tbl.rows), func(i int) bool { return tbl.rows[i].key >= end }) + } + if si < ei { + for _, row := range tbl.rows[si:ei] { + rowSet[row.key] = row + } + } +} + +// streamRow filters the given row and sends it via the given stream. +// Returns true if at least one cell matched the filter and was streamed, false otherwise. 
+func streamRow(stream btpb.Bigtable_ReadRowsServer, r *row, f *btpb.RowFilter) (bool, error) { + r.mu.Lock() + nr := r.copy() + r.mu.Unlock() + r = nr + + if !filterRow(f, r) { + return false, nil + } + + rrr := &btpb.ReadRowsResponse{} + families := r.sortedFamilies() + for _, fam := range families { + for _, colName := range fam.colNames { + cells := fam.cells[colName] + if len(cells) == 0 { + continue + } + // TODO(dsymonds): Apply transformers. + for _, cell := range cells { + rrr.Chunks = append(rrr.Chunks, &btpb.ReadRowsResponse_CellChunk{ + RowKey: []byte(r.key), + FamilyName: &wrappers.StringValue{Value: fam.name}, + Qualifier: &wrappers.BytesValue{Value: []byte(colName)}, + TimestampMicros: cell.ts, + Value: cell.value, + }) + } + } + } + // We can't have a cell with just COMMIT set, which would imply a new empty cell. + // So modify the last cell to have the COMMIT flag set. + if len(rrr.Chunks) > 0 { + rrr.Chunks[len(rrr.Chunks)-1].RowStatus = &btpb.ReadRowsResponse_CellChunk_CommitRow{true} + } + + return true, stream.Send(rrr) +} + +// filterRow modifies a row with the given filter. Returns true if at least one cell from the row matches, +// false otherwise. +func filterRow(f *btpb.RowFilter, r *row) bool { + if f == nil { + return true + } + // Handle filters that apply beyond just including/excluding cells. + switch f := f.Filter.(type) { + case *btpb.RowFilter_Chain_: + for _, sub := range f.Chain.Filters { + filterRow(sub, r) + } + return true + case *btpb.RowFilter_Interleave_: + srs := make([]*row, 0, len(f.Interleave.Filters)) + for _, sub := range f.Interleave.Filters { + sr := r.copy() + filterRow(sub, sr) + srs = append(srs, sr) + } + // merge + // TODO(dsymonds): is this correct? + r.families = make(map[string]*family) + for _, sr := range srs { + for _, fam := range sr.families { + f := r.getOrCreateFamily(fam.name, fam.order) + for colName, cs := range fam.cells { + f.cells[colName] = append(f.cellsByColumn(colName), cs...) 
+ } + } + } + for _, fam := range r.families { + for _, cs := range fam.cells { + sort.Sort(byDescTS(cs)) + } + } + return true + case *btpb.RowFilter_CellsPerColumnLimitFilter: + lim := int(f.CellsPerColumnLimitFilter) + for _, fam := range r.families { + for col, cs := range fam.cells { + if len(cs) > lim { + fam.cells[col] = cs[:lim] + } + } + } + return true + case *btpb.RowFilter_Condition_: + if filterRow(f.Condition.PredicateFilter, r.copy()) { + if f.Condition.TrueFilter == nil { + return false + } + return filterRow(f.Condition.TrueFilter, r) + } + if f.Condition.FalseFilter == nil { + return false + } + return filterRow(f.Condition.FalseFilter, r) + case *btpb.RowFilter_RowKeyRegexFilter: + pat := string(f.RowKeyRegexFilter) + rx, err := regexp.Compile(pat) + if err != nil { + log.Printf("Bad rowkey_regex_filter pattern %q: %v", pat, err) + return false + } + if !rx.MatchString(r.key) { + return false + } + } + + // Any other case, operate on a per-cell basis. + cellCount := 0 + for _, fam := range r.families { + for colName, cs := range fam.cells { + fam.cells[colName] = filterCells(f, fam.name, colName, cs) + cellCount += len(fam.cells[colName]) + } + } + return cellCount > 0 +} + +func filterCells(f *btpb.RowFilter, fam, col string, cs []cell) []cell { + var ret []cell + for _, cell := range cs { + if includeCell(f, fam, col, cell) { + cell = modifyCell(f, cell) + ret = append(ret, cell) + } + } + return ret +} + +func modifyCell(f *btpb.RowFilter, c cell) cell { + if f == nil { + return c + } + // Consider filters that may modify the cell contents + switch f.Filter.(type) { + case *btpb.RowFilter_StripValueTransformer: + return cell{ts: c.ts} + default: + return c + } +} + +func includeCell(f *btpb.RowFilter, fam, col string, cell cell) bool { + if f == nil { + return true + } + // TODO(dsymonds): Implement many more filters. 
+ switch f := f.Filter.(type) { + case *btpb.RowFilter_CellsPerColumnLimitFilter: + // Don't log, row-level filter + return true + case *btpb.RowFilter_RowKeyRegexFilter: + // Don't log, row-level filter + return true + case *btpb.RowFilter_StripValueTransformer: + // Don't log, cell-modifying filter + return true + default: + log.Printf("WARNING: don't know how to handle filter of type %T (ignoring it)", f) + return true + case *btpb.RowFilter_FamilyNameRegexFilter: + pat := string(f.FamilyNameRegexFilter) + rx, err := regexp.Compile(pat) + if err != nil { + log.Printf("Bad family_name_regex_filter pattern %q: %v", pat, err) + return false + } + return rx.MatchString(fam) + case *btpb.RowFilter_ColumnQualifierRegexFilter: + pat := string(f.ColumnQualifierRegexFilter) + rx, err := regexp.Compile(pat) + if err != nil { + log.Printf("Bad column_qualifier_regex_filter pattern %q: %v", pat, err) + return false + } + return rx.MatchString(col) + case *btpb.RowFilter_ValueRegexFilter: + pat := string(f.ValueRegexFilter) + rx, err := regexp.Compile(pat) + if err != nil { + log.Printf("Bad value_regex_filter pattern %q: %v", pat, err) + return false + } + return rx.Match(cell.value) + case *btpb.RowFilter_ColumnRangeFilter: + if fam != f.ColumnRangeFilter.FamilyName { + return false + } + // Start qualifier defaults to empty string closed + inRangeStart := func() bool { return col >= "" } + switch sq := f.ColumnRangeFilter.StartQualifier.(type) { + case *btpb.ColumnRange_StartQualifierOpen: + inRangeStart = func() bool { return col > string(sq.StartQualifierOpen) } + case *btpb.ColumnRange_StartQualifierClosed: + inRangeStart = func() bool { return col >= string(sq.StartQualifierClosed) } + } + // End qualifier defaults to no upper boundary + inRangeEnd := func() bool { return true } + switch eq := f.ColumnRangeFilter.EndQualifier.(type) { + case *btpb.ColumnRange_EndQualifierClosed: + inRangeEnd = func() bool { return col <= string(eq.EndQualifierClosed) } + case 
*btpb.ColumnRange_EndQualifierOpen: + inRangeEnd = func() bool { return col < string(eq.EndQualifierOpen) } + } + return inRangeStart() && inRangeEnd() + case *btpb.RowFilter_TimestampRangeFilter: + // Lower bound is inclusive and defaults to 0, upper bound is exclusive and defaults to infinity. + return cell.ts >= f.TimestampRangeFilter.StartTimestampMicros && + (f.TimestampRangeFilter.EndTimestampMicros == 0 || cell.ts < f.TimestampRangeFilter.EndTimestampMicros) + case *btpb.RowFilter_ValueRangeFilter: + v := cell.value + // Start value defaults to empty string closed + inRangeStart := func() bool { return bytes.Compare(v, []byte{}) >= 0 } + switch sv := f.ValueRangeFilter.StartValue.(type) { + case *btpb.ValueRange_StartValueOpen: + inRangeStart = func() bool { return bytes.Compare(v, sv.StartValueOpen) > 0 } + case *btpb.ValueRange_StartValueClosed: + inRangeStart = func() bool { return bytes.Compare(v, sv.StartValueClosed) >= 0 } + } + // End value defaults to no upper boundary + inRangeEnd := func() bool { return true } + switch ev := f.ValueRangeFilter.EndValue.(type) { + case *btpb.ValueRange_EndValueClosed: + inRangeEnd = func() bool { return bytes.Compare(v, ev.EndValueClosed) <= 0 } + case *btpb.ValueRange_EndValueOpen: + inRangeEnd = func() bool { return bytes.Compare(v, ev.EndValueOpen) < 0 } + } + return inRangeStart() && inRangeEnd() + } +} + +func (s *server) MutateRow(ctx context.Context, req *btpb.MutateRowRequest) (*btpb.MutateRowResponse, error) { + s.mu.Lock() + tbl, ok := s.tables[req.TableName] + s.mu.Unlock() + if !ok { + return nil, grpc.Errorf(codes.NotFound, "table %q not found", req.TableName) + } + + fs := tbl.columnFamilies() + r := tbl.mutableRow(string(req.RowKey)) + r.mu.Lock() + defer r.mu.Unlock() + + if err := applyMutations(tbl, r, req.Mutations, fs); err != nil { + return nil, err + } + return &btpb.MutateRowResponse{}, nil +} + +func (s *server) MutateRows(req *btpb.MutateRowsRequest, stream btpb.Bigtable_MutateRowsServer) 
error { + s.mu.Lock() + tbl, ok := s.tables[req.TableName] + s.mu.Unlock() + if !ok { + return grpc.Errorf(codes.NotFound, "table %q not found", req.TableName) + } + + res := &btpb.MutateRowsResponse{Entries: make([]*btpb.MutateRowsResponse_Entry, len(req.Entries))} + + fs := tbl.columnFamilies() + + for i, entry := range req.Entries { + r := tbl.mutableRow(string(entry.RowKey)) + r.mu.Lock() + code, msg := int32(codes.OK), "" + if err := applyMutations(tbl, r, entry.Mutations, fs); err != nil { + code = int32(codes.Internal) + msg = err.Error() + } + res.Entries[i] = &btpb.MutateRowsResponse_Entry{ + Index: int64(i), + Status: &statpb.Status{Code: code, Message: msg}, + } + r.mu.Unlock() + } + stream.Send(res) + return nil +} + +func (s *server) CheckAndMutateRow(ctx context.Context, req *btpb.CheckAndMutateRowRequest) (*btpb.CheckAndMutateRowResponse, error) { + s.mu.Lock() + tbl, ok := s.tables[req.TableName] + s.mu.Unlock() + if !ok { + return nil, grpc.Errorf(codes.NotFound, "table %q not found", req.TableName) + } + + res := &btpb.CheckAndMutateRowResponse{} + + fs := tbl.columnFamilies() + + r := tbl.mutableRow(string(req.RowKey)) + r.mu.Lock() + defer r.mu.Unlock() + + // Figure out which mutation to apply. + whichMut := false + if req.PredicateFilter == nil { + // Use true_mutations iff row contains any cells. + whichMut = !r.isEmpty() + } else { + // Use true_mutations iff any cells in the row match the filter. + // TODO(dsymonds): This could be cheaper. + nr := r.copy() + filterRow(req.PredicateFilter, nr) + whichMut = !nr.isEmpty() + // TODO(dsymonds): Figure out if this is supposed to be set + // even when there's no predicate filter. + res.PredicateMatched = whichMut + } + muts := req.FalseMutations + if whichMut { + muts = req.TrueMutations + } + + if err := applyMutations(tbl, r, muts, fs); err != nil { + return nil, err + } + return res, nil +} + +// applyMutations applies a sequence of mutations to a row. 
+// fam should be a snapshot of the keys of tbl.families. +// It assumes r.mu is locked. +func applyMutations(tbl *table, r *row, muts []*btpb.Mutation, fs map[string]*columnFamily) error { + for _, mut := range muts { + switch mut := mut.Mutation.(type) { + default: + return fmt.Errorf("can't handle mutation type %T", mut) + case *btpb.Mutation_SetCell_: + set := mut.SetCell + if _, ok := fs[set.FamilyName]; !ok { + return fmt.Errorf("unknown family %q", set.FamilyName) + } + ts := set.TimestampMicros + if ts == -1 { // bigtable.ServerTime + ts = newTimestamp() + } + if !tbl.validTimestamp(ts) { + return fmt.Errorf("invalid timestamp %d", ts) + } + fam := set.FamilyName + col := string(set.ColumnQualifier) + + newCell := cell{ts: ts, value: set.Value} + f := r.getOrCreateFamily(fam, fs[fam].order) + f.cells[col] = appendOrReplaceCell(f.cellsByColumn(col), newCell) + case *btpb.Mutation_DeleteFromColumn_: + del := mut.DeleteFromColumn + if _, ok := fs[del.FamilyName]; !ok { + return fmt.Errorf("unknown family %q", del.FamilyName) + } + fam := del.FamilyName + col := string(del.ColumnQualifier) + if _, ok := r.families[fam]; ok { + cs := r.families[fam].cells[col] + if del.TimeRange != nil { + tsr := del.TimeRange + if !tbl.validTimestamp(tsr.StartTimestampMicros) { + return fmt.Errorf("invalid timestamp %d", tsr.StartTimestampMicros) + } + if !tbl.validTimestamp(tsr.EndTimestampMicros) { + return fmt.Errorf("invalid timestamp %d", tsr.EndTimestampMicros) + } + // Find half-open interval to remove. + // Cells are in descending timestamp order, + // so the predicates to sort.Search are inverted. 
+ si, ei := 0, len(cs) + if tsr.StartTimestampMicros > 0 { + ei = sort.Search(len(cs), func(i int) bool { return cs[i].ts < tsr.StartTimestampMicros }) + } + if tsr.EndTimestampMicros > 0 { + si = sort.Search(len(cs), func(i int) bool { return cs[i].ts < tsr.EndTimestampMicros }) + } + if si < ei { + copy(cs[si:], cs[ei:]) + cs = cs[:len(cs)-(ei-si)] + } + } else { + cs = nil + } + if len(cs) == 0 { + delete(r.families[fam].cells, col) + colNames := r.families[fam].colNames + i := sort.Search(len(colNames), func(i int) bool { return colNames[i] >= col }) + if i < len(colNames) && colNames[i] == col { + r.families[fam].colNames = append(colNames[:i], colNames[i+1:]...) + } + if len(r.families[fam].cells) == 0 { + delete(r.families, fam) + } + } else { + r.families[fam].cells[col] = cs + } + } + case *btpb.Mutation_DeleteFromRow_: + r.families = make(map[string]*family) + case *btpb.Mutation_DeleteFromFamily_: + fampre := mut.DeleteFromFamily.FamilyName + delete(r.families, fampre) + } + } + return nil +} + +func maxTimestamp(x, y int64) int64 { + if x > y { + return x + } + return y +} + +func newTimestamp() int64 { + ts := time.Now().UnixNano() / 1e3 + ts -= ts % 1000 // round to millisecond granularity + return ts +} + +func appendOrReplaceCell(cs []cell, newCell cell) []cell { + replaced := false + for i, cell := range cs { + if cell.ts == newCell.ts { + cs[i] = newCell + replaced = true + break + } + } + if !replaced { + cs = append(cs, newCell) + } + sort.Sort(byDescTS(cs)) + return cs +} + +func (s *server) ReadModifyWriteRow(ctx context.Context, req *btpb.ReadModifyWriteRowRequest) (*btpb.ReadModifyWriteRowResponse, error) { + s.mu.Lock() + tbl, ok := s.tables[req.TableName] + s.mu.Unlock() + if !ok { + return nil, grpc.Errorf(codes.NotFound, "table %q not found", req.TableName) + } + + updates := make(map[string]cell) // copy of updated cells; keyed by full column name + + fs := tbl.columnFamilies() + + r := tbl.mutableRow(string(req.RowKey)) + r.mu.Lock() + 
defer r.mu.Unlock() + // Assume all mutations apply to the most recent version of the cell. + // TODO(dsymonds): Verify this assumption and document it in the proto. + for _, rule := range req.Rules { + if _, ok := fs[rule.FamilyName]; !ok { + return nil, fmt.Errorf("unknown family %q", rule.FamilyName) + } + + fam := rule.FamilyName + col := string(rule.ColumnQualifier) + isEmpty := false + f := r.getOrCreateFamily(fam, fs[fam].order) + cs := f.cells[col] + isEmpty = len(cs) == 0 + + ts := newTimestamp() + var newCell, prevCell cell + if !isEmpty { + cells := r.families[fam].cells[col] + prevCell = cells[0] + + // ts is the max of now or the prev cell's timestamp in case the + // prev cell is in the future + ts = maxTimestamp(ts, prevCell.ts) + } + + switch rule := rule.Rule.(type) { + default: + return nil, fmt.Errorf("unknown RMW rule oneof %T", rule) + case *btpb.ReadModifyWriteRule_AppendValue: + newCell = cell{ts: ts, value: append(prevCell.value, rule.AppendValue...)} + case *btpb.ReadModifyWriteRule_IncrementAmount: + var v int64 + if !isEmpty { + prevVal := prevCell.value + if len(prevVal) != 8 { + return nil, fmt.Errorf("increment on non-64-bit value") + } + v = int64(binary.BigEndian.Uint64(prevVal)) + } + v += rule.IncrementAmount + var val [8]byte + binary.BigEndian.PutUint64(val[:], uint64(v)) + newCell = cell{ts: ts, value: val[:]} + } + key := strings.Join([]string{fam, col}, ":") + updates[key] = newCell + f.cells[col] = appendOrReplaceCell(f.cellsByColumn(col), newCell) + } + + res := &btpb.Row{ + Key: req.RowKey, + } + for col, cell := range updates { + i := strings.Index(col, ":") + fam, qual := col[:i], col[i+1:] + var f *btpb.Family + for _, ff := range res.Families { + if ff.Name == fam { + f = ff + break + } + } + if f == nil { + f = &btpb.Family{Name: fam} + res.Families = append(res.Families, f) + } + f.Columns = append(f.Columns, &btpb.Column{ + Qualifier: []byte(qual), + Cells: []*btpb.Cell{{ + Value: cell.value, + }}, + }) + } + return 
&btpb.ReadModifyWriteRowResponse{Row: res}, nil
}

// SampleRowKeys streams back a rough sample of the table's row keys.
func (s *server) SampleRowKeys(req *btpb.SampleRowKeysRequest, stream btpb.Bigtable_SampleRowKeysServer) error {
	s.mu.Lock()
	tbl, ok := s.tables[req.TableName]
	s.mu.Unlock()
	if !ok {
		return grpc.Errorf(codes.NotFound, "table %q not found", req.TableName)
	}

	tbl.mu.RLock()
	defer tbl.mu.RUnlock()

	// The return value of SampleRowKeys is very loosely defined. Return at least the
	// final row key in the table and choose other row keys randomly.
	var offset int64
	for i, r := range tbl.rows {
		if i == len(tbl.rows)-1 || rand.Int31n(100) == 0 {
			resp := &btpb.SampleRowKeysResponse{
				RowKey:      []byte(r.key),
				OffsetBytes: offset,
			}
			if err := stream.Send(resp); err != nil {
				return err
			}
		}
		offset += int64(r.size())
	}
	return nil
}

// needGC is invoked whenever the server needs gcloop running.
func (s *server) needGC() {
	s.mu.Lock()
	if s.gcc == nil {
		// First caller starts the loop; Close closes gcc to stop it.
		s.gcc = make(chan int)
		go s.gcloop(s.gcc)
	}
	s.mu.Unlock()
}

// gcloop periodically garbage-collects every table until done is closed.
func (s *server) gcloop(done <-chan int) {
	const (
		minWait = 500  // ms
		maxWait = 1500 // ms
	)

	for {
		// Sleep for a random interval in [minWait, maxWait) ms.
		wait := time.Duration(minWait+rand.Intn(maxWait-minWait)) * time.Millisecond
		select {
		case <-time.After(wait):
		case <-done:
			return // server has been closed
		}

		// Do a GC pass over all tables.
		var tables []*table
		s.mu.Lock()
		for _, t := range s.tables {
			tables = append(tables, t)
		}
		s.mu.Unlock()
		for _, t := range tables {
			t.gc()
		}
	}
}

// table is the in-memory representation of a single Bigtable table.
type table struct {
	mu       sync.RWMutex
	counter  uint64                   // increment by 1 when a new family is created
	families map[string]*columnFamily // keyed by plain family name
	rows     []*row                   // sorted by row key
	rowIndex map[string]*row          // indexed by row key
}

// newTable builds a table from a CreateTableRequest, including any column
// families supplied with the request.
func newTable(ctr *btapb.CreateTableRequest) *table {
	fams := make(map[string]*columnFamily)
	var ord uint64
	if ctr.Table != nil {
		for id, cf := range ctr.Table.ColumnFamilies {
			fams[id] = &columnFamily{
				name:   ctr.Parent + "/columnFamilies/" + id,
				order:  ord,
				gcRule: cf.GcRule,
			}
			ord++
		}
	}
	return &table{
		families: fams,
		counter:  ord,
		rowIndex: make(map[string]*row),
	}
}

// validTimestamp reports whether ts has the required granularity.
func (t *table) validTimestamp(ts int64) bool {
	// Assume millisecond granularity is required.
	return ts%1000 == 0
}

// columnFamilies returns a snapshot copy of the table's family map.
func (t *table) columnFamilies() map[string]*columnFamily {
	t.mu.RLock()
	defer t.mu.RUnlock()
	cp := make(map[string]*columnFamily)
	for fam, cf := range t.families {
		cp[fam] = cf
	}
	return cp
}

// mutableRow returns the row with the given key, creating it if necessary.
func (t *table) mutableRow(key string) *row {
	// Fast path: the row already exists.
	t.mu.RLock()
	r := t.rowIndex[key]
	t.mu.RUnlock()
	if r != nil {
		return r
	}

	// Slow path: take the write lock and re-check before creating.
	t.mu.Lock()
	defer t.mu.Unlock()
	r = t.rowIndex[key]
	if r == nil {
		r = newRow(key)
		t.rowIndex[key] = r
		t.rows = append(t.rows, r)
		sort.Sort(byRowKey(t.rows)) // yay, inefficient!
	}
	return r
}

// gc runs the configured GC rules over every row of the table.
func (t *table) gc() {
	// This method doesn't add or remove rows, so we only need a read lock for the table.
	t.mu.RLock()
	defer t.mu.RUnlock()

	// Gather GC rules we'll apply.
+ rules := make(map[string]*btapb.GcRule) // keyed by "fam" + for fam, cf := range t.families { + if cf.gcRule != nil { + rules[fam] = cf.gcRule + } + } + if len(rules) == 0 { + return + } + + for _, r := range t.rows { + r.mu.Lock() + r.gc(rules) + r.mu.Unlock() + } +} + +type byRowKey []*row + +func (b byRowKey) Len() int { return len(b) } +func (b byRowKey) Swap(i, j int) { b[i], b[j] = b[j], b[i] } +func (b byRowKey) Less(i, j int) bool { return b[i].key < b[j].key } + +type row struct { + key string + + mu sync.Mutex + families map[string]*family // keyed by family name +} + +func newRow(key string) *row { + return &row{ + key: key, + families: make(map[string]*family), + } +} + +// copy returns a copy of the row. +// Cell values are aliased. +// r.mu should be held. +func (r *row) copy() *row { + nr := newRow(r.key) + for _, fam := range r.families { + nr.families[fam.name] = &family{ + name: fam.name, + order: fam.order, + colNames: fam.colNames, + cells: make(map[string][]cell), + } + for col, cs := range fam.cells { + // Copy the []cell slice, but not the []byte inside each cell. + nr.families[fam.name].cells[col] = append([]cell(nil), cs...) + } + } + return nr +} + +// isEmpty returns true if a row doesn't contain any cell +func (r *row) isEmpty() bool { + for _, fam := range r.families { + for _, cs := range fam.cells { + if len(cs) > 0 { + return false + } + } + } + return true +} + +// sortedFamilies returns a column family set +// sorted in ascending creation order in a row. 
+func (r *row) sortedFamilies() []*family { + var families []*family + for _, fam := range r.families { + families = append(families, fam) + } + sort.Sort(byCreationOrder(families)) + return families +} + +func (r *row) getOrCreateFamily(name string, order uint64) *family { + if _, ok := r.families[name]; !ok { + r.families[name] = &family{ + name: name, + order: order, + cells: make(map[string][]cell), + } + } + return r.families[name] +} + +// gc applies the given GC rules to the row. +// r.mu should be held. +func (r *row) gc(rules map[string]*btapb.GcRule) { + for _, fam := range r.families { + rule, ok := rules[fam.name] + if !ok { + continue + } + for col, cs := range fam.cells { + r.families[fam.name].cells[col] = applyGC(cs, rule) + } + } +} + +// size returns the total size of all cell values in the row. +func (r *row) size() int { + size := 0 + for _, fam := range r.families { + for _, cells := range fam.cells { + for _, cell := range cells { + size += len(cell.value) + } + } + } + return size +} + +func (r *row) String() string { + return r.key +} + +var gcTypeWarn sync.Once + +// applyGC applies the given GC rule to the cells. +func applyGC(cells []cell, rule *btapb.GcRule) []cell { + switch rule := rule.Rule.(type) { + default: + // TODO(dsymonds): Support GcRule_Intersection_ + gcTypeWarn.Do(func() { + log.Printf("Unsupported GC rule type %T", rule) + }) + case *btapb.GcRule_Union_: + for _, sub := range rule.Union.Rules { + cells = applyGC(cells, sub) + } + return cells + case *btapb.GcRule_MaxAge: + // Timestamps are in microseconds. + cutoff := time.Now().UnixNano() / 1e3 + cutoff -= rule.MaxAge.Seconds * 1e6 + cutoff -= int64(rule.MaxAge.Nanos) / 1e3 + // The slice of cells in in descending timestamp order. + // This sort.Search will return the index of the first cell whose timestamp is chronologically before the cutoff. 
+ si := sort.Search(len(cells), func(i int) bool { return cells[i].ts < cutoff }) + if si < len(cells) { + log.Printf("bttest: GC MaxAge(%v) deleted %d cells.", rule.MaxAge, len(cells)-si) + } + return cells[:si] + case *btapb.GcRule_MaxNumVersions: + n := int(rule.MaxNumVersions) + if len(cells) > n { + cells = cells[:n] + } + return cells + } + return cells +} + +type family struct { + name string // Column family name + order uint64 // Creation order of column family + colNames []string // Collumn names are sorted in lexicographical ascending order + cells map[string][]cell // Keyed by collumn name; cells are in descending timestamp order +} + +type byCreationOrder []*family + +func (b byCreationOrder) Len() int { return len(b) } +func (b byCreationOrder) Swap(i, j int) { b[i], b[j] = b[j], b[i] } +func (b byCreationOrder) Less(i, j int) bool { return b[i].order < b[j].order } + +// cellsByColumn adds the column name to colNames set if it does not exist +// and returns all cells within a column +func (f *family) cellsByColumn(name string) []cell { + if _, ok := f.cells[name]; !ok { + f.colNames = append(f.colNames, name) + sort.Strings(f.colNames) + } + return f.cells[name] +} + +type cell struct { + ts int64 + value []byte +} + +type byDescTS []cell + +func (b byDescTS) Len() int { return len(b) } +func (b byDescTS) Swap(i, j int) { b[i], b[j] = b[j], b[i] } +func (b byDescTS) Less(i, j int) bool { return b[i].ts > b[j].ts } + +type columnFamily struct { + name string + order uint64 // Creation order of column family + gcRule *btapb.GcRule +} + +func (c *columnFamily) proto() *btapb.ColumnFamily { + return &btapb.ColumnFamily{ + GcRule: c.gcRule, + } +} + +func toColumnFamilies(families map[string]*columnFamily) map[string]*btapb.ColumnFamily { + fs := make(map[string]*btapb.ColumnFamily) + for k, v := range families { + fs[k] = v.proto() + } + return fs +} diff --git a/vendor/cloud.google.com/go/bigtable/bttest/inmem_test.go 
b/vendor/cloud.google.com/go/bigtable/bttest/inmem_test.go new file mode 100644 index 00000000..0e837bc6 --- /dev/null +++ b/vendor/cloud.google.com/go/bigtable/bttest/inmem_test.go @@ -0,0 +1,517 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package bttest + +import ( + "fmt" + "math/rand" + "sync" + "sync/atomic" + "testing" + "time" + + "golang.org/x/net/context" + btapb "google.golang.org/genproto/googleapis/bigtable/admin/v2" + btpb "google.golang.org/genproto/googleapis/bigtable/v2" + "google.golang.org/grpc" + "strconv" +) + +func TestConcurrentMutationsReadModifyAndGC(t *testing.T) { + s := &server{ + tables: make(map[string]*table), + } + ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond) + defer cancel() + if _, err := s.CreateTable( + ctx, + &btapb.CreateTableRequest{Parent: "cluster", TableId: "t"}); err != nil { + t.Fatal(err) + } + const name = `cluster/tables/t` + tbl := s.tables[name] + req := &btapb.ModifyColumnFamiliesRequest{ + Name: name, + Modifications: []*btapb.ModifyColumnFamiliesRequest_Modification{{ + Id: "cf", + Mod: &btapb.ModifyColumnFamiliesRequest_Modification_Create{&btapb.ColumnFamily{}}, + }}, + } + _, err := s.ModifyColumnFamilies(ctx, req) + if err != nil { + t.Fatal(err) + } + req = &btapb.ModifyColumnFamiliesRequest{ + Name: name, + Modifications: []*btapb.ModifyColumnFamiliesRequest_Modification{{ + Id: "cf", + Mod: 
&btapb.ModifyColumnFamiliesRequest_Modification_Update{&btapb.ColumnFamily{ + GcRule: &btapb.GcRule{Rule: &btapb.GcRule_MaxNumVersions{1}}, + }}, + }}, + } + if _, err := s.ModifyColumnFamilies(ctx, req); err != nil { + t.Fatal(err) + } + + var wg sync.WaitGroup + var ts int64 + ms := func() []*btpb.Mutation { + return []*btpb.Mutation{{ + Mutation: &btpb.Mutation_SetCell_{&btpb.Mutation_SetCell{ + FamilyName: "cf", + ColumnQualifier: []byte(`col`), + TimestampMicros: atomic.AddInt64(&ts, 1000), + }}, + }} + } + + rmw := func() *btpb.ReadModifyWriteRowRequest { + return &btpb.ReadModifyWriteRowRequest{ + TableName: name, + RowKey: []byte(fmt.Sprint(rand.Intn(100))), + Rules: []*btpb.ReadModifyWriteRule{{ + FamilyName: "cf", + ColumnQualifier: []byte("col"), + Rule: &btpb.ReadModifyWriteRule_IncrementAmount{1}, + }}, + } + } + for i := 0; i < 100; i++ { + wg.Add(1) + go func() { + defer wg.Done() + for ctx.Err() == nil { + req := &btpb.MutateRowRequest{ + TableName: name, + RowKey: []byte(fmt.Sprint(rand.Intn(100))), + Mutations: ms(), + } + s.MutateRow(ctx, req) + } + }() + wg.Add(1) + go func() { + defer wg.Done() + for ctx.Err() == nil { + _, _ = s.ReadModifyWriteRow(ctx, rmw()) + } + }() + + wg.Add(1) + go func() { + defer wg.Done() + tbl.gc() + }() + } + done := make(chan struct{}) + go func() { + wg.Wait() + close(done) + }() + select { + case <-done: + case <-time.After(1 * time.Second): + t.Error("Concurrent mutations and GCs haven't completed after 1s") + } +} + +func TestCreateTableWithFamily(t *testing.T) { + // The Go client currently doesn't support creating a table with column families + // in one operation but it is allowed by the API. This must still be supported by the + // fake server so this test lives here instead of in the main bigtable + // integration test. 
+ s := &server{ + tables: make(map[string]*table), + } + ctx := context.Background() + newTbl := btapb.Table{ + ColumnFamilies: map[string]*btapb.ColumnFamily{ + "cf1": {GcRule: &btapb.GcRule{Rule: &btapb.GcRule_MaxNumVersions{123}}}, + "cf2": {GcRule: &btapb.GcRule{Rule: &btapb.GcRule_MaxNumVersions{456}}}, + }, + } + cTbl, err := s.CreateTable(ctx, &btapb.CreateTableRequest{Parent: "cluster", TableId: "t", Table: &newTbl}) + if err != nil { + t.Fatalf("Creating table: %v", err) + } + tbl, err := s.GetTable(ctx, &btapb.GetTableRequest{Name: cTbl.Name}) + if err != nil { + t.Fatalf("Getting table: %v", err) + } + cf := tbl.ColumnFamilies["cf1"] + if cf == nil { + t.Fatalf("Missing col family cf1") + } + if got, want := cf.GcRule.GetMaxNumVersions(), int32(123); got != want { + t.Errorf("Invalid MaxNumVersions: wanted:%d, got:%d", want, got) + } + cf = tbl.ColumnFamilies["cf2"] + if cf == nil { + t.Fatalf("Missing col family cf2") + } + if got, want := cf.GcRule.GetMaxNumVersions(), int32(456); got != want { + t.Errorf("Invalid MaxNumVersions: wanted:%d, got:%d", want, got) + } +} + +type MockSampleRowKeysServer struct { + responses []*btpb.SampleRowKeysResponse + grpc.ServerStream +} + +func (s *MockSampleRowKeysServer) Send(resp *btpb.SampleRowKeysResponse) error { + s.responses = append(s.responses, resp) + return nil +} + +func TestSampleRowKeys(t *testing.T) { + s := &server{ + tables: make(map[string]*table), + } + ctx := context.Background() + newTbl := btapb.Table{ + ColumnFamilies: map[string]*btapb.ColumnFamily{ + "cf": {GcRule: &btapb.GcRule{Rule: &btapb.GcRule_MaxNumVersions{1}}}, + }, + } + tbl, err := s.CreateTable(ctx, &btapb.CreateTableRequest{Parent: "cluster", TableId: "t", Table: &newTbl}) + if err != nil { + t.Fatalf("Creating table: %v", err) + } + + // Populate the table + val := []byte("value") + rowCount := 1000 + for i := 0; i < rowCount; i++ { + req := &btpb.MutateRowRequest{ + TableName: tbl.Name, + RowKey: []byte("row-" + 
strconv.Itoa(i)), + Mutations: []*btpb.Mutation{{ + Mutation: &btpb.Mutation_SetCell_{&btpb.Mutation_SetCell{ + FamilyName: "cf", + ColumnQualifier: []byte("col"), + TimestampMicros: 0, + Value: val, + }}, + }}, + } + if _, err := s.MutateRow(ctx, req); err != nil { + t.Fatalf("Populating table: %v", err) + } + } + + mock := &MockSampleRowKeysServer{} + if err := s.SampleRowKeys(&btpb.SampleRowKeysRequest{TableName: tbl.Name}, mock); err != nil { + t.Errorf("SampleRowKeys error: %v", err) + } + if len(mock.responses) == 0 { + t.Fatal("Response count: got 0, want > 0") + } + // Make sure the offset of the final response is the offset of the final row + got := mock.responses[len(mock.responses)-1].OffsetBytes + want := int64((rowCount - 1) * len(val)) + if got != want { + t.Errorf("Invalid offset: got %d, want %d", got, want) + } +} + +func TestDropRowRange(t *testing.T) { + s := &server{ + tables: make(map[string]*table), + } + ctx := context.Background() + newTbl := btapb.Table{ + ColumnFamilies: map[string]*btapb.ColumnFamily{ + "cf": {GcRule: &btapb.GcRule{Rule: &btapb.GcRule_MaxNumVersions{1}}}, + }, + } + tblInfo, err := s.CreateTable(ctx, &btapb.CreateTableRequest{Parent: "cluster", TableId: "t", Table: &newTbl}) + if err != nil { + t.Fatalf("Creating table: %v", err) + } + + tbl := s.tables[tblInfo.Name] + + // Populate the table + prefixes := []string{"AAA", "BBB", "CCC", "DDD"} + count := 3 + doWrite := func() { + for _, prefix := range prefixes { + for i := 0; i < count; i++ { + req := &btpb.MutateRowRequest{ + TableName: tblInfo.Name, + RowKey: []byte(prefix + strconv.Itoa(i)), + Mutations: []*btpb.Mutation{{ + Mutation: &btpb.Mutation_SetCell_{&btpb.Mutation_SetCell{ + FamilyName: "cf", + ColumnQualifier: []byte("col"), + TimestampMicros: 0, + Value: []byte{}, + }}, + }}, + } + if _, err := s.MutateRow(ctx, req); err != nil { + t.Fatalf("Populating table: %v", err) + } + } + } + } + + doWrite() + tblSize := len(tbl.rows) + req := 
&btapb.DropRowRangeRequest{ + Name: tblInfo.Name, + Target: &btapb.DropRowRangeRequest_RowKeyPrefix{[]byte("AAA")}, + } + if _, err = s.DropRowRange(ctx, req); err != nil { + t.Fatalf("Dropping first range: %v", err) + } + got, want := len(tbl.rows), tblSize-count + if got != want { + t.Errorf("Row count after first drop: got %d (%v), want %d", got, tbl.rows, want) + } + + req = &btapb.DropRowRangeRequest{ + Name: tblInfo.Name, + Target: &btapb.DropRowRangeRequest_RowKeyPrefix{[]byte("DDD")}, + } + if _, err = s.DropRowRange(ctx, req); err != nil { + t.Fatalf("Dropping second range: %v", err) + } + got, want = len(tbl.rows), tblSize-(2*count) + if got != want { + t.Errorf("Row count after second drop: got %d (%v), want %d", got, tbl.rows, want) + } + + req = &btapb.DropRowRangeRequest{ + Name: tblInfo.Name, + Target: &btapb.DropRowRangeRequest_RowKeyPrefix{[]byte("XXX")}, + } + if _, err = s.DropRowRange(ctx, req); err != nil { + t.Fatalf("Dropping invalid range: %v", err) + } + got, want = len(tbl.rows), tblSize-(2*count) + if got != want { + t.Errorf("Row count after invalid drop: got %d (%v), want %d", got, tbl.rows, want) + } + + req = &btapb.DropRowRangeRequest{ + Name: tblInfo.Name, + Target: &btapb.DropRowRangeRequest_DeleteAllDataFromTable{true}, + } + if _, err = s.DropRowRange(ctx, req); err != nil { + t.Fatalf("Dropping all data: %v", err) + } + got, want = len(tbl.rows), 0 + if got != want { + t.Errorf("Row count after drop all: got %d, want %d", got, want) + } + + // Test that we can write rows, delete some and then write them again. 
+ count = 1 + doWrite() + + req = &btapb.DropRowRangeRequest{ + Name: tblInfo.Name, + Target: &btapb.DropRowRangeRequest_DeleteAllDataFromTable{true}, + } + if _, err = s.DropRowRange(ctx, req); err != nil { + t.Fatalf("Dropping all data: %v", err) + } + got, want = len(tbl.rows), 0 + if got != want { + t.Errorf("Row count after drop all: got %d, want %d", got, want) + } + + doWrite() + got, want = len(tbl.rows), len(prefixes) + if got != want { + t.Errorf("Row count after rewrite: got %d, want %d", got, want) + } + + req = &btapb.DropRowRangeRequest{ + Name: tblInfo.Name, + Target: &btapb.DropRowRangeRequest_RowKeyPrefix{[]byte("BBB")}, + } + if _, err = s.DropRowRange(ctx, req); err != nil { + t.Fatalf("Dropping range: %v", err) + } + doWrite() + got, want = len(tbl.rows), len(prefixes) + if got != want { + t.Errorf("Row count after drop range: got %d, want %d", got, want) + } +} + +type MockReadRowsServer struct { + responses []*btpb.ReadRowsResponse + grpc.ServerStream +} + +func (s *MockReadRowsServer) Send(resp *btpb.ReadRowsResponse) error { + s.responses = append(s.responses, resp) + return nil +} + +func TestReadRowsOrder(t *testing.T) { + s := &server{ + tables: make(map[string]*table), + } + ctx := context.Background() + newTbl := btapb.Table{ + ColumnFamilies: map[string]*btapb.ColumnFamily{ + "cf0": {GcRule: &btapb.GcRule{Rule: &btapb.GcRule_MaxNumVersions{1}}}, + }, + } + tblInfo, err := s.CreateTable(ctx, &btapb.CreateTableRequest{Parent: "cluster", TableId: "t", Table: &newTbl}) + if err != nil { + t.Fatalf("Creating table: %v", err) + } + count := 3 + mcf := func(i int) *btapb.ModifyColumnFamiliesRequest { + return &btapb.ModifyColumnFamiliesRequest{ + Name: tblInfo.Name, + Modifications: []*btapb.ModifyColumnFamiliesRequest_Modification{{ + Id: "cf" + strconv.Itoa(i), + Mod: &btapb.ModifyColumnFamiliesRequest_Modification_Create{&btapb.ColumnFamily{}}, + }}, + } + } + for i := 1; i <= count; i++ { + _, err = s.ModifyColumnFamilies(ctx, mcf(i)) + 
if err != nil { + t.Fatal(err) + } + } + // Populate the table + for fc := 0; fc < count; fc++ { + for cc := count; cc > 0; cc-- { + for tc := 0; tc < count; tc++ { + req := &btpb.MutateRowRequest{ + TableName: tblInfo.Name, + RowKey: []byte("row"), + Mutations: []*btpb.Mutation{{ + Mutation: &btpb.Mutation_SetCell_{&btpb.Mutation_SetCell{ + FamilyName: "cf" + strconv.Itoa(fc), + ColumnQualifier: []byte("col" + strconv.Itoa(cc)), + TimestampMicros: int64((tc + 1) * 1000), + Value: []byte{}, + }}, + }}, + } + if _, err := s.MutateRow(ctx, req); err != nil { + t.Fatalf("Populating table: %v", err) + } + } + } + } + req := &btpb.ReadRowsRequest{ + TableName: tblInfo.Name, + Rows: &btpb.RowSet{RowKeys: [][]byte{[]byte("row")}}, + } + mock := &MockReadRowsServer{} + if err = s.ReadRows(req, mock); err != nil { + t.Errorf("ReadRows error: %v", err) + } + if len(mock.responses) == 0 { + t.Fatal("Response count: got 0, want > 0") + } + if len(mock.responses[0].Chunks) != 27 { + t.Fatal("Chunk count: got %d, want 27", len(mock.responses[0].Chunks)) + } + testOrder := func(ms *MockReadRowsServer) { + var prevFam, prevCol string + var prevTime int64 + for _, cc := range ms.responses[0].Chunks { + if prevFam == "" { + prevFam = cc.FamilyName.Value + prevCol = string(cc.Qualifier.Value) + prevTime = cc.TimestampMicros + continue + } + if cc.FamilyName.Value < prevFam { + t.Errorf("Family order is not correct: got %s < %s", cc.FamilyName.Value, prevFam) + } else if cc.FamilyName.Value == prevFam { + if string(cc.Qualifier.Value) < prevCol { + t.Errorf("Column order is not correct: got %s < %s", string(cc.Qualifier.Value), prevCol) + } else if string(cc.Qualifier.Value) == prevCol { + if cc.TimestampMicros > prevTime { + t.Errorf("cell order is not correct: got %d > %d", cc.TimestampMicros, prevTime) + } + } + } + prevFam = cc.FamilyName.Value + prevCol = string(cc.Qualifier.Value) + prevTime = cc.TimestampMicros + } + } + testOrder(mock) + + // Read with interleave filter + 
inter := &btpb.RowFilter_Interleave{} + fnr := &btpb.RowFilter{Filter: &btpb.RowFilter_FamilyNameRegexFilter{"1"}} + cqr := &btpb.RowFilter{Filter: &btpb.RowFilter_ColumnQualifierRegexFilter{[]byte("2")}} + inter.Filters = append(inter.Filters, fnr, cqr) + req = &btpb.ReadRowsRequest{ + TableName: tblInfo.Name, + Rows: &btpb.RowSet{RowKeys: [][]byte{[]byte("row")}}, + Filter: &btpb.RowFilter{ + Filter: &btpb.RowFilter_Interleave_{inter}, + }, + } + mock = &MockReadRowsServer{} + if err = s.ReadRows(req, mock); err != nil { + t.Errorf("ReadRows error: %v", err) + } + if len(mock.responses) == 0 { + t.Fatal("Response count: got 0, want > 0") + } + if len(mock.responses[0].Chunks) != 18 { + t.Fatal("Chunk count: got %d, want 18", len(mock.responses[0].Chunks)) + } + testOrder(mock) + + // Check order after ReadModifyWriteRow + rmw := func(i int) *btpb.ReadModifyWriteRowRequest { + return &btpb.ReadModifyWriteRowRequest{ + TableName: tblInfo.Name, + RowKey: []byte("row"), + Rules: []*btpb.ReadModifyWriteRule{{ + FamilyName: "cf3", + ColumnQualifier: []byte("col" + strconv.Itoa(i)), + Rule: &btpb.ReadModifyWriteRule_IncrementAmount{1}, + }}, + } + } + for i := count; i > 0; i-- { + s.ReadModifyWriteRow(ctx, rmw(i)) + } + req = &btpb.ReadRowsRequest{ + TableName: tblInfo.Name, + Rows: &btpb.RowSet{RowKeys: [][]byte{[]byte("row")}}, + } + mock = &MockReadRowsServer{} + if err = s.ReadRows(req, mock); err != nil { + t.Errorf("ReadRows error: %v", err) + } + if len(mock.responses) == 0 { + t.Fatal("Response count: got 0, want > 0") + } + if len(mock.responses[0].Chunks) != 30 { + t.Fatal("Chunk count: got %d, want 30", len(mock.responses[0].Chunks)) + } + testOrder(mock) +} diff --git a/vendor/cloud.google.com/go/bigtable/cmd/cbt/cbt.go b/vendor/cloud.google.com/go/bigtable/cmd/cbt/cbt.go new file mode 100644 index 00000000..4afd1e8d --- /dev/null +++ b/vendor/cloud.google.com/go/bigtable/cmd/cbt/cbt.go @@ -0,0 +1,789 @@ +/* +Copyright 2015 Google Inc. All Rights Reserved. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package main + +// Command docs are in cbtdoc.go. + +import ( + "bytes" + "flag" + "fmt" + "go/format" + "io" + "log" + "os" + "regexp" + "sort" + "strconv" + "strings" + "text/tabwriter" + "text/template" + "time" + + "cloud.google.com/go/bigtable" + "cloud.google.com/go/bigtable/internal/cbtconfig" + "golang.org/x/net/context" + "google.golang.org/api/option" + "google.golang.org/grpc" +) + +var ( + oFlag = flag.String("o", "", "if set, redirect stdout to this file") + + config *cbtconfig.Config + client *bigtable.Client + adminClient *bigtable.AdminClient + instanceAdminClient *bigtable.InstanceAdminClient +) + +func getCredentialOpts(opts []option.ClientOption) []option.ClientOption { + if ts := config.TokenSource; ts != nil { + opts = append(opts, option.WithTokenSource(ts)) + } + if tlsCreds := config.TLSCreds; tlsCreds != nil { + opts = append(opts, option.WithGRPCDialOption(grpc.WithTransportCredentials(tlsCreds))) + } + return opts +} + +func getClient() *bigtable.Client { + if client == nil { + var opts []option.ClientOption + if ep := config.DataEndpoint; ep != "" { + opts = append(opts, option.WithEndpoint(ep)) + } + opts = getCredentialOpts(opts) + var err error + client, err = bigtable.NewClient(context.Background(), config.Project, config.Instance, opts...) 
+ if err != nil { + log.Fatalf("Making bigtable.Client: %v", err) + } + } + return client +} + +func getAdminClient() *bigtable.AdminClient { + if adminClient == nil { + var opts []option.ClientOption + if ep := config.AdminEndpoint; ep != "" { + opts = append(opts, option.WithEndpoint(ep)) + } + opts = getCredentialOpts(opts) + var err error + adminClient, err = bigtable.NewAdminClient(context.Background(), config.Project, config.Instance, opts...) + if err != nil { + log.Fatalf("Making bigtable.AdminClient: %v", err) + } + } + return adminClient +} + +func getInstanceAdminClient() *bigtable.InstanceAdminClient { + if instanceAdminClient == nil { + var opts []option.ClientOption + if ep := config.AdminEndpoint; ep != "" { + opts = append(opts, option.WithEndpoint(ep)) + } + opts = getCredentialOpts(opts) + var err error + instanceAdminClient, err = bigtable.NewInstanceAdminClient(context.Background(), config.Project, opts...) + if err != nil { + log.Fatalf("Making bigtable.InstanceAdminClient: %v", err) + } + } + return instanceAdminClient +} + +func main() { + var err error + config, err = cbtconfig.Load() + if err != nil { + log.Fatal(err) + } + config.RegisterFlags() + + flag.Usage = func() { usage(os.Stderr) } + flag.Parse() + if flag.NArg() == 0 { + usage(os.Stderr) + os.Exit(1) + } + + if *oFlag != "" { + f, err := os.Create(*oFlag) + if err != nil { + log.Fatal(err) + } + defer func() { + if err := f.Close(); err != nil { + log.Fatal(err) + } + }() + os.Stdout = f + } + + ctx := context.Background() + for _, cmd := range commands { + if cmd.Name == flag.Arg(0) { + if err := config.CheckFlags(cmd.Required); err != nil { + log.Fatal(err) + } + cmd.do(ctx, flag.Args()[1:]...) 
+ return + } + } + log.Fatalf("Unknown command %q", flag.Arg(0)) +} + +func usage(w io.Writer) { + fmt.Fprintf(w, "Usage: %s [flags] ...\n", os.Args[0]) + flag.CommandLine.SetOutput(w) + flag.CommandLine.PrintDefaults() + fmt.Fprintf(w, "\n%s", cmdSummary) +} + +var cmdSummary string // generated in init, below + +func init() { + var buf bytes.Buffer + tw := tabwriter.NewWriter(&buf, 10, 8, 4, '\t', 0) + for _, cmd := range commands { + fmt.Fprintf(tw, "cbt %s\t%s\n", cmd.Name, cmd.Desc) + } + tw.Flush() + buf.WriteString(configHelp) + cmdSummary = buf.String() +} + +var configHelp = ` +For convenience, values of the -project, -instance, -creds, +-admin-endpoint and -data-endpoint flags may be specified in +` + cbtconfig.Filename() + ` in this format: + project = my-project-123 + instance = my-instance + creds = path-to-account-key.json + admin-endpoint = hostname:port + data-endpoint = hostname:port +All values are optional, and all will be overridden by flags. +` + +var commands = []struct { + Name, Desc string + do func(context.Context, ...string) + Usage string + Required cbtconfig.RequiredFlags +}{ + { + Name: "count", + Desc: "Count rows in a table", + do: doCount, + Usage: "cbt count
", + Required: cbtconfig.ProjectAndInstanceRequired, + }, + { + Name: "createfamily", + Desc: "Create a column family", + do: doCreateFamily, + Usage: "cbt createfamily
", + Required: cbtconfig.ProjectAndInstanceRequired, + }, + { + Name: "createtable", + Desc: "Create a table", + do: doCreateTable, + Usage: "cbt createtable
[initial_splits...]\n" + + " initial_splits=row A row key to be used to initially split the table " + + "into multiple tablets. Can be repeated to create multiple splits.", + Required: cbtconfig.ProjectAndInstanceRequired, + }, + { + Name: "deletefamily", + Desc: "Delete a column family", + do: doDeleteFamily, + Usage: "cbt deletefamily
", + Required: cbtconfig.ProjectAndInstanceRequired, + }, + { + Name: "deleterow", + Desc: "Delete a row", + do: doDeleteRow, + Usage: "cbt deleterow
", + Required: cbtconfig.ProjectAndInstanceRequired, + }, + { + Name: "deletetable", + Desc: "Delete a table", + do: doDeleteTable, + Usage: "cbt deletetable
", + Required: cbtconfig.ProjectAndInstanceRequired, + }, + { + Name: "doc", + Desc: "Print godoc-suitable documentation for cbt", + do: doDoc, + Usage: "cbt doc", + Required: cbtconfig.NoneRequired, + }, + { + Name: "help", + Desc: "Print help text", + do: doHelp, + Usage: "cbt help [command]", + Required: cbtconfig.NoneRequired, + }, + { + Name: "listinstances", + Desc: "List instances in a project", + do: doListInstances, + Usage: "cbt listinstances", + Required: cbtconfig.ProjectRequired, + }, + { + Name: "lookup", + Desc: "Read from a single row", + do: doLookup, + Usage: "cbt lookup
", + Required: cbtconfig.ProjectAndInstanceRequired, + }, + { + Name: "ls", + Desc: "List tables and column families", + do: doLS, + Usage: "cbt ls List tables\n" + + "cbt ls
List column families in
", + Required: cbtconfig.ProjectAndInstanceRequired, + }, + { + Name: "mddoc", + Desc: "Print documentation for cbt in Markdown format", + do: doMDDoc, + Usage: "cbt mddoc", + Required: cbtconfig.NoneRequired, + }, + { + Name: "read", + Desc: "Read rows", + do: doRead, + Usage: "cbt read
[start=] [end=] [prefix=] [count=]\n" + + " start= Start reading at this row\n" + + " end= Stop reading before this row\n" + + " prefix= Read rows with this prefix\n" + + " count= Read only this many rows\n", + Required: cbtconfig.ProjectAndInstanceRequired, + }, + { + Name: "set", + Desc: "Set value of a cell", + do: doSet, + Usage: "cbt set
family:column=val[@ts] ...\n" + + " family:column=val[@ts] may be repeated to set multiple cells.\n" + + "\n" + + " ts is an optional integer timestamp.\n" + + " If it cannot be parsed, the `@ts` part will be\n" + + " interpreted as part of the value.", + Required: cbtconfig.ProjectAndInstanceRequired, + }, + { + Name: "setgcpolicy", + Desc: "Set the GC policy for a column family", + do: doSetGCPolicy, + Usage: "cbt setgcpolicy
( maxage= | maxversions= )\n" + + "\n" + + ` maxage= Maximum timestamp age to preserve (e.g. "1h", "4d")` + "\n" + + " maxversions= Maximum number of versions to preserve", + Required: cbtconfig.ProjectAndInstanceRequired, + }, +} + +func doCount(ctx context.Context, args ...string) { + if len(args) != 1 { + log.Fatal("usage: cbt count
") + } + tbl := getClient().Open(args[0]) + + n := 0 + err := tbl.ReadRows(ctx, bigtable.InfiniteRange(""), func(_ bigtable.Row) bool { + n++ + return true + }, bigtable.RowFilter(bigtable.StripValueFilter())) + if err != nil { + log.Fatalf("Reading rows: %v", err) + } + fmt.Println(n) +} + +func doCreateFamily(ctx context.Context, args ...string) { + if len(args) != 2 { + log.Fatal("usage: cbt createfamily
") + } + err := getAdminClient().CreateColumnFamily(ctx, args[0], args[1]) + if err != nil { + log.Fatalf("Creating column family: %v", err) + } +} + +func doCreateTable(ctx context.Context, args ...string) { + if len(args) < 1 { + log.Fatal("usage: cbt createtable
[initial_splits...]") + } + var err error + if len(args) > 1 { + splits := args[1:] + err = getAdminClient().CreatePresplitTable(ctx, args[0], splits) + } else { + err = getAdminClient().CreateTable(ctx, args[0]) + } + if err != nil { + log.Fatalf("Creating table: %v", err) + } +} + +func doDeleteFamily(ctx context.Context, args ...string) { + if len(args) != 2 { + log.Fatal("usage: cbt deletefamily
") + } + err := getAdminClient().DeleteColumnFamily(ctx, args[0], args[1]) + if err != nil { + log.Fatalf("Deleting column family: %v", err) + } +} + +func doDeleteRow(ctx context.Context, args ...string) { + if len(args) != 2 { + log.Fatal("usage: cbt deleterow
") + } + tbl := getClient().Open(args[0]) + mut := bigtable.NewMutation() + mut.DeleteRow() + if err := tbl.Apply(ctx, args[1], mut); err != nil { + log.Fatalf("Deleting row: %v", err) + } +} + +func doDeleteTable(ctx context.Context, args ...string) { + if len(args) != 1 { + log.Fatalf("Can't do `cbt deletetable %s`", args) + } + err := getAdminClient().DeleteTable(ctx, args[0]) + if err != nil { + log.Fatalf("Deleting table: %v", err) + } +} + +// to break circular dependencies +var ( + doDocFn func(ctx context.Context, args ...string) + doHelpFn func(ctx context.Context, args ...string) + doMDDocFn func(ctx context.Context, args ...string) +) + +func init() { + doDocFn = doDocReal + doHelpFn = doHelpReal + doMDDocFn = doMDDocReal +} + +func doDoc(ctx context.Context, args ...string) { doDocFn(ctx, args...) } +func doHelp(ctx context.Context, args ...string) { doHelpFn(ctx, args...) } +func doMDDoc(ctx context.Context, args ...string) { doMDDocFn(ctx, args...) } + +func docFlags() []*flag.Flag { + // Only include specific flags, in a specific order. + var flags []*flag.Flag + for _, name := range []string{"project", "instance", "creds"} { + f := flag.Lookup(name) + if f == nil { + log.Fatalf("Flag not linked: -%s", name) + } + flags = append(flags, f) + } + return flags +} + +func doDocReal(ctx context.Context, args ...string) { + data := map[string]interface{}{ + "Commands": commands, + "Flags": docFlags(), + } + var buf bytes.Buffer + if err := docTemplate.Execute(&buf, data); err != nil { + log.Fatalf("Bad doc template: %v", err) + } + out, err := format.Source(buf.Bytes()) + if err != nil { + log.Fatalf("Bad doc output: %v", err) + } + os.Stdout.Write(out) +} + +func indentLines(s, ind string) string { + ss := strings.Split(s, "\n") + for i, p := range ss { + ss[i] = ind + p + } + return strings.Join(ss, "\n") +} + +var docTemplate = template.Must(template.New("doc").Funcs(template.FuncMap{ + "indent": indentLines, +}). 
+ Parse(` +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// DO NOT EDIT. THIS IS AUTOMATICALLY GENERATED. +// Run "go generate" to regenerate. +//go:generate go run cbt.go -o cbtdoc.go doc + +/* +Cbt is a tool for doing basic interactions with Cloud Bigtable. + +Usage: + + cbt [options] command [arguments] + +The commands are: +{{range .Commands}} + {{printf "%-25s %s" .Name .Desc}}{{end}} + +Use "cbt help " for more information about a command. 
+ +The options are: +{{range .Flags}} + -{{.Name}} string + {{.Usage}}{{end}} + +{{range .Commands}} +{{.Desc}} + +Usage: +{{indent .Usage "\t"}} + + + +{{end}} +*/ +package main +`)) + +func doHelpReal(ctx context.Context, args ...string) { + if len(args) == 0 { + usage(os.Stdout) + return + } + for _, cmd := range commands { + if cmd.Name == args[0] { + fmt.Println(cmd.Usage) + return + } + } + log.Fatalf("Don't know command %q", args[0]) +} + +func doListInstances(ctx context.Context, args ...string) { + if len(args) != 0 { + log.Fatalf("usage: cbt listinstances") + } + is, err := getInstanceAdminClient().Instances(ctx) + if err != nil { + log.Fatalf("Getting list of instances: %v", err) + } + tw := tabwriter.NewWriter(os.Stdout, 10, 8, 4, '\t', 0) + fmt.Fprintf(tw, "Instance Name\tInfo\n") + fmt.Fprintf(tw, "-------------\t----\n") + for _, i := range is { + fmt.Fprintf(tw, "%s\t%s\n", i.Name, i.DisplayName) + } + tw.Flush() +} + +func doLookup(ctx context.Context, args ...string) { + if len(args) != 2 { + log.Fatalf("usage: cbt lookup
") + } + table, row := args[0], args[1] + tbl := getClient().Open(table) + r, err := tbl.ReadRow(ctx, row) + if err != nil { + log.Fatalf("Reading row: %v", err) + } + printRow(r) +} + +func printRow(r bigtable.Row) { + fmt.Println(strings.Repeat("-", 40)) + fmt.Println(r.Key()) + + var fams []string + for fam := range r { + fams = append(fams, fam) + } + sort.Strings(fams) + for _, fam := range fams { + ris := r[fam] + sort.Sort(byColumn(ris)) + for _, ri := range ris { + ts := time.Unix(0, int64(ri.Timestamp)*1e3) + fmt.Printf(" %-40s @ %s\n", ri.Column, ts.Format("2006/01/02-15:04:05.000000")) + fmt.Printf(" %q\n", ri.Value) + } + } +} + +type byColumn []bigtable.ReadItem + +func (b byColumn) Len() int { return len(b) } +func (b byColumn) Swap(i, j int) { b[i], b[j] = b[j], b[i] } +func (b byColumn) Less(i, j int) bool { return b[i].Column < b[j].Column } + +func doLS(ctx context.Context, args ...string) { + switch len(args) { + default: + log.Fatalf("Can't do `cbt ls %s`", args) + case 0: + tables, err := getAdminClient().Tables(ctx) + if err != nil { + log.Fatalf("Getting list of tables: %v", err) + } + sort.Strings(tables) + for _, table := range tables { + fmt.Println(table) + } + case 1: + table := args[0] + ti, err := getAdminClient().TableInfo(ctx, table) + if err != nil { + log.Fatalf("Getting table info: %v", err) + } + sort.Strings(ti.Families) + for _, fam := range ti.Families { + fmt.Println(fam) + } + } +} + +func doMDDocReal(ctx context.Context, args ...string) { + data := map[string]interface{}{ + "Commands": commands, + "Flags": docFlags(), + } + var buf bytes.Buffer + if err := mddocTemplate.Execute(&buf, data); err != nil { + log.Fatalf("Bad mddoc template: %v", err) + } + io.Copy(os.Stdout, &buf) +} + +var mddocTemplate = template.Must(template.New("mddoc").Funcs(template.FuncMap{ + "indent": indentLines, +}). + Parse(` +Cbt is a tool for doing basic interactions with Cloud Bigtable. 
+ +Usage: + + cbt [options] command [arguments] + +The commands are: +{{range .Commands}} + {{printf "%-25s %s" .Name .Desc}}{{end}} + +Use "cbt help " for more information about a command. + +The options are: +{{range .Flags}} + -{{.Name}} string + {{.Usage}}{{end}} + +{{range .Commands}} +## {{.Desc}} + +{{indent .Usage "\t"}} + + + +{{end}} +`)) + +func doRead(ctx context.Context, args ...string) { + if len(args) < 1 { + log.Fatalf("usage: cbt read
[args ...]") + } + tbl := getClient().Open(args[0]) + + parsed := make(map[string]string) + for _, arg := range args[1:] { + i := strings.Index(arg, "=") + if i < 0 { + log.Fatalf("Bad arg %q", arg) + } + key, val := arg[:i], arg[i+1:] + switch key { + default: + log.Fatalf("Unknown arg key %q", key) + case "limit": + // Be nicer; we used to support this, but renamed it to "end". + log.Fatalf("Unknown arg key %q; did you mean %q?", key, "end") + case "start", "end", "prefix", "count": + parsed[key] = val + } + } + if (parsed["start"] != "" || parsed["end"] != "") && parsed["prefix"] != "" { + log.Fatal(`"start"/"end" may not be mixed with "prefix"`) + } + + var rr bigtable.RowRange + if start, end := parsed["start"], parsed["end"]; end != "" { + rr = bigtable.NewRange(start, end) + } else if start != "" { + rr = bigtable.InfiniteRange(start) + } + if prefix := parsed["prefix"]; prefix != "" { + rr = bigtable.PrefixRange(prefix) + } + + var opts []bigtable.ReadOption + if count := parsed["count"]; count != "" { + n, err := strconv.ParseInt(count, 0, 64) + if err != nil { + log.Fatalf("Bad count %q: %v", count, err) + } + opts = append(opts, bigtable.LimitRows(n)) + } + + // TODO(dsymonds): Support filters. + err := tbl.ReadRows(ctx, rr, func(r bigtable.Row) bool { + printRow(r) + return true + }, opts...) + if err != nil { + log.Fatalf("Reading rows: %v", err) + } +} + +var setArg = regexp.MustCompile(`([^:]+):([^=]*)=(.*)`) + +func doSet(ctx context.Context, args ...string) { + if len(args) < 3 { + log.Fatalf("usage: cbt set
family:[column]=val[@ts] ...") + } + tbl := getClient().Open(args[0]) + row := args[1] + mut := bigtable.NewMutation() + for _, arg := range args[2:] { + m := setArg.FindStringSubmatch(arg) + if m == nil { + log.Fatalf("Bad set arg %q", arg) + } + val := m[3] + ts := bigtable.Now() + if i := strings.LastIndex(val, "@"); i >= 0 { + // Try parsing a timestamp. + n, err := strconv.ParseInt(val[i+1:], 0, 64) + if err == nil { + val = val[:i] + ts = bigtable.Timestamp(n) + } + } + mut.Set(m[1], m[2], ts, []byte(val)) + } + if err := tbl.Apply(ctx, row, mut); err != nil { + log.Fatalf("Applying mutation: %v", err) + } +} + +func doSetGCPolicy(ctx context.Context, args ...string) { + if len(args) < 3 { + log.Fatalf("usage: cbt setgcpolicy
( maxage= | maxversions= )") + } + table := args[0] + fam := args[1] + + var pol bigtable.GCPolicy + switch p := args[2]; { + case strings.HasPrefix(p, "maxage="): + d, err := parseDuration(p[7:]) + if err != nil { + log.Fatal(err) + } + pol = bigtable.MaxAgePolicy(d) + case strings.HasPrefix(p, "maxversions="): + n, err := strconv.ParseUint(p[12:], 10, 16) + if err != nil { + log.Fatal(err) + } + pol = bigtable.MaxVersionsPolicy(int(n)) + default: + log.Fatalf("Bad GC policy %q", p) + } + if err := getAdminClient().SetGCPolicy(ctx, table, fam, pol); err != nil { + log.Fatalf("Setting GC policy: %v", err) + } +} + +// parseDuration parses a duration string. +// It is similar to Go's time.ParseDuration, except with a different set of supported units, +// and only simple formats supported. +func parseDuration(s string) (time.Duration, error) { + // [0-9]+[a-z]+ + + // Split [0-9]+ from [a-z]+. + i := 0 + for ; i < len(s); i++ { + c := s[i] + if c < '0' || c > '9' { + break + } + } + ds, u := s[:i], s[i:] + if ds == "" || u == "" { + return 0, fmt.Errorf("invalid duration %q", s) + } + // Parse them. + d, err := strconv.ParseUint(ds, 10, 32) + if err != nil { + return 0, fmt.Errorf("invalid duration %q: %v", s, err) + } + unit, ok := unitMap[u] + if !ok { + return 0, fmt.Errorf("unknown unit %q in duration %q", u, s) + } + if d > uint64((1<<63-1)/unit) { + // overflow + return 0, fmt.Errorf("invalid duration %q overflows", s) + } + return time.Duration(d) * unit, nil +} + +var unitMap = map[string]time.Duration{ + "ms": time.Millisecond, + "s": time.Second, + "m": time.Minute, + "h": time.Hour, + "d": 24 * time.Hour, +} diff --git a/vendor/cloud.google.com/go/bigtable/cmd/cbt/cbt_test.go b/vendor/cloud.google.com/go/bigtable/cmd/cbt/cbt_test.go new file mode 100644 index 00000000..350e4f00 --- /dev/null +++ b/vendor/cloud.google.com/go/bigtable/cmd/cbt/cbt_test.go @@ -0,0 +1,59 @@ +// Copyright 2016 Google Inc. All Rights Reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package main + +import ( + "testing" + "time" +) + +func TestParseDuration(t *testing.T) { + tests := []struct { + in string + // out or fail are mutually exclusive + out time.Duration + fail bool + }{ + {in: "10ms", out: 10 * time.Millisecond}, + {in: "3s", out: 3 * time.Second}, + {in: "60m", out: 60 * time.Minute}, + {in: "12h", out: 12 * time.Hour}, + {in: "7d", out: 168 * time.Hour}, + + {in: "", fail: true}, + {in: "0", fail: true}, + {in: "7ns", fail: true}, + {in: "14mo", fail: true}, + {in: "3.5h", fail: true}, + {in: "106752d", fail: true}, // overflow + } + for _, tc := range tests { + got, err := parseDuration(tc.in) + if !tc.fail && err != nil { + t.Errorf("parseDuration(%q) unexpectedly failed: %v", tc.in, err) + continue + } + if tc.fail && err == nil { + t.Errorf("parseDuration(%q) did not fail", tc.in) + continue + } + if tc.fail { + continue + } + if got != tc.out { + t.Errorf("parseDuration(%q) = %v, want %v", tc.in, got, tc.out) + } + } +} diff --git a/vendor/cloud.google.com/go/bigtable/cmd/cbt/cbtdoc.go b/vendor/cloud.google.com/go/bigtable/cmd/cbt/cbtdoc.go new file mode 100644 index 00000000..81981f36 --- /dev/null +++ b/vendor/cloud.google.com/go/bigtable/cmd/cbt/cbtdoc.go @@ -0,0 +1,191 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// DO NOT EDIT. THIS IS AUTOMATICALLY GENERATED. +// Run "go generate" to regenerate. +//go:generate go run cbt.go -o cbtdoc.go doc + +/* +Cbt is a tool for doing basic interactions with Cloud Bigtable. + +Usage: + + cbt [options] command [arguments] + +The commands are: + + count Count rows in a table + createfamily Create a column family + createtable Create a table + deletefamily Delete a column family + deleterow Delete a row + deletetable Delete a table + doc Print godoc-suitable documentation for cbt + help Print help text + listinstances List instances in a project + lookup Read from a single row + ls List tables and column families + mddoc Print documentation for cbt in Markdown format + read Read rows + set Set value of a cell + setgcpolicy Set the GC policy for a column family + +Use "cbt help " for more information about a command. + +The options are: + + -project string + project ID + -instance string + Cloud Bigtable instance + -creds string + if set, use application credentials in this file + + +Count rows in a table + +Usage: + cbt count
+ + + + +Create a column family + +Usage: + cbt createfamily
+ + + + +Create a table + +Usage: + cbt createtable
+ + + + +Delete a column family + +Usage: + cbt deletefamily
+ + + + +Delete a row + +Usage: + cbt deleterow
+ + + + +Delete a table + +Usage: + cbt deletetable
+ + + + +Print godoc-suitable documentation for cbt + +Usage: + cbt doc + + + + +Print help text + +Usage: + cbt help [command] + + + + +List instances in a project + +Usage: + cbt listinstances + + + + +Read from a single row + +Usage: + cbt lookup
+ + + + +List tables and column families + +Usage: + cbt ls List tables + cbt ls
List column families in
+ + + + +Print documentation for cbt in Markdown format + +Usage: + cbt mddoc + + + + +Read rows + +Usage: + cbt read
[start=] [end=] [prefix=] [count=] + start= Start reading at this row + end= Stop reading before this row + prefix= Read rows with this prefix + count= Read only this many rows + + + + + +Set value of a cell + +Usage: + cbt set
family:column=val[@ts] ... + family:column=val[@ts] may be repeated to set multiple cells. + + ts is an optional integer timestamp. + If it cannot be parsed, the `@ts` part will be + interpreted as part of the value. + + + + +Set the GC policy for a column family + +Usage: + cbt setgcpolicy
( maxage= | maxversions= ) + + maxage= Maximum timestamp age to preserve (e.g. "1h", "4d") + maxversions= Maximum number of versions to preserve + + + + +*/ +package main diff --git a/vendor/cloud.google.com/go/bigtable/cmd/emulator/cbtemulator.go b/vendor/cloud.google.com/go/bigtable/cmd/emulator/cbtemulator.go new file mode 100644 index 00000000..f561c14c --- /dev/null +++ b/vendor/cloud.google.com/go/bigtable/cmd/emulator/cbtemulator.go @@ -0,0 +1,44 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/* +cbtemulator launches the in-memory Cloud Bigtable server on the given address. 
+*/ +package main + +import ( + "flag" + "fmt" + "log" + + "cloud.google.com/go/bigtable/bttest" + "google.golang.org/grpc" +) + +var ( + host = flag.String("host", "localhost", "the address to bind to on the local machine") + port = flag.Int("port", 9000, "the port number to bind to on the local machine") +) + +func main() { + grpc.EnableTracing = false + flag.Parse() + srv, err := bttest.NewServer(fmt.Sprintf("%s:%d", *host, *port)) + if err != nil { + log.Fatalf("failed to start emulator: %v", err) + } + + fmt.Printf("Cloud Bigtable emulator running on %s\n", srv.Addr) + select {} +} diff --git a/vendor/cloud.google.com/go/bigtable/cmd/loadtest/loadtest.go b/vendor/cloud.google.com/go/bigtable/cmd/loadtest/loadtest.go new file mode 100644 index 00000000..8c760a6b --- /dev/null +++ b/vendor/cloud.google.com/go/bigtable/cmd/loadtest/loadtest.go @@ -0,0 +1,186 @@ +/* +Copyright 2015 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Loadtest does some load testing through the Go client library for Cloud Bigtable. 
+*/ +package main + +import ( + "bytes" + "flag" + "fmt" + "log" + "math/rand" + "os" + "sync" + "sync/atomic" + "time" + + "cloud.google.com/go/bigtable" + "cloud.google.com/go/bigtable/internal/cbtconfig" + "cloud.google.com/go/bigtable/internal/stat" + "golang.org/x/net/context" + "google.golang.org/api/option" +) + +var ( + runFor = flag.Duration("run_for", 5*time.Second, "how long to run the load test for") + scratchTable = flag.String("scratch_table", "loadtest-scratch", "name of table to use; should not already exist") + csvOutput = flag.String("csv_output", "", + "output path for statistics in .csv format. If this file already exists it will be overwritten.") + poolSize = flag.Int("pool_size", 1, "size of the gRPC connection pool to use for the data client") + reqCount = flag.Int("req_count", 100, "number of concurrent requests") + + config *cbtconfig.Config + client *bigtable.Client + adminClient *bigtable.AdminClient +) + +func main() { + var err error + config, err = cbtconfig.Load() + if err != nil { + log.Fatal(err) + } + config.RegisterFlags() + + flag.Parse() + if err := config.CheckFlags(cbtconfig.ProjectAndInstanceRequired); err != nil { + log.Fatal(err) + } + if config.Creds != "" { + os.Setenv("GOOGLE_APPLICATION_CREDENTIALS", config.Creds) + } + if flag.NArg() != 0 { + flag.Usage() + os.Exit(1) + } + + var options []option.ClientOption + if *poolSize > 1 { + options = append(options, option.WithGRPCConnectionPool(*poolSize)) + } + + var csvFile *os.File + if *csvOutput != "" { + csvFile, err = os.Create(*csvOutput) + if err != nil { + log.Fatalf("creating csv output file: %v", err) + } + defer csvFile.Close() + log.Printf("Writing statistics to %q ...", *csvOutput) + } + + log.Printf("Dialing connections...") + client, err = bigtable.NewClient(context.Background(), config.Project, config.Instance, options...) 
+ if err != nil { + log.Fatalf("Making bigtable.Client: %v", err) + } + defer client.Close() + adminClient, err = bigtable.NewAdminClient(context.Background(), config.Project, config.Instance) + if err != nil { + log.Fatalf("Making bigtable.AdminClient: %v", err) + } + defer adminClient.Close() + + // Create a scratch table. + log.Printf("Setting up scratch table...") + if err := adminClient.CreateTable(context.Background(), *scratchTable); err != nil { + log.Fatalf("Making scratch table %q: %v", *scratchTable, err) + } + if err := adminClient.CreateColumnFamily(context.Background(), *scratchTable, "f"); err != nil { + log.Fatalf("Making scratch table column family: %v", err) + } + // Upon a successful run, delete the table. Don't bother checking for errors. + defer adminClient.DeleteTable(context.Background(), *scratchTable) + + log.Printf("Starting load test... (run for %v)", *runFor) + tbl := client.Open(*scratchTable) + sem := make(chan int, *reqCount) // limit the number of requests happening at once + var reads, writes stats + stopTime := time.Now().Add(*runFor) + var wg sync.WaitGroup + for time.Now().Before(stopTime) { + sem <- 1 + wg.Add(1) + go func() { + defer wg.Done() + defer func() { <-sem }() + + ok := true + opStart := time.Now() + var stats *stats + defer func() { + stats.Record(ok, time.Since(opStart)) + }() + + row := fmt.Sprintf("row%d", rand.Intn(100)) // operate on 1 of 100 rows + + switch rand.Intn(10) { + default: + // read + stats = &reads + _, err := tbl.ReadRow(context.Background(), row, bigtable.RowFilter(bigtable.LatestNFilter(1))) + if err != nil { + log.Printf("Error doing read: %v", err) + ok = false + } + case 0, 1, 2, 3, 4: + // write + stats = &writes + mut := bigtable.NewMutation() + mut.Set("f", "col", bigtable.Now(), bytes.Repeat([]byte("0"), 1<<10)) // 1 KB write + if err := tbl.Apply(context.Background(), row, mut); err != nil { + log.Printf("Error doing mutation: %v", err) + ok = false + } + } + }() + } + wg.Wait() + + 
readsAgg := stat.NewAggregate("reads", reads.ds, reads.tries-reads.ok) + writesAgg := stat.NewAggregate("writes", writes.ds, writes.tries-writes.ok) + log.Printf("Reads (%d ok / %d tries):\n%v", reads.ok, reads.tries, readsAgg) + log.Printf("Writes (%d ok / %d tries):\n%v", writes.ok, writes.tries, writesAgg) + + if csvFile != nil { + stat.WriteCSV([]*stat.Aggregate{readsAgg, writesAgg}, csvFile) + } +} + +var allStats int64 // atomic + +type stats struct { + mu sync.Mutex + tries, ok int + ds []time.Duration +} + +func (s *stats) Record(ok bool, d time.Duration) { + s.mu.Lock() + s.tries++ + if ok { + s.ok++ + } + s.ds = append(s.ds, d) + s.mu.Unlock() + + if n := atomic.AddInt64(&allStats, 1); n%1000 == 0 { + log.Printf("Progress: done %d ops", n) + } +} diff --git a/vendor/cloud.google.com/go/bigtable/cmd/scantest/scantest.go b/vendor/cloud.google.com/go/bigtable/cmd/scantest/scantest.go new file mode 100644 index 00000000..72e3743b --- /dev/null +++ b/vendor/cloud.google.com/go/bigtable/cmd/scantest/scantest.go @@ -0,0 +1,155 @@ +/* +Copyright 2016 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Scantest does scan-related load testing against Cloud Bigtable. The logic here +mimics a similar test written using the Java client. 
+*/ +package main + +import ( + "bytes" + "flag" + "fmt" + "log" + "math/rand" + "os" + "sync" + "sync/atomic" + "text/tabwriter" + "time" + + "cloud.google.com/go/bigtable" + "cloud.google.com/go/bigtable/internal/cbtconfig" + "cloud.google.com/go/bigtable/internal/stat" + "golang.org/x/net/context" +) + +var ( + runFor = flag.Duration("run_for", 5*time.Second, "how long to run the load test for") + numScans = flag.Int("concurrent_scans", 1, "number of concurrent scans") + rowLimit = flag.Int("row_limit", 10000, "max number of records per scan") + + config *cbtconfig.Config + client *bigtable.Client +) + +func main() { + flag.Usage = func() { + fmt.Printf("Usage: scantest [options] \n\n") + flag.PrintDefaults() + } + + var err error + config, err = cbtconfig.Load() + if err != nil { + log.Fatal(err) + } + config.RegisterFlags() + + flag.Parse() + if err := config.CheckFlags(cbtconfig.ProjectAndInstanceRequired); err != nil { + log.Fatal(err) + } + if config.Creds != "" { + os.Setenv("GOOGLE_APPLICATION_CREDENTIALS", config.Creds) + } + if flag.NArg() != 1 { + flag.Usage() + os.Exit(1) + } + + table := flag.Arg(0) + + log.Printf("Dialing connections...") + client, err = bigtable.NewClient(context.Background(), config.Project, config.Instance) + if err != nil { + log.Fatalf("Making bigtable.Client: %v", err) + } + defer client.Close() + + log.Printf("Starting scan test... 
(run for %v)", *runFor) + tbl := client.Open(table) + sem := make(chan int, *numScans) // limit the number of requests happening at once + var scans stats + + stopTime := time.Now().Add(*runFor) + var wg sync.WaitGroup + for time.Now().Before(stopTime) { + sem <- 1 + wg.Add(1) + go func() { + defer wg.Done() + defer func() { <-sem }() + + ok := true + opStart := time.Now() + defer func() { + scans.Record(ok, time.Since(opStart)) + }() + + // Start at a random row key + key := fmt.Sprintf("user%d", rand.Int63()) + limit := bigtable.LimitRows(int64(*rowLimit)) + noop := func(bigtable.Row) bool { return true } + if err := tbl.ReadRows(context.Background(), bigtable.NewRange(key, ""), noop, limit); err != nil { + log.Printf("Error during scan: %v", err) + ok = false + } + }() + } + wg.Wait() + + agg := stat.NewAggregate("scans", scans.ds, scans.tries-scans.ok) + log.Printf("Scans (%d ok / %d tries):\nscan times:\n%v\nthroughput (rows/second):\n%v", + scans.ok, scans.tries, agg, throughputString(agg)) +} + +func throughputString(agg *stat.Aggregate) string { + var buf bytes.Buffer + tw := tabwriter.NewWriter(&buf, 0, 0, 1, ' ', 0) // one-space padding + rowLimitF := float64(*rowLimit) + fmt.Fprintf( + tw, + "min:\t%.2f\nmedian:\t%.2f\nmax:\t%.2f\n", + rowLimitF/agg.Max.Seconds(), + rowLimitF/agg.Median.Seconds(), + rowLimitF/agg.Min.Seconds()) + tw.Flush() + return buf.String() +} + +var allStats int64 // atomic + +type stats struct { + mu sync.Mutex + tries, ok int + ds []time.Duration +} + +func (s *stats) Record(ok bool, d time.Duration) { + s.mu.Lock() + s.tries++ + if ok { + s.ok++ + } + s.ds = append(s.ds, d) + s.mu.Unlock() + + if n := atomic.AddInt64(&allStats, 1); n%1000 == 0 { + log.Printf("Progress: done %d ops", n) + } +} diff --git a/vendor/cloud.google.com/go/bigtable/doc.go b/vendor/cloud.google.com/go/bigtable/doc.go new file mode 100644 index 00000000..0d7706f0 --- /dev/null +++ b/vendor/cloud.google.com/go/bigtable/doc.go @@ -0,0 +1,125 @@ +/* 
+Copyright 2015 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Package bigtable is an API to Google Cloud Bigtable. + +See https://cloud.google.com/bigtable/docs/ for general product documentation. + +Setup and Credentials + +Use NewClient or NewAdminClient to create a client that can be used to access +the data or admin APIs respectively. Both require credentials that have permission +to access the Cloud Bigtable API. + +If your program is run on Google App Engine or Google Compute Engine, using the Application Default Credentials +(https://developers.google.com/accounts/docs/application-default-credentials) +is the simplest option. Those credentials will be used by default when NewClient or NewAdminClient are called. + +To use alternate credentials, pass them to NewClient or NewAdminClient using option.WithTokenSource. +For instance, you can use service account credentials by visiting +https://cloud.google.com/console/project/MYPROJECT/apiui/credential, +creating a new OAuth "Client ID", storing the JSON key somewhere accessible, and writing + jsonKey, err := ioutil.ReadFile(pathToKeyFile) + ... + config, err := google.JWTConfigFromJSON(jsonKey, bigtable.Scope) // or bigtable.AdminScope, etc. + ... + client, err := bigtable.NewClient(ctx, project, instance, option.WithTokenSource(config.TokenSource(ctx))) + ... +Here, `google` means the golang.org/x/oauth2/google package +and `option` means the google.golang.org/api/option package. 
+ +Reading + +The principal way to read from a Bigtable is to use the ReadRows method on *Table. +A RowRange specifies a contiguous portion of a table. A Filter may be provided through +RowFilter to limit or transform the data that is returned. + tbl := client.Open("mytable") + ... + // Read all the rows starting with "com.google.", + // but only fetch the columns in the "links" family. + rr := bigtable.PrefixRange("com.google.") + err := tbl.ReadRows(ctx, rr, func(r Row) bool { + // do something with r + return true // keep going + }, bigtable.RowFilter(bigtable.FamilyFilter("links"))) + ... + +To read a single row, use the ReadRow helper method. + r, err := tbl.ReadRow(ctx, "com.google.cloud") // "com.google.cloud" is the entire row key + ... + +Writing + +This API exposes two distinct forms of writing to a Bigtable: a Mutation and a ReadModifyWrite. +The former expresses idempotent operations. +The latter expresses non-idempotent operations and returns the new values of updated cells. +These operations are performed by creating a Mutation or ReadModifyWrite (with NewMutation or NewReadModifyWrite), +building up one or more operations on that, and then using the Apply or ApplyReadModifyWrite +methods on a Table. + +For instance, to set a couple of cells in a table, + tbl := client.Open("mytable") + mut := bigtable.NewMutation() + mut.Set("links", "maps.google.com", bigtable.Now(), []byte("1")) + mut.Set("links", "golang.org", bigtable.Now(), []byte("1")) + err := tbl.Apply(ctx, "com.google.cloud", mut) + ... + +To increment an encoded value in one cell, + tbl := client.Open("mytable") + rmw := bigtable.NewReadModifyWrite() + rmw.Increment("links", "golang.org", 12) // add 12 to the cell in column "links:golang.org" + r, err := tbl.ApplyReadModifyWrite(ctx, "com.google.cloud", rmw) + ... + +Retries + +If a read or write operation encounters a transient error it will be retried until a successful +response, an unretryable error or the context deadline is reached. 
Non-idempotent writes (where +the timestamp is set to ServerTime) will not be retried. In the case of ReadRows, retried calls +will not re-scan rows that have already been processed. + +Authentication + +See examples of authorization and authentication at +https://godoc.org/cloud.google.com/go#pkg-examples. + +*/ +package bigtable // import "cloud.google.com/go/bigtable" + +// Scope constants for authentication credentials. +// These should be used when using credential creation functions such as oauth.NewServiceAccountFromFile. +const ( + // Scope is the OAuth scope for Cloud Bigtable data operations. + Scope = "https://www.googleapis.com/auth/bigtable.data" + // ReadonlyScope is the OAuth scope for Cloud Bigtable read-only data operations. + ReadonlyScope = "https://www.googleapis.com/auth/bigtable.readonly" + + // AdminScope is the OAuth scope for Cloud Bigtable table admin operations. + AdminScope = "https://www.googleapis.com/auth/bigtable.admin.table" + + // InstanceAdminScope is the OAuth scope for Cloud Bigtable instance (and cluster) admin operations. + InstanceAdminScope = "https://www.googleapis.com/auth/bigtable.admin.cluster" +) + +// clientUserAgent identifies the version of this package. +// It should be bumped upon significant changes only. +const clientUserAgent = "cbt-go/20160628" + +// resourcePrefixHeader is the name of the metadata header used to indicate +// the resource being operated on. +const resourcePrefixHeader = "google-cloud-resource-prefix" diff --git a/vendor/cloud.google.com/go/bigtable/export_test.go b/vendor/cloud.google.com/go/bigtable/export_test.go new file mode 100644 index 00000000..f5936ade --- /dev/null +++ b/vendor/cloud.google.com/go/bigtable/export_test.go @@ -0,0 +1,203 @@ +/* +Copyright 2016 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package bigtable + +import ( + "errors" + "flag" + "fmt" + "strings" + "time" + + "cloud.google.com/go/bigtable/bttest" + "golang.org/x/net/context" + "google.golang.org/api/option" + "google.golang.org/grpc" +) + +var legacyUseProd string +var integrationConfig IntegrationTestConfig + +func init() { + c := &integrationConfig + + flag.BoolVar(&c.UseProd, "it.use-prod", false, "Use remote bigtable instead of local emulator") + flag.StringVar(&c.AdminEndpoint, "it.admin-endpoint", "", "Admin api host and port") + flag.StringVar(&c.DataEndpoint, "it.data-endpoint", "", "Data api host and port") + flag.StringVar(&c.Project, "it.project", "", "Project to use for integration test") + flag.StringVar(&c.Instance, "it.instance", "", "Bigtable instance to use") + flag.StringVar(&c.Cluster, "it.cluster", "", "Bigtable cluster to use") + flag.StringVar(&c.Table, "it.table", "", "Bigtable table to create") + + // Backwards compat + flag.StringVar(&legacyUseProd, "use_prod", "", `DEPRECATED: if set to "proj,instance,table", run integration test against production`) + +} + +// IntegrationTestConfig contains parameters to pick and setup a IntegrationEnv for testing +type IntegrationTestConfig struct { + UseProd bool + AdminEndpoint string + DataEndpoint string + Project string + Instance string + Cluster string + Table string +} + +// IntegrationEnv represents a testing environment. 
+// The environment can be implemented using production or an emulator +type IntegrationEnv interface { + Config() IntegrationTestConfig + NewAdminClient() (*AdminClient, error) + NewClient() (*Client, error) + Close() +} + +// NewIntegrationEnv creates a new environment based on the command line args +func NewIntegrationEnv() (IntegrationEnv, error) { + c := integrationConfig + + if legacyUseProd != "" { + fmt.Println("WARNING: using legacy commandline arg -use_prod, please switch to -it.*") + parts := strings.SplitN(legacyUseProd, ",", 3) + c.UseProd = true + c.Project = parts[0] + c.Instance = parts[1] + c.Table = parts[2] + } + + if integrationConfig.UseProd { + return NewProdEnv(c) + } else { + return NewEmulatedEnv(c) + } +} + +// EmulatedEnv encapsulates the state of an emulator +type EmulatedEnv struct { + config IntegrationTestConfig + server *bttest.Server +} + +// NewEmulatedEnv builds and starts the emulator based environment +func NewEmulatedEnv(config IntegrationTestConfig) (*EmulatedEnv, error) { + srv, err := bttest.NewServer("127.0.0.1:0") + if err != nil { + return nil, err + } + + if config.Project == "" { + config.Project = "project" + } + if config.Instance == "" { + config.Instance = "instance" + } + if config.Table == "" { + config.Table = "mytable" + } + config.AdminEndpoint = srv.Addr + config.DataEndpoint = srv.Addr + + env := &EmulatedEnv{ + config: config, + server: srv, + } + return env, nil +} + +// Close stops & cleans up the emulator +func (e *EmulatedEnv) Close() { + e.server.Close() +} + +// Config gets the config used to build this environment +func (e *EmulatedEnv) Config() IntegrationTestConfig { + return e.config +} + +// NewAdminClient builds a new connected admin client for this environment +func (e *EmulatedEnv) NewAdminClient() (*AdminClient, error) { + timeout := 20 * time.Second + ctx, _ := context.WithTimeout(context.Background(), timeout) + conn, err := grpc.Dial(e.server.Addr, grpc.WithInsecure()) + if err != nil { + 
return nil, err + } + return NewAdminClient(ctx, e.config.Project, e.config.Instance, option.WithGRPCConn(conn)) +} + +// NewClient builds a new connected data client for this environment +func (e *EmulatedEnv) NewClient() (*Client, error) { + timeout := 20 * time.Second + ctx, _ := context.WithTimeout(context.Background(), timeout) + conn, err := grpc.Dial(e.server.Addr, grpc.WithInsecure()) + if err != nil { + return nil, err + } + return NewClient(ctx, e.config.Project, e.config.Instance, option.WithGRPCConn(conn)) +} + +// ProdEnv encapsulates the state necessary to connect to the external Bigtable service +type ProdEnv struct { + config IntegrationTestConfig +} + +// NewProdEnv builds the environment representation +func NewProdEnv(config IntegrationTestConfig) (*ProdEnv, error) { + if config.Project == "" { + return nil, errors.New("Project not set") + } + if config.Instance == "" { + return nil, errors.New("Instance not set") + } + if config.Table == "" { + return nil, errors.New("Table not set") + } + + return &ProdEnv{config}, nil +} + +// Close is a no-op for production environments +func (e *ProdEnv) Close() {} + +// Config gets the config used to build this environment +func (e *ProdEnv) Config() IntegrationTestConfig { + return e.config +} + +// NewAdminClient builds a new connected admin client for this environment +func (e *ProdEnv) NewAdminClient() (*AdminClient, error) { + timeout := 20 * time.Second + ctx, _ := context.WithTimeout(context.Background(), timeout) + var clientOpts []option.ClientOption + if endpoint := e.config.AdminEndpoint; endpoint != "" { + clientOpts = append(clientOpts, option.WithEndpoint(endpoint)) + } + return NewAdminClient(ctx, e.config.Project, e.config.Instance, clientOpts...) 
+} + +// NewClient builds a connected data client for this environment +func (e *ProdEnv) NewClient() (*Client, error) { + timeout := 20 * time.Second + ctx, _ := context.WithTimeout(context.Background(), timeout) + var clientOpts []option.ClientOption + if endpoint := e.config.DataEndpoint; endpoint != "" { + clientOpts = append(clientOpts, option.WithEndpoint(endpoint)) + } + return NewClient(ctx, e.config.Project, e.config.Instance, clientOpts...) +} diff --git a/vendor/cloud.google.com/go/bigtable/filter.go b/vendor/cloud.google.com/go/bigtable/filter.go new file mode 100644 index 00000000..fb85498c --- /dev/null +++ b/vendor/cloud.google.com/go/bigtable/filter.go @@ -0,0 +1,288 @@ +/* +Copyright 2015 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package bigtable + +import ( + "fmt" + "strings" + "time" + + btpb "google.golang.org/genproto/googleapis/bigtable/v2" +) + +// A Filter represents a row filter. +type Filter interface { + String() string + proto() *btpb.RowFilter +} + +// ChainFilters returns a filter that applies a sequence of filters. 
+func ChainFilters(sub ...Filter) Filter { return chainFilter{sub} } + +type chainFilter struct { + sub []Filter +} + +func (cf chainFilter) String() string { + var ss []string + for _, sf := range cf.sub { + ss = append(ss, sf.String()) + } + return "(" + strings.Join(ss, " | ") + ")" +} + +func (cf chainFilter) proto() *btpb.RowFilter { + chain := &btpb.RowFilter_Chain{} + for _, sf := range cf.sub { + chain.Filters = append(chain.Filters, sf.proto()) + } + return &btpb.RowFilter{ + Filter: &btpb.RowFilter_Chain_{chain}, + } +} + +// InterleaveFilters returns a filter that applies a set of filters in parallel +// and interleaves the results. +func InterleaveFilters(sub ...Filter) Filter { return interleaveFilter{sub} } + +type interleaveFilter struct { + sub []Filter +} + +func (ilf interleaveFilter) String() string { + var ss []string + for _, sf := range ilf.sub { + ss = append(ss, sf.String()) + } + return "(" + strings.Join(ss, " + ") + ")" +} + +func (ilf interleaveFilter) proto() *btpb.RowFilter { + inter := &btpb.RowFilter_Interleave{} + for _, sf := range ilf.sub { + inter.Filters = append(inter.Filters, sf.proto()) + } + return &btpb.RowFilter{ + Filter: &btpb.RowFilter_Interleave_{inter}, + } +} + +// RowKeyFilter returns a filter that matches cells from rows whose +// key matches the provided RE2 pattern. +// See https://github.com/google/re2/wiki/Syntax for the accepted syntax. +func RowKeyFilter(pattern string) Filter { return rowKeyFilter(pattern) } + +type rowKeyFilter string + +func (rkf rowKeyFilter) String() string { return fmt.Sprintf("row(%s)", string(rkf)) } + +func (rkf rowKeyFilter) proto() *btpb.RowFilter { + return &btpb.RowFilter{Filter: &btpb.RowFilter_RowKeyRegexFilter{[]byte(rkf)}} +} + +// FamilyFilter returns a filter that matches cells whose family name +// matches the provided RE2 pattern. +// See https://github.com/google/re2/wiki/Syntax for the accepted syntax. 
+func FamilyFilter(pattern string) Filter { return familyFilter(pattern) } + +type familyFilter string + +func (ff familyFilter) String() string { return fmt.Sprintf("col(%s:)", string(ff)) } + +func (ff familyFilter) proto() *btpb.RowFilter { + return &btpb.RowFilter{Filter: &btpb.RowFilter_FamilyNameRegexFilter{string(ff)}} +} + +// ColumnFilter returns a filter that matches cells whose column name +// matches the provided RE2 pattern. +// See https://github.com/google/re2/wiki/Syntax for the accepted syntax. +func ColumnFilter(pattern string) Filter { return columnFilter(pattern) } + +type columnFilter string + +func (cf columnFilter) String() string { return fmt.Sprintf("col(.*:%s)", string(cf)) } + +func (cf columnFilter) proto() *btpb.RowFilter { + return &btpb.RowFilter{Filter: &btpb.RowFilter_ColumnQualifierRegexFilter{[]byte(cf)}} +} + +// ValueFilter returns a filter that matches cells whose value +// matches the provided RE2 pattern. +// See https://github.com/google/re2/wiki/Syntax for the accepted syntax. +func ValueFilter(pattern string) Filter { return valueFilter(pattern) } + +type valueFilter string + +func (vf valueFilter) String() string { return fmt.Sprintf("value_match(%s)", string(vf)) } + +func (vf valueFilter) proto() *btpb.RowFilter { + return &btpb.RowFilter{Filter: &btpb.RowFilter_ValueRegexFilter{[]byte(vf)}} +} + +// LatestNFilter returns a filter that matches the most recent N cells in each column. +func LatestNFilter(n int) Filter { return latestNFilter(n) } + +type latestNFilter int32 + +func (lnf latestNFilter) String() string { return fmt.Sprintf("col(*,%d)", lnf) } + +func (lnf latestNFilter) proto() *btpb.RowFilter { + return &btpb.RowFilter{Filter: &btpb.RowFilter_CellsPerColumnLimitFilter{int32(lnf)}} +} + +// StripValueFilter returns a filter that replaces each value with the empty string. 
+func StripValueFilter() Filter { return stripValueFilter{} } + +type stripValueFilter struct{} + +func (stripValueFilter) String() string { return "strip_value()" } +func (stripValueFilter) proto() *btpb.RowFilter { + return &btpb.RowFilter{Filter: &btpb.RowFilter_StripValueTransformer{true}} +} + +// TimestampRangeFilter returns a filter that matches any rows whose timestamp is within the given time bounds. A zero +// time means no bound. +// The timestamp will be truncated to millisecond granularity. +func TimestampRangeFilter(startTime time.Time, endTime time.Time) Filter { + trf := timestampRangeFilter{} + if !startTime.IsZero() { + trf.startTime = Time(startTime) + } + if !endTime.IsZero() { + trf.endTime = Time(endTime) + } + return trf +} + +// TimestampRangeFilterMicros returns a filter that matches any rows whose timestamp is within the given time bounds, +// specified in units of microseconds since 1 January 1970. A zero value for the end time is interpreted as no bound. +// The timestamp will be truncated to millisecond granularity. +func TimestampRangeFilterMicros(startTime Timestamp, endTime Timestamp) Filter { + return timestampRangeFilter{startTime, endTime} +} + +type timestampRangeFilter struct { + startTime Timestamp + endTime Timestamp +} + +func (trf timestampRangeFilter) String() string { + return fmt.Sprintf("timestamp_range(%s,%s)", trf.startTime, trf.endTime) +} + +func (trf timestampRangeFilter) proto() *btpb.RowFilter { + return &btpb.RowFilter{ + Filter: &btpb.RowFilter_TimestampRangeFilter{ + &btpb.TimestampRange{ + int64(trf.startTime.TruncateToMilliseconds()), + int64(trf.endTime.TruncateToMilliseconds()), + }, + }} +} + +// ColumnRangeFilter returns a filter that matches a contiguous range of columns within a single +// family, as specified by an inclusive start qualifier and exclusive end qualifier. 
+func ColumnRangeFilter(family, start, end string) Filter { + return columnRangeFilter{family, start, end} +} + +type columnRangeFilter struct { + family string + start string + end string +} + +func (crf columnRangeFilter) String() string { + return fmt.Sprintf("columnRangeFilter(%s,%s,%s)", crf.family, crf.start, crf.end) +} + +func (crf columnRangeFilter) proto() *btpb.RowFilter { + r := &btpb.ColumnRange{FamilyName: crf.family} + if crf.start != "" { + r.StartQualifier = &btpb.ColumnRange_StartQualifierClosed{[]byte(crf.start)} + } + if crf.end != "" { + r.EndQualifier = &btpb.ColumnRange_EndQualifierOpen{[]byte(crf.end)} + } + return &btpb.RowFilter{&btpb.RowFilter_ColumnRangeFilter{r}} +} + +// ValueRangeFilter returns a filter that matches cells with values that fall within +// the given range, as specified by an inclusive start value and exclusive end value. +func ValueRangeFilter(start, end []byte) Filter { + return valueRangeFilter{start, end} +} + +type valueRangeFilter struct { + start []byte + end []byte +} + +func (vrf valueRangeFilter) String() string { + return fmt.Sprintf("valueRangeFilter(%s,%s)", vrf.start, vrf.end) +} + +func (vrf valueRangeFilter) proto() *btpb.RowFilter { + r := &btpb.ValueRange{} + if vrf.start != nil { + r.StartValue = &btpb.ValueRange_StartValueClosed{vrf.start} + } + if vrf.end != nil { + r.EndValue = &btpb.ValueRange_EndValueOpen{vrf.end} + } + return &btpb.RowFilter{&btpb.RowFilter_ValueRangeFilter{r}} +} + +// ConditionFilter returns a filter that evaluates to one of two possible filters depending +// on whether or not the given predicate filter matches at least one cell. +// If the matched filter is nil then no results will be returned. +// IMPORTANT NOTE: The predicate filter does not execute atomically with the +// true and false filters, which may lead to inconsistent or unexpected +// results. Additionally, condition filters have poor performance, especially +// when filters are set for the false condition. 
+func ConditionFilter(predicateFilter, trueFilter, falseFilter Filter) Filter { + return conditionFilter{predicateFilter, trueFilter, falseFilter} +} + +type conditionFilter struct { + predicateFilter Filter + trueFilter Filter + falseFilter Filter +} + +func (cf conditionFilter) String() string { + return fmt.Sprintf("conditionFilter(%s,%s,%s)", cf.predicateFilter, cf.trueFilter, cf.falseFilter) +} + +func (cf conditionFilter) proto() *btpb.RowFilter { + var tf *btpb.RowFilter + var ff *btpb.RowFilter + if cf.trueFilter != nil { + tf = cf.trueFilter.proto() + } + if cf.falseFilter != nil { + ff = cf.falseFilter.proto() + } + return &btpb.RowFilter{ + &btpb.RowFilter_Condition_{&btpb.RowFilter_Condition{ + cf.predicateFilter.proto(), + tf, + ff, + }}} +} + +// TODO(dsymonds): More filters: sampling diff --git a/vendor/cloud.google.com/go/bigtable/gc.go b/vendor/cloud.google.com/go/bigtable/gc.go new file mode 100644 index 00000000..621c7b35 --- /dev/null +++ b/vendor/cloud.google.com/go/bigtable/gc.go @@ -0,0 +1,131 @@ +/* +Copyright 2015 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package bigtable + +import ( + "fmt" + "strings" + "time" + + durpb "github.com/golang/protobuf/ptypes/duration" + bttdpb "google.golang.org/genproto/googleapis/bigtable/admin/v2" +) + +// A GCPolicy represents a rule that determines which cells are eligible for garbage collection. 
+type GCPolicy interface { + String() string + proto() *bttdpb.GcRule +} + +// IntersectionPolicy returns a GC policy that only applies when all its sub-policies apply. +func IntersectionPolicy(sub ...GCPolicy) GCPolicy { return intersectionPolicy{sub} } + +type intersectionPolicy struct { + sub []GCPolicy +} + +func (ip intersectionPolicy) String() string { + var ss []string + for _, sp := range ip.sub { + ss = append(ss, sp.String()) + } + return "(" + strings.Join(ss, " && ") + ")" +} + +func (ip intersectionPolicy) proto() *bttdpb.GcRule { + inter := &bttdpb.GcRule_Intersection{} + for _, sp := range ip.sub { + inter.Rules = append(inter.Rules, sp.proto()) + } + return &bttdpb.GcRule{ + Rule: &bttdpb.GcRule_Intersection_{inter}, + } +} + +// UnionPolicy returns a GC policy that applies when any of its sub-policies apply. +func UnionPolicy(sub ...GCPolicy) GCPolicy { return unionPolicy{sub} } + +type unionPolicy struct { + sub []GCPolicy +} + +func (up unionPolicy) String() string { + var ss []string + for _, sp := range up.sub { + ss = append(ss, sp.String()) + } + return "(" + strings.Join(ss, " || ") + ")" +} + +func (up unionPolicy) proto() *bttdpb.GcRule { + union := &bttdpb.GcRule_Union{} + for _, sp := range up.sub { + union.Rules = append(union.Rules, sp.proto()) + } + return &bttdpb.GcRule{ + Rule: &bttdpb.GcRule_Union_{union}, + } +} + +// MaxVersionsPolicy returns a GC policy that applies to all versions of a cell +// except for the most recent n. +func MaxVersionsPolicy(n int) GCPolicy { return maxVersionsPolicy(n) } + +type maxVersionsPolicy int + +func (mvp maxVersionsPolicy) String() string { return fmt.Sprintf("versions() > %d", int(mvp)) } + +func (mvp maxVersionsPolicy) proto() *bttdpb.GcRule { + return &bttdpb.GcRule{Rule: &bttdpb.GcRule_MaxNumVersions{int32(mvp)}} +} + +// MaxAgePolicy returns a GC policy that applies to all cells +// older than the given age. 
+func MaxAgePolicy(d time.Duration) GCPolicy { return maxAgePolicy(d) } + +type maxAgePolicy time.Duration + +var units = []struct { + d time.Duration + suffix string +}{ + {24 * time.Hour, "d"}, + {time.Hour, "h"}, + {time.Minute, "m"}, +} + +func (ma maxAgePolicy) String() string { + d := time.Duration(ma) + for _, u := range units { + if d%u.d == 0 { + return fmt.Sprintf("age() > %d%s", d/u.d, u.suffix) + } + } + return fmt.Sprintf("age() > %d", d/time.Microsecond) +} + +func (ma maxAgePolicy) proto() *bttdpb.GcRule { + // This doesn't handle overflows, etc. + // Fix this if people care about GC policies over 290 years. + ns := time.Duration(ma).Nanoseconds() + return &bttdpb.GcRule{ + Rule: &bttdpb.GcRule_MaxAge{&durpb.Duration{ + Seconds: ns / 1e9, + Nanos: int32(ns % 1e9), + }}, + } +} diff --git a/vendor/cloud.google.com/go/bigtable/internal/cbtconfig/cbtconfig.go b/vendor/cloud.google.com/go/bigtable/internal/cbtconfig/cbtconfig.go new file mode 100644 index 00000000..073406f0 --- /dev/null +++ b/vendor/cloud.google.com/go/bigtable/internal/cbtconfig/cbtconfig.go @@ -0,0 +1,246 @@ +/* +Copyright 2015 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package cbtconfig encapsulates common code for reading configuration from .cbtrc and gcloud. 
+package cbtconfig + +import ( + "bufio" + "bytes" + "crypto/tls" + "crypto/x509" + "encoding/json" + "flag" + "fmt" + "io/ioutil" + "log" + "os" + "os/exec" + "path/filepath" + "runtime" + "strings" + "time" + + "golang.org/x/oauth2" + "google.golang.org/grpc/credentials" +) + +// Config represents a configuration. +type Config struct { + Project, Instance string // required + Creds string // optional + AdminEndpoint string // optional + DataEndpoint string // optional + CertFile string // optional + TokenSource oauth2.TokenSource // derived + TLSCreds credentials.TransportCredentials // derived +} + +type RequiredFlags uint + +const NoneRequired RequiredFlags = 0 +const ( + ProjectRequired RequiredFlags = 1 << iota + InstanceRequired +) +const ProjectAndInstanceRequired RequiredFlags = ProjectRequired | InstanceRequired + +// RegisterFlags registers a set of standard flags for this config. +// It should be called before flag.Parse. +func (c *Config) RegisterFlags() { + flag.StringVar(&c.Project, "project", c.Project, "project ID, if unset uses gcloud configured project") + flag.StringVar(&c.Instance, "instance", c.Instance, "Cloud Bigtable instance") + flag.StringVar(&c.Creds, "creds", c.Creds, "if set, use application credentials in this file") + flag.StringVar(&c.AdminEndpoint, "admin-endpoint", c.AdminEndpoint, "Override the admin api endpoint") + flag.StringVar(&c.DataEndpoint, "data-endpoint", c.DataEndpoint, "Override the data api endpoint") + flag.StringVar(&c.CertFile, "cert-file", c.CertFile, "Override the TLS certificates file") +} + +// CheckFlags checks that the required config values are set. 
+func (c *Config) CheckFlags(required RequiredFlags) error { + var missing []string + if c.CertFile != "" { + b, err := ioutil.ReadFile(c.CertFile) + if err != nil { + return fmt.Errorf("Failed to load certificates from %s: %v", c.CertFile, err) + } + + cp := x509.NewCertPool() + if !cp.AppendCertsFromPEM(b) { + return fmt.Errorf("Failed to append certificates from %s", c.CertFile) + } + + c.TLSCreds = credentials.NewTLS(&tls.Config{RootCAs: cp}) + } + if required != NoneRequired { + c.SetFromGcloud() + } + if required&ProjectRequired != 0 && c.Project == "" { + missing = append(missing, "-project") + } + if required&InstanceRequired != 0 && c.Instance == "" { + missing = append(missing, "-instance") + } + if len(missing) > 0 { + return fmt.Errorf("Missing %s", strings.Join(missing, " and ")) + } + return nil +} + +// Filename returns the filename consulted for standard configuration. +func Filename() string { + // TODO(dsymonds): Might need tweaking for Windows. + return filepath.Join(os.Getenv("HOME"), ".cbtrc") +} + +// Load loads a .cbtrc file. +// If the file is not present, an empty config is returned. 
+func Load() (*Config, error) { + filename := Filename() + data, err := ioutil.ReadFile(filename) + if err != nil { + // silent fail if the file isn't there + if os.IsNotExist(err) { + return &Config{}, nil + } + return nil, fmt.Errorf("Reading %s: %v", filename, err) + } + c := new(Config) + s := bufio.NewScanner(bytes.NewReader(data)) + for s.Scan() { + line := s.Text() + i := strings.Index(line, "=") + if i < 0 { + return nil, fmt.Errorf("Bad line in %s: %q", filename, line) + } + key, val := strings.TrimSpace(line[:i]), strings.TrimSpace(line[i+1:]) + switch key { + default: + return nil, fmt.Errorf("Unknown key in %s: %q", filename, key) + case "project": + c.Project = val + case "instance": + c.Instance = val + case "creds": + c.Creds = val + case "admin-endpoint": + c.AdminEndpoint = val + case "data-endpoint": + c.DataEndpoint = val + } + + } + return c, s.Err() +} + +type GcloudCredential struct { + AccessToken string `json:"access_token"` + Expiry time.Time `json:"token_expiry"` +} + +func (cred *GcloudCredential) Token() *oauth2.Token { + return &oauth2.Token{AccessToken: cred.AccessToken, TokenType: "Bearer", Expiry: cred.Expiry} +} + +type GcloudConfig struct { + Configuration struct { + Properties struct { + Core struct { + Project string `json:"project"` + } `json:"core"` + } `json:"properties"` + } `json:"configuration"` + Credential GcloudCredential `json:"credential"` +} + +type GcloudCmdTokenSource struct { + Command string + Args []string +} + +// Token implements the oauth2.TokenSource interface +func (g *GcloudCmdTokenSource) Token() (*oauth2.Token, error) { + gcloudConfig, err := LoadGcloudConfig(g.Command, g.Args) + if err != nil { + return nil, err + } + return gcloudConfig.Credential.Token(), nil +} + +// LoadGcloudConfig retrieves the gcloud configuration values we need use via the +// 'config-helper' command +func LoadGcloudConfig(gcloudCmd string, gcloudCmdArgs []string) (*GcloudConfig, error) { + out, err := exec.Command(gcloudCmd, 
gcloudCmdArgs...).Output() + if err != nil { + return nil, fmt.Errorf("Could not retrieve gcloud configuration") + } + + var gcloudConfig GcloudConfig + if err := json.Unmarshal(out, &gcloudConfig); err != nil { + return nil, fmt.Errorf("Could not parse gcloud configuration") + } + + return &gcloudConfig, nil +} + +// SetFromGcloud retrieves and sets any missing config values from the gcloud +// configuration if possible possible +func (c *Config) SetFromGcloud() error { + + if c.Creds == "" { + c.Creds = os.Getenv("GOOGLE_APPLICATION_CREDENTIALS") + if c.Creds == "" { + log.Printf("-creds flag unset, will use gcloud credential") + } + } else { + os.Setenv("GOOGLE_APPLICATION_CREDENTIALS", c.Creds) + } + + if c.Project == "" { + log.Printf("-project flag unset, will use gcloud active project") + } + + if c.Creds != "" && c.Project != "" { + return nil + } + + gcloudCmd := "gcloud" + if runtime.GOOS == "windows" { + gcloudCmd = gcloudCmd + ".cmd" + } + + gcloudCmdArgs := []string{"config", "config-helper", + "--format=json(configuration.properties.core.project,credential)"} + + gcloudConfig, err := LoadGcloudConfig(gcloudCmd, gcloudCmdArgs) + if err != nil { + return err + } + + if c.Project == "" && gcloudConfig.Configuration.Properties.Core.Project != "" { + log.Printf("gcloud active project is \"%s\"", + gcloudConfig.Configuration.Properties.Core.Project) + c.Project = gcloudConfig.Configuration.Properties.Core.Project + } + + if c.Creds == "" { + c.TokenSource = oauth2.ReuseTokenSource( + gcloudConfig.Credential.Token(), + &GcloudCmdTokenSource{Command: gcloudCmd, Args: gcloudCmdArgs}) + } + + return nil +} diff --git a/vendor/cloud.google.com/go/bigtable/internal/gax/call_option.go b/vendor/cloud.google.com/go/bigtable/internal/gax/call_option.go new file mode 100644 index 00000000..60a18bee --- /dev/null +++ b/vendor/cloud.google.com/go/bigtable/internal/gax/call_option.go @@ -0,0 +1,106 @@ +/* +Copyright 2016 Google Inc. All Rights Reserved. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This is ia snapshot from github.com/googleapis/gax-go with minor modifications. +package gax + +import ( + "time" + + "google.golang.org/grpc/codes" +) + +type CallOption interface { + Resolve(*CallSettings) +} + +type callOptions []CallOption + +func (opts callOptions) Resolve(s *CallSettings) *CallSettings { + for _, opt := range opts { + opt.Resolve(s) + } + return s +} + +// Encapsulates the call settings for a particular API call. +type CallSettings struct { + Timeout time.Duration + RetrySettings RetrySettings +} + +// Per-call configurable settings for retrying upon transient failure. +type RetrySettings struct { + RetryCodes map[codes.Code]bool + BackoffSettings BackoffSettings +} + +// Parameters to the exponential backoff algorithm for retrying. 
+type BackoffSettings struct { + DelayTimeoutSettings MultipliableDuration + RPCTimeoutSettings MultipliableDuration +} + +type MultipliableDuration struct { + Initial time.Duration + Max time.Duration + Multiplier float64 +} + +func (w CallSettings) Resolve(s *CallSettings) { + s.Timeout = w.Timeout + s.RetrySettings = w.RetrySettings + + s.RetrySettings.RetryCodes = make(map[codes.Code]bool, len(w.RetrySettings.RetryCodes)) + for key, value := range w.RetrySettings.RetryCodes { + s.RetrySettings.RetryCodes[key] = value + } +} + +type withRetryCodes []codes.Code + +func (w withRetryCodes) Resolve(s *CallSettings) { + s.RetrySettings.RetryCodes = make(map[codes.Code]bool) + for _, code := range w { + s.RetrySettings.RetryCodes[code] = true + } +} + +// WithRetryCodes sets a list of Google API canonical error codes upon which a +// retry should be attempted. +func WithRetryCodes(retryCodes []codes.Code) CallOption { + return withRetryCodes(retryCodes) +} + +type withDelayTimeoutSettings MultipliableDuration + +func (w withDelayTimeoutSettings) Resolve(s *CallSettings) { + s.RetrySettings.BackoffSettings.DelayTimeoutSettings = MultipliableDuration(w) +} + +// WithDelayTimeoutSettings specifies: +// - The initial delay time, in milliseconds, between the completion of +// the first failed request and the initiation of the first retrying +// request. +// - The multiplier by which to increase the delay time between the +// completion of failed requests, and the initiation of the subsequent +// retrying request. +// - The maximum delay time, in milliseconds, between requests. When this +// value is reached, `RetryDelayMultiplier` will no longer be used to +// increase delay time. 
+func WithDelayTimeoutSettings(initial time.Duration, max time.Duration, multiplier float64) CallOption { + return withDelayTimeoutSettings(MultipliableDuration{initial, max, multiplier}) +} diff --git a/vendor/cloud.google.com/go/bigtable/internal/gax/invoke.go b/vendor/cloud.google.com/go/bigtable/internal/gax/invoke.go new file mode 100644 index 00000000..b7be7d41 --- /dev/null +++ b/vendor/cloud.google.com/go/bigtable/internal/gax/invoke.go @@ -0,0 +1,84 @@ +/* +Copyright 2015 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This is ia snapshot from github.com/googleapis/gax-go with minor modifications. +package gax + +import ( + "math/rand" + "time" + + "golang.org/x/net/context" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "log" + "os" +) + +var logger *log.Logger = log.New(os.Stderr, "", log.LstdFlags) + +// A user defined call stub. +type APICall func(context.Context) error + +// scaleDuration returns the product of a and mult. +func scaleDuration(a time.Duration, mult float64) time.Duration { + ns := float64(a) * mult + return time.Duration(ns) +} + +// invokeWithRetry calls stub using an exponential backoff retry mechanism +// based on the values provided in callSettings. 
+func invokeWithRetry(ctx context.Context, stub APICall, callSettings CallSettings) error { + retrySettings := callSettings.RetrySettings + backoffSettings := callSettings.RetrySettings.BackoffSettings + delay := backoffSettings.DelayTimeoutSettings.Initial + for { + // If the deadline is exceeded... + if ctx.Err() != nil { + return ctx.Err() + } + err := stub(ctx) + code := grpc.Code(err) + if code == codes.OK { + return nil + } + + if !retrySettings.RetryCodes[code] { + return err + } + + // Sleep a random amount up to the current delay + d := time.Duration(rand.Int63n(int64(delay))) + delayCtx, _ := context.WithTimeout(ctx, delay) + logger.Printf("Retryable error: %v, retrying in %v", err, d) + <-delayCtx.Done() + + delay = scaleDuration(delay, backoffSettings.DelayTimeoutSettings.Multiplier) + if delay > backoffSettings.DelayTimeoutSettings.Max { + delay = backoffSettings.DelayTimeoutSettings.Max + } + } +} + +// Invoke calls stub with a child of context modified by the specified options. +func Invoke(ctx context.Context, stub APICall, opts ...CallOption) error { + settings := &CallSettings{} + callOptions(opts).Resolve(settings) + if len(settings.RetrySettings.RetryCodes) > 0 { + return invokeWithRetry(ctx, stub, *settings) + } + return stub(ctx) +} diff --git a/vendor/cloud.google.com/go/bigtable/internal/gax/invoke_test.go b/vendor/cloud.google.com/go/bigtable/internal/gax/invoke_test.go new file mode 100644 index 00000000..40bad5fb --- /dev/null +++ b/vendor/cloud.google.com/go/bigtable/internal/gax/invoke_test.go @@ -0,0 +1,49 @@ +/* +Copyright 2015 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +package gax + +import ( + "testing" + "time" + + "golang.org/x/net/context" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" +) + +func TestRandomizedDelays(t *testing.T) { + max := 200 * time.Millisecond + settings := []CallOption{ + WithRetryCodes([]codes.Code{codes.Unavailable, codes.DeadlineExceeded}), + WithDelayTimeoutSettings(10*time.Millisecond, max, 1.5), + } + + deadline := time.Now().Add(1 * time.Second) + ctx, _ := context.WithDeadline(context.Background(), deadline) + var invokeTime time.Time + Invoke(ctx, func(childCtx context.Context) error { + // Keep failing, make sure we never slept more than max (plus a fudge factor) + if !invokeTime.IsZero() { + if got, want := time.Since(invokeTime), max; got > (want + 20*time.Millisecond) { + t.Fatalf("Slept too long. Got: %v, want: %v", got, max) + } + } + invokeTime = time.Now() + // Workaround for `go vet`: https://github.com/grpc/grpc-go/issues/90 + errf := grpc.Errorf + return errf(codes.Unavailable, "") + }, settings...) +} diff --git a/vendor/cloud.google.com/go/bigtable/internal/option/option.go b/vendor/cloud.google.com/go/bigtable/internal/option/option.go new file mode 100644 index 00000000..3b9072e6 --- /dev/null +++ b/vendor/cloud.google.com/go/bigtable/internal/option/option.go @@ -0,0 +1,48 @@ +/* +Copyright 2015 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package option contains common code for dealing with client options. +package option + +import ( + "fmt" + "os" + + "google.golang.org/api/option" + "google.golang.org/grpc" +) + +// DefaultClientOptions returns the default client options to use for the +// client's gRPC connection. +func DefaultClientOptions(endpoint, scope, userAgent string) ([]option.ClientOption, error) { + var o []option.ClientOption + // Check the environment variables for the bigtable emulator. + // Dial it directly and don't pass any credentials. + if addr := os.Getenv("BIGTABLE_EMULATOR_HOST"); addr != "" { + conn, err := grpc.Dial(addr, grpc.WithInsecure()) + if err != nil { + return nil, fmt.Errorf("emulator grpc.Dial: %v", err) + } + o = []option.ClientOption{option.WithGRPCConn(conn)} + } else { + o = []option.ClientOption{ + option.WithEndpoint(endpoint), + option.WithScopes(scope), + option.WithUserAgent(userAgent), + } + } + return o, nil +} diff --git a/vendor/cloud.google.com/go/bigtable/internal/stat/stats.go b/vendor/cloud.google.com/go/bigtable/internal/stat/stats.go new file mode 100644 index 00000000..5fb047f6 --- /dev/null +++ b/vendor/cloud.google.com/go/bigtable/internal/stat/stats.go @@ -0,0 +1,144 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package stat + +import ( + "bytes" + "encoding/csv" + "fmt" + "io" + "math" + "sort" + "strconv" + "text/tabwriter" + "time" +) + +type byDuration []time.Duration + +func (data byDuration) Len() int { return len(data) } +func (data byDuration) Swap(i, j int) { data[i], data[j] = data[j], data[i] } +func (data byDuration) Less(i, j int) bool { return data[i] < data[j] } + +// quantile returns a value representing the kth of q quantiles. +// May alter the order of data. +func quantile(data []time.Duration, k, q int) (quantile time.Duration, ok bool) { + if len(data) < 1 { + return 0, false + } + if k > q { + return 0, false + } + if k < 0 || q < 1 { + return 0, false + } + + sort.Sort(byDuration(data)) + + if k == 0 { + return data[0], true + } + if k == q { + return data[len(data)-1], true + } + + bucketSize := float64(len(data)-1) / float64(q) + i := float64(k) * bucketSize + + lower := int(math.Trunc(i)) + var upper int + if i > float64(lower) && lower+1 < len(data) { + // If the quantile lies between two elements + upper = lower + 1 + } else { + upper = lower + } + weightUpper := i - float64(lower) + weightLower := 1 - weightUpper + return time.Duration(weightLower*float64(data[lower]) + weightUpper*float64(data[upper])), true +} + +type Aggregate struct { + Name string + Count, Errors int + Min, Median, Max time.Duration + P75, P90, P95, P99 time.Duration // percentiles +} + +// NewAggregate constructs an aggregate from latencies. Returns nil if latencies does not contain aggregateable data. 
+func NewAggregate(name string, latencies []time.Duration, errorCount int) *Aggregate { + agg := Aggregate{Name: name, Count: len(latencies), Errors: errorCount} + + if len(latencies) == 0 { + return nil + } + var ok bool + if agg.Min, ok = quantile(latencies, 0, 2); !ok { + return nil + } + if agg.Median, ok = quantile(latencies, 1, 2); !ok { + return nil + } + if agg.Max, ok = quantile(latencies, 2, 2); !ok { + return nil + } + if agg.P75, ok = quantile(latencies, 75, 100); !ok { + return nil + } + if agg.P90, ok = quantile(latencies, 90, 100); !ok { + return nil + } + if agg.P95, ok = quantile(latencies, 95, 100); !ok { + return nil + } + if agg.P99, ok = quantile(latencies, 99, 100); !ok { + return nil + } + return &agg +} + +func (agg *Aggregate) String() string { + if agg == nil { + return "no data" + } + var buf bytes.Buffer + tw := tabwriter.NewWriter(&buf, 0, 0, 1, ' ', 0) // one-space padding + fmt.Fprintf(tw, "min:\t%v\nmedian:\t%v\nmax:\t%v\n95th percentile:\t%v\n99th percentile:\t%v\n", + agg.Min, agg.Median, agg.Max, agg.P95, agg.P99) + tw.Flush() + return buf.String() +} + +// WriteCSV writes a csv file to the given Writer, +// with a header row and one row per aggregate. 
+func WriteCSV(aggs []*Aggregate, iow io.Writer) error { + w := csv.NewWriter(iow) + defer w.Flush() + err := w.Write([]string{"name", "count", "errors", "min", "median", "max", "p75", "p90", "p95", "p99"}) + if err != nil { + return err + } + for _, agg := range aggs { + err = w.Write([]string{ + agg.Name, strconv.Itoa(agg.Count), strconv.Itoa(agg.Errors), + agg.Min.String(), agg.Median.String(), agg.Max.String(), + agg.P75.String(), agg.P90.String(), agg.P95.String(), agg.P99.String(), + }) + if err != nil { + return err + } + } + return nil +} diff --git a/vendor/cloud.google.com/go/bigtable/reader.go b/vendor/cloud.google.com/go/bigtable/reader.go new file mode 100644 index 00000000..4af2f702 --- /dev/null +++ b/vendor/cloud.google.com/go/bigtable/reader.go @@ -0,0 +1,250 @@ +/* +Copyright 2016 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package bigtable + +import ( + "bytes" + "fmt" + + btpb "google.golang.org/genproto/googleapis/bigtable/v2" +) + +// A Row is returned by ReadRows. The map is keyed by column family (the prefix +// of the column name before the colon). The values are the returned ReadItems +// for that column family in the order returned by Read. +type Row map[string][]ReadItem + +// Key returns the row's key, or "" if the row is empty. +func (r Row) Key() string { + for _, items := range r { + if len(items) > 0 { + return items[0].Row + } + } + return "" +} + +// A ReadItem is returned by Read. 
A ReadItem contains data from a specific row and column. +type ReadItem struct { + Row, Column string + Timestamp Timestamp + Value []byte +} + +// The current state of the read rows state machine. +type rrState int64 + +const ( + newRow rrState = iota + rowInProgress + cellInProgress +) + +// chunkReader handles cell chunks from the read rows response and combines +// them into full Rows. +type chunkReader struct { + state rrState + curKey []byte + curFam string + curQual []byte + curTS int64 + curVal []byte + curRow Row + lastKey string +} + +// newChunkReader returns a new chunkReader for handling read rows responses. +func newChunkReader() *chunkReader { + return &chunkReader{state: newRow} +} + +// Process takes a cell chunk and returns a new Row if the given chunk +// completes a Row, or nil otherwise. +func (cr *chunkReader) Process(cc *btpb.ReadRowsResponse_CellChunk) (Row, error) { + var row Row + switch cr.state { + case newRow: + if err := cr.validateNewRow(cc); err != nil { + return nil, err + } + + cr.curRow = make(Row) + cr.curKey = cc.RowKey + cr.curFam = cc.FamilyName.Value + cr.curQual = cc.Qualifier.Value + cr.curTS = cc.TimestampMicros + row = cr.handleCellValue(cc) + + case rowInProgress: + if err := cr.validateRowInProgress(cc); err != nil { + return nil, err + } + + if cc.GetResetRow() { + cr.resetToNewRow() + return nil, nil + } + + if cc.FamilyName != nil { + cr.curFam = cc.FamilyName.Value + } + if cc.Qualifier != nil { + cr.curQual = cc.Qualifier.Value + } + cr.curTS = cc.TimestampMicros + row = cr.handleCellValue(cc) + + case cellInProgress: + if err := cr.validateCellInProgress(cc); err != nil { + return nil, err + } + if cc.GetResetRow() { + cr.resetToNewRow() + return nil, nil + } + row = cr.handleCellValue(cc) + } + + return row, nil +} + +// Close must be called after all cell chunks from the response +// have been processed. 
An error will be returned if the reader is +// in an invalid state, in which case the error should be propagated to the caller. +func (cr *chunkReader) Close() error { + if cr.state != newRow { + return fmt.Errorf("invalid state for end of stream %q", cr.state) + } + return nil +} + +// handleCellValue returns a Row if the cell value includes a commit, otherwise nil. +func (cr *chunkReader) handleCellValue(cc *btpb.ReadRowsResponse_CellChunk) Row { + if cc.ValueSize > 0 { + // ValueSize is specified so expect a split value of ValueSize bytes + if cr.curVal == nil { + cr.curVal = make([]byte, 0, cc.ValueSize) + } + cr.curVal = append(cr.curVal, cc.Value...) + cr.state = cellInProgress + } else { + // This cell is either the complete value or the last chunk of a split + if cr.curVal == nil { + cr.curVal = cc.Value + } else { + cr.curVal = append(cr.curVal, cc.Value...) + } + cr.finishCell() + + if cc.GetCommitRow() { + return cr.commitRow() + } else { + cr.state = rowInProgress + } + } + + return nil +} + +func (cr *chunkReader) finishCell() { + ri := ReadItem{ + Row: string(cr.curKey), + Column: fmt.Sprintf("%s:%s", cr.curFam, cr.curQual), + Timestamp: Timestamp(cr.curTS), + Value: cr.curVal, + } + cr.curRow[cr.curFam] = append(cr.curRow[cr.curFam], ri) + cr.curVal = nil +} + +func (cr *chunkReader) commitRow() Row { + row := cr.curRow + cr.lastKey = cr.curRow.Key() + cr.resetToNewRow() + return row +} + +func (cr *chunkReader) resetToNewRow() { + cr.curKey = nil + cr.curFam = "" + cr.curQual = nil + cr.curVal = nil + cr.curRow = nil + cr.curTS = 0 + cr.state = newRow +} + +func (cr *chunkReader) validateNewRow(cc *btpb.ReadRowsResponse_CellChunk) error { + if cc.GetResetRow() { + return fmt.Errorf("reset_row not allowed between rows") + } + if cc.RowKey == nil || cc.FamilyName == nil || cc.Qualifier == nil { + return fmt.Errorf("missing key field for new row %v", cc) + } + if cr.lastKey != "" && cr.lastKey >= string(cc.RowKey) { + return fmt.Errorf("out of order 
row key: %q, %q", cr.lastKey, string(cc.RowKey)) + } + return nil +} + +func (cr *chunkReader) validateRowInProgress(cc *btpb.ReadRowsResponse_CellChunk) error { + if err := cr.validateRowStatus(cc); err != nil { + return err + } + if cc.RowKey != nil && !bytes.Equal(cc.RowKey, cr.curKey) { + return fmt.Errorf("received new row key %q during existing row %q", cc.RowKey, cr.curKey) + } + if cc.FamilyName != nil && cc.Qualifier == nil { + return fmt.Errorf("family name %q specified without a qualifier", cc.FamilyName) + } + return nil +} + +func (cr *chunkReader) validateCellInProgress(cc *btpb.ReadRowsResponse_CellChunk) error { + if err := cr.validateRowStatus(cc); err != nil { + return err + } + if cr.curVal == nil { + return fmt.Errorf("no cached cell while CELL_IN_PROGRESS %v", cc) + } + if cc.GetResetRow() == false && cr.isAnyKeyPresent(cc) { + return fmt.Errorf("cell key components found while CELL_IN_PROGRESS %v", cc) + } + return nil +} + +func (cr *chunkReader) isAnyKeyPresent(cc *btpb.ReadRowsResponse_CellChunk) bool { + return cc.RowKey != nil || + cc.FamilyName != nil || + cc.Qualifier != nil || + cc.TimestampMicros != 0 +} + +// Validate a RowStatus, commit or reset, if present. +func (cr *chunkReader) validateRowStatus(cc *btpb.ReadRowsResponse_CellChunk) error { + // Resets can't be specified with any other part of a cell + if cc.GetResetRow() && (cr.isAnyKeyPresent(cc) || + cc.Value != nil || + cc.ValueSize != 0 || + cc.Labels != nil) { + return fmt.Errorf("reset must not be specified with other fields %v", cc) + } + if cc.GetCommitRow() && cc.ValueSize > 0 { + return fmt.Errorf("commit row found in between chunks in a cell") + } + return nil +} diff --git a/vendor/cloud.google.com/go/bigtable/reader_test.go b/vendor/cloud.google.com/go/bigtable/reader_test.go new file mode 100644 index 00000000..24a17914 --- /dev/null +++ b/vendor/cloud.google.com/go/bigtable/reader_test.go @@ -0,0 +1,343 @@ +/* +Copyright 2016 Google Inc. All Rights Reserved. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package bigtable + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "reflect" + "strings" + "testing" + + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes/wrappers" + btspb "google.golang.org/genproto/googleapis/bigtable/v2" +) + +// Indicates that a field in the proto should be omitted, rather than included +// as a wrapped empty string. +const nilStr = "<>" + +func TestSingleCell(t *testing.T) { + cr := newChunkReader() + + // All in one cell + row, err := cr.Process(cc("rk", "fm", "col", 1, "value", 0, true)) + if err != nil { + t.Fatalf("Processing chunk: %v", err) + } + if row == nil { + t.Fatalf("Missing row") + } + if len(row["fm"]) != 1 { + t.Fatalf("Family name length mismatch %d, %d", 1, len(row["fm"])) + } + want := []ReadItem{ri("rk", "fm", "col", 1, "value")} + if !reflect.DeepEqual(row["fm"], want) { + t.Fatalf("Incorrect ReadItem: got: %v\nwant: %v\n", row["fm"], want) + } + if err := cr.Close(); err != nil { + t.Fatalf("Close: %v", err) + } +} + +func TestMultipleCells(t *testing.T) { + cr := newChunkReader() + + cr.Process(cc("rs", "fm1", "col1", 0, "val1", 0, false)) + cr.Process(cc("rs", "fm1", "col1", 1, "val2", 0, false)) + cr.Process(cc("rs", "fm1", "col2", 0, "val3", 0, false)) + cr.Process(cc("rs", "fm2", "col1", 0, "val4", 0, false)) + row, err := cr.Process(cc("rs", "fm2", "col2", 1, "extralongval5", 0, true)) + if err != nil { + t.Fatalf("Processing chunk: %v", err) + } + if row == 
nil { + t.Fatalf("Missing row") + } + + want := []ReadItem{ + ri("rs", "fm1", "col1", 0, "val1"), + ri("rs", "fm1", "col1", 1, "val2"), + ri("rs", "fm1", "col2", 0, "val3"), + } + if !reflect.DeepEqual(row["fm1"], want) { + t.Fatalf("Incorrect ReadItem: got: %v\nwant: %v\n", row["fm1"], want) + } + want = []ReadItem{ + ri("rs", "fm2", "col1", 0, "val4"), + ri("rs", "fm2", "col2", 1, "extralongval5"), + } + if !reflect.DeepEqual(row["fm2"], want) { + t.Fatalf("Incorrect ReadItem: got: %v\nwant: %v\n", row["fm2"], want) + } + if err := cr.Close(); err != nil { + t.Fatalf("Close: %v", err) + } +} + +func TestSplitCells(t *testing.T) { + cr := newChunkReader() + + cr.Process(cc("rs", "fm1", "col1", 0, "hello ", 11, false)) + cr.Process(ccData("world", 0, false)) + row, err := cr.Process(cc("rs", "fm1", "col2", 0, "val2", 0, true)) + if err != nil { + t.Fatalf("Processing chunk: %v", err) + } + if row == nil { + t.Fatalf("Missing row") + } + + want := []ReadItem{ + ri("rs", "fm1", "col1", 0, "hello world"), + ri("rs", "fm1", "col2", 0, "val2"), + } + if !reflect.DeepEqual(row["fm1"], want) { + t.Fatalf("Incorrect ReadItem: got: %v\nwant: %v\n", row["fm1"], want) + } + if err := cr.Close(); err != nil { + t.Fatalf("Close: %v", err) + } +} + +func TestMultipleRows(t *testing.T) { + cr := newChunkReader() + + row, err := cr.Process(cc("rs1", "fm1", "col1", 1, "val1", 0, true)) + if err != nil { + t.Fatalf("Processing chunk: %v", err) + } + want := []ReadItem{ri("rs1", "fm1", "col1", 1, "val1")} + if !reflect.DeepEqual(row["fm1"], want) { + t.Fatalf("Incorrect ReadItem: got: %v\nwant: %v\n", row["fm1"], want) + } + + row, err = cr.Process(cc("rs2", "fm2", "col2", 2, "val2", 0, true)) + if err != nil { + t.Fatalf("Processing chunk: %v", err) + } + want = []ReadItem{ri("rs2", "fm2", "col2", 2, "val2")} + if !reflect.DeepEqual(row["fm2"], want) { + t.Fatalf("Incorrect ReadItem: got: %v\nwant: %v\n", row["fm2"], want) + } + + if err := cr.Close(); err != nil { + 
t.Fatalf("Close: %v", err) + } +} + +func TestBlankQualifier(t *testing.T) { + cr := newChunkReader() + + row, err := cr.Process(cc("rs1", "fm1", "", 1, "val1", 0, true)) + if err != nil { + t.Fatalf("Processing chunk: %v", err) + } + want := []ReadItem{ri("rs1", "fm1", "", 1, "val1")} + if !reflect.DeepEqual(row["fm1"], want) { + t.Fatalf("Incorrect ReadItem: got: %v\nwant: %v\n", row["fm1"], want) + } + + row, err = cr.Process(cc("rs2", "fm2", "col2", 2, "val2", 0, true)) + if err != nil { + t.Fatalf("Processing chunk: %v", err) + } + want = []ReadItem{ri("rs2", "fm2", "col2", 2, "val2")} + if !reflect.DeepEqual(row["fm2"], want) { + t.Fatalf("Incorrect ReadItem: got: %v\nwant: %v\n", row["fm2"], want) + } + + if err := cr.Close(); err != nil { + t.Fatalf("Close: %v", err) + } +} + +func TestReset(t *testing.T) { + cr := newChunkReader() + + cr.Process(cc("rs", "fm1", "col1", 0, "val1", 0, false)) + cr.Process(cc("rs", "fm1", "col1", 1, "val2", 0, false)) + cr.Process(cc("rs", "fm1", "col2", 0, "val3", 0, false)) + cr.Process(ccReset()) + row, _ := cr.Process(cc("rs1", "fm1", "col1", 1, "val1", 0, true)) + want := []ReadItem{ri("rs1", "fm1", "col1", 1, "val1")} + if !reflect.DeepEqual(row["fm1"], want) { + t.Fatalf("Reset: got: %v\nwant: %v\n", row["fm1"], want) + } + if err := cr.Close(); err != nil { + t.Fatalf("Close: %v", err) + } +} + +func TestNewFamEmptyQualifier(t *testing.T) { + cr := newChunkReader() + + cr.Process(cc("rs", "fm1", "col1", 0, "val1", 0, false)) + _, err := cr.Process(cc(nilStr, "fm2", nilStr, 0, "val2", 0, true)) + if err == nil { + t.Fatalf("Expected error on second chunk with no qualifier set") + } +} + +// The read rows acceptance test reads a json file specifying a number of tests, +// each consisting of one or more cell chunk text protos and one or more resulting +// cells or errors. 
+type AcceptanceTest struct { + Tests []TestCase `json:"tests"` +} + +type TestCase struct { + Name string `json:"name"` + Chunks []string `json:"chunks"` + Results []TestResult `json:"results"` +} + +type TestResult struct { + RK string `json:"rk"` + FM string `json:"fm"` + Qual string `json:"qual"` + TS int64 `json:"ts"` + Value string `json:"value"` + Error bool `json:"error"` // If true, expect an error. Ignore any other field. +} + +func TestAcceptance(t *testing.T) { + testJson, err := ioutil.ReadFile("./testdata/read-rows-acceptance-test.json") + if err != nil { + t.Fatalf("could not open acceptance test file %v", err) + } + + var accTest AcceptanceTest + err = json.Unmarshal(testJson, &accTest) + if err != nil { + t.Fatalf("could not parse acceptance test file: %v", err) + } + + for _, test := range accTest.Tests { + runTestCase(t, test) + } +} + +func runTestCase(t *testing.T, test TestCase) { + // Increment an index into the result array as we get results + cr := newChunkReader() + var results []TestResult + var seenErr bool + for _, chunkText := range test.Chunks { + // Parse and pass each cell chunk to the ChunkReader + cc := &btspb.ReadRowsResponse_CellChunk{} + err := proto.UnmarshalText(chunkText, cc) + if err != nil { + t.Errorf("[%s] failed to unmarshal text proto: %s\n%s", test.Name, chunkText, err) + return + } + row, err := cr.Process(cc) + if err != nil { + results = append(results, TestResult{Error: true}) + seenErr = true + break + } else { + // Turn the Row into TestResults + for fm, ris := range row { + for _, ri := range ris { + tr := TestResult{ + RK: ri.Row, + FM: fm, + Qual: strings.Split(ri.Column, ":")[1], + TS: int64(ri.Timestamp), + Value: string(ri.Value), + } + results = append(results, tr) + } + } + } + } + + // Only Close if we don't have an error yet, otherwise Close: is expected. 
+ if !seenErr { + err := cr.Close() + if err != nil { + results = append(results, TestResult{Error: true}) + } + } + + got := toSet(results) + want := toSet(test.Results) + if !reflect.DeepEqual(got, want) { + t.Fatalf("[%s]: got: %v\nwant: %v\n", test.Name, got, want) + } +} + +func toSet(res []TestResult) map[TestResult]bool { + set := make(map[TestResult]bool) + for _, tr := range res { + set[tr] = true + } + return set +} + +// ri returns a ReadItem for the given components +func ri(rk string, fm string, qual string, ts int64, val string) ReadItem { + return ReadItem{Row: rk, Column: fmt.Sprintf("%s:%s", fm, qual), Value: []byte(val), Timestamp: Timestamp(ts)} +} + +// cc returns a CellChunk proto +func cc(rk string, fm string, qual string, ts int64, val string, size int32, commit bool) *btspb.ReadRowsResponse_CellChunk { + // The components of the cell key are wrapped and can be null or empty + var rkWrapper []byte + if rk == nilStr { + rkWrapper = nil + } else { + rkWrapper = []byte(rk) + } + + var fmWrapper *wrappers.StringValue + if fm != nilStr { + fmWrapper = &wrappers.StringValue{Value: fm} + } else { + fmWrapper = nil + } + + var qualWrapper *wrappers.BytesValue + if qual != nilStr { + qualWrapper = &wrappers.BytesValue{Value: []byte(qual)} + } else { + qualWrapper = nil + } + + return &btspb.ReadRowsResponse_CellChunk{ + RowKey: rkWrapper, + FamilyName: fmWrapper, + Qualifier: qualWrapper, + TimestampMicros: ts, + Value: []byte(val), + ValueSize: size, + RowStatus: &btspb.ReadRowsResponse_CellChunk_CommitRow{CommitRow: commit}} +} + +// ccData returns a CellChunk with only a value and size +func ccData(val string, size int32, commit bool) *btspb.ReadRowsResponse_CellChunk { + return cc(nilStr, nilStr, nilStr, 0, val, size, commit) +} + +// ccReset returns a CellChunk with RestRow set to true +func ccReset() *btspb.ReadRowsResponse_CellChunk { + return &btspb.ReadRowsResponse_CellChunk{ + RowStatus: &btspb.ReadRowsResponse_CellChunk_ResetRow{ResetRow: 
true}} +} diff --git a/vendor/cloud.google.com/go/bigtable/retry_test.go b/vendor/cloud.google.com/go/bigtable/retry_test.go new file mode 100644 index 00000000..27375fc5 --- /dev/null +++ b/vendor/cloud.google.com/go/bigtable/retry_test.go @@ -0,0 +1,362 @@ +/* +Copyright 2016 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +package bigtable + +import ( + "reflect" + "strings" + "testing" + "time" + + "cloud.google.com/go/bigtable/bttest" + "github.com/golang/protobuf/ptypes/wrappers" + "golang.org/x/net/context" + "google.golang.org/api/option" + btpb "google.golang.org/genproto/googleapis/bigtable/v2" + rpcpb "google.golang.org/genproto/googleapis/rpc/status" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" +) + +func setupFakeServer(opt ...grpc.ServerOption) (tbl *Table, cleanup func(), err error) { + srv, err := bttest.NewServer("127.0.0.1:0", opt...) 
+ if err != nil { + return nil, nil, err + } + conn, err := grpc.Dial(srv.Addr, grpc.WithInsecure()) + if err != nil { + return nil, nil, err + } + + client, err := NewClient(context.Background(), "client", "instance", option.WithGRPCConn(conn)) + if err != nil { + return nil, nil, err + } + + adminClient, err := NewAdminClient(context.Background(), "client", "instance", option.WithGRPCConn(conn)) + if err != nil { + return nil, nil, err + } + if err := adminClient.CreateTable(context.Background(), "table"); err != nil { + return nil, nil, err + } + if err := adminClient.CreateColumnFamily(context.Background(), "table", "cf"); err != nil { + return nil, nil, err + } + t := client.Open("table") + + cleanupFunc := func() { + adminClient.Close() + client.Close() + srv.Close() + } + return t, cleanupFunc, nil +} + +func TestRetryApply(t *testing.T) { + ctx := context.Background() + + errCount := 0 + code := codes.Unavailable // Will be retried + // Intercept requests and return an error or defer to the underlying handler + errInjector := func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) { + if strings.HasSuffix(info.FullMethod, "MutateRow") && errCount < 3 { + errCount++ + return nil, grpc.Errorf(code, "") + } + return handler(ctx, req) + } + tbl, cleanup, err := setupFakeServer(grpc.UnaryInterceptor(errInjector)) + defer cleanup() + if err != nil { + t.Fatalf("fake server setup: %v", err) + } + + mut := NewMutation() + mut.Set("cf", "col", 1, []byte("val")) + if err := tbl.Apply(ctx, "row1", mut); err != nil { + t.Errorf("applying single mutation with retries: %v", err) + } + row, err := tbl.ReadRow(ctx, "row1") + if err != nil { + t.Errorf("reading single value with retries: %v", err) + } + if row == nil { + t.Errorf("applying single mutation with retries: could not read back row") + } + + code = codes.FailedPrecondition // Won't be retried + errCount = 0 + if err := tbl.Apply(ctx, "row", mut); err 
== nil { + t.Errorf("applying single mutation with no retries: no error") + } + + // Check and mutate + mutTrue := NewMutation() + mutTrue.DeleteRow() + mutFalse := NewMutation() + mutFalse.Set("cf", "col", 1, []byte("val")) + condMut := NewCondMutation(ValueFilter("."), mutTrue, mutFalse) + + errCount = 0 + code = codes.Unavailable // Will be retried + if err := tbl.Apply(ctx, "row1", condMut); err != nil { + t.Errorf("conditionally mutating row with retries: %v", err) + } + row, err = tbl.ReadRow(ctx, "row1") // row1 already in the table + if err != nil { + t.Errorf("reading single value after conditional mutation: %v", err) + } + if row != nil { + t.Errorf("reading single value after conditional mutation: row not deleted") + } + + errCount = 0 + code = codes.FailedPrecondition // Won't be retried + if err := tbl.Apply(ctx, "row", condMut); err == nil { + t.Errorf("conditionally mutating row with no retries: no error") + } +} + +func TestRetryApplyBulk(t *testing.T) { + ctx := context.Background() + + // Intercept requests and delegate to an interceptor defined by the test case + errCount := 0 + var f func(grpc.ServerStream) error + errInjector := func(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error { + if strings.HasSuffix(info.FullMethod, "MutateRows") { + return f(ss) + } + return handler(ctx, ss) + } + + tbl, cleanup, err := setupFakeServer(grpc.StreamInterceptor(errInjector)) + defer cleanup() + if err != nil { + t.Fatalf("fake server setup: %v", err) + } + + errCount = 0 + // Test overall request failure and retries + f = func(ss grpc.ServerStream) error { + if errCount < 3 { + errCount++ + return grpc.Errorf(codes.Aborted, "") + } + return nil + } + mut := NewMutation() + mut.Set("cf", "col", 1, []byte{}) + errors, err := tbl.ApplyBulk(ctx, []string{"row2"}, []*Mutation{mut}) + if errors != nil || err != nil { + t.Errorf("bulk with request failure: got: %v, %v, want: nil", errors, err) + } + + // Test 
failures and retries in one request + errCount = 0 + m1 := NewMutation() + m1.Set("cf", "col", 1, []byte{}) + m2 := NewMutation() + m2.Set("cf", "col2", 1, []byte{}) + m3 := NewMutation() + m3.Set("cf", "col3", 1, []byte{}) + f = func(ss grpc.ServerStream) error { + var err error + req := new(btpb.MutateRowsRequest) + ss.RecvMsg(req) + switch errCount { + case 0: + // Retryable request failure + err = grpc.Errorf(codes.Unavailable, "") + case 1: + // Two mutations fail + writeMutateRowsResponse(ss, codes.Unavailable, codes.OK, codes.Aborted) + err = nil + case 2: + // Two failures were retried. One will succeed. + if want, got := 2, len(req.Entries); want != got { + t.Errorf("2 bulk retries, got: %d, want %d", got, want) + } + writeMutateRowsResponse(ss, codes.OK, codes.Aborted) + err = nil + case 3: + // One failure was retried and will succeed. + if want, got := 1, len(req.Entries); want != got { + t.Errorf("1 bulk retry, got: %d, want %d", got, want) + } + writeMutateRowsResponse(ss, codes.OK) + err = nil + } + errCount++ + return err + } + errors, err = tbl.ApplyBulk(ctx, []string{"row1", "row2", "row3"}, []*Mutation{m1, m2, m3}) + if errors != nil || err != nil { + t.Errorf("bulk with retries: got: %v, %v, want: nil", errors, err) + } + + // Test unretryable errors + niMut := NewMutation() + niMut.Set("cf", "col", ServerTime, []byte{}) // Non-idempotent + errCount = 0 + f = func(ss grpc.ServerStream) error { + var err error + req := new(btpb.MutateRowsRequest) + ss.RecvMsg(req) + switch errCount { + case 0: + // Give non-idempotent mutation a retryable error code. + // Nothing should be retried. 
+ writeMutateRowsResponse(ss, codes.FailedPrecondition, codes.Aborted) + err = nil + case 1: + t.Errorf("unretryable errors: got one retry, want no retries") + } + errCount++ + return err + } + errors, err = tbl.ApplyBulk(ctx, []string{"row1", "row2"}, []*Mutation{m1, niMut}) + if err != nil { + t.Errorf("unretryable errors: request failed %v") + } + want := []error{ + grpc.Errorf(codes.FailedPrecondition, ""), + grpc.Errorf(codes.Aborted, ""), + } + if !reflect.DeepEqual(want, errors) { + t.Errorf("unretryable errors: got: %v, want: %v", errors, want) + } + + // Test individual errors and a deadline exceeded + f = func(ss grpc.ServerStream) error { + writeMutateRowsResponse(ss, codes.FailedPrecondition, codes.OK, codes.Aborted) + return nil + } + ctx, _ = context.WithTimeout(ctx, 100*time.Millisecond) + errors, err = tbl.ApplyBulk(ctx, []string{"row1", "row2", "row3"}, []*Mutation{m1, m2, m3}) + wantErr := context.DeadlineExceeded + if wantErr != err { + t.Errorf("deadline exceeded error: got: %v, want: %v", err, wantErr) + } + if errors != nil { + t.Errorf("deadline exceeded errors: got: %v, want: nil", err) + } +} + +func writeMutateRowsResponse(ss grpc.ServerStream, codes ...codes.Code) error { + res := &btpb.MutateRowsResponse{Entries: make([]*btpb.MutateRowsResponse_Entry, len(codes))} + for i, code := range codes { + res.Entries[i] = &btpb.MutateRowsResponse_Entry{ + Index: int64(i), + Status: &rpcpb.Status{Code: int32(code), Message: ""}, + } + } + return ss.SendMsg(res) +} + +func TestRetainRowsAfter(t *testing.T) { + prevRowRange := NewRange("a", "z") + prevRowKey := "m" + want := NewRange("m\x00", "z") + got := prevRowRange.retainRowsAfter(prevRowKey) + if !reflect.DeepEqual(want, got) { + t.Errorf("range retry: got %v, want %v", got, want) + } + + prevRowList := RowList{"a", "b", "c", "d", "e", "f"} + prevRowKey = "b" + wantList := RowList{"c", "d", "e", "f"} + got = prevRowList.retainRowsAfter(prevRowKey) + if !reflect.DeepEqual(wantList, got) { + 
t.Errorf("list retry: got %v, want %v", got, wantList) + } +} + +func TestRetryReadRows(t *testing.T) { + ctx := context.Background() + + // Intercept requests and delegate to an interceptor defined by the test case + errCount := 0 + var f func(grpc.ServerStream) error + errInjector := func(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error { + if strings.HasSuffix(info.FullMethod, "ReadRows") { + return f(ss) + } + return handler(ctx, ss) + } + + tbl, cleanup, err := setupFakeServer(grpc.StreamInterceptor(errInjector)) + defer cleanup() + if err != nil { + t.Fatalf("fake server setup: %v", err) + } + + errCount = 0 + // Test overall request failure and retries + f = func(ss grpc.ServerStream) error { + var err error + req := new(btpb.ReadRowsRequest) + ss.RecvMsg(req) + switch errCount { + case 0: + // Retryable request failure + err = grpc.Errorf(codes.Unavailable, "") + case 1: + // Write two rows then error + if want, got := "a", string(req.Rows.RowRanges[0].GetStartKeyClosed()); want != got { + t.Errorf("first retry, no data received yet: got %q, want %q", got, want) + } + writeReadRowsResponse(ss, "a", "b") + err = grpc.Errorf(codes.Unavailable, "") + case 2: + // Retryable request failure + if want, got := "b\x00", string(req.Rows.RowRanges[0].GetStartKeyClosed()); want != got { + t.Errorf("2 range retries: got %q, want %q", got, want) + } + err = grpc.Errorf(codes.Unavailable, "") + case 3: + // Write two more rows + writeReadRowsResponse(ss, "c", "d") + err = nil + } + errCount++ + return err + } + + var got []string + tbl.ReadRows(ctx, NewRange("a", "z"), func(r Row) bool { + got = append(got, r.Key()) + return true + }) + want := []string{"a", "b", "c", "d"} + if !reflect.DeepEqual(got, want) { + t.Errorf("retry range integration: got %v, want %v", got, want) + } +} + +func writeReadRowsResponse(ss grpc.ServerStream, rowKeys ...string) error { + var chunks []*btpb.ReadRowsResponse_CellChunk + for _, key 
:= range rowKeys { + chunks = append(chunks, &btpb.ReadRowsResponse_CellChunk{ + RowKey: []byte(key), + FamilyName: &wrappers.StringValue{Value: "fm"}, + Qualifier: &wrappers.BytesValue{Value: []byte("col")}, + RowStatus: &btpb.ReadRowsResponse_CellChunk_CommitRow{CommitRow: true}, + }) + } + return ss.SendMsg(&btpb.ReadRowsResponse{Chunks: chunks}) +} diff --git a/vendor/cloud.google.com/go/bigtable/testdata/read-rows-acceptance-test.json b/vendor/cloud.google.com/go/bigtable/testdata/read-rows-acceptance-test.json new file mode 100644 index 00000000..4973831f --- /dev/null +++ b/vendor/cloud.google.com/go/bigtable/testdata/read-rows-acceptance-test.json @@ -0,0 +1,1178 @@ +{ + "tests": [ + { + "name": "invalid - no commit", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: false\n" + ], + "results": [ + { + "rk": "", + "fm": "", + "qual": "", + "ts": 0, + "value": "", + "label": "", + "error": true + } + ] + }, + { + "name": "invalid - no cell key before commit", + "chunks": [ + "commit_row: true\n" + ], + "results": [ + { + "rk": "", + "fm": "", + "qual": "", + "ts": 0, + "value": "", + "label": "", + "error": true + } + ] + }, + { + "name": "invalid - no cell key before value", + "chunks": [ + "timestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: false\n" + ], + "results": [ + { + "rk": "", + "fm": "", + "qual": "", + "ts": 0, + "value": "", + "label": "", + "error": true + } + ] + }, + { + "name": "invalid - new col family must specify qualifier", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 101\nvalue: \"value-VAL_1\"\ncommit_row: false\n", + "family_name: \u003c\n value: \"B\"\n\u003e\ntimestamp_micros: 102\nvalue: \"value-VAL_2\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "", + "fm": "", + "qual": "", + "ts": 0, + "value": "", 
+ "label": "", + "error": true + } + ] + }, + { + "name": "bare commit implies ts=0", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: false\n", + "commit_row: true\n" + ], + "results": [ + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 100, + "value": "value-VAL", + "label": "", + "error": false + }, + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 0, + "value": "", + "label": "", + "error": false + } + ] + }, + { + "name": "simple row with timestamp", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 100, + "value": "value-VAL", + "label": "", + "error": false + } + ] + }, + { + "name": "missing timestamp, implied ts=0", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\nvalue: \"value-VAL\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 0, + "value": "value-VAL", + "label": "", + "error": false + } + ] + }, + { + "name": "empty cell value", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 0, + "value": "", + "label": "", + "error": false + } + ] + }, + { + "name": "two unsplit cells", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 101\nvalue: \"value-VAL_1\"\ncommit_row: false\n", + "timestamp_micros: 102\nvalue: \"value-VAL_2\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 101, + "value": "value-VAL_1", + 
"label": "", + "error": false + }, + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 102, + "value": "value-VAL_2", + "label": "", + "error": false + } + ] + }, + { + "name": "two qualifiers", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 101\nvalue: \"value-VAL_1\"\ncommit_row: false\n", + "qualifier: \u003c\n value: \"D\"\n\u003e\ntimestamp_micros: 102\nvalue: \"value-VAL_2\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 101, + "value": "value-VAL_1", + "label": "", + "error": false + }, + { + "rk": "RK", + "fm": "A", + "qual": "D", + "ts": 102, + "value": "value-VAL_2", + "label": "", + "error": false + } + ] + }, + { + "name": "two families", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 101\nvalue: \"value-VAL_1\"\ncommit_row: false\n", + "family_name: \u003c\n value: \"B\"\n\u003e\nqualifier: \u003c\n value: \"E\"\n\u003e\ntimestamp_micros: 102\nvalue: \"value-VAL_2\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 101, + "value": "value-VAL_1", + "label": "", + "error": false + }, + { + "rk": "RK", + "fm": "B", + "qual": "E", + "ts": 102, + "value": "value-VAL_2", + "label": "", + "error": false + } + ] + }, + { + "name": "with labels", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 101\nlabels: \"L_1\"\nvalue: \"value-VAL_1\"\ncommit_row: false\n", + "timestamp_micros: 102\nlabels: \"L_2\"\nvalue: \"value-VAL_2\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 101, + "value": "value-VAL_1", + "label": "L_1", + "error": false + }, + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 102, + "value": "value-VAL_2", + "label": "L_2", + "error": 
false + } + ] + }, + { + "name": "split cell, bare commit", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"v\"\nvalue_size: 10\ncommit_row: false\n", + "value: \"alue-VAL\"\ncommit_row: false\n", + "commit_row: true\n" + ], + "results": [ + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 100, + "value": "value-VAL", + "label": "", + "error": false + }, + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 0, + "value": "", + "label": "", + "error": false + } + ] + }, + { + "name": "split cell", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"v\"\nvalue_size: 10\ncommit_row: false\n", + "value: \"alue-VAL\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 100, + "value": "value-VAL", + "label": "", + "error": false + } + ] + }, + { + "name": "split four ways", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nlabels: \"L\"\nvalue: \"v\"\nvalue_size: 10\ncommit_row: false\n", + "value: \"a\"\nvalue_size: 10\ncommit_row: false\n", + "value: \"l\"\nvalue_size: 10\ncommit_row: false\n", + "value: \"ue-VAL\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 100, + "value": "value-VAL", + "label": "L", + "error": false + } + ] + }, + { + "name": "two split cells", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 101\nvalue: \"v\"\nvalue_size: 10\ncommit_row: false\n", + "value: \"alue-VAL_1\"\ncommit_row: false\n", + "timestamp_micros: 102\nvalue: \"v\"\nvalue_size: 10\ncommit_row: false\n", + "value: \"alue-VAL_2\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK", + "fm": "A", + 
"qual": "C", + "ts": 101, + "value": "value-VAL_1", + "label": "", + "error": false + }, + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 102, + "value": "value-VAL_2", + "label": "", + "error": false + } + ] + }, + { + "name": "multi-qualifier splits", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 101\nvalue: \"v\"\nvalue_size: 10\ncommit_row: false\n", + "value: \"alue-VAL_1\"\ncommit_row: false\n", + "qualifier: \u003c\n value: \"D\"\n\u003e\ntimestamp_micros: 102\nvalue: \"v\"\nvalue_size: 10\ncommit_row: false\n", + "value: \"alue-VAL_2\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 101, + "value": "value-VAL_1", + "label": "", + "error": false + }, + { + "rk": "RK", + "fm": "A", + "qual": "D", + "ts": 102, + "value": "value-VAL_2", + "label": "", + "error": false + } + ] + }, + { + "name": "multi-qualifier multi-split", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 101\nvalue: \"v\"\nvalue_size: 10\ncommit_row: false\n", + "value: \"a\"\nvalue_size: 10\ncommit_row: false\n", + "value: \"lue-VAL_1\"\ncommit_row: false\n", + "qualifier: \u003c\n value: \"D\"\n\u003e\ntimestamp_micros: 102\nvalue: \"v\"\nvalue_size: 10\ncommit_row: false\n", + "value: \"a\"\nvalue_size: 10\ncommit_row: false\n", + "value: \"lue-VAL_2\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 101, + "value": "value-VAL_1", + "label": "", + "error": false + }, + { + "rk": "RK", + "fm": "A", + "qual": "D", + "ts": 102, + "value": "value-VAL_2", + "label": "", + "error": false + } + ] + }, + { + "name": "multi-family split", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 101\nvalue: \"v\"\nvalue_size: 10\ncommit_row: 
false\n", + "value: \"alue-VAL_1\"\ncommit_row: false\n", + "family_name: \u003c\n value: \"B\"\n\u003e\nqualifier: \u003c\n value: \"E\"\n\u003e\ntimestamp_micros: 102\nvalue: \"v\"\nvalue_size: 10\ncommit_row: false\n", + "value: \"alue-VAL_2\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 101, + "value": "value-VAL_1", + "label": "", + "error": false + }, + { + "rk": "RK", + "fm": "B", + "qual": "E", + "ts": 102, + "value": "value-VAL_2", + "label": "", + "error": false + } + ] + }, + { + "name": "invalid - no commit between rows", + "chunks": [ + "row_key: \"RK_1\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: false\n", + "row_key: \"RK_2\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: false\n" + ], + "results": [ + { + "rk": "", + "fm": "", + "qual": "", + "ts": 0, + "value": "", + "label": "", + "error": true + } + ] + }, + { + "name": "invalid - no commit after first row", + "chunks": [ + "row_key: \"RK_1\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: false\n", + "row_key: \"RK_2\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "", + "fm": "", + "qual": "", + "ts": 0, + "value": "", + "label": "", + "error": true + } + ] + }, + { + "name": "invalid - last row missing commit", + "chunks": [ + "row_key: \"RK_1\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: true\n", + "row_key: \"RK_2\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 
100\nvalue: \"value-VAL\"\ncommit_row: false\n" + ], + "results": [ + { + "rk": "RK_1", + "fm": "A", + "qual": "C", + "ts": 100, + "value": "value-VAL", + "label": "", + "error": false + }, + { + "rk": "", + "fm": "", + "qual": "", + "ts": 0, + "value": "", + "label": "", + "error": true + } + ] + }, + { + "name": "invalid - duplicate row key", + "chunks": [ + "row_key: \"RK_1\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: true\n", + "row_key: \"RK_1\"\nfamily_name: \u003c\n value: \"B\"\n\u003e\nqualifier: \u003c\n value: \"D\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK_1", + "fm": "A", + "qual": "C", + "ts": 100, + "value": "value-VAL", + "label": "", + "error": false + }, + { + "rk": "", + "fm": "", + "qual": "", + "ts": 0, + "value": "", + "label": "", + "error": true + } + ] + }, + { + "name": "invalid - new row missing row key", + "chunks": [ + "row_key: \"RK_1\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: true\n", + "timestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK_1", + "fm": "A", + "qual": "C", + "ts": 100, + "value": "value-VAL", + "label": "", + "error": false + }, + { + "rk": "", + "fm": "", + "qual": "", + "ts": 0, + "value": "", + "label": "", + "error": true + } + ] + }, + { + "name": "two rows", + "chunks": [ + "row_key: \"RK_1\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: true\n", + "row_key: \"RK_2\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK_1", + "fm": "A", + "qual": "C", + "ts": 100, + 
"value": "value-VAL", + "label": "", + "error": false + }, + { + "rk": "RK_2", + "fm": "A", + "qual": "C", + "ts": 100, + "value": "value-VAL", + "label": "", + "error": false + } + ] + }, + { + "name": "two rows implicit timestamp", + "chunks": [ + "row_key: \"RK_1\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\nvalue: \"value-VAL\"\ncommit_row: true\n", + "row_key: \"RK_2\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK_1", + "fm": "A", + "qual": "C", + "ts": 0, + "value": "value-VAL", + "label": "", + "error": false + }, + { + "rk": "RK_2", + "fm": "A", + "qual": "C", + "ts": 100, + "value": "value-VAL", + "label": "", + "error": false + } + ] + }, + { + "name": "two rows empty value", + "chunks": [ + "row_key: \"RK_1\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ncommit_row: true\n", + "row_key: \"RK_2\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK_1", + "fm": "A", + "qual": "C", + "ts": 0, + "value": "", + "label": "", + "error": false + }, + { + "rk": "RK_2", + "fm": "A", + "qual": "C", + "ts": 100, + "value": "value-VAL", + "label": "", + "error": false + } + ] + }, + { + "name": "two rows, one with multiple cells", + "chunks": [ + "row_key: \"RK_1\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 101\nvalue: \"value-VAL_1\"\ncommit_row: false\n", + "timestamp_micros: 102\nvalue: \"value-VAL_2\"\ncommit_row: true\n", + "row_key: \"RK_2\"\nfamily_name: \u003c\n value: \"B\"\n\u003e\nqualifier: \u003c\n value: \"D\"\n\u003e\ntimestamp_micros: 103\nvalue: \"value-VAL_3\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK_1", + "fm": "A", + 
"qual": "C", + "ts": 101, + "value": "value-VAL_1", + "label": "", + "error": false + }, + { + "rk": "RK_1", + "fm": "A", + "qual": "C", + "ts": 102, + "value": "value-VAL_2", + "label": "", + "error": false + }, + { + "rk": "RK_2", + "fm": "B", + "qual": "D", + "ts": 103, + "value": "value-VAL_3", + "label": "", + "error": false + } + ] + }, + { + "name": "two rows, multiple cells", + "chunks": [ + "row_key: \"RK_1\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 101\nvalue: \"value-VAL_1\"\ncommit_row: false\n", + "qualifier: \u003c\n value: \"D\"\n\u003e\ntimestamp_micros: 102\nvalue: \"value-VAL_2\"\ncommit_row: true\n", + "row_key: \"RK_2\"\nfamily_name: \u003c\n value: \"B\"\n\u003e\nqualifier: \u003c\n value: \"E\"\n\u003e\ntimestamp_micros: 103\nvalue: \"value-VAL_3\"\ncommit_row: false\n", + "qualifier: \u003c\n value: \"F\"\n\u003e\ntimestamp_micros: 104\nvalue: \"value-VAL_4\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK_1", + "fm": "A", + "qual": "C", + "ts": 101, + "value": "value-VAL_1", + "label": "", + "error": false + }, + { + "rk": "RK_1", + "fm": "A", + "qual": "D", + "ts": 102, + "value": "value-VAL_2", + "label": "", + "error": false + }, + { + "rk": "RK_2", + "fm": "B", + "qual": "E", + "ts": 103, + "value": "value-VAL_3", + "label": "", + "error": false + }, + { + "rk": "RK_2", + "fm": "B", + "qual": "F", + "ts": 104, + "value": "value-VAL_4", + "label": "", + "error": false + } + ] + }, + { + "name": "two rows, multiple cells, multiple families", + "chunks": [ + "row_key: \"RK_1\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 101\nvalue: \"value-VAL_1\"\ncommit_row: false\n", + "family_name: \u003c\n value: \"B\"\n\u003e\nqualifier: \u003c\n value: \"E\"\n\u003e\ntimestamp_micros: 102\nvalue: \"value-VAL_2\"\ncommit_row: true\n", + "row_key: \"RK_2\"\nfamily_name: \u003c\n value: \"M\"\n\u003e\nqualifier: \u003c\n 
value: \"O\"\n\u003e\ntimestamp_micros: 103\nvalue: \"value-VAL_3\"\ncommit_row: false\n", + "family_name: \u003c\n value: \"N\"\n\u003e\nqualifier: \u003c\n value: \"P\"\n\u003e\ntimestamp_micros: 104\nvalue: \"value-VAL_4\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK_1", + "fm": "A", + "qual": "C", + "ts": 101, + "value": "value-VAL_1", + "label": "", + "error": false + }, + { + "rk": "RK_1", + "fm": "B", + "qual": "E", + "ts": 102, + "value": "value-VAL_2", + "label": "", + "error": false + }, + { + "rk": "RK_2", + "fm": "M", + "qual": "O", + "ts": 103, + "value": "value-VAL_3", + "label": "", + "error": false + }, + { + "rk": "RK_2", + "fm": "N", + "qual": "P", + "ts": 104, + "value": "value-VAL_4", + "label": "", + "error": false + } + ] + }, + { + "name": "two rows, four cells, 2 labels", + "chunks": [ + "row_key: \"RK_1\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 101\nlabels: \"L_1\"\nvalue: \"value-VAL_1\"\ncommit_row: false\n", + "timestamp_micros: 102\nvalue: \"value-VAL_2\"\ncommit_row: true\n", + "row_key: \"RK_2\"\nfamily_name: \u003c\n value: \"B\"\n\u003e\nqualifier: \u003c\n value: \"D\"\n\u003e\ntimestamp_micros: 103\nlabels: \"L_3\"\nvalue: \"value-VAL_3\"\ncommit_row: false\n", + "timestamp_micros: 104\nvalue: \"value-VAL_4\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK_1", + "fm": "A", + "qual": "C", + "ts": 101, + "value": "value-VAL_1", + "label": "L_1", + "error": false + }, + { + "rk": "RK_1", + "fm": "A", + "qual": "C", + "ts": 102, + "value": "value-VAL_2", + "label": "", + "error": false + }, + { + "rk": "RK_2", + "fm": "B", + "qual": "D", + "ts": 103, + "value": "value-VAL_3", + "label": "L_3", + "error": false + }, + { + "rk": "RK_2", + "fm": "B", + "qual": "D", + "ts": 104, + "value": "value-VAL_4", + "label": "", + "error": false + } + ] + }, + { + "name": "two rows with splits, same timestamp", + "chunks": [ + "row_key: \"RK_1\"\nfamily_name: 
\u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"v\"\nvalue_size: 10\ncommit_row: false\n", + "value: \"alue-VAL_1\"\ncommit_row: true\n", + "row_key: \"RK_2\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"v\"\nvalue_size: 10\ncommit_row: false\n", + "value: \"alue-VAL_2\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK_1", + "fm": "A", + "qual": "C", + "ts": 100, + "value": "value-VAL_1", + "label": "", + "error": false + }, + { + "rk": "RK_2", + "fm": "A", + "qual": "C", + "ts": 100, + "value": "value-VAL_2", + "label": "", + "error": false + } + ] + }, + { + "name": "invalid - bare reset", + "chunks": [ + "reset_row: true\n" + ], + "results": [ + { + "rk": "", + "fm": "", + "qual": "", + "ts": 0, + "value": "", + "label": "", + "error": true + } + ] + }, + { + "name": "invalid - bad reset, no commit", + "chunks": [ + "reset_row: true\n", + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: false\n" + ], + "results": [ + { + "rk": "", + "fm": "", + "qual": "", + "ts": 0, + "value": "", + "label": "", + "error": true + } + ] + }, + { + "name": "invalid - missing key after reset", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: false\n", + "reset_row: true\n", + "timestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "", + "fm": "", + "qual": "", + "ts": 0, + "value": "", + "label": "", + "error": true + } + ] + }, + { + "name": "no data after reset", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: false\n", + "reset_row: 
true\n" + ], + "results": null + }, + { + "name": "simple reset", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: false\n", + "reset_row: true\n", + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 100, + "value": "value-VAL", + "label": "", + "error": false + } + ] + }, + { + "name": "reset to new val", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL_1\"\ncommit_row: false\n", + "reset_row: true\n", + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL_2\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 100, + "value": "value-VAL_2", + "label": "", + "error": false + } + ] + }, + { + "name": "reset to new qual", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL_1\"\ncommit_row: false\n", + "reset_row: true\n", + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"D\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL_1\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK", + "fm": "A", + "qual": "D", + "ts": 100, + "value": "value-VAL_1", + "label": "", + "error": false + } + ] + }, + { + "name": "reset with splits", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL_1\"\ncommit_row: false\n", + "timestamp_micros: 102\nvalue: 
\"value-VAL_2\"\ncommit_row: false\n", + "reset_row: true\n", + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL_2\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 100, + "value": "value-VAL_2", + "label": "", + "error": false + } + ] + }, + { + "name": "reset two cells", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL_1\"\ncommit_row: false\n", + "reset_row: true\n", + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL_2\"\ncommit_row: false\n", + "timestamp_micros: 103\nvalue: \"value-VAL_3\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 100, + "value": "value-VAL_2", + "label": "", + "error": false + }, + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 103, + "value": "value-VAL_3", + "label": "", + "error": false + } + ] + }, + { + "name": "two resets", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL_1\"\ncommit_row: false\n", + "reset_row: true\n", + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL_2\"\ncommit_row: false\n", + "reset_row: true\n", + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL_3\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 100, + "value": "value-VAL_3", + "label": "", + "error": false + } + ] + }, + { + "name": "reset then two cells", + "chunks": [ + "row_key: \"RK\"\nfamily_name: 
\u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL_1\"\ncommit_row: false\n", + "reset_row: true\n", + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"B\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL_2\"\ncommit_row: false\n", + "qualifier: \u003c\n value: \"D\"\n\u003e\ntimestamp_micros: 103\nvalue: \"value-VAL_3\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK", + "fm": "B", + "qual": "C", + "ts": 100, + "value": "value-VAL_2", + "label": "", + "error": false + }, + { + "rk": "RK", + "fm": "B", + "qual": "D", + "ts": 103, + "value": "value-VAL_3", + "label": "", + "error": false + } + ] + }, + { + "name": "reset to new row", + "chunks": [ + "row_key: \"RK_1\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL_1\"\ncommit_row: false\n", + "reset_row: true\n", + "row_key: \"RK_2\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL_2\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK_2", + "fm": "A", + "qual": "C", + "ts": 100, + "value": "value-VAL_2", + "label": "", + "error": false + } + ] + }, + { + "name": "reset in between chunks", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nlabels: \"L\"\nvalue: \"v\"\nvalue_size: 10\ncommit_row: false\n", + "value: \"a\"\nvalue_size: 10\ncommit_row: false\n", + "reset_row: true\n", + "row_key: \"RK_1\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL_1\"\ncommit_row: true\n" + ], + "results": [ + { + "rk": "RK_1", + "fm": "A", + "qual": "C", + "ts": 100, + "value": "value-VAL_1", + "label": "", + "error": false + } + ] + }, + { + "name": "invalid - reset with chunk", 
+ "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nlabels: \"L\"\nvalue: \"v\"\nvalue_size: 10\ncommit_row: false\n", + "value: \"a\"\nvalue_size: 10\nreset_row: true\n" + ], + "results": [ + { + "rk": "", + "fm": "", + "qual": "", + "ts": 0, + "value": "", + "label": "", + "error": true + } + ] + }, + { + "name": "invalid - commit with chunk", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nlabels: \"L\"\nvalue: \"v\"\nvalue_size: 10\ncommit_row: false\n", + "value: \"a\"\nvalue_size: 10\ncommit_row: true\n" + ], + "results": [ + { + "rk": "", + "fm": "", + "qual": "", + "ts": 0, + "value": "", + "label": "", + "error": true + } + ] + }, + { + "name": "empty cell chunk", + "chunks": [ + "row_key: \"RK\"\nfamily_name: \u003c\n value: \"A\"\n\u003e\nqualifier: \u003c\n value: \"C\"\n\u003e\ntimestamp_micros: 100\nvalue: \"value-VAL\"\ncommit_row: false\n", + "commit_row: false\n", + "commit_row: true\n" + ], + "results": [ + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 100, + "value": "value-VAL", + "label": "", + "error": false + }, + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 0, + "value": "", + "label": "", + "error": false + }, + { + "rk": "RK", + "fm": "A", + "qual": "C", + "ts": 0, + "value": "", + "label": "", + "error": false + } + ] + } + ] +} \ No newline at end of file diff --git a/vendor/cloud.google.com/go/civil/civil.go b/vendor/cloud.google.com/go/civil/civil.go new file mode 100644 index 00000000..1cb2675b --- /dev/null +++ b/vendor/cloud.google.com/go/civil/civil.go @@ -0,0 +1,277 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package civil implements types for civil time, a time-zone-independent +// representation of time that follows the rules of the proleptic +// Gregorian calendar with exactly 24-hour days, 60-minute hours, and 60-second +// minutes. +// +// Because they lack location information, these types do not represent unique +// moments or intervals of time. Use time.Time for that purpose. +package civil + +import ( + "fmt" + "time" +) + +// A Date represents a date (year, month, day). +// +// This type does not include location information, and therefore does not +// describe a unique 24-hour timespan. +type Date struct { + Year int // Year (e.g., 2014). + Month time.Month // Month of the year (January = 1, ...). + Day int // Day of the month, starting at 1. +} + +// DateOf returns the Date in which a time occurs in that time's location. +func DateOf(t time.Time) Date { + var d Date + d.Year, d.Month, d.Day = t.Date() + return d +} + +// ParseDate parses a string in RFC3339 full-date format and returns the date value it represents. +func ParseDate(s string) (Date, error) { + t, err := time.Parse("2006-01-02", s) + if err != nil { + return Date{}, err + } + return DateOf(t), nil +} + +// String returns the date in RFC3339 full-date format. +func (d Date) String() string { + return fmt.Sprintf("%04d-%02d-%02d", d.Year, d.Month, d.Day) +} + +// IsValid reports whether the date is valid. +func (d Date) IsValid() bool { + return DateOf(d.In(time.UTC)) == d +} + +// In returns the time corresponding to time 00:00:00 of the date in the location. 
+// +// In is always consistent with time.Date, even when time.Date returns a time +// on a different day. For example, if loc is America/Indiana/Vincennes, then both +// time.Date(1955, time.May, 1, 0, 0, 0, 0, loc) +// and +// civil.Date{Year: 1955, Month: time.May, Day: 1}.In(loc) +// return 23:00:00 on April 30, 1955. +// +// In panics if loc is nil. +func (d Date) In(loc *time.Location) time.Time { + return time.Date(d.Year, d.Month, d.Day, 0, 0, 0, 0, loc) +} + +// AddDays returns the date that is n days in the future. +// n can also be negative to go into the past. +func (d Date) AddDays(n int) Date { + return DateOf(d.In(time.UTC).AddDate(0, 0, n)) +} + +// DaysSince returns the signed number of days between the date and s, not including the end day. +// This is the inverse operation to AddDays. +func (d Date) DaysSince(s Date) (days int) { + // We convert to Unix time so we do not have to worry about leap seconds: + // Unix time increases by exactly 86400 seconds per day. + deltaUnix := d.In(time.UTC).Unix() - s.In(time.UTC).Unix() + return int(deltaUnix / 86400) +} + +// Before reports whether d1 occurs before d2. +func (d1 Date) Before(d2 Date) bool { + if d1.Year != d2.Year { + return d1.Year < d2.Year + } + if d1.Month != d2.Month { + return d1.Month < d2.Month + } + return d1.Day < d2.Day +} + +// After reports whether d1 occurs after d2. +func (d1 Date) After(d2 Date) bool { + return d2.Before(d1) +} + +// MarshalText implements the encoding.TextMarshaler interface. +// The output is the result of d.String(). +func (d Date) MarshalText() ([]byte, error) { + return []byte(d.String()), nil +} + +// UnmarshalText implements the encoding.TextUnmarshaler interface. +// The date is expected to be a string in a format accepted by ParseDate. +func (d *Date) UnmarshalText(data []byte) error { + var err error + *d, err = ParseDate(string(data)) + return err +} + +// A Time represents a time with nanosecond precision. 
+// +// This type does not include location information, and therefore does not +// describe a unique moment in time. +// +// This type exists to represent the TIME type in storage-based APIs like BigQuery. +// Most operations on Times are unlikely to be meaningful. Prefer the DateTime type. +type Time struct { + Hour int // The hour of the day in 24-hour format; range [0-23] + Minute int // The minute of the hour; range [0-59] + Second int // The second of the minute; range [0-59] + Nanosecond int // The nanosecond of the second; range [0-999999999] +} + +// TimeOf returns the Time representing the time of day in which a time occurs +// in that time's location. It ignores the date. +func TimeOf(t time.Time) Time { + var tm Time + tm.Hour, tm.Minute, tm.Second = t.Clock() + tm.Nanosecond = t.Nanosecond() + return tm +} + +// ParseTime parses a string and returns the time value it represents. +// ParseTime accepts an extended form of the RFC3339 partial-time format. After +// the HH:MM:SS part of the string, an optional fractional part may appear, +// consisting of a decimal point followed by one to nine decimal digits. +// (RFC3339 admits only one digit after the decimal point). +func ParseTime(s string) (Time, error) { + t, err := time.Parse("15:04:05.999999999", s) + if err != nil { + return Time{}, err + } + return TimeOf(t), nil +} + +// String returns the date in the format described in ParseTime. If Nanoseconds +// is zero, no fractional part will be generated. Otherwise, the result will +// end with a fractional part consisting of a decimal point and nine digits. +func (t Time) String() string { + s := fmt.Sprintf("%02d:%02d:%02d", t.Hour, t.Minute, t.Second) + if t.Nanosecond == 0 { + return s + } + return s + fmt.Sprintf(".%09d", t.Nanosecond) +} + +// IsValid reports whether the time is valid. +func (t Time) IsValid() bool { + // Construct a non-zero time. 
+ tm := time.Date(2, 2, 2, t.Hour, t.Minute, t.Second, t.Nanosecond, time.UTC) + return TimeOf(tm) == t +} + +// MarshalText implements the encoding.TextMarshaler interface. +// The output is the result of t.String(). +func (t Time) MarshalText() ([]byte, error) { + return []byte(t.String()), nil +} + +// UnmarshalText implements the encoding.TextUnmarshaler interface. +// The time is expected to be a string in a format accepted by ParseTime. +func (t *Time) UnmarshalText(data []byte) error { + var err error + *t, err = ParseTime(string(data)) + return err +} + +// A DateTime represents a date and time. +// +// This type does not include location information, and therefore does not +// describe a unique moment in time. +type DateTime struct { + Date Date + Time Time +} + +// Note: We deliberately do not embed Date into DateTime, to avoid promoting AddDays and Sub. + +// DateTimeOf returns the DateTime in which a time occurs in that time's location. +func DateTimeOf(t time.Time) DateTime { + return DateTime{ + Date: DateOf(t), + Time: TimeOf(t), + } +} + +// ParseDateTime parses a string and returns the DateTime it represents. +// ParseDateTime accepts a variant of the RFC3339 date-time format that omits +// the time offset but includes an optional fractional time, as described in +// ParseTime. Informally, the accepted format is +// YYYY-MM-DDTHH:MM:SS[.FFFFFFFFF] +// where the 'T' may be a lower-case 't'. +func ParseDateTime(s string) (DateTime, error) { + t, err := time.Parse("2006-01-02T15:04:05.999999999", s) + if err != nil { + t, err = time.Parse("2006-01-02t15:04:05.999999999", s) + if err != nil { + return DateTime{}, err + } + } + return DateTimeOf(t), nil +} + +// String returns the date and time in the format described in ParseDateTime. +func (dt DateTime) String() string { + return dt.Date.String() + "T" + dt.Time.String() +} + +// IsValid reports whether the datetime is valid. 
+func (dt DateTime) IsValid() bool { + return dt.Date.IsValid() && dt.Time.IsValid() +} + +// In returns the time corresponding to the DateTime in the given location. +// +// If the time is missing or ambiguous at the location, In returns the same +// result as time.Date. For example, if loc is America/Indiana/Vincennes, then +// both +// time.Date(1955, time.May, 1, 0, 30, 0, 0, loc) +// and +// civil.DateTime{ +// Date: civil.Date{Year: 1955, Month: time.May, Day: 1}, +// Time: civil.Time{Minute: 30}}.In(loc) +// return 23:30:00 on April 30, 1955. +// +// In panics if loc is nil. +func (dt DateTime) In(loc *time.Location) time.Time { + return time.Date(dt.Date.Year, dt.Date.Month, dt.Date.Day, dt.Time.Hour, dt.Time.Minute, dt.Time.Second, dt.Time.Nanosecond, loc) +} + +// Before reports whether dt1 occurs before dt2. +func (dt1 DateTime) Before(dt2 DateTime) bool { + return dt1.In(time.UTC).Before(dt2.In(time.UTC)) +} + +// After reports whether dt1 occurs after dt2. +func (dt1 DateTime) After(dt2 DateTime) bool { + return dt2.Before(dt1) +} + +// MarshalText implements the encoding.TextMarshaler interface. +// The output is the result of dt.String(). +func (dt DateTime) MarshalText() ([]byte, error) { + return []byte(dt.String()), nil +} + +// UnmarshalText implements the encoding.TextUnmarshaler interface. +// The datetime is expected to be a string in a format accepted by ParseDateTime. +func (dt *DateTime) UnmarshalText(data []byte) error { + var err error + *dt, err = ParseDateTime(string(data)) + return err +} diff --git a/vendor/cloud.google.com/go/civil/civil_test.go b/vendor/cloud.google.com/go/civil/civil_test.go new file mode 100644 index 00000000..e3b76a70 --- /dev/null +++ b/vendor/cloud.google.com/go/civil/civil_test.go @@ -0,0 +1,441 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package civil + +import ( + "encoding/json" + "reflect" + "testing" + "time" +) + +func TestDates(t *testing.T) { + for _, test := range []struct { + date Date + loc *time.Location + wantStr string + wantTime time.Time + }{ + { + date: Date{2014, 7, 29}, + loc: time.Local, + wantStr: "2014-07-29", + wantTime: time.Date(2014, time.July, 29, 0, 0, 0, 0, time.Local), + }, + { + date: DateOf(time.Date(2014, 8, 20, 15, 8, 43, 1, time.Local)), + loc: time.UTC, + wantStr: "2014-08-20", + wantTime: time.Date(2014, 8, 20, 0, 0, 0, 0, time.UTC), + }, + { + date: DateOf(time.Date(999, time.January, 26, 0, 0, 0, 0, time.Local)), + loc: time.UTC, + wantStr: "0999-01-26", + wantTime: time.Date(999, 1, 26, 0, 0, 0, 0, time.UTC), + }, + } { + if got := test.date.String(); got != test.wantStr { + t.Errorf("%#v.String() = %q, want %q", test.date, got, test.wantStr) + } + if got := test.date.In(test.loc); !got.Equal(test.wantTime) { + t.Errorf("%#v.In(%v) = %v, want %v", test.date, test.loc, got, test.wantTime) + } + } +} + +func TestDateIsValid(t *testing.T) { + for _, test := range []struct { + date Date + want bool + }{ + {Date{2014, 7, 29}, true}, + {Date{2000, 2, 29}, true}, + {Date{10000, 12, 31}, true}, + {Date{1, 1, 1}, true}, + {Date{0, 1, 1}, true}, // year zero is OK + {Date{-1, 1, 1}, true}, // negative year is OK + {Date{1, 0, 1}, false}, + {Date{1, 1, 0}, false}, + {Date{2016, 1, 32}, false}, + {Date{2016, 13, 1}, false}, + {Date{1, -1, 1}, false}, + {Date{1, 1, -1}, false}, + } { + got := test.date.IsValid() + if got != test.want { + t.Errorf("%#v: 
got %t, want %t", test.date, got, test.want) + } + } +} + +func TestParseDate(t *testing.T) { + for _, test := range []struct { + str string + want Date // if empty, expect an error + }{ + {"2016-01-02", Date{2016, 1, 2}}, + {"2016-12-31", Date{2016, 12, 31}}, + {"0003-02-04", Date{3, 2, 4}}, + {"999-01-26", Date{}}, + {"", Date{}}, + {"2016-01-02x", Date{}}, + } { + got, err := ParseDate(test.str) + if got != test.want { + t.Errorf("ParseDate(%q) = %+v, want %+v", test.str, got, test.want) + } + if err != nil && test.want != (Date{}) { + t.Errorf("Unexpected error %v from ParseDate(%q)", err, test.str) + } + } +} + +func TestDateArithmetic(t *testing.T) { + for _, test := range []struct { + desc string + start Date + end Date + days int + }{ + { + desc: "zero days noop", + start: Date{2014, 5, 9}, + end: Date{2014, 5, 9}, + days: 0, + }, + { + desc: "crossing a year boundary", + start: Date{2014, 12, 31}, + end: Date{2015, 1, 1}, + days: 1, + }, + { + desc: "negative number of days", + start: Date{2015, 1, 1}, + end: Date{2014, 12, 31}, + days: -1, + }, + { + desc: "full leap year", + start: Date{2004, 1, 1}, + end: Date{2005, 1, 1}, + days: 366, + }, + { + desc: "full non-leap year", + start: Date{2001, 1, 1}, + end: Date{2002, 1, 1}, + days: 365, + }, + { + desc: "crossing a leap second", + start: Date{1972, 6, 30}, + end: Date{1972, 7, 1}, + days: 1, + }, + { + desc: "dates before the unix epoch", + start: Date{101, 1, 1}, + end: Date{102, 1, 1}, + days: 365, + }, + } { + if got := test.start.AddDays(test.days); got != test.end { + t.Errorf("[%s] %#v.AddDays(%v) = %#v, want %#v", test.desc, test.start, test.days, got, test.end) + } + if got := test.end.DaysSince(test.start); got != test.days { + t.Errorf("[%s] %#v.Sub(%#v) = %v, want %v", test.desc, test.end, test.start, got, test.days) + } + } +} + +func TestDateBefore(t *testing.T) { + for _, test := range []struct { + d1, d2 Date + want bool + }{ + {Date{2016, 12, 31}, Date{2017, 1, 1}, true}, + {Date{2016, 
1, 1}, Date{2016, 1, 1}, false}, + {Date{2016, 12, 30}, Date{2016, 12, 31}, true}, + } { + if got := test.d1.Before(test.d2); got != test.want { + t.Errorf("%v.Before(%v): got %t, want %t", test.d1, test.d2, got, test.want) + } + } +} + +func TestDateAfter(t *testing.T) { + for _, test := range []struct { + d1, d2 Date + want bool + }{ + {Date{2016, 12, 31}, Date{2017, 1, 1}, false}, + {Date{2016, 1, 1}, Date{2016, 1, 1}, false}, + {Date{2016, 12, 30}, Date{2016, 12, 31}, false}, + } { + if got := test.d1.After(test.d2); got != test.want { + t.Errorf("%v.After(%v): got %t, want %t", test.d1, test.d2, got, test.want) + } + } +} + +func TestTimeToString(t *testing.T) { + for _, test := range []struct { + str string + time Time + roundTrip bool // ParseTime(str).String() == str? + }{ + {"13:26:33", Time{13, 26, 33, 0}, true}, + {"01:02:03.000023456", Time{1, 2, 3, 23456}, true}, + {"00:00:00.000000001", Time{0, 0, 0, 1}, true}, + {"13:26:03.1", Time{13, 26, 3, 100000000}, false}, + {"13:26:33.0000003", Time{13, 26, 33, 300}, false}, + } { + gotTime, err := ParseTime(test.str) + if err != nil { + t.Errorf("ParseTime(%q): got error: %v", test.str, err) + continue + } + if gotTime != test.time { + t.Errorf("ParseTime(%q) = %+v, want %+v", test.str, gotTime, test.time) + } + if test.roundTrip { + gotStr := test.time.String() + if gotStr != test.str { + t.Errorf("%#v.String() = %q, want %q", test.time, gotStr, test.str) + } + } + } +} + +func TestTimeOf(t *testing.T) { + for _, test := range []struct { + time time.Time + want Time + }{ + {time.Date(2014, 8, 20, 15, 8, 43, 1, time.Local), Time{15, 8, 43, 1}}, + {time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC), Time{0, 0, 0, 0}}, + } { + if got := TimeOf(test.time); got != test.want { + t.Errorf("TimeOf(%v) = %+v, want %+v", test.time, got, test.want) + } + } +} + +func TestTimeIsValid(t *testing.T) { + for _, test := range []struct { + time Time + want bool + }{ + {Time{0, 0, 0, 0}, true}, + {Time{23, 0, 0, 0}, true}, + {Time{23, 
59, 59, 999999999}, true}, + {Time{24, 59, 59, 999999999}, false}, + {Time{23, 60, 59, 999999999}, false}, + {Time{23, 59, 60, 999999999}, false}, + {Time{23, 59, 59, 1000000000}, false}, + {Time{-1, 0, 0, 0}, false}, + {Time{0, -1, 0, 0}, false}, + {Time{0, 0, -1, 0}, false}, + {Time{0, 0, 0, -1}, false}, + } { + got := test.time.IsValid() + if got != test.want { + t.Errorf("%#v: got %t, want %t", test.time, got, test.want) + } + } +} + +func TestDateTimeToString(t *testing.T) { + for _, test := range []struct { + str string + dateTime DateTime + roundTrip bool // ParseDateTime(str).String() == str? + }{ + {"2016-03-22T13:26:33", DateTime{Date{2016, 03, 22}, Time{13, 26, 33, 0}}, true}, + {"2016-03-22T13:26:33.000000600", DateTime{Date{2016, 03, 22}, Time{13, 26, 33, 600}}, true}, + {"2016-03-22t13:26:33", DateTime{Date{2016, 03, 22}, Time{13, 26, 33, 0}}, false}, + } { + gotDateTime, err := ParseDateTime(test.str) + if err != nil { + t.Errorf("ParseDateTime(%q): got error: %v", test.str, err) + continue + } + if gotDateTime != test.dateTime { + t.Errorf("ParseDateTime(%q) = %+v, want %+v", test.str, gotDateTime, test.dateTime) + } + if test.roundTrip { + gotStr := test.dateTime.String() + if gotStr != test.str { + t.Errorf("%#v.String() = %q, want %q", test.dateTime, gotStr, test.str) + } + } + } +} + +func TestParseDateTimeErrors(t *testing.T) { + for _, str := range []string{ + "", + "2016-03-22", // just a date + "13:26:33", // just a time + "2016-03-22 13:26:33", // wrong separating character + "2016-03-22T13:26:33x", // extra at end + } { + if _, err := ParseDateTime(str); err == nil { + t.Errorf("ParseDateTime(%q) succeeded, want error", str) + } + } +} + +func TestDateTimeOf(t *testing.T) { + for _, test := range []struct { + time time.Time + want DateTime + }{ + {time.Date(2014, 8, 20, 15, 8, 43, 1, time.Local), + DateTime{Date{2014, 8, 20}, Time{15, 8, 43, 1}}}, + {time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC), + DateTime{Date{1, 1, 1}, Time{0, 0, 0, 0}}}, + 
} { + if got := DateTimeOf(test.time); got != test.want { + t.Errorf("DateTimeOf(%v) = %+v, want %+v", test.time, got, test.want) + } + } +} + +func TestDateTimeIsValid(t *testing.T) { + // No need to be exhaustive here; it's just Date.IsValid && Time.IsValid. + for _, test := range []struct { + dt DateTime + want bool + }{ + {DateTime{Date{2016, 3, 20}, Time{0, 0, 0, 0}}, true}, + {DateTime{Date{2016, -3, 20}, Time{0, 0, 0, 0}}, false}, + {DateTime{Date{2016, 3, 20}, Time{24, 0, 0, 0}}, false}, + } { + got := test.dt.IsValid() + if got != test.want { + t.Errorf("%#v: got %t, want %t", test.dt, got, test.want) + } + } +} + +func TestDateTimeIn(t *testing.T) { + dt := DateTime{Date{2016, 1, 2}, Time{3, 4, 5, 6}} + got := dt.In(time.UTC) + want := time.Date(2016, 1, 2, 3, 4, 5, 6, time.UTC) + if !got.Equal(want) { + t.Errorf("got %v, want %v", got, want) + } +} + +func TestDateTimeBefore(t *testing.T) { + d1 := Date{2016, 12, 31} + d2 := Date{2017, 1, 1} + t1 := Time{5, 6, 7, 8} + t2 := Time{5, 6, 7, 9} + for _, test := range []struct { + dt1, dt2 DateTime + want bool + }{ + {DateTime{d1, t1}, DateTime{d2, t1}, true}, + {DateTime{d1, t1}, DateTime{d1, t2}, true}, + {DateTime{d2, t1}, DateTime{d1, t1}, false}, + {DateTime{d2, t1}, DateTime{d2, t1}, false}, + } { + if got := test.dt1.Before(test.dt2); got != test.want { + t.Errorf("%v.Before(%v): got %t, want %t", test.dt1, test.dt2, got, test.want) + } + } +} + +func TestDateTimeAfter(t *testing.T) { + d1 := Date{2016, 12, 31} + d2 := Date{2017, 1, 1} + t1 := Time{5, 6, 7, 8} + t2 := Time{5, 6, 7, 9} + for _, test := range []struct { + dt1, dt2 DateTime + want bool + }{ + {DateTime{d1, t1}, DateTime{d2, t1}, false}, + {DateTime{d1, t1}, DateTime{d1, t2}, false}, + {DateTime{d2, t1}, DateTime{d1, t1}, true}, + {DateTime{d2, t1}, DateTime{d2, t1}, false}, + } { + if got := test.dt1.After(test.dt2); got != test.want { + t.Errorf("%v.After(%v): got %t, want %t", test.dt1, test.dt2, got, test.want) + } + } +} + +func 
TestMarshalJSON(t *testing.T) { + for _, test := range []struct { + value interface{} + want string + }{ + {Date{1987, 4, 15}, `"1987-04-15"`}, + {Time{18, 54, 2, 0}, `"18:54:02"`}, + {DateTime{Date{1987, 4, 15}, Time{18, 54, 2, 0}}, `"1987-04-15T18:54:02"`}, + } { + bgot, err := json.Marshal(test.value) + if err != nil { + t.Fatal(err) + } + if got := string(bgot); got != test.want { + t.Errorf("%#v: got %s, want %s", test.value, got, test.want) + } + } +} + +func TestUnmarshalJSON(t *testing.T) { + var d Date + var tm Time + var dt DateTime + for _, test := range []struct { + data string + ptr interface{} + want interface{} + }{ + {`"1987-04-15"`, &d, &Date{1987, 4, 15}}, + {`"1987-04-\u0031\u0035"`, &d, &Date{1987, 4, 15}}, + {`"18:54:02"`, &tm, &Time{18, 54, 2, 0}}, + {`"1987-04-15T18:54:02"`, &dt, &DateTime{Date{1987, 4, 15}, Time{18, 54, 2, 0}}}, + } { + if err := json.Unmarshal([]byte(test.data), test.ptr); err != nil { + t.Fatalf("%s: %v", test.data, err) + } + if !reflect.DeepEqual(test.ptr, test.want) { + t.Errorf("%s: got %#v, want %#v", test.data, test.ptr, test.want) + } + } + + for _, bad := range []string{"", `""`, `"bad"`, `"1987-04-15x"`, + `19870415`, // a JSON number + `11987-04-15x`, // not a JSON string + + } { + if json.Unmarshal([]byte(bad), &d) == nil { + t.Errorf("%q, Date: got nil, want error", bad) + } + if json.Unmarshal([]byte(bad), &tm) == nil { + t.Errorf("%q, Time: got nil, want error", bad) + } + if json.Unmarshal([]byte(bad), &dt) == nil { + t.Errorf("%q, DateTime: got nil, want error", bad) + } + } +} diff --git a/vendor/cloud.google.com/go/cloud.go b/vendor/cloud.google.com/go/cloud.go new file mode 100644 index 00000000..6ba428dc --- /dev/null +++ b/vendor/cloud.google.com/go/cloud.go @@ -0,0 +1,20 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package cloud is the root of the packages used to access Google Cloud +// Services. See https://godoc.org/cloud.google.com/go for a full list +// of sub-packages. +// +// This package documents how to authorize and authenticate the sub packages. +package cloud // import "cloud.google.com/go" diff --git a/vendor/cloud.google.com/go/cmd/go-cloud-debug-agent/debuglet.go b/vendor/cloud.google.com/go/cmd/go-cloud-debug-agent/debuglet.go new file mode 100644 index 00000000..6a8702c7 --- /dev/null +++ b/vendor/cloud.google.com/go/cmd/go-cloud-debug-agent/debuglet.go @@ -0,0 +1,450 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// +build linux + +package main + +import ( + "encoding/json" + "flag" + "fmt" + "io/ioutil" + "log" + "math/rand" + "os" + "sync" + "time" + + "cloud.google.com/go/cmd/go-cloud-debug-agent/internal/breakpoints" + debuglet "cloud.google.com/go/cmd/go-cloud-debug-agent/internal/controller" + "cloud.google.com/go/cmd/go-cloud-debug-agent/internal/valuecollector" + "cloud.google.com/go/compute/metadata" + "golang.org/x/debug" + "golang.org/x/debug/local" + "golang.org/x/net/context" + "golang.org/x/oauth2" + "golang.org/x/oauth2/google" + cd "google.golang.org/api/clouddebugger/v2" +) + +var ( + appModule = flag.String("appmodule", "", "Optional application module name.") + appVersion = flag.String("appversion", "", "Optional application module version name.") + sourceContextFile = flag.String("sourcecontext", "", "File containing JSON-encoded source context.") + verbose = flag.Bool("v", false, "Output verbose log messages.") + projectNumber = flag.String("projectnumber", "", "Project number."+ + " If this is not set, it is read from the GCP metadata server.") + projectID = flag.String("projectid", "", "Project ID."+ + " If this is not set, it is read from the GCP metadata server.") + serviceAccountFile = flag.String("serviceaccountfile", "", "File containing JSON service account credentials.") +) + +const ( + maxCapturedStackFrames = 50 + maxCapturedVariables = 1000 +) + +func main() { + flag.Usage = usage + flag.Parse() + args := flag.Args() + if len(args) == 0 { + // The user needs to supply the name of the executable to run. 
+ flag.Usage() + return + } + if *projectNumber == "" { + var err error + *projectNumber, err = metadata.NumericProjectID() + if err != nil { + log.Print("Debuglet initialization: ", err) + } + } + if *projectID == "" { + var err error + *projectID, err = metadata.ProjectID() + if err != nil { + log.Print("Debuglet initialization: ", err) + } + } + sourceContexts, err := readSourceContextFile(*sourceContextFile) + if err != nil { + log.Print("Reading source context file: ", err) + } + var ts oauth2.TokenSource + ctx := context.Background() + if *serviceAccountFile != "" { + if ts, err = serviceAcctTokenSource(ctx, *serviceAccountFile, cd.CloudDebuggerScope); err != nil { + log.Fatalf("Error getting credentials from file %s: %v", *serviceAccountFile, err) + } + } else if ts, err = google.DefaultTokenSource(ctx, cd.CloudDebuggerScope); err != nil { + log.Print("Error getting application default credentials for Cloud Debugger:", err) + os.Exit(103) + } + c, err := debuglet.NewController(ctx, debuglet.Options{ + ProjectNumber: *projectNumber, + ProjectID: *projectID, + AppModule: *appModule, + AppVersion: *appVersion, + SourceContexts: sourceContexts, + Verbose: *verbose, + TokenSource: ts, + }) + if err != nil { + log.Fatal("Error connecting to Cloud Debugger: ", err) + } + prog, err := local.New(args[0]) + if err != nil { + log.Fatal("Error loading program: ", err) + } + // Load the program, but don't actually start it running yet. + if _, err = prog.Run(args[1:]...); err != nil { + log.Fatal("Error loading program: ", err) + } + bs := breakpoints.NewBreakpointStore(prog) + + // Seed the random number generator. + rand.Seed(time.Now().UnixNano()) + + // Now we want to do two things: run the user's program, and start sending + // List requests periodically to the Debuglet Controller to get breakpoints + // to set. 
+ // + // We want to give the Debuglet Controller a chance to give us breakpoints + // before we start the program, otherwise we would miss any breakpoint + // triggers that occur during program startup -- for example, a breakpoint on + // the first line of main. But if the Debuglet Controller is not responding or + // is returning errors, we don't want to delay starting the program + // indefinitely. + // + // We pass a channel to breakpointListLoop, which will close it when the first + // List call finishes. Then we wait until either the channel is closed or a + // 5-second timer has finished before starting the program. + ch := make(chan bool) + // Start a goroutine that sends List requests to the Debuglet Controller, and + // sets any breakpoints it gets back. + go breakpointListLoop(ctx, c, bs, ch) + // Wait until 5 seconds have passed or breakpointListLoop has closed ch. + select { + case <-time.After(5 * time.Second): + case <-ch: + } + // Run the debuggee. + programLoop(ctx, c, bs, prog) +} + +// usage prints a usage message to stderr and exits. +func usage() { + me := "a.out" + if len(os.Args) >= 1 { + me = os.Args[0] + } + fmt.Fprintf(os.Stderr, "Usage of %s:\n", me) + fmt.Fprintf(os.Stderr, "\t%s [flags...] -- args...\n", me) + fmt.Fprintf(os.Stderr, "Flags:\n") + flag.PrintDefaults() + fmt.Fprintf(os.Stderr, + "See https://cloud.google.com/tools/cloud-debugger/setting-up-on-compute-engine for more information.\n") + os.Exit(2) +} + +// readSourceContextFile reads a JSON-encoded source context from the given file. +// It returns a non-empty slice on success. 
+func readSourceContextFile(filename string) ([]*cd.SourceContext, error) { + if filename == "" { + return nil, nil + } + scJSON, err := ioutil.ReadFile(filename) + if err != nil { + return nil, fmt.Errorf("reading file %q: %v", filename, err) + } + var sc cd.SourceContext + if err = json.Unmarshal(scJSON, &sc); err != nil { + return nil, fmt.Errorf("parsing file %q: %v", filename, err) + } + return []*cd.SourceContext{&sc}, nil +} + +// breakpointListLoop repeatedly calls the Debuglet Controller's List RPC, and +// passes the results to the BreakpointStore so it can set and unset breakpoints +// in the program. +// +// After the first List call finishes, ch is closed. +func breakpointListLoop(ctx context.Context, c *debuglet.Controller, bs *breakpoints.BreakpointStore, first chan bool) { + const ( + avgTimeBetweenCalls = time.Second + errorDelay = 5 * time.Second + ) + + // randomDuration returns a random duration with expected value avg. + randomDuration := func(avg time.Duration) time.Duration { + return time.Duration(rand.Int63n(int64(2*avg + 1))) + } + + var consecutiveFailures uint + + for { + callStart := time.Now() + resp, err := c.List(ctx) + if err != nil && err != debuglet.ErrListUnchanged { + log.Printf("Debuglet controller server error: %v", err) + } + if err == nil { + bs.ProcessBreakpointList(resp.Breakpoints) + } + + if first != nil { + // We've finished one call to List and set any breakpoints we received. + close(first) + first = nil + } + + // Asynchronously send updates for any breakpoints that caused an error when + // the BreakpointStore tried to process them. We don't wait for the update + // to finish before the program can exit, as we do for normal updates. 
+ errorBps := bs.ErrorBreakpoints() + for _, bp := range errorBps { + go func(bp *cd.Breakpoint) { + if err := c.Update(ctx, bp.Id, bp); err != nil { + log.Printf("Failed to send breakpoint update for %s: %s", bp.Id, err) + } + }(bp) + } + + // Make the next call not too soon after the one we just did. + delay := randomDuration(avgTimeBetweenCalls) + + // If the call returned an error other than ErrListUnchanged, wait longer. + if err != nil && err != debuglet.ErrListUnchanged { + // Wait twice as long after each consecutive failure, to a maximum of 16x. + delay += randomDuration(errorDelay * (1 << consecutiveFailures)) + if consecutiveFailures < 4 { + consecutiveFailures++ + } + } else { + consecutiveFailures = 0 + } + + // Sleep until we reach time callStart+delay. If we've already passed that + // time, time.Sleep will return immediately -- this should be the common + // case, since the server will delay responding to List for a while when + // there are no changes to report. + time.Sleep(callStart.Add(delay).Sub(time.Now())) + } +} + +// programLoop runs the program being debugged to completion. When a breakpoint's +// conditions are satisfied, it sends an Update RPC to the Debuglet Controller. +// The function returns when the program exits and all Update RPCs have finished. +func programLoop(ctx context.Context, c *debuglet.Controller, bs *breakpoints.BreakpointStore, prog debug.Program) { + var wg sync.WaitGroup + for { + // Run the program until it hits a breakpoint or exits. + status, err := prog.Resume() + if err != nil { + break + } + + // Get the breakpoints at this address whose conditions were satisfied, + // and remove the ones that aren't logpoints. + bps := bs.BreakpointsAtPC(status.PC) + bps = bpsWithConditionSatisfied(bps, prog) + for _, bp := range bps { + if bp.Action != "LOG" { + bs.RemoveBreakpoint(bp) + } + } + + if len(bps) == 0 { + continue + } + + // Evaluate expressions and get the stack. 
+ vc := valuecollector.NewCollector(prog, maxCapturedVariables) + needStackFrames := false + for _, bp := range bps { + // If evaluating bp's condition didn't return an error, evaluate bp's + // expressions, and later get the stack frames. + if bp.Status == nil { + bp.EvaluatedExpressions = expressionValues(bp.Expressions, prog, vc) + needStackFrames = true + } + } + var ( + stack []*cd.StackFrame + stackFramesStatusMessage *cd.StatusMessage + ) + if needStackFrames { + stack, stackFramesStatusMessage = stackFrames(prog, vc) + } + + // Read variable values from the program. + variableTable := vc.ReadValues() + + // Start a goroutine to send updates to the Debuglet Controller or write + // to logs, concurrently with resuming the program. + // TODO: retry Update on failure. + for _, bp := range bps { + wg.Add(1) + switch bp.Action { + case "LOG": + go func(format string, evaluatedExpressions []*cd.Variable) { + s := valuecollector.LogString(format, evaluatedExpressions, variableTable) + log.Print(s) + wg.Done() + }(bp.LogMessageFormat, bp.EvaluatedExpressions) + bp.Status = nil + bp.EvaluatedExpressions = nil + default: + go func(bp *cd.Breakpoint) { + defer wg.Done() + bp.IsFinalState = true + if bp.Status == nil { + // If evaluating bp's condition didn't return an error, include the + // stack frames, variable table, and any status message produced when + // getting the stack frames. + bp.StackFrames = stack + bp.VariableTable = variableTable + bp.Status = stackFramesStatusMessage + } + if err := c.Update(ctx, bp.Id, bp); err != nil { + log.Printf("Failed to send breakpoint update for %s: %s", bp.Id, err) + } + }(bp) + } + } + } + + // Wait for all updates to finish before returning. + wg.Wait() +} + +// bpsWithConditionSatisfied returns the breakpoints whose conditions are true +// (or that do not have a condition.) 
+func bpsWithConditionSatisfied(bpsIn []*cd.Breakpoint, prog debug.Program) []*cd.Breakpoint { + var bpsOut []*cd.Breakpoint + for _, bp := range bpsIn { + cond, err := condTruth(bp.Condition, prog) + if err != nil { + bp.Status = errorStatusMessage(err.Error(), refersToBreakpointCondition) + // Include bp in the list to be updated when there's an error, so that + // the user gets a response. + bpsOut = append(bpsOut, bp) + } else if cond { + bpsOut = append(bpsOut, bp) + } + } + return bpsOut +} + +// condTruth evaluates a condition. +func condTruth(condition string, prog debug.Program) (bool, error) { + if condition == "" { + // A condition wasn't set. + return true, nil + } + val, err := prog.Evaluate(condition) + if err != nil { + return false, err + } + if v, ok := val.(bool); !ok { + return false, fmt.Errorf("condition expression has type %T, should be bool", val) + } else { + return v, nil + } +} + +// expressionValues evaluates a slice of expressions and returns a []*cd.Variable +// containing the results. +// If the result of an expression evaluation refers to values from the program's +// memory (e.g., the expression evaluates to a slice) a corresponding variable is +// added to the value collector, to be read later. +func expressionValues(expressions []string, prog debug.Program, vc *valuecollector.Collector) []*cd.Variable { + evaluatedExpressions := make([]*cd.Variable, len(expressions)) + for i, exp := range expressions { + ee := &cd.Variable{Name: exp} + evaluatedExpressions[i] = ee + if val, err := prog.Evaluate(exp); err != nil { + ee.Status = errorStatusMessage(err.Error(), refersToBreakpointExpression) + } else { + vc.FillValue(val, ee) + } + } + return evaluatedExpressions +} + +// stackFrames returns a stack trace for the program. It passes references to +// function parameters and local variables to the value collector, so it can read +// their values later. 
+func stackFrames(prog debug.Program, vc *valuecollector.Collector) ([]*cd.StackFrame, *cd.StatusMessage) { + frames, err := prog.Frames(maxCapturedStackFrames) + if err != nil { + return nil, errorStatusMessage("Error getting stack: "+err.Error(), refersToUnspecified) + } + stackFrames := make([]*cd.StackFrame, len(frames)) + for i, f := range frames { + frame := &cd.StackFrame{} + frame.Function = f.Function + for _, v := range f.Params { + frame.Arguments = append(frame.Arguments, vc.AddVariable(debug.LocalVar(v))) + } + for _, v := range f.Vars { + frame.Locals = append(frame.Locals, vc.AddVariable(v)) + } + frame.Location = &cd.SourceLocation{ + Path: f.File, + Line: int64(f.Line), + } + stackFrames[i] = frame + } + return stackFrames, nil +} + +// errorStatusMessage returns a *cd.StatusMessage indicating an error, +// with the given message and refersTo field. +func errorStatusMessage(msg string, refersTo int) *cd.StatusMessage { + return &cd.StatusMessage{ + Description: &cd.FormatMessage{Format: "$0", Parameters: []string{msg}}, + IsError: true, + RefersTo: refersToString[refersTo], + } +} + +const ( + // RefersTo values for cd.StatusMessage. + refersToUnspecified = iota + refersToBreakpointCondition + refersToBreakpointExpression +) + +// refersToString contains the strings for each refersTo value. +// See the definition of StatusMessage in the v2/clouddebugger package. +var refersToString = map[int]string{ + refersToUnspecified: "UNSPECIFIED", + refersToBreakpointCondition: "BREAKPOINT_CONDITION", + refersToBreakpointExpression: "BREAKPOINT_EXPRESSION", +} + +func serviceAcctTokenSource(ctx context.Context, filename string, scope ...string) (oauth2.TokenSource, error) { + data, err := ioutil.ReadFile(filename) + if err != nil { + return nil, fmt.Errorf("cannot read service account file: %v", err) + } + cfg, err := google.JWTConfigFromJSON(data, scope...) 
+ if err != nil { + return nil, fmt.Errorf("google.JWTConfigFromJSON: %v", err) + } + return cfg.TokenSource(ctx), nil +} diff --git a/vendor/cloud.google.com/go/cmd/go-cloud-debug-agent/internal/breakpoints/breakpoints.go b/vendor/cloud.google.com/go/cmd/go-cloud-debug-agent/internal/breakpoints/breakpoints.go new file mode 100644 index 00000000..afe07cbf --- /dev/null +++ b/vendor/cloud.google.com/go/cmd/go-cloud-debug-agent/internal/breakpoints/breakpoints.go @@ -0,0 +1,174 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package breakpoints handles breakpoint requests we get from the user through +// the Debuglet Controller, and manages corresponding breakpoints set in the code. +package breakpoints + +import ( + "log" + "sync" + + "golang.org/x/debug" + cd "google.golang.org/api/clouddebugger/v2" +) + +// BreakpointStore stores the set of breakpoints for a program. +type BreakpointStore struct { + mu sync.Mutex + // prog is the program being debugged. + prog debug.Program + // idToBreakpoint is a map from breakpoint identifier to *cd.Breakpoint. The + // map value is nil if the breakpoint is inactive. 
A breakpoint is active if: + // - We received it from the Debuglet Controller, and it was active at the time; + // - We were able to set code breakpoints for it; + // - We have not reached any of those code breakpoints while satisfying the + // breakpoint's conditions, or the breakpoint has action LOG; and + // - The Debuglet Controller hasn't informed us the breakpoint has become inactive. + idToBreakpoint map[string]*cd.Breakpoint + // pcToBps and bpToPCs store the many-to-many relationship between breakpoints we + // received from the Debuglet Controller and the code breakpoints we set for them. + pcToBps map[uint64][]*cd.Breakpoint + bpToPCs map[*cd.Breakpoint][]uint64 + // errors contains any breakpoints which couldn't be set because they caused an + // error. These are retrieved with ErrorBreakpoints, and the caller is + // expected to handle sending updates for them. + errors []*cd.Breakpoint +} + +// NewBreakpointStore returns a BreakpointStore for the given program. +func NewBreakpointStore(prog debug.Program) *BreakpointStore { + return &BreakpointStore{ + idToBreakpoint: make(map[string]*cd.Breakpoint), + pcToBps: make(map[uint64][]*cd.Breakpoint), + bpToPCs: make(map[*cd.Breakpoint][]uint64), + prog: prog, + } +} + +// ProcessBreakpointList applies updates received from the Debuglet Controller through a List call. +func (bs *BreakpointStore) ProcessBreakpointList(bps []*cd.Breakpoint) { + bs.mu.Lock() + defer bs.mu.Unlock() + for _, bp := range bps { + if storedBp, ok := bs.idToBreakpoint[bp.Id]; ok { + if storedBp != nil && bp.IsFinalState { + // IsFinalState indicates that the breakpoint has been made inactive. + bs.removeBreakpointLocked(storedBp) + } + } else { + if bp.IsFinalState { + // The controller is notifying us that the breakpoint is no longer active, + // but we didn't know about it anyway. 
+ continue + } + if bp.Action != "" && bp.Action != "CAPTURE" && bp.Action != "LOG" { + bp.IsFinalState = true + bp.Status = &cd.StatusMessage{ + Description: &cd.FormatMessage{Format: "Action is not supported"}, + IsError: true, + } + bs.errors = append(bs.errors, bp) + // Note in idToBreakpoint that we've already seen this breakpoint, so that we + // don't try to report it as an error multiple times. + bs.idToBreakpoint[bp.Id] = nil + continue + } + pcs, err := bs.prog.BreakpointAtLine(bp.Location.Path, uint64(bp.Location.Line)) + if err != nil { + log.Printf("error setting breakpoint at %s:%d: %v", bp.Location.Path, bp.Location.Line, err) + } + if len(pcs) == 0 { + // We can't find a PC for this breakpoint's source line, so don't make it active. + // TODO: we could snap the line to a location where we can break, or report an error to the user. + bs.idToBreakpoint[bp.Id] = nil + } else { + bs.idToBreakpoint[bp.Id] = bp + for _, pc := range pcs { + bs.pcToBps[pc] = append(bs.pcToBps[pc], bp) + } + bs.bpToPCs[bp] = pcs + } + } + } +} + +// ErrorBreakpoints returns a slice of Breakpoints that caused errors when the +// BreakpointStore tried to process them, and resets the list of such +// breakpoints. +// The caller is expected to send updates to the server to indicate the errors. +func (bs *BreakpointStore) ErrorBreakpoints() []*cd.Breakpoint { + bs.mu.Lock() + defer bs.mu.Unlock() + bps := bs.errors + bs.errors = nil + return bps +} + +// BreakpointsAtPC returns all the breakpoints for which we set a code +// breakpoint at the given address. +func (bs *BreakpointStore) BreakpointsAtPC(pc uint64) []*cd.Breakpoint { + bs.mu.Lock() + defer bs.mu.Unlock() + return bs.pcToBps[pc] +} + +// RemoveBreakpoint makes the given breakpoint inactive. +// This is called when either the debugged program hits the breakpoint, or the Debuglet +// Controller informs us that the breakpoint is now inactive. 
+func (bs *BreakpointStore) RemoveBreakpoint(bp *cd.Breakpoint) { + bs.mu.Lock() + bs.removeBreakpointLocked(bp) + bs.mu.Unlock() +} + +func (bs *BreakpointStore) removeBreakpointLocked(bp *cd.Breakpoint) { + // Set the ID's corresponding breakpoint to nil, so that we won't activate it + // if we see it again. + // TODO: we could delete it after a few seconds. + bs.idToBreakpoint[bp.Id] = nil + + // Delete bp from the list of cd breakpoints at each of its corresponding + // code breakpoint locations, and delete any code breakpoints which no longer + // have a corresponding cd breakpoint. + var codeBreakpointsToDelete []uint64 + for _, pc := range bs.bpToPCs[bp] { + bps := remove(bs.pcToBps[pc], bp) + if len(bps) == 0 { + // bp was the last breakpoint set at this PC, so delete the code breakpoint. + codeBreakpointsToDelete = append(codeBreakpointsToDelete, pc) + delete(bs.pcToBps, pc) + } else { + bs.pcToBps[pc] = bps + } + } + if len(codeBreakpointsToDelete) > 0 { + bs.prog.DeleteBreakpoints(codeBreakpointsToDelete) + } + delete(bs.bpToPCs, bp) +} + +// remove updates rs by removing r, then returns rs. +// The mutex in the BreakpointStore which contains rs should be held. +func remove(rs []*cd.Breakpoint, r *cd.Breakpoint) []*cd.Breakpoint { + for i := range rs { + if rs[i] == r { + rs[i] = rs[len(rs)-1] + rs = rs[0 : len(rs)-1] + return rs + } + } + // We shouldn't reach here. + return rs +} diff --git a/vendor/cloud.google.com/go/cmd/go-cloud-debug-agent/internal/breakpoints/breakpoints_test.go b/vendor/cloud.google.com/go/cmd/go-cloud-debug-agent/internal/breakpoints/breakpoints_test.go new file mode 100644 index 00000000..089a3ba6 --- /dev/null +++ b/vendor/cloud.google.com/go/cmd/go-cloud-debug-agent/internal/breakpoints/breakpoints_test.go @@ -0,0 +1,168 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package breakpoints + +import ( + "reflect" + "testing" + + "golang.org/x/debug" + cd "google.golang.org/api/clouddebugger/v2" +) + +var ( + testPC1 uint64 = 0x1234 + testPC2 uint64 = 0x5678 + testPC3 uint64 = 0x3333 + testFile = "foo.go" + testLine uint64 = 42 + testLine2 uint64 = 99 + testLogPC uint64 = 0x9abc + testLogLine uint64 = 43 + testBadPC uint64 = 0xdef0 + testBadLine uint64 = 44 + testBP = &cd.Breakpoint{ + Action: "CAPTURE", + Id: "TestBreakpoint", + IsFinalState: false, + Location: &cd.SourceLocation{Path: testFile, Line: int64(testLine)}, + } + testBP2 = &cd.Breakpoint{ + Action: "CAPTURE", + Id: "TestBreakpoint2", + IsFinalState: false, + Location: &cd.SourceLocation{Path: testFile, Line: int64(testLine2)}, + } + testLogBP = &cd.Breakpoint{ + Action: "LOG", + Id: "TestLogBreakpoint", + IsFinalState: false, + Location: &cd.SourceLocation{Path: testFile, Line: int64(testLogLine)}, + } + testBadBP = &cd.Breakpoint{ + Action: "BEEP", + Id: "TestBadBreakpoint", + IsFinalState: false, + Location: &cd.SourceLocation{Path: testFile, Line: int64(testBadLine)}, + } +) + +func TestBreakpointStore(t *testing.T) { + p := &Program{breakpointPCs: make(map[uint64]bool)} + bs := NewBreakpointStore(p) + checkPCs := func(expected map[uint64]bool) { + if !reflect.DeepEqual(p.breakpointPCs, expected) { + t.Errorf("got breakpoint map %v want %v", p.breakpointPCs, expected) + } + } + bs.ProcessBreakpointList([]*cd.Breakpoint{testBP, testBP2, testLogBP, testBadBP}) + checkPCs(map[uint64]bool{ + testPC1: true, + testPC2: true, + testPC3: true, + 
testLogPC: true, + }) + for _, test := range []struct { + pc uint64 + expected []*cd.Breakpoint + }{ + {testPC1, []*cd.Breakpoint{testBP}}, + {testPC2, []*cd.Breakpoint{testBP}}, + {testPC3, []*cd.Breakpoint{testBP2}}, + {testLogPC, []*cd.Breakpoint{testLogBP}}, + } { + if bps := bs.BreakpointsAtPC(test.pc); !reflect.DeepEqual(bps, test.expected) { + t.Errorf("BreakpointsAtPC(%x): got %v want %v", test.pc, bps, test.expected) + } + } + testBP2.IsFinalState = true + bs.ProcessBreakpointList([]*cd.Breakpoint{testBP, testBP2, testLogBP, testBadBP}) + checkPCs(map[uint64]bool{ + testPC1: true, + testPC2: true, + testPC3: false, + testLogPC: true, + }) + bs.RemoveBreakpoint(testBP) + checkPCs(map[uint64]bool{ + testPC1: false, + testPC2: false, + testPC3: false, + testLogPC: true, + }) + for _, pc := range []uint64{testPC1, testPC2, testPC3} { + if bps := bs.BreakpointsAtPC(pc); len(bps) != 0 { + t.Errorf("BreakpointsAtPC(%x): got %v want []", pc, bps) + } + } + // bs.ErrorBreakpoints should return testBadBP. + errorBps := bs.ErrorBreakpoints() + if len(errorBps) != 1 { + t.Errorf("ErrorBreakpoints: got %d want 1", len(errorBps)) + } else { + bp := errorBps[0] + if bp.Id != testBadBP.Id { + t.Errorf("ErrorBreakpoints: got id %q want 1", bp.Id) + } + if bp.Status == nil || !bp.Status.IsError { + t.Errorf("ErrorBreakpoints: got %v, want error", bp.Status) + } + } + // The error should have been removed by the last call to bs.ErrorBreakpoints. + errorBps = bs.ErrorBreakpoints() + if len(errorBps) != 0 { + t.Errorf("ErrorBreakpoints: got %d want 0", len(errorBps)) + } + // Even if testBadBP is sent in a new list, it should not be returned again. + bs.ProcessBreakpointList([]*cd.Breakpoint{testBadBP}) + errorBps = bs.ErrorBreakpoints() + if len(errorBps) != 0 { + t.Errorf("ErrorBreakpoints: got %d want 0", len(errorBps)) + } +} + +// Program implements the similarly-named interface in x/debug. 
+// ValueCollector should only call its BreakpointAtLine and DeleteBreakpoints methods. +type Program struct { + debug.Program + // breakpointPCs contains the state of code breakpoints -- true if the + // breakpoint is currently set, false if it has been deleted. + breakpointPCs map[uint64]bool +} + +func (p *Program) BreakpointAtLine(file string, line uint64) ([]uint64, error) { + var pcs []uint64 + switch { + case file == testFile && line == testLine: + pcs = []uint64{testPC1, testPC2} + case file == testFile && line == testLine2: + pcs = []uint64{testPC3} + case file == testFile && line == testLogLine: + pcs = []uint64{testLogPC} + default: + pcs = []uint64{0xbad} + } + for _, pc := range pcs { + p.breakpointPCs[pc] = true + } + return pcs, nil +} + +func (p *Program) DeleteBreakpoints(pcs []uint64) error { + for _, pc := range pcs { + p.breakpointPCs[pc] = false + } + return nil +} diff --git a/vendor/cloud.google.com/go/cmd/go-cloud-debug-agent/internal/controller/client.go b/vendor/cloud.google.com/go/cmd/go-cloud-debug-agent/internal/controller/client.go new file mode 100644 index 00000000..1bc2c982 --- /dev/null +++ b/vendor/cloud.google.com/go/cmd/go-cloud-debug-agent/internal/controller/client.go @@ -0,0 +1,279 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package controller is a library for interacting with the Google Cloud Debugger's Debuglet Controller service. 
+package controller + +import ( + "crypto/sha256" + "encoding/json" + "errors" + "fmt" + "log" + "sync" + + "golang.org/x/net/context" + "golang.org/x/oauth2" + cd "google.golang.org/api/clouddebugger/v2" + "google.golang.org/api/googleapi" + "google.golang.org/api/option" + "google.golang.org/api/transport" +) + +const ( + // agentVersionString identifies the agent to the service. + agentVersionString = "google.com/go-gcp/v0.2" + // initWaitToken is the wait token sent in the first Update request to a server. + initWaitToken = "init" +) + +var ( + // ErrListUnchanged is returned by List if the server time limit is reached + // before the list of breakpoints changes. + ErrListUnchanged = errors.New("breakpoint list unchanged") + // ErrDebuggeeDisabled is returned by List or Update if the server has disabled + // this Debuggee. The caller can retry later. + ErrDebuggeeDisabled = errors.New("debuglet disabled by server") +) + +// Controller manages a connection to the Debuglet Controller service. +type Controller struct { + s serviceInterface + // waitToken is sent with List requests so the server knows which set of + // breakpoints this client has already seen. Each successful List request + // returns a new waitToken to send in the next request. + waitToken string + // verbose determines whether to do some logging + verbose bool + // options, uniquifier and description are used in register. + options Options + uniquifier string + description string + // mu protects debuggeeID + mu sync.Mutex + // debuggeeID is returned from the server on registration, and is passed back + // to the server in List and Update requests. + debuggeeID string +} + +// Options controls how the Debuglet Controller client identifies itself to the server. +// See https://cloud.google.com/storage/docs/projects and +// https://cloud.google.com/tools/cloud-debugger/setting-up-on-compute-engine +// for further documentation of these parameters. 
+type Options struct { + ProjectNumber string // GCP Project Number. + ProjectID string // GCP Project ID. + AppModule string // Module name for the debugged program. + AppVersion string // Version number for this module. + SourceContexts []*cd.SourceContext // Description of source. + Verbose bool + TokenSource oauth2.TokenSource // Source of Credentials used for Stackdriver Debugger. +} + +type serviceInterface interface { + Register(ctx context.Context, req *cd.RegisterDebuggeeRequest) (*cd.RegisterDebuggeeResponse, error) + Update(ctx context.Context, debuggeeID, breakpointID string, req *cd.UpdateActiveBreakpointRequest) (*cd.UpdateActiveBreakpointResponse, error) + List(ctx context.Context, debuggeeID, waitToken string) (*cd.ListActiveBreakpointsResponse, error) +} + +var newService = func(ctx context.Context, tokenSource oauth2.TokenSource) (serviceInterface, error) { + httpClient, endpoint, err := transport.NewHTTPClient(ctx, option.WithTokenSource(tokenSource)) + if err != nil { + return nil, err + } + s, err := cd.New(httpClient) + if err != nil { + return nil, err + } + if endpoint != "" { + s.BasePath = endpoint + } + return &service{s: s}, nil +} + +type service struct { + s *cd.Service +} + +func (s service) Register(ctx context.Context, req *cd.RegisterDebuggeeRequest) (*cd.RegisterDebuggeeResponse, error) { + call := cd.NewControllerDebuggeesService(s.s).Register(req) + return call.Context(ctx).Do() +} + +func (s service) Update(ctx context.Context, debuggeeID, breakpointID string, req *cd.UpdateActiveBreakpointRequest) (*cd.UpdateActiveBreakpointResponse, error) { + call := cd.NewControllerDebuggeesBreakpointsService(s.s).Update(debuggeeID, breakpointID, req) + return call.Context(ctx).Do() +} + +func (s service) List(ctx context.Context, debuggeeID, waitToken string) (*cd.ListActiveBreakpointsResponse, error) { + call := cd.NewControllerDebuggeesBreakpointsService(s.s).List(debuggeeID) + call.WaitToken(waitToken) + return call.Context(ctx).Do() +} 
+ +// NewController connects to the Debuglet Controller server using the given options, +// and returns a Controller for that connection. +// Google Application Default Credentials are used to connect to the Debuglet Controller; +// see https://developers.google.com/identity/protocols/application-default-credentials +func NewController(ctx context.Context, o Options) (*Controller, error) { + // We build a JSON encoding of o.SourceContexts so we can hash it. + scJSON, err := json.Marshal(o.SourceContexts) + if err != nil { + scJSON = nil + o.SourceContexts = nil + } + + // Compute a uniquifier string by hashing the project number, app module name, + // app module version, debuglet version, and source context. + // The choice of hash function is arbitrary. + h := sha256.Sum256([]byte(fmt.Sprintf("%d %s %d %s %d %s %d %s %d %s", + len(o.ProjectNumber), o.ProjectNumber, + len(o.AppModule), o.AppModule, + len(o.AppVersion), o.AppVersion, + len(agentVersionString), agentVersionString, + len(scJSON), scJSON))) + uniquifier := fmt.Sprintf("%X", h[0:16]) // 32 hex characters + + description := o.ProjectID + if o.AppModule != "" { + description += "-" + o.AppModule + } + if o.AppVersion != "" { + description += "-" + o.AppVersion + } + + s, err := newService(ctx, o.TokenSource) + if err != nil { + return nil, err + } + + // Construct client. + c := &Controller{ + s: s, + waitToken: initWaitToken, + verbose: o.Verbose, + options: o, + uniquifier: uniquifier, + description: description, + } + + return c, nil +} + +func (c *Controller) getDebuggeeID(ctx context.Context) (string, error) { + c.mu.Lock() + defer c.mu.Unlock() + if c.debuggeeID != "" { + return c.debuggeeID, nil + } + // The debuglet hasn't been registered yet, or it is disabled and we should try registering again. + if err := c.register(ctx); err != nil { + return "", err + } + return c.debuggeeID, nil +} + +// List retrieves the current list of breakpoints from the server. 
+// If the set of breakpoints on the server is the same as the one returned in +// the previous call to List, the server can delay responding until it changes, +// and return an error instead if no change occurs before a time limit the +// server sets. List can't be called concurrently with itself. +func (c *Controller) List(ctx context.Context) (*cd.ListActiveBreakpointsResponse, error) { + id, err := c.getDebuggeeID(ctx) + if err != nil { + return nil, err + } + resp, err := c.s.List(ctx, id, c.waitToken) + if err != nil { + if isAbortedError(err) { + return nil, ErrListUnchanged + } + // For other errors, the protocol requires that we attempt to re-register. + c.mu.Lock() + defer c.mu.Unlock() + if regError := c.register(ctx); regError != nil { + return nil, regError + } + return nil, err + } + if resp == nil { + return nil, errors.New("no response") + } + if c.verbose { + log.Printf("List response: %v", resp) + } + c.waitToken = resp.NextWaitToken + return resp, nil +} + +// isAbortedError tests if err is a *googleapi.Error, that it contains one error +// in Errors, and that that error's Reason is "aborted". +func isAbortedError(err error) bool { + e, _ := err.(*googleapi.Error) + if e == nil { + return false + } + if len(e.Errors) != 1 { + return false + } + return e.Errors[0].Reason == "aborted" +} + +// Update reports information to the server about a breakpoint that was hit. +// Update can be called concurrently with List and Update. +func (c *Controller) Update(ctx context.Context, breakpointID string, bp *cd.Breakpoint) error { + req := &cd.UpdateActiveBreakpointRequest{Breakpoint: bp} + if c.verbose { + log.Printf("sending update for %s: %v", breakpointID, req) + } + id, err := c.getDebuggeeID(ctx) + if err != nil { + return err + } + _, err = c.s.Update(ctx, id, breakpointID, req) + return err +} + +// register calls the Debuglet Controller Register method, and sets c.debuggeeID. +// c.mu should be locked while calling this function. 
List and Update can't +// make progress until it returns. +func (c *Controller) register(ctx context.Context) error { + req := cd.RegisterDebuggeeRequest{ + Debuggee: &cd.Debuggee{ + AgentVersion: agentVersionString, + Description: c.description, + Project: c.options.ProjectNumber, + SourceContexts: c.options.SourceContexts, + Uniquifier: c.uniquifier, + }, + } + resp, err := c.s.Register(ctx, &req) + if err != nil { + return err + } + if resp == nil { + return errors.New("register: no response") + } + if resp.Debuggee.IsDisabled { + // Setting c.debuggeeID to empty makes sure future List and Update calls + // will call register first. + c.debuggeeID = "" + } else { + c.debuggeeID = resp.Debuggee.Id + } + if c.debuggeeID == "" { + return ErrDebuggeeDisabled + } + return nil +} diff --git a/vendor/cloud.google.com/go/cmd/go-cloud-debug-agent/internal/controller/client_test.go b/vendor/cloud.google.com/go/cmd/go-cloud-debug-agent/internal/controller/client_test.go new file mode 100644 index 00000000..fb439c9d --- /dev/null +++ b/vendor/cloud.google.com/go/cmd/go-cloud-debug-agent/internal/controller/client_test.go @@ -0,0 +1,218 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package controller + +import ( + "testing" + + "golang.org/x/net/context" + "golang.org/x/oauth2" + + cd "google.golang.org/api/clouddebugger/v2" + "google.golang.org/api/googleapi" +) + +const ( + testDebuggeeID = "d12345" + testBreakpointID = "bp12345" +) + +var ( + // The sequence of wait tokens in List requests and responses. + expectedWaitToken = []string{"init", "token1", "token2", "token1", "token1"} + // The set of breakpoints returned from each List call. + expectedBreakpoints = [][]*cd.Breakpoint{ + nil, + { + &cd.Breakpoint{ + Id: testBreakpointID, + IsFinalState: false, + Location: &cd.SourceLocation{Line: 42, Path: "foo.go"}, + }, + }, + nil, + } + abortedError error = &googleapi.Error{ + Code: 409, + Message: "Conflict", + Body: `{ + "error": { + "errors": [ + { + "domain": "global", + "reason": "aborted", + "message": "Conflict" + } + ], + "code": 409, + "message": "Conflict" + } + }`, + Errors: []googleapi.ErrorItem{ + {Reason: "aborted", Message: "Conflict"}, + }, + } + backendError error = &googleapi.Error{ + Code: 503, + Message: "Backend Error", + Body: `{ + "error": { + "errors": [ + { + "domain": "global", + "reason": "backendError", + "message": "Backend Error" + } + ], + "code": 503, + "message": "Backend Error" + } + }`, + Errors: []googleapi.ErrorItem{ + {Reason: "backendError", Message: "Backend Error"}, + }, + } +) + +type mockService struct { + t *testing.T + listCallsSeen int + registerCallsSeen int +} + +func (s *mockService) Register(ctx context.Context, req *cd.RegisterDebuggeeRequest) (*cd.RegisterDebuggeeResponse, error) { + s.registerCallsSeen++ + if req.Debuggee == nil { + s.t.Errorf("missing debuggee") + return nil, nil + } + if req.Debuggee.AgentVersion == "" { + s.t.Errorf("missing agent version") + } + if req.Debuggee.Description == "" { + s.t.Errorf("missing debuglet description") + } + if req.Debuggee.Project == "" { + s.t.Errorf("missing project id") + } + if req.Debuggee.Uniquifier == "" { + s.t.Errorf("missing 
uniquifier") + } + return &cd.RegisterDebuggeeResponse{ + Debuggee: &cd.Debuggee{Id: testDebuggeeID}, + }, nil +} + +func (s *mockService) Update(ctx context.Context, id, breakpointID string, req *cd.UpdateActiveBreakpointRequest) (*cd.UpdateActiveBreakpointResponse, error) { + if id != testDebuggeeID { + s.t.Errorf("got debuggee ID %s want %s", id, testDebuggeeID) + } + if breakpointID != testBreakpointID { + s.t.Errorf("got breakpoint ID %s want %s", breakpointID, testBreakpointID) + } + if !req.Breakpoint.IsFinalState { + s.t.Errorf("got IsFinalState = false, want true") + } + return nil, nil +} + +func (s *mockService) List(ctx context.Context, id, waitToken string) (*cd.ListActiveBreakpointsResponse, error) { + if id != testDebuggeeID { + s.t.Errorf("got debuggee ID %s want %s", id, testDebuggeeID) + } + if waitToken != expectedWaitToken[s.listCallsSeen] { + s.t.Errorf("got wait token %s want %s", waitToken, expectedWaitToken[s.listCallsSeen]) + } + s.listCallsSeen++ + if s.listCallsSeen == 4 { + return nil, backendError + } + if s.listCallsSeen == 5 { + return nil, abortedError + } + resp := &cd.ListActiveBreakpointsResponse{ + Breakpoints: expectedBreakpoints[s.listCallsSeen-1], + NextWaitToken: expectedWaitToken[s.listCallsSeen], + } + return resp, nil +} + +func TestDebugletControllerClientLibrary(t *testing.T) { + var ( + m *mockService + c *Controller + list *cd.ListActiveBreakpointsResponse + err error + ) + m = &mockService{t: t} + newService = func(context.Context, oauth2.TokenSource) (serviceInterface, error) { return m, nil } + opts := Options{ + ProjectNumber: "5", + ProjectID: "p1", + AppModule: "mod1", + AppVersion: "v1", + } + ctx := context.Background() + if c, err = NewController(ctx, opts); err != nil { + t.Fatal("Initializing Controller client:", err) + } + if list, err = c.List(ctx); err != nil { + t.Fatal("List:", err) + } + if m.registerCallsSeen != 1 { + t.Errorf("saw %d Register calls, want 1", m.registerCallsSeen) + } + if list, err = 
c.List(ctx); err != nil { + t.Fatal("List:", err) + } + if len(list.Breakpoints) != 1 { + t.Fatalf("got %d breakpoints, want 1", len(list.Breakpoints)) + } + if err = c.Update(ctx, list.Breakpoints[0].Id, &cd.Breakpoint{Id: testBreakpointID, IsFinalState: true}); err != nil { + t.Fatal("Update:", err) + } + if list, err = c.List(ctx); err != nil { + t.Fatal("List:", err) + } + if m.registerCallsSeen != 1 { + t.Errorf("saw %d Register calls, want 1", m.registerCallsSeen) + } + // The next List call produces an error that should cause a Register call. + if list, err = c.List(ctx); err == nil { + t.Fatal("List should have returned an error") + } + if m.registerCallsSeen != 2 { + t.Errorf("saw %d Register calls, want 2", m.registerCallsSeen) + } + // The next List call produces an error that should not cause a Register call. + if list, err = c.List(ctx); err == nil { + t.Fatal("List should have returned an error") + } + if m.registerCallsSeen != 2 { + t.Errorf("saw %d Register calls, want 2", m.registerCallsSeen) + } + if m.listCallsSeen != 5 { + t.Errorf("saw %d list calls, want 5", m.listCallsSeen) + } +} + +func TestIsAbortedError(t *testing.T) { + if !isAbortedError(abortedError) { + t.Errorf("isAborted(%+v): got false, want true", abortedError) + } + if isAbortedError(backendError) { + t.Errorf("isAborted(%+v): got true, want false", backendError) + } +} diff --git a/vendor/cloud.google.com/go/cmd/go-cloud-debug-agent/internal/valuecollector/valuecollector.go b/vendor/cloud.google.com/go/cmd/go-cloud-debug-agent/internal/valuecollector/valuecollector.go new file mode 100644 index 00000000..8dadc2f6 --- /dev/null +++ b/vendor/cloud.google.com/go/cmd/go-cloud-debug-agent/internal/valuecollector/valuecollector.go @@ -0,0 +1,460 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package valuecollector is used to collect the values of variables in a program. +package valuecollector + +import ( + "bytes" + "fmt" + "strconv" + "strings" + + "golang.org/x/debug" + cd "google.golang.org/api/clouddebugger/v2" +) + +const ( + maxArrayLength = 50 + maxMapLength = 20 +) + +// Collector is given references to variables from a program being debugged +// using AddVariable. Then when ReadValues is called, the Collector will fetch +// the values of those variables. Any variables referred to by those values +// will also be fetched; e.g. the targets of pointers, members of structs, +// elements of slices, etc. This continues iteratively, building a graph of +// values, until all the reachable values are fetched, or a size limit is +// reached. +// +// Variables are passed to the Collector as debug.Var, which is used by x/debug +// to represent references to variables. Values are returned as cd.Variable, +// which is used by the Debuglet Controller to represent the graph of values. +// +// For example, if the program has a struct variable: +// +// foo := SomeStruct{a:42, b:"xyz"} +// +// and we call AddVariable with a reference to foo, we will get back a result +// like: +// +// cd.Variable{Name:"foo", VarTableIndex:10} +// +// which denotes a variable named "foo" which will have its value stored in +// element 10 of the table that will later be returned by ReadValues. 
That +// element might be: +// +// out[10] = &cd.Variable{Members:{{Name:"a", VarTableIndex:11},{Name:"b", VarTableIndex:12}}} +// +// which denotes a struct with two members a and b, whose values are in elements +// 11 and 12 of the output table: +// +// out[11] = &cd.Variable{Value:"42"} +// out[12] = &cd.Variable{Value:"xyz"} +type Collector struct { + // prog is the program being debugged. + prog debug.Program + // limit is the maximum size of the output slice of values. + limit int + // index is a map from references (variables and map elements) to their + // locations in the table. + index map[reference]int + // table contains the references, including those given to the + // Collector directly and those the Collector itself found. + // If VarTableIndex is set to 0 in a cd.Variable, it is ignored, so the first entry + // of table can't be used. On initialization we put a dummy value there. + table []reference +} + +// reference represents a value which is in the queue to be read by the +// collector. It is either a debug.Var, or a mapElement. +type reference interface{} + +// mapElement represents an element of a map in the debugged program's memory. +type mapElement struct { + debug.Map + index uint64 +} + +// NewCollector returns a Collector for the given program and size limit. +// The limit is the maximum size of the slice of values returned by ReadValues. +func NewCollector(prog debug.Program, limit int) *Collector { + return &Collector{ + prog: prog, + limit: limit, + index: make(map[reference]int), + table: []reference{debug.Var{}}, + } +} + +// AddVariable adds another variable to be collected. +// The Collector doesn't get the value immediately; it returns a cd.Variable +// that contains an index into the table which will later be returned by +// ReadValues. 
+func (c *Collector) AddVariable(lv debug.LocalVar) *cd.Variable { + ret := &cd.Variable{Name: lv.Name} + if index, ok := c.add(lv.Var); !ok { + // If the add call failed, it's because we reached the size limit. + // The Debuglet Controller's convention is to pass it a "Not Captured" error + // in this case. + ret.Status = statusMessage(messageNotCaptured, true, refersToVariableName) + } else { + ret.VarTableIndex = int64(index) + } + return ret +} + +// add adds a reference to the set of values to be read from the +// program. It returns the index in the output table that will contain the +// corresponding value. It fails if the table has reached the size limit. +// It deduplicates references, so the index may be the same as one that was +// returned from an earlier add call. +func (c *Collector) add(r reference) (outputIndex int, ok bool) { + if i, ok := c.index[r]; ok { + return i, true + } + i := len(c.table) + if i >= c.limit { + return 0, false + } + c.index[r] = i + c.table = append(c.table, r) + return i, true +} + +func addMember(v *cd.Variable, name string) *cd.Variable { + v2 := &cd.Variable{Name: name} + v.Members = append(v.Members, v2) + return v2 +} + +// ReadValues fetches values of the variables that were passed to the Collector +// with AddVariable. The values of any new variables found are also fetched, +// e.g. the targets of pointers or the members of structs, until we reach the +// size limit or we run out of values to fetch. +// The results are output as a []*cd.Variable, which is the type we need to send +// to the Debuglet Controller after we trigger a breakpoint. +func (c *Collector) ReadValues() (out []*cd.Variable) { + for i := 0; i < len(c.table); i++ { + // Create a new cd.Variable for this value, and append it to the output. + dcv := new(cd.Variable) + out = append(out, dcv) + if i == 0 { + // The first element is unused. 
+ continue + } + switch x := c.table[i].(type) { + case mapElement: + key, value, err := c.prog.MapElement(x.Map, x.index) + if err != nil { + dcv.Status = statusMessage(err.Error(), true, refersToVariableValue) + continue + } + // Add a member for the key. + member := addMember(dcv, "key") + if index, ok := c.add(key); !ok { + // The table is full. + member.Status = statusMessage(messageNotCaptured, true, refersToVariableName) + continue + } else { + member.VarTableIndex = int64(index) + } + // Add a member for the value. + member = addMember(dcv, "value") + if index, ok := c.add(value); !ok { + // The table is full. + member.Status = statusMessage(messageNotCaptured, true, refersToVariableName) + } else { + member.VarTableIndex = int64(index) + } + case debug.Var: + if v, err := c.prog.Value(x); err != nil { + dcv.Status = statusMessage(err.Error(), true, refersToVariableValue) + } else { + c.FillValue(v, dcv) + } + } + } + return out +} + +// indexable is an interface for arrays, slices and channels. +type indexable interface { + Len() uint64 + Element(uint64) debug.Var +} + +// channel implements indexable. +type channel struct { + debug.Channel +} + +func (c channel) Len() uint64 { + return c.Length +} + +var ( + _ indexable = debug.Array{} + _ indexable = debug.Slice{} + _ indexable = channel{} +) + +// FillValue copies a value into a cd.Variable. Any variables referred to by +// that value, e.g. struct members and pointer targets, are added to the +// collector's queue, to be fetched later by ReadValues. +func (c *Collector) FillValue(v debug.Value, dcv *cd.Variable) { + if c, ok := v.(debug.Channel); ok { + // Convert to channel, which implements indexable. + v = channel{c} + } + // Fill in dcv in a manner depending on the type of the value we got. + switch val := v.(type) { + case int8, int16, int32, int64, bool, uint8, uint16, uint32, uint64, float32, float64, complex64, complex128: + // For simple types, we just print the value to dcv.Value. 
+ dcv.Value = fmt.Sprint(val) + case string: + // Put double quotes around strings. + dcv.Value = strconv.Quote(val) + case debug.String: + if uint64(len(val.String)) < val.Length { + // This string value was truncated. + dcv.Value = strconv.Quote(val.String + "...") + } else { + dcv.Value = strconv.Quote(val.String) + } + case debug.Struct: + // For structs, we add an entry to dcv.Members for each field in the + // struct. + // Each member will contain the name of the field, and the index in the + // output table which will contain the value of that field. + for _, f := range val.Fields { + member := addMember(dcv, f.Name) + if index, ok := c.add(f.Var); !ok { + // The table is full. + member.Status = statusMessage(messageNotCaptured, true, refersToVariableName) + } else { + member.VarTableIndex = int64(index) + } + } + case debug.Map: + dcv.Value = fmt.Sprintf("len = %d", val.Length) + for i := uint64(0); i < val.Length; i++ { + field := addMember(dcv, `⚫`) + if i == maxMapLength { + field.Name = "..." + field.Status = statusMessage(messageTruncated, true, refersToVariableName) + break + } + if index, ok := c.add(mapElement{val, i}); !ok { + // The value table is full; add a member to contain the error message. + field.Name = "..." + field.Status = statusMessage(messageNotCaptured, true, refersToVariableName) + break + } else { + field.VarTableIndex = int64(index) + } + } + case debug.Pointer: + if val.Address == 0 { + dcv.Value = "" + } else if val.TypeID == 0 { + // We don't know the type of the pointer, so just output the address as + // the value. + dcv.Value = fmt.Sprintf("0x%X", val.Address) + dcv.Status = statusMessage(messageUnknownPointerType, false, refersToVariableName) + } else { + // Adds the pointed-to variable to the table, and links this value to + // that table entry through VarTableIndex. 
+ dcv.Value = fmt.Sprintf("0x%X", val.Address) + target := addMember(dcv, "") + if index, ok := c.add(debug.Var(val)); !ok { + target.Status = statusMessage(messageNotCaptured, true, refersToVariableName) + } else { + target.VarTableIndex = int64(index) + } + } + case indexable: + // Arrays, slices and channels. + dcv.Value = "len = " + fmt.Sprint(val.Len()) + for j := uint64(0); j < val.Len(); j++ { + field := addMember(dcv, fmt.Sprint(`[`, j, `]`)) + if j == maxArrayLength { + field.Name = "..." + field.Status = statusMessage(messageTruncated, true, refersToVariableName) + break + } + vr := val.Element(j) + if index, ok := c.add(vr); !ok { + // The value table is full; add a member to contain the error message. + field.Name = "..." + field.Status = statusMessage(messageNotCaptured, true, refersToVariableName) + break + } else { + // Add a member with the index as the name. + field.VarTableIndex = int64(index) + } + } + default: + dcv.Status = statusMessage(messageUnknownType, false, refersToVariableName) + } +} + +// statusMessage returns a *cd.StatusMessage with the given message, IsError +// field and refersTo field. +func statusMessage(msg string, isError bool, refersTo int) *cd.StatusMessage { + return &cd.StatusMessage{ + Description: &cd.FormatMessage{Format: "$0", Parameters: []string{msg}}, + IsError: isError, + RefersTo: refersToString[refersTo], + } +} + +// LogString produces a string for a logpoint, substituting in variable values +// using evaluatedExpressions and varTable. +func LogString(s string, evaluatedExpressions []*cd.Variable, varTable []*cd.Variable) string { + var buf bytes.Buffer + fmt.Fprintf(&buf, "LOGPOINT: ") + seen := make(map[*cd.Variable]bool) + for i := 0; i < len(s); { + if s[i] == '$' { + i++ + if num, n, ok := parseToken(s[i:], len(evaluatedExpressions)-1); ok { + // This token is one of $0, $1, etc. Write the corresponding expression. 
+ writeExpression(&buf, evaluatedExpressions[num], false, varTable, seen) + i += n + } else { + // Something else, like $$. + buf.WriteByte(s[i]) + i++ + } + } else { + buf.WriteByte(s[i]) + i++ + } + } + return buf.String() +} + +func parseToken(s string, max int) (num int, bytesRead int, ok bool) { + var i int + for i < len(s) && s[i] >= '0' && s[i] <= '9' { + i++ + } + num, err := strconv.Atoi(s[:i]) + return num, i, err == nil && num <= max +} + +// writeExpression recursively writes variables to buf, in a format suitable +// for logging. If printName is true, writes the name of the variable. +func writeExpression(buf *bytes.Buffer, v *cd.Variable, printName bool, varTable []*cd.Variable, seen map[*cd.Variable]bool) { + if v == nil { + // Shouldn't happen. + return + } + name, value, status, members := v.Name, v.Value, v.Status, v.Members + + // If v.VarTableIndex is not zero, it refers to an element of varTable. + // We merge its fields with the fields we got from v. + var other *cd.Variable + if idx := int(v.VarTableIndex); idx > 0 && idx < len(varTable) { + other = varTable[idx] + } + if other != nil { + if name == "" { + name = other.Name + } + if value == "" { + value = other.Value + } + if status == nil { + status = other.Status + } + if len(members) == 0 { + members = other.Members + } + } + if printName && name != "" { + buf.WriteString(name) + buf.WriteByte(':') + } + + // If we have seen this value before, write "..." rather than repeating it. + if seen[v] { + buf.WriteString("...") + return + } + seen[v] = true + if other != nil { + if seen[other] { + buf.WriteString("...") + return + } + seen[other] = true + } + + if value != "" && !strings.HasPrefix(value, "len = ") { + // A plain value. + buf.WriteString(value) + } else if status != nil && status.Description != nil { + // An error. + for _, p := range status.Description.Parameters { + buf.WriteByte('(') + buf.WriteString(p) + buf.WriteByte(')') + } + } else if name == `⚫` { + // A map element. 
+ first := true + for _, member := range members { + if first { + first = false + } else { + buf.WriteByte(':') + } + writeExpression(buf, member, false, varTable, seen) + } + } else { + // A map, array, slice, channel, or struct. + isStruct := value == "" + first := true + buf.WriteByte('{') + for _, member := range members { + if first { + first = false + } else { + buf.WriteString(", ") + } + writeExpression(buf, member, isStruct, varTable, seen) + } + buf.WriteByte('}') + } +} + +const ( + // Error messages for cd.StatusMessage + messageNotCaptured = "Not captured" + messageTruncated = "Truncated" + messageUnknownPointerType = "Unknown pointer type" + messageUnknownType = "Unknown type" + // RefersTo values for cd.StatusMessage. + refersToVariableName = iota + refersToVariableValue +) + +// refersToString contains the strings for each refersTo value. +// See the definition of StatusMessage in the v2/clouddebugger package. +var refersToString = map[int]string{ + refersToVariableName: "VARIABLE_NAME", + refersToVariableValue: "VARIABLE_VALUE", +} diff --git a/vendor/cloud.google.com/go/cmd/go-cloud-debug-agent/internal/valuecollector/valuecollector_test.go b/vendor/cloud.google.com/go/cmd/go-cloud-debug-agent/internal/valuecollector/valuecollector_test.go new file mode 100644 index 00000000..2bc97dcf --- /dev/null +++ b/vendor/cloud.google.com/go/cmd/go-cloud-debug-agent/internal/valuecollector/valuecollector_test.go @@ -0,0 +1,418 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package valuecollector + +import ( + "fmt" + "reflect" + "testing" + + "golang.org/x/debug" + cd "google.golang.org/api/clouddebugger/v2" +) + +const ( + // Some arbitrary type IDs for the test, for use in debug.Var's TypeID field. + // A TypeID of 0 means the type is unknown, so we start at 1. + int16Type = iota + 1 + stringType + structType + pointerType + arrayType + int32Type + debugStringType + mapType + channelType + sliceType +) + +func TestValueCollector(t *testing.T) { + // Construct the collector. + c := NewCollector(&Program{}, 26) + // Add some variables of various types, whose values we want the collector to read. + variablesToAdd := []debug.LocalVar{ + {Name: "a", Var: debug.Var{int16Type, 0x1}}, + {Name: "b", Var: debug.Var{stringType, 0x2}}, + {Name: "c", Var: debug.Var{structType, 0x3}}, + {Name: "d", Var: debug.Var{pointerType, 0x4}}, + {Name: "e", Var: debug.Var{arrayType, 0x5}}, + {Name: "f", Var: debug.Var{debugStringType, 0x6}}, + {Name: "g", Var: debug.Var{mapType, 0x7}}, + {Name: "h", Var: debug.Var{channelType, 0x8}}, + {Name: "i", Var: debug.Var{sliceType, 0x9}}, + } + expectedResults := []*cd.Variable{ + &cd.Variable{Name: "a", VarTableIndex: 1}, + &cd.Variable{Name: "b", VarTableIndex: 2}, + &cd.Variable{Name: "c", VarTableIndex: 3}, + &cd.Variable{Name: "d", VarTableIndex: 4}, + &cd.Variable{Name: "e", VarTableIndex: 5}, + &cd.Variable{Name: "f", VarTableIndex: 6}, + &cd.Variable{Name: "g", VarTableIndex: 7}, + &cd.Variable{Name: "h", VarTableIndex: 8}, + &cd.Variable{Name: "i", VarTableIndex: 9}, + } + for i, v := range variablesToAdd { + added := c.AddVariable(v) + if !reflect.DeepEqual(added, expectedResults[i]) { + t.Errorf("AddVariable: got %+v want %+v", *added, *expectedResults[i]) + } + } + // Read the values, compare the output to what we expect. 
+ v := c.ReadValues() + expectedValues := []*cd.Variable{ + &cd.Variable{}, + &cd.Variable{Value: "1"}, + &cd.Variable{Value: `"hello"`}, + &cd.Variable{ + Members: []*cd.Variable{ + &cd.Variable{Name: "x", VarTableIndex: 1}, + &cd.Variable{Name: "y", VarTableIndex: 2}, + }, + }, + &cd.Variable{ + Members: []*cd.Variable{ + &cd.Variable{VarTableIndex: 1}, + }, + Value: "0x1", + }, + &cd.Variable{ + Members: []*cd.Variable{ + &cd.Variable{Name: "[0]", VarTableIndex: 10}, + &cd.Variable{Name: "[1]", VarTableIndex: 11}, + &cd.Variable{Name: "[2]", VarTableIndex: 12}, + &cd.Variable{Name: "[3]", VarTableIndex: 13}, + }, + Value: "len = 4", + }, + &cd.Variable{Value: `"world"`}, + &cd.Variable{ + Members: []*cd.Variable{ + &cd.Variable{Name: "⚫", VarTableIndex: 14}, + &cd.Variable{Name: "⚫", VarTableIndex: 15}, + &cd.Variable{Name: "⚫", VarTableIndex: 16}, + }, + Value: "len = 3", + }, + &cd.Variable{ + Members: []*cd.Variable{ + &cd.Variable{Name: "[0]", VarTableIndex: 17}, + &cd.Variable{Name: "[1]", VarTableIndex: 18}, + }, + Value: "len = 2", + }, + &cd.Variable{ + Members: []*cd.Variable{ + &cd.Variable{Name: "[0]", VarTableIndex: 19}, + &cd.Variable{Name: "[1]", VarTableIndex: 20}, + }, + Value: "len = 2", + }, + &cd.Variable{Value: "100"}, + &cd.Variable{Value: "104"}, + &cd.Variable{Value: "108"}, + &cd.Variable{Value: "112"}, + &cd.Variable{ + Members: []*cd.Variable{ + &cd.Variable{Name: "key", VarTableIndex: 21}, + &cd.Variable{Name: "value", VarTableIndex: 22}, + }, + }, + &cd.Variable{ + Members: []*cd.Variable{ + &cd.Variable{Name: "key", VarTableIndex: 23}, + &cd.Variable{Name: "value", VarTableIndex: 24}, + }, + }, + &cd.Variable{ + Members: []*cd.Variable{ + &cd.Variable{Name: "key", VarTableIndex: 25}, + &cd.Variable{ + Name: "value", + Status: &cd.StatusMessage{ + Description: &cd.FormatMessage{ + Format: "$0", + Parameters: []string{"Not captured"}, + }, + IsError: true, + RefersTo: "VARIABLE_NAME", + }, + }, + }, + }, + &cd.Variable{Value: "246"}, + 
&cd.Variable{Value: "210"}, + &cd.Variable{Value: "300"}, + &cd.Variable{Value: "304"}, + &cd.Variable{Value: "400"}, + &cd.Variable{Value: "404"}, + &cd.Variable{Value: "1400"}, + &cd.Variable{Value: "1404"}, + &cd.Variable{Value: "2400"}, + } + if !reflect.DeepEqual(v, expectedValues) { + t.Errorf("ReadValues: got %v want %v", v, expectedValues) + // Do element-by-element comparisons, for more useful error messages. + for i := range v { + if i < len(expectedValues) && !reflect.DeepEqual(v[i], expectedValues[i]) { + t.Errorf("element %d: got %+v want %+v", i, *v[i], *expectedValues[i]) + } + } + } +} + +// Program implements the similarly-named interface in x/debug. +// ValueCollector should only call its Value and MapElement methods. +type Program struct { + debug.Program +} + +func (p *Program) Value(v debug.Var) (debug.Value, error) { + // We determine what to return using v.TypeID. + switch v.TypeID { + case int16Type: + // We use the address as the value, so that we're testing whether the right + // address was calculated. + return int16(v.Address), nil + case stringType: + // A string. + return "hello", nil + case structType: + // A struct with two elements. + return debug.Struct{ + Fields: []debug.StructField{ + { + Name: "x", + Var: debug.Var{int16Type, 0x1}, + }, + { + Name: "y", + Var: debug.Var{stringType, 0x2}, + }, + }, + }, nil + case pointerType: + // A pointer to the first variable above. + return debug.Pointer{int16Type, 0x1}, nil + case arrayType: + // An array of 4 32-bit-wide elements. 
+ return debug.Array{ + ElementTypeID: int32Type, + Address: 0x64, + Length: 4, + StrideBits: 32, + }, nil + case debugStringType: + return debug.String{ + Length: 5, + String: "world", + }, nil + case mapType: + return debug.Map{ + TypeID: 99, + Address: 0x100, + Length: 3, + }, nil + case channelType: + return debug.Channel{ + ElementTypeID: int32Type, + Address: 200, + Buffer: 210, + Length: 2, + Capacity: 10, + Stride: 4, + BufferStart: 9, + }, nil + case sliceType: + // A slice of 2 32-bit-wide elements. + return debug.Slice{ + Array: debug.Array{ + ElementTypeID: int32Type, + Address: 300, + Length: 2, + StrideBits: 32, + }, + Capacity: 50, + }, nil + case int32Type: + // We use the address as the value, so that we're testing whether the right + // address was calculated. + return int32(v.Address), nil + } + return nil, fmt.Errorf("unexpected Value request") +} + +func (p *Program) MapElement(m debug.Map, index uint64) (debug.Var, debug.Var, error) { + return debug.Var{TypeID: int16Type, Address: 1000*index + 400}, + debug.Var{TypeID: int32Type, Address: 1000*index + 404}, + nil +} + +func TestLogString(t *testing.T) { + bp := cd.Breakpoint{ + Action: "LOG", + LogMessageFormat: "$0 hello, $$7world! 
$1 $2 $3 $4 $5$6 $7 $8", + EvaluatedExpressions: []*cd.Variable{ + &cd.Variable{Name: "a", VarTableIndex: 1}, + &cd.Variable{Name: "b", VarTableIndex: 2}, + &cd.Variable{Name: "c", VarTableIndex: 3}, + &cd.Variable{Name: "d", VarTableIndex: 4}, + &cd.Variable{Name: "e", VarTableIndex: 5}, + &cd.Variable{Name: "f", VarTableIndex: 6}, + &cd.Variable{Name: "g", VarTableIndex: 7}, + &cd.Variable{Name: "h", VarTableIndex: 8}, + &cd.Variable{Name: "i", VarTableIndex: 9}, + }, + } + varTable := []*cd.Variable{ + &cd.Variable{}, + &cd.Variable{Value: "1"}, + &cd.Variable{Value: `"hello"`}, + &cd.Variable{ + Members: []*cd.Variable{ + &cd.Variable{Name: "x", Value: "1"}, + &cd.Variable{Name: "y", Value: `"hello"`}, + &cd.Variable{Name: "z", VarTableIndex: 3}, + }, + }, + &cd.Variable{ + Members: []*cd.Variable{ + &cd.Variable{VarTableIndex: 1}, + }, + Value: "0x1", + }, + &cd.Variable{ + Members: []*cd.Variable{ + &cd.Variable{Name: "[0]", VarTableIndex: 10}, + &cd.Variable{Name: "[1]", VarTableIndex: 11}, + &cd.Variable{Name: "[2]", VarTableIndex: 12}, + &cd.Variable{Name: "[3]", VarTableIndex: 13}, + }, + Value: "len = 4", + }, + &cd.Variable{Value: `"world"`}, + &cd.Variable{ + Members: []*cd.Variable{ + &cd.Variable{Name: "⚫", VarTableIndex: 14}, + &cd.Variable{Name: "⚫", VarTableIndex: 15}, + &cd.Variable{Name: "⚫", VarTableIndex: 16}, + }, + Value: "len = 3", + }, + &cd.Variable{ + Members: []*cd.Variable{ + &cd.Variable{Name: "[0]", VarTableIndex: 17}, + &cd.Variable{Name: "[1]", VarTableIndex: 18}, + }, + Value: "len = 2", + }, + &cd.Variable{ + Members: []*cd.Variable{ + &cd.Variable{Name: "[0]", VarTableIndex: 19}, + &cd.Variable{Name: "[1]", VarTableIndex: 20}, + }, + Value: "len = 2", + }, + &cd.Variable{Value: "100"}, + &cd.Variable{Value: "104"}, + &cd.Variable{Value: "108"}, + &cd.Variable{Value: "112"}, + &cd.Variable{ + Members: []*cd.Variable{ + &cd.Variable{Name: "key", VarTableIndex: 21}, + &cd.Variable{Name: "value", VarTableIndex: 22}, + }, + }, + 
&cd.Variable{ + Members: []*cd.Variable{ + &cd.Variable{Name: "key", VarTableIndex: 23}, + &cd.Variable{Name: "value", VarTableIndex: 24}, + }, + }, + &cd.Variable{ + Members: []*cd.Variable{ + &cd.Variable{Name: "key", VarTableIndex: 25}, + &cd.Variable{ + Name: "value", + Status: &cd.StatusMessage{ + Description: &cd.FormatMessage{ + Format: "$0", + Parameters: []string{"Not captured"}, + }, + IsError: true, + RefersTo: "VARIABLE_NAME", + }, + }, + }, + }, + &cd.Variable{Value: "246"}, + &cd.Variable{Value: "210"}, + &cd.Variable{Value: "300"}, + &cd.Variable{Value: "304"}, + &cd.Variable{Value: "400"}, + &cd.Variable{Value: "404"}, + &cd.Variable{Value: "1400"}, + &cd.Variable{Value: "1404"}, + &cd.Variable{Value: "2400"}, + } + s := LogString(bp.LogMessageFormat, bp.EvaluatedExpressions, varTable) + expected := `LOGPOINT: 1 hello, $7world! "hello" {x:1, y:"hello", z:...} ` + + `0x1 {100, 104, 108, 112} "world"{400:404, 1400:1404, 2400:(Not captured)} ` + + `{246, 210} {300, 304}` + if s != expected { + t.Errorf("LogString: got %q want %q", s, expected) + } +} + +func TestParseToken(t *testing.T) { + for _, c := range []struct { + s string + max int + num int + n int + ok bool + }{ + {"", 0, 0, 0, false}, + {".", 0, 0, 0, false}, + {"0", 0, 0, 1, true}, + {"0", 1, 0, 1, true}, + {"00", 0, 0, 2, true}, + {"1.", 1, 1, 1, true}, + {"1.", 0, 0, 0, false}, + {"10", 10, 10, 2, true}, + {"10..", 10, 10, 2, true}, + {"10", 11, 10, 2, true}, + {"10..", 11, 10, 2, true}, + {"10", 9, 0, 0, false}, + {"10..", 9, 0, 0, false}, + {" 10", 10, 0, 0, false}, + {"010", 10, 10, 3, true}, + {"123456789", 123456789, 123456789, 9, true}, + {"123456789", 123456788, 0, 0, false}, + {"123456789123456789123456789", 999999999, 0, 0, false}, + } { + num, n, ok := parseToken(c.s, c.max) + if ok != c.ok { + t.Errorf("parseToken(%q, %d): got ok=%t want ok=%t", c.s, c.max, ok, c.ok) + continue + } + if !ok { + continue + } + if num != c.num || n != c.n { + t.Errorf("parseToken(%q, %d): got 
%d,%d,%t want %d,%d,%t", c.s, c.max, num, n, ok, c.num, c.n, c.ok) + } + } +} diff --git a/vendor/cloud.google.com/go/compute/metadata/metadata.go b/vendor/cloud.google.com/go/compute/metadata/metadata.go new file mode 100644 index 00000000..f9d2bef6 --- /dev/null +++ b/vendor/cloud.google.com/go/compute/metadata/metadata.go @@ -0,0 +1,438 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package metadata provides access to Google Compute Engine (GCE) +// metadata and API service accounts. +// +// This package is a wrapper around the GCE metadata service, +// as documented at https://developers.google.com/compute/docs/metadata. +package metadata // import "cloud.google.com/go/compute/metadata" + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "net" + "net/http" + "net/url" + "os" + "runtime" + "strings" + "sync" + "time" + + "golang.org/x/net/context" + "golang.org/x/net/context/ctxhttp" + + "cloud.google.com/go/internal" +) + +const ( + // metadataIP is the documented metadata server IP address. + metadataIP = "169.254.169.254" + + // metadataHostEnv is the environment variable specifying the + // GCE metadata hostname. If empty, the default value of + // metadataIP ("169.254.169.254") is used instead. + // This is variable name is not defined by any spec, as far as + // I know; it was made up for the Go package. 
+ metadataHostEnv = "GCE_METADATA_HOST" +) + +type cachedValue struct { + k string + trim bool + mu sync.Mutex + v string +} + +var ( + projID = &cachedValue{k: "project/project-id", trim: true} + projNum = &cachedValue{k: "project/numeric-project-id", trim: true} + instID = &cachedValue{k: "instance/id", trim: true} +) + +var ( + metaClient = &http.Client{ + Transport: &internal.Transport{ + Base: &http.Transport{ + Dial: (&net.Dialer{ + Timeout: 2 * time.Second, + KeepAlive: 30 * time.Second, + }).Dial, + ResponseHeaderTimeout: 2 * time.Second, + }, + }, + } + subscribeClient = &http.Client{ + Transport: &internal.Transport{ + Base: &http.Transport{ + Dial: (&net.Dialer{ + Timeout: 2 * time.Second, + KeepAlive: 30 * time.Second, + }).Dial, + }, + }, + } +) + +// NotDefinedError is returned when requested metadata is not defined. +// +// The underlying string is the suffix after "/computeMetadata/v1/". +// +// This error is not returned if the value is defined to be the empty +// string. +type NotDefinedError string + +func (suffix NotDefinedError) Error() string { + return fmt.Sprintf("metadata: GCE metadata %q not defined", string(suffix)) +} + +// Get returns a value from the metadata service. +// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/". +// +// If the GCE_METADATA_HOST environment variable is not defined, a default of +// 169.254.169.254 will be used instead. +// +// If the requested metadata is not defined, the returned error will +// be of type NotDefinedError. +func Get(suffix string) (string, error) { + val, _, err := getETag(metaClient, suffix) + return val, err +} + +// getETag returns a value from the metadata service as well as the associated +// ETag using the provided client. This func is otherwise equivalent to Get. 
+func getETag(client *http.Client, suffix string) (value, etag string, err error) { + // Using a fixed IP makes it very difficult to spoof the metadata service in + // a container, which is an important use-case for local testing of cloud + // deployments. To enable spoofing of the metadata service, the environment + // variable GCE_METADATA_HOST is first inspected to decide where metadata + // requests shall go. + host := os.Getenv(metadataHostEnv) + if host == "" { + // Using 169.254.169.254 instead of "metadata" here because Go + // binaries built with the "netgo" tag and without cgo won't + // know the search suffix for "metadata" is + // ".google.internal", and this IP address is documented as + // being stable anyway. + host = metadataIP + } + url := "http://" + host + "/computeMetadata/v1/" + suffix + req, _ := http.NewRequest("GET", url, nil) + req.Header.Set("Metadata-Flavor", "Google") + res, err := client.Do(req) + if err != nil { + return "", "", err + } + defer res.Body.Close() + if res.StatusCode == http.StatusNotFound { + return "", "", NotDefinedError(suffix) + } + if res.StatusCode != 200 { + return "", "", fmt.Errorf("status code %d trying to fetch %s", res.StatusCode, url) + } + all, err := ioutil.ReadAll(res.Body) + if err != nil { + return "", "", err + } + return string(all), res.Header.Get("Etag"), nil +} + +func getTrimmed(suffix string) (s string, err error) { + s, err = Get(suffix) + s = strings.TrimSpace(s) + return +} + +func (c *cachedValue) get() (v string, err error) { + defer c.mu.Unlock() + c.mu.Lock() + if c.v != "" { + return c.v, nil + } + if c.trim { + v, err = getTrimmed(c.k) + } else { + v, err = Get(c.k) + } + if err == nil { + c.v = v + } + return +} + +var ( + onGCEOnce sync.Once + onGCE bool +) + +// OnGCE reports whether this process is running on Google Compute Engine. 
+func OnGCE() bool { + onGCEOnce.Do(initOnGCE) + return onGCE +} + +func initOnGCE() { + onGCE = testOnGCE() +} + +func testOnGCE() bool { + // The user explicitly said they're on GCE, so trust them. + if os.Getenv(metadataHostEnv) != "" { + return true + } + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + resc := make(chan bool, 2) + + // Try two strategies in parallel. + // See https://github.com/GoogleCloudPlatform/google-cloud-go/issues/194 + go func() { + res, err := ctxhttp.Get(ctx, metaClient, "http://"+metadataIP) + if err != nil { + resc <- false + return + } + defer res.Body.Close() + resc <- res.Header.Get("Metadata-Flavor") == "Google" + }() + + go func() { + addrs, err := net.LookupHost("metadata.google.internal") + if err != nil || len(addrs) == 0 { + resc <- false + return + } + resc <- strsContains(addrs, metadataIP) + }() + + tryHarder := systemInfoSuggestsGCE() + if tryHarder { + res := <-resc + if res { + // The first strategy succeeded, so let's use it. + return true + } + // Wait for either the DNS or metadata server probe to + // contradict the other one and say we are running on + // GCE. Give it a lot of time to do so, since the system + // info already suggests we're running on a GCE BIOS. + timer := time.NewTimer(5 * time.Second) + defer timer.Stop() + select { + case res = <-resc: + return res + case <-timer.C: + // Too slow. Who knows what this system is. + return false + } + } + + // There's no hint from the system info that we're running on + // GCE, so use the first probe's result as truth, whether it's + // true or false. The goal here is to optimize for speed for + // users who are NOT running on GCE. We can't assume that + // either a DNS lookup or an HTTP request to a blackholed IP + // address is fast. Worst case this should return when the + // metaClient's Transport.ResponseHeaderTimeout or + // Transport.Dial.Timeout fires (in two seconds). 
+ return <-resc +} + +// systemInfoSuggestsGCE reports whether the local system (without +// doing network requests) suggests that we're running on GCE. If this +// returns true, testOnGCE tries a bit harder to reach its metadata +// server. +func systemInfoSuggestsGCE() bool { + if runtime.GOOS != "linux" { + // We don't have any non-Linux clues available, at least yet. + return false + } + slurp, _ := ioutil.ReadFile("/sys/class/dmi/id/product_name") + name := strings.TrimSpace(string(slurp)) + return name == "Google" || name == "Google Compute Engine" +} + +// Subscribe subscribes to a value from the metadata service. +// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/". +// The suffix may contain query parameters. +// +// Subscribe calls fn with the latest metadata value indicated by the provided +// suffix. If the metadata value is deleted, fn is called with the empty string +// and ok false. Subscribe blocks until fn returns a non-nil error or the value +// is deleted. Subscribe returns the error value returned from the last call to +// fn, which may be nil when ok == false. +func Subscribe(suffix string, fn func(v string, ok bool) error) error { + const failedSubscribeSleep = time.Second * 5 + + // First check to see if the metadata value exists at all. + val, lastETag, err := getETag(subscribeClient, suffix) + if err != nil { + return err + } + + if err := fn(val, true); err != nil { + return err + } + + ok := true + if strings.ContainsRune(suffix, '?') { + suffix += "&wait_for_change=true&last_etag=" + } else { + suffix += "?wait_for_change=true&last_etag=" + } + for { + val, etag, err := getETag(subscribeClient, suffix+url.QueryEscape(lastETag)) + if err != nil { + if _, deleted := err.(NotDefinedError); !deleted { + time.Sleep(failedSubscribeSleep) + continue // Retry on other errors. 
+ } + ok = false + } + lastETag = etag + + if err := fn(val, ok); err != nil || !ok { + return err + } + } +} + +// ProjectID returns the current instance's project ID string. +func ProjectID() (string, error) { return projID.get() } + +// NumericProjectID returns the current instance's numeric project ID. +func NumericProjectID() (string, error) { return projNum.get() } + +// InternalIP returns the instance's primary internal IP address. +func InternalIP() (string, error) { + return getTrimmed("instance/network-interfaces/0/ip") +} + +// ExternalIP returns the instance's primary external (public) IP address. +func ExternalIP() (string, error) { + return getTrimmed("instance/network-interfaces/0/access-configs/0/external-ip") +} + +// Hostname returns the instance's hostname. This will be of the form +// ".c..internal". +func Hostname() (string, error) { + return getTrimmed("instance/hostname") +} + +// InstanceTags returns the list of user-defined instance tags, +// assigned when initially creating a GCE instance. +func InstanceTags() ([]string, error) { + var s []string + j, err := Get("instance/tags") + if err != nil { + return nil, err + } + if err := json.NewDecoder(strings.NewReader(j)).Decode(&s); err != nil { + return nil, err + } + return s, nil +} + +// InstanceID returns the current VM's numeric instance ID. +func InstanceID() (string, error) { + return instID.get() +} + +// InstanceName returns the current VM's instance ID string. +func InstanceName() (string, error) { + host, err := Hostname() + if err != nil { + return "", err + } + return strings.Split(host, ".")[0], nil +} + +// Zone returns the current VM's zone, such as "us-central1-b". +func Zone() (string, error) { + zone, err := getTrimmed("instance/zone") + // zone is of the form "projects//zones/". 
+ if err != nil { + return "", err + } + return zone[strings.LastIndex(zone, "/")+1:], nil +} + +// InstanceAttributes returns the list of user-defined attributes, +// assigned when initially creating a GCE VM instance. The value of an +// attribute can be obtained with InstanceAttributeValue. +func InstanceAttributes() ([]string, error) { return lines("instance/attributes/") } + +// ProjectAttributes returns the list of user-defined attributes +// applying to the project as a whole, not just this VM. The value of +// an attribute can be obtained with ProjectAttributeValue. +func ProjectAttributes() ([]string, error) { return lines("project/attributes/") } + +func lines(suffix string) ([]string, error) { + j, err := Get(suffix) + if err != nil { + return nil, err + } + s := strings.Split(strings.TrimSpace(j), "\n") + for i := range s { + s[i] = strings.TrimSpace(s[i]) + } + return s, nil +} + +// InstanceAttributeValue returns the value of the provided VM +// instance attribute. +// +// If the requested attribute is not defined, the returned error will +// be of type NotDefinedError. +// +// InstanceAttributeValue may return ("", nil) if the attribute was +// defined to be the empty string. +func InstanceAttributeValue(attr string) (string, error) { + return Get("instance/attributes/" + attr) +} + +// ProjectAttributeValue returns the value of the provided +// project attribute. +// +// If the requested attribute is not defined, the returned error will +// be of type NotDefinedError. +// +// ProjectAttributeValue may return ("", nil) if the attribute was +// defined to be the empty string. +func ProjectAttributeValue(attr string) (string, error) { + return Get("project/attributes/" + attr) +} + +// Scopes returns the service account scopes for the given account. +// The account may be empty or the string "default" to use the instance's +// main account. 
+func Scopes(serviceAccount string) ([]string, error) { + if serviceAccount == "" { + serviceAccount = "default" + } + return lines("instance/service-accounts/" + serviceAccount + "/scopes") +} + +func strsContains(ss []string, s string) bool { + for _, v := range ss { + if v == s { + return true + } + } + return false +} diff --git a/vendor/cloud.google.com/go/compute/metadata/metadata_test.go b/vendor/cloud.google.com/go/compute/metadata/metadata_test.go new file mode 100644 index 00000000..9ac59269 --- /dev/null +++ b/vendor/cloud.google.com/go/compute/metadata/metadata_test.go @@ -0,0 +1,48 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package metadata + +import ( + "os" + "sync" + "testing" +) + +func TestOnGCE_Stress(t *testing.T) { + if testing.Short() { + t.Skip("skipping in -short mode") + } + var last bool + for i := 0; i < 100; i++ { + onGCEOnce = sync.Once{} + + now := OnGCE() + if i > 0 && now != last { + t.Errorf("%d. 
changed from %v to %v", i, last, now) + } + last = now + } + t.Logf("OnGCE() = %v", last) +} + +func TestOnGCE_Force(t *testing.T) { + onGCEOnce = sync.Once{} + old := os.Getenv(metadataHostEnv) + defer os.Setenv(metadataHostEnv, old) + os.Setenv(metadataHostEnv, "127.0.0.1") + if !OnGCE() { + t.Error("OnGCE() = false; want true") + } +} diff --git a/vendor/cloud.google.com/go/container/container.go b/vendor/cloud.google.com/go/container/container.go new file mode 100644 index 00000000..684984eb --- /dev/null +++ b/vendor/cloud.google.com/go/container/container.go @@ -0,0 +1,278 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package container contains a Google Container Engine client. +// +// For more information about the API, +// see https://cloud.google.com/container-engine/docs +// +// Authentication +// +// See examples of authorization and authentication at +// https://godoc.org/cloud.google.com/go#pkg-examples. 
+package container // import "cloud.google.com/go/container" + +import ( + "errors" + "fmt" + "time" + + "golang.org/x/net/context" + raw "google.golang.org/api/container/v1" + "google.golang.org/api/option" + "google.golang.org/api/transport" +) + +type Type string + +const ( + TypeCreate = Type("createCluster") + TypeDelete = Type("deleteCluster") +) + +type Status string + +const ( + StatusDone = Status("done") + StatusPending = Status("pending") + StatusRunning = Status("running") + StatusError = Status("error") + StatusProvisioning = Status("provisioning") + StatusStopping = Status("stopping") +) + +const prodAddr = "https://container.googleapis.com/" +const userAgent = "gcloud-golang-container/20151008" + +// Client is a Google Container Engine client, which may be used to manage +// clusters with a project. It must be constructed via NewClient. +type Client struct { + projectID string + svc *raw.Service +} + +// NewClient creates a new Google Container Engine client. +func NewClient(ctx context.Context, projectID string, opts ...option.ClientOption) (*Client, error) { + o := []option.ClientOption{ + option.WithEndpoint(prodAddr), + option.WithScopes(raw.CloudPlatformScope), + option.WithUserAgent(userAgent), + } + o = append(o, opts...) + httpClient, endpoint, err := transport.NewHTTPClient(ctx, o...) + if err != nil { + return nil, fmt.Errorf("dialing: %v", err) + } + + svc, err := raw.New(httpClient) + if err != nil { + return nil, fmt.Errorf("constructing container client: %v", err) + } + svc.BasePath = endpoint + + c := &Client{ + projectID: projectID, + svc: svc, + } + + return c, nil +} + +// Resource is a Google Container Engine cluster resource. +type Resource struct { + // Name is the name of this cluster. The name must be unique + // within this project and zone, and can be up to 40 characters. + Name string + + // Description is the description of the cluster. Optional. 
+ Description string + + // Zone is the Google Compute Engine zone in which the cluster resides. + Zone string + + // Status is the current status of the cluster. It could either be + // StatusError, StatusProvisioning, StatusRunning or StatusStopping. + Status Status + + // Num is the number of the nodes in this cluster resource. + Num int64 + + // APIVersion is the version of the Kubernetes master and kubelets running + // in this cluster. Allowed value is 0.4.2, or leave blank to + // pick up the latest stable release. + APIVersion string + + // Endpoint is the IP address of this cluster's Kubernetes master. + // The endpoint can be accessed at https://username:password@endpoint/. + // See Username and Password fields for the username and password information. + Endpoint string + + // Username is the username to use when accessing the Kubernetes master endpoint. + Username string + + // Password is the password to use when accessing the Kubernetes master endpoint. + Password string + + // ContainerIPv4CIDR is the IP addresses of the container pods in + // this cluster, in CIDR notation (e.g. 1.2.3.4/29). + ContainerIPv4CIDR string + + // ServicesIPv4CIDR is the IP addresses of the Kubernetes services in this + // cluster, in CIDR notation (e.g. 1.2.3.4/29). Service addresses are + // always in the 10.0.0.0/16 range. + ServicesIPv4CIDR string + + // MachineType is a Google Compute Engine machine type (e.g. n1-standard-1). + // If none set, the default type is used while creating a new cluster. + MachineType string + + // This field is ignored. It was removed from the underlying container API in v1. + SourceImage string + + // Created is the creation time of this cluster. 
+ Created time.Time +} + +func resourceFromRaw(c *raw.Cluster) *Resource { + if c == nil { + return nil + } + r := &Resource{ + Name: c.Name, + Description: c.Description, + Zone: c.Zone, + Status: Status(c.Status), + Num: c.InitialNodeCount, + APIVersion: c.InitialClusterVersion, + Endpoint: c.Endpoint, + Username: c.MasterAuth.Username, + Password: c.MasterAuth.Password, + ContainerIPv4CIDR: c.ClusterIpv4Cidr, + ServicesIPv4CIDR: c.ServicesIpv4Cidr, + MachineType: c.NodeConfig.MachineType, + } + r.Created, _ = time.Parse(time.RFC3339, c.CreateTime) + return r +} + +func resourcesFromRaw(c []*raw.Cluster) []*Resource { + r := make([]*Resource, len(c)) + for i, val := range c { + r[i] = resourceFromRaw(val) + } + return r +} + +// Op represents a Google Container Engine API operation. +type Op struct { + // Name is the name of the operation. + Name string + + // Zone is the Google Compute Engine zone. + Zone string + + // This field is ignored. It was removed from the underlying container API in v1. + TargetURL string + + // Type is the operation type. It could be either be TypeCreate or TypeDelete. + Type Type + + // Status is the current status of this operation. It could be either + // OpDone or OpPending. + Status Status +} + +func opFromRaw(o *raw.Operation) *Op { + if o == nil { + return nil + } + return &Op{ + Name: o.Name, + Zone: o.Zone, + Type: Type(o.OperationType), + Status: Status(o.Status), + } +} + +func opsFromRaw(o []*raw.Operation) []*Op { + ops := make([]*Op, len(o)) + for i, val := range o { + ops[i] = opFromRaw(val) + } + return ops +} + +// Clusters returns a list of cluster resources from the specified zone. +// If no zone is specified, it returns all clusters under the user project. 
+func (c *Client) Clusters(ctx context.Context, zone string) ([]*Resource, error) { + if zone == "" { + zone = "-" + } + resp, err := c.svc.Projects.Zones.Clusters.List(c.projectID, zone).Do() + if err != nil { + return nil, err + } + return resourcesFromRaw(resp.Clusters), nil +} + +// Cluster returns metadata about the specified cluster. +func (c *Client) Cluster(ctx context.Context, zone, name string) (*Resource, error) { + resp, err := c.svc.Projects.Zones.Clusters.Get(c.projectID, zone, name).Do() + if err != nil { + return nil, err + } + return resourceFromRaw(resp), nil +} + +// CreateCluster creates a new cluster with the provided metadata +// in the specified zone. +func (c *Client) CreateCluster(ctx context.Context, zone string, resource *Resource) (*Resource, error) { + panic("not implemented") +} + +// DeleteCluster deletes a cluster. +func (c *Client) DeleteCluster(ctx context.Context, zone, name string) error { + _, err := c.svc.Projects.Zones.Clusters.Delete(c.projectID, zone, name).Do() + return err +} + +// Operations returns a list of operations from the specified zone. +// If no zone is specified, it looks up for all of the operations +// that are running under the user's project. +func (c *Client) Operations(ctx context.Context, zone string) ([]*Op, error) { + if zone == "" { + resp, err := c.svc.Projects.Zones.Operations.List(c.projectID, "-").Do() + if err != nil { + return nil, err + } + return opsFromRaw(resp.Operations), nil + } + resp, err := c.svc.Projects.Zones.Operations.List(c.projectID, zone).Do() + if err != nil { + return nil, err + } + return opsFromRaw(resp.Operations), nil +} + +// Operation returns an operation. 
+func (c *Client) Operation(ctx context.Context, zone, name string) (*Op, error) { + resp, err := c.svc.Projects.Zones.Operations.Get(c.projectID, zone, name).Do() + if err != nil { + return nil, err + } + if resp.StatusMessage != "" { + return nil, errors.New(resp.StatusMessage) + } + return opFromRaw(resp), nil +} diff --git a/vendor/cloud.google.com/go/datastore/datastore.go b/vendor/cloud.google.com/go/datastore/datastore.go new file mode 100644 index 00000000..d8ff2972 --- /dev/null +++ b/vendor/cloud.google.com/go/datastore/datastore.go @@ -0,0 +1,600 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package datastore + +import ( + "errors" + "fmt" + "log" + "os" + "reflect" + + "cloud.google.com/go/internal/version" + + "github.com/golang/protobuf/proto" + "golang.org/x/net/context" + "google.golang.org/api/option" + "google.golang.org/api/transport" + pb "google.golang.org/genproto/googleapis/datastore/v1" + "google.golang.org/grpc" + "google.golang.org/grpc/metadata" +) + +const ( + prodAddr = "datastore.googleapis.com:443" + userAgent = "gcloud-golang-datastore/20160401" +) + +// ScopeDatastore grants permissions to view and/or manage datastore entities +const ScopeDatastore = "https://www.googleapis.com/auth/datastore" + +// resourcePrefixHeader is the name of the metadata header used to indicate +// the resource being operated on. 
+const resourcePrefixHeader = "google-cloud-resource-prefix" + +// protoClient is an interface for *transport.ProtoClient to support injecting +// fake clients in tests. +type protoClient interface { + Call(context.Context, string, proto.Message, proto.Message) error +} + +// datastoreClient is a wrapper for the pb.DatastoreClient that includes gRPC +// metadata to be sent in each request for server-side traffic management. +type datastoreClient struct { + c pb.DatastoreClient + md metadata.MD +} + +func newDatastoreClient(conn *grpc.ClientConn, projectID string) pb.DatastoreClient { + return &datastoreClient{ + c: pb.NewDatastoreClient(conn), + md: metadata.Pairs( + resourcePrefixHeader, "projects/"+projectID, + "x-goog-api-client", fmt.Sprintf("gl-go/%s gccl/%s grpc/", version.Go(), version.Repo)), + } +} + +func (dc *datastoreClient) Lookup(ctx context.Context, in *pb.LookupRequest, opts ...grpc.CallOption) (*pb.LookupResponse, error) { + return dc.c.Lookup(metadata.NewContext(ctx, dc.md), in, opts...) +} + +func (dc *datastoreClient) RunQuery(ctx context.Context, in *pb.RunQueryRequest, opts ...grpc.CallOption) (*pb.RunQueryResponse, error) { + return dc.c.RunQuery(metadata.NewContext(ctx, dc.md), in, opts...) +} + +func (dc *datastoreClient) BeginTransaction(ctx context.Context, in *pb.BeginTransactionRequest, opts ...grpc.CallOption) (*pb.BeginTransactionResponse, error) { + return dc.c.BeginTransaction(metadata.NewContext(ctx, dc.md), in, opts...) +} + +func (dc *datastoreClient) Commit(ctx context.Context, in *pb.CommitRequest, opts ...grpc.CallOption) (*pb.CommitResponse, error) { + return dc.c.Commit(metadata.NewContext(ctx, dc.md), in, opts...) +} + +func (dc *datastoreClient) Rollback(ctx context.Context, in *pb.RollbackRequest, opts ...grpc.CallOption) (*pb.RollbackResponse, error) { + return dc.c.Rollback(metadata.NewContext(ctx, dc.md), in, opts...) 
+} + +func (dc *datastoreClient) AllocateIds(ctx context.Context, in *pb.AllocateIdsRequest, opts ...grpc.CallOption) (*pb.AllocateIdsResponse, error) { + return dc.c.AllocateIds(metadata.NewContext(ctx, dc.md), in, opts...) +} + +// Client is a client for reading and writing data in a datastore dataset. +type Client struct { + conn *grpc.ClientConn + client pb.DatastoreClient + endpoint string + dataset string // Called dataset by the datastore API, synonym for project ID. +} + +// NewClient creates a new Client for a given dataset. +// If the project ID is empty, it is derived from the DATASTORE_PROJECT_ID environment variable. +// If the DATASTORE_EMULATOR_HOST environment variable is set, client will use its value +// to connect to a locally-running datastore emulator. +func NewClient(ctx context.Context, projectID string, opts ...option.ClientOption) (*Client, error) { + var o []option.ClientOption + // Environment variables for gcd emulator: + // https://cloud.google.com/datastore/docs/tools/datastore-emulator + // If the emulator is available, dial it directly (and don't pass any credentials). + if addr := os.Getenv("DATASTORE_EMULATOR_HOST"); addr != "" { + conn, err := grpc.Dial(addr, grpc.WithInsecure()) + if err != nil { + return nil, fmt.Errorf("grpc.Dial: %v", err) + } + o = []option.ClientOption{option.WithGRPCConn(conn)} + } else { + o = []option.ClientOption{ + option.WithEndpoint(prodAddr), + option.WithScopes(ScopeDatastore), + option.WithUserAgent(userAgent), + } + } + // Warn if we see the legacy emulator environment variables. + if os.Getenv("DATASTORE_HOST") != "" && os.Getenv("DATASTORE_EMULATOR_HOST") == "" { + log.Print("WARNING: legacy environment variable DATASTORE_HOST is ignored. Use DATASTORE_EMULATOR_HOST instead.") + } + if os.Getenv("DATASTORE_DATASET") != "" && os.Getenv("DATASTORE_PROJECT_ID") == "" { + log.Print("WARNING: legacy environment variable DATASTORE_DATASET is ignored. 
Use DATASTORE_PROJECT_ID instead.") + } + if projectID == "" { + projectID = os.Getenv("DATASTORE_PROJECT_ID") + } + if projectID == "" { + return nil, errors.New("datastore: missing project/dataset id") + } + o = append(o, opts...) + conn, err := transport.DialGRPC(ctx, o...) + if err != nil { + return nil, fmt.Errorf("dialing: %v", err) + } + return &Client{ + conn: conn, + client: newDatastoreClient(conn, projectID), + dataset: projectID, + }, nil + +} + +var ( + // ErrInvalidEntityType is returned when functions like Get or Next are + // passed a dst or src argument of invalid type. + ErrInvalidEntityType = errors.New("datastore: invalid entity type") + // ErrInvalidKey is returned when an invalid key is presented. + ErrInvalidKey = errors.New("datastore: invalid key") + // ErrNoSuchEntity is returned when no entity was found for a given key. + ErrNoSuchEntity = errors.New("datastore: no such entity") +) + +type multiArgType int + +const ( + multiArgTypeInvalid multiArgType = iota + multiArgTypePropertyLoadSaver + multiArgTypeStruct + multiArgTypeStructPtr + multiArgTypeInterface +) + +// ErrFieldMismatch is returned when a field is to be loaded into a different +// type than the one it was stored from, or when a field is missing or +// unexported in the destination struct. +// StructType is the type of the struct pointed to by the destination argument +// passed to Get or to Iterator.Next. +type ErrFieldMismatch struct { + StructType reflect.Type + FieldName string + Reason string +} + +func (e *ErrFieldMismatch) Error() string { + return fmt.Sprintf("datastore: cannot load field %q into a %q: %s", + e.FieldName, e.StructType, e.Reason) +} + +// GeoPoint represents a location as latitude/longitude in degrees. +type GeoPoint struct { + Lat, Lng float64 +} + +// Valid returns whether a GeoPoint is within [-90, 90] latitude and [-180, 180] longitude. 
+func (g GeoPoint) Valid() bool { + return -90 <= g.Lat && g.Lat <= 90 && -180 <= g.Lng && g.Lng <= 180 +} + +func keyToProto(k *Key) *pb.Key { + if k == nil { + return nil + } + + // TODO(jbd): Eliminate unrequired allocations. + var path []*pb.Key_PathElement + for { + el := &pb.Key_PathElement{Kind: k.Kind} + if k.ID != 0 { + el.IdType = &pb.Key_PathElement_Id{k.ID} + } else if k.Name != "" { + el.IdType = &pb.Key_PathElement_Name{k.Name} + } + path = append([]*pb.Key_PathElement{el}, path...) + if k.Parent == nil { + break + } + k = k.Parent + } + key := &pb.Key{Path: path} + if k.Namespace != "" { + key.PartitionId = &pb.PartitionId{ + NamespaceId: k.Namespace, + } + } + return key +} + +// protoToKey decodes a protocol buffer representation of a key into an +// equivalent *Key object. If the key is invalid, protoToKey will return the +// invalid key along with ErrInvalidKey. +func protoToKey(p *pb.Key) (*Key, error) { + var key *Key + var namespace string + if partition := p.PartitionId; partition != nil { + namespace = partition.NamespaceId + } + for _, el := range p.Path { + key = &Key{ + Namespace: namespace, + Kind: el.Kind, + ID: el.GetId(), + Name: el.GetName(), + Parent: key, + } + } + if !key.valid() { // Also detects key == nil. + return key, ErrInvalidKey + } + return key, nil +} + +// multiKeyToProto is a batch version of keyToProto. +func multiKeyToProto(keys []*Key) []*pb.Key { + ret := make([]*pb.Key, len(keys)) + for i, k := range keys { + ret[i] = keyToProto(k) + } + return ret +} + +// multiKeyToProto is a batch version of keyToProto. +func multiProtoToKey(keys []*pb.Key) ([]*Key, error) { + hasErr := false + ret := make([]*Key, len(keys)) + err := make(MultiError, len(keys)) + for i, k := range keys { + ret[i], err[i] = protoToKey(k) + if err[i] != nil { + hasErr = true + } + } + if hasErr { + return nil, err + } + return ret, nil +} + +// multiValid is a batch version of Key.valid. It returns an error, not a +// []bool. 
+func multiValid(key []*Key) error { + invalid := false + for _, k := range key { + if !k.valid() { + invalid = true + break + } + } + if !invalid { + return nil + } + err := make(MultiError, len(key)) + for i, k := range key { + if !k.valid() { + err[i] = ErrInvalidKey + } + } + return err +} + +// checkMultiArg checks that v has type []S, []*S, []I, or []P, for some struct +// type S, for some interface type I, or some non-interface non-pointer type P +// such that P or *P implements PropertyLoadSaver. +// +// It returns what category the slice's elements are, and the reflect.Type +// that represents S, I or P. +// +// As a special case, PropertyList is an invalid type for v. +// +// TODO(djd): multiArg is very confusing. Fold this logic into the +// relevant Put/Get methods to make the logic less opaque. +func checkMultiArg(v reflect.Value) (m multiArgType, elemType reflect.Type) { + if v.Kind() != reflect.Slice { + return multiArgTypeInvalid, nil + } + if v.Type() == typeOfPropertyList { + return multiArgTypeInvalid, nil + } + elemType = v.Type().Elem() + if reflect.PtrTo(elemType).Implements(typeOfPropertyLoadSaver) { + return multiArgTypePropertyLoadSaver, elemType + } + switch elemType.Kind() { + case reflect.Struct: + return multiArgTypeStruct, elemType + case reflect.Interface: + return multiArgTypeInterface, elemType + case reflect.Ptr: + elemType = elemType.Elem() + if elemType.Kind() == reflect.Struct { + return multiArgTypeStructPtr, elemType + } + } + return multiArgTypeInvalid, nil +} + +// Close closes the Client. +func (c *Client) Close() error { + return c.conn.Close() +} + +// Get loads the entity stored for key into dst, which must be a struct pointer +// or implement PropertyLoadSaver. If there is no such entity for the key, Get +// returns ErrNoSuchEntity. +// +// The values of dst's unmatched struct fields are not modified, and matching +// slice-typed fields are not reset before appending to them. 
In particular, it +// is recommended to pass a pointer to a zero valued struct on each Get call. +// +// ErrFieldMismatch is returned when a field is to be loaded into a different +// type than the one it was stored from, or when a field is missing or +// unexported in the destination struct. ErrFieldMismatch is only returned if +// dst is a struct pointer. +func (c *Client) Get(ctx context.Context, key *Key, dst interface{}) error { + if dst == nil { // get catches nil interfaces; we need to catch nil ptr here + return ErrInvalidEntityType + } + err := c.get(ctx, []*Key{key}, []interface{}{dst}, nil) + if me, ok := err.(MultiError); ok { + return me[0] + } + return err +} + +// GetMulti is a batch version of Get. +// +// dst must be a []S, []*S, []I or []P, for some struct type S, some interface +// type I, or some non-interface non-pointer type P such that P or *P +// implements PropertyLoadSaver. If an []I, each element must be a valid dst +// for Get: it must be a struct pointer or implement PropertyLoadSaver. +// +// As a special case, PropertyList is an invalid type for dst, even though a +// PropertyList is a slice of structs. It is treated as invalid to avoid being +// mistakenly passed when []PropertyList was intended. 
+func (c *Client) GetMulti(ctx context.Context, keys []*Key, dst interface{}) error { + return c.get(ctx, keys, dst, nil) +} + +func (c *Client) get(ctx context.Context, keys []*Key, dst interface{}, opts *pb.ReadOptions) error { + v := reflect.ValueOf(dst) + multiArgType, _ := checkMultiArg(v) + + // Sanity checks + if multiArgType == multiArgTypeInvalid { + return errors.New("datastore: dst has invalid type") + } + if len(keys) != v.Len() { + return errors.New("datastore: keys and dst slices have different length") + } + if len(keys) == 0 { + return nil + } + + // Go through keys, validate them, serialize then, and create a dict mapping them to their index + multiErr, any := make(MultiError, len(keys)), false + keyMap := make(map[string]int) + pbKeys := make([]*pb.Key, len(keys)) + for i, k := range keys { + if !k.valid() { + multiErr[i] = ErrInvalidKey + any = true + } else { + keyMap[k.String()] = i + pbKeys[i] = keyToProto(k) + } + } + if any { + return multiErr + } + req := &pb.LookupRequest{ + ProjectId: c.dataset, + Keys: pbKeys, + ReadOptions: opts, + } + resp, err := c.client.Lookup(ctx, req) + if err != nil { + return err + } + found := resp.Found + missing := resp.Missing + // Upper bound 100 iterations to prevent infinite loop. + // We choose 100 iterations somewhat logically: + // Max number of Entities you can request from Datastore is 1,000. + // Max size for a Datastore Entity is 1 MiB. + // Max request size is 10 MiB, so we assume max response size is also 10 MiB. + // 1,000 / 10 = 100. + // Note that if ctx has a deadline, the deadline will probably + // be hit before we reach 100 iterations. + for i := 0; len(resp.Deferred) > 0 && i < 100; i++ { + req.Keys = resp.Deferred + resp, err = c.client.Lookup(ctx, req) + if err != nil { + return err + } + found = append(found, resp.Found...) + missing = append(missing, resp.Missing...) 
+ } + if len(keys) != len(found)+len(missing) { + return errors.New("datastore: internal error: server returned the wrong number of entities") + } + for _, e := range found { + k, err := protoToKey(e.Entity.Key) + if err != nil { + return errors.New("datastore: internal error: server returned an invalid key") + } + index := keyMap[k.String()] + elem := v.Index(index) + if multiArgType == multiArgTypePropertyLoadSaver || multiArgType == multiArgTypeStruct { + elem = elem.Addr() + } + if multiArgType == multiArgTypeStructPtr && elem.IsNil() { + elem.Set(reflect.New(elem.Type().Elem())) + } + if err := loadEntityProto(elem.Interface(), e.Entity); err != nil { + multiErr[index] = err + any = true + } + } + for _, e := range missing { + k, err := protoToKey(e.Entity.Key) + if err != nil { + return errors.New("datastore: internal error: server returned an invalid key") + } + multiErr[keyMap[k.String()]] = ErrNoSuchEntity + any = true + } + if any { + return multiErr + } + return nil +} + +// Put saves the entity src into the datastore with key k. src must be a struct +// pointer or implement PropertyLoadSaver; if a struct pointer then any +// unexported fields of that struct will be skipped. If k is an incomplete key, +// the returned key will be a unique key generated by the datastore. +func (c *Client) Put(ctx context.Context, key *Key, src interface{}) (*Key, error) { + k, err := c.PutMulti(ctx, []*Key{key}, []interface{}{src}) + if err != nil { + if me, ok := err.(MultiError); ok { + return nil, me[0] + } + return nil, err + } + return k[0], nil +} + +// PutMulti is a batch version of Put. +// +// src must satisfy the same conditions as the dst argument to GetMulti. +func (c *Client) PutMulti(ctx context.Context, keys []*Key, src interface{}) ([]*Key, error) { + mutations, err := putMutations(keys, src) + if err != nil { + return nil, err + } + + // Make the request. 
+ req := &pb.CommitRequest{ + ProjectId: c.dataset, + Mutations: mutations, + Mode: pb.CommitRequest_NON_TRANSACTIONAL, + } + resp, err := c.client.Commit(ctx, req) + if err != nil { + return nil, err + } + + // Copy any newly minted keys into the returned keys. + ret := make([]*Key, len(keys)) + for i, key := range keys { + if key.Incomplete() { + // This key is in the mutation results. + ret[i], err = protoToKey(resp.MutationResults[i].Key) + if err != nil { + return nil, errors.New("datastore: internal error: server returned an invalid key") + } + } else { + ret[i] = key + } + } + return ret, nil +} + +func putMutations(keys []*Key, src interface{}) ([]*pb.Mutation, error) { + v := reflect.ValueOf(src) + multiArgType, _ := checkMultiArg(v) + if multiArgType == multiArgTypeInvalid { + return nil, errors.New("datastore: src has invalid type") + } + if len(keys) != v.Len() { + return nil, errors.New("datastore: key and src slices have different length") + } + if len(keys) == 0 { + return nil, nil + } + if err := multiValid(keys); err != nil { + return nil, err + } + mutations := make([]*pb.Mutation, 0, len(keys)) + multiErr := make(MultiError, len(keys)) + hasErr := false + for i, k := range keys { + elem := v.Index(i) + // Two cases where we need to take the address: + // 1) multiArgTypePropertyLoadSaver => &elem implements PLS + // 2) multiArgTypeStruct => saveEntity needs *struct + if multiArgType == multiArgTypePropertyLoadSaver || multiArgType == multiArgTypeStruct { + elem = elem.Addr() + } + p, err := saveEntity(k, elem.Interface()) + if err != nil { + multiErr[i] = err + hasErr = true + } + var mut *pb.Mutation + if k.Incomplete() { + mut = &pb.Mutation{Operation: &pb.Mutation_Insert{p}} + } else { + mut = &pb.Mutation{Operation: &pb.Mutation_Upsert{p}} + } + mutations = append(mutations, mut) + } + if hasErr { + return nil, multiErr + } + return mutations, nil +} + +// Delete deletes the entity for the given key. 
+func (c *Client) Delete(ctx context.Context, key *Key) error { + err := c.DeleteMulti(ctx, []*Key{key}) + if me, ok := err.(MultiError); ok { + return me[0] + } + return err +} + +// DeleteMulti is a batch version of Delete. +func (c *Client) DeleteMulti(ctx context.Context, keys []*Key) error { + mutations, err := deleteMutations(keys) + if err != nil { + return err + } + + req := &pb.CommitRequest{ + ProjectId: c.dataset, + Mutations: mutations, + Mode: pb.CommitRequest_NON_TRANSACTIONAL, + } + _, err = c.client.Commit(ctx, req) + return err +} + +func deleteMutations(keys []*Key) ([]*pb.Mutation, error) { + mutations := make([]*pb.Mutation, 0, len(keys)) + for _, k := range keys { + if k.Incomplete() { + return nil, fmt.Errorf("datastore: can't delete the incomplete key: %v", k) + } + mutations = append(mutations, &pb.Mutation{ + Operation: &pb.Mutation_Delete{keyToProto(k)}, + }) + } + return mutations, nil +} diff --git a/vendor/cloud.google.com/go/datastore/datastore_test.go b/vendor/cloud.google.com/go/datastore/datastore_test.go new file mode 100644 index 00000000..e9e57449 --- /dev/null +++ b/vendor/cloud.google.com/go/datastore/datastore_test.go @@ -0,0 +1,2776 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package datastore + +import ( + "encoding/json" + "errors" + "fmt" + "reflect" + "sort" + "strings" + "testing" + "time" + + "github.com/golang/protobuf/proto" + "golang.org/x/net/context" + pb "google.golang.org/genproto/googleapis/datastore/v1" + "google.golang.org/grpc" +) + +type ( + myBlob []byte + myByte byte + myString string +) + +func makeMyByteSlice(n int) []myByte { + b := make([]myByte, n) + for i := range b { + b[i] = myByte(i) + } + return b +} + +func makeInt8Slice(n int) []int8 { + b := make([]int8, n) + for i := range b { + b[i] = int8(i) + } + return b +} + +func makeUint8Slice(n int) []uint8 { + b := make([]uint8, n) + for i := range b { + b[i] = uint8(i) + } + return b +} + +func newKey(stringID string, parent *Key) *Key { + return NameKey("kind", stringID, parent) +} + +var ( + testKey0 = newKey("name0", nil) + testKey1a = newKey("name1", nil) + testKey1b = newKey("name1", nil) + testKey2a = newKey("name2", testKey0) + testKey2b = newKey("name2", testKey0) + testGeoPt0 = GeoPoint{Lat: 1.2, Lng: 3.4} + testGeoPt1 = GeoPoint{Lat: 5, Lng: 10} + testBadGeoPt = GeoPoint{Lat: 1000, Lng: 34} + + ts = time.Unix(1e9, 0).UTC() +) + +type B0 struct { + B []byte `datastore:",noindex"` +} + +type B1 struct { + B []int8 +} + +type B2 struct { + B myBlob `datastore:",noindex"` +} + +type B3 struct { + B []myByte `datastore:",noindex"` +} + +type B4 struct { + B [][]byte +} + +type C0 struct { + I int + C chan int +} + +type C1 struct { + I int + C *chan int +} + +type C2 struct { + I int + C []chan int +} + +type C3 struct { + C string +} + +type c4 struct { + C string +} + +type E struct{} + +type G0 struct { + G GeoPoint +} + +type G1 struct { + G []GeoPoint +} + +type K0 struct { + K *Key +} + +type K1 struct { + K []*Key +} + +type S struct { + St string +} + +type NoOmit struct { + A string + B int `datastore:"Bb"` + C bool `datastore:",noindex"` +} + +type OmitAll struct { + A string `datastore:",omitempty"` + B int `datastore:"Bb,omitempty"` + C 
bool `datastore:",omitempty,noindex"` + F []int `datastore:",omitempty"` +} + +type Omit struct { + A string `datastore:",omitempty"` + B int `datastore:"Bb,omitempty"` + C bool `datastore:",omitempty,noindex"` + F []int `datastore:",omitempty"` + S `datastore:",omitempty"` +} + +type NoOmits struct { + No []NoOmit `datastore:",omitempty"` + S `datastore:",omitempty"` + Ss S `datastore:",omitempty"` +} + +type N0 struct { + X0 + Nonymous X0 + Ignore string `datastore:"-"` + Other string +} + +type N1 struct { + X0 + Nonymous []X0 + Ignore string `datastore:"-"` + Other string +} + +type N2 struct { + N1 `datastore:"red"` + Green N1 `datastore:"green"` + Blue N1 + White N1 `datastore:"-"` +} + +type N3 struct { + C3 `datastore:"red"` +} + +type N4 struct { + c4 +} + +type N5 struct { + c4 `datastore:"red"` +} + +type O0 struct { + I int64 +} + +type O1 struct { + I int32 +} + +type U0 struct { + U uint +} + +type U1 struct { + U string +} + +type T struct { + T time.Time +} + +type X0 struct { + S string + I int + i int +} + +type X1 struct { + S myString + I int32 + J int64 +} + +type X2 struct { + Z string + i int +} + +type X3 struct { + S bool + I int +} + +type Y0 struct { + B bool + F []float64 + G []float64 +} + +type Y1 struct { + B bool + F float64 +} + +type Y2 struct { + B bool + F []int64 +} + +type Tagged struct { + A int `datastore:"a,noindex"` + B []int `datastore:"b"` + C int `datastore:",noindex"` + D int `datastore:""` + E int + I int `datastore:"-"` + J int `datastore:",noindex" json:"j"` + + Y0 `datastore:"-"` + Z chan int `datastore:"-"` +} + +type InvalidTagged1 struct { + I int `datastore:"\t"` +} + +type InvalidTagged2 struct { + I int + J int `datastore:"I"` +} + +type InvalidTagged3 struct { + X string `datastore:"-,noindex"` +} + +type InvalidTagged4 struct { + X string `datastore:",garbage"` +} + +type Inner1 struct { + W int32 + X string +} + +type Inner2 struct { + Y float64 +} + +type Inner3 struct { + Z bool +} + +type Inner5 struct { 
+ WW int +} + +type Inner4 struct { + X Inner5 +} + +type Outer struct { + A int16 + I []Inner1 + J Inner2 + Inner3 +} + +type OuterFlatten struct { + A int16 + I []Inner1 `datastore:",flatten"` + J Inner2 `datastore:",flatten,noindex"` + Inner3 `datastore:",flatten"` + K Inner4 `datastore:",flatten"` +} + +type OuterEquivalent struct { + A int16 + IDotW []int32 `datastore:"I.W"` + IDotX []string `datastore:"I.X"` + JDotY float64 `datastore:"J.Y"` + Z bool +} + +type Dotted struct { + A DottedA `datastore:"A0.A1.A2"` +} + +type DottedA struct { + B DottedB `datastore:"B3"` +} + +type DottedB struct { + C int `datastore:"C4.C5"` +} + +type SliceOfSlices struct { + I int + S []struct { + J int + F []float64 + } `datastore:",flatten"` +} + +type Recursive struct { + I int + R []Recursive +} + +type MutuallyRecursive0 struct { + I int + R []MutuallyRecursive1 +} + +type MutuallyRecursive1 struct { + I int + R []MutuallyRecursive0 +} + +type EntityWithKey struct { + I int + S string + K *Key `datastore:"__key__"` +} + +type EntityWithKey2 EntityWithKey + +type WithNestedEntityWithKey struct { + N EntityWithKey +} + +type WithNonKeyField struct { + I int + K string `datastore:"__key__"` +} + +type NestedWithNonKeyField struct { + N WithNonKeyField +} + +type Basic struct { + A string +} + +type PtrToStructField struct { + B *Basic + C *Basic `datastore:"c,noindex"` + *Basic + D []*Basic +} + +var two int = 2 + +type PtrToInt struct { + I *int +} + +type EmbeddedTime struct { + time.Time +} + +type SpecialTime struct { + MyTime EmbeddedTime +} + +type Doubler struct { + S string + I int64 + B bool +} + +type Repeat struct { + Key string + Value []byte +} + +type Repeated struct { + Repeats []Repeat +} + +func (d *Doubler) Load(props []Property) error { + return LoadStruct(d, props) +} + +func (d *Doubler) Save() ([]Property, error) { + // Save the default Property slice to an in-memory buffer (a PropertyList). 
+ props, err := SaveStruct(d) + if err != nil { + return nil, err + } + var list PropertyList + if err := list.Load(props); err != nil { + return nil, err + } + + // Edit that PropertyList, and send it on. + for i := range list { + switch v := list[i].Value.(type) { + case string: + // + means string concatenation. + list[i].Value = v + v + case int64: + // + means integer addition. + list[i].Value = v + v + } + } + return list.Save() +} + +var _ PropertyLoadSaver = (*Doubler)(nil) + +type Deriver struct { + S, Derived, Ignored string +} + +func (e *Deriver) Load(props []Property) error { + for _, p := range props { + if p.Name != "S" { + continue + } + e.S = p.Value.(string) + e.Derived = "derived+" + e.S + } + return nil +} + +func (e *Deriver) Save() ([]Property, error) { + return []Property{ + { + Name: "S", + Value: e.S, + }, + }, nil +} + +var _ PropertyLoadSaver = (*Deriver)(nil) + +type BadMultiPropEntity struct{} + +func (e *BadMultiPropEntity) Load(props []Property) error { + return errors.New("unimplemented") +} + +func (e *BadMultiPropEntity) Save() ([]Property, error) { + // Write multiple properties with the same name "I". 
+ var props []Property + for i := 0; i < 3; i++ { + props = append(props, Property{ + Name: "I", + Value: int64(i), + }) + } + return props, nil +} + +var _ PropertyLoadSaver = (*BadMultiPropEntity)(nil) + +type testCase struct { + desc string + src interface{} + want interface{} + putErr string + getErr string +} + +var testCases = []testCase{ + { + "chan save fails", + &C0{I: -1}, + &E{}, + "unsupported struct field", + "", + }, + { + "*chan save fails", + &C1{I: -1}, + &E{}, + "unsupported struct field", + "", + }, + { + "[]chan save fails", + &C2{I: -1, C: make([]chan int, 8)}, + &E{}, + "unsupported struct field", + "", + }, + { + "chan load fails", + &C3{C: "not a chan"}, + &C0{}, + "", + "type mismatch", + }, + { + "*chan load fails", + &C3{C: "not a *chan"}, + &C1{}, + "", + "type mismatch", + }, + { + "[]chan load fails", + &C3{C: "not a []chan"}, + &C2{}, + "", + "type mismatch", + }, + { + "empty struct", + &E{}, + &E{}, + "", + "", + }, + { + "geopoint", + &G0{G: testGeoPt0}, + &G0{G: testGeoPt0}, + "", + "", + }, + { + "geopoint invalid", + &G0{G: testBadGeoPt}, + &G0{}, + "invalid GeoPoint value", + "", + }, + { + "geopoint as props", + &G0{G: testGeoPt0}, + &PropertyList{ + Property{Name: "G", Value: testGeoPt0, NoIndex: false}, + }, + "", + "", + }, + { + "geopoint slice", + &G1{G: []GeoPoint{testGeoPt0, testGeoPt1}}, + &G1{G: []GeoPoint{testGeoPt0, testGeoPt1}}, + "", + "", + }, + { + "omit empty, all", + &OmitAll{}, + new(PropertyList), + "", + "", + }, + { + "omit empty", + &Omit{}, + &PropertyList{ + Property{Name: "St", Value: "", NoIndex: false}, + }, + "", + "", + }, + { + "omit empty, fields populated", + &Omit{ + A: "a", + B: 10, + C: true, + F: []int{11}, + }, + &PropertyList{ + Property{Name: "A", Value: "a", NoIndex: false}, + Property{Name: "Bb", Value: int64(10), NoIndex: false}, + Property{Name: "C", Value: true, NoIndex: true}, + Property{Name: "F", Value: []interface{}{int64(11)}, NoIndex: false}, + Property{Name: "St", Value: "", 
NoIndex: false}, + }, + "", + "", + }, + { + "omit empty, fields populated", + &Omit{ + A: "a", + B: 10, + C: true, + F: []int{11}, + S: S{St: "string"}, + }, + &PropertyList{ + Property{Name: "A", Value: "a", NoIndex: false}, + Property{Name: "Bb", Value: int64(10), NoIndex: false}, + Property{Name: "C", Value: true, NoIndex: true}, + Property{Name: "F", Value: []interface{}{int64(11)}, NoIndex: false}, + Property{Name: "St", Value: "string", NoIndex: false}, + }, + "", + "", + }, + { + "omit empty does not propagate", + &NoOmits{ + No: []NoOmit{ + NoOmit{}, + }, + S: S{}, + Ss: S{}, + }, + &PropertyList{ + Property{Name: "No", Value: []interface{}{ + &Entity{ + Properties: []Property{ + Property{Name: "A", Value: "", NoIndex: false}, + Property{Name: "Bb", Value: int64(0), NoIndex: false}, + Property{Name: "C", Value: false, NoIndex: true}, + }, + }, + }, NoIndex: false}, + Property{Name: "Ss", Value: &Entity{ + Properties: []Property{ + Property{Name: "St", Value: "", NoIndex: false}, + }, + }, NoIndex: false}, + Property{Name: "St", Value: "", NoIndex: false}, + }, + "", + "", + }, + { + "key", + &K0{K: testKey1a}, + &K0{K: testKey1b}, + "", + "", + }, + { + "key with parent", + &K0{K: testKey2a}, + &K0{K: testKey2b}, + "", + "", + }, + { + "nil key", + &K0{}, + &K0{}, + "", + "", + }, + { + "all nil keys in slice", + &K1{[]*Key{nil, nil}}, + &K1{[]*Key{nil, nil}}, + "", + "", + }, + { + "some nil keys in slice", + &K1{[]*Key{testKey1a, nil, testKey2a}}, + &K1{[]*Key{testKey1b, nil, testKey2b}}, + "", + "", + }, + { + "overflow", + &O0{I: 1 << 48}, + &O1{}, + "", + "overflow", + }, + { + "time", + &T{T: time.Unix(1e9, 0)}, + &T{T: time.Unix(1e9, 0)}, + "", + "", + }, + { + "time as props", + &T{T: time.Unix(1e9, 0)}, + &PropertyList{ + Property{Name: "T", Value: time.Unix(1e9, 0), NoIndex: false}, + }, + "", + "", + }, + { + "uint save", + &U0{U: 1}, + &U0{}, + "unsupported struct field", + "", + }, + { + "uint load", + &U1{U: "not a uint"}, + &U0{}, + "", + 
"type mismatch", + }, + { + "zero", + &X0{}, + &X0{}, + "", + "", + }, + { + "basic", + &X0{S: "one", I: 2, i: 3}, + &X0{S: "one", I: 2}, + "", + "", + }, + { + "save string/int load myString/int32", + &X0{S: "one", I: 2, i: 3}, + &X1{S: "one", I: 2}, + "", + "", + }, + { + "missing fields", + &X0{S: "one", I: 2, i: 3}, + &X2{}, + "", + "no such struct field", + }, + { + "save string load bool", + &X0{S: "one", I: 2, i: 3}, + &X3{I: 2}, + "", + "type mismatch", + }, + { + "basic slice", + &Y0{B: true, F: []float64{7, 8, 9}}, + &Y0{B: true, F: []float64{7, 8, 9}}, + "", + "", + }, + { + "save []float64 load float64", + &Y0{B: true, F: []float64{7, 8, 9}}, + &Y1{B: true}, + "", + "requires a slice", + }, + { + "save []float64 load []int64", + &Y0{B: true, F: []float64{7, 8, 9}}, + &Y2{B: true}, + "", + "type mismatch", + }, + { + "single slice is too long", + &Y0{F: make([]float64, maxIndexedProperties+1)}, + &Y0{}, + "too many indexed properties", + "", + }, + { + "two slices are too long", + &Y0{F: make([]float64, maxIndexedProperties), G: make([]float64, maxIndexedProperties)}, + &Y0{}, + "too many indexed properties", + "", + }, + { + "one slice and one scalar are too long", + &Y0{F: make([]float64, maxIndexedProperties), B: true}, + &Y0{}, + "too many indexed properties", + "", + }, + { + "slice of slices of bytes", + &Repeated{ + Repeats: []Repeat{ + { + Key: "key 1", + Value: []byte("value 1"), + }, + { + Key: "key 2", + Value: []byte("value 2"), + }, + }, + }, + &Repeated{ + Repeats: []Repeat{ + { + Key: "key 1", + Value: []byte("value 1"), + }, + { + Key: "key 2", + Value: []byte("value 2"), + }, + }, + }, + "", + "", + }, + { + "long blob", + &B0{B: makeUint8Slice(maxIndexedProperties + 1)}, + &B0{B: makeUint8Slice(maxIndexedProperties + 1)}, + "", + "", + }, + { + "long []int8 is too long", + &B1{B: makeInt8Slice(maxIndexedProperties + 1)}, + &B1{}, + "too many indexed properties", + "", + }, + { + "short []int8", + &B1{B: makeInt8Slice(3)}, + &B1{B: 
makeInt8Slice(3)}, + "", + "", + }, + { + "long myBlob", + &B2{B: makeUint8Slice(maxIndexedProperties + 1)}, + &B2{B: makeUint8Slice(maxIndexedProperties + 1)}, + "", + "", + }, + { + "short myBlob", + &B2{B: makeUint8Slice(3)}, + &B2{B: makeUint8Slice(3)}, + "", + "", + }, + { + "long []myByte", + &B3{B: makeMyByteSlice(maxIndexedProperties + 1)}, + &B3{B: makeMyByteSlice(maxIndexedProperties + 1)}, + "", + "", + }, + { + "short []myByte", + &B3{B: makeMyByteSlice(3)}, + &B3{B: makeMyByteSlice(3)}, + "", + "", + }, + { + "slice of blobs", + &B4{B: [][]byte{ + makeUint8Slice(3), + makeUint8Slice(4), + makeUint8Slice(5), + }}, + &B4{B: [][]byte{ + makeUint8Slice(3), + makeUint8Slice(4), + makeUint8Slice(5), + }}, + "", + "", + }, + { + "[]byte must be noindex", + &PropertyList{ + Property{Name: "B", Value: makeUint8Slice(1501), NoIndex: false}, + }, + nil, + "[]byte property too long to index", + "", + }, + { + "string must be noindex", + &PropertyList{ + Property{Name: "B", Value: strings.Repeat("x", 1501), NoIndex: false}, + }, + nil, + "string property too long to index", + "", + }, + { + "slice of []byte must be noindex", + &PropertyList{ + Property{Name: "B", Value: []interface{}{ + []byte("short"), + makeUint8Slice(1501), + }, NoIndex: false}, + }, + nil, + "[]byte property too long to index", + "", + }, + { + "slice of string must be noindex", + &PropertyList{ + Property{Name: "B", Value: []interface{}{ + "short", + strings.Repeat("x", 1501), + }, NoIndex: false}, + }, + nil, + "string property too long to index", + "", + }, + { + "save tagged load props", + &Tagged{A: 1, B: []int{21, 22, 23}, C: 3, D: 4, E: 5, I: 6, J: 7}, + &PropertyList{ + // A and B are renamed to a and b; A and C are noindex, I is ignored. + // Order is sorted as per byName. 
+ Property{Name: "C", Value: int64(3), NoIndex: true}, + Property{Name: "D", Value: int64(4), NoIndex: false}, + Property{Name: "E", Value: int64(5), NoIndex: false}, + Property{Name: "J", Value: int64(7), NoIndex: true}, + Property{Name: "a", Value: int64(1), NoIndex: true}, + Property{Name: "b", Value: []interface{}{int64(21), int64(22), int64(23)}, NoIndex: false}, + }, + "", + "", + }, + { + "save tagged load tagged", + &Tagged{A: 1, B: []int{21, 22, 23}, C: 3, D: 4, E: 5, I: 6, J: 7}, + &Tagged{A: 1, B: []int{21, 22, 23}, C: 3, D: 4, E: 5, J: 7}, + "", + "", + }, + { + "invalid tagged1", + &InvalidTagged1{I: 1}, + &InvalidTagged1{}, + "struct tag has invalid property name", + "", + }, + { + "invalid tagged2", + &InvalidTagged2{I: 1, J: 2}, + &InvalidTagged2{J: 2}, + "", + "", + }, + { + "invalid tagged3", + &InvalidTagged3{X: "hello"}, + &InvalidTagged3{}, + "struct tag has invalid property name: \"-\"", + "", + }, + { + "invalid tagged4", + &InvalidTagged4{X: "hello"}, + &InvalidTagged4{}, + "struct tag has invalid option: \"garbage\"", + "", + }, + { + "doubler", + &Doubler{S: "s", I: 1, B: true}, + &Doubler{S: "ss", I: 2, B: true}, + "", + "", + }, + { + "save struct load props", + &X0{S: "s", I: 1}, + &PropertyList{ + Property{Name: "I", Value: int64(1), NoIndex: false}, + Property{Name: "S", Value: "s", NoIndex: false}, + }, + "", + "", + }, + { + "save props load struct", + &PropertyList{ + Property{Name: "I", Value: int64(1), NoIndex: false}, + Property{Name: "S", Value: "s", NoIndex: false}, + }, + &X0{S: "s", I: 1}, + "", + "", + }, + { + "nil-value props", + &PropertyList{ + Property{Name: "I", Value: nil, NoIndex: false}, + Property{Name: "B", Value: nil, NoIndex: false}, + Property{Name: "S", Value: nil, NoIndex: false}, + Property{Name: "F", Value: nil, NoIndex: false}, + Property{Name: "K", Value: nil, NoIndex: false}, + Property{Name: "T", Value: nil, NoIndex: false}, + Property{Name: "J", Value: []interface{}{nil, int64(7), nil}, NoIndex: 
false}, + }, + &struct { + I int64 + B bool + S string + F float64 + K *Key + T time.Time + J []int64 + }{ + J: []int64{0, 7, 0}, + }, + "", + "", + }, + { + "save outer load props flatten", + &OuterFlatten{ + A: 1, + I: []Inner1{ + {10, "ten"}, + {20, "twenty"}, + {30, "thirty"}, + }, + J: Inner2{ + Y: 3.14, + }, + Inner3: Inner3{ + Z: true, + }, + K: Inner4{ + X: Inner5{ + WW: 12, + }, + }, + }, + &PropertyList{ + Property{Name: "A", Value: int64(1), NoIndex: false}, + Property{Name: "I.W", Value: []interface{}{int64(10), int64(20), int64(30)}, NoIndex: false}, + Property{Name: "I.X", Value: []interface{}{"ten", "twenty", "thirty"}, NoIndex: false}, + Property{Name: "J.Y", Value: float64(3.14), NoIndex: true}, + Property{Name: "K.X.WW", Value: int64(12), NoIndex: false}, + Property{Name: "Z", Value: true, NoIndex: false}, + }, + "", + "", + }, + { + "load outer props flatten", + &PropertyList{ + Property{Name: "A", Value: int64(1), NoIndex: false}, + Property{Name: "I.W", Value: []interface{}{int64(10), int64(20), int64(30)}, NoIndex: false}, + Property{Name: "I.X", Value: []interface{}{"ten", "twenty", "thirty"}, NoIndex: false}, + Property{Name: "J.Y", Value: float64(3.14), NoIndex: true}, + Property{Name: "Z", Value: true, NoIndex: false}, + }, + &OuterFlatten{ + A: 1, + I: []Inner1{ + {10, "ten"}, + {20, "twenty"}, + {30, "thirty"}, + }, + J: Inner2{ + Y: 3.14, + }, + Inner3: Inner3{ + Z: true, + }, + }, + "", + "", + }, + { + "save outer load props", + &Outer{ + A: 1, + I: []Inner1{ + {10, "ten"}, + {20, "twenty"}, + {30, "thirty"}, + }, + J: Inner2{ + Y: 3.14, + }, + Inner3: Inner3{ + Z: true, + }, + }, + &PropertyList{ + Property{Name: "A", Value: int64(1), NoIndex: false}, + Property{Name: "I", Value: []interface{}{ + &Entity{ + Properties: []Property{ + Property{Name: "W", Value: int64(10), NoIndex: false}, + Property{Name: "X", Value: "ten", NoIndex: false}, + }, + }, + &Entity{ + Properties: []Property{ + Property{Name: "W", Value: int64(20), NoIndex: 
false}, + Property{Name: "X", Value: "twenty", NoIndex: false}, + }, + }, + &Entity{ + Properties: []Property{ + Property{Name: "W", Value: int64(30), NoIndex: false}, + Property{Name: "X", Value: "thirty", NoIndex: false}, + }, + }, + }, NoIndex: false}, + Property{Name: "J", Value: &Entity{ + Properties: []Property{ + Property{Name: "Y", Value: float64(3.14), NoIndex: false}, + }, + }, NoIndex: false}, + Property{Name: "Z", Value: true, NoIndex: false}, + }, + "", + "", + }, + { + "save props load outer-equivalent", + &PropertyList{ + Property{Name: "A", Value: int64(1), NoIndex: false}, + Property{Name: "I.W", Value: []interface{}{int64(10), int64(20), int64(30)}, NoIndex: false}, + Property{Name: "I.X", Value: []interface{}{"ten", "twenty", "thirty"}, NoIndex: false}, + Property{Name: "J.Y", Value: float64(3.14), NoIndex: false}, + Property{Name: "Z", Value: true, NoIndex: false}, + }, + &OuterEquivalent{ + A: 1, + IDotW: []int32{10, 20, 30}, + IDotX: []string{"ten", "twenty", "thirty"}, + JDotY: 3.14, + Z: true, + }, + "", + "", + }, + { + "dotted names save", + &Dotted{A: DottedA{B: DottedB{C: 88}}}, + &PropertyList{ + Property{Name: "A0.A1.A2", Value: &Entity{ + Properties: []Property{ + Property{Name: "B3", Value: &Entity{ + Properties: []Property{ + Property{Name: "C4.C5", Value: int64(88), NoIndex: false}, + }, + }, NoIndex: false}, + }, + }, NoIndex: false}, + }, + "", + "", + }, + { + "dotted names load", + &PropertyList{ + Property{Name: "A0.A1.A2", Value: &Entity{ + Properties: []Property{ + Property{Name: "B3", Value: &Entity{ + Properties: []Property{ + Property{Name: "C4.C5", Value: 99, NoIndex: false}, + }, + }, NoIndex: false}, + }, + }, NoIndex: false}, + }, + &Dotted{A: DottedA{B: DottedB{C: 99}}}, + "", + "", + }, + { + "save struct load deriver", + &X0{S: "s", I: 1}, + &Deriver{S: "s", Derived: "derived+s"}, + "", + "", + }, + { + "save deriver load struct", + &Deriver{S: "s", Derived: "derived+s", Ignored: "ignored"}, + &X0{S: "s"}, + "", + 
"", + }, + { + "zero time.Time", + &T{T: time.Time{}}, + &T{T: time.Time{}}, + "", + "", + }, + { + "time.Time near Unix zero time", + &T{T: time.Unix(0, 4e3)}, + &T{T: time.Unix(0, 4e3)}, + "", + "", + }, + { + "time.Time, far in the future", + &T{T: time.Date(99999, 1, 1, 0, 0, 0, 0, time.UTC)}, + &T{T: time.Date(99999, 1, 1, 0, 0, 0, 0, time.UTC)}, + "", + "", + }, + { + "time.Time, very far in the past", + &T{T: time.Date(-300000, 1, 1, 0, 0, 0, 0, time.UTC)}, + &T{}, + "time value out of range", + "", + }, + { + "time.Time, very far in the future", + &T{T: time.Date(294248, 1, 1, 0, 0, 0, 0, time.UTC)}, + &T{}, + "time value out of range", + "", + }, + { + "structs", + &N0{ + X0: X0{S: "one", I: 2, i: 3}, + Nonymous: X0{S: "four", I: 5, i: 6}, + Ignore: "ignore", + Other: "other", + }, + &N0{ + X0: X0{S: "one", I: 2}, + Nonymous: X0{S: "four", I: 5}, + Other: "other", + }, + "", + "", + }, + { + "slice of structs", + &N1{ + X0: X0{S: "one", I: 2, i: 3}, + Nonymous: []X0{ + {S: "four", I: 5, i: 6}, + {S: "seven", I: 8, i: 9}, + {S: "ten", I: 11, i: 12}, + {S: "thirteen", I: 14, i: 15}, + }, + Ignore: "ignore", + Other: "other", + }, + &N1{ + X0: X0{S: "one", I: 2}, + Nonymous: []X0{ + {S: "four", I: 5}, + {S: "seven", I: 8}, + {S: "ten", I: 11}, + {S: "thirteen", I: 14}, + }, + Other: "other", + }, + "", + "", + }, + { + "structs with slices of structs", + &N2{ + N1: N1{ + X0: X0{S: "rouge"}, + Nonymous: []X0{ + {S: "rosso0"}, + {S: "rosso1"}, + }, + }, + Green: N1{ + X0: X0{S: "vert"}, + Nonymous: []X0{ + {S: "verde0"}, + {S: "verde1"}, + {S: "verde2"}, + }, + }, + Blue: N1{ + X0: X0{S: "bleu"}, + Nonymous: []X0{ + {S: "blu0"}, + {S: "blu1"}, + {S: "blu2"}, + {S: "blu3"}, + }, + }, + }, + &N2{ + N1: N1{ + X0: X0{S: "rouge"}, + Nonymous: []X0{ + {S: "rosso0"}, + {S: "rosso1"}, + }, + }, + Green: N1{ + X0: X0{S: "vert"}, + Nonymous: []X0{ + {S: "verde0"}, + {S: "verde1"}, + {S: "verde2"}, + }, + }, + Blue: N1{ + X0: X0{S: "bleu"}, + Nonymous: []X0{ + {S: 
"blu0"}, + {S: "blu1"}, + {S: "blu2"}, + {S: "blu3"}, + }, + }, + }, + "", + "", + }, + { + "save structs load props", + &N2{ + N1: N1{ + X0: X0{S: "rouge"}, + Nonymous: []X0{ + {S: "rosso0"}, + {S: "rosso1"}, + }, + }, + Green: N1{ + X0: X0{S: "vert"}, + Nonymous: []X0{ + {S: "verde0"}, + {S: "verde1"}, + {S: "verde2"}, + }, + }, + Blue: N1{ + X0: X0{S: "bleu"}, + Nonymous: []X0{ + {S: "blu0"}, + {S: "blu1"}, + {S: "blu2"}, + {S: "blu3"}, + }, + }, + }, + &PropertyList{ + Property{Name: "Blue", Value: &Entity{ + Properties: []Property{ + Property{Name: "I", Value: int64(0), NoIndex: false}, + Property{Name: "Nonymous", Value: []interface{}{ + &Entity{ + Properties: []Property{ + Property{Name: "I", Value: int64(0), NoIndex: false}, + Property{Name: "S", Value: "blu0", NoIndex: false}, + }, + }, + &Entity{ + Properties: []Property{ + Property{Name: "I", Value: int64(0), NoIndex: false}, + Property{Name: "S", Value: "blu1", NoIndex: false}, + }, + }, + &Entity{ + Properties: []Property{ + Property{Name: "I", Value: int64(0), NoIndex: false}, + Property{Name: "S", Value: "blu2", NoIndex: false}, + }, + }, + &Entity{ + Properties: []Property{ + Property{Name: "I", Value: int64(0), NoIndex: false}, + Property{Name: "S", Value: "blu3", NoIndex: false}, + }, + }, + }, NoIndex: false}, + Property{Name: "Other", Value: "", NoIndex: false}, + Property{Name: "S", Value: "bleu", NoIndex: false}, + }, + }, NoIndex: false}, + Property{Name: "green", Value: &Entity{ + Properties: []Property{ + Property{Name: "I", Value: int64(0), NoIndex: false}, + Property{Name: "Nonymous", Value: []interface{}{ + &Entity{ + Properties: []Property{ + Property{Name: "I", Value: int64(0), NoIndex: false}, + Property{Name: "S", Value: "verde0", NoIndex: false}, + }, + }, + &Entity{ + Properties: []Property{ + Property{Name: "I", Value: int64(0), NoIndex: false}, + Property{Name: "S", Value: "verde1", NoIndex: false}, + }, + }, + &Entity{ + Properties: []Property{ + Property{Name: "I", Value: 
int64(0), NoIndex: false}, + Property{Name: "S", Value: "verde2", NoIndex: false}, + }, + }, + }, NoIndex: false}, + Property{Name: "Other", Value: "", NoIndex: false}, + Property{Name: "S", Value: "vert", NoIndex: false}, + }, + }, NoIndex: false}, + Property{Name: "red", Value: &Entity{ + Properties: []Property{ + Property{Name: "I", Value: int64(0), NoIndex: false}, + Property{Name: "Nonymous", Value: []interface{}{ + &Entity{ + Properties: []Property{ + Property{Name: "I", Value: int64(0), NoIndex: false}, + Property{Name: "S", Value: "rosso0", NoIndex: false}, + }, + }, + &Entity{ + Properties: []Property{ + Property{Name: "I", Value: int64(0), NoIndex: false}, + Property{Name: "S", Value: "rosso1", NoIndex: false}, + }, + }, + }, NoIndex: false}, + Property{Name: "Other", Value: "", NoIndex: false}, + Property{Name: "S", Value: "rouge", NoIndex: false}, + }, + }, NoIndex: false}, + }, + "", + "", + }, + { + "nested entity with key", + &WithNestedEntityWithKey{ + N: EntityWithKey{ + I: 12, + S: "abcd", + K: testKey0, + }, + }, + &WithNestedEntityWithKey{ + N: EntityWithKey{ + I: 12, + S: "abcd", + K: testKey0, + }, + }, + "", + "", + }, + { + "entity with key at top level", + &EntityWithKey{ + I: 12, + S: "abc", + K: testKey0, + }, + &EntityWithKey{ + I: 12, + S: "abc", + K: testKey0, + }, + "", + "", + }, + { + "entity with key at top level (key is populated on load)", + &EntityWithKey{ + I: 12, + S: "abc", + }, + &EntityWithKey{ + I: 12, + S: "abc", + K: testKey0, + }, + "", + "", + }, + { + "__key__ field not a *Key", + &NestedWithNonKeyField{ + N: WithNonKeyField{ + I: 12, + K: "abcd", + }, + }, + &NestedWithNonKeyField{ + N: WithNonKeyField{ + I: 12, + K: "abcd", + }, + }, + "datastore: __key__ field on struct datastore.WithNonKeyField is not a *datastore.Key", + "", + }, + { + "save struct with ptr to struct fields", + &PtrToStructField{ + &Basic{ + A: "b", + }, + &Basic{ + A: "c", + }, + &Basic{ + A: "anon", + }, + []*Basic{ + &Basic{ + A: "slice0", + 
}, + &Basic{ + A: "slice1", + }, + }, + }, + &PropertyList{ + Property{Name: "A", Value: "anon", NoIndex: false}, + Property{Name: "B", Value: &Entity{ + Properties: []Property{ + Property{Name: "A", Value: "b", NoIndex: false}, + }, + }}, + Property{Name: "D", Value: []interface{}{ + &Entity{ + Properties: []Property{ + Property{Name: "A", Value: "slice0", NoIndex: false}, + }, + }, + &Entity{ + Properties: []Property{ + Property{Name: "A", Value: "slice1", NoIndex: false}, + }, + }, + }, NoIndex: false}, + Property{Name: "c", Value: &Entity{ + Properties: []Property{ + Property{Name: "A", Value: "c", NoIndex: true}, + }, + }, NoIndex: true}, + }, + "", + "", + }, + { + "save and load struct with ptr to struct fields", + &PtrToStructField{ + &Basic{ + A: "b", + }, + &Basic{ + A: "c", + }, + &Basic{ + A: "anon", + }, + []*Basic{ + &Basic{ + A: "slice0", + }, + &Basic{ + A: "slice1", + }, + }, + }, + &PtrToStructField{ + &Basic{ + A: "b", + }, + &Basic{ + A: "c", + }, + &Basic{ + A: "anon", + }, + []*Basic{ + &Basic{ + A: "slice0", + }, + &Basic{ + A: "slice1", + }, + }, + }, + "", + "", + }, + { + "save struct with pointer to int field", + &PtrToInt{ + I: &two, + }, + &PtrToInt{}, + "unsupported struct field", + "", + }, + { + "struct with nil ptr to struct fields", + &PtrToStructField{ + nil, + nil, + nil, + nil, + }, + new(PropertyList), + "", + "", + }, + { + "nested load entity with key", + &WithNestedEntityWithKey{ + N: EntityWithKey{ + I: 12, + S: "abcd", + K: testKey0, + }, + }, + &PropertyList{ + Property{Name: "N", Value: &Entity{ + Key: testKey0, + Properties: []Property{ + Property{Name: "I", Value: int64(12), NoIndex: false}, + Property{Name: "S", Value: "abcd", NoIndex: false}, + }, + }, + NoIndex: false}, + }, + "", + "", + }, + { + "nested save entity with key", + &PropertyList{ + Property{Name: "N", Value: &Entity{ + Key: testKey0, + Properties: []Property{ + Property{Name: "I", Value: int64(12), NoIndex: false}, + Property{Name: "S", Value: "abcd", 
NoIndex: false}, + }, + }, NoIndex: false}, + }, + + &WithNestedEntityWithKey{ + N: EntityWithKey{ + I: 12, + S: "abcd", + K: testKey0, + }, + }, + "", + "", + }, + { + "anonymous field with tag", + &N3{ + C3: C3{C: "s"}, + }, + &PropertyList{ + Property{Name: "red", Value: &Entity{ + Properties: []Property{ + Property{Name: "C", Value: "s", NoIndex: false}, + }, + }, NoIndex: false}, + }, + "", + "", + }, + { + "unexported anonymous field", + &N4{ + c4: c4{C: "s"}, + }, + &PropertyList{ + Property{Name: "C", Value: "s", NoIndex: false}, + }, + "", + "", + }, + { + "unexported anonymous field with tag", + &N5{ + c4: c4{C: "s"}, + }, + new(PropertyList), + "", + "", + }, + { + "save props load structs with ragged fields", + &PropertyList{ + Property{Name: "red.S", Value: "rot", NoIndex: false}, + Property{Name: "green.Nonymous.I", Value: []interface{}{int64(10), int64(11), int64(12), int64(13)}, NoIndex: false}, + Property{Name: "Blue.Nonymous.I", Value: []interface{}{int64(20), int64(21)}, NoIndex: false}, + Property{Name: "Blue.Nonymous.S", Value: []interface{}{"blau0", "blau1", "blau2"}, NoIndex: false}, + }, + &N2{ + N1: N1{ + X0: X0{S: "rot"}, + }, + Green: N1{ + Nonymous: []X0{ + {I: 10}, + {I: 11}, + {I: 12}, + {I: 13}, + }, + }, + Blue: N1{ + Nonymous: []X0{ + {S: "blau0", I: 20}, + {S: "blau1", I: 21}, + {S: "blau2"}, + }, + }, + }, + "", + "", + }, + { + "save structs with noindex tags", + &struct { + A struct { + X string `datastore:",noindex"` + Y string + } `datastore:",noindex"` + B struct { + X string `datastore:",noindex"` + Y string + } + }{}, + &PropertyList{ + Property{Name: "A", Value: &Entity{ + Properties: []Property{ + Property{Name: "X", Value: "", NoIndex: true}, + Property{Name: "Y", Value: "", NoIndex: true}, + }, + }, NoIndex: true}, + Property{Name: "B", Value: &Entity{ + Properties: []Property{ + Property{Name: "X", Value: "", NoIndex: true}, + Property{Name: "Y", Value: "", NoIndex: false}, + }, + }, NoIndex: false}, + }, + "", + "", + 
}, + { + "embedded struct with name override", + &struct { + Inner1 `datastore:"foo"` + }{}, + &PropertyList{ + Property{Name: "foo", Value: &Entity{ + Properties: []Property{ + Property{Name: "W", Value: int64(0), NoIndex: false}, + Property{Name: "X", Value: "", NoIndex: false}, + }, + }, NoIndex: false}, + }, + "", + "", + }, + { + "slice of slices", + &SliceOfSlices{}, + nil, + "flattening nested structs leads to a slice of slices", + "", + }, + { + "recursive struct", + &Recursive{}, + &Recursive{}, + "", + "", + }, + { + "mutually recursive struct", + &MutuallyRecursive0{}, + &MutuallyRecursive0{}, + "", + "", + }, + { + "non-exported struct fields", + &struct { + i, J int64 + }{i: 1, J: 2}, + &PropertyList{ + Property{Name: "J", Value: int64(2), NoIndex: false}, + }, + "", + "", + }, + { + "json.RawMessage", + &struct { + J json.RawMessage + }{ + J: json.RawMessage("rawr"), + }, + &PropertyList{ + Property{Name: "J", Value: []byte("rawr"), NoIndex: false}, + }, + "", + "", + }, + { + "json.RawMessage to myBlob", + &struct { + B json.RawMessage + }{ + B: json.RawMessage("rawr"), + }, + &B2{B: myBlob("rawr")}, + "", + "", + }, + { + "repeated property names", + &PropertyList{ + Property{Name: "A", Value: ""}, + Property{Name: "A", Value: ""}, + }, + nil, + "duplicate Property", + "", + }, + { + "embedded time field", + &SpecialTime{MyTime: EmbeddedTime{ts}}, + &SpecialTime{MyTime: EmbeddedTime{ts}}, + "", + "", + }, + { + "embedded time load", + &PropertyList{ + Property{Name: "MyTime.Time", Value: ts}, + }, + &SpecialTime{MyTime: EmbeddedTime{ts}}, + "", + "", + }, +} + +// checkErr returns the empty string if either both want and err are zero, +// or if want is a non-empty substring of err's string representation. 
+func checkErr(want string, err error) string { + if err != nil { + got := err.Error() + if want == "" || strings.Index(got, want) == -1 { + return got + } + } else if want != "" { + return fmt.Sprintf("want error %q", want) + } + return "" +} + +func TestRoundTrip(t *testing.T) { + for _, tc := range testCases { + p, err := saveEntity(testKey0, tc.src) + if s := checkErr(tc.putErr, err); s != "" { + t.Errorf("%s: save: %s", tc.desc, s) + continue + } + if p == nil { + continue + } + var got interface{} + if _, ok := tc.want.(*PropertyList); ok { + got = new(PropertyList) + } else { + got = reflect.New(reflect.TypeOf(tc.want).Elem()).Interface() + } + err = loadEntityProto(got, p) + if s := checkErr(tc.getErr, err); s != "" { + t.Errorf("%s: load: %s", tc.desc, s) + continue + } + if pl, ok := got.(*PropertyList); ok { + // Sort by name to make sure we have a deterministic order. + sortPL(*pl) + } + + equal := false + switch v := got.(type) { + // Round tripping a time.Time can result in a different time.Location: Local instead of UTC. + // We therefore test equality explicitly, instead of relying on reflect.DeepEqual. 
+ case *T: + equal = v.T.Equal(tc.want.(*T).T) + case *SpecialTime: + equal = v.MyTime.Equal(tc.want.(*SpecialTime).MyTime.Time) + default: + equal = reflect.DeepEqual(got, tc.want) + } + if !equal { + t.Errorf("%s: compare:\ngot: %#v\nwant: %#v", tc.desc, got, tc.want) + continue + } + } +} + +type aPtrPLS struct { + Count int +} + +func (pls *aPtrPLS) Load([]Property) error { + pls.Count += 1 + return nil +} + +func (pls *aPtrPLS) Save() ([]Property, error) { + return []Property{{Name: "Count", Value: 4}}, nil +} + +type aValuePLS struct { + Count int +} + +func (pls aValuePLS) Load([]Property) error { + pls.Count += 2 + return nil +} + +func (pls aValuePLS) Save() ([]Property, error) { + return []Property{{Name: "Count", Value: 8}}, nil +} + +type aNotPLS struct { + Count int +} + +type plsString string + +func (s *plsString) Load([]Property) error { + *s = "LOADED" + return nil +} + +func (s *plsString) Save() ([]Property, error) { + return []Property{{Name: "SS", Value: "SAVED"}}, nil +} + +type aSubPLS struct { + Foo string + Bar *aPtrPLS +} + +type aSubNotPLS struct { + Foo string + Bar *aNotPLS + S plsString `datastore:",omitempty"` +} + +type aSubPLSErr struct { + Foo string + Bar aValuePLS +} + +func TestLoadSaveNestedStructPLS(t *testing.T) { + type testCase struct { + desc string + src interface{} + wantSave *pb.Entity + wantLoad interface{} + loadErr string + } + + testCases := []testCase{ + { + desc: "substruct (ptr) does implement PLS", + src: &aSubPLS{Foo: "foo", Bar: &aPtrPLS{Count: 2}}, + wantSave: &pb.Entity{ + Key: keyToProto(testKey0), + Properties: map[string]*pb.Value{ + "Foo": {ValueType: &pb.Value_StringValue{"foo"}}, + "Bar": {ValueType: &pb.Value_EntityValue{ + &pb.Entity{ + Properties: map[string]*pb.Value{ + "Count": {ValueType: &pb.Value_IntegerValue{4}}, + }, + }, + }}, + }, + }, + // PLS impl for 'S' not used, not entity. 
+ wantLoad: &aSubPLS{Foo: "foo", Bar: &aPtrPLS{Count: 1}}, + }, + { + desc: "substruct (ptr) does implement PLS, nil valued substruct", + src: &aSubPLS{Foo: "foo"}, + wantSave: &pb.Entity{ + Key: keyToProto(testKey0), + Properties: map[string]*pb.Value{ + "Foo": {ValueType: &pb.Value_StringValue{"foo"}}, + }, + }, + wantLoad: &aSubPLS{Foo: "foo"}, + }, + { + desc: "substruct (ptr) does not implement PLS", + src: &aSubNotPLS{Foo: "foo", Bar: &aNotPLS{Count: 2}, S: "something"}, + wantSave: &pb.Entity{ + Key: keyToProto(testKey0), + Properties: map[string]*pb.Value{ + "Foo": {ValueType: &pb.Value_StringValue{"foo"}}, + "Bar": {ValueType: &pb.Value_EntityValue{ + &pb.Entity{ + Properties: map[string]*pb.Value{ + "Count": {ValueType: &pb.Value_IntegerValue{2}}, + }, + }, + }}, + // PLS impl for 'S' not used, not entity. + "S": {ValueType: &pb.Value_StringValue{"something"}}, + }, + }, + wantLoad: &aSubNotPLS{Foo: "foo", Bar: &aNotPLS{Count: 2}, S: "something"}, + }, + { + desc: "substruct (value) does implement PLS, error", + src: &aSubPLSErr{Foo: "foo", Bar: aValuePLS{Count: 3}}, + wantSave: &pb.Entity{ + Key: keyToProto(testKey0), + Properties: map[string]*pb.Value{ + "Foo": {ValueType: &pb.Value_StringValue{"foo"}}, + "Bar": {ValueType: &pb.Value_EntityValue{ + &pb.Entity{ + Properties: map[string]*pb.Value{ + "Count": {ValueType: &pb.Value_IntegerValue{8}}, + }, + }, + }}, + }, + }, + wantLoad: &aSubPLSErr{}, + loadErr: "PropertyLoadSaver methods must be implemented on a pointer", + }, + } + + for _, tc := range testCases { + e, err := saveEntity(testKey0, tc.src) + if err != nil { + t.Errorf("%s: save: %v", tc.desc, err) + continue + } + + if !reflect.DeepEqual(e, tc.wantSave) { + t.Errorf("%s: save: got: %#v, want: %#v", tc.desc, e, tc.wantSave) + continue + } + + gota := reflect.New(reflect.TypeOf(tc.wantLoad).Elem()).Interface() + err = loadEntityProto(gota, e) + switch tc.loadErr { + case "": + if err != nil { + t.Errorf("%s: load: %v", tc.desc, err) + 
continue + } + default: + if err == nil { + t.Errorf("%s: load: want err", tc.desc) + continue + } + if !strings.Contains(err.Error(), tc.loadErr) { + t.Errorf("%s: load: want err '%s', got '%s'", tc.desc, err.Error(), tc.loadErr) + } + continue + } + + if !reflect.DeepEqual(tc.wantLoad, gota) { + t.Errorf("%s: load: got: %#v, want: %#v", tc.desc, gota, tc.wantLoad) + continue + } + } + +} + +func TestQueryConstruction(t *testing.T) { + tests := []struct { + q, exp *Query + err string + }{ + { + q: NewQuery("Foo"), + exp: &Query{ + kind: "Foo", + limit: -1, + }, + }, + { + // Regular filtered query with standard spacing. + q: NewQuery("Foo").Filter("foo >", 7), + exp: &Query{ + kind: "Foo", + filter: []filter{ + { + FieldName: "foo", + Op: greaterThan, + Value: 7, + }, + }, + limit: -1, + }, + }, + { + // Filtered query with no spacing. + q: NewQuery("Foo").Filter("foo=", 6), + exp: &Query{ + kind: "Foo", + filter: []filter{ + { + FieldName: "foo", + Op: equal, + Value: 6, + }, + }, + limit: -1, + }, + }, + { + // Filtered query with funky spacing. + q: NewQuery("Foo").Filter(" foo< ", 8), + exp: &Query{ + kind: "Foo", + filter: []filter{ + { + FieldName: "foo", + Op: lessThan, + Value: 8, + }, + }, + limit: -1, + }, + }, + { + // Filtered query with multicharacter op. + q: NewQuery("Foo").Filter("foo >=", 9), + exp: &Query{ + kind: "Foo", + filter: []filter{ + { + FieldName: "foo", + Op: greaterEq, + Value: 9, + }, + }, + limit: -1, + }, + }, + { + // Query with ordering. + q: NewQuery("Foo").Order("bar"), + exp: &Query{ + kind: "Foo", + order: []order{ + { + FieldName: "bar", + Direction: ascending, + }, + }, + limit: -1, + }, + }, + { + // Query with reverse ordering, and funky spacing. + q: NewQuery("Foo").Order(" - bar"), + exp: &Query{ + kind: "Foo", + order: []order{ + { + FieldName: "bar", + Direction: descending, + }, + }, + limit: -1, + }, + }, + { + // Query with an empty ordering. 
+ q: NewQuery("Foo").Order(""), + err: "empty order", + }, + { + // Query with a + ordering. + q: NewQuery("Foo").Order("+bar"), + err: "invalid order", + }, + } + for i, test := range tests { + if test.q.err != nil { + got := test.q.err.Error() + if !strings.Contains(got, test.err) { + t.Errorf("%d: error mismatch: got %q want something containing %q", i, got, test.err) + } + continue + } + if !reflect.DeepEqual(test.q, test.exp) { + t.Errorf("%d: mismatch: got %v want %v", i, test.q, test.exp) + } + } +} + +func TestPutMultiTypes(t *testing.T) { + ctx := context.Background() + type S struct { + A int + B string + } + + testCases := []struct { + desc string + src interface{} + wantErr bool + }{ + // Test cases to check each of the valid input types for src. + // Each case has the same elements. + { + desc: "type []struct", + src: []S{ + {1, "one"}, {2, "two"}, + }, + }, + { + desc: "type []*struct", + src: []*S{ + {1, "one"}, {2, "two"}, + }, + }, + { + desc: "type []interface{} with PLS elems", + src: []interface{}{ + &PropertyList{Property{Name: "A", Value: 1}, Property{Name: "B", Value: "one"}}, + &PropertyList{Property{Name: "A", Value: 2}, Property{Name: "B", Value: "two"}}, + }, + }, + { + desc: "type []interface{} with struct ptr elems", + src: []interface{}{ + &S{1, "one"}, &S{2, "two"}, + }, + }, + { + desc: "type []PropertyLoadSaver{}", + src: []PropertyLoadSaver{ + &PropertyList{Property{Name: "A", Value: 1}, Property{Name: "B", Value: "one"}}, + &PropertyList{Property{Name: "A", Value: 2}, Property{Name: "B", Value: "two"}}, + }, + }, + { + desc: "type []P (non-pointer, *P implements PropertyLoadSaver)", + src: []PropertyList{ + {Property{Name: "A", Value: 1}, Property{Name: "B", Value: "one"}}, + {Property{Name: "A", Value: 2}, Property{Name: "B", Value: "two"}}, + }, + }, + // Test some invalid cases. 
+ { + desc: "type []interface{} with struct elems", + src: []interface{}{ + S{1, "one"}, S{2, "two"}, + }, + wantErr: true, + }, + { + desc: "PropertyList", + src: PropertyList{ + Property{Name: "A", Value: 1}, + Property{Name: "B", Value: "one"}, + }, + wantErr: true, + }, + { + desc: "type []int", + src: []int{1, 2}, + wantErr: true, + }, + { + desc: "not a slice", + src: S{1, "one"}, + wantErr: true, + }, + } + + // Use the same keys and expected entities for all tests. + keys := []*Key{ + NameKey("testKind", "first", nil), + NameKey("testKind", "second", nil), + } + want := []*pb.Mutation{ + {Operation: &pb.Mutation_Upsert{&pb.Entity{ + Key: keyToProto(keys[0]), + Properties: map[string]*pb.Value{ + "A": {ValueType: &pb.Value_IntegerValue{1}}, + "B": {ValueType: &pb.Value_StringValue{"one"}}, + }, + }}}, + {Operation: &pb.Mutation_Upsert{&pb.Entity{ + Key: keyToProto(keys[1]), + Properties: map[string]*pb.Value{ + "A": {ValueType: &pb.Value_IntegerValue{2}}, + "B": {ValueType: &pb.Value_StringValue{"two"}}, + }, + }}}, + } + + for _, tt := range testCases { + // Set up a fake client which captures upserts. 
+ var got []*pb.Mutation + client := &Client{ + client: &fakeClient{ + commitFn: func(req *pb.CommitRequest) (*pb.CommitResponse, error) { + got = req.Mutations + return &pb.CommitResponse{}, nil + }, + }, + } + + _, err := client.PutMulti(ctx, keys, tt.src) + if err != nil { + if !tt.wantErr { + t.Errorf("%s: error %v", tt.desc, err) + } + continue + } + if tt.wantErr { + t.Errorf("%s: wanted error, but none returned", tt.desc) + continue + } + if len(got) != len(want) { + t.Errorf("%s: got %d entities, want %d", tt.desc, len(got), len(want)) + continue + } + for i, e := range got { + if !proto.Equal(e, want[i]) { + t.Logf("%s: entity %d doesn't match\ngot: %v\nwant: %v", tt.desc, i, e, want[i]) + } + } + } +} + +func TestNoIndexOnSliceProperties(t *testing.T) { + // Check that ExcludeFromIndexes is set on the inner elements, + // rather than the top-level ArrayValue value. + pl := PropertyList{ + Property{ + Name: "repeated", + Value: []interface{}{ + 123, + false, + "short", + strings.Repeat("a", 1503), + }, + NoIndex: true, + }, + } + key := NameKey("dummy", "dummy", nil) + + entity, err := saveEntity(key, &pl) + if err != nil { + t.Fatalf("saveEntity: %v", err) + } + + want := &pb.Value{ + ValueType: &pb.Value_ArrayValue{&pb.ArrayValue{[]*pb.Value{ + {ValueType: &pb.Value_IntegerValue{123}, ExcludeFromIndexes: true}, + {ValueType: &pb.Value_BooleanValue{false}, ExcludeFromIndexes: true}, + {ValueType: &pb.Value_StringValue{"short"}, ExcludeFromIndexes: true}, + {ValueType: &pb.Value_StringValue{strings.Repeat("a", 1503)}, ExcludeFromIndexes: true}, + }}}, + } + if got := entity.Properties["repeated"]; !proto.Equal(got, want) { + t.Errorf("Entity proto differs\ngot: %v\nwant: %v", got, want) + } +} + +type byName PropertyList + +func (s byName) Len() int { return len(s) } +func (s byName) Less(i, j int) bool { return s[i].Name < s[j].Name } +func (s byName) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +// sortPL sorts the property list by property name, and +// 
recursively sorts any nested property lists, or nested slices of +// property lists. +func sortPL(pl PropertyList) { + sort.Stable(byName(pl)) + for _, p := range pl { + switch p.Value.(type) { + case *Entity: + sortPL(p.Value.(*Entity).Properties) + case []interface{}: + for _, p2 := range p.Value.([]interface{}) { + if nent, ok := p2.(*Entity); ok { + sortPL(nent.Properties) + } + } + } + } +} + +func TestValidGeoPoint(t *testing.T) { + testCases := []struct { + desc string + pt GeoPoint + want bool + }{ + { + "valid", + GeoPoint{67.21, 13.37}, + true, + }, + { + "high lat", + GeoPoint{-90.01, 13.37}, + false, + }, + { + "low lat", + GeoPoint{90.01, 13.37}, + false, + }, + { + "high lng", + GeoPoint{67.21, 182}, + false, + }, + { + "low lng", + GeoPoint{67.21, -181}, + false, + }, + } + + for _, tc := range testCases { + if got := tc.pt.Valid(); got != tc.want { + t.Errorf("%s: got %v, want %v", tc.desc, got, tc.want) + } + } +} + +func TestPutInvalidEntity(t *testing.T) { + // Test that trying to put an invalid entity always returns the correct error + // type. + + // Fake client that can pretend to start a transaction. 
+	fakeClient := &fakeDatastoreClient{
+		beginTransaction: func(*pb.BeginTransactionRequest) (*pb.BeginTransactionResponse, error) {
+			return &pb.BeginTransactionResponse{
+				Transaction: []byte("deadbeef"),
+			}, nil
+		},
+	}
+	client := &Client{
+		client: fakeClient,
+	}
+
+	ctx := context.Background()
+	key := IncompleteKey("kind", nil)
+
+	_, err := client.Put(ctx, key, "invalid entity")
+	if err != ErrInvalidEntityType {
+		t.Errorf("client.Put returned err %v, want %v", err, ErrInvalidEntityType)
+	}
+
+	_, err = client.PutMulti(ctx, []*Key{key}, []interface{}{"invalid entity"})
+	if me, ok := err.(MultiError); !ok {
+		t.Errorf("client.PutMulti returned err %v, want MultiError type", err)
+	} else if len(me) != 1 || me[0] != ErrInvalidEntityType {
+		t.Errorf("client.PutMulti returned err %v, want MultiError{ErrInvalidEntityType}", err)
+	}
+
+	client.RunInTransaction(ctx, func(tx *Transaction) error {
+		_, err := tx.Put(key, "invalid entity")
+		if err != ErrInvalidEntityType {
+			t.Errorf("tx.Put returned err %v, want %v", err, ErrInvalidEntityType)
+		}
+
+		_, err = tx.PutMulti([]*Key{key}, []interface{}{"invalid entity"})
+		if me, ok := err.(MultiError); !ok {
+			t.Errorf("tx.PutMulti returned err %v, want MultiError type", err)
+		} else if len(me) != 1 || me[0] != ErrInvalidEntityType {
+			t.Errorf("tx.PutMulti returned err %v, want MultiError{ErrInvalidEntityType}", err)
+		}
+
+		return errors.New("bang!") // Return error: we don't actually want to commit.
+ }) +} + +func TestDeferred(t *testing.T) { + type Ent struct { + A int + B string + } + + keys := []*Key{ + NameKey("testKind", "first", nil), + NameKey("testKind", "second", nil), + } + + entity1 := &pb.Entity{ + Key: keyToProto(keys[0]), + Properties: map[string]*pb.Value{ + "A": {ValueType: &pb.Value_IntegerValue{1}}, + "B": {ValueType: &pb.Value_StringValue{"one"}}, + }, + } + entity2 := &pb.Entity{ + Key: keyToProto(keys[1]), + Properties: map[string]*pb.Value{ + "A": {ValueType: &pb.Value_IntegerValue{2}}, + "B": {ValueType: &pb.Value_StringValue{"two"}}, + }, + } + + // count keeps track of the number of times fakeClient.lookup has been + // called. + var count int + // Fake client that will return Deferred keys in resp on the first call. + fakeClient := &fakeDatastoreClient{ + lookup: func(*pb.LookupRequest) (*pb.LookupResponse, error) { + count++ + // On the first call, we return deferred keys. + if count == 1 { + return &pb.LookupResponse{ + Found: []*pb.EntityResult{ + { + Entity: entity1, + Version: 1, + }, + }, + Deferred: []*pb.Key{ + keyToProto(keys[1]), + }, + }, nil + } + + // On the second call, we do not return any more deferred keys. + return &pb.LookupResponse{ + Found: []*pb.EntityResult{ + { + Entity: entity2, + Version: 1, + }, + }, + }, nil + }, + } + client := &Client{ + client: fakeClient, + } + + ctx := context.Background() + + dst := make([]Ent, len(keys)) + err := client.GetMulti(ctx, keys, dst) + if err != nil { + t.Fatalf("client.Get: %v", err) + } + + if count != 2 { + t.Fatalf("expected client.lookup to be called 2 times. 
Got %d", count) + } + + if len(dst) != 2 { + t.Fatalf("expected 2 entities returned, got %d", len(dst)) + } + + for _, e := range dst { + if e.A == 1 { + if e.B != "one" { + t.Fatalf("unexpected entity %#v", e) + } + } else if e.A == 2 { + if e.B != "two" { + t.Fatalf("unexpected entity %#v", e) + } + } else { + t.Fatalf("unexpected entity %#v", e) + } + } + +} + +func TestDeferredMissing(t *testing.T) { + type Ent struct { + A int + B string + } + + keys := []*Key{ + NameKey("testKind", "first", nil), + NameKey("testKind", "second", nil), + } + + entity1 := &pb.Entity{ + Key: keyToProto(keys[0]), + } + entity2 := &pb.Entity{ + Key: keyToProto(keys[1]), + } + + var count int + fakeClient := &fakeDatastoreClient{ + lookup: func(*pb.LookupRequest) (*pb.LookupResponse, error) { + count++ + + if count == 1 { + return &pb.LookupResponse{ + Missing: []*pb.EntityResult{ + { + Entity: entity1, + Version: 1, + }, + }, + Deferred: []*pb.Key{ + keyToProto(keys[1]), + }, + }, nil + } + + return &pb.LookupResponse{ + Missing: []*pb.EntityResult{ + { + Entity: entity2, + Version: 1, + }, + }, + }, nil + }, + } + client := &Client{ + client: fakeClient, + } + + ctx := context.Background() + + dst := make([]Ent, len(keys)) + err := client.GetMulti(ctx, keys, dst) + errs, ok := err.(MultiError) + if !ok { + t.Fatalf("expected error returns to be MultiError; got %v", err) + } + if len(errs) != 2 { + t.Fatalf("expected 2 errors returns, got %d", len(errs)) + } + if errs[0] != ErrNoSuchEntity { + t.Fatalf("expected error to be ErrNoSuchEntity; got %v", errs[0]) + } + if errs[1] != ErrNoSuchEntity { + t.Fatalf("expected error to be ErrNoSuchEntity; got %v", errs[1]) + } + + if count != 2 { + t.Fatalf("expected client.lookup to be called 2 times. 
Got %d", count) + } + + if len(dst) != 2 { + t.Fatalf("expected 2 entities returned, got %d", len(dst)) + } + + for _, e := range dst { + if e.A != 0 || e.B != "" { + t.Fatalf("unexpected entity %#v", e) + } + } +} + +type fakeDatastoreClient struct { + // Optional handlers for the datastore methods. + // Any handlers left undefined will return an error. + lookup func(*pb.LookupRequest) (*pb.LookupResponse, error) + runQuery func(*pb.RunQueryRequest) (*pb.RunQueryResponse, error) + beginTransaction func(*pb.BeginTransactionRequest) (*pb.BeginTransactionResponse, error) + commit func(*pb.CommitRequest) (*pb.CommitResponse, error) + rollback func(*pb.RollbackRequest) (*pb.RollbackResponse, error) + allocateIds func(*pb.AllocateIdsRequest) (*pb.AllocateIdsResponse, error) +} + +func (c *fakeDatastoreClient) Lookup(ctx context.Context, in *pb.LookupRequest, opts ...grpc.CallOption) (*pb.LookupResponse, error) { + if c.lookup == nil { + return nil, errors.New("no lookup handler defined") + } + return c.lookup(in) +} +func (c *fakeDatastoreClient) RunQuery(ctx context.Context, in *pb.RunQueryRequest, opts ...grpc.CallOption) (*pb.RunQueryResponse, error) { + if c.runQuery == nil { + return nil, errors.New("no runQuery handler defined") + } + return c.runQuery(in) +} +func (c *fakeDatastoreClient) BeginTransaction(ctx context.Context, in *pb.BeginTransactionRequest, opts ...grpc.CallOption) (*pb.BeginTransactionResponse, error) { + if c.beginTransaction == nil { + return nil, errors.New("no beginTransaction handler defined") + } + return c.beginTransaction(in) +} +func (c *fakeDatastoreClient) Commit(ctx context.Context, in *pb.CommitRequest, opts ...grpc.CallOption) (*pb.CommitResponse, error) { + if c.commit == nil { + return nil, errors.New("no commit handler defined") + } + return c.commit(in) +} +func (c *fakeDatastoreClient) Rollback(ctx context.Context, in *pb.RollbackRequest, opts ...grpc.CallOption) (*pb.RollbackResponse, error) { + if c.rollback == nil { + 
return nil, errors.New("no rollback handler defined") + } + return c.rollback(in) +} +func (c *fakeDatastoreClient) AllocateIds(ctx context.Context, in *pb.AllocateIdsRequest, opts ...grpc.CallOption) (*pb.AllocateIdsResponse, error) { + if c.allocateIds == nil { + return nil, errors.New("no allocateIds handler defined") + } + return c.allocateIds(in) +} diff --git a/vendor/cloud.google.com/go/datastore/doc.go b/vendor/cloud.google.com/go/datastore/doc.go new file mode 100644 index 00000000..5e3a6a82 --- /dev/null +++ b/vendor/cloud.google.com/go/datastore/doc.go @@ -0,0 +1,420 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/* +Package datastore provides a client for Google Cloud Datastore. + +Note: This package is in beta. Some backwards-incompatible changes may occur. + + +Basic Operations + +Entities are the unit of storage and are associated with a key. A key +consists of an optional parent key, a string application ID, a string kind +(also known as an entity type), and either a StringID or an IntID. A +StringID is also known as an entity name or key name. + +It is valid to create a key with a zero StringID and a zero IntID; this is +called an incomplete key, and does not refer to any saved entity. Putting an +entity into the datastore under an incomplete key will cause a unique key +to be generated for that entity, with a non-zero IntID. 
+ +An entity's contents are a mapping from case-sensitive field names to values. +Valid value types are: + - signed integers (int, int8, int16, int32 and int64), + - bool, + - string, + - float32 and float64, + - []byte (up to 1 megabyte in length), + - any type whose underlying type is one of the above predeclared types, + - *Key, + - GeoPoint, + - time.Time (stored with microsecond precision), + - structs whose fields are all valid value types, + - pointers to structs whose fields are all valid value types, + - slices of any of the above. + +Slices of structs are valid, as are structs that contain slices. + +The Get and Put functions load and save an entity's contents. An entity's +contents are typically represented by a struct pointer. + +Example code: + + type Entity struct { + Value string + } + + func main() { + ctx := context.Background() + + // Create a datastore client. In a typical application, you would create + // a single client which is reused for every datastore operation. + dsClient, err := datastore.NewClient(ctx, "my-project") + if err != nil { + // Handle error. + } + + k := datastore.NameKey("Entity", "stringID", nil) + e := new(Entity) + if err := dsClient.Get(ctx, k, e); err != nil { + // Handle error. + } + + old := e.Value + e.Value = "Hello World!" + + if _, err := dsClient.Put(ctx, k, e); err != nil { + // Handle error. + } + + fmt.Printf("Updated value from %q to %q\n", old, e.Value) + } + +GetMulti, PutMulti and DeleteMulti are batch versions of the Get, Put and +Delete functions. They take a []*Key instead of a *Key, and may return a +datastore.MultiError when encountering partial failure. + + +Properties + +An entity's contents can be represented by a variety of types. These are +typically struct pointers, but can also be any type that implements the +PropertyLoadSaver interface. If using a struct pointer, you do not have to +explicitly implement the PropertyLoadSaver interface; the datastore will +automatically convert via reflection. 
If a struct pointer does implement that +interface then those methods will be used in preference to the default +behavior for struct pointers. Struct pointers are more strongly typed and are +easier to use; PropertyLoadSavers are more flexible. + +The actual types passed do not have to match between Get and Put calls or even +across different calls to datastore. It is valid to put a *PropertyList and +get that same entity as a *myStruct, or put a *myStruct0 and get a *myStruct1. +Conceptually, any entity is saved as a sequence of properties, and is loaded +into the destination value on a property-by-property basis. When loading into +a struct pointer, an entity that cannot be completely represented (such as a +missing field) will result in an ErrFieldMismatch error but it is up to the +caller whether this error is fatal, recoverable or ignorable. + +By default, for struct pointers, all properties are potentially indexed, and +the property name is the same as the field name (and hence must start with an +upper case letter). + +Fields may have a `datastore:"name,options"` tag. The tag name is the +property name, which must be one or more valid Go identifiers joined by ".", +but may start with a lower case letter. An empty tag name means to just use the +field name. A "-" tag name means that the datastore will ignore that field. + +The only valid options are "omitempty", "noindex" and "flatten". + +If the options include "omitempty" and the value of the field is empty, then the field will be omitted on Save. +The empty values are false, 0, any nil interface value, and any array, slice, map, or string of length zero. +Struct field values will never be empty. + +If options include "noindex" then the field will not be indexed. All fields are indexed +by default. Strings or byte slices longer than 1500 bytes cannot be indexed; +fields used to store long strings and byte slices must be tagged with "noindex" +or they will cause Put operations to fail. 
+ +For a nested struct field, the options may also include "flatten". This indicates +that the immediate fields and any nested substruct fields of the nested struct should be +flattened. See below for examples. + +To use multiple options together, separate them by a comma. +The order does not matter. + +If the options is "" then the comma may be omitted. + +Example code: + + // A and B are renamed to a and b. + // A, C and J are not indexed. + // D's tag is equivalent to having no tag at all (E). + // I is ignored entirely by the datastore. + // J has tag information for both the datastore and json packages. + type TaggedStruct struct { + A int `datastore:"a,noindex"` + B int `datastore:"b"` + C int `datastore:",noindex"` + D int `datastore:""` + E int + I int `datastore:"-"` + J int `datastore:",noindex" json:"j"` + } + + +Key Field + +If the struct contains a *datastore.Key field tagged with the name "__key__", +its value will be ignored on Put. When reading the Entity back into the Go struct, +the field will be populated with the *datastore.Key value used to query for +the Entity. + +Example code: + + type MyEntity struct { + A int + K *datastore.Key `datastore:"__key__"` + } + + k := datastore.NameKey("Entity", "stringID", nil) + e := MyEntity{A: 12} + k, err = dsClient.Put(ctx, k, e) + if err != nil { + // Handle error. + } + + var entities []MyEntity + q := datastore.NewQuery("Entity").Filter("A =", 12).Limit(1) + _, err := dsClient.GetAll(ctx, q, &entities) + if err != nil { + // Handle error + } + + log.Println(entities[0]) + // Prints {12 /Entity,stringID} + + + +Structured Properties + +If the struct pointed to contains other structs, then the nested or embedded +structs are themselves saved as Entity values. For example, given these definitions: + + type Inner struct { + W int32 + X string + } + + type Outer struct { + I Inner + } + +then an Outer would have one property, Inner, encoded as an Entity value. 
+ +If an outer struct is tagged "noindex" then all of its implicit flattened +fields are effectively "noindex". + +If the Inner struct contains a *Key field with the name "__key__", like so: + + type Inner struct { + W int32 + X string + K *datastore.Key `datastore:"__key__"` + } + + type Outer struct { + I Inner + } + +then the value of K will be used as the Key for Inner, represented +as an Entity value in datastore. + +If any nested struct fields should be flattened, instead of encoded as +Entity values, the nested struct field should be tagged with the "flatten" +option. For example, given the following: + + type Inner1 struct { + W int32 + X string + } + + type Inner2 struct { + Y float64 + } + + type Inner3 struct { + Z bool + } + + type Inner4 struct { + WW int + } + + type Inner5 struct { + X Inner4 + } + + type Outer struct { + A int16 + I []Inner1 `datastore:",flatten"` + J Inner2 `datastore:",flatten"` + K Inner5 `datastore:",flatten"` + Inner3 `datastore:",flatten"` + } + +an Outer's properties would be equivalent to those of: + + type OuterEquivalent struct { + A int16 + IDotW []int32 `datastore:"I.W"` + IDotX []string `datastore:"I.X"` + JDotY float64 `datastore:"J.Y"` + KDotXDotWW int `datastore:"K.X.WW"` + Z bool + } + +Note that the "flatten" option cannot be used for Entity value fields. +The server will reject any dotted field names for an Entity value. + + +The PropertyLoadSaver Interface + +An entity's contents can also be represented by any type that implements the +PropertyLoadSaver interface. This type may be a struct pointer, but it does +not have to be. The datastore package will call Load when getting the entity's +contents, and Save when putting the entity's contents. +Possible uses include deriving non-stored fields, verifying fields, or indexing +a field only if its value is positive. + +Example code: + + type CustomPropsExample struct { + I, J int + // Sum is not stored, but should always be equal to I + J. 
+ Sum int `datastore:"-"` + } + + func (x *CustomPropsExample) Load(ps []datastore.Property) error { + // Load I and J as usual. + if err := datastore.LoadStruct(x, ps); err != nil { + return err + } + // Derive the Sum field. + x.Sum = x.I + x.J + return nil + } + + func (x *CustomPropsExample) Save() ([]datastore.Property, error) { + // Validate the Sum field. + if x.Sum != x.I + x.J { + return nil, errors.New("CustomPropsExample has inconsistent sum") + } + // Save I and J as usual. The code below is equivalent to calling + // "return datastore.SaveStruct(x)", but is done manually for + // demonstration purposes. + return []datastore.Property{ + { + Name: "I", + Value: int64(x.I), + }, + { + Name: "J", + Value: int64(x.J), + }, + }, nil + } + +The *PropertyList type implements PropertyLoadSaver, and can therefore hold an +arbitrary entity's contents. + + +Queries + +Queries retrieve entities based on their properties or key's ancestry. Running +a query yields an iterator of results: either keys or (key, entity) pairs. +Queries are re-usable and it is safe to call Query.Run from concurrent +goroutines. Iterators are not safe for concurrent use. + +Queries are immutable, and are either created by calling NewQuery, or derived +from an existing query by calling a method like Filter or Order that returns a +new query value. A query is typically constructed by calling NewQuery followed +by a chain of zero or more such methods. These methods are: + - Ancestor and Filter constrain the entities returned by running a query. + - Order affects the order in which they are returned. + - Project constrains the fields returned. + - Distinct de-duplicates projected entities. + - KeysOnly makes the iterator return only keys, not (key, entity) pairs. + - Start, End, Offset and Limit define which sub-sequence of matching entities + to return. Start and End take cursors, Offset and Limit take integers. Start + and Offset affect the first result, End and Limit affect the last result. 
+ If both Start and Offset are set, then the offset is relative to Start. + If both End and Limit are set, then the earliest constraint wins. Limit is + relative to Start+Offset, not relative to End. As a special case, a + negative limit means unlimited. + +Example code: + + type Widget struct { + Description string + Price int + } + + func printWidgets(ctx context.Context, client *datastore.Client) { + q := datastore.NewQuery("Widget"). + Filter("Price <", 1000). + Order("-Price") + for t := client.Run(ctx, q); ; { + var x Widget + key, err := t.Next(&x) + if err == iterator.Done { + break + } + if err != nil { + // Handle error. + } + fmt.Printf("Key=%v\nWidget=%#v\n\n", key, x) + } + } + + +Transactions + +Client.RunInTransaction runs a function in a transaction. + +Example code: + + type Counter struct { + Count int + } + + func incCount(ctx context.Context, client *datastore.Client) { + var count int + key := datastore.NameKey("Counter", "singleton", nil) + _, err := client.RunInTransaction(ctx, func(tx *datastore.Transaction) error { + var x Counter + if err := tx.Get(key, &x); err != nil && err != datastore.ErrNoSuchEntity { + return err + } + x.Count++ + if _, err := tx.Put(key, &x); err != nil { + return err + } + count = x.Count + return nil + }) + if err != nil { + // Handle error. + } + // The value of count is only valid once the transaction is successful + // (RunInTransaction has returned nil). + fmt.Printf("Count=%d\n", count) + } + +Google Cloud Datastore Emulator + +This package supports the Cloud Datastore emulator, which is useful for testing and +development. Environment variables are used to indicate that datastore traffic should be +directed to the emulator instead of the production Datastore service. + +To install and set up the emulator and its environment variables, see the documentation +at https://cloud.google.com/datastore/docs/tools/datastore-emulator. 
+ +Authentication + +See examples of authorization and authentication at +https://godoc.org/cloud.google.com/go#pkg-examples. + +*/ +package datastore // import "cloud.google.com/go/datastore" diff --git a/vendor/cloud.google.com/go/datastore/errors.go b/vendor/cloud.google.com/go/datastore/errors.go new file mode 100644 index 00000000..3077f80d --- /dev/null +++ b/vendor/cloud.google.com/go/datastore/errors.go @@ -0,0 +1,47 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// This file provides error functions for common API failure modes. + +package datastore + +import ( + "fmt" +) + +// MultiError is returned by batch operations when there are errors with +// particular elements. Errors will be in a one-to-one correspondence with +// the input elements; successful elements will have a nil entry. 
+type MultiError []error + +func (m MultiError) Error() string { + s, n := "", 0 + for _, e := range m { + if e != nil { + if n == 0 { + s = e.Error() + } + n++ + } + } + switch n { + case 0: + return "(0 errors)" + case 1: + return s + case 2: + return s + " (and 1 other error)" + } + return fmt.Sprintf("%s (and %d other errors)", s, n-1) +} diff --git a/vendor/cloud.google.com/go/datastore/example_test.go b/vendor/cloud.google.com/go/datastore/example_test.go new file mode 100644 index 00000000..c6f81e13 --- /dev/null +++ b/vendor/cloud.google.com/go/datastore/example_test.go @@ -0,0 +1,545 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package datastore_test + +import ( + "fmt" + "log" + "time" + + "cloud.google.com/go/datastore" + "golang.org/x/net/context" + "google.golang.org/api/iterator" +) + +func ExampleNewClient() { + ctx := context.Background() + client, err := datastore.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + _ = client // TODO: Use client. +} + +func ExampleClient_Get() { + ctx := context.Background() + client, err := datastore.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. 
+ } + + type Article struct { + Title string + Description string + Body string `datastore:",noindex"` + Author *datastore.Key + PublishedAt time.Time + } + key := datastore.NameKey("Article", "articled1", nil) + article := &Article{} + if err := client.Get(ctx, key, article); err != nil { + // TODO: Handle error. + } +} + +func ExampleClient_Put() { + ctx := context.Background() + client, err := datastore.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + + type Article struct { + Title string + Description string + Body string `datastore:",noindex"` + Author *datastore.Key + PublishedAt time.Time + } + newKey := datastore.IncompleteKey("Article", nil) + _, err = client.Put(ctx, newKey, &Article{ + Title: "The title of the article", + Description: "The description of the article...", + Body: "...", + Author: datastore.NameKey("Author", "jbd", nil), + PublishedAt: time.Now(), + }) + if err != nil { + // TODO: Handle error. + } +} + +func ExampleClient_Put_flatten() { + ctx := context.Background() + client, err := datastore.NewClient(ctx, "project-id") + if err != nil { + log.Fatal(err) + } + + type Animal struct { + Name string + Type string + Breed string + } + + type Human struct { + Name string + Height int + Pet Animal `datastore:",flatten"` + } + + newKey := datastore.IncompleteKey("Human", nil) + _, err = client.Put(ctx, newKey, &Human{ + Name: "Susan", + Height: 67, + Pet: Animal{ + Name: "Fluffy", + Type: "Cat", + Breed: "Sphynx", + }, + }) + if err != nil { + log.Fatal(err) + } +} + +func ExampleClient_Delete() { + ctx := context.Background() + client, err := datastore.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + + key := datastore.NameKey("Article", "articled1", nil) + if err := client.Delete(ctx, key); err != nil { + // TODO: Handle error. 
+ } +} + +func ExampleClient_DeleteMulti() { + ctx := context.Background() + client, err := datastore.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + var keys []*datastore.Key + for i := 1; i <= 10; i++ { + keys = append(keys, datastore.IDKey("Article", int64(i), nil)) + } + if err := client.DeleteMulti(ctx, keys); err != nil { + // TODO: Handle error. + } +} + +type Post struct { + Title string + PublishedAt time.Time + Comments int +} + +func ExampleClient_GetMulti() { + ctx := context.Background() + client, err := datastore.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + + keys := []*datastore.Key{ + datastore.NameKey("Post", "post1", nil), + datastore.NameKey("Post", "post2", nil), + datastore.NameKey("Post", "post3", nil), + } + posts := make([]Post, 3) + if err := client.GetMulti(ctx, keys, posts); err != nil { + // TODO: Handle error. + } +} + +func ExampleClient_PutMulti_slice() { + ctx := context.Background() + client, err := datastore.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + + keys := []*datastore.Key{ + datastore.NameKey("Post", "post1", nil), + datastore.NameKey("Post", "post2", nil), + } + + // PutMulti with a Post slice. + posts := []*Post{ + {Title: "Post 1", PublishedAt: time.Now()}, + {Title: "Post 2", PublishedAt: time.Now()}, + } + if _, err := client.PutMulti(ctx, keys, posts); err != nil { + // TODO: Handle error. + } +} + +func ExampleClient_PutMulti_interfaceSlice() { + ctx := context.Background() + client, err := datastore.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + + keys := []*datastore.Key{ + datastore.NameKey("Post", "post1", nil), + datastore.NameKey("Post", "post2", nil), + } + + // PutMulti with an empty interface slice. 
+	posts := []interface{}{
+		&Post{Title: "Post 1", PublishedAt: time.Now()},
+		&Post{Title: "Post 2", PublishedAt: time.Now()},
+	}
+	if _, err := client.PutMulti(ctx, keys, posts); err != nil {
+		// TODO: Handle error.
+	}
+}
+
+func ExampleNewQuery() {
+	// Query for Post entities.
+	q := datastore.NewQuery("Post")
+	_ = q // TODO: Use the query with Client.Run.
+}
+
+func ExampleNewQuery_options() {
+	// Query to order the posts by the number of comments they have received.
+	q := datastore.NewQuery("Post").Order("-Comments")
+	// Start listing from an offset and limit the results.
+	q = q.Offset(20).Limit(10)
+	_ = q // TODO: Use the query.
+}
+
+func ExampleClient_Count() {
+	ctx := context.Background()
+	client, err := datastore.NewClient(ctx, "project-id")
+	if err != nil {
+		// TODO: Handle error.
+	}
+	// Count the number of the post entities.
+	q := datastore.NewQuery("Post")
+	n, err := client.Count(ctx, q)
+	if err != nil {
+		// TODO: Handle error.
+	}
+	fmt.Printf("There are %d posts.", n)
+}
+
+func ExampleClient_Run() {
+	ctx := context.Background()
+	client, err := datastore.NewClient(ctx, "project-id")
+	if err != nil {
+		// TODO: Handle error.
+	}
+	// List the posts published since yesterday.
+	yesterday := time.Now().Add(-24 * time.Hour)
+	q := datastore.NewQuery("Post").Filter("PublishedAt >", yesterday)
+	it := client.Run(ctx, q)
+	_ = it // TODO: iterate using Next.
+}
+
+func ExampleClient_NewTransaction() {
+	ctx := context.Background()
+	client, err := datastore.NewClient(ctx, "project-id")
+	if err != nil {
+		// TODO: Handle error.
+	}
+	const retries = 3
+
+	// Increment a counter.
+	// See https://cloud.google.com/appengine/articles/sharding_counters for
+	// a more scalable solution.
+ type Counter struct { + Count int + } + + key := datastore.NameKey("counter", "CounterA", nil) + var tx *datastore.Transaction + for i := 0; i < retries; i++ { + tx, err = client.NewTransaction(ctx) + if err != nil { + break + } + + var c Counter + if err = tx.Get(key, &c); err != nil && err != datastore.ErrNoSuchEntity { + break + } + c.Count++ + if _, err = tx.Put(key, &c); err != nil { + break + } + + // Attempt to commit the transaction. If there's a conflict, try again. + if _, err = tx.Commit(); err != datastore.ErrConcurrentTransaction { + break + } + } + if err != nil { + // TODO: Handle error. + } +} + +func ExampleClient_RunInTransaction() { + ctx := context.Background() + client, err := datastore.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + + // Increment a counter. + // See https://cloud.google.com/appengine/articles/sharding_counters for + // a more scalable solution. + type Counter struct { + Count int + } + + var count int + key := datastore.NameKey("Counter", "singleton", nil) + _, err = client.RunInTransaction(ctx, func(tx *datastore.Transaction) error { + var x Counter + if err := tx.Get(key, &x); err != nil && err != datastore.ErrNoSuchEntity { + return err + } + x.Count++ + if _, err := tx.Put(key, &x); err != nil { + return err + } + count = x.Count + return nil + }) + if err != nil { + // TODO: Handle error. + } + // The value of count is only valid once the transaction is successful + // (RunInTransaction has returned nil). + fmt.Printf("Count=%d\n", count) +} + +func ExampleClient_AllocateIDs() { + ctx := context.Background() + client, err := datastore.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + var keys []*datastore.Key + for i := 0; i < 10; i++ { + keys = append(keys, datastore.IncompleteKey("Article", nil)) + } + keys, err = client.AllocateIDs(ctx, keys) + if err != nil { + // TODO: Handle error. + } + _ = keys // TODO: Use keys. 
+} + +func ExampleKey_Encode() { + key := datastore.IDKey("Article", 1, nil) + encoded := key.Encode() + fmt.Println(encoded) + // Output: EgsKB0FydGljbGUQAQ +} + +func ExampleDecodeKey() { + const encoded = "EgsKB0FydGljbGUQAQ" + key, err := datastore.DecodeKey(encoded) + if err != nil { + // TODO: Handle error. + } + fmt.Println(key) + // Output: /Article,1 +} + +func ExampleIDKey() { + // Key with numeric ID. + k := datastore.IDKey("Article", 1, nil) + _ = k // TODO: Use key. +} + +func ExampleNameKey() { + // Key with string ID. + k := datastore.NameKey("Article", "article8", nil) + _ = k // TODO: Use key. +} + +func ExampleIncompleteKey() { + k := datastore.IncompleteKey("Article", nil) + _ = k // TODO: Use incomplete key. +} + +func ExampleClient_GetAll() { + ctx := context.Background() + client, err := datastore.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + var posts []*Post + keys, err := client.GetAll(ctx, datastore.NewQuery("Post"), &posts) + for i, key := range keys { + fmt.Println(key) + fmt.Println(posts[i]) + } +} + +func ExampleCommit_Key() { + ctx := context.Background() + client, err := datastore.NewClient(ctx, "") + if err != nil { + // TODO: Handle error. + } + var pk1, pk2 *datastore.PendingKey + // Create two posts in a single transaction. + commit, err := client.RunInTransaction(ctx, func(tx *datastore.Transaction) error { + var err error + pk1, err = tx.Put(datastore.IncompleteKey("Post", nil), &Post{Title: "Post 1", PublishedAt: time.Now()}) + if err != nil { + return err + } + pk2, err = tx.Put(datastore.IncompleteKey("Post", nil), &Post{Title: "Post 2", PublishedAt: time.Now()}) + if err != nil { + return err + } + return nil + }) + if err != nil { + // TODO: Handle error. + } + // Now pk1, pk2 are valid PendingKeys. Let's convert them into real keys + // using the Commit object. 
+ k1 := commit.Key(pk1) + k2 := commit.Key(pk2) + fmt.Println(k1, k2) +} + +func ExampleIterator_Next() { + ctx := context.Background() + client, err := datastore.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + it := client.Run(ctx, datastore.NewQuery("Post")) + for { + var p Post + key, err := it.Next(&p) + if err == iterator.Done { + break + } + if err != nil { + // TODO: Handle error. + } + fmt.Println(key, p) + } +} + +func ExampleIterator_Cursor() { + ctx := context.Background() + client, err := datastore.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + it := client.Run(ctx, datastore.NewQuery("Post")) + for { + var p Post + _, err := it.Next(&p) + if err == iterator.Done { + break + } + if err != nil { + // TODO: Handle error. + } + fmt.Println(p) + cursor, err := it.Cursor() + if err != nil { + // TODO: Handle error. + } + // When printed, a cursor will display as a string that can be passed + // to datastore.NewCursor. + fmt.Printf("to resume with this post, use cursor %s\n", cursor) + } +} + +func ExampleDecodeCursor() { + // See Query.Start for a fuller example of DecodeCursor. + // getCursor represents a function that returns a cursor from a previous + // iteration in string form. + cursorString := getCursor() + cursor, err := datastore.DecodeCursor(cursorString) + if err != nil { + // TODO: Handle error. + } + _ = cursor // TODO: Use the cursor with Query.Start or Query.End. +} + +func getCursor() string { return "" } + +func ExampleQuery_Start() { + // This example demonstrates how to use cursors and Query.Start + // to resume an iteration. + ctx := context.Background() + client, err := datastore.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + // getCursor represents a function that returns a cursor from a previous + // iteration in string form. 
+ cursorString := getCursor() + cursor, err := datastore.DecodeCursor(cursorString) + if err != nil { + // TODO: Handle error. + } + it := client.Run(ctx, datastore.NewQuery("Post").Start(cursor)) + _ = it // TODO: Use iterator. +} + +func ExampleLoadStruct() { + type Player struct { + User string + Score int + } + // Normally LoadStruct would only be used inside a custom implementation of + // PropertyLoadSaver; this is for illustrative purposes only. + props := []datastore.Property{ + {Name: "User", Value: "Alice"}, + {Name: "Score", Value: int64(97)}, + } + + var p Player + if err := datastore.LoadStruct(&p, props); err != nil { + // TODO: Handle error. + } + fmt.Println(p) + // Output: {Alice 97} +} + +func ExampleSaveStruct() { + type Player struct { + User string + Score int + } + + p := &Player{ + User: "Alice", + Score: 97, + } + props, err := datastore.SaveStruct(p) + if err != nil { + // TODO: Handle error. + } + fmt.Println(props) + // TODO(jba): make this output stable: Output: [{User Alice false} {Score 97 false}] +} diff --git a/vendor/cloud.google.com/go/datastore/integration_test.go b/vendor/cloud.google.com/go/datastore/integration_test.go new file mode 100644 index 00000000..5170206f --- /dev/null +++ b/vendor/cloud.google.com/go/datastore/integration_test.go @@ -0,0 +1,1040 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package datastore + +import ( + "errors" + "fmt" + "reflect" + "sort" + "strings" + "sync" + "testing" + "time" + + "cloud.google.com/go/internal/testutil" + "golang.org/x/net/context" + "google.golang.org/api/iterator" + "google.golang.org/api/option" +) + +// TODO(djd): Make test entity clean up more robust: some test entities may +// be left behind if tests are aborted, the transport fails, etc. + +// suffix is a timestamp-based suffix which is appended to key names, +// particularly for the root keys of entity groups. This reduces flakiness +// when the tests are run in parallel. +var suffix = fmt.Sprintf("-t%d", time.Now().UnixNano()) + +func newClient(ctx context.Context, t *testing.T) *Client { + ts := testutil.TokenSource(ctx, ScopeDatastore) + if ts == nil { + t.Skip("Integration tests skipped. See CONTRIBUTING.md for details") + } + client, err := NewClient(ctx, testutil.ProjID(), option.WithTokenSource(ts)) + if err != nil { + t.Fatalf("NewClient: %v", err) + } + return client +} + +func TestBasics(t *testing.T) { + if testing.Short() { + t.Skip("Integration tests skipped in short mode") + } + ctx, _ := context.WithTimeout(context.Background(), time.Second*20) + client := newClient(ctx, t) + defer client.Close() + + type X struct { + I int + S string + T time.Time + } + + x0 := X{66, "99", time.Now().Truncate(time.Millisecond)} + k, err := client.Put(ctx, IncompleteKey("BasicsX", nil), &x0) + if err != nil { + t.Fatalf("client.Put: %v", err) + } + x1 := X{} + err = client.Get(ctx, k, &x1) + if err != nil { + t.Errorf("client.Get: %v", err) + } + err = client.Delete(ctx, k) + if err != nil { + t.Errorf("client.Delete: %v", err) + } + if !reflect.DeepEqual(x0, x1) { + t.Errorf("compare: x0=%v, x1=%v", x0, x1) + } +} + +func TestTopLevelKeyLoaded(t *testing.T) { + if testing.Short() { + t.Skip("Integration tests skipped in short mode") + } + + ctx, _ := context.WithTimeout(context.Background(), time.Second*20) + client := newClient(ctx, t) + defer 
client.Close() + + completeKey := NameKey("EntityWithKey", "myent", nil) + + type EntityWithKey struct { + I int + S string + K *Key `datastore:"__key__"` + } + + in := &EntityWithKey{ + I: 12, + S: "abcd", + } + + k, err := client.Put(ctx, completeKey, in) + if err != nil { + t.Fatalf("client.Put: %v", err) + } + + var e EntityWithKey + err = client.Get(ctx, k, &e) + if err != nil { + t.Fatalf("client.Get: %v", err) + } + + // The two keys should be absolutely identical. + if !reflect.DeepEqual(e.K, k) { + t.Fatalf("e.K not equal to k; got %#v, want %#v", e.K, k) + } + +} + +func TestListValues(t *testing.T) { + if testing.Short() { + t.Skip("Integration tests skipped in short mode") + } + ctx := context.Background() + client := newClient(ctx, t) + defer client.Close() + + p0 := PropertyList{ + {Name: "L", Value: []interface{}{int64(12), "string", true}}, + } + k, err := client.Put(ctx, IncompleteKey("ListValue", nil), &p0) + if err != nil { + t.Fatalf("client.Put: %v", err) + } + var p1 PropertyList + if err := client.Get(ctx, k, &p1); err != nil { + t.Errorf("client.Get: %v", err) + } + if !reflect.DeepEqual(p0, p1) { + t.Errorf("compare:\np0=%v\np1=%#v", p0, p1) + } + if err = client.Delete(ctx, k); err != nil { + t.Errorf("client.Delete: %v", err) + } +} + +func TestGetMulti(t *testing.T) { + if testing.Short() { + t.Skip("Integration tests skipped in short mode") + } + ctx := context.Background() + client := newClient(ctx, t) + defer client.Close() + + type X struct { + I int + } + p := NameKey("X", "x"+suffix, nil) + + cases := []struct { + key *Key + put bool + }{ + {key: NameKey("X", "item1", p), put: true}, + {key: NameKey("X", "item2", p), put: false}, + {key: NameKey("X", "item3", p), put: false}, + {key: NameKey("X", "item4", p), put: true}, + } + + var src, dst []*X + var srcKeys, dstKeys []*Key + for _, c := range cases { + dst = append(dst, &X{}) + dstKeys = append(dstKeys, c.key) + if c.put { + src = append(src, &X{}) + srcKeys = append(srcKeys, 
c.key) + } + } + if _, err := client.PutMulti(ctx, srcKeys, src); err != nil { + t.Error(err) + } + err := client.GetMulti(ctx, dstKeys, dst) + if err == nil { + t.Errorf("client.GetMulti got %v, expected error", err) + } + e, ok := err.(MultiError) + if !ok { + t.Errorf("client.GetMulti got %T, expected MultiError", err) + } + for i, err := range e { + got, want := err, (error)(nil) + if !cases[i].put { + got, want = err, ErrNoSuchEntity + } + if got != want { + t.Errorf("MultiError[%d] == %v, want %v", i, got, want) + } + } +} + +type Z struct { + S string + T string `datastore:",noindex"` + P []byte + K []byte `datastore:",noindex"` +} + +func (z Z) String() string { + var lens []string + v := reflect.ValueOf(z) + for i := 0; i < v.NumField(); i++ { + if l := v.Field(i).Len(); l > 0 { + lens = append(lens, fmt.Sprintf("len(%s)=%d", v.Type().Field(i).Name, l)) + } + } + return fmt.Sprintf("Z{ %s }", strings.Join(lens, ",")) +} + +func TestUnindexableValues(t *testing.T) { + if testing.Short() { + t.Skip("Integration tests skipped in short mode") + } + ctx := context.Background() + client := newClient(ctx, t) + defer client.Close() + + x1500 := strings.Repeat("x", 1500) + x1501 := strings.Repeat("x", 1501) + testCases := []struct { + in Z + wantErr bool + }{ + {in: Z{S: x1500}, wantErr: false}, + {in: Z{S: x1501}, wantErr: true}, + {in: Z{T: x1500}, wantErr: false}, + {in: Z{T: x1501}, wantErr: false}, + {in: Z{P: []byte(x1500)}, wantErr: false}, + {in: Z{P: []byte(x1501)}, wantErr: true}, + {in: Z{K: []byte(x1500)}, wantErr: false}, + {in: Z{K: []byte(x1501)}, wantErr: false}, + } + for _, tt := range testCases { + _, err := client.Put(ctx, IncompleteKey("BasicsZ", nil), &tt.in) + if (err != nil) != tt.wantErr { + t.Errorf("client.Put %s got err %v, want err %t", tt.in, err, tt.wantErr) + } + } +} + +func TestNilKey(t *testing.T) { + if testing.Short() { + t.Skip("Integration tests skipped in short mode") + } + ctx := context.Background() + client := 
newClient(ctx, t) + defer client.Close() + + testCases := []struct { + in K0 + wantErr bool + }{ + {in: K0{K: testKey0}, wantErr: false}, + {in: K0{}, wantErr: false}, + } + for _, tt := range testCases { + _, err := client.Put(ctx, IncompleteKey("NilKey", nil), &tt.in) + if (err != nil) != tt.wantErr { + t.Errorf("client.Put %s got err %v, want err %t", tt.in, err, tt.wantErr) + } + } +} + +type SQChild struct { + I, J int + T, U int64 +} + +type SQTestCase struct { + desc string + q *Query + wantCount int + wantSum int +} + +func testSmallQueries(t *testing.T, ctx context.Context, client *Client, parent *Key, children []*SQChild, + testCases []SQTestCase, extraTests ...func()) { + keys := make([]*Key, len(children)) + for i := range keys { + keys[i] = IncompleteKey("SQChild", parent) + } + keys, err := client.PutMulti(ctx, keys, children) + if err != nil { + t.Fatalf("client.PutMulti: %v", err) + } + defer func() { + err := client.DeleteMulti(ctx, keys) + if err != nil { + t.Errorf("client.DeleteMulti: %v", err) + } + }() + + for _, tc := range testCases { + count, err := client.Count(ctx, tc.q) + if err != nil { + t.Errorf("Count %q: %v", tc.desc, err) + continue + } + if count != tc.wantCount { + t.Errorf("Count %q: got %d want %d", tc.desc, count, tc.wantCount) + continue + } + } + + for _, tc := range testCases { + var got []SQChild + _, err := client.GetAll(ctx, tc.q, &got) + if err != nil { + t.Errorf("client.GetAll %q: %v", tc.desc, err) + continue + } + sum := 0 + for _, c := range got { + sum += c.I + c.J + } + if sum != tc.wantSum { + t.Errorf("sum %q: got %d want %d", tc.desc, sum, tc.wantSum) + continue + } + } + for _, x := range extraTests { + x() + } +} + +func TestFilters(t *testing.T) { + if testing.Short() { + t.Skip("Integration tests skipped in short mode") + } + ctx := context.Background() + client := newClient(ctx, t) + defer client.Close() + + parent := NameKey("SQParent", "TestFilters"+suffix, nil) + now := 
time.Now().Truncate(time.Millisecond).Unix() + children := []*SQChild{ + {I: 0, T: now, U: now}, + {I: 1, T: now, U: now}, + {I: 2, T: now, U: now}, + {I: 3, T: now, U: now}, + {I: 4, T: now, U: now}, + {I: 5, T: now, U: now}, + {I: 6, T: now, U: now}, + {I: 7, T: now, U: now}, + } + baseQuery := NewQuery("SQChild").Ancestor(parent).Filter("T=", now) + testSmallQueries(t, ctx, client, parent, children, []SQTestCase{ + { + "I>1", + baseQuery.Filter("I>", 1), + 6, + 2 + 3 + 4 + 5 + 6 + 7, + }, + { + "I>2 AND I<=5", + baseQuery.Filter("I>", 2).Filter("I<=", 5), + 3, + 3 + 4 + 5, + }, + { + "I>=3 AND I<3", + baseQuery.Filter("I>=", 3).Filter("I<", 3), + 0, + 0, + }, + { + "I=4", + baseQuery.Filter("I=", 4), + 1, + 4, + }, + }, func() { + got := []*SQChild{} + want := []*SQChild{ + {I: 0, T: now, U: now}, + {I: 1, T: now, U: now}, + {I: 2, T: now, U: now}, + {I: 3, T: now, U: now}, + {I: 4, T: now, U: now}, + {I: 5, T: now, U: now}, + {I: 6, T: now, U: now}, + {I: 7, T: now, U: now}, + } + _, err := client.GetAll(ctx, baseQuery.Order("I"), &got) + if err != nil { + t.Errorf("client.GetAll: %v", err) + } + if !reflect.DeepEqual(got, want) { + t.Errorf("compare: got=%v, want=%v", got, want) + } + }, func() { + got := []*SQChild{} + want := []*SQChild{ + {I: 7, T: now, U: now}, + {I: 6, T: now, U: now}, + {I: 5, T: now, U: now}, + {I: 4, T: now, U: now}, + {I: 3, T: now, U: now}, + {I: 2, T: now, U: now}, + {I: 1, T: now, U: now}, + {I: 0, T: now, U: now}, + } + _, err := client.GetAll(ctx, baseQuery.Order("-I"), &got) + if err != nil { + t.Errorf("client.GetAll: %v", err) + } + if !reflect.DeepEqual(got, want) { + t.Errorf("compare: got=%v, want=%v", got, want) + } + }) +} + +func TestLargeQuery(t *testing.T) { + if testing.Short() { + t.Skip("Integration tests skipped in short mode") + } + ctx := context.Background() + client := newClient(ctx, t) + defer client.Close() + + parent := NameKey("LQParent", "TestFilters"+suffix, nil) + now := 
time.Now().Truncate(time.Millisecond).Unix() + + // Make a large number of children entities. + const n = 800 + children := make([]*SQChild, 0, n) + keys := make([]*Key, 0, n) + for i := 0; i < n; i++ { + children = append(children, &SQChild{I: i, T: now, U: now}) + keys = append(keys, IncompleteKey("SQChild", parent)) + } + + // Store using PutMulti in batches. + const batchSize = 500 + for i := 0; i < n; i = i + 500 { + j := i + batchSize + if j > n { + j = n + } + fullKeys, err := client.PutMulti(ctx, keys[i:j], children[i:j]) + if err != nil { + t.Fatalf("PutMulti(%d, %d): %v", i, j, err) + } + defer func() { + err := client.DeleteMulti(ctx, fullKeys) + if err != nil { + t.Errorf("client.DeleteMulti: %v", err) + } + }() + } + + q := NewQuery("SQChild").Ancestor(parent).Filter("T=", now).Order("I") + + // Wait group to allow us to run query tests in parallel below. + var wg sync.WaitGroup + + // Check we get the expected count and results for various limits/offsets. + queryTests := []struct { + limit, offset, want int + }{ + // Just limit. + {limit: 0, want: 0}, + {limit: 100, want: 100}, + {limit: 501, want: 501}, + {limit: n, want: n}, + {limit: n * 2, want: n}, + {limit: -1, want: n}, + // Just offset. + {limit: -1, offset: 100, want: n - 100}, + {limit: -1, offset: 500, want: n - 500}, + {limit: -1, offset: n, want: 0}, + // Limit and offset. + {limit: 100, offset: 100, want: 100}, + {limit: 1000, offset: 100, want: n - 100}, + {limit: 500, offset: 500, want: n - 500}, + } + for _, tt := range queryTests { + q := q.Limit(tt.limit).Offset(tt.offset) + wg.Add(1) + + go func(limit, offset, want int) { + defer wg.Done() + // Check Count returns the expected number of results. 
+ count, err := client.Count(ctx, q) + if err != nil { + t.Errorf("client.Count(limit=%d offset=%d): %v", limit, offset, err) + return + } + if count != want { + t.Errorf("Count(limit=%d offset=%d) returned %d, want %d", limit, offset, count, want) + } + + var got []SQChild + _, err = client.GetAll(ctx, q, &got) + if err != nil { + t.Errorf("client.GetAll(limit=%d offset=%d): %v", limit, offset, err) + return + } + if len(got) != want { + t.Errorf("GetAll(limit=%d offset=%d) returned %d, want %d", limit, offset, len(got), want) + } + for i, child := range got { + if got, want := child.I, i+offset; got != want { + t.Errorf("GetAll(limit=%d offset=%d) got[%d].I == %d; want %d", limit, offset, i, got, want) + break + } + } + }(tt.limit, tt.offset, tt.want) + } + + // Also check iterator cursor behaviour. + cursorTests := []struct { + limit, offset int // Query limit and offset. + count int // The number of times to call "next" + want int // The I value of the desired element, -1 for "Done". + }{ + // No limits. + {count: 0, limit: -1, want: 0}, + {count: 5, limit: -1, want: 5}, + {count: 500, limit: -1, want: 500}, + {count: 1000, limit: -1, want: -1}, // No more results. + // Limits. + {count: 5, limit: 5, want: 5}, + {count: 500, limit: 5, want: 5}, + {count: 1000, limit: 1000, want: -1}, // No more results. + // Offsets. + {count: 0, offset: 5, limit: -1, want: 5}, + {count: 5, offset: 5, limit: -1, want: 10}, + {count: 200, offset: 500, limit: -1, want: 700}, + {count: 200, offset: 1000, limit: -1, want: -1}, // No more results. + } + for _, tt := range cursorTests { + wg.Add(1) + + go func(count, limit, offset, want int) { + defer wg.Done() + + // Run iterator through count calls to Next. 
+ it := client.Run(ctx, q.Limit(limit).Offset(offset).KeysOnly()) + for i := 0; i < count; i++ { + _, err := it.Next(nil) + if err == iterator.Done { + break + } + if err != nil { + t.Errorf("count=%d, limit=%d, offset=%d: it.Next failed at i=%d", count, limit, offset, i) + return + } + } + + // Grab the cursor. + cursor, err := it.Cursor() + if err != nil { + t.Errorf("count=%d, limit=%d, offset=%d: it.Cursor: %v", count, limit, offset, err) + return + } + + // Make a request for the next element. + it = client.Run(ctx, q.Limit(1).Start(cursor)) + var entity SQChild + _, err = it.Next(&entity) + switch { + case want == -1: + if err != iterator.Done { + t.Errorf("count=%d, limit=%d, offset=%d: it.Next from cursor %v, want Done", count, limit, offset, err) + } + case err != nil: + t.Errorf("count=%d, limit=%d, offset=%d: it.Next from cursor: %v, want nil", count, limit, offset, err) + case entity.I != want: + t.Errorf("count=%d, limit=%d, offset=%d: got.I = %d, want %d", count, limit, offset, entity.I, want) + } + }(tt.count, tt.limit, tt.offset, tt.want) + } + + wg.Wait() +} + +func TestEventualConsistency(t *testing.T) { + // TODO(jba): either make this actually test eventual consistency, or + // delete it. Currently it behaves the same with or without the + // EventualConsistency call. 
+ if testing.Short() { + t.Skip("Integration tests skipped in short mode") + } + ctx := context.Background() + client := newClient(ctx, t) + defer client.Close() + + parent := NameKey("SQParent", "TestEventualConsistency"+suffix, nil) + now := time.Now().Truncate(time.Millisecond).Unix() + children := []*SQChild{ + {I: 0, T: now, U: now}, + {I: 1, T: now, U: now}, + {I: 2, T: now, U: now}, + } + query := NewQuery("SQChild").Ancestor(parent).Filter("T =", now).EventualConsistency() + testSmallQueries(t, ctx, client, parent, children, nil, func() { + got, err := client.Count(ctx, query) + if err != nil { + t.Fatalf("Count: %v", err) + } + if got < 0 || 3 < got { + t.Errorf("Count: got %d, want [0,3]", got) + } + }) +} + +func TestProjection(t *testing.T) { + if testing.Short() { + t.Skip("Integration tests skipped in short mode") + } + ctx := context.Background() + client := newClient(ctx, t) + defer client.Close() + + parent := NameKey("SQParent", "TestProjection"+suffix, nil) + now := time.Now().Truncate(time.Millisecond).Unix() + children := []*SQChild{ + {I: 1 << 0, J: 100, T: now, U: now}, + {I: 1 << 1, J: 100, T: now, U: now}, + {I: 1 << 2, J: 200, T: now, U: now}, + {I: 1 << 3, J: 300, T: now, U: now}, + {I: 1 << 4, J: 300, T: now, U: now}, + } + baseQuery := NewQuery("SQChild").Ancestor(parent).Filter("T=", now).Filter("J>", 150) + testSmallQueries(t, ctx, client, parent, children, []SQTestCase{ + { + "project", + baseQuery.Project("J"), + 3, + 200 + 300 + 300, + }, + { + "distinct", + baseQuery.Project("J").Distinct(), + 2, + 200 + 300, + }, + { + "distinct on", + baseQuery.Project("J").DistinctOn("J"), + 2, + 200 + 300, + }, + { + "project on meaningful (GD_WHEN) field", + baseQuery.Project("U"), + 3, + 0, + }, + }) +} + +func TestAllocateIDs(t *testing.T) { + if testing.Short() { + t.Skip("Integration tests skipped in short mode") + } + ctx := context.Background() + client := newClient(ctx, t) + defer client.Close() + + keys := make([]*Key, 5) + for i := 
range keys { + keys[i] = IncompleteKey("AllocID", nil) + } + keys, err := client.AllocateIDs(ctx, keys) + if err != nil { + t.Errorf("AllocID #0 failed: %v", err) + } + if want := len(keys); want != 5 { + t.Errorf("Expected to allocate 5 keys, %d keys are found", want) + } + for _, k := range keys { + if k.Incomplete() { + t.Errorf("Unexpeceted incomplete key found: %v", k) + } + } +} + +func TestGetAllWithFieldMismatch(t *testing.T) { + if testing.Short() { + t.Skip("Integration tests skipped in short mode") + } + ctx := context.Background() + client := newClient(ctx, t) + defer client.Close() + + type Fat struct { + X, Y int + } + type Thin struct { + X int + } + + // Ancestor queries (those within an entity group) are strongly consistent + // by default, which prevents a test from being flaky. + // See https://cloud.google.com/appengine/docs/go/datastore/queries#Go_Data_consistency + // for more information. + parent := NameKey("SQParent", "TestGetAllWithFieldMismatch"+suffix, nil) + putKeys := make([]*Key, 3) + for i := range putKeys { + putKeys[i] = IDKey("GetAllThing", int64(10+i), parent) + _, err := client.Put(ctx, putKeys[i], &Fat{X: 20 + i, Y: 30 + i}) + if err != nil { + t.Fatalf("client.Put: %v", err) + } + } + + var got []Thin + want := []Thin{ + {X: 20}, + {X: 21}, + {X: 22}, + } + getKeys, err := client.GetAll(ctx, NewQuery("GetAllThing").Ancestor(parent), &got) + if len(getKeys) != 3 && !reflect.DeepEqual(getKeys, putKeys) { + t.Errorf("client.GetAll: keys differ\ngetKeys=%v\nputKeys=%v", getKeys, putKeys) + } + if !reflect.DeepEqual(got, want) { + t.Errorf("client.GetAll: entities differ\ngot =%v\nwant=%v", got, want) + } + if _, ok := err.(*ErrFieldMismatch); !ok { + t.Errorf("client.GetAll: got err=%v, want ErrFieldMismatch", err) + } +} + +func TestKindlessQueries(t *testing.T) { + if testing.Short() { + t.Skip("Integration tests skipped in short mode") + } + ctx := context.Background() + client := newClient(ctx, t) + defer client.Close() + + 
type Dee struct { + I int + Why string + } + type Dum struct { + I int + Pling string + } + + parent := NameKey("Tweedle", "tweedle"+suffix, nil) + + keys := []*Key{ + NameKey("Dee", "dee0", parent), + NameKey("Dum", "dum1", parent), + NameKey("Dum", "dum2", parent), + NameKey("Dum", "dum3", parent), + } + src := []interface{}{ + &Dee{1, "binary0001"}, + &Dum{2, "binary0010"}, + &Dum{4, "binary0100"}, + &Dum{8, "binary1000"}, + } + keys, err := client.PutMulti(ctx, keys, src) + if err != nil { + t.Fatalf("put: %v", err) + } + + testCases := []struct { + desc string + query *Query + want []int + wantErr string + }{ + { + desc: "Dee", + query: NewQuery("Dee"), + want: []int{1}, + }, + { + desc: "Doh", + query: NewQuery("Doh"), + want: nil}, + { + desc: "Dum", + query: NewQuery("Dum"), + want: []int{2, 4, 8}, + }, + { + desc: "", + query: NewQuery(""), + want: []int{1, 2, 4, 8}, + }, + { + desc: "Kindless filter", + query: NewQuery("").Filter("__key__ =", keys[2]), + want: []int{4}, + }, + { + desc: "Kindless order", + query: NewQuery("").Order("__key__"), + want: []int{1, 2, 4, 8}, + }, + { + desc: "Kindless bad filter", + query: NewQuery("").Filter("I =", 4), + wantErr: "kind is required", + }, + { + desc: "Kindless bad order", + query: NewQuery("").Order("-__key__"), + wantErr: "kind is required for all orders except __key__ ascending", + }, + } +loop: + for _, tc := range testCases { + q := tc.query.Ancestor(parent) + gotCount, err := client.Count(ctx, q) + if err != nil { + if tc.wantErr == "" || !strings.Contains(err.Error(), tc.wantErr) { + t.Errorf("count %q: err %v, want err %q", tc.desc, err, tc.wantErr) + } + continue + } + if tc.wantErr != "" { + t.Errorf("count %q: want err %q", tc.desc, tc.wantErr) + continue + } + if gotCount != len(tc.want) { + t.Errorf("count %q: got %d want %d", tc.desc, gotCount, len(tc.want)) + continue + } + var got []int + for iter := client.Run(ctx, q); ; { + var dst struct { + I int + Why, Pling string + } + _, err := 
iter.Next(&dst) + if err == iterator.Done { + break + } + if err != nil { + t.Errorf("iter.Next %q: %v", tc.desc, err) + continue loop + } + got = append(got, dst.I) + } + sort.Ints(got) + if !reflect.DeepEqual(got, tc.want) { + t.Errorf("elems %q: got %+v want %+v", tc.desc, got, tc.want) + continue + } + } +} + +func TestTransaction(t *testing.T) { + if testing.Short() { + t.Skip("Integration tests skipped in short mode") + } + ctx := context.Background() + client := newClient(ctx, t) + defer client.Close() + + type Counter struct { + N int + T time.Time + } + + bangErr := errors.New("bang") + tests := []struct { + desc string + causeConflict []bool + retErr []error + want int + wantErr error + }{ + { + desc: "3 attempts, no conflicts", + causeConflict: []bool{false}, + retErr: []error{nil}, + want: 11, + }, + { + desc: "1 attempt, user error", + causeConflict: []bool{false}, + retErr: []error{bangErr}, + wantErr: bangErr, + }, + { + desc: "2 attempts, 1 conflict", + causeConflict: []bool{true, false}, + retErr: []error{nil, nil}, + want: 13, // Each conflict increments by 2. + }, + { + desc: "3 attempts, 3 conflicts", + causeConflict: []bool{true, true, true}, + retErr: []error{nil, nil, nil}, + wantErr: ErrConcurrentTransaction, + }, + } + + for i, tt := range tests { + // Put a new counter. + c := &Counter{N: 10, T: time.Now()} + key, err := client.Put(ctx, IncompleteKey("TransCounter", nil), c) + if err != nil { + t.Errorf("%s: client.Put: %v", tt.desc, err) + continue + } + defer client.Delete(ctx, key) + + // Increment the counter in a transaction. + // The test case can manually cause a conflict or return an + // error at each attempt. + var attempts int + _, err = client.RunInTransaction(ctx, func(tx *Transaction) error { + attempts++ + if attempts > len(tt.causeConflict) { + return fmt.Errorf("too many attempts. 
Got %d, max %d", attempts, len(tt.causeConflict)) + } + + var c Counter + if err := tx.Get(key, &c); err != nil { + return err + } + c.N++ + if _, err := tx.Put(key, &c); err != nil { + return err + } + + if tt.causeConflict[attempts-1] { + c.N += 1 + if _, err := client.Put(ctx, key, &c); err != nil { + return err + } + } + + return tt.retErr[attempts-1] + }, MaxAttempts(i)) + + // Check the error returned by RunInTransaction. + if err != tt.wantErr { + t.Errorf("%s: got err %v, want %v", tt.desc, err, tt.wantErr) + continue + } + if err != nil { + continue + } + + // Check the final value of the counter. + if err := client.Get(ctx, key, c); err != nil { + t.Errorf("%s: client.Get: %v", tt.desc, err) + continue + } + if c.N != tt.want { + t.Errorf("%s: counter N=%d, want N=%d", tt.desc, c.N, tt.want) + } + } +} + +func TestNilPointers(t *testing.T) { + if testing.Short() { + t.Skip("Integration tests skipped in short mode") + } + ctx := context.Background() + client := newClient(ctx, t) + defer client.Close() + + type X struct { + S string + } + + src := []*X{{"zero"}, {"one"}} + keys := []*Key{IncompleteKey("NilX", nil), IncompleteKey("NilX", nil)} + keys, err := client.PutMulti(ctx, keys, src) + if err != nil { + t.Fatalf("PutMulti: %v", err) + } + + // It's okay to store into a slice of nil *X. + xs := make([]*X, 2) + if err := client.GetMulti(ctx, keys, xs); err != nil { + t.Errorf("GetMulti: %v", err) + } else if !reflect.DeepEqual(xs, src) { + t.Errorf("GetMulti fetched %v, want %v", xs, src) + } + + // It isn't okay to store into a single nil *X. 
+ var x0 *X + if err, want := client.Get(ctx, keys[0], x0), ErrInvalidEntityType; err != want { + t.Errorf("Get: err %v; want %v", err, want) + } + + if err := client.DeleteMulti(ctx, keys); err != nil { + t.Errorf("Delete: %v", err) + } +} + +func TestNestedRepeatedElementNoIndex(t *testing.T) { + if testing.Short() { + t.Skip("Integration tests skipped in short mode") + } + ctx := context.Background() + client := newClient(ctx, t) + defer client.Close() + + type Inner struct { + Name string + Value string `datastore:",noindex"` + } + type Outer struct { + Config []Inner + } + m := &Outer{ + Config: []Inner{ + {Name: "short", Value: "a"}, + {Name: "long", Value: strings.Repeat("a", 2000)}, + }, + } + + key := NameKey("Nested", "Nested"+suffix, nil) + if _, err := client.Put(ctx, key, m); err != nil { + t.Fatalf("client.Put: %v", err) + } + if err := client.Delete(ctx, key); err != nil { + t.Fatalf("client.Delete: %v", err) + } +} diff --git a/vendor/cloud.google.com/go/datastore/key.go b/vendor/cloud.google.com/go/datastore/key.go new file mode 100644 index 00000000..b9f2cf5e --- /dev/null +++ b/vendor/cloud.google.com/go/datastore/key.go @@ -0,0 +1,280 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
package datastore

import (
	"bytes"
	"encoding/base64"
	"encoding/gob"
	"errors"
	"strconv"
	"strings"

	"github.com/golang/protobuf/proto"
	"golang.org/x/net/context"
	pb "google.golang.org/genproto/googleapis/datastore/v1"
)

// Key represents the datastore key for a stored entity.
type Key struct {
	// Kind cannot be empty.
	Kind string
	// Either ID or Name must be zero for the Key to be valid.
	// If both are zero, the Key is incomplete.
	ID   int64
	Name string
	// Parent must either be a complete Key or nil.
	Parent *Key

	// Namespace provides the ability to partition your data for multiple
	// tenants. In most cases, it is not necessary to specify a namespace.
	// See docs on datastore multitenancy for details:
	// https://cloud.google.com/datastore/docs/concepts/multitenancy
	Namespace string
}

// Incomplete reports whether the key does not refer to a stored entity.
// A key is incomplete when it has neither a Name nor a non-zero ID.
func (k *Key) Incomplete() bool {
	return k.Name == "" && k.ID == 0
}

// valid returns whether the key is valid.
// It walks the entire ancestor chain: every link must have a non-empty
// Kind and must not set both Name and ID; every parent must be complete
// and must share its child's namespace. A nil key is invalid.
func (k *Key) valid() bool {
	if k == nil {
		return false
	}
	for ; k != nil; k = k.Parent {
		if k.Kind == "" {
			return false
		}
		if k.Name != "" && k.ID != 0 {
			return false
		}
		if k.Parent != nil {
			if k.Parent.Incomplete() {
				return false
			}
			if k.Parent.Namespace != k.Namespace {
				return false
			}
		}
	}
	return true
}

// Equal reports whether two keys are equal. Two keys are equal if they are
// both nil, or if their kinds, IDs, names, namespaces and parents are equal.
// The two ancestor chains are walked in lockstep, iteratively.
func (k *Key) Equal(o *Key) bool {
	for {
		if k == nil || o == nil {
			return k == o // if either is nil, both must be nil
		}
		if k.Namespace != o.Namespace || k.Name != o.Name || k.ID != o.ID || k.Kind != o.Kind {
			return false
		}
		if k.Parent == nil && o.Parent == nil {
			return true
		}
		k = k.Parent
		o = o.Parent
	}
}

// marshal marshals the key's string representation to the buffer.
// Ancestors are written first (recursively), each element in the form
// "/Kind,Name" or "/Kind,ID".
func (k *Key) marshal(b *bytes.Buffer) {
	if k.Parent != nil {
		k.Parent.marshal(b)
	}
	b.WriteByte('/')
	b.WriteString(k.Kind)
	b.WriteByte(',')
	if k.Name != "" {
		b.WriteString(k.Name)
	} else {
		b.WriteString(strconv.FormatInt(k.ID, 10))
	}
}

// String returns a string representation of the key.
// A nil key renders as the empty string.
func (k *Key) String() string {
	if k == nil {
		return ""
	}
	b := bytes.NewBuffer(make([]byte, 0, 512))
	k.marshal(b)
	return b.String()
}

// Note: Fields not renamed compared to appengine gobKey struct.
// This ensures gobs created by appengine can be read here, and vice versa.
type gobKey struct {
	Kind     string
	StringID string
	IntID    int64
	Parent   *gobKey
	// AppID is never populated by keyToGobKey; the field exists only so
	// gobs produced by the appengine package decode cleanly.
	AppID     string
	Namespace string
}

// keyToGobKey converts k (and, recursively, its ancestors) to the
// gob-compatible representation. A nil key yields nil.
func keyToGobKey(k *Key) *gobKey {
	if k == nil {
		return nil
	}
	return &gobKey{
		Kind:      k.Kind,
		StringID:  k.Name,
		IntID:     k.ID,
		Parent:    keyToGobKey(k.Parent),
		Namespace: k.Namespace,
	}
}

// gobKeyToKey is the inverse of keyToGobKey. The AppID field, if any,
// is discarded.
func gobKeyToKey(gk *gobKey) *Key {
	if gk == nil {
		return nil
	}
	return &Key{
		Kind:      gk.Kind,
		Name:      gk.StringID,
		ID:        gk.IntID,
		Parent:    gobKeyToKey(gk.Parent),
		Namespace: gk.Namespace,
	}
}

// GobEncode marshals the key into a sequence of bytes
// using an encoding/gob.Encoder.
func (k *Key) GobEncode() ([]byte, error) {
	buf := new(bytes.Buffer)
	if err := gob.NewEncoder(buf).Encode(keyToGobKey(k)); err != nil {
		return nil, err
	}
	return buf.Bytes(), nil
}

// GobDecode unmarshals a sequence of bytes using an encoding/gob.Decoder.
// On success the receiver is overwritten in place.
func (k *Key) GobDecode(buf []byte) error {
	gk := new(gobKey)
	if err := gob.NewDecoder(bytes.NewBuffer(buf)).Decode(gk); err != nil {
		return err
	}
	*k = *gobKeyToKey(gk)
	return nil
}

// MarshalJSON marshals the key into JSON, as a double-quoted Encode() string.
func (k *Key) MarshalJSON() ([]byte, error) {
	return []byte(`"` + k.Encode() + `"`), nil
}

// UnmarshalJSON unmarshals a key JSON object into a Key.
func (k *Key) UnmarshalJSON(buf []byte) error {
	// The input must be a JSON string: a double-quoted Encode() value.
	if len(buf) < 2 || buf[0] != '"' || buf[len(buf)-1] != '"' {
		return errors.New("datastore: bad JSON key")
	}
	k2, err := DecodeKey(string(buf[1 : len(buf)-1]))
	if err != nil {
		return err
	}
	*k = *k2
	return nil
}

// Encode returns an opaque representation of the key
// suitable for use in HTML and URLs.
// This is compatible with the Python and Java runtimes.
func (k *Key) Encode() string {
	pKey := keyToProto(k)

	b, err := proto.Marshal(pKey)
	if err != nil {
		// Marshaling an in-memory proto should not fail; treat it as a
		// programmer error rather than returning it.
		panic(err)
	}

	// Trailing padding is stripped.
	return strings.TrimRight(base64.URLEncoding.EncodeToString(b), "=")
}

// DecodeKey decodes a key from the opaque representation returned by Encode.
func DecodeKey(encoded string) (*Key, error) {
	// Re-add padding.
	if m := len(encoded) % 4; m != 0 {
		encoded += strings.Repeat("=", 4-m)
	}

	b, err := base64.URLEncoding.DecodeString(encoded)
	if err != nil {
		return nil, err
	}

	pKey := new(pb.Key)
	if err := proto.Unmarshal(b, pKey); err != nil {
		return nil, err
	}
	return protoToKey(pKey)
}

// AllocateIDs accepts a slice of incomplete keys and returns a
// slice of complete keys that are guaranteed to be valid in the datastore.
// A nil slice returns (nil, nil) without issuing an RPC.
func (c *Client) AllocateIDs(ctx context.Context, keys []*Key) ([]*Key, error) {
	if keys == nil {
		return nil, nil
	}

	req := &pb.AllocateIdsRequest{
		ProjectId: c.dataset,
		Keys:      multiKeyToProto(keys),
	}
	resp, err := c.client.AllocateIds(ctx, req)
	if err != nil {
		return nil, err
	}

	return multiProtoToKey(resp.Keys)
}

// IncompleteKey creates a new incomplete key.
// The supplied kind cannot be empty.
// The namespace of the new key is empty.
func IncompleteKey(kind string, parent *Key) *Key {
	return &Key{
		Kind:   kind,
		Parent: parent,
	}
}

// NameKey creates a new key with a name.
// The supplied kind cannot be empty.
// The supplied parent must either be a complete key or nil.
+// The namespace of the new key is empty. +func NameKey(kind, name string, parent *Key) *Key { + return &Key{ + Kind: kind, + Name: name, + Parent: parent, + } +} + +// IDKey creates a new key with an ID. +// The supplied kind cannot be empty. +// The supplied parent must either be a complete key or nil. +// The namespace of the new key is empty. +func IDKey(kind string, id int64, parent *Key) *Key { + return &Key{ + Kind: kind, + ID: id, + Parent: parent, + } +} diff --git a/vendor/cloud.google.com/go/datastore/key_test.go b/vendor/cloud.google.com/go/datastore/key_test.go new file mode 100644 index 00000000..5f2ddcb6 --- /dev/null +++ b/vendor/cloud.google.com/go/datastore/key_test.go @@ -0,0 +1,210 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package datastore + +import ( + "bytes" + "encoding/gob" + "encoding/json" + "testing" +) + +func TestEqual(t *testing.T) { + testCases := []struct { + x, y *Key + equal bool + }{ + { + x: nil, + y: nil, + equal: true, + }, + { + x: &Key{Kind: "kindA"}, + y: &Key{Kind: "kindA"}, + equal: true, + }, + { + x: &Key{Kind: "kindA", Name: "nameA"}, + y: &Key{Kind: "kindA", Name: "nameA"}, + equal: true, + }, + { + x: &Key{Kind: "kindA", Name: "nameA", Namespace: "gopherspace"}, + y: &Key{Kind: "kindA", Name: "nameA", Namespace: "gopherspace"}, + equal: true, + }, + { + x: &Key{Kind: "kindA", ID: 1337, Parent: &Key{Kind: "kindX", Name: "nameX"}}, + y: &Key{Kind: "kindA", ID: 1337, Parent: &Key{Kind: "kindX", Name: "nameX"}}, + equal: true, + }, + { + x: &Key{Kind: "kindA", Name: "nameA"}, + y: &Key{Kind: "kindB", Name: "nameA"}, + equal: false, + }, + { + x: &Key{Kind: "kindA", Name: "nameA"}, + y: &Key{Kind: "kindA", Name: "nameB"}, + equal: false, + }, + { + x: &Key{Kind: "kindA", Name: "nameA"}, + y: &Key{Kind: "kindA", ID: 1337}, + equal: false, + }, + { + x: &Key{Kind: "kindA", Name: "nameA"}, + y: &Key{Kind: "kindA", Name: "nameA", Namespace: "gopherspace"}, + equal: false, + }, + { + x: &Key{Kind: "kindA", ID: 1337, Parent: &Key{Kind: "kindX", Name: "nameX"}}, + y: &Key{Kind: "kindA", ID: 1337, Parent: &Key{Kind: "kindY", Name: "nameX"}}, + equal: false, + }, + { + x: &Key{Kind: "kindA", ID: 1337, Parent: &Key{Kind: "kindX", Name: "nameX"}}, + y: &Key{Kind: "kindA", ID: 1337}, + equal: false, + }, + } + + for _, tt := range testCases { + if got := tt.x.Equal(tt.y); got != tt.equal { + t.Errorf("Equal(%v, %v) = %t; want %t", tt.x, tt.y, got, tt.equal) + } + if got := tt.y.Equal(tt.x); got != tt.equal { + t.Errorf("Equal(%v, %v) = %t; want %t", tt.y, tt.x, got, tt.equal) + } + } +} + +func TestEncoding(t *testing.T) { + testCases := []struct { + k *Key + valid bool + }{ + { + k: nil, + valid: false, + }, + { + k: &Key{}, + valid: false, + }, + { + k: &Key{Kind: 
"kindA"}, + valid: true, + }, + { + k: &Key{Kind: "kindA", Namespace: "gopherspace"}, + valid: true, + }, + { + k: &Key{Kind: "kindA", Name: "nameA"}, + valid: true, + }, + { + k: &Key{Kind: "kindA", ID: 1337}, + valid: true, + }, + { + k: &Key{Kind: "kindA", Name: "nameA", ID: 1337}, + valid: false, + }, + { + k: &Key{Kind: "kindA", Parent: &Key{Kind: "kindB", Name: "nameB"}}, + valid: true, + }, + { + k: &Key{Kind: "kindA", Parent: &Key{Kind: "kindB"}}, + valid: false, + }, + { + k: &Key{Kind: "kindA", Parent: &Key{Kind: "kindB", Name: "nameB", Namespace: "gopherspace"}}, + valid: false, + }, + } + + for _, tt := range testCases { + if got := tt.k.valid(); got != tt.valid { + t.Errorf("valid(%v) = %t; want %t", tt.k, got, tt.valid) + } + + // Check encoding/decoding for valid keys. + if !tt.valid { + continue + } + enc := tt.k.Encode() + dec, err := DecodeKey(enc) + if err != nil { + t.Errorf("DecodeKey(%q) from %v: %v", enc, tt.k, err) + continue + } + if !tt.k.Equal(dec) { + t.Logf("Proto: %s", keyToProto(tt.k)) + t.Errorf("Decoded key %v not equal to %v", dec, tt.k) + } + + b, err := json.Marshal(tt.k) + if err != nil { + t.Errorf("json.Marshal(%v): %v", tt.k, err) + continue + } + key := &Key{} + if err := json.Unmarshal(b, key); err != nil { + t.Errorf("json.Unmarshal(%s) for key %v: %v", b, tt.k, err) + continue + } + if !tt.k.Equal(key) { + t.Errorf("JSON decoded key %v not equal to %v", dec, tt.k) + } + + buf := &bytes.Buffer{} + gobEnc := gob.NewEncoder(buf) + if err := gobEnc.Encode(tt.k); err != nil { + t.Errorf("gobEnc.Encode(%v): %v", tt.k, err) + continue + } + gobDec := gob.NewDecoder(buf) + key = &Key{} + if err := gobDec.Decode(key); err != nil { + t.Errorf("gobDec.Decode() for key %v: %v", tt.k, err) + } + if !tt.k.Equal(key) { + t.Errorf("gob decoded key %v not equal to %v", dec, tt.k) + } + } +} + +func TestInvalidKeyDecode(t *testing.T) { + // Check that decoding an invalid key returns an err and doesn't panic. 
+ enc := NameKey("Kind", "Foo", nil).Encode() + + invalid := []string{ + "", + "Laboratorio", + enc + "Junk", + enc[:len(enc)-4], + } + for _, enc := range invalid { + key, err := DecodeKey(enc) + if err == nil || key != nil { + t.Errorf("DecodeKey(%q) = %v, %v; want nil, error", enc, key, err) + } + } +} diff --git a/vendor/cloud.google.com/go/datastore/load.go b/vendor/cloud.google.com/go/datastore/load.go new file mode 100644 index 00000000..119e8065 --- /dev/null +++ b/vendor/cloud.google.com/go/datastore/load.go @@ -0,0 +1,430 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package datastore + +import ( + "fmt" + "reflect" + "strings" + "time" + + "cloud.google.com/go/internal/fields" + pb "google.golang.org/genproto/googleapis/datastore/v1" +) + +var ( + typeOfByteSlice = reflect.TypeOf([]byte(nil)) + typeOfTime = reflect.TypeOf(time.Time{}) + typeOfGeoPoint = reflect.TypeOf(GeoPoint{}) + typeOfKeyPtr = reflect.TypeOf(&Key{}) + typeOfEntityPtr = reflect.TypeOf(&Entity{}) +) + +// typeMismatchReason returns a string explaining why the property p could not +// be stored in an entity field of type v.Type(). 
func typeMismatchReason(p Property, v reflect.Value) string {
	// Map the property's dynamic type to a human-readable name;
	// "empty" covers a nil property value or any unrecognized type.
	entityType := "empty"
	switch p.Value.(type) {
	case int64:
		entityType = "int"
	case bool:
		entityType = "bool"
	case string:
		entityType = "string"
	case float64:
		entityType = "float"
	case *Key:
		entityType = "*datastore.Key"
	case *Entity:
		entityType = "*datastore.Entity"
	case GeoPoint:
		entityType = "GeoPoint"
	case time.Time:
		entityType = "time.Time"
	case []byte:
		entityType = "[]byte"
	}

	return fmt.Sprintf("type mismatch: %s versus %v", entityType, v.Type())
}

type propertyLoader struct {
	// m holds the number of times a substruct field like "Foo.Bar.Baz" has
	// been seen so far. The map is constructed lazily.
	m map[string]int
}

// load loads Property p into structValue using codec. A multi-valued
// property ([]interface{}) is flattened: each element is loaded in turn
// as a single value. It returns a non-empty error string on failure.
func (l *propertyLoader) load(codec fields.List, structValue reflect.Value, p Property, prev map[string]struct{}) string {
	sl, ok := p.Value.([]interface{})
	if !ok {
		return l.loadOneElement(codec, structValue, p, prev)
	}
	for _, val := range sl {
		p.Value = val
		if errStr := l.loadOneElement(codec, structValue, p, prev); errStr != "" {
			return errStr
		}
	}
	return ""
}

// loadOneElement loads the value of Property p into structValue based on the provided
// codec. codec is used to find the field in structValue into which p should be loaded.
// prev is the set of property names already seen for structValue.
func (l *propertyLoader) loadOneElement(codec fields.List, structValue reflect.Value, p Property, prev map[string]struct{}) string {
	var sliceOk bool
	var sliceIndex int
	var v reflect.Value

	name := p.Name
	fieldNames := strings.Split(name, ".")

	for len(fieldNames) > 0 {
		var field *fields.Field

		// Start by trying to find a field with name. If none found,
		// cut off the last field (delimited by ".") and find its parent
		// in the codec.
		// eg. for name "A.B.C.D", split off "A.B.C" and try to
		// find a field in the codec with this name.
		// Loop again with "A.B", etc.
		for i := len(fieldNames); i > 0; i-- {
			parent := strings.Join(fieldNames[:i], ".")
			field = codec.Match(parent)
			if field != nil {
				fieldNames = fieldNames[i:]
				break
			}
		}

		// If we never found a matching field in the codec, return
		// error message.
		if field == nil {
			return "no such struct field"
		}

		v = initField(structValue, field.Index)
		if !v.IsValid() {
			return "no such struct field"
		}
		if !v.CanSet() {
			return "cannot set struct field"
		}

		var err error
		if field.Type.Kind() == reflect.Struct {
			// Descend into the substruct: switch to its codec and
			// continue resolving the remaining name segments.
			codec, err = structCache.Fields(field.Type)
			if err != nil {
				return err.Error()
			}
			structValue = v
		}

		// If the element is a slice, we need to accommodate it.
		if v.Kind() == reflect.Slice && v.Type() != typeOfByteSlice {
			if l.m == nil {
				l.m = make(map[string]int)
			}
			// Each occurrence of this property name targets the next
			// slice element; grow the slice with zero values as needed.
			sliceIndex = l.m[p.Name]
			l.m[p.Name] = sliceIndex + 1
			for v.Len() <= sliceIndex {
				v.Set(reflect.Append(v, reflect.New(v.Type().Elem()).Elem()))
			}
			structValue = v.Index(sliceIndex)
			if structValue.Type().Kind() == reflect.Struct {
				codec, err = structCache.Fields(structValue.Type())
				if err != nil {
					return err.Error()
				}
			}
			sliceOk = true
		}
	}

	var slice reflect.Value
	if v.Kind() == reflect.Slice && v.Type().Elem().Kind() != reflect.Uint8 {
		// Load into a fresh element first; it is copied into the slice
		// below only if setVal succeeds.
		slice = v
		v = reflect.New(v.Type().Elem()).Elem()
	} else if _, ok := prev[p.Name]; ok && !sliceOk {
		// Zero the field back out that was set previously, turns out
		// it's a slice and we don't know what to do with it
		v.Set(reflect.Zero(v.Type()))
		return "multiple-valued property requires a slice field type"
	}

	prev[p.Name] = struct{}{}

	if errReason := setVal(v, p); errReason != "" {
		// Set the slice back to its zero value.
		if slice.IsValid() {
			slice.Set(reflect.Zero(slice.Type()))
		}
		return errReason
	}

	if slice.IsValid() {
		slice.Index(sliceIndex).Set(v)
	}

	return ""
}

// setVal sets 'v' to the value of the Property 'p'.
func setVal(v reflect.Value, p Property) string {
	pValue := p.Value

	switch v.Kind() {
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		// A nil pValue falls through with x == 0 (the zero value).
		x, ok := pValue.(int64)
		if !ok && pValue != nil {
			return typeMismatchReason(p, v)
		}
		if v.OverflowInt(x) {
			return fmt.Sprintf("value %v overflows struct field of type %v", x, v.Type())
		}
		v.SetInt(x)
	case reflect.Bool:
		x, ok := pValue.(bool)
		if !ok && pValue != nil {
			return typeMismatchReason(p, v)
		}
		v.SetBool(x)
	case reflect.String:
		x, ok := pValue.(string)
		if !ok && pValue != nil {
			return typeMismatchReason(p, v)
		}
		v.SetString(x)
	case reflect.Float32, reflect.Float64:
		x, ok := pValue.(float64)
		if !ok && pValue != nil {
			return typeMismatchReason(p, v)
		}
		if v.OverflowFloat(x) {
			return fmt.Sprintf("value %v overflows struct field of type %v", x, v.Type())
		}
		v.SetFloat(x)
	case reflect.Ptr:
		// v must be either a pointer to a Key or Entity.
		if v.Type() != typeOfKeyPtr && v.Type().Elem().Kind() != reflect.Struct {
			return typeMismatchReason(p, v)
		}

		if pValue == nil {
			// If v is populated already, set it to nil.
			// (reflect.New(ptrType).Elem() is the typed nil pointer.)
			if !v.IsNil() {
				v.Set(reflect.New(v.Type()).Elem())
			}
			return ""
		}

		switch x := pValue.(type) {
		case *Key:
			if _, ok := v.Interface().(*Key); !ok {
				return typeMismatchReason(p, v)
			}
			v.Set(reflect.ValueOf(x))
		case *Entity:
			// Allocate the destination struct if needed, then load the
			// nested entity into it.
			if v.IsNil() {
				v.Set(reflect.New(v.Type().Elem()))
			}
			err := loadEntity(v.Interface(), x)
			if err != nil {
				return err.Error()
			}

		default:
			return typeMismatchReason(p, v)
		}
	case reflect.Struct:
		switch v.Type() {
		case typeOfTime:
			x, ok := pValue.(time.Time)
			if !ok && pValue != nil {
				return typeMismatchReason(p, v)
			}
			v.Set(reflect.ValueOf(x))
		case typeOfGeoPoint:
			x, ok := pValue.(GeoPoint)
			if !ok && pValue != nil {
				return typeMismatchReason(p, v)
			}
			v.Set(reflect.ValueOf(x))
		default:
			// Any other struct type is treated as a nested entity.
			ent, ok := pValue.(*Entity)
			if !ok {
				return typeMismatchReason(p, v)
			}

			// Check if v implements PropertyLoadSaver.
			if _, ok := v.Interface().(PropertyLoadSaver); ok {
				return fmt.Sprintf("datastore: PropertyLoadSaver methods must be implemented on a pointer to %T.", v.Interface())
			}

			err := loadEntity(v.Addr().Interface(), ent)
			if err != nil {
				return err.Error()
			}
		}
	case reflect.Slice:
		// Only []byte is settable here; other slice kinds are handled
		// by the caller (loadOneElement) element-by-element.
		x, ok := pValue.([]byte)
		if !ok && pValue != nil {
			return typeMismatchReason(p, v)
		}
		if v.Type().Elem().Kind() != reflect.Uint8 {
			return typeMismatchReason(p, v)
		}
		v.SetBytes(x)
	default:
		return typeMismatchReason(p, v)
	}
	return ""
}

// initField is similar to reflect's Value.FieldByIndex, in that it
// returns the nested struct field corresponding to index, but it
// initialises any nil pointers encountered when traversing the structure.
func initField(val reflect.Value, index []int) reflect.Value {
	// Walk all but the last index, allocating pointer links as needed
	// so that the final Field call cannot dereference nil.
	for _, i := range index[:len(index)-1] {
		val = val.Field(i)
		if val.Kind() == reflect.Ptr {
			if val.IsNil() {
				val.Set(reflect.New(val.Type().Elem()))
			}
			val = val.Elem()
		}
	}
	return val.Field(index[len(index)-1])
}

// loadEntityProto loads an EntityProto into PropertyLoadSaver or struct pointer.
func loadEntityProto(dst interface{}, src *pb.Entity) error {
	ent, err := protoToEntity(src)
	if err != nil {
		return err
	}
	return loadEntity(dst, ent)
}

// loadEntity dispatches to dst's own PropertyLoadSaver implementation
// if it has one, otherwise loads field-by-field via reflection.
func loadEntity(dst interface{}, ent *Entity) error {
	if pls, ok := dst.(PropertyLoadSaver); ok {
		return pls.Load(ent.Properties)
	}
	return loadEntityToStruct(dst, ent)
}

// loadEntityToStruct reflects ent's properties (and key, if the struct
// declares a __key__ field) into the struct pointed to by dst.
func loadEntityToStruct(dst interface{}, ent *Entity) error {
	pls, err := newStructPLS(dst)
	if err != nil {
		return err
	}
	// Load properties.
	err = pls.Load(ent.Properties)
	if err != nil {
		return err
	}
	// Load key.
	keyField := pls.codec.Match(keyFieldName)
	if keyField != nil && ent.Key != nil {
		pls.v.FieldByIndex(keyField.Index).Set(reflect.ValueOf(ent.Key))
	}

	return nil
}

// Load implements PropertyLoadSaver for structs. Only the last
// field-mismatch error is reported, wrapped in ErrFieldMismatch.
func (s structPLS) Load(props []Property) error {
	var fieldName, errReason string
	var l propertyLoader

	prev := make(map[string]struct{})
	for _, p := range props {
		if errStr := l.load(s.codec, s.v, p, prev); errStr != "" {
			// We don't return early, as we try to load as many properties as possible.
			// It is valid to load an entity into a struct that cannot fully represent it.
			// That case returns an error, but the caller is free to ignore it.
			fieldName, errReason = p.Name, errStr
		}
	}
	if errReason != "" {
		return &ErrFieldMismatch{
			StructType: s.v.Type(),
			FieldName:  fieldName,
			Reason:     errReason,
		}
	}
	return nil
}

// protoToEntity converts a protobuf Entity into the in-memory Entity
// representation. Property iteration order follows Go map order and is
// therefore unspecified.
func protoToEntity(src *pb.Entity) (*Entity, error) {
	props := make([]Property, 0, len(src.Properties))
	for name, val := range src.Properties {
		v, err := propToValue(val)
		if err != nil {
			return nil, err
		}
		props = append(props, Property{
			Name:    name,
			Value:   v,
			NoIndex: val.ExcludeFromIndexes,
		})
	}
	var key *Key
	if src.Key != nil {
		// Ignore any error, since nested entity values
		// are allowed to have an invalid key.
		key, _ = protoToKey(src.Key)
	}

	return &Entity{key, props}, nil
}

// propToValue returns a Go value that represents the PropertyValue. For
// example, a TimestampValue becomes a time.Time.
func propToValue(v *pb.Value) (interface{}, error) {
	switch v := v.ValueType.(type) {
	case *pb.Value_NullValue:
		return nil, nil
	case *pb.Value_BooleanValue:
		return v.BooleanValue, nil
	case *pb.Value_IntegerValue:
		return v.IntegerValue, nil
	case *pb.Value_DoubleValue:
		return v.DoubleValue, nil
	case *pb.Value_TimestampValue:
		// time.Unix yields a Time in the local zone.
		return time.Unix(v.TimestampValue.Seconds, int64(v.TimestampValue.Nanos)), nil
	case *pb.Value_KeyValue:
		return protoToKey(v.KeyValue)
	case *pb.Value_StringValue:
		return v.StringValue, nil
	case *pb.Value_BlobValue:
		return []byte(v.BlobValue), nil
	case *pb.Value_GeoPointValue:
		return GeoPoint{Lat: v.GeoPointValue.Latitude, Lng: v.GeoPointValue.Longitude}, nil
	case *pb.Value_EntityValue:
		return protoToEntity(v.EntityValue)
	case *pb.Value_ArrayValue:
		// Convert each element recursively; nested arrays are not
		// flattened here.
		arr := make([]interface{}, 0, len(v.ArrayValue.Values))
		for _, v := range v.ArrayValue.Values {
			vv, err := propToValue(v)
			if err != nil {
				return nil, err
			}
			arr = append(arr, vv)
		}
		return arr, nil
	default:
		// Unknown value types load as nil rather than erroring.
		return nil, nil
	}
}

diff --git a/vendor/cloud.google.com/go/datastore/load_test.go
b/vendor/cloud.google.com/go/datastore/load_test.go new file mode 100644 index 00000000..e24ad45a --- /dev/null +++ b/vendor/cloud.google.com/go/datastore/load_test.go @@ -0,0 +1,510 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package datastore + +import ( + "reflect" + "testing" + + pb "google.golang.org/genproto/googleapis/datastore/v1" +) + +type Simple struct { + I int64 +} + +type SimpleWithTag struct { + I int64 `datastore:"II"` +} + +type NestedSimpleWithTag struct { + A SimpleWithTag `datastore:"AA"` +} + +type NestedSliceOfSimple struct { + A []Simple +} + +type SimpleTwoFields struct { + S string + SS string +} + +type NestedSimpleAnonymous struct { + Simple + X string +} + +type NestedSimple struct { + A Simple + I int +} + +type NestedSimple1 struct { + A Simple + X string +} + +type NestedSimple2X struct { + AA NestedSimple + A SimpleTwoFields + S string +} + +type BDotB struct { + B string `datastore:"B.B"` +} + +type ABDotB struct { + A BDotB +} + +type MultiAnonymous struct { + Simple + SimpleTwoFields + X string +} + +func TestLoadEntityNestedLegacy(t *testing.T) { + testCases := []struct { + desc string + src *pb.Entity + want interface{} + }{ + { + "nested", + &pb.Entity{ + Key: keyToProto(testKey0), + Properties: map[string]*pb.Value{ + "X": {ValueType: &pb.Value_StringValue{"two"}}, + "A.I": {ValueType: &pb.Value_IntegerValue{2}}, + }, + }, + &NestedSimple1{ + A: Simple{I: 
2}, + X: "two", + }, + }, + { + "nested with tag", + &pb.Entity{ + Key: keyToProto(testKey0), + Properties: map[string]*pb.Value{ + "AA.II": {ValueType: &pb.Value_IntegerValue{2}}, + }, + }, + &NestedSimpleWithTag{ + A: SimpleWithTag{I: 2}, + }, + }, + { + "nested with anonymous struct field", + &pb.Entity{ + Key: keyToProto(testKey0), + Properties: map[string]*pb.Value{ + "X": {ValueType: &pb.Value_StringValue{"two"}}, + "I": {ValueType: &pb.Value_IntegerValue{2}}, + }, + }, + &NestedSimpleAnonymous{ + Simple: Simple{I: 2}, + X: "two", + }, + }, + { + "nested with dotted field tag", + &pb.Entity{ + Key: keyToProto(testKey0), + Properties: map[string]*pb.Value{ + "A.B.B": {ValueType: &pb.Value_StringValue{"bb"}}, + }, + }, + &ABDotB{ + A: BDotB{ + B: "bb", + }, + }, + }, + { + "nested with multiple anonymous fields", + &pb.Entity{ + Key: keyToProto(testKey0), + Properties: map[string]*pb.Value{ + "I": {ValueType: &pb.Value_IntegerValue{3}}, + "S": {ValueType: &pb.Value_StringValue{"S"}}, + "SS": {ValueType: &pb.Value_StringValue{"s"}}, + "X": {ValueType: &pb.Value_StringValue{"s"}}, + }, + }, + &MultiAnonymous{ + Simple: Simple{I: 3}, + SimpleTwoFields: SimpleTwoFields{S: "S", SS: "s"}, + X: "s", + }, + }, + } + + for _, tc := range testCases { + dst := reflect.New(reflect.TypeOf(tc.want).Elem()).Interface() + err := loadEntityProto(dst, tc.src) + if err != nil { + t.Errorf("loadEntityProto: %s: %v", tc.desc, err) + continue + } + + if !reflect.DeepEqual(tc.want, dst) { + t.Errorf("%s: compare:\ngot: %#v\nwant: %#v", tc.desc, dst, tc.want) + } + } +} + +type WithKey struct { + X string + I int + K *Key `datastore:"__key__"` +} + +type NestedWithKey struct { + Y string + N WithKey +} + +var ( + incompleteKey = newKey("", nil) + invalidKey = newKey("s", incompleteKey) +) + +func TestLoadEntityNested(t *testing.T) { + testCases := []struct { + desc string + src *pb.Entity + want interface{} + }{ + { + "nested basic", + &pb.Entity{ + Properties: map[string]*pb.Value{ + 
"A": {ValueType: &pb.Value_EntityValue{ + &pb.Entity{ + Properties: map[string]*pb.Value{ + "I": {ValueType: &pb.Value_IntegerValue{3}}, + }, + }, + }}, + "I": {ValueType: &pb.Value_IntegerValue{10}}, + }, + }, + &NestedSimple{ + A: Simple{I: 3}, + I: 10, + }, + }, + { + "nested with struct tags", + &pb.Entity{ + Properties: map[string]*pb.Value{ + "AA": {ValueType: &pb.Value_EntityValue{ + &pb.Entity{ + Properties: map[string]*pb.Value{ + "II": {ValueType: &pb.Value_IntegerValue{1}}, + }, + }, + }}, + }, + }, + &NestedSimpleWithTag{ + A: SimpleWithTag{I: 1}, + }, + }, + { + "nested 2x", + &pb.Entity{ + Properties: map[string]*pb.Value{ + "AA": {ValueType: &pb.Value_EntityValue{ + &pb.Entity{ + Properties: map[string]*pb.Value{ + "A": {ValueType: &pb.Value_EntityValue{ + &pb.Entity{ + Properties: map[string]*pb.Value{ + "I": {ValueType: &pb.Value_IntegerValue{3}}, + }, + }, + }}, + "I": {ValueType: &pb.Value_IntegerValue{1}}, + }, + }, + }}, + "A": {ValueType: &pb.Value_EntityValue{ + &pb.Entity{ + Properties: map[string]*pb.Value{ + "S": {ValueType: &pb.Value_StringValue{"S"}}, + "SS": {ValueType: &pb.Value_StringValue{"s"}}, + }, + }, + }}, + "S": {ValueType: &pb.Value_StringValue{"SS"}}, + }, + }, + &NestedSimple2X{ + AA: NestedSimple{ + A: Simple{I: 3}, + I: 1, + }, + A: SimpleTwoFields{S: "S", SS: "s"}, + S: "SS", + }, + }, + { + "nested anonymous", + &pb.Entity{ + Properties: map[string]*pb.Value{ + "I": {ValueType: &pb.Value_IntegerValue{3}}, + "X": {ValueType: &pb.Value_StringValue{"SomeX"}}, + }, + }, + &NestedSimpleAnonymous{ + Simple: Simple{I: 3}, + X: "SomeX", + }, + }, + { + "nested simple with slice", + &pb.Entity{ + Properties: map[string]*pb.Value{ + "A": {ValueType: &pb.Value_ArrayValue{ + &pb.ArrayValue{ + []*pb.Value{ + {ValueType: &pb.Value_EntityValue{ + &pb.Entity{ + Properties: map[string]*pb.Value{ + "I": {ValueType: &pb.Value_IntegerValue{3}}, + }, + }, + }}, + {ValueType: &pb.Value_EntityValue{ + &pb.Entity{ + Properties: 
map[string]*pb.Value{ + "I": {ValueType: &pb.Value_IntegerValue{4}}, + }, + }, + }}, + }, + }, + }}, + }, + }, + + &NestedSliceOfSimple{ + A: []Simple{Simple{I: 3}, Simple{I: 4}}, + }, + }, + { + "nested with multiple anonymous fields", + &pb.Entity{ + Properties: map[string]*pb.Value{ + "I": {ValueType: &pb.Value_IntegerValue{3}}, + "S": {ValueType: &pb.Value_StringValue{"S"}}, + "SS": {ValueType: &pb.Value_StringValue{"s"}}, + "X": {ValueType: &pb.Value_StringValue{"ss"}}, + }, + }, + &MultiAnonymous{ + Simple: Simple{I: 3}, + SimpleTwoFields: SimpleTwoFields{S: "S", SS: "s"}, + X: "ss", + }, + }, + { + "nested with dotted field tag", + &pb.Entity{ + Properties: map[string]*pb.Value{ + "A": {ValueType: &pb.Value_EntityValue{ + &pb.Entity{ + Properties: map[string]*pb.Value{ + "B.B": {ValueType: &pb.Value_StringValue{"bb"}}, + }, + }, + }}, + }, + }, + &ABDotB{ + A: BDotB{ + B: "bb", + }, + }, + }, + { + "nested entity with key", + &pb.Entity{ + Key: keyToProto(testKey0), + Properties: map[string]*pb.Value{ + "Y": {ValueType: &pb.Value_StringValue{"yyy"}}, + "N": {ValueType: &pb.Value_EntityValue{ + &pb.Entity{ + Key: keyToProto(testKey1a), + Properties: map[string]*pb.Value{ + "X": {ValueType: &pb.Value_StringValue{"two"}}, + "I": {ValueType: &pb.Value_IntegerValue{2}}, + }, + }, + }}, + }, + }, + &NestedWithKey{ + Y: "yyy", + N: WithKey{ + X: "two", + I: 2, + K: testKey1a, + }, + }, + }, + { + "nested entity with invalid key", + &pb.Entity{ + Key: keyToProto(testKey0), + Properties: map[string]*pb.Value{ + "Y": {ValueType: &pb.Value_StringValue{"yyy"}}, + "N": {ValueType: &pb.Value_EntityValue{ + &pb.Entity{ + Key: keyToProto(invalidKey), + Properties: map[string]*pb.Value{ + "X": {ValueType: &pb.Value_StringValue{"two"}}, + "I": {ValueType: &pb.Value_IntegerValue{2}}, + }, + }, + }}, + }, + }, + &NestedWithKey{ + Y: "yyy", + N: WithKey{ + X: "two", + I: 2, + K: invalidKey, + }, + }, + }, + } + + for _, tc := range testCases { + dst := 
reflect.New(reflect.TypeOf(tc.want).Elem()).Interface() + err := loadEntityProto(dst, tc.src) + if err != nil { + t.Errorf("loadEntityProto: %s: %v", tc.desc, err) + continue + } + + if !reflect.DeepEqual(tc.want, dst) { + t.Errorf("%s: compare:\ngot: %#v\nwant: %#v", tc.desc, dst, tc.want) + } + } +} + +type NestedStructPtrs struct { + *SimpleTwoFields + Nest *SimpleTwoFields + TwiceNest *NestedSimple2 + I int +} + +type NestedSimple2 struct { + A *Simple + I int +} + +func TestAlreadyPopulatedDst(t *testing.T) { + testCases := []struct { + desc string + src *pb.Entity + dst interface{} + want interface{} + }{ + { + "simple already populated, nil properties", + &pb.Entity{ + Key: keyToProto(testKey0), + Properties: map[string]*pb.Value{ + "I": {ValueType: &pb.Value_NullValue{}}, + }, + }, + &Simple{ + I: 12, + }, + &Simple{}, + }, + { + "nested structs already populated", + &pb.Entity{ + Key: keyToProto(testKey0), + Properties: map[string]*pb.Value{ + "SS": {ValueType: &pb.Value_StringValue{"world"}}, + }, + }, + &SimpleTwoFields{S: "hello" /* SS: "" */}, + &SimpleTwoFields{S: "hello", SS: "world"}, + }, + { + "nested structs already populated, pValues nil", + &pb.Entity{ + Key: keyToProto(testKey0), + Properties: map[string]*pb.Value{ + "S": {ValueType: &pb.Value_NullValue{}}, + "SS": {ValueType: &pb.Value_StringValue{"ss hello"}}, + "Nest": {ValueType: &pb.Value_NullValue{}}, + "TwiceNest": {ValueType: &pb.Value_EntityValue{ + &pb.Entity{ + Properties: map[string]*pb.Value{ + "A": {ValueType: &pb.Value_NullValue{}}, + "I": {ValueType: &pb.Value_IntegerValue{2}}, + }, + }, + }}, + "I": {ValueType: &pb.Value_IntegerValue{5}}, + }, + }, + &NestedStructPtrs{ + &SimpleTwoFields{S: "hello" /* SS: "" */}, + &SimpleTwoFields{ /* S: "" */ SS: "twice hello"}, + &NestedSimple2{ + A: &Simple{I: 2}, + /* I: 0 */ + }, + 0, + }, + &NestedStructPtrs{ + &SimpleTwoFields{ /* S: "" */ SS: "ss hello"}, + nil, + &NestedSimple2{ + /* A: nil, */ + I: 2, + }, + 5, + }, + }, + } + + for 
_, tc := range testCases { + err := loadEntityProto(tc.dst, tc.src) + if err != nil { + t.Errorf("loadEntityProto: %s: %v", tc.desc, err) + continue + } + + if !reflect.DeepEqual(tc.want, tc.dst) { + t.Errorf("%s: compare:\ngot: %#v\nwant: %#v", tc.desc, tc.dst, tc.want) + } + } +} diff --git a/vendor/cloud.google.com/go/datastore/prop.go b/vendor/cloud.google.com/go/datastore/prop.go new file mode 100644 index 00000000..69e424b3 --- /dev/null +++ b/vendor/cloud.google.com/go/datastore/prop.go @@ -0,0 +1,279 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package datastore + +import ( + "fmt" + "reflect" + "strings" + "unicode" + + "cloud.google.com/go/internal/fields" +) + +// Entities with more than this many indexed properties will not be saved. +const maxIndexedProperties = 20000 + +// []byte fields more than 1 megabyte long will not be loaded or saved. +const maxBlobLen = 1 << 20 + +// Property is a name/value pair plus some metadata. A datastore entity's +// contents are loaded and saved as a sequence of Properties. Each property +// name must be unique within an entity. +type Property struct { + // Name is the property name. + Name string + // Value is the property value. 
The valid types are: + // - int64 + // - bool + // - string + // - float64 + // - *Key + // - time.Time + // - GeoPoint + // - []byte (up to 1 megabyte in length) + // - *Entity (representing a nested struct) + // Value can also be: + // - []interface{} where each element is one of the above types + // This set is smaller than the set of valid struct field types that the + // datastore can load and save. A Value's type must be explicitly on + // the list above; it is not sufficient for the underlying type to be + // on that list. For example, a Value of "type myInt64 int64" is + // invalid. Smaller-width integers and floats are also invalid. Again, + // this is more restrictive than the set of valid struct field types. + // + // A Value will have an opaque type when loading entities from an index, + // such as via a projection query. Load entities into a struct instead + // of a PropertyLoadSaver when using a projection query. + // + // A Value may also be the nil interface value; this is equivalent to + // Python's None but not directly representable by a Go struct. Loading + // a nil-valued property into a struct will set that field to the zero + // value. + Value interface{} + // NoIndex is whether the datastore cannot index this property. + // If NoIndex is set to false, []byte and string values are limited to + // 1500 bytes. + NoIndex bool +} + +// An Entity is the value type for a nested struct. +// This type is only used for a Property's Value. +type Entity struct { + Key *Key + Properties []Property +} + +// PropertyLoadSaver can be converted from and to a slice of Properties. +type PropertyLoadSaver interface { + Load([]Property) error + Save() ([]Property, error) +} + +// PropertyList converts a []Property to implement PropertyLoadSaver. 
+type PropertyList []Property + +var ( + typeOfPropertyLoadSaver = reflect.TypeOf((*PropertyLoadSaver)(nil)).Elem() + typeOfPropertyList = reflect.TypeOf(PropertyList(nil)) +) + +// Load loads all of the provided properties into l. +// It does not first reset *l to an empty slice. +func (l *PropertyList) Load(p []Property) error { + *l = append(*l, p...) + return nil +} + +// Save saves all of l's properties as a slice of Properties. +func (l *PropertyList) Save() ([]Property, error) { + return *l, nil +} + +// validPropertyName returns whether name consists of one or more valid Go +// identifiers joined by ".". +func validPropertyName(name string) bool { + if name == "" { + return false + } + for _, s := range strings.Split(name, ".") { + if s == "" { + return false + } + first := true + for _, c := range s { + if first { + first = false + if c != '_' && !unicode.IsLetter(c) { + return false + } + } else { + if c != '_' && !unicode.IsLetter(c) && !unicode.IsDigit(c) { + return false + } + } + } + } + return true +} + +// parseTag interprets datastore struct field tags +func parseTag(t reflect.StructTag) (name string, keep bool, other interface{}, err error) { + s := t.Get("datastore") + parts := strings.Split(s, ",") + if parts[0] == "-" && len(parts) == 1 { + return "", false, nil, nil + } + if parts[0] != "" && !validPropertyName(parts[0]) { + err = fmt.Errorf("datastore: struct tag has invalid property name: %q", parts[0]) + return "", false, nil, err + } + + var opts saveOpts + if len(parts) > 1 { + for _, p := range parts[1:] { + switch p { + case "flatten": + opts.flatten = true + case "omitempty": + opts.omitEmpty = true + case "noindex": + opts.noIndex = true + default: + err = fmt.Errorf("datastore: struct tag has invalid option: %q", p) + return "", false, nil, err + } + } + other = opts + } + return parts[0], true, other, nil +} + +func validateType(t reflect.Type) error { + if t.Kind() != reflect.Struct { + return fmt.Errorf("datastore: validate called 
with non-struct type %s", t) + } + + return validateChildType(t, "", false, false, map[reflect.Type]bool{}) +} + +// validateChildType is a recursion helper func for validateType +func validateChildType(t reflect.Type, fieldName string, flatten, prevSlice bool, prevTypes map[reflect.Type]bool) error { + if prevTypes[t] { + return nil + } + prevTypes[t] = true + + switch t.Kind() { + case reflect.Slice: + if flatten && prevSlice { + return fmt.Errorf("datastore: flattening nested structs leads to a slice of slices: field %q", fieldName) + } + return validateChildType(t.Elem(), fieldName, flatten, true, prevTypes) + case reflect.Struct: + if t == typeOfTime || t == typeOfGeoPoint { + return nil + } + + for i := 0; i < t.NumField(); i++ { + f := t.Field(i) + + // If a named field is unexported, ignore it. An anonymous + // unexported field is processed, because it may contain + // exported fields, which are visible. + exported := (f.PkgPath == "") + if !exported && !f.Anonymous { + continue + } + + _, keep, other, err := parseTag(f.Tag) + // Handle error from parseTag now instead of later (in cache.Fields call). + if err != nil { + return err + } + if !keep { + continue + } + if other != nil { + opts := other.(saveOpts) + flatten = flatten || opts.flatten + } + if err := validateChildType(f.Type, f.Name, flatten, prevSlice, prevTypes); err != nil { + return err + } + } + case reflect.Ptr: + if t == typeOfKeyPtr { + return nil + } + return validateChildType(t.Elem(), fieldName, flatten, prevSlice, prevTypes) + } + return nil +} + +// isLeafType determines whether or not a type is a 'leaf type' +// and should not be recursed into, but considered one field. +func isLeafType(t reflect.Type) bool { + return t == typeOfTime || t == typeOfGeoPoint +} + +// structCache collects the structs whose fields have already been calculated. +var structCache = fields.NewCache(parseTag, validateType, isLeafType) + +// structPLS adapts a struct to be a PropertyLoadSaver. 
+type structPLS struct { + v reflect.Value + codec fields.List +} + +// newStructPLS returns a structPLS, which implements the +// PropertyLoadSaver interface, for the struct pointer p. +func newStructPLS(p interface{}) (*structPLS, error) { + v := reflect.ValueOf(p) + if v.Kind() != reflect.Ptr || v.Elem().Kind() != reflect.Struct { + return nil, ErrInvalidEntityType + } + v = v.Elem() + f, err := structCache.Fields(v.Type()) + if err != nil { + return nil, err + } + return &structPLS{v, f}, nil +} + +// LoadStruct loads the properties from p to dst. +// dst must be a struct pointer. +// +// The values of dst's unmatched struct fields are not modified, +// and matching slice-typed fields are not reset before appending to +// them. In particular, it is recommended to pass a pointer to a zero +// valued struct on each LoadStruct call. +func LoadStruct(dst interface{}, p []Property) error { + x, err := newStructPLS(dst) + if err != nil { + return err + } + return x.Load(p) +} + +// SaveStruct returns the properties from src as a slice of Properties. +// src must be a struct pointer. +func SaveStruct(src interface{}) ([]Property, error) { + x, err := newStructPLS(src) + if err != nil { + return nil, err + } + return x.Save() +} diff --git a/vendor/cloud.google.com/go/datastore/query.go b/vendor/cloud.google.com/go/datastore/query.go new file mode 100644 index 00000000..09f08304 --- /dev/null +++ b/vendor/cloud.google.com/go/datastore/query.go @@ -0,0 +1,773 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package datastore + +import ( + "encoding/base64" + "errors" + "fmt" + "math" + "reflect" + "strconv" + "strings" + + wrapperspb "github.com/golang/protobuf/ptypes/wrappers" + "golang.org/x/net/context" + "google.golang.org/api/iterator" + pb "google.golang.org/genproto/googleapis/datastore/v1" +) + +type operator int + +const ( + lessThan operator = iota + 1 + lessEq + equal + greaterEq + greaterThan + + keyFieldName = "__key__" +) + +var operatorToProto = map[operator]pb.PropertyFilter_Operator{ + lessThan: pb.PropertyFilter_LESS_THAN, + lessEq: pb.PropertyFilter_LESS_THAN_OR_EQUAL, + equal: pb.PropertyFilter_EQUAL, + greaterEq: pb.PropertyFilter_GREATER_THAN_OR_EQUAL, + greaterThan: pb.PropertyFilter_GREATER_THAN, +} + +// filter is a conditional filter on query results. +type filter struct { + FieldName string + Op operator + Value interface{} +} + +type sortDirection bool + +const ( + ascending sortDirection = false + descending sortDirection = true +) + +var sortDirectionToProto = map[sortDirection]pb.PropertyOrder_Direction{ + ascending: pb.PropertyOrder_ASCENDING, + descending: pb.PropertyOrder_DESCENDING, +} + +// order is a sort order on query results. +type order struct { + FieldName string + Direction sortDirection +} + +// NewQuery creates a new Query for a specific entity kind. +// +// An empty kind means to return all entities, including entities created and +// managed by other App Engine features, and is called a kindless query. +// Kindless queries cannot include filters or sort orders on property values. +func NewQuery(kind string) *Query { + return &Query{ + kind: kind, + limit: -1, + } +} + +// Query represents a datastore query. 
+type Query struct { + kind string + ancestor *Key + filter []filter + order []order + projection []string + + distinct bool + distinctOn []string + keysOnly bool + eventual bool + limit int32 + offset int32 + start []byte + end []byte + + namespace string + + trans *Transaction + + err error +} + +func (q *Query) clone() *Query { + x := *q + // Copy the contents of the slice-typed fields to a new backing store. + if len(q.filter) > 0 { + x.filter = make([]filter, len(q.filter)) + copy(x.filter, q.filter) + } + if len(q.order) > 0 { + x.order = make([]order, len(q.order)) + copy(x.order, q.order) + } + return &x +} + +// Ancestor returns a derivative query with an ancestor filter. +// The ancestor should not be nil. +func (q *Query) Ancestor(ancestor *Key) *Query { + q = q.clone() + if ancestor == nil { + q.err = errors.New("datastore: nil query ancestor") + return q + } + q.ancestor = ancestor + return q +} + +// EventualConsistency returns a derivative query that returns eventually +// consistent results. +// It only has an effect on ancestor queries. +func (q *Query) EventualConsistency() *Query { + q = q.clone() + q.eventual = true + return q +} + +// Namespace returns a derivative query that is associated with the given +// namespace. +// +// A namespace may be used to partition data for multi-tenant applications. +// For details, see https://cloud.google.com/datastore/docs/concepts/multitenancy. +func (q *Query) Namespace(ns string) *Query { + q = q.clone() + q.namespace = ns + return q +} + +// Transaction returns a derivative query that is associated with the given +// transaction. +// +// All reads performed as part of the transaction will come from a single +// consistent snapshot. Furthermore, if the transaction is set to a +// serializable isolation level, another transaction cannot concurrently modify +// the data that is read or modified by this transaction. 
+func (q *Query) Transaction(t *Transaction) *Query { + q = q.clone() + q.trans = t + return q +} + +// Filter returns a derivative query with a field-based filter. +// The filterStr argument must be a field name followed by optional space, +// followed by an operator, one of ">", "<", ">=", "<=", or "=". +// Fields are compared against the provided value using the operator. +// Multiple filters are AND'ed together. +// Field names which contain spaces, quote marks, or operator characters +// should be passed as quoted Go string literals as returned by strconv.Quote +// or the fmt package's %q verb. +func (q *Query) Filter(filterStr string, value interface{}) *Query { + q = q.clone() + filterStr = strings.TrimSpace(filterStr) + if filterStr == "" { + q.err = fmt.Errorf("datastore: invalid filter %q", filterStr) + return q + } + f := filter{ + FieldName: strings.TrimRight(filterStr, " ><=!"), + Value: value, + } + switch op := strings.TrimSpace(filterStr[len(f.FieldName):]); op { + case "<=": + f.Op = lessEq + case ">=": + f.Op = greaterEq + case "<": + f.Op = lessThan + case ">": + f.Op = greaterThan + case "=": + f.Op = equal + default: + q.err = fmt.Errorf("datastore: invalid operator %q in filter %q", op, filterStr) + return q + } + var err error + f.FieldName, err = unquote(f.FieldName) + if err != nil { + q.err = fmt.Errorf("datastore: invalid syntax for quoted field name %q", f.FieldName) + return q + } + q.filter = append(q.filter, f) + return q +} + +// Order returns a derivative query with a field-based sort order. Orders are +// applied in the order they are added. The default order is ascending; to sort +// in descending order prefix the fieldName with a minus sign (-). +// Field names which contain spaces, quote marks, or the minus sign +// should be passed as quoted Go string literals as returned by strconv.Quote +// or the fmt package's %q verb. 
+func (q *Query) Order(fieldName string) *Query { + q = q.clone() + fieldName, dir := strings.TrimSpace(fieldName), ascending + if strings.HasPrefix(fieldName, "-") { + fieldName, dir = strings.TrimSpace(fieldName[1:]), descending + } else if strings.HasPrefix(fieldName, "+") { + q.err = fmt.Errorf("datastore: invalid order: %q", fieldName) + return q + } + fieldName, err := unquote(fieldName) + if err != nil { + q.err = fmt.Errorf("datastore: invalid syntax for quoted field name %q", fieldName) + return q + } + if fieldName == "" { + q.err = errors.New("datastore: empty order") + return q + } + q.order = append(q.order, order{ + Direction: dir, + FieldName: fieldName, + }) + return q +} + +// unquote optionally interprets s as a double-quoted or backquoted Go +// string literal if it begins with the relevant character. +func unquote(s string) (string, error) { + if s == "" || (s[0] != '`' && s[0] != '"') { + return s, nil + } + return strconv.Unquote(s) +} + +// Project returns a derivative query that yields only the given fields. It +// cannot be used with KeysOnly. +func (q *Query) Project(fieldNames ...string) *Query { + q = q.clone() + q.projection = append([]string(nil), fieldNames...) + return q +} + +// Distinct returns a derivative query that yields de-duplicated entities with +// respect to the set of projected fields. It is only used for projection +// queries. Distinct cannot be used with DistinctOn. +func (q *Query) Distinct() *Query { + q = q.clone() + q.distinct = true + return q +} + +// DistinctOn returns a derivative query that yields de-duplicated entities with +// respect to the set of the specified fields. It is only used for projection +// queries. The field list should be a subset of the projected field list. +// DistinctOn cannot be used with Distinct. 
+func (q *Query) DistinctOn(fieldNames ...string) *Query { + q = q.clone() + q.distinctOn = fieldNames + return q +} + +// KeysOnly returns a derivative query that yields only keys, not keys and +// entities. It cannot be used with projection queries. +func (q *Query) KeysOnly() *Query { + q = q.clone() + q.keysOnly = true + return q +} + +// Limit returns a derivative query that has a limit on the number of results +// returned. A negative value means unlimited. +func (q *Query) Limit(limit int) *Query { + q = q.clone() + if limit < math.MinInt32 || limit > math.MaxInt32 { + q.err = errors.New("datastore: query limit overflow") + return q + } + q.limit = int32(limit) + return q +} + +// Offset returns a derivative query that has an offset of how many keys to +// skip over before returning results. A negative value is invalid. +func (q *Query) Offset(offset int) *Query { + q = q.clone() + if offset < 0 { + q.err = errors.New("datastore: negative query offset") + return q + } + if offset > math.MaxInt32 { + q.err = errors.New("datastore: query offset overflow") + return q + } + q.offset = int32(offset) + return q +} + +// Start returns a derivative query with the given start point. +func (q *Query) Start(c Cursor) *Query { + q = q.clone() + q.start = c.cc + return q +} + +// End returns a derivative query with the given end point. +func (q *Query) End(c Cursor) *Query { + q = q.clone() + q.end = c.cc + return q +} + +// toProto converts the query to a protocol buffer. 
+func (q *Query) toProto(req *pb.RunQueryRequest) error { + if len(q.projection) != 0 && q.keysOnly { + return errors.New("datastore: query cannot both project and be keys-only") + } + if len(q.distinctOn) != 0 && q.distinct { + return errors.New("datastore: query cannot be both distinct and distinct-on") + } + dst := &pb.Query{} + if q.kind != "" { + dst.Kind = []*pb.KindExpression{{Name: q.kind}} + } + if q.projection != nil { + for _, propertyName := range q.projection { + dst.Projection = append(dst.Projection, &pb.Projection{Property: &pb.PropertyReference{Name: propertyName}}) + } + + for _, propertyName := range q.distinctOn { + dst.DistinctOn = append(dst.DistinctOn, &pb.PropertyReference{Name: propertyName}) + } + + if q.distinct { + for _, propertyName := range q.projection { + dst.DistinctOn = append(dst.DistinctOn, &pb.PropertyReference{Name: propertyName}) + } + } + } + if q.keysOnly { + dst.Projection = []*pb.Projection{{Property: &pb.PropertyReference{Name: keyFieldName}}} + } + + var filters []*pb.Filter + for _, qf := range q.filter { + if qf.FieldName == "" { + return errors.New("datastore: empty query filter field name") + } + v, err := interfaceToProto(reflect.ValueOf(qf.Value).Interface(), false) + if err != nil { + return fmt.Errorf("datastore: bad query filter value type: %v", err) + } + op, ok := operatorToProto[qf.Op] + if !ok { + return errors.New("datastore: unknown query filter operator") + } + xf := &pb.PropertyFilter{ + Op: op, + Property: &pb.PropertyReference{Name: qf.FieldName}, + Value: v, + } + filters = append(filters, &pb.Filter{ + FilterType: &pb.Filter_PropertyFilter{PropertyFilter: xf}, + }) + } + + if q.ancestor != nil { + filters = append(filters, &pb.Filter{ + FilterType: &pb.Filter_PropertyFilter{PropertyFilter: &pb.PropertyFilter{ + Property: &pb.PropertyReference{Name: keyFieldName}, + Op: pb.PropertyFilter_HAS_ANCESTOR, + Value: &pb.Value{ValueType: &pb.Value_KeyValue{KeyValue: keyToProto(q.ancestor)}}, + }}}) + } + + 
if len(filters) == 1 { + dst.Filter = filters[0] + } else if len(filters) > 1 { + dst.Filter = &pb.Filter{FilterType: &pb.Filter_CompositeFilter{CompositeFilter: &pb.CompositeFilter{ + Op: pb.CompositeFilter_AND, + Filters: filters, + }}} + } + + for _, qo := range q.order { + if qo.FieldName == "" { + return errors.New("datastore: empty query order field name") + } + xo := &pb.PropertyOrder{ + Property: &pb.PropertyReference{Name: qo.FieldName}, + Direction: sortDirectionToProto[qo.Direction], + } + dst.Order = append(dst.Order, xo) + } + if q.limit >= 0 { + dst.Limit = &wrapperspb.Int32Value{Value: q.limit} + } + dst.Offset = q.offset + dst.StartCursor = q.start + dst.EndCursor = q.end + + if t := q.trans; t != nil { + if t.id == nil { + return errExpiredTransaction + } + if q.eventual { + return errors.New("datastore: cannot use EventualConsistency query in a transaction") + } + req.ReadOptions = &pb.ReadOptions{ + ConsistencyType: &pb.ReadOptions_Transaction{Transaction: t.id}, + } + } + + if q.eventual { + req.ReadOptions = &pb.ReadOptions{ConsistencyType: &pb.ReadOptions_ReadConsistency_{ReadConsistency: pb.ReadOptions_EVENTUAL}} + } + + req.QueryType = &pb.RunQueryRequest_Query{Query: dst} + return nil +} + +// Count returns the number of results for the given query. +// +// The running time and number of API calls made by Count scale linearly with +// with the sum of the query's offset and limit. Unless the result count is +// expected to be small, it is best to specify a limit; otherwise Count will +// continue until it finishes counting or the provided context expires. +func (c *Client) Count(ctx context.Context, q *Query) (int, error) { + // Check that the query is well-formed. + if q.err != nil { + return 0, q.err + } + + // Create a copy of the query, with keysOnly true (if we're not a projection, + // since the two are incompatible). 
+ newQ := q.clone() + newQ.keysOnly = len(newQ.projection) == 0 + + // Create an iterator and use it to walk through the batches of results + // directly. + it := c.Run(ctx, newQ) + n := 0 + for { + err := it.nextBatch() + if err == iterator.Done { + return n, nil + } + if err != nil { + return 0, err + } + n += len(it.results) + } +} + +// GetAll runs the provided query in the given context and returns all keys +// that match that query, as well as appending the values to dst. +// +// dst must have type *[]S or *[]*S or *[]P, for some struct type S or some non- +// interface, non-pointer type P such that P or *P implements PropertyLoadSaver. +// +// As a special case, *PropertyList is an invalid type for dst, even though a +// PropertyList is a slice of structs. It is treated as invalid to avoid being +// mistakenly passed when *[]PropertyList was intended. +// +// The keys returned by GetAll will be in a 1-1 correspondence with the entities +// added to dst. +// +// If q is a ``keys-only'' query, GetAll ignores dst and only returns the keys. +// +// The running time and number of API calls made by GetAll scale linearly with +// with the sum of the query's offset and limit. Unless the result count is +// expected to be small, it is best to specify a limit; otherwise GetAll will +// continue until it finishes collecting results or the provided context +// expires. 
+func (c *Client) GetAll(ctx context.Context, q *Query, dst interface{}) ([]*Key, error) { + var ( + dv reflect.Value + mat multiArgType + elemType reflect.Type + errFieldMismatch error + ) + if !q.keysOnly { + dv = reflect.ValueOf(dst) + if dv.Kind() != reflect.Ptr || dv.IsNil() { + return nil, ErrInvalidEntityType + } + dv = dv.Elem() + mat, elemType = checkMultiArg(dv) + if mat == multiArgTypeInvalid || mat == multiArgTypeInterface { + return nil, ErrInvalidEntityType + } + } + + var keys []*Key + for t := c.Run(ctx, q); ; { + k, e, err := t.next() + if err == iterator.Done { + break + } + if err != nil { + return keys, err + } + if !q.keysOnly { + ev := reflect.New(elemType) + if elemType.Kind() == reflect.Map { + // This is a special case. The zero values of a map type are + // not immediately useful; they have to be make'd. + // + // Funcs and channels are similar, in that a zero value is not useful, + // but even a freshly make'd channel isn't useful: there's no fixed + // channel buffer size that is always going to be large enough, and + // there's no goroutine to drain the other end. Theoretically, these + // types could be supported, for example by sniffing for a constructor + // method or requiring prior registration, but for now it's not a + // frequent enough concern to be worth it. Programmers can work around + // it by explicitly using Iterator.Next instead of the Query.GetAll + // convenience method. + x := reflect.MakeMap(elemType) + ev.Elem().Set(x) + } + if err = loadEntityProto(ev.Interface(), e); err != nil { + if _, ok := err.(*ErrFieldMismatch); ok { + // We continue loading entities even in the face of field mismatch errors. + // If we encounter any other error, that other error is returned. Otherwise, + // an ErrFieldMismatch is returned. 
+ errFieldMismatch = err + } else { + return keys, err + } + } + if mat != multiArgTypeStructPtr { + ev = ev.Elem() + } + dv.Set(reflect.Append(dv, ev)) + } + keys = append(keys, k) + } + return keys, errFieldMismatch +} + +// Run runs the given query in the given context. +func (c *Client) Run(ctx context.Context, q *Query) *Iterator { + if q.err != nil { + return &Iterator{err: q.err} + } + t := &Iterator{ + ctx: ctx, + client: c, + limit: q.limit, + offset: q.offset, + keysOnly: q.keysOnly, + pageCursor: q.start, + entityCursor: q.start, + req: &pb.RunQueryRequest{ + ProjectId: c.dataset, + }, + } + if q.namespace != "" { + t.req.PartitionId = &pb.PartitionId{ + NamespaceId: q.namespace, + } + } + + if err := q.toProto(t.req); err != nil { + t.err = err + } + return t +} + +// Iterator is the result of running a query. +type Iterator struct { + ctx context.Context + client *Client + err error + + // results is the list of EntityResults still to be iterated over from the + // most recent API call. It will be nil if no requests have yet been issued. + results []*pb.EntityResult + // req is the request to send. It may be modified and used multiple times. + req *pb.RunQueryRequest + + // limit is the limit on the number of results this iterator should return. + // The zero value is used to prevent further fetches from the server. + // A negative value means unlimited. + limit int32 + // offset is the number of results that still need to be skipped. + offset int32 + // keysOnly records whether the query was keys-only (skip entity loading). + keysOnly bool + + // pageCursor is the compiled cursor for the next batch/page of result. + // TODO(djd): Can we delete this in favour of paging with the last + // entityCursor from each batch? + pageCursor []byte + // entityCursor is the compiled cursor of the next result. + entityCursor []byte +} + +// Next returns the key of the next result. When there are no more results, +// iterator.Done is returned as the error. 
+// +// If the query is not keys only and dst is non-nil, it also loads the entity +// stored for that key into the struct pointer or PropertyLoadSaver dst, with +// the same semantics and possible errors as for the Get function. +func (t *Iterator) Next(dst interface{}) (*Key, error) { + k, e, err := t.next() + if err != nil { + return nil, err + } + if dst != nil && !t.keysOnly { + err = loadEntityProto(dst, e) + } + return k, err +} + +func (t *Iterator) next() (*Key, *pb.Entity, error) { + // Fetch additional batches while there are no more results. + for t.err == nil && len(t.results) == 0 { + t.err = t.nextBatch() + } + if t.err != nil { + return nil, nil, t.err + } + + // Extract the next result, update cursors, and parse the entity's key. + e := t.results[0] + t.results = t.results[1:] + t.entityCursor = e.Cursor + if len(t.results) == 0 { + t.entityCursor = t.pageCursor // At the end of the batch. + } + if e.Entity.Key == nil { + return nil, nil, errors.New("datastore: internal error: server did not return a key") + } + k, err := protoToKey(e.Entity.Key) + if err != nil || k.Incomplete() { + return nil, nil, errors.New("datastore: internal error: server returned an invalid key") + } + + return k, e.Entity, nil +} + +// nextBatch makes a single call to the server for a batch of results. +func (t *Iterator) nextBatch() error { + if t.limit == 0 { + return iterator.Done // Short-circuits the zero-item response. + } + + // Adjust the query with the latest start cursor, limit and offset. + q := t.req.GetQuery() + q.StartCursor = t.pageCursor + q.Offset = t.offset + if t.limit >= 0 { + q.Limit = &wrapperspb.Int32Value{Value: t.limit} + } else { + q.Limit = nil + } + + // Run the query. + resp, err := t.client.client.RunQuery(t.ctx, t.req) + if err != nil { + return err + } + + // Adjust any offset from skipped results. 
+ skip := resp.Batch.SkippedResults + if skip < 0 { + return errors.New("datastore: internal error: negative number of skipped_results") + } + t.offset -= skip + if t.offset < 0 { + return errors.New("datastore: internal error: query skipped too many results") + } + if t.offset > 0 && len(resp.Batch.EntityResults) > 0 { + return errors.New("datastore: internal error: query returned results before requested offset") + } + + // Adjust the limit. + if t.limit >= 0 { + t.limit -= int32(len(resp.Batch.EntityResults)) + if t.limit < 0 { + return errors.New("datastore: internal error: query returned more results than the limit") + } + } + + // If there are no more results available, set limit to zero to prevent + // further fetches. Otherwise, check that there is a next page cursor available. + if resp.Batch.MoreResults != pb.QueryResultBatch_NOT_FINISHED { + t.limit = 0 + } else if resp.Batch.EndCursor == nil { + return errors.New("datastore: internal error: server did not return a cursor") + } + + // Update cursors. + // If any results were skipped, use the SkippedCursor as the next entity cursor. + if skip > 0 { + t.entityCursor = resp.Batch.SkippedCursor + } else { + t.entityCursor = q.StartCursor + } + t.pageCursor = resp.Batch.EndCursor + + t.results = resp.Batch.EntityResults + return nil +} + +// Cursor returns a cursor for the iterator's current location. +func (t *Iterator) Cursor() (Cursor, error) { + // If there is still an offset, we need to the skip those results first. + for t.err == nil && t.offset > 0 { + t.err = t.nextBatch() + } + + if t.err != nil && t.err != iterator.Done { + return Cursor{}, t.err + } + + return Cursor{t.entityCursor}, nil +} + +// Cursor is an iterator's position. It can be converted to and from an opaque +// string. A cursor can be used from different HTTP requests, but only with a +// query with the same kind, ancestor, filter and order constraints. 
+// +// The zero Cursor can be used to indicate that there is no start and/or end +// constraint for a query. +type Cursor struct { + cc []byte +} + +// String returns a base-64 string representation of a cursor. +func (c Cursor) String() string { + if c.cc == nil { + return "" + } + + return strings.TrimRight(base64.URLEncoding.EncodeToString(c.cc), "=") +} + +// Decode decodes a cursor from its base-64 string representation. +func DecodeCursor(s string) (Cursor, error) { + if s == "" { + return Cursor{}, nil + } + if n := len(s) % 4; n != 0 { + s += strings.Repeat("=", 4-n) + } + b, err := base64.URLEncoding.DecodeString(s) + if err != nil { + return Cursor{}, err + } + return Cursor{b}, nil +} diff --git a/vendor/cloud.google.com/go/datastore/query_test.go b/vendor/cloud.google.com/go/datastore/query_test.go new file mode 100644 index 00000000..e9435a27 --- /dev/null +++ b/vendor/cloud.google.com/go/datastore/query_test.go @@ -0,0 +1,536 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package datastore + +import ( + "errors" + "fmt" + "reflect" + "sort" + "testing" + + "github.com/golang/protobuf/proto" + "golang.org/x/net/context" + pb "google.golang.org/genproto/googleapis/datastore/v1" + "google.golang.org/grpc" +) + +var ( + key1 = &pb.Key{ + Path: []*pb.Key_PathElement{ + { + Kind: "Gopher", + IdType: &pb.Key_PathElement_Id{6}, + }, + }, + } + key2 = &pb.Key{ + Path: []*pb.Key_PathElement{ + { + Kind: "Gopher", + IdType: &pb.Key_PathElement_Id{6}, + }, + { + Kind: "Gopher", + IdType: &pb.Key_PathElement_Id{8}, + }, + }, + } +) + +type fakeClient struct { + pb.DatastoreClient + queryFn func(*pb.RunQueryRequest) (*pb.RunQueryResponse, error) + commitFn func(*pb.CommitRequest) (*pb.CommitResponse, error) +} + +func (c *fakeClient) RunQuery(_ context.Context, req *pb.RunQueryRequest, _ ...grpc.CallOption) (*pb.RunQueryResponse, error) { + return c.queryFn(req) +} + +func (c *fakeClient) Commit(_ context.Context, req *pb.CommitRequest, _ ...grpc.CallOption) (*pb.CommitResponse, error) { + return c.commitFn(req) +} + +func fakeRunQuery(in *pb.RunQueryRequest) (*pb.RunQueryResponse, error) { + expectedIn := &pb.RunQueryRequest{ + QueryType: &pb.RunQueryRequest_Query{&pb.Query{ + Kind: []*pb.KindExpression{{Name: "Gopher"}}, + }}, + } + if !proto.Equal(in, expectedIn) { + return nil, fmt.Errorf("unsupported argument: got %v want %v", in, expectedIn) + } + return &pb.RunQueryResponse{ + Batch: &pb.QueryResultBatch{ + MoreResults: pb.QueryResultBatch_NO_MORE_RESULTS, + EntityResultType: pb.EntityResult_FULL, + EntityResults: []*pb.EntityResult{ + { + Entity: &pb.Entity{ + Key: key1, + Properties: map[string]*pb.Value{ + "Name": {ValueType: &pb.Value_StringValue{"George"}}, + "Height": {ValueType: &pb.Value_IntegerValue{32}}, + }, + }, + }, + { + Entity: &pb.Entity{ + Key: key2, + Properties: map[string]*pb.Value{ + "Name": {ValueType: &pb.Value_StringValue{"Rufus"}}, + // No height for Rufus. 
+ }, + }, + }, + }, + }, + }, nil +} + +type StructThatImplementsPLS struct{} + +func (StructThatImplementsPLS) Load(p []Property) error { return nil } +func (StructThatImplementsPLS) Save() ([]Property, error) { return nil, nil } + +var _ PropertyLoadSaver = StructThatImplementsPLS{} + +type StructPtrThatImplementsPLS struct{} + +func (*StructPtrThatImplementsPLS) Load(p []Property) error { return nil } +func (*StructPtrThatImplementsPLS) Save() ([]Property, error) { return nil, nil } + +var _ PropertyLoadSaver = &StructPtrThatImplementsPLS{} + +type PropertyMap map[string]Property + +func (m PropertyMap) Load(props []Property) error { + for _, p := range props { + m[p.Name] = p + } + return nil +} + +func (m PropertyMap) Save() ([]Property, error) { + props := make([]Property, 0, len(m)) + for _, p := range m { + props = append(props, p) + } + return props, nil +} + +var _ PropertyLoadSaver = PropertyMap{} + +type Gopher struct { + Name string + Height int +} + +// typeOfEmptyInterface is the type of interface{}, but we can't use +// reflect.TypeOf((interface{})(nil)) directly because TypeOf takes an +// interface{}. +var typeOfEmptyInterface = reflect.TypeOf((*interface{})(nil)).Elem() + +func TestCheckMultiArg(t *testing.T) { + testCases := []struct { + v interface{} + mat multiArgType + elemType reflect.Type + }{ + // Invalid cases. + {nil, multiArgTypeInvalid, nil}, + {Gopher{}, multiArgTypeInvalid, nil}, + {&Gopher{}, multiArgTypeInvalid, nil}, + {PropertyList{}, multiArgTypeInvalid, nil}, // This is a special case. + {PropertyMap{}, multiArgTypeInvalid, nil}, + {[]*PropertyList(nil), multiArgTypeInvalid, nil}, + {[]*PropertyMap(nil), multiArgTypeInvalid, nil}, + {[]**Gopher(nil), multiArgTypeInvalid, nil}, + {[]*interface{}(nil), multiArgTypeInvalid, nil}, + // Valid cases. 
+ { + []PropertyList(nil), + multiArgTypePropertyLoadSaver, + reflect.TypeOf(PropertyList{}), + }, + { + []PropertyMap(nil), + multiArgTypePropertyLoadSaver, + reflect.TypeOf(PropertyMap{}), + }, + { + []StructThatImplementsPLS(nil), + multiArgTypePropertyLoadSaver, + reflect.TypeOf(StructThatImplementsPLS{}), + }, + { + []StructPtrThatImplementsPLS(nil), + multiArgTypePropertyLoadSaver, + reflect.TypeOf(StructPtrThatImplementsPLS{}), + }, + { + []Gopher(nil), + multiArgTypeStruct, + reflect.TypeOf(Gopher{}), + }, + { + []*Gopher(nil), + multiArgTypeStructPtr, + reflect.TypeOf(Gopher{}), + }, + { + []interface{}(nil), + multiArgTypeInterface, + typeOfEmptyInterface, + }, + } + for _, tc := range testCases { + mat, elemType := checkMultiArg(reflect.ValueOf(tc.v)) + if mat != tc.mat || elemType != tc.elemType { + t.Errorf("checkMultiArg(%T): got %v, %v want %v, %v", + tc.v, mat, elemType, tc.mat, tc.elemType) + } + } +} + +func TestSimpleQuery(t *testing.T) { + struct1 := Gopher{Name: "George", Height: 32} + struct2 := Gopher{Name: "Rufus"} + pList1 := PropertyList{ + { + Name: "Height", + Value: int64(32), + }, + { + Name: "Name", + Value: "George", + }, + } + pList2 := PropertyList{ + { + Name: "Name", + Value: "Rufus", + }, + } + pMap1 := PropertyMap{ + "Name": Property{ + Name: "Name", + Value: "George", + }, + "Height": Property{ + Name: "Height", + Value: int64(32), + }, + } + pMap2 := PropertyMap{ + "Name": Property{ + Name: "Name", + Value: "Rufus", + }, + } + + testCases := []struct { + dst interface{} + want interface{} + }{ + // The destination must have type *[]P, *[]S or *[]*S, for some non-interface + // type P such that *P implements PropertyLoadSaver, or for some struct type S. + {new([]Gopher), &[]Gopher{struct1, struct2}}, + {new([]*Gopher), &[]*Gopher{&struct1, &struct2}}, + {new([]PropertyList), &[]PropertyList{pList1, pList2}}, + {new([]PropertyMap), &[]PropertyMap{pMap1, pMap2}}, + + // Any other destination type is invalid. 
+ {0, nil}, + {Gopher{}, nil}, + {PropertyList{}, nil}, + {PropertyMap{}, nil}, + {[]int{}, nil}, + {[]Gopher{}, nil}, + {[]PropertyList{}, nil}, + {new(int), nil}, + {new(Gopher), nil}, + {new(PropertyList), nil}, // This is a special case. + {new(PropertyMap), nil}, + {new([]int), nil}, + {new([]map[int]int), nil}, + {new([]map[string]Property), nil}, + {new([]map[string]interface{}), nil}, + {new([]*int), nil}, + {new([]*map[int]int), nil}, + {new([]*map[string]Property), nil}, + {new([]*map[string]interface{}), nil}, + {new([]**Gopher), nil}, + {new([]*PropertyList), nil}, + {new([]*PropertyMap), nil}, + } + for _, tc := range testCases { + nCall := 0 + client := &Client{ + client: &fakeClient{ + queryFn: func(req *pb.RunQueryRequest) (*pb.RunQueryResponse, error) { + nCall++ + return fakeRunQuery(req) + }, + }, + } + ctx := context.Background() + + var ( + expectedErr error + expectedNCall int + ) + if tc.want == nil { + expectedErr = ErrInvalidEntityType + } else { + expectedNCall = 1 + } + keys, err := client.GetAll(ctx, NewQuery("Gopher"), tc.dst) + if err != expectedErr { + t.Errorf("dst type %T: got error %v, want %v", tc.dst, err, expectedErr) + continue + } + if nCall != expectedNCall { + t.Errorf("dst type %T: Context.Call was called an incorrect number of times: got %d want %d", tc.dst, nCall, expectedNCall) + continue + } + if err != nil { + continue + } + + key1 := IDKey("Gopher", 6, nil) + expectedKeys := []*Key{ + key1, + IDKey("Gopher", 8, key1), + } + if l1, l2 := len(keys), len(expectedKeys); l1 != l2 { + t.Errorf("dst type %T: got %d keys, want %d keys", tc.dst, l1, l2) + continue + } + for i, key := range keys { + if !keysEqual(key, expectedKeys[i]) { + t.Errorf("dst type %T: got key #%d %v, want %v", tc.dst, i, key, expectedKeys[i]) + continue + } + } + + // Make sure we sort any PropertyList items (the order is not deterministic). 
+ if pLists, ok := tc.dst.(*[]PropertyList); ok { + for _, p := range *pLists { + sort.Sort(byName(p)) + } + } + + if !reflect.DeepEqual(tc.dst, tc.want) { + t.Errorf("dst type %T: Entities\ngot %+v\nwant %+v", tc.dst, tc.dst, tc.want) + continue + } + } +} + +// keysEqual is like (*Key).Equal, but ignores the App ID. +func keysEqual(a, b *Key) bool { + for a != nil && b != nil { + if a.Kind != b.Kind || a.Name != b.Name || a.ID != b.ID { + return false + } + a, b = a.Parent, b.Parent + } + return a == b +} + +func TestQueriesAreImmutable(t *testing.T) { + // Test that deriving q2 from q1 does not modify q1. + q0 := NewQuery("foo") + q1 := NewQuery("foo") + q2 := q1.Offset(2) + if !reflect.DeepEqual(q0, q1) { + t.Errorf("q0 and q1 were not equal") + } + if reflect.DeepEqual(q1, q2) { + t.Errorf("q1 and q2 were equal") + } + + // Test that deriving from q4 twice does not conflict, even though + // q4 has a long list of order clauses. This tests that the arrays + // backed by a query's slice of orders are not shared. + f := func() *Query { + q := NewQuery("bar") + // 47 is an ugly number that is unlikely to be near a re-allocation + // point in repeated append calls. For example, it's not near a power + // of 2 or a multiple of 10. + for i := 0; i < 47; i++ { + q = q.Order(fmt.Sprintf("x%d", i)) + } + return q + } + q3 := f().Order("y") + q4 := f() + q5 := q4.Order("y") + q6 := q4.Order("z") + if !reflect.DeepEqual(q3, q5) { + t.Errorf("q3 and q5 were not equal") + } + if reflect.DeepEqual(q5, q6) { + t.Errorf("q5 and q6 were equal") + } +} + +func TestFilterParser(t *testing.T) { + testCases := []struct { + filterStr string + wantOK bool + wantFieldName string + wantOp operator + }{ + // Supported ops. 
+ {"x<", true, "x", lessThan}, + {"x <", true, "x", lessThan}, + {"x <", true, "x", lessThan}, + {" x < ", true, "x", lessThan}, + {"x <=", true, "x", lessEq}, + {"x =", true, "x", equal}, + {"x >=", true, "x", greaterEq}, + {"x >", true, "x", greaterThan}, + {"in >", true, "in", greaterThan}, + {"in>", true, "in", greaterThan}, + // Valid but (currently) unsupported ops. + {"x!=", false, "", 0}, + {"x !=", false, "", 0}, + {" x != ", false, "", 0}, + {"x IN", false, "", 0}, + {"x in", false, "", 0}, + // Invalid ops. + {"x EQ", false, "", 0}, + {"x lt", false, "", 0}, + {"x <>", false, "", 0}, + {"x >>", false, "", 0}, + {"x ==", false, "", 0}, + {"x =<", false, "", 0}, + {"x =>", false, "", 0}, + {"x !", false, "", 0}, + {"x ", false, "", 0}, + {"x", false, "", 0}, + // Quoted and interesting field names. + {"x > y =", true, "x > y", equal}, + {"` x ` =", true, " x ", equal}, + {`" x " =`, true, " x ", equal}, + {`" \"x " =`, true, ` "x `, equal}, + {`" x =`, false, "", 0}, + {`" x ="`, false, "", 0}, + {"` x \" =", false, "", 0}, + } + for _, tc := range testCases { + q := NewQuery("foo").Filter(tc.filterStr, 42) + if ok := q.err == nil; ok != tc.wantOK { + t.Errorf("%q: ok=%t, want %t", tc.filterStr, ok, tc.wantOK) + continue + } + if !tc.wantOK { + continue + } + if len(q.filter) != 1 { + t.Errorf("%q: len=%d, want %d", tc.filterStr, len(q.filter), 1) + continue + } + got, want := q.filter[0], filter{tc.wantFieldName, tc.wantOp, 42} + if got != want { + t.Errorf("%q: got %v, want %v", tc.filterStr, got, want) + continue + } + } +} + +func TestNamespaceQuery(t *testing.T) { + gotNamespace := make(chan string, 1) + ctx := context.Background() + client := &Client{ + client: &fakeClient{ + queryFn: func(req *pb.RunQueryRequest) (*pb.RunQueryResponse, error) { + if part := req.PartitionId; part != nil { + gotNamespace <- part.NamespaceId + } else { + gotNamespace <- "" + } + return nil, errors.New("not implemented") + }, + }, + } + + var gs []Gopher + + 
client.GetAll(ctx, NewQuery("gopher"), &gs) + if got, want := <-gotNamespace, ""; got != want { + t.Errorf("GetAll: got namespace %q, want %q", got, want) + } + client.Count(ctx, NewQuery("gopher")) + if got, want := <-gotNamespace, ""; got != want { + t.Errorf("Count: got namespace %q, want %q", got, want) + } + + const ns = "not_default" + client.GetAll(ctx, NewQuery("gopher").Namespace(ns), &gs) + if got, want := <-gotNamespace, ns; got != want { + t.Errorf("GetAll: got namespace %q, want %q", got, want) + } + client.Count(ctx, NewQuery("gopher").Namespace(ns)) + if got, want := <-gotNamespace, ns; got != want { + t.Errorf("Count: got namespace %q, want %q", got, want) + } +} + +func TestReadOptions(t *testing.T) { + tid := []byte{1} + for _, test := range []struct { + q *Query + want *pb.ReadOptions + }{ + { + q: NewQuery(""), + want: nil, + }, + { + q: NewQuery("").Transaction(nil), + want: nil, + }, + { + q: NewQuery("").Transaction(&Transaction{id: tid}), + want: &pb.ReadOptions{&pb.ReadOptions_Transaction{tid}}, + }, + { + q: NewQuery("").EventualConsistency(), + want: &pb.ReadOptions{&pb.ReadOptions_ReadConsistency_{pb.ReadOptions_EVENTUAL}}, + }, + } { + req := &pb.RunQueryRequest{} + if err := test.q.toProto(req); err != nil { + t.Fatalf("%+v: got %v, want no error", test.q, err) + } + if got := req.ReadOptions; !proto.Equal(got, test.want) { + t.Errorf("%+v:\ngot %+v\nwant %+v", test.q, got, test.want) + } + } + // Test errors. 
+ for _, q := range []*Query{ + NewQuery("").Transaction(&Transaction{id: nil}), + NewQuery("").Transaction(&Transaction{id: tid}).EventualConsistency(), + } { + req := &pb.RunQueryRequest{} + if err := q.toProto(req); err == nil { + t.Errorf("%+v: got nil, wanted error", q) + } + } +} diff --git a/vendor/cloud.google.com/go/datastore/save.go b/vendor/cloud.google.com/go/datastore/save.go new file mode 100644 index 00000000..70bfaaa6 --- /dev/null +++ b/vendor/cloud.google.com/go/datastore/save.go @@ -0,0 +1,383 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package datastore + +import ( + "errors" + "fmt" + "reflect" + "time" + + timepb "github.com/golang/protobuf/ptypes/timestamp" + pb "google.golang.org/genproto/googleapis/datastore/v1" + llpb "google.golang.org/genproto/googleapis/type/latlng" +) + +type saveOpts struct { + noIndex bool + flatten bool + omitEmpty bool +} + +// saveEntity saves an EntityProto into a PropertyLoadSaver or struct pointer. +func saveEntity(key *Key, src interface{}) (*pb.Entity, error) { + var err error + var props []Property + if e, ok := src.(PropertyLoadSaver); ok { + props, err = e.Save() + } else { + props, err = SaveStruct(src) + } + if err != nil { + return nil, err + } + return propertiesToProto(key, props) +} + +// TODO(djd): Convert this and below to return ([]Property, error). 
+func saveStructProperty(props *[]Property, name string, opts saveOpts, v reflect.Value) error { + p := Property{ + Name: name, + NoIndex: opts.noIndex, + } + + if opts.omitEmpty && isEmptyValue(v) { + return nil + } + + // Check if v implements PropertyLoadSaver. + pls, isPLS := v.Interface().(PropertyLoadSaver) + + switch x := v.Interface().(type) { + case *Key, time.Time, GeoPoint: + p.Value = x + default: + switch v.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + p.Value = v.Int() + case reflect.Bool: + p.Value = v.Bool() + case reflect.String: + p.Value = v.String() + case reflect.Float32, reflect.Float64: + p.Value = v.Float() + case reflect.Slice: + if v.Type().Elem().Kind() == reflect.Uint8 { + p.Value = v.Bytes() + } else { + return saveSliceProperty(props, name, opts, v) + } + case reflect.Ptr: + if v.Type().Elem().Kind() != reflect.Struct { + return fmt.Errorf("datastore: unsupported struct field type: %s", v.Type()) + } + if v.IsNil() { + return nil + } + v = v.Elem() + fallthrough + case reflect.Struct: + if isPLS { + subProps, err := pls.Save() + if err != nil { + return err + } + p.Value = &Entity{Properties: subProps} + break + } + + if !v.CanAddr() { + return fmt.Errorf("datastore: unsupported struct field: value is unaddressable") + } + sub, err := newStructPLS(v.Addr().Interface()) + if err != nil { + return fmt.Errorf("datastore: unsupported struct field: %v", err) + } + + if opts.flatten { + return sub.save(props, opts, name+".") + } + + var subProps []Property + err = sub.save(&subProps, opts, "") + if err != nil { + return err + } + subKey, err := sub.key(v) + if err != nil { + return err + } + + p.Value = &Entity{ + Key: subKey, + Properties: subProps, + } + } + } + if p.Value == nil { + return fmt.Errorf("datastore: unsupported struct field type: %v", v.Type()) + } + *props = append(*props, p) + return nil +} + +// key extracts the *Key struct field from struct v based on the structCodec of s. 
+func (s structPLS) key(v reflect.Value) (*Key, error) { + if v.Kind() != reflect.Struct { + return nil, errors.New("datastore: cannot save key of non-struct type") + } + + keyField := s.codec.Match(keyFieldName) + + if keyField == nil { + return nil, nil + } + + f := v.FieldByIndex(keyField.Index) + k, ok := f.Interface().(*Key) + if !ok { + return nil, fmt.Errorf("datastore: %s field on struct %T is not a *datastore.Key", keyFieldName, v.Interface()) + } + + return k, nil +} + +func saveSliceProperty(props *[]Property, name string, opts saveOpts, v reflect.Value) error { + // Easy case: if the slice is empty, we're done. + if v.Len() == 0 { + return nil + } + // Work out the properties generated by the first element in the slice. This will + // usually be a single property, but will be more if this is a slice of structs. + var headProps []Property + if err := saveStructProperty(&headProps, name, opts, v.Index(0)); err != nil { + return err + } + + // Convert the first element's properties into slice properties, and + // keep track of the values in a map. + values := make(map[string][]interface{}, len(headProps)) + for _, p := range headProps { + values[p.Name] = append(make([]interface{}, 0, v.Len()), p.Value) + } + + // Find the elements for the subsequent elements. + for i := 1; i < v.Len(); i++ { + elemProps := make([]Property, 0, len(headProps)) + if err := saveStructProperty(&elemProps, name, opts, v.Index(i)); err != nil { + return err + } + for _, p := range elemProps { + v, ok := values[p.Name] + if !ok { + return fmt.Errorf("datastore: unexpected property %q in elem %d of slice", p.Name, i) + } + values[p.Name] = append(v, p.Value) + } + } + + // Convert to the final properties. 
+ for _, p := range headProps { + p.Value = values[p.Name] + *props = append(*props, p) + } + return nil +} + +func (s structPLS) Save() ([]Property, error) { + var props []Property + if err := s.save(&props, saveOpts{}, ""); err != nil { + return nil, err + } + return props, nil +} + +func (s structPLS) save(props *[]Property, opts saveOpts, prefix string) error { + for _, f := range s.codec { + name := prefix + f.Name + v := getField(s.v, f.Index) + if !v.IsValid() || !v.CanSet() { + continue + } + + var tagOpts saveOpts + if f.ParsedTag != nil { + tagOpts = f.ParsedTag.(saveOpts) + } + + var opts1 saveOpts + opts1.noIndex = opts.noIndex || tagOpts.noIndex + opts1.flatten = opts.flatten || tagOpts.flatten + opts1.omitEmpty = tagOpts.omitEmpty // don't propagate + if err := saveStructProperty(props, name, opts1, v); err != nil { + return err + } + } + return nil +} + +// getField returns the field from v at the given index path. +// If it encounters a nil-valued field in the path, getField +// stops and returns a zero-valued reflect.Value, preventing the +// panic that would have been caused by reflect's FieldByIndex. +func getField(v reflect.Value, index []int) reflect.Value { + var zero reflect.Value + if v.Type().Kind() != reflect.Struct { + return zero + } + + for _, i := range index { + if v.Kind() == reflect.Ptr && v.Type().Elem().Kind() == reflect.Struct { + if v.IsNil() { + return zero + } + v = v.Elem() + } + v = v.Field(i) + } + return v +} + +func propertiesToProto(key *Key, props []Property) (*pb.Entity, error) { + e := &pb.Entity{ + Key: keyToProto(key), + Properties: map[string]*pb.Value{}, + } + indexedProps := 0 + for _, p := range props { + // Do not send a Key value as a field to datastore. 
+ if p.Name == keyFieldName { + continue + } + + val, err := interfaceToProto(p.Value, p.NoIndex) + if err != nil { + return nil, fmt.Errorf("datastore: %v for a Property with Name %q", err, p.Name) + } + if !p.NoIndex { + rVal := reflect.ValueOf(p.Value) + if rVal.Kind() == reflect.Slice && rVal.Type().Elem().Kind() != reflect.Uint8 { + indexedProps += rVal.Len() + } else { + indexedProps++ + } + } + if indexedProps > maxIndexedProperties { + return nil, errors.New("datastore: too many indexed properties") + } + + if _, ok := e.Properties[p.Name]; ok { + return nil, fmt.Errorf("datastore: duplicate Property with Name %q", p.Name) + } + e.Properties[p.Name] = val + } + return e, nil +} + +func interfaceToProto(iv interface{}, noIndex bool) (*pb.Value, error) { + val := &pb.Value{ExcludeFromIndexes: noIndex} + switch v := iv.(type) { + case int: + val.ValueType = &pb.Value_IntegerValue{int64(v)} + case int32: + val.ValueType = &pb.Value_IntegerValue{int64(v)} + case int64: + val.ValueType = &pb.Value_IntegerValue{v} + case bool: + val.ValueType = &pb.Value_BooleanValue{v} + case string: + if len(v) > 1500 && !noIndex { + return nil, errors.New("string property too long to index") + } + val.ValueType = &pb.Value_StringValue{v} + case float32: + val.ValueType = &pb.Value_DoubleValue{float64(v)} + case float64: + val.ValueType = &pb.Value_DoubleValue{v} + case *Key: + if v == nil { + val.ValueType = &pb.Value_NullValue{} + } else { + val.ValueType = &pb.Value_KeyValue{keyToProto(v)} + } + case GeoPoint: + if !v.Valid() { + return nil, errors.New("invalid GeoPoint value") + } + val.ValueType = &pb.Value_GeoPointValue{&llpb.LatLng{ + Latitude: v.Lat, + Longitude: v.Lng, + }} + case time.Time: + if v.Before(minTime) || v.After(maxTime) { + return nil, errors.New("time value out of range") + } + val.ValueType = &pb.Value_TimestampValue{&timepb.Timestamp{ + Seconds: v.Unix(), + Nanos: int32(v.Nanosecond()), + }} + case []byte: + if len(v) > 1500 && !noIndex { + return nil, 
errors.New("[]byte property too long to index") + } + val.ValueType = &pb.Value_BlobValue{v} + case *Entity: + e, err := propertiesToProto(v.Key, v.Properties) + if err != nil { + return nil, err + } + val.ValueType = &pb.Value_EntityValue{e} + case []interface{}: + arr := make([]*pb.Value, 0, len(v)) + for i, v := range v { + elem, err := interfaceToProto(v, noIndex) + if err != nil { + return nil, fmt.Errorf("%v at index %d", err, i) + } + arr = append(arr, elem) + } + val.ValueType = &pb.Value_ArrayValue{&pb.ArrayValue{arr}} + // ArrayValues have ExcludeFromIndexes set on the individual items, rather + // than the top-level value. + val.ExcludeFromIndexes = false + default: + if iv != nil { + return nil, fmt.Errorf("invalid Value type %t", iv) + } + val.ValueType = &pb.Value_NullValue{} + } + // TODO(jbd): Support EntityValue. + return val, nil +} + +// isEmptyValue is taken from the encoding/json package in the +// standard library. +func isEmptyValue(v reflect.Value) bool { + switch v.Kind() { + case reflect.Array, reflect.Map, reflect.Slice, reflect.String: + return v.Len() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Interface, reflect.Ptr: + return v.IsNil() + } + return false +} diff --git a/vendor/cloud.google.com/go/datastore/save_test.go b/vendor/cloud.google.com/go/datastore/save_test.go new file mode 100644 index 00000000..4d729baa --- /dev/null +++ b/vendor/cloud.google.com/go/datastore/save_test.go @@ -0,0 +1,194 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package datastore + +import ( + "reflect" + "testing" + + pb "google.golang.org/genproto/googleapis/datastore/v1" +) + +func TestInterfaceToProtoNilKey(t *testing.T) { + var iv *Key + pv, err := interfaceToProto(iv, false) + if err != nil { + t.Fatalf("nil key: interfaceToProto: %v", err) + } + + _, ok := pv.ValueType.(*pb.Value_NullValue) + if !ok { + t.Errorf("nil key: type:\ngot: %T\nwant: %T", pv.ValueType, &pb.Value_NullValue{}) + } +} + +func TestSaveEntityNested(t *testing.T) { + type WithKey struct { + X string + I int + K *Key `datastore:"__key__"` + } + + type NestedWithKey struct { + Y string + N WithKey + } + + type WithoutKey struct { + X string + I int + } + + type NestedWithoutKey struct { + Y string + N WithoutKey + } + + type a struct { + S string + } + + type UnexpAnonym struct { + a + } + + testCases := []struct { + desc string + src interface{} + key *Key + want *pb.Entity + }{ + { + "nested entity with key", + &NestedWithKey{ + Y: "yyy", + N: WithKey{ + X: "two", + I: 2, + K: testKey1a, + }, + }, + testKey0, + &pb.Entity{ + Key: keyToProto(testKey0), + Properties: map[string]*pb.Value{ + "Y": {ValueType: &pb.Value_StringValue{"yyy"}}, + "N": {ValueType: &pb.Value_EntityValue{ + &pb.Entity{ + Key: keyToProto(testKey1a), + Properties: map[string]*pb.Value{ + "X": {ValueType: &pb.Value_StringValue{"two"}}, + "I": {ValueType: &pb.Value_IntegerValue{2}}, + }, + }, + }}, + }, + }, + }, + { + "nested entity with incomplete key", + &NestedWithKey{ + Y: "yyy", + N: WithKey{ + X: "two", + I: 2, + K: incompleteKey, + }, + }, + 
testKey0, + &pb.Entity{ + Key: keyToProto(testKey0), + Properties: map[string]*pb.Value{ + "Y": {ValueType: &pb.Value_StringValue{"yyy"}}, + "N": {ValueType: &pb.Value_EntityValue{ + &pb.Entity{ + Key: keyToProto(incompleteKey), + Properties: map[string]*pb.Value{ + "X": {ValueType: &pb.Value_StringValue{"two"}}, + "I": {ValueType: &pb.Value_IntegerValue{2}}, + }, + }, + }}, + }, + }, + }, + { + "nested entity without key", + &NestedWithoutKey{ + Y: "yyy", + N: WithoutKey{ + X: "two", + I: 2, + }, + }, + testKey0, + &pb.Entity{ + Key: keyToProto(testKey0), + Properties: map[string]*pb.Value{ + "Y": {ValueType: &pb.Value_StringValue{"yyy"}}, + "N": {ValueType: &pb.Value_EntityValue{ + &pb.Entity{ + Properties: map[string]*pb.Value{ + "X": {ValueType: &pb.Value_StringValue{"two"}}, + "I": {ValueType: &pb.Value_IntegerValue{2}}, + }, + }, + }}, + }, + }, + }, + { + "key at top level", + &WithKey{ + X: "three", + I: 3, + K: testKey0, + }, + testKey0, + &pb.Entity{ + Key: keyToProto(testKey0), + Properties: map[string]*pb.Value{ + "X": {ValueType: &pb.Value_StringValue{"three"}}, + "I": {ValueType: &pb.Value_IntegerValue{3}}, + }, + }, + }, + { + "nested unexported anonymous struct field", + &UnexpAnonym{ + a{S: "hello"}, + }, + testKey0, + &pb.Entity{ + Key: keyToProto(testKey0), + Properties: map[string]*pb.Value{ + "S": {ValueType: &pb.Value_StringValue{"hello"}}, + }, + }, + }, + } + + for _, tc := range testCases { + got, err := saveEntity(tc.key, tc.src) + if err != nil { + t.Errorf("saveEntity: %s: %v", tc.desc, err) + continue + } + + if !reflect.DeepEqual(tc.want, got) { + t.Errorf("%s: compare:\ngot: %#v\nwant: %#v", tc.desc, got, tc.want) + } + } +} diff --git a/vendor/cloud.google.com/go/datastore/testdata/index.yaml b/vendor/cloud.google.com/go/datastore/testdata/index.yaml new file mode 100644 index 00000000..47bc9de8 --- /dev/null +++ b/vendor/cloud.google.com/go/datastore/testdata/index.yaml @@ -0,0 +1,41 @@ +indexes: + +- kind: SQChild + ancestor: yes + 
properties: + - name: T + - name: I + +- kind: SQChild + ancestor: yes + properties: + - name: T + - name: I + direction: desc + +- kind: SQChild + ancestor: yes + properties: + - name: I + - name: T + - name: U + +- kind: SQChild + ancestor: yes + properties: + - name: I + - name: T + - name: U + +- kind: SQChild + ancestor: yes + properties: + - name: T + - name: J + +- kind: SQChild + ancestor: yes + properties: + - name: T + - name: J + - name: U \ No newline at end of file diff --git a/vendor/cloud.google.com/go/datastore/time.go b/vendor/cloud.google.com/go/datastore/time.go new file mode 100644 index 00000000..e7f6a193 --- /dev/null +++ b/vendor/cloud.google.com/go/datastore/time.go @@ -0,0 +1,36 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package datastore + +import ( + "math" + "time" +) + +var ( + minTime = time.Unix(int64(math.MinInt64)/1e6, (int64(math.MinInt64)%1e6)*1e3) + maxTime = time.Unix(int64(math.MaxInt64)/1e6, (int64(math.MaxInt64)%1e6)*1e3) +) + +func toUnixMicro(t time.Time) int64 { + // We cannot use t.UnixNano() / 1e3 because we want to handle times more than + // 2^63 nanoseconds (which is about 292 years) away from 1970, and those cannot + // be represented in the numerator of a single int64 divide. 
+ return t.Unix()*1e6 + int64(t.Nanosecond()/1e3) +} + +func fromUnixMicro(t int64) time.Time { + return time.Unix(t/1e6, (t%1e6)*1e3) +} diff --git a/vendor/cloud.google.com/go/datastore/time_test.go b/vendor/cloud.google.com/go/datastore/time_test.go new file mode 100644 index 00000000..5cc846c4 --- /dev/null +++ b/vendor/cloud.google.com/go/datastore/time_test.go @@ -0,0 +1,75 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package datastore + +import ( + "testing" + "time" +) + +func TestUnixMicro(t *testing.T) { + // Test that all these time.Time values survive a round trip to unix micros. 
+ testCases := []time.Time{ + {}, + time.Date(2, 1, 1, 0, 0, 0, 0, time.UTC), + time.Date(23, 1, 1, 0, 0, 0, 0, time.UTC), + time.Date(234, 1, 1, 0, 0, 0, 0, time.UTC), + time.Date(1000, 1, 1, 0, 0, 0, 0, time.UTC), + time.Date(1600, 1, 1, 0, 0, 0, 0, time.UTC), + time.Date(1700, 1, 1, 0, 0, 0, 0, time.UTC), + time.Date(1800, 1, 1, 0, 0, 0, 0, time.UTC), + time.Date(1900, 1, 1, 0, 0, 0, 0, time.UTC), + time.Unix(-1e6, -1000), + time.Unix(-1e6, 0), + time.Unix(-1e6, +1000), + time.Unix(-60, -1000), + time.Unix(-60, 0), + time.Unix(-60, +1000), + time.Unix(-1, -1000), + time.Unix(-1, 0), + time.Unix(-1, +1000), + time.Unix(0, -3000), + time.Unix(0, -2000), + time.Unix(0, -1000), + time.Unix(0, 0), + time.Unix(0, +1000), + time.Unix(0, +2000), + time.Unix(+60, -1000), + time.Unix(+60, 0), + time.Unix(+60, +1000), + time.Unix(+1e6, -1000), + time.Unix(+1e6, 0), + time.Unix(+1e6, +1000), + time.Date(1999, 12, 31, 23, 59, 59, 999000, time.UTC), + time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC), + time.Date(2006, 1, 2, 15, 4, 5, 678000, time.UTC), + time.Date(2009, 11, 10, 23, 0, 0, 0, time.UTC), + time.Date(3456, 1, 1, 0, 0, 0, 0, time.UTC), + } + for _, tc := range testCases { + got := fromUnixMicro(toUnixMicro(tc)) + if !got.Equal(tc) { + t.Errorf("got %q, want %q", got, tc) + } + } + + // Test that a time.Time that isn't an integral number of microseconds + // is not perfectly reconstructed after a round trip. + t0 := time.Unix(0, 123) + t1 := fromUnixMicro(toUnixMicro(t0)) + if t1.Nanosecond()%1000 != 0 || t0.Nanosecond()%1000 == 0 { + t.Errorf("quantization to µs: got %q with %d ns, started with %d ns", t1, t1.Nanosecond(), t0.Nanosecond()) + } +} diff --git a/vendor/cloud.google.com/go/datastore/transaction.go b/vendor/cloud.google.com/go/datastore/transaction.go new file mode 100644 index 00000000..4eb0ea25 --- /dev/null +++ b/vendor/cloud.google.com/go/datastore/transaction.go @@ -0,0 +1,310 @@ +// Copyright 2014 Google Inc. All Rights Reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package datastore + +import ( + "errors" + + "golang.org/x/net/context" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + + pb "google.golang.org/genproto/googleapis/datastore/v1" +) + +// ErrConcurrentTransaction is returned when a transaction is rolled back due +// to a conflict with a concurrent transaction. +var ErrConcurrentTransaction = errors.New("datastore: concurrent transaction") + +var errExpiredTransaction = errors.New("datastore: transaction expired") + +type transactionSettings struct { + attempts int +} + +// newTransactionSettings creates a transactionSettings with a given TransactionOption slice. +// Unconfigured options will be set to default values. +func newTransactionSettings(opts []TransactionOption) *transactionSettings { + s := &transactionSettings{attempts: 3} + for _, o := range opts { + o.apply(s) + } + return s +} + +// TransactionOption configures the way a transaction is executed. +type TransactionOption interface { + apply(*transactionSettings) +} + +// MaxAttempts returns a TransactionOption that overrides the default 3 attempt times. +func MaxAttempts(attempts int) TransactionOption { + return maxAttempts(attempts) +} + +type maxAttempts int + +func (w maxAttempts) apply(s *transactionSettings) { + if w > 0 { + s.attempts = int(w) + } +} + +// Transaction represents a set of datastore operations to be committed atomically. 
+// +// Operations are enqueued by calling the Put and Delete methods on Transaction +// (or their Multi-equivalents). These operations are only committed when the +// Commit method is invoked. To ensure consistency, reads must be performed by +// using Transaction's Get method or by using the Transaction method when +// building a query. +// +// A Transaction must be committed or rolled back exactly once. +type Transaction struct { + id []byte + client *Client + ctx context.Context + mutations []*pb.Mutation // The mutations to apply. + pending map[int]*PendingKey // Map from mutation index to incomplete keys pending transaction completion. +} + +// NewTransaction starts a new transaction. +func (c *Client) NewTransaction(ctx context.Context, opts ...TransactionOption) (*Transaction, error) { + for _, o := range opts { + if _, ok := o.(maxAttempts); ok { + return nil, errors.New("datastore: NewTransaction does not accept MaxAttempts option") + } + } + req := &pb.BeginTransactionRequest{ + ProjectId: c.dataset, + } + resp, err := c.client.BeginTransaction(ctx, req) + if err != nil { + return nil, err + } + + return &Transaction{ + id: resp.Transaction, + ctx: ctx, + client: c, + mutations: nil, + pending: make(map[int]*PendingKey), + }, nil +} + +// RunInTransaction runs f in a transaction. f is invoked with a Transaction +// that f should use for all the transaction's datastore operations. +// +// f must not call Commit or Rollback on the provided Transaction. +// +// If f returns nil, RunInTransaction commits the transaction, +// returning the Commit and a nil error if it succeeds. If the commit fails due +// to a conflicting transaction, RunInTransaction retries f with a new +// Transaction. It gives up and returns ErrConcurrentTransaction after three +// failed attempts (or as configured with MaxAttempts). +// +// If f returns non-nil, then the transaction will be rolled back and +// RunInTransaction will return the same error. The function f is not retried. 
+// +// Note that when f returns, the transaction is not committed. Calling code +// must not assume that any of f's changes have been committed until +// RunInTransaction returns nil. +// +// Since f may be called multiple times, f should usually be idempotent – that +// is, it should have the same result when called multiple times. Note that +// Transaction.Get will append when unmarshalling slice fields, so it is not +// necessarily idempotent. +func (c *Client) RunInTransaction(ctx context.Context, f func(tx *Transaction) error, opts ...TransactionOption) (*Commit, error) { + settings := newTransactionSettings(opts) + for n := 0; n < settings.attempts; n++ { + tx, err := c.NewTransaction(ctx) + if err != nil { + return nil, err + } + if err := f(tx); err != nil { + tx.Rollback() + return nil, err + } + if cmt, err := tx.Commit(); err != ErrConcurrentTransaction { + return cmt, err + } + } + return nil, ErrConcurrentTransaction +} + +// Commit applies the enqueued operations atomically. +func (t *Transaction) Commit() (*Commit, error) { + if t.id == nil { + return nil, errExpiredTransaction + } + req := &pb.CommitRequest{ + ProjectId: t.client.dataset, + TransactionSelector: &pb.CommitRequest_Transaction{t.id}, + Mutations: t.mutations, + Mode: pb.CommitRequest_TRANSACTIONAL, + } + t.id = nil + resp, err := t.client.client.Commit(t.ctx, req) + if err != nil { + if grpc.Code(err) == codes.Aborted { + return nil, ErrConcurrentTransaction + } + return nil, err + } + + // Copy any newly minted keys into the returned keys. 
+ commit := &Commit{} + for i, p := range t.pending { + if i >= len(resp.MutationResults) || resp.MutationResults[i].Key == nil { + return nil, errors.New("datastore: internal error: server returned the wrong mutation results") + } + key, err := protoToKey(resp.MutationResults[i].Key) + if err != nil { + return nil, errors.New("datastore: internal error: server returned an invalid key") + } + p.key = key + p.commit = commit + } + + return commit, nil +} + +// Rollback abandons a pending transaction. +func (t *Transaction) Rollback() error { + if t.id == nil { + return errExpiredTransaction + } + id := t.id + t.id = nil + _, err := t.client.client.Rollback(t.ctx, &pb.RollbackRequest{ + ProjectId: t.client.dataset, + Transaction: id, + }) + return err +} + +// Get is the transaction-specific version of the package function Get. +// All reads performed during the transaction will come from a single consistent +// snapshot. Furthermore, if the transaction is set to a serializable isolation +// level, another transaction cannot concurrently modify the data that is read +// or modified by this transaction. +func (t *Transaction) Get(key *Key, dst interface{}) error { + opts := &pb.ReadOptions{ + ConsistencyType: &pb.ReadOptions_Transaction{t.id}, + } + err := t.client.get(t.ctx, []*Key{key}, []interface{}{dst}, opts) + if me, ok := err.(MultiError); ok { + return me[0] + } + return err +} + +// GetMulti is a batch version of Get. +func (t *Transaction) GetMulti(keys []*Key, dst interface{}) error { + if t.id == nil { + return errExpiredTransaction + } + opts := &pb.ReadOptions{ + ConsistencyType: &pb.ReadOptions_Transaction{t.id}, + } + return t.client.get(t.ctx, keys, dst, opts) +} + +// Put is the transaction-specific version of the package function Put. +// +// Put returns a PendingKey which can be resolved into a Key using the +// return value from a successful Commit. 
If key is an incomplete key, the +// returned pending key will resolve to a unique key generated by the +// datastore. +func (t *Transaction) Put(key *Key, src interface{}) (*PendingKey, error) { + h, err := t.PutMulti([]*Key{key}, []interface{}{src}) + if err != nil { + if me, ok := err.(MultiError); ok { + return nil, me[0] + } + return nil, err + } + return h[0], nil +} + +// PutMulti is a batch version of Put. One PendingKey is returned for each +// element of src in the same order. +func (t *Transaction) PutMulti(keys []*Key, src interface{}) ([]*PendingKey, error) { + if t.id == nil { + return nil, errExpiredTransaction + } + mutations, err := putMutations(keys, src) + if err != nil { + return nil, err + } + origin := len(t.mutations) + t.mutations = append(t.mutations, mutations...) + + // Prepare the returned handles, pre-populating where possible. + ret := make([]*PendingKey, len(keys)) + for i, key := range keys { + p := &PendingKey{} + if key.Incomplete() { + // This key will be in the final commit result. + t.pending[origin+i] = p + } else { + p.key = key + } + ret[i] = p + } + + return ret, nil +} + +// Delete is the transaction-specific version of the package function Delete. +// Delete enqueues the deletion of the entity for the given key, to be +// committed atomically upon calling Commit. +func (t *Transaction) Delete(key *Key) error { + err := t.DeleteMulti([]*Key{key}) + if me, ok := err.(MultiError); ok { + return me[0] + } + return err +} + +// DeleteMulti is a batch version of Delete. +func (t *Transaction) DeleteMulti(keys []*Key) error { + if t.id == nil { + return errExpiredTransaction + } + mutations, err := deleteMutations(keys) + if err != nil { + return err + } + t.mutations = append(t.mutations, mutations...) + return nil +} + +// Commit represents the result of a committed transaction. +type Commit struct{} + +// Key resolves a pending key handle into a final key. 
+func (c *Commit) Key(p *PendingKey) *Key {
+	if c != p.commit {
+		panic("PendingKey was not created by corresponding transaction")
+	}
+	return p.key
+}
+
+// PendingKey represents the key for a newly-inserted entity. It can be
+// resolved into a Key by calling the Key method of Commit.
+type PendingKey struct {
+	key    *Key
+	commit *Commit
+}
diff --git a/vendor/cloud.google.com/go/debugger/apiv2/controller2_client.go b/vendor/cloud.google.com/go/debugger/apiv2/controller2_client.go
new file mode 100644
index 00000000..8d8bd3f9
--- /dev/null
+++ b/vendor/cloud.google.com/go/debugger/apiv2/controller2_client.go
@@ -0,0 +1,226 @@
+// Copyright 2017, Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// AUTO-GENERATED CODE. DO NOT EDIT.
+
+package debugger
+
+import (
+	"time"
+
+	"cloud.google.com/go/internal/version"
+	gax "github.com/googleapis/gax-go"
+	"golang.org/x/net/context"
+	"google.golang.org/api/option"
+	"google.golang.org/api/transport"
+	clouddebuggerpb "google.golang.org/genproto/googleapis/devtools/clouddebugger/v2"
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/codes"
+)
+
+// Controller2CallOptions contains the retry settings for each method of Controller2Client.
+type Controller2CallOptions struct { + RegisterDebuggee []gax.CallOption + ListActiveBreakpoints []gax.CallOption + UpdateActiveBreakpoint []gax.CallOption +} + +func defaultController2ClientOptions() []option.ClientOption { + return []option.ClientOption{ + option.WithEndpoint("clouddebugger.googleapis.com:443"), + option.WithScopes( + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud_debugger", + ), + } +} + +func defaultController2CallOptions() *Controller2CallOptions { + retry := map[[2]string][]gax.CallOption{ + {"default", "idempotent"}: { + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.3, + }) + }), + }, + {"default", "non_idempotent"}: { + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.3, + }) + }), + }, + } + return &Controller2CallOptions{ + RegisterDebuggee: retry[[2]string{"default", "non_idempotent"}], + ListActiveBreakpoints: retry[[2]string{"default", "idempotent"}], + UpdateActiveBreakpoint: retry[[2]string{"default", "idempotent"}], + } +} + +// Controller2Client is a client for interacting with Stackdriver Debugger API. +type Controller2Client struct { + // The connection to the service. + conn *grpc.ClientConn + + // The gRPC API client. + controller2Client clouddebuggerpb.Controller2Client + + // The call options for this service. + CallOptions *Controller2CallOptions + + // The metadata to be sent with each request. + xGoogHeader string +} + +// NewController2Client creates a new controller2 client. +// +// The Controller service provides the API for orchestrating a collection of +// debugger agents to perform debugging tasks. 
These agents are each attached +// to a process of an application which may include one or more replicas. +// +// The debugger agents register with the Controller to identify the application +// being debugged, the Debuggee. All agents that register with the same data, +// represent the same Debuggee, and are assigned the same `debuggee_id`. +// +// The debugger agents call the Controller to retrieve the list of active +// Breakpoints. Agents with the same `debuggee_id` get the same breakpoints +// list. An agent that can fulfill the breakpoint request updates the +// Controller with the breakpoint result. The controller selects the first +// result received and discards the rest of the results. +// Agents that poll again for active breakpoints will no longer have +// the completed breakpoint in the list and should remove that breakpoint from +// their attached process. +// +// The Controller service does not provide a way to retrieve the results of +// a completed breakpoint. This functionality is available using the Debugger +// service. +func NewController2Client(ctx context.Context, opts ...option.ClientOption) (*Controller2Client, error) { + conn, err := transport.DialGRPC(ctx, append(defaultController2ClientOptions(), opts...)...) + if err != nil { + return nil, err + } + c := &Controller2Client{ + conn: conn, + CallOptions: defaultController2CallOptions(), + + controller2Client: clouddebuggerpb.NewController2Client(conn), + } + c.SetGoogleClientInfo() + return c, nil +} + +// Connection returns the client's connection to the API service. +func (c *Controller2Client) Connection() *grpc.ClientConn { + return c.conn +} + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *Controller2Client) Close() error { + return c.conn.Close() +} + +// SetGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. 
Intended for +// use by Google-written clients. +func (c *Controller2Client) SetGoogleClientInfo(keyval ...string) { + kv := append([]string{"gl-go", version.Go()}, keyval...) + kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", "") + c.xGoogHeader = gax.XGoogHeader(kv...) +} + +// RegisterDebuggee registers the debuggee with the controller service. +// +// All agents attached to the same application should call this method with +// the same request content to get back the same stable `debuggee_id`. Agents +// should call this method again whenever `google.rpc.Code.NOT_FOUND` is +// returned from any controller method. +// +// This allows the controller service to disable the agent or recover from any +// data loss. If the debuggee is disabled by the server, the response will +// have `is_disabled` set to `true`. +func (c *Controller2Client) RegisterDebuggee(ctx context.Context, req *clouddebuggerpb.RegisterDebuggeeRequest) (*clouddebuggerpb.RegisterDebuggeeResponse, error) { + ctx = insertXGoog(ctx, c.xGoogHeader) + var resp *clouddebuggerpb.RegisterDebuggeeResponse + err := gax.Invoke(ctx, func(ctx context.Context) error { + var err error + resp, err = c.controller2Client.RegisterDebuggee(ctx, req) + return err + }, c.CallOptions.RegisterDebuggee...) + if err != nil { + return nil, err + } + return resp, nil +} + +// ListActiveBreakpoints returns the list of all active breakpoints for the debuggee. +// +// The breakpoint specification (location, condition, and expression +// fields) is semantically immutable, although the field values may +// change. For example, an agent may update the location line number +// to reflect the actual line where the breakpoint was set, but this +// doesn't change the breakpoint semantics. +// +// This means that an agent does not need to check if a breakpoint has changed +// when it encounters the same breakpoint on a successive call. 
+// Moreover, an agent should remember the breakpoints that are completed +// until the controller removes them from the active list to avoid +// setting those breakpoints again. +func (c *Controller2Client) ListActiveBreakpoints(ctx context.Context, req *clouddebuggerpb.ListActiveBreakpointsRequest) (*clouddebuggerpb.ListActiveBreakpointsResponse, error) { + ctx = insertXGoog(ctx, c.xGoogHeader) + var resp *clouddebuggerpb.ListActiveBreakpointsResponse + err := gax.Invoke(ctx, func(ctx context.Context) error { + var err error + resp, err = c.controller2Client.ListActiveBreakpoints(ctx, req) + return err + }, c.CallOptions.ListActiveBreakpoints...) + if err != nil { + return nil, err + } + return resp, nil +} + +// UpdateActiveBreakpoint updates the breakpoint state or mutable fields. +// The entire Breakpoint message must be sent back to the controller +// service. +// +// Updates to active breakpoint fields are only allowed if the new value +// does not change the breakpoint specification. Updates to the `location`, +// `condition` and `expression` fields should not alter the breakpoint +// semantics. These may only make changes such as canonicalizing a value +// or snapping the location to the correct line of code. +func (c *Controller2Client) UpdateActiveBreakpoint(ctx context.Context, req *clouddebuggerpb.UpdateActiveBreakpointRequest) (*clouddebuggerpb.UpdateActiveBreakpointResponse, error) { + ctx = insertXGoog(ctx, c.xGoogHeader) + var resp *clouddebuggerpb.UpdateActiveBreakpointResponse + err := gax.Invoke(ctx, func(ctx context.Context) error { + var err error + resp, err = c.controller2Client.UpdateActiveBreakpoint(ctx, req) + return err + }, c.CallOptions.UpdateActiveBreakpoint...) 
+ if err != nil { + return nil, err + } + return resp, nil +} diff --git a/vendor/cloud.google.com/go/debugger/apiv2/controller2_client_example_test.go b/vendor/cloud.google.com/go/debugger/apiv2/controller2_client_example_test.go new file mode 100644 index 00000000..4f9b6f46 --- /dev/null +++ b/vendor/cloud.google.com/go/debugger/apiv2/controller2_client_example_test.go @@ -0,0 +1,87 @@ +// Copyright 2017, Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package debugger_test + +import ( + "cloud.google.com/go/debugger/apiv2" + "golang.org/x/net/context" + clouddebuggerpb "google.golang.org/genproto/googleapis/devtools/clouddebugger/v2" +) + +func ExampleNewController2Client() { + ctx := context.Background() + c, err := debugger.NewController2Client(ctx) + if err != nil { + // TODO: Handle error. + } + // TODO: Use client. + _ = c +} + +func ExampleController2Client_RegisterDebuggee() { + ctx := context.Background() + c, err := debugger.NewController2Client(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &clouddebuggerpb.RegisterDebuggeeRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.RegisterDebuggee(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. 
+ _ = resp +} + +func ExampleController2Client_ListActiveBreakpoints() { + ctx := context.Background() + c, err := debugger.NewController2Client(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &clouddebuggerpb.ListActiveBreakpointsRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.ListActiveBreakpoints(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleController2Client_UpdateActiveBreakpoint() { + ctx := context.Background() + c, err := debugger.NewController2Client(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &clouddebuggerpb.UpdateActiveBreakpointRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.UpdateActiveBreakpoint(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} diff --git a/vendor/cloud.google.com/go/debugger/apiv2/debugger2_client.go b/vendor/cloud.google.com/go/debugger/apiv2/debugger2_client.go new file mode 100644 index 00000000..1c8738b7 --- /dev/null +++ b/vendor/cloud.google.com/go/debugger/apiv2/debugger2_client.go @@ -0,0 +1,219 @@ +// Copyright 2017, Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. 
+ +package debugger + +import ( + "time" + + "cloud.google.com/go/internal/version" + gax "github.com/googleapis/gax-go" + "golang.org/x/net/context" + "google.golang.org/api/option" + "google.golang.org/api/transport" + clouddebuggerpb "google.golang.org/genproto/googleapis/devtools/clouddebugger/v2" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" +) + +// Debugger2CallOptions contains the retry settings for each method of Debugger2Client. +type Debugger2CallOptions struct { + SetBreakpoint []gax.CallOption + GetBreakpoint []gax.CallOption + DeleteBreakpoint []gax.CallOption + ListBreakpoints []gax.CallOption + ListDebuggees []gax.CallOption +} + +func defaultDebugger2ClientOptions() []option.ClientOption { + return []option.ClientOption{ + option.WithEndpoint("clouddebugger.googleapis.com:443"), + option.WithScopes( + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud_debugger", + ), + } +} + +func defaultDebugger2CallOptions() *Debugger2CallOptions { + retry := map[[2]string][]gax.CallOption{ + {"default", "idempotent"}: { + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.3, + }) + }), + }, + {"default", "non_idempotent"}: { + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.3, + }) + }), + }, + } + return &Debugger2CallOptions{ + SetBreakpoint: retry[[2]string{"default", "non_idempotent"}], + GetBreakpoint: retry[[2]string{"default", "idempotent"}], + DeleteBreakpoint: retry[[2]string{"default", "idempotent"}], + ListBreakpoints: retry[[2]string{"default", "idempotent"}], + ListDebuggees: retry[[2]string{"default", "idempotent"}], + } +} + +// Debugger2Client is a client for interacting with 
Stackdriver Debugger API. +type Debugger2Client struct { + // The connection to the service. + conn *grpc.ClientConn + + // The gRPC API client. + debugger2Client clouddebuggerpb.Debugger2Client + + // The call options for this service. + CallOptions *Debugger2CallOptions + + // The metadata to be sent with each request. + xGoogHeader string +} + +// NewDebugger2Client creates a new debugger2 client. +// +// The Debugger service provides the API that allows users to collect run-time +// information from a running application, without stopping or slowing it down +// and without modifying its state. An application may include one or +// more replicated processes performing the same work. +// +// The application is represented using the Debuggee concept. The Debugger +// service provides a way to query for available Debuggees, but does not +// provide a way to create one. A debuggee is created using the Controller +// service, usually by running a debugger agent with the application. +// +// The Debugger service enables the client to set one or more Breakpoints on a +// Debuggee and collect the results of the set Breakpoints. +func NewDebugger2Client(ctx context.Context, opts ...option.ClientOption) (*Debugger2Client, error) { + conn, err := transport.DialGRPC(ctx, append(defaultDebugger2ClientOptions(), opts...)...) + if err != nil { + return nil, err + } + c := &Debugger2Client{ + conn: conn, + CallOptions: defaultDebugger2CallOptions(), + + debugger2Client: clouddebuggerpb.NewDebugger2Client(conn), + } + c.SetGoogleClientInfo() + return c, nil +} + +// Connection returns the client's connection to the API service. +func (c *Debugger2Client) Connection() *grpc.ClientConn { + return c.conn +} + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. 
+func (c *Debugger2Client) Close() error { + return c.conn.Close() +} + +// SetGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *Debugger2Client) SetGoogleClientInfo(keyval ...string) { + kv := append([]string{"gl-go", version.Go()}, keyval...) + kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", "") + c.xGoogHeader = gax.XGoogHeader(kv...) +} + +// SetBreakpoint sets the breakpoint to the debuggee. +func (c *Debugger2Client) SetBreakpoint(ctx context.Context, req *clouddebuggerpb.SetBreakpointRequest) (*clouddebuggerpb.SetBreakpointResponse, error) { + ctx = insertXGoog(ctx, c.xGoogHeader) + var resp *clouddebuggerpb.SetBreakpointResponse + err := gax.Invoke(ctx, func(ctx context.Context) error { + var err error + resp, err = c.debugger2Client.SetBreakpoint(ctx, req) + return err + }, c.CallOptions.SetBreakpoint...) + if err != nil { + return nil, err + } + return resp, nil +} + +// GetBreakpoint gets breakpoint information. +func (c *Debugger2Client) GetBreakpoint(ctx context.Context, req *clouddebuggerpb.GetBreakpointRequest) (*clouddebuggerpb.GetBreakpointResponse, error) { + ctx = insertXGoog(ctx, c.xGoogHeader) + var resp *clouddebuggerpb.GetBreakpointResponse + err := gax.Invoke(ctx, func(ctx context.Context) error { + var err error + resp, err = c.debugger2Client.GetBreakpoint(ctx, req) + return err + }, c.CallOptions.GetBreakpoint...) + if err != nil { + return nil, err + } + return resp, nil +} + +// DeleteBreakpoint deletes the breakpoint from the debuggee. +func (c *Debugger2Client) DeleteBreakpoint(ctx context.Context, req *clouddebuggerpb.DeleteBreakpointRequest) error { + ctx = insertXGoog(ctx, c.xGoogHeader) + err := gax.Invoke(ctx, func(ctx context.Context) error { + var err error + _, err = c.debugger2Client.DeleteBreakpoint(ctx, req) + return err + }, c.CallOptions.DeleteBreakpoint...) 
+ return err +} + +// ListBreakpoints lists all breakpoints for the debuggee. +func (c *Debugger2Client) ListBreakpoints(ctx context.Context, req *clouddebuggerpb.ListBreakpointsRequest) (*clouddebuggerpb.ListBreakpointsResponse, error) { + ctx = insertXGoog(ctx, c.xGoogHeader) + var resp *clouddebuggerpb.ListBreakpointsResponse + err := gax.Invoke(ctx, func(ctx context.Context) error { + var err error + resp, err = c.debugger2Client.ListBreakpoints(ctx, req) + return err + }, c.CallOptions.ListBreakpoints...) + if err != nil { + return nil, err + } + return resp, nil +} + +// ListDebuggees lists all the debuggees that the user can set breakpoints to. +func (c *Debugger2Client) ListDebuggees(ctx context.Context, req *clouddebuggerpb.ListDebuggeesRequest) (*clouddebuggerpb.ListDebuggeesResponse, error) { + ctx = insertXGoog(ctx, c.xGoogHeader) + var resp *clouddebuggerpb.ListDebuggeesResponse + err := gax.Invoke(ctx, func(ctx context.Context) error { + var err error + resp, err = c.debugger2Client.ListDebuggees(ctx, req) + return err + }, c.CallOptions.ListDebuggees...) + if err != nil { + return nil, err + } + return resp, nil +} diff --git a/vendor/cloud.google.com/go/debugger/apiv2/debugger2_client_example_test.go b/vendor/cloud.google.com/go/debugger/apiv2/debugger2_client_example_test.go new file mode 100644 index 00000000..ea3e7e92 --- /dev/null +++ b/vendor/cloud.google.com/go/debugger/apiv2/debugger2_client_example_test.go @@ -0,0 +1,121 @@ +// Copyright 2017, Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package debugger_test + +import ( + "cloud.google.com/go/debugger/apiv2" + "golang.org/x/net/context" + clouddebuggerpb "google.golang.org/genproto/googleapis/devtools/clouddebugger/v2" +) + +func ExampleNewDebugger2Client() { + ctx := context.Background() + c, err := debugger.NewDebugger2Client(ctx) + if err != nil { + // TODO: Handle error. + } + // TODO: Use client. + _ = c +} + +func ExampleDebugger2Client_SetBreakpoint() { + ctx := context.Background() + c, err := debugger.NewDebugger2Client(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &clouddebuggerpb.SetBreakpointRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.SetBreakpoint(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleDebugger2Client_GetBreakpoint() { + ctx := context.Background() + c, err := debugger.NewDebugger2Client(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &clouddebuggerpb.GetBreakpointRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.GetBreakpoint(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleDebugger2Client_DeleteBreakpoint() { + ctx := context.Background() + c, err := debugger.NewDebugger2Client(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &clouddebuggerpb.DeleteBreakpointRequest{ + // TODO: Fill request struct fields. + } + err = c.DeleteBreakpoint(ctx, req) + if err != nil { + // TODO: Handle error. + } +} + +func ExampleDebugger2Client_ListBreakpoints() { + ctx := context.Background() + c, err := debugger.NewDebugger2Client(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &clouddebuggerpb.ListBreakpointsRequest{ + // TODO: Fill request struct fields. 
+ } + resp, err := c.ListBreakpoints(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleDebugger2Client_ListDebuggees() { + ctx := context.Background() + c, err := debugger.NewDebugger2Client(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &clouddebuggerpb.ListDebuggeesRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.ListDebuggees(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} diff --git a/vendor/cloud.google.com/go/debugger/apiv2/doc.go b/vendor/cloud.google.com/go/debugger/apiv2/doc.go new file mode 100644 index 00000000..2a9df3c5 --- /dev/null +++ b/vendor/cloud.google.com/go/debugger/apiv2/doc.go @@ -0,0 +1,36 @@ +// Copyright 2017, Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +// Package debugger is an experimental, auto-generated package for the +// debugger API. +// +// Examines the call stack and variables of a running application +// without stopping or slowing it down. +// +// Use the client at cloud.google.com/go/cmd/go-cloud-debug-agent in preference to this. 
+package debugger // import "cloud.google.com/go/debugger/apiv2" + +import ( + "golang.org/x/net/context" + "google.golang.org/grpc/metadata" +) + +func insertXGoog(ctx context.Context, val string) context.Context { + md, _ := metadata.FromContext(ctx) + md = md.Copy() + md["x-goog-api-client"] = []string{val} + return metadata.NewContext(ctx, md) +} diff --git a/vendor/cloud.google.com/go/debugger/apiv2/mock_test.go b/vendor/cloud.google.com/go/debugger/apiv2/mock_test.go new file mode 100644 index 00000000..35c2f495 --- /dev/null +++ b/vendor/cloud.google.com/go/debugger/apiv2/mock_test.go @@ -0,0 +1,641 @@ +// Copyright 2017, Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package debugger + +import ( + google_protobuf "github.com/golang/protobuf/ptypes/empty" + clouddebuggerpb "google.golang.org/genproto/googleapis/devtools/clouddebugger/v2" +) + +import ( + "flag" + "io" + "log" + "net" + "os" + "testing" + + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes" + "golang.org/x/net/context" + "google.golang.org/api/option" + status "google.golang.org/genproto/googleapis/rpc/status" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" +) + +var _ = io.EOF +var _ = ptypes.MarshalAny +var _ status.Status + +type mockDebugger2Server struct { + // Embed for forward compatibility. + // Tests will keep working if more methods are added + // in the future. 
+ clouddebuggerpb.Debugger2Server + + reqs []proto.Message + + // If set, all calls return this error. + err error + + // responses to return if err == nil + resps []proto.Message +} + +func (s *mockDebugger2Server) SetBreakpoint(_ context.Context, req *clouddebuggerpb.SetBreakpointRequest) (*clouddebuggerpb.SetBreakpointResponse, error) { + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*clouddebuggerpb.SetBreakpointResponse), nil +} + +func (s *mockDebugger2Server) GetBreakpoint(_ context.Context, req *clouddebuggerpb.GetBreakpointRequest) (*clouddebuggerpb.GetBreakpointResponse, error) { + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*clouddebuggerpb.GetBreakpointResponse), nil +} + +func (s *mockDebugger2Server) DeleteBreakpoint(_ context.Context, req *clouddebuggerpb.DeleteBreakpointRequest) (*google_protobuf.Empty, error) { + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*google_protobuf.Empty), nil +} + +func (s *mockDebugger2Server) ListBreakpoints(_ context.Context, req *clouddebuggerpb.ListBreakpointsRequest) (*clouddebuggerpb.ListBreakpointsResponse, error) { + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*clouddebuggerpb.ListBreakpointsResponse), nil +} + +func (s *mockDebugger2Server) ListDebuggees(_ context.Context, req *clouddebuggerpb.ListDebuggeesRequest) (*clouddebuggerpb.ListDebuggeesResponse, error) { + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*clouddebuggerpb.ListDebuggeesResponse), nil +} + +type mockController2Server struct { + // Embed for forward compatibility. + // Tests will keep working if more methods are added + // in the future. + clouddebuggerpb.Controller2Server + + reqs []proto.Message + + // If set, all calls return this error. 
+ err error + + // responses to return if err == nil + resps []proto.Message +} + +func (s *mockController2Server) RegisterDebuggee(_ context.Context, req *clouddebuggerpb.RegisterDebuggeeRequest) (*clouddebuggerpb.RegisterDebuggeeResponse, error) { + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*clouddebuggerpb.RegisterDebuggeeResponse), nil +} + +func (s *mockController2Server) ListActiveBreakpoints(_ context.Context, req *clouddebuggerpb.ListActiveBreakpointsRequest) (*clouddebuggerpb.ListActiveBreakpointsResponse, error) { + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*clouddebuggerpb.ListActiveBreakpointsResponse), nil +} + +func (s *mockController2Server) UpdateActiveBreakpoint(_ context.Context, req *clouddebuggerpb.UpdateActiveBreakpointRequest) (*clouddebuggerpb.UpdateActiveBreakpointResponse, error) { + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*clouddebuggerpb.UpdateActiveBreakpointResponse), nil +} + +// clientOpt is the option tests should use to connect to the test server. +// It is initialized by TestMain. 
+var clientOpt option.ClientOption + +var ( + mockDebugger2 mockDebugger2Server + mockController2 mockController2Server +) + +func TestMain(m *testing.M) { + flag.Parse() + + serv := grpc.NewServer() + clouddebuggerpb.RegisterDebugger2Server(serv, &mockDebugger2) + clouddebuggerpb.RegisterController2Server(serv, &mockController2) + + lis, err := net.Listen("tcp", "localhost:0") + if err != nil { + log.Fatal(err) + } + go serv.Serve(lis) + + conn, err := grpc.Dial(lis.Addr().String(), grpc.WithInsecure()) + if err != nil { + log.Fatal(err) + } + clientOpt = option.WithGRPCConn(conn) + + os.Exit(m.Run()) +} + +func TestDebugger2SetBreakpoint(t *testing.T) { + var expectedResponse *clouddebuggerpb.SetBreakpointResponse = &clouddebuggerpb.SetBreakpointResponse{} + + mockDebugger2.err = nil + mockDebugger2.reqs = nil + + mockDebugger2.resps = append(mockDebugger2.resps[:0], expectedResponse) + + var debuggeeId string = "debuggeeId-997255898" + var breakpoint *clouddebuggerpb.Breakpoint = &clouddebuggerpb.Breakpoint{} + var clientVersion string = "clientVersion-1506231196" + var request = &clouddebuggerpb.SetBreakpointRequest{ + DebuggeeId: debuggeeId, + Breakpoint: breakpoint, + ClientVersion: clientVersion, + } + + c, err := NewDebugger2Client(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.SetBreakpoint(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockDebugger2.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestDebugger2SetBreakpointError(t *testing.T) { + errCode := codes.Internal + mockDebugger2.err = grpc.Errorf(errCode, "test error") + + var debuggeeId string = "debuggeeId-997255898" + var breakpoint *clouddebuggerpb.Breakpoint = &clouddebuggerpb.Breakpoint{} + var clientVersion string = 
"clientVersion-1506231196" + var request = &clouddebuggerpb.SetBreakpointRequest{ + DebuggeeId: debuggeeId, + Breakpoint: breakpoint, + ClientVersion: clientVersion, + } + + c, err := NewDebugger2Client(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.SetBreakpoint(context.Background(), request) + + if c := grpc.Code(err); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestDebugger2GetBreakpoint(t *testing.T) { + var expectedResponse *clouddebuggerpb.GetBreakpointResponse = &clouddebuggerpb.GetBreakpointResponse{} + + mockDebugger2.err = nil + mockDebugger2.reqs = nil + + mockDebugger2.resps = append(mockDebugger2.resps[:0], expectedResponse) + + var debuggeeId string = "debuggeeId-997255898" + var breakpointId string = "breakpointId498424873" + var clientVersion string = "clientVersion-1506231196" + var request = &clouddebuggerpb.GetBreakpointRequest{ + DebuggeeId: debuggeeId, + BreakpointId: breakpointId, + ClientVersion: clientVersion, + } + + c, err := NewDebugger2Client(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.GetBreakpoint(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockDebugger2.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestDebugger2GetBreakpointError(t *testing.T) { + errCode := codes.Internal + mockDebugger2.err = grpc.Errorf(errCode, "test error") + + var debuggeeId string = "debuggeeId-997255898" + var breakpointId string = "breakpointId498424873" + var clientVersion string = "clientVersion-1506231196" + var request = &clouddebuggerpb.GetBreakpointRequest{ + DebuggeeId: debuggeeId, + BreakpointId: breakpointId, + ClientVersion: clientVersion, + } + + c, err := 
NewDebugger2Client(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.GetBreakpoint(context.Background(), request) + + if c := grpc.Code(err); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestDebugger2DeleteBreakpoint(t *testing.T) { + var expectedResponse *google_protobuf.Empty = &google_protobuf.Empty{} + + mockDebugger2.err = nil + mockDebugger2.reqs = nil + + mockDebugger2.resps = append(mockDebugger2.resps[:0], expectedResponse) + + var debuggeeId string = "debuggeeId-997255898" + var breakpointId string = "breakpointId498424873" + var clientVersion string = "clientVersion-1506231196" + var request = &clouddebuggerpb.DeleteBreakpointRequest{ + DebuggeeId: debuggeeId, + BreakpointId: breakpointId, + ClientVersion: clientVersion, + } + + c, err := NewDebugger2Client(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + err = c.DeleteBreakpoint(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockDebugger2.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + +} + +func TestDebugger2DeleteBreakpointError(t *testing.T) { + errCode := codes.Internal + mockDebugger2.err = grpc.Errorf(errCode, "test error") + + var debuggeeId string = "debuggeeId-997255898" + var breakpointId string = "breakpointId498424873" + var clientVersion string = "clientVersion-1506231196" + var request = &clouddebuggerpb.DeleteBreakpointRequest{ + DebuggeeId: debuggeeId, + BreakpointId: breakpointId, + ClientVersion: clientVersion, + } + + c, err := NewDebugger2Client(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + err = c.DeleteBreakpoint(context.Background(), request) + + if c := grpc.Code(err); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } +} +func TestDebugger2ListBreakpoints(t *testing.T) { + var nextWaitToken string = "nextWaitToken1006864251" + 
var expectedResponse = &clouddebuggerpb.ListBreakpointsResponse{ + NextWaitToken: nextWaitToken, + } + + mockDebugger2.err = nil + mockDebugger2.reqs = nil + + mockDebugger2.resps = append(mockDebugger2.resps[:0], expectedResponse) + + var debuggeeId string = "debuggeeId-997255898" + var clientVersion string = "clientVersion-1506231196" + var request = &clouddebuggerpb.ListBreakpointsRequest{ + DebuggeeId: debuggeeId, + ClientVersion: clientVersion, + } + + c, err := NewDebugger2Client(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListBreakpoints(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockDebugger2.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestDebugger2ListBreakpointsError(t *testing.T) { + errCode := codes.Internal + mockDebugger2.err = grpc.Errorf(errCode, "test error") + + var debuggeeId string = "debuggeeId-997255898" + var clientVersion string = "clientVersion-1506231196" + var request = &clouddebuggerpb.ListBreakpointsRequest{ + DebuggeeId: debuggeeId, + ClientVersion: clientVersion, + } + + c, err := NewDebugger2Client(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListBreakpoints(context.Background(), request) + + if c := grpc.Code(err); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestDebugger2ListDebuggees(t *testing.T) { + var expectedResponse *clouddebuggerpb.ListDebuggeesResponse = &clouddebuggerpb.ListDebuggeesResponse{} + + mockDebugger2.err = nil + mockDebugger2.reqs = nil + + mockDebugger2.resps = append(mockDebugger2.resps[:0], expectedResponse) + + var project string = "project-309310695" + var clientVersion string = "clientVersion-1506231196" + var request = 
&clouddebuggerpb.ListDebuggeesRequest{ + Project: project, + ClientVersion: clientVersion, + } + + c, err := NewDebugger2Client(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListDebuggees(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockDebugger2.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestDebugger2ListDebuggeesError(t *testing.T) { + errCode := codes.Internal + mockDebugger2.err = grpc.Errorf(errCode, "test error") + + var project string = "project-309310695" + var clientVersion string = "clientVersion-1506231196" + var request = &clouddebuggerpb.ListDebuggeesRequest{ + Project: project, + ClientVersion: clientVersion, + } + + c, err := NewDebugger2Client(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListDebuggees(context.Background(), request) + + if c := grpc.Code(err); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestController2RegisterDebuggee(t *testing.T) { + var expectedResponse *clouddebuggerpb.RegisterDebuggeeResponse = &clouddebuggerpb.RegisterDebuggeeResponse{} + + mockController2.err = nil + mockController2.reqs = nil + + mockController2.resps = append(mockController2.resps[:0], expectedResponse) + + var debuggee *clouddebuggerpb.Debuggee = &clouddebuggerpb.Debuggee{} + var request = &clouddebuggerpb.RegisterDebuggeeRequest{ + Debuggee: debuggee, + } + + c, err := NewController2Client(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.RegisterDebuggee(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockController2.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, 
want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestController2RegisterDebuggeeError(t *testing.T) { + errCode := codes.Internal + mockController2.err = grpc.Errorf(errCode, "test error") + + var debuggee *clouddebuggerpb.Debuggee = &clouddebuggerpb.Debuggee{} + var request = &clouddebuggerpb.RegisterDebuggeeRequest{ + Debuggee: debuggee, + } + + c, err := NewController2Client(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.RegisterDebuggee(context.Background(), request) + + if c := grpc.Code(err); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestController2ListActiveBreakpoints(t *testing.T) { + var nextWaitToken string = "nextWaitToken1006864251" + var waitExpired bool = false + var expectedResponse = &clouddebuggerpb.ListActiveBreakpointsResponse{ + NextWaitToken: nextWaitToken, + WaitExpired: waitExpired, + } + + mockController2.err = nil + mockController2.reqs = nil + + mockController2.resps = append(mockController2.resps[:0], expectedResponse) + + var debuggeeId string = "debuggeeId-997255898" + var request = &clouddebuggerpb.ListActiveBreakpointsRequest{ + DebuggeeId: debuggeeId, + } + + c, err := NewController2Client(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListActiveBreakpoints(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockController2.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestController2ListActiveBreakpointsError(t *testing.T) { + errCode := codes.Internal + mockController2.err = grpc.Errorf(errCode, "test error") + + var debuggeeId string = "debuggeeId-997255898" + var request = 
&clouddebuggerpb.ListActiveBreakpointsRequest{ + DebuggeeId: debuggeeId, + } + + c, err := NewController2Client(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListActiveBreakpoints(context.Background(), request) + + if c := grpc.Code(err); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestController2UpdateActiveBreakpoint(t *testing.T) { + var expectedResponse *clouddebuggerpb.UpdateActiveBreakpointResponse = &clouddebuggerpb.UpdateActiveBreakpointResponse{} + + mockController2.err = nil + mockController2.reqs = nil + + mockController2.resps = append(mockController2.resps[:0], expectedResponse) + + var debuggeeId string = "debuggeeId-997255898" + var breakpoint *clouddebuggerpb.Breakpoint = &clouddebuggerpb.Breakpoint{} + var request = &clouddebuggerpb.UpdateActiveBreakpointRequest{ + DebuggeeId: debuggeeId, + Breakpoint: breakpoint, + } + + c, err := NewController2Client(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.UpdateActiveBreakpoint(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockController2.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestController2UpdateActiveBreakpointError(t *testing.T) { + errCode := codes.Internal + mockController2.err = grpc.Errorf(errCode, "test error") + + var debuggeeId string = "debuggeeId-997255898" + var breakpoint *clouddebuggerpb.Breakpoint = &clouddebuggerpb.Breakpoint{} + var request = &clouddebuggerpb.UpdateActiveBreakpointRequest{ + DebuggeeId: debuggeeId, + Breakpoint: breakpoint, + } + + c, err := NewController2Client(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.UpdateActiveBreakpoint(context.Background(), request) 
+ + if c := grpc.Code(err); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} diff --git a/vendor/cloud.google.com/go/errorreporting/apiv1beta1/doc.go b/vendor/cloud.google.com/go/errorreporting/apiv1beta1/doc.go new file mode 100644 index 00000000..58044e68 --- /dev/null +++ b/vendor/cloud.google.com/go/errorreporting/apiv1beta1/doc.go @@ -0,0 +1,35 @@ +// Copyright 2017, Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +// Package errorreporting is an experimental, auto-generated package for the +// errorreporting API. +// +// Stackdriver Error Reporting groups and counts similar errors from cloud +// services. The Stackdriver Error Reporting API provides a way to report new +// errors and read access to error groups and their associated errors. 
+package errorreporting // import "cloud.google.com/go/errorreporting/apiv1beta1" + +import ( + "golang.org/x/net/context" + "google.golang.org/grpc/metadata" +) + +func insertXGoog(ctx context.Context, val string) context.Context { + md, _ := metadata.FromContext(ctx) + md = md.Copy() + md["x-goog-api-client"] = []string{val} + return metadata.NewContext(ctx, md) +} diff --git a/vendor/cloud.google.com/go/errorreporting/apiv1beta1/error_group_client.go b/vendor/cloud.google.com/go/errorreporting/apiv1beta1/error_group_client.go new file mode 100644 index 00000000..6144da3a --- /dev/null +++ b/vendor/cloud.google.com/go/errorreporting/apiv1beta1/error_group_client.go @@ -0,0 +1,166 @@ +// Copyright 2017, Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package errorreporting + +import ( + "time" + + "cloud.google.com/go/internal/version" + gax "github.com/googleapis/gax-go" + "golang.org/x/net/context" + "google.golang.org/api/option" + "google.golang.org/api/transport" + clouderrorreportingpb "google.golang.org/genproto/googleapis/devtools/clouderrorreporting/v1beta1" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" +) + +var ( + errorGroupGroupPathTemplate = gax.MustCompilePathTemplate("projects/{project}/groups/{group}") +) + +// ErrorGroupCallOptions contains the retry settings for each method of ErrorGroupClient. 
+type ErrorGroupCallOptions struct { + GetGroup []gax.CallOption + UpdateGroup []gax.CallOption +} + +func defaultErrorGroupClientOptions() []option.ClientOption { + return []option.ClientOption{ + option.WithEndpoint("clouderrorreporting.googleapis.com:443"), + option.WithScopes( + "https://www.googleapis.com/auth/cloud-platform", + ), + } +} + +func defaultErrorGroupCallOptions() *ErrorGroupCallOptions { + retry := map[[2]string][]gax.CallOption{ + {"default", "idempotent"}: { + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.3, + }) + }), + }, + } + return &ErrorGroupCallOptions{ + GetGroup: retry[[2]string{"default", "idempotent"}], + UpdateGroup: retry[[2]string{"default", "idempotent"}], + } +} + +// ErrorGroupClient is a client for interacting with Stackdriver Error Reporting API. +type ErrorGroupClient struct { + // The connection to the service. + conn *grpc.ClientConn + + // The gRPC API client. + errorGroupClient clouderrorreportingpb.ErrorGroupServiceClient + + // The call options for this service. + CallOptions *ErrorGroupCallOptions + + // The metadata to be sent with each request. + xGoogHeader string +} + +// NewErrorGroupClient creates a new error group service client. +// +// Service for retrieving and updating individual error groups. +func NewErrorGroupClient(ctx context.Context, opts ...option.ClientOption) (*ErrorGroupClient, error) { + conn, err := transport.DialGRPC(ctx, append(defaultErrorGroupClientOptions(), opts...)...) + if err != nil { + return nil, err + } + c := &ErrorGroupClient{ + conn: conn, + CallOptions: defaultErrorGroupCallOptions(), + + errorGroupClient: clouderrorreportingpb.NewErrorGroupServiceClient(conn), + } + c.SetGoogleClientInfo() + return c, nil +} + +// Connection returns the client's connection to the API service. 
+func (c *ErrorGroupClient) Connection() *grpc.ClientConn { + return c.conn +} + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *ErrorGroupClient) Close() error { + return c.conn.Close() +} + +// SetGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *ErrorGroupClient) SetGoogleClientInfo(keyval ...string) { + kv := append([]string{"gl-go", version.Go()}, keyval...) + kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", "") + c.xGoogHeader = gax.XGoogHeader(kv...) +} + +// ErrorGroupGroupPath returns the path for the group resource. +func ErrorGroupGroupPath(project, group string) string { + path, err := errorGroupGroupPathTemplate.Render(map[string]string{ + "project": project, + "group": group, + }) + if err != nil { + panic(err) + } + return path +} + +// GetGroup get the specified group. +func (c *ErrorGroupClient) GetGroup(ctx context.Context, req *clouderrorreportingpb.GetGroupRequest) (*clouderrorreportingpb.ErrorGroup, error) { + ctx = insertXGoog(ctx, c.xGoogHeader) + var resp *clouderrorreportingpb.ErrorGroup + err := gax.Invoke(ctx, func(ctx context.Context) error { + var err error + resp, err = c.errorGroupClient.GetGroup(ctx, req) + return err + }, c.CallOptions.GetGroup...) + if err != nil { + return nil, err + } + return resp, nil +} + +// UpdateGroup replace the data for the specified group. +// Fails if the group does not exist. 
+func (c *ErrorGroupClient) UpdateGroup(ctx context.Context, req *clouderrorreportingpb.UpdateGroupRequest) (*clouderrorreportingpb.ErrorGroup, error) { + ctx = insertXGoog(ctx, c.xGoogHeader) + var resp *clouderrorreportingpb.ErrorGroup + err := gax.Invoke(ctx, func(ctx context.Context) error { + var err error + resp, err = c.errorGroupClient.UpdateGroup(ctx, req) + return err + }, c.CallOptions.UpdateGroup...) + if err != nil { + return nil, err + } + return resp, nil +} diff --git a/vendor/cloud.google.com/go/errorreporting/apiv1beta1/error_group_client_example_test.go b/vendor/cloud.google.com/go/errorreporting/apiv1beta1/error_group_client_example_test.go new file mode 100644 index 00000000..bc8619ff --- /dev/null +++ b/vendor/cloud.google.com/go/errorreporting/apiv1beta1/error_group_client_example_test.go @@ -0,0 +1,69 @@ +// Copyright 2017, Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package errorreporting_test + +import ( + "cloud.google.com/go/errorreporting/apiv1beta1" + "golang.org/x/net/context" + clouderrorreportingpb "google.golang.org/genproto/googleapis/devtools/clouderrorreporting/v1beta1" +) + +func ExampleNewErrorGroupClient() { + ctx := context.Background() + c, err := errorreporting.NewErrorGroupClient(ctx) + if err != nil { + // TODO: Handle error. + } + // TODO: Use client. 
+ _ = c +} + +func ExampleErrorGroupClient_GetGroup() { + ctx := context.Background() + c, err := errorreporting.NewErrorGroupClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &clouderrorreportingpb.GetGroupRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.GetGroup(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleErrorGroupClient_UpdateGroup() { + ctx := context.Background() + c, err := errorreporting.NewErrorGroupClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &clouderrorreportingpb.UpdateGroupRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.UpdateGroup(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} diff --git a/vendor/cloud.google.com/go/errorreporting/apiv1beta1/error_stats_client.go b/vendor/cloud.google.com/go/errorreporting/apiv1beta1/error_stats_client.go new file mode 100644 index 00000000..38cdcd05 --- /dev/null +++ b/vendor/cloud.google.com/go/errorreporting/apiv1beta1/error_stats_client.go @@ -0,0 +1,306 @@ +// Copyright 2017, Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. 
+ +package errorreporting + +import ( + "math" + "time" + + "cloud.google.com/go/internal/version" + gax "github.com/googleapis/gax-go" + "golang.org/x/net/context" + "google.golang.org/api/iterator" + "google.golang.org/api/option" + "google.golang.org/api/transport" + clouderrorreportingpb "google.golang.org/genproto/googleapis/devtools/clouderrorreporting/v1beta1" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" +) + +var ( + errorStatsProjectPathTemplate = gax.MustCompilePathTemplate("projects/{project}") +) + +// ErrorStatsCallOptions contains the retry settings for each method of ErrorStatsClient. +type ErrorStatsCallOptions struct { + ListGroupStats []gax.CallOption + ListEvents []gax.CallOption + DeleteEvents []gax.CallOption +} + +func defaultErrorStatsClientOptions() []option.ClientOption { + return []option.ClientOption{ + option.WithEndpoint("clouderrorreporting.googleapis.com:443"), + option.WithScopes( + "https://www.googleapis.com/auth/cloud-platform", + ), + } +} + +func defaultErrorStatsCallOptions() *ErrorStatsCallOptions { + retry := map[[2]string][]gax.CallOption{ + {"default", "idempotent"}: { + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.3, + }) + }), + }, + } + return &ErrorStatsCallOptions{ + ListGroupStats: retry[[2]string{"default", "idempotent"}], + ListEvents: retry[[2]string{"default", "idempotent"}], + DeleteEvents: retry[[2]string{"default", "idempotent"}], + } +} + +// ErrorStatsClient is a client for interacting with Stackdriver Error Reporting API. +type ErrorStatsClient struct { + // The connection to the service. + conn *grpc.ClientConn + + // The gRPC API client. + errorStatsClient clouderrorreportingpb.ErrorStatsServiceClient + + // The call options for this service. 
+ CallOptions *ErrorStatsCallOptions + + // The metadata to be sent with each request. + xGoogHeader string +} + +// NewErrorStatsClient creates a new error stats service client. +// +// An API for retrieving and managing error statistics as well as data for +// individual events. +func NewErrorStatsClient(ctx context.Context, opts ...option.ClientOption) (*ErrorStatsClient, error) { + conn, err := transport.DialGRPC(ctx, append(defaultErrorStatsClientOptions(), opts...)...) + if err != nil { + return nil, err + } + c := &ErrorStatsClient{ + conn: conn, + CallOptions: defaultErrorStatsCallOptions(), + + errorStatsClient: clouderrorreportingpb.NewErrorStatsServiceClient(conn), + } + c.SetGoogleClientInfo() + return c, nil +} + +// Connection returns the client's connection to the API service. +func (c *ErrorStatsClient) Connection() *grpc.ClientConn { + return c.conn +} + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *ErrorStatsClient) Close() error { + return c.conn.Close() +} + +// SetGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *ErrorStatsClient) SetGoogleClientInfo(keyval ...string) { + kv := append([]string{"gl-go", version.Go()}, keyval...) + kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", "") + c.xGoogHeader = gax.XGoogHeader(kv...) +} + +// ErrorStatsProjectPath returns the path for the project resource. +func ErrorStatsProjectPath(project string) string { + path, err := errorStatsProjectPathTemplate.Render(map[string]string{ + "project": project, + }) + if err != nil { + panic(err) + } + return path +} + +// ListGroupStats lists the specified groups. 
+func (c *ErrorStatsClient) ListGroupStats(ctx context.Context, req *clouderrorreportingpb.ListGroupStatsRequest) *ErrorGroupStatsIterator { + ctx = insertXGoog(ctx, c.xGoogHeader) + it := &ErrorGroupStatsIterator{} + it.InternalFetch = func(pageSize int, pageToken string) ([]*clouderrorreportingpb.ErrorGroupStats, string, error) { + var resp *clouderrorreportingpb.ListGroupStatsResponse + req.PageToken = pageToken + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context) error { + var err error + resp, err = c.errorStatsClient.ListGroupStats(ctx, req) + return err + }, c.CallOptions.ListGroupStats...) + if err != nil { + return nil, "", err + } + return resp.ErrorGroupStats, resp.NextPageToken, nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + return it +} + +// ListEvents lists the specified events. +func (c *ErrorStatsClient) ListEvents(ctx context.Context, req *clouderrorreportingpb.ListEventsRequest) *ErrorEventIterator { + ctx = insertXGoog(ctx, c.xGoogHeader) + it := &ErrorEventIterator{} + it.InternalFetch = func(pageSize int, pageToken string) ([]*clouderrorreportingpb.ErrorEvent, string, error) { + var resp *clouderrorreportingpb.ListEventsResponse + req.PageToken = pageToken + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context) error { + var err error + resp, err = c.errorStatsClient.ListEvents(ctx, req) + return err + }, c.CallOptions.ListEvents...) 
+ if err != nil { + return nil, "", err + } + return resp.ErrorEvents, resp.NextPageToken, nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + return it +} + +// DeleteEvents deletes all error events of a given project. +func (c *ErrorStatsClient) DeleteEvents(ctx context.Context, req *clouderrorreportingpb.DeleteEventsRequest) (*clouderrorreportingpb.DeleteEventsResponse, error) { + ctx = insertXGoog(ctx, c.xGoogHeader) + var resp *clouderrorreportingpb.DeleteEventsResponse + err := gax.Invoke(ctx, func(ctx context.Context) error { + var err error + resp, err = c.errorStatsClient.DeleteEvents(ctx, req) + return err + }, c.CallOptions.DeleteEvents...) + if err != nil { + return nil, err + } + return resp, nil +} + +// ErrorEventIterator manages a stream of *clouderrorreportingpb.ErrorEvent. +type ErrorEventIterator struct { + items []*clouderrorreportingpb.ErrorEvent + pageInfo *iterator.PageInfo + nextFunc func() error + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*clouderrorreportingpb.ErrorEvent, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *ErrorEventIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. 
Once Next returns Done, all subsequent calls will return Done. +func (it *ErrorEventIterator) Next() (*clouderrorreportingpb.ErrorEvent, error) { + var item *clouderrorreportingpb.ErrorEvent + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *ErrorEventIterator) bufLen() int { + return len(it.items) +} + +func (it *ErrorEventIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} + +// ErrorGroupStatsIterator manages a stream of *clouderrorreportingpb.ErrorGroupStats. +type ErrorGroupStatsIterator struct { + items []*clouderrorreportingpb.ErrorGroupStats + pageInfo *iterator.PageInfo + nextFunc func() error + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*clouderrorreportingpb.ErrorGroupStats, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *ErrorGroupStatsIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. 
+func (it *ErrorGroupStatsIterator) Next() (*clouderrorreportingpb.ErrorGroupStats, error) { + var item *clouderrorreportingpb.ErrorGroupStats + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *ErrorGroupStatsIterator) bufLen() int { + return len(it.items) +} + +func (it *ErrorGroupStatsIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} diff --git a/vendor/cloud.google.com/go/errorreporting/apiv1beta1/error_stats_client_example_test.go b/vendor/cloud.google.com/go/errorreporting/apiv1beta1/error_stats_client_example_test.go new file mode 100644 index 00000000..6f7e9e60 --- /dev/null +++ b/vendor/cloud.google.com/go/errorreporting/apiv1beta1/error_stats_client_example_test.go @@ -0,0 +1,95 @@ +// Copyright 2017, Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package errorreporting_test + +import ( + "cloud.google.com/go/errorreporting/apiv1beta1" + "golang.org/x/net/context" + clouderrorreportingpb "google.golang.org/genproto/googleapis/devtools/clouderrorreporting/v1beta1" +) + +func ExampleNewErrorStatsClient() { + ctx := context.Background() + c, err := errorreporting.NewErrorStatsClient(ctx) + if err != nil { + // TODO: Handle error. + } + // TODO: Use client. 
+ _ = c +} + +func ExampleErrorStatsClient_ListGroupStats() { + ctx := context.Background() + c, err := errorreporting.NewErrorStatsClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &clouderrorreportingpb.ListGroupStatsRequest{ + // TODO: Fill request struct fields. + } + it := c.ListGroupStats(ctx, req) + for { + resp, err := it.Next() + if err != nil { + // TODO: Handle error. + break + } + // TODO: Use resp. + _ = resp + } +} + +func ExampleErrorStatsClient_ListEvents() { + ctx := context.Background() + c, err := errorreporting.NewErrorStatsClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &clouderrorreportingpb.ListEventsRequest{ + // TODO: Fill request struct fields. + } + it := c.ListEvents(ctx, req) + for { + resp, err := it.Next() + if err != nil { + // TODO: Handle error. + break + } + // TODO: Use resp. + _ = resp + } +} + +func ExampleErrorStatsClient_DeleteEvents() { + ctx := context.Background() + c, err := errorreporting.NewErrorStatsClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &clouderrorreportingpb.DeleteEventsRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.DeleteEvents(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} diff --git a/vendor/cloud.google.com/go/errorreporting/apiv1beta1/mock_test.go b/vendor/cloud.google.com/go/errorreporting/apiv1beta1/mock_test.go new file mode 100644 index 00000000..491f7b63 --- /dev/null +++ b/vendor/cloud.google.com/go/errorreporting/apiv1beta1/mock_test.go @@ -0,0 +1,547 @@ +// Copyright 2017, Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package errorreporting + +import ( + clouderrorreportingpb "google.golang.org/genproto/googleapis/devtools/clouderrorreporting/v1beta1" +) + +import ( + "flag" + "io" + "log" + "net" + "os" + "testing" + + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes" + "golang.org/x/net/context" + "google.golang.org/api/option" + status "google.golang.org/genproto/googleapis/rpc/status" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" +) + +var _ = io.EOF +var _ = ptypes.MarshalAny +var _ status.Status + +type mockErrorGroupServer struct { + // Embed for forward compatibility. + // Tests will keep working if more methods are added + // in the future. + clouderrorreportingpb.ErrorGroupServiceServer + + reqs []proto.Message + + // If set, all calls return this error. 
+ err error + + // responses to return if err == nil + resps []proto.Message +} + +func (s *mockErrorGroupServer) GetGroup(_ context.Context, req *clouderrorreportingpb.GetGroupRequest) (*clouderrorreportingpb.ErrorGroup, error) { + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*clouderrorreportingpb.ErrorGroup), nil +} + +func (s *mockErrorGroupServer) UpdateGroup(_ context.Context, req *clouderrorreportingpb.UpdateGroupRequest) (*clouderrorreportingpb.ErrorGroup, error) { + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*clouderrorreportingpb.ErrorGroup), nil +} + +type mockErrorStatsServer struct { + // Embed for forward compatibility. + // Tests will keep working if more methods are added + // in the future. + clouderrorreportingpb.ErrorStatsServiceServer + + reqs []proto.Message + + // If set, all calls return this error. + err error + + // responses to return if err == nil + resps []proto.Message +} + +func (s *mockErrorStatsServer) ListGroupStats(_ context.Context, req *clouderrorreportingpb.ListGroupStatsRequest) (*clouderrorreportingpb.ListGroupStatsResponse, error) { + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*clouderrorreportingpb.ListGroupStatsResponse), nil +} + +func (s *mockErrorStatsServer) ListEvents(_ context.Context, req *clouderrorreportingpb.ListEventsRequest) (*clouderrorreportingpb.ListEventsResponse, error) { + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*clouderrorreportingpb.ListEventsResponse), nil +} + +func (s *mockErrorStatsServer) DeleteEvents(_ context.Context, req *clouderrorreportingpb.DeleteEventsRequest) (*clouderrorreportingpb.DeleteEventsResponse, error) { + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*clouderrorreportingpb.DeleteEventsResponse), nil +} + +type mockReportErrorsServer 
struct { + // Embed for forward compatibility. + // Tests will keep working if more methods are added + // in the future. + clouderrorreportingpb.ReportErrorsServiceServer + + reqs []proto.Message + + // If set, all calls return this error. + err error + + // responses to return if err == nil + resps []proto.Message +} + +func (s *mockReportErrorsServer) ReportErrorEvent(_ context.Context, req *clouderrorreportingpb.ReportErrorEventRequest) (*clouderrorreportingpb.ReportErrorEventResponse, error) { + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*clouderrorreportingpb.ReportErrorEventResponse), nil +} + +// clientOpt is the option tests should use to connect to the test server. +// It is initialized by TestMain. +var clientOpt option.ClientOption + +var ( + mockErrorGroup mockErrorGroupServer + mockErrorStats mockErrorStatsServer + mockReportErrors mockReportErrorsServer +) + +func TestMain(m *testing.M) { + flag.Parse() + + serv := grpc.NewServer() + clouderrorreportingpb.RegisterErrorGroupServiceServer(serv, &mockErrorGroup) + clouderrorreportingpb.RegisterErrorStatsServiceServer(serv, &mockErrorStats) + clouderrorreportingpb.RegisterReportErrorsServiceServer(serv, &mockReportErrors) + + lis, err := net.Listen("tcp", "localhost:0") + if err != nil { + log.Fatal(err) + } + go serv.Serve(lis) + + conn, err := grpc.Dial(lis.Addr().String(), grpc.WithInsecure()) + if err != nil { + log.Fatal(err) + } + clientOpt = option.WithGRPCConn(conn) + + os.Exit(m.Run()) +} + +func TestErrorGroupServiceGetGroup(t *testing.T) { + var name string = "name3373707" + var groupId string = "groupId506361563" + var expectedResponse = &clouderrorreportingpb.ErrorGroup{ + Name: name, + GroupId: groupId, + } + + mockErrorGroup.err = nil + mockErrorGroup.reqs = nil + + mockErrorGroup.resps = append(mockErrorGroup.resps[:0], expectedResponse) + + var formattedGroupName string = ErrorGroupGroupPath("[PROJECT]", "[GROUP]") + var request = 
&clouderrorreportingpb.GetGroupRequest{ + GroupName: formattedGroupName, + } + + c, err := NewErrorGroupClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.GetGroup(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockErrorGroup.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestErrorGroupServiceGetGroupError(t *testing.T) { + errCode := codes.Internal + mockErrorGroup.err = grpc.Errorf(errCode, "test error") + + var formattedGroupName string = ErrorGroupGroupPath("[PROJECT]", "[GROUP]") + var request = &clouderrorreportingpb.GetGroupRequest{ + GroupName: formattedGroupName, + } + + c, err := NewErrorGroupClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.GetGroup(context.Background(), request) + + if c := grpc.Code(err); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestErrorGroupServiceUpdateGroup(t *testing.T) { + var name string = "name3373707" + var groupId string = "groupId506361563" + var expectedResponse = &clouderrorreportingpb.ErrorGroup{ + Name: name, + GroupId: groupId, + } + + mockErrorGroup.err = nil + mockErrorGroup.reqs = nil + + mockErrorGroup.resps = append(mockErrorGroup.resps[:0], expectedResponse) + + var group *clouderrorreportingpb.ErrorGroup = &clouderrorreportingpb.ErrorGroup{} + var request = &clouderrorreportingpb.UpdateGroupRequest{ + Group: group, + } + + c, err := NewErrorGroupClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.UpdateGroup(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockErrorGroup.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, 
want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestErrorGroupServiceUpdateGroupError(t *testing.T) { + errCode := codes.Internal + mockErrorGroup.err = grpc.Errorf(errCode, "test error") + + var group *clouderrorreportingpb.ErrorGroup = &clouderrorreportingpb.ErrorGroup{} + var request = &clouderrorreportingpb.UpdateGroupRequest{ + Group: group, + } + + c, err := NewErrorGroupClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.UpdateGroup(context.Background(), request) + + if c := grpc.Code(err); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestErrorStatsServiceListGroupStats(t *testing.T) { + var nextPageToken string = "" + var errorGroupStatsElement *clouderrorreportingpb.ErrorGroupStats = &clouderrorreportingpb.ErrorGroupStats{} + var errorGroupStats = []*clouderrorreportingpb.ErrorGroupStats{errorGroupStatsElement} + var expectedResponse = &clouderrorreportingpb.ListGroupStatsResponse{ + NextPageToken: nextPageToken, + ErrorGroupStats: errorGroupStats, + } + + mockErrorStats.err = nil + mockErrorStats.reqs = nil + + mockErrorStats.resps = append(mockErrorStats.resps[:0], expectedResponse) + + var formattedProjectName string = ErrorStatsProjectPath("[PROJECT]") + var timeRange *clouderrorreportingpb.QueryTimeRange = &clouderrorreportingpb.QueryTimeRange{} + var request = &clouderrorreportingpb.ListGroupStatsRequest{ + ProjectName: formattedProjectName, + TimeRange: timeRange, + } + + c, err := NewErrorStatsClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListGroupStats(context.Background(), request).Next() + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockErrorStats.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + want := 
(interface{})(expectedResponse.ErrorGroupStats[0]) + got := (interface{})(resp) + var ok bool + + switch want := (want).(type) { + case proto.Message: + ok = proto.Equal(want, got.(proto.Message)) + default: + ok = want == got + } + if !ok { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestErrorStatsServiceListGroupStatsError(t *testing.T) { + errCode := codes.Internal + mockErrorStats.err = grpc.Errorf(errCode, "test error") + + var formattedProjectName string = ErrorStatsProjectPath("[PROJECT]") + var timeRange *clouderrorreportingpb.QueryTimeRange = &clouderrorreportingpb.QueryTimeRange{} + var request = &clouderrorreportingpb.ListGroupStatsRequest{ + ProjectName: formattedProjectName, + TimeRange: timeRange, + } + + c, err := NewErrorStatsClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListGroupStats(context.Background(), request).Next() + + if c := grpc.Code(err); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestErrorStatsServiceListEvents(t *testing.T) { + var nextPageToken string = "" + var errorEventsElement *clouderrorreportingpb.ErrorEvent = &clouderrorreportingpb.ErrorEvent{} + var errorEvents = []*clouderrorreportingpb.ErrorEvent{errorEventsElement} + var expectedResponse = &clouderrorreportingpb.ListEventsResponse{ + NextPageToken: nextPageToken, + ErrorEvents: errorEvents, + } + + mockErrorStats.err = nil + mockErrorStats.reqs = nil + + mockErrorStats.resps = append(mockErrorStats.resps[:0], expectedResponse) + + var formattedProjectName string = ErrorStatsProjectPath("[PROJECT]") + var groupId string = "groupId506361563" + var request = &clouderrorreportingpb.ListEventsRequest{ + ProjectName: formattedProjectName, + GroupId: groupId, + } + + c, err := NewErrorStatsClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListEvents(context.Background(), request).Next() + + if err != nil { + 
t.Fatal(err) + } + + if want, got := request, mockErrorStats.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + want := (interface{})(expectedResponse.ErrorEvents[0]) + got := (interface{})(resp) + var ok bool + + switch want := (want).(type) { + case proto.Message: + ok = proto.Equal(want, got.(proto.Message)) + default: + ok = want == got + } + if !ok { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestErrorStatsServiceListEventsError(t *testing.T) { + errCode := codes.Internal + mockErrorStats.err = grpc.Errorf(errCode, "test error") + + var formattedProjectName string = ErrorStatsProjectPath("[PROJECT]") + var groupId string = "groupId506361563" + var request = &clouderrorreportingpb.ListEventsRequest{ + ProjectName: formattedProjectName, + GroupId: groupId, + } + + c, err := NewErrorStatsClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListEvents(context.Background(), request).Next() + + if c := grpc.Code(err); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestErrorStatsServiceDeleteEvents(t *testing.T) { + var expectedResponse *clouderrorreportingpb.DeleteEventsResponse = &clouderrorreportingpb.DeleteEventsResponse{} + + mockErrorStats.err = nil + mockErrorStats.reqs = nil + + mockErrorStats.resps = append(mockErrorStats.resps[:0], expectedResponse) + + var formattedProjectName string = ErrorStatsProjectPath("[PROJECT]") + var request = &clouderrorreportingpb.DeleteEventsRequest{ + ProjectName: formattedProjectName, + } + + c, err := NewErrorStatsClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.DeleteEvents(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockErrorStats.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; 
!proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestErrorStatsServiceDeleteEventsError(t *testing.T) { + errCode := codes.Internal + mockErrorStats.err = grpc.Errorf(errCode, "test error") + + var formattedProjectName string = ErrorStatsProjectPath("[PROJECT]") + var request = &clouderrorreportingpb.DeleteEventsRequest{ + ProjectName: formattedProjectName, + } + + c, err := NewErrorStatsClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.DeleteEvents(context.Background(), request) + + if c := grpc.Code(err); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestReportErrorsServiceReportErrorEvent(t *testing.T) { + var expectedResponse *clouderrorreportingpb.ReportErrorEventResponse = &clouderrorreportingpb.ReportErrorEventResponse{} + + mockReportErrors.err = nil + mockReportErrors.reqs = nil + + mockReportErrors.resps = append(mockReportErrors.resps[:0], expectedResponse) + + var formattedProjectName string = ReportErrorsProjectPath("[PROJECT]") + var event *clouderrorreportingpb.ReportedErrorEvent = &clouderrorreportingpb.ReportedErrorEvent{} + var request = &clouderrorreportingpb.ReportErrorEventRequest{ + ProjectName: formattedProjectName, + Event: event, + } + + c, err := NewReportErrorsClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ReportErrorEvent(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockReportErrors.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestReportErrorsServiceReportErrorEventError(t *testing.T) { + errCode := codes.Internal + mockReportErrors.err = grpc.Errorf(errCode, "test error") + + var formattedProjectName string = 
ReportErrorsProjectPath("[PROJECT]") + var event *clouderrorreportingpb.ReportedErrorEvent = &clouderrorreportingpb.ReportedErrorEvent{} + var request = &clouderrorreportingpb.ReportErrorEventRequest{ + ProjectName: formattedProjectName, + Event: event, + } + + c, err := NewReportErrorsClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ReportErrorEvent(context.Background(), request) + + if c := grpc.Code(err); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} diff --git a/vendor/cloud.google.com/go/errorreporting/apiv1beta1/report_errors_client.go b/vendor/cloud.google.com/go/errorreporting/apiv1beta1/report_errors_client.go new file mode 100644 index 00000000..fb19705d --- /dev/null +++ b/vendor/cloud.google.com/go/errorreporting/apiv1beta1/report_errors_client.go @@ -0,0 +1,153 @@ +// Copyright 2017, Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. 
+ +package errorreporting + +import ( + "time" + + "cloud.google.com/go/internal/version" + gax "github.com/googleapis/gax-go" + "golang.org/x/net/context" + "google.golang.org/api/option" + "google.golang.org/api/transport" + clouderrorreportingpb "google.golang.org/genproto/googleapis/devtools/clouderrorreporting/v1beta1" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" +) + +var ( + reportErrorsProjectPathTemplate = gax.MustCompilePathTemplate("projects/{project}") +) + +// ReportErrorsCallOptions contains the retry settings for each method of ReportErrorsClient. +type ReportErrorsCallOptions struct { + ReportErrorEvent []gax.CallOption +} + +func defaultReportErrorsClientOptions() []option.ClientOption { + return []option.ClientOption{ + option.WithEndpoint("clouderrorreporting.googleapis.com:443"), + option.WithScopes( + "https://www.googleapis.com/auth/cloud-platform", + ), + } +} + +func defaultReportErrorsCallOptions() *ReportErrorsCallOptions { + retry := map[[2]string][]gax.CallOption{ + {"default", "non_idempotent"}: { + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.3, + }) + }), + }, + } + return &ReportErrorsCallOptions{ + ReportErrorEvent: retry[[2]string{"default", "non_idempotent"}], + } +} + +// ReportErrorsClient is a client for interacting with Stackdriver Error Reporting API. +type ReportErrorsClient struct { + // The connection to the service. + conn *grpc.ClientConn + + // The gRPC API client. + reportErrorsClient clouderrorreportingpb.ReportErrorsServiceClient + + // The call options for this service. + CallOptions *ReportErrorsCallOptions + + // The metadata to be sent with each request. + xGoogHeader string +} + +// NewReportErrorsClient creates a new report errors service client. +// +// An API for reporting error events. 
+func NewReportErrorsClient(ctx context.Context, opts ...option.ClientOption) (*ReportErrorsClient, error) { + conn, err := transport.DialGRPC(ctx, append(defaultReportErrorsClientOptions(), opts...)...) + if err != nil { + return nil, err + } + c := &ReportErrorsClient{ + conn: conn, + CallOptions: defaultReportErrorsCallOptions(), + + reportErrorsClient: clouderrorreportingpb.NewReportErrorsServiceClient(conn), + } + c.SetGoogleClientInfo() + return c, nil +} + +// Connection returns the client's connection to the API service. +func (c *ReportErrorsClient) Connection() *grpc.ClientConn { + return c.conn +} + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *ReportErrorsClient) Close() error { + return c.conn.Close() +} + +// SetGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *ReportErrorsClient) SetGoogleClientInfo(keyval ...string) { + kv := append([]string{"gl-go", version.Go()}, keyval...) + kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", "") + c.xGoogHeader = gax.XGoogHeader(kv...) +} + +// ReportErrorsProjectPath returns the path for the project resource. +func ReportErrorsProjectPath(project string) string { + path, err := reportErrorsProjectPathTemplate.Render(map[string]string{ + "project": project, + }) + if err != nil { + panic(err) + } + return path +} + +// ReportErrorEvent report an individual error event. +// +// This endpoint accepts either an OAuth token, +// or an +// API key +// for authentication. To use an API key, append it to the URL as the value of +// a `key` parameter. For example: +//
POST https://clouderrorreporting.googleapis.com/v1beta1/projects/example-project/events:report?key=123ABC456
+func (c *ReportErrorsClient) ReportErrorEvent(ctx context.Context, req *clouderrorreportingpb.ReportErrorEventRequest) (*clouderrorreportingpb.ReportErrorEventResponse, error) { + ctx = insertXGoog(ctx, c.xGoogHeader) + var resp *clouderrorreportingpb.ReportErrorEventResponse + err := gax.Invoke(ctx, func(ctx context.Context) error { + var err error + resp, err = c.reportErrorsClient.ReportErrorEvent(ctx, req) + return err + }, c.CallOptions.ReportErrorEvent...) + if err != nil { + return nil, err + } + return resp, nil +} diff --git a/vendor/cloud.google.com/go/errorreporting/apiv1beta1/report_errors_client_example_test.go b/vendor/cloud.google.com/go/errorreporting/apiv1beta1/report_errors_client_example_test.go new file mode 100644 index 00000000..ed4cfc44 --- /dev/null +++ b/vendor/cloud.google.com/go/errorreporting/apiv1beta1/report_errors_client_example_test.go @@ -0,0 +1,51 @@ +// Copyright 2017, Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package errorreporting_test + +import ( + "cloud.google.com/go/errorreporting/apiv1beta1" + "golang.org/x/net/context" + clouderrorreportingpb "google.golang.org/genproto/googleapis/devtools/clouderrorreporting/v1beta1" +) + +func ExampleNewReportErrorsClient() { + ctx := context.Background() + c, err := errorreporting.NewReportErrorsClient(ctx) + if err != nil { + // TODO: Handle error. + } + // TODO: Use client. 
+ _ = c +} + +func ExampleReportErrorsClient_ReportErrorEvent() { + ctx := context.Background() + c, err := errorreporting.NewReportErrorsClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &clouderrorreportingpb.ReportErrorEventRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.ReportErrorEvent(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} diff --git a/vendor/cloud.google.com/go/errors/error_logging_test.go b/vendor/cloud.google.com/go/errors/error_logging_test.go new file mode 100644 index 00000000..5c420b55 --- /dev/null +++ b/vendor/cloud.google.com/go/errors/error_logging_test.go @@ -0,0 +1,202 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package errors + +import ( + "bytes" + "errors" + "log" + "strings" + "testing" + + "cloud.google.com/go/logging" + "golang.org/x/net/context" + "google.golang.org/api/option" +) + +type fakeLogger struct { + entry *logging.Entry + fail bool +} + +func (c *fakeLogger) LogSync(ctx context.Context, e logging.Entry) error { + if c.fail { + return errors.New("request failed") + } + c.entry = &e + return nil +} + +func newTestClientUsingLogging(c *fakeLogger) *Client { + newLoggerInterface = func(ctx context.Context, project string, opts ...option.ClientOption) (loggerInterface, error) { + return c, nil + } + t, err := NewClient(context.Background(), testProjectID, "myservice", "v1.000", true) + if err != nil { + panic(err) + } + t.RepanicDefault = false + return t +} + +func TestCatchNothingUsingLogging(t *testing.T) { + fl := &fakeLogger{} + c := newTestClientUsingLogging(fl) + defer func() { + e := fl.entry + if e != nil { + t.Errorf("got error report, expected none") + } + }() + defer c.Catch(ctx) +} + +func entryMessage(e *logging.Entry) string { + return e.Payload.(map[string]interface{})["message"].(string) +} + +func commonLoggingChecks(t *testing.T, e *logging.Entry, panickingFunction string) { + if e.Payload.(map[string]interface{})["serviceContext"].(map[string]string)["service"] != "myservice" { + t.Errorf("error report didn't contain service name") + } + if e.Payload.(map[string]interface{})["serviceContext"].(map[string]string)["version"] != "v1.000" { + t.Errorf("error report didn't contain version name") + } + if !strings.Contains(entryMessage(e), "hello, error") { + t.Errorf("error report didn't contain message") + } + if !strings.Contains(entryMessage(e), panickingFunction) { + t.Errorf("error report didn't contain stack trace") + } +} + +func TestCatchPanicUsingLogging(t *testing.T) { + fl := &fakeLogger{} + c := newTestClientUsingLogging(fl) + defer func() { + e := fl.entry + if e == nil { + t.Fatalf("got no error report, expected one") + } + 
commonLoggingChecks(t, e, "TestCatchPanic") + if !strings.Contains(entryMessage(e), "divide by zero") { + t.Errorf("error report didn't contain recovered value") + } + }() + defer c.Catch(ctx, WithMessage("hello, error")) + var x int + x = x / x +} + +func TestCatchPanicNilClientUsingLogging(t *testing.T) { + buf := new(bytes.Buffer) + log.SetOutput(buf) + defer func() { + recover() + body := buf.String() + if !strings.Contains(body, "divide by zero") { + t.Errorf("error report didn't contain recovered value") + } + if !strings.Contains(body, "hello, error") { + t.Errorf("error report didn't contain message") + } + if !strings.Contains(body, "TestCatchPanicNilClient") { + t.Errorf("error report didn't contain recovered value") + } + }() + var c *Client + defer c.Catch(ctx, WithMessage("hello, error")) + var x int + x = x / x +} + +func TestLogFailedReportsUsingLogging(t *testing.T) { + fl := &fakeLogger{fail: true} + c := newTestClientUsingLogging(fl) + buf := new(bytes.Buffer) + log.SetOutput(buf) + defer func() { + recover() + body := buf.String() + if !strings.Contains(body, "hello, error") { + t.Errorf("error report didn't contain message") + } + if !strings.Contains(body, "errors.TestLogFailedReports") { + t.Errorf("error report didn't contain stack trace") + } + if !strings.Contains(body, "divide by zero") { + t.Errorf("error report didn't contain recovered value") + } + }() + defer c.Catch(ctx, WithMessage("hello, error")) + var x int + x = x / x +} + +func TestCatchNilPanicUsingLogging(t *testing.T) { + fl := &fakeLogger{} + c := newTestClientUsingLogging(fl) + defer func() { + e := fl.entry + if e == nil { + t.Fatalf("got no error report, expected one") + } + commonLoggingChecks(t, e, "TestCatchNilPanic") + if !strings.Contains(entryMessage(e), "nil") { + t.Errorf("error report didn't contain recovered value") + } + }() + b := true + defer c.Catch(ctx, WithMessage("hello, error"), PanicFlag(&b)) + panic(nil) +} + +func TestNotCatchNilPanicUsingLogging(t 
*testing.T) { + fl := &fakeLogger{} + c := newTestClientUsingLogging(fl) + defer func() { + e := fl.entry + if e != nil { + t.Errorf("got error report, expected none") + } + }() + defer c.Catch(ctx, WithMessage("hello, error")) + panic(nil) +} + +func TestReportUsingLogging(t *testing.T) { + fl := &fakeLogger{} + c := newTestClientUsingLogging(fl) + c.Report(ctx, nil, "hello, ", "error") + e := fl.entry + if e == nil { + t.Fatalf("got no error report, expected one") + } + commonLoggingChecks(t, e, "TestReport") +} + +func TestReportfUsingLogging(t *testing.T) { + fl := &fakeLogger{} + c := newTestClientUsingLogging(fl) + c.Reportf(ctx, nil, "hello, error 2+%d=%d", 2, 2+2) + e := fl.entry + if e == nil { + t.Fatalf("got no error report, expected one") + } + commonLoggingChecks(t, e, "TestReportf") + if !strings.Contains(entryMessage(e), "2+2=4") { + t.Errorf("error report didn't contain formatted message") + } +} diff --git a/vendor/cloud.google.com/go/errors/errors.go b/vendor/cloud.google.com/go/errors/errors.go new file mode 100644 index 00000000..88a1be81 --- /dev/null +++ b/vendor/cloud.google.com/go/errors/errors.go @@ -0,0 +1,422 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package errors is a Google Stackdriver Error Reporting library. +// +// This package is still experimental and subject to change. +// +// See https://cloud.google.com/error-reporting/ for more information. 
+// +// To initialize a client, use the NewClient function. Generally you will want +// to do this on program initialization. The NewClient function takes as +// arguments a context, the project name, a service name, and a version string. +// The service name and version string identify the running program, and are +// included in error reports. The version string can be left empty. NewClient +// also takes a bool that indicates whether to report errors using Stackdriver +// Logging, which will result in errors appearing in both the logs and the error +// dashboard. This is useful if you are already a user of Stackdriver Logging. +// +// import "cloud.google.com/go/errors" +// ... +// errorsClient, err = errors.NewClient(ctx, projectID, "myservice", "v1.0", true) +// +// The client can recover panics in your program and report them as errors. +// To use this functionality, defer its Catch method, as you would any other +// function for recovering panics. +// +// func foo(ctx context.Context, ...) { +// defer errorsClient.Catch(ctx) +// ... +// } +// +// Catch writes an error report containing the recovered value and a stack trace +// to Stackdriver Error Reporting. +// +// There are various options you can add to the call to Catch that modify how +// panics are handled. +// +// WithMessage and WithMessagef add a custom message after the recovered value, +// using fmt.Sprint and fmt.Sprintf respectively. +// +// defer errorsClient.Catch(ctx, errors.WithMessagef("x=%d", x)) +// +// WithRequest fills in various fields in the error report with information +// about an http.Request that's being handled. +// +// defer errorsClient.Catch(ctx, errors.WithRequest(httpReq)) +// +// By default, after recovering a panic, Catch will panic again with the +// recovered value. You can turn off this behavior with the Repanic option. 
+// +// defer errorsClient.Catch(ctx, errors.Repanic(false)) +// +// You can also change the default behavior for the client by changing the +// RepanicDefault field. +// +// errorsClient.RepanicDefault = false +// +// It is also possible to write an error report directly without recovering a +// panic, using Report or Reportf. +// +// if err != nil { +// errorsClient.Reportf(ctx, r, "unexpected error %v", err) +// } +// +// If you try to write an error report with a nil client, or if the client +// fails to write the report to the server, the error report is logged using +// log.Println. +package errors // import "cloud.google.com/go/errors" + +import ( + "bytes" + "fmt" + "log" + "net/http" + "runtime" + "strings" + "time" + + api "cloud.google.com/go/errorreporting/apiv1beta1" + "cloud.google.com/go/internal/version" + "cloud.google.com/go/logging" + "github.com/golang/protobuf/ptypes/timestamp" + "golang.org/x/net/context" + "google.golang.org/api/option" + erpb "google.golang.org/genproto/googleapis/devtools/clouderrorreporting/v1beta1" +) + +const ( + userAgent = `gcloud-golang-errorreporting/20160701` +) + +type apiInterface interface { + ReportErrorEvent(ctx context.Context, req *erpb.ReportErrorEventRequest) (*erpb.ReportErrorEventResponse, error) +} + +var newApiInterface = func(ctx context.Context, opts ...option.ClientOption) (apiInterface, error) { + client, err := api.NewReportErrorsClient(ctx, opts...) + if err != nil { + return nil, err + } + client.SetGoogleClientInfo("gccl", version.Repo) + return client, nil +} + +type loggerInterface interface { + LogSync(ctx context.Context, e logging.Entry) error +} + +var newLoggerInterface = func(ctx context.Context, projectID string, opts ...option.ClientOption) (loggerInterface, error) { + lc, err := logging.NewClient(ctx, projectID, opts...) 
+ if err != nil { + return nil, fmt.Errorf("creating Logging client: %v", err) + } + l := lc.Logger("errorreports") + return l, nil +} + +type sender interface { + send(ctx context.Context, r *http.Request, message string) +} + +// errorApiSender sends error reports using the Stackdriver Error Reporting API. +type errorApiSender struct { + apiClient apiInterface + projectID string + serviceContext erpb.ServiceContext +} + +// loggingSender sends error reports using the Stackdriver Logging API. +type loggingSender struct { + logger loggerInterface + projectID string + serviceContext map[string]string +} + +type Client struct { + sender + // RepanicDefault determines whether Catch will re-panic after recovering a + // panic. This behavior can be overridden for an individual call to Catch using + // the Repanic option. + RepanicDefault bool +} + +func NewClient(ctx context.Context, projectID, serviceName, serviceVersion string, useLogging bool, opts ...option.ClientOption) (*Client, error) { + if useLogging { + l, err := newLoggerInterface(ctx, projectID, opts...) + if err != nil { + return nil, fmt.Errorf("creating Logging client: %v", err) + } + sender := &loggingSender{ + logger: l, + projectID: projectID, + serviceContext: map[string]string{ + "service": serviceName, + }, + } + if serviceVersion != "" { + sender.serviceContext["version"] = serviceVersion + } + c := &Client{ + sender: sender, + RepanicDefault: true, + } + return c, nil + } else { + a, err := newApiInterface(ctx, opts...) + if err != nil { + return nil, fmt.Errorf("creating Error Reporting client: %v", err) + } + c := &Client{ + sender: &errorApiSender{ + apiClient: a, + projectID: "projects/" + projectID, + serviceContext: erpb.ServiceContext{ + Service: serviceName, + Version: serviceVersion, + }, + }, + RepanicDefault: true, + } + return c, nil + } +} + +// An Option is an optional argument to Catch. 
+type Option interface { + isOption() +} + +// PanicFlag returns an Option that can inform Catch that a panic has occurred. +// If *p is true when Catch is called, an error report is made even if recover +// returns nil. This allows Catch to report an error for panic(nil). +// If p is nil, the option is ignored. +// +// Here is an example of how to use PanicFlag: +// +// func foo(ctx context.Context, ...) { +// hasPanicked := true +// defer errorsClient.Catch(ctx, errors.PanicFlag(&hasPanicked)) +// ... +// ... +// // We have reached the end of the function, so we're not panicking. +// hasPanicked = false +// } +func PanicFlag(p *bool) Option { return panicFlag{p} } + +type panicFlag struct { + *bool +} + +func (h panicFlag) isOption() {} + +// Repanic returns an Option that determines whether Catch will re-panic after +// it reports an error. This overrides the default in the client. +func Repanic(r bool) Option { return repanic(r) } + +type repanic bool + +func (r repanic) isOption() {} + +// WithRequest returns an Option that informs Catch or Report of an http.Request +// that is being handled. Information from the Request is included in the error +// report, if one is made. +func WithRequest(r *http.Request) Option { return withRequest{r} } + +type withRequest struct { + *http.Request +} + +func (w withRequest) isOption() {} + +// WithMessage returns an Option that sets a message to be included in the error +// report, if one is made. v is converted to a string with fmt.Sprint. +func WithMessage(v ...interface{}) Option { return message(v) } + +type message []interface{} + +func (m message) isOption() {} + +// WithMessagef returns an Option that sets a message to be included in the error +// report, if one is made. format and v are converted to a string with fmt.Sprintf. 
+func WithMessagef(format string, v ...interface{}) Option { return messagef{format, v} } + +type messagef struct { + format string + v []interface{} +} + +func (m messagef) isOption() {} + +// Catch tries to recover a panic; if it succeeds, it writes an error report. +// It should be called by deferring it, like any other function for recovering +// panics. +// +// Catch can be called concurrently with other calls to Catch, Report or Reportf. +func (c *Client) Catch(ctx context.Context, opt ...Option) { + panicked := false + for _, o := range opt { + switch o := o.(type) { + case panicFlag: + panicked = panicked || o.bool != nil && *o.bool + } + } + x := recover() + if x == nil && !panicked { + return + } + var ( + r *http.Request + shouldRepanic = true + messages = []string{fmt.Sprint(x)} + ) + if c != nil { + shouldRepanic = c.RepanicDefault + } + for _, o := range opt { + switch o := o.(type) { + case repanic: + shouldRepanic = bool(o) + case withRequest: + r = o.Request + case message: + messages = append(messages, fmt.Sprint(o...)) + case messagef: + messages = append(messages, fmt.Sprintf(o.format, o.v...)) + } + } + c.logInternal(ctx, r, true, strings.Join(messages, " ")) + if shouldRepanic { + panic(x) + } +} + +// Report writes an error report unconditionally, instead of only when a panic +// occurs. +// If r is non-nil, information from the Request is included in the error report. +// +// Report can be called concurrently with other calls to Catch, Report or Reportf. +func (c *Client) Report(ctx context.Context, r *http.Request, v ...interface{}) { + c.logInternal(ctx, r, false, fmt.Sprint(v...)) +} + +// Reportf writes an error report unconditionally, instead of only when a panic +// occurs. +// If r is non-nil, information from the Request is included in the error report. +// +// Reportf can be called concurrently with other calls to Catch, Report or Reportf. 
+func (c *Client) Reportf(ctx context.Context, r *http.Request, format string, v ...interface{}) { + c.logInternal(ctx, r, false, fmt.Sprintf(format, v...)) +} + +func (c *Client) logInternal(ctx context.Context, r *http.Request, isPanic bool, msg string) { + // limit the stack trace to 16k. + var buf [16384]byte + stack := buf[0:runtime.Stack(buf[:], false)] + message := msg + "\n" + chopStack(stack, isPanic) + if c == nil { + log.Println("Error report used nil client:", message) + return + } + c.send(ctx, r, message) +} + +func (s *loggingSender) send(ctx context.Context, r *http.Request, message string) { + payload := map[string]interface{}{ + "eventTime": time.Now().In(time.UTC).Format(time.RFC3339Nano), + "message": message, + "serviceContext": s.serviceContext, + } + if r != nil { + payload["context"] = map[string]interface{}{ + "httpRequest": map[string]interface{}{ + "method": r.Method, + "url": r.Host + r.RequestURI, + "userAgent": r.UserAgent(), + "referrer": r.Referer(), + "remoteIp": r.RemoteAddr, + }, + } + } + e := logging.Entry{ + Severity: logging.Error, + Payload: payload, + } + err := s.logger.LogSync(ctx, e) + if err != nil { + log.Println("Error writing error report:", err, "report:", payload) + } +} + +func (s *errorApiSender) send(ctx context.Context, r *http.Request, message string) { + time := time.Now() + var errorContext *erpb.ErrorContext + if r != nil { + errorContext = &erpb.ErrorContext{ + HttpRequest: &erpb.HttpRequestContext{ + Method: r.Method, + Url: r.Host + r.RequestURI, + UserAgent: r.UserAgent(), + Referrer: r.Referer(), + RemoteIp: r.RemoteAddr, + }, + } + } + req := erpb.ReportErrorEventRequest{ + ProjectName: s.projectID, + Event: &erpb.ReportedErrorEvent{ + EventTime: ×tamp.Timestamp{ + Seconds: time.Unix(), + Nanos: int32(time.Nanosecond()), + }, + ServiceContext: &s.serviceContext, + Message: message, + Context: errorContext, + }, + } + _, err := s.apiClient.ReportErrorEvent(ctx, &req) + if err != nil { + 
log.Println("Error writing error report:", err, "report:", message) + } +} + +// chopStack trims a stack trace so that the function which panics or calls +// Report is first. +func chopStack(s []byte, isPanic bool) string { + var f []byte + if isPanic { + f = []byte("panic(") + } else { + f = []byte("cloud.google.com/go/errors.(*Client).Report") + } + + lfFirst := bytes.IndexByte(s, '\n') + if lfFirst == -1 { + return string(s) + } + stack := s[lfFirst:] + panicLine := bytes.Index(stack, f) + if panicLine == -1 { + return string(s) + } + stack = stack[panicLine+1:] + for i := 0; i < 2; i++ { + nextLine := bytes.IndexByte(stack, '\n') + if nextLine == -1 { + return string(s) + } + stack = stack[nextLine+1:] + } + return string(s[:lfFirst+1]) + string(stack) +} diff --git a/vendor/cloud.google.com/go/errors/errors_test.go b/vendor/cloud.google.com/go/errors/errors_test.go new file mode 100644 index 00000000..773bce74 --- /dev/null +++ b/vendor/cloud.google.com/go/errors/errors_test.go @@ -0,0 +1,206 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package errors + +import ( + "bytes" + "errors" + "log" + "strings" + "testing" + + "golang.org/x/net/context" + "google.golang.org/api/option" + erpb "google.golang.org/genproto/googleapis/devtools/clouderrorreporting/v1beta1" +) + +const testProjectID = "testproject" + +type fakeReportErrorsClient struct { + req *erpb.ReportErrorEventRequest + fail bool +} + +func (c *fakeReportErrorsClient) ReportErrorEvent(ctx context.Context, req *erpb.ReportErrorEventRequest) (*erpb.ReportErrorEventResponse, error) { + if c.fail { + return nil, errors.New("request failed") + } + c.req = req + return &erpb.ReportErrorEventResponse{}, nil +} + +func newTestClient(c *fakeReportErrorsClient) *Client { + newApiInterface = func(ctx context.Context, opts ...option.ClientOption) (apiInterface, error) { + return c, nil + } + t, err := NewClient(context.Background(), testProjectID, "myservice", "v1.000", false) + if err != nil { + panic(err) + } + t.RepanicDefault = false + return t +} + +var ctx context.Context + +func init() { + ctx = context.Background() +} + +func TestCatchNothing(t *testing.T) { + fc := &fakeReportErrorsClient{} + c := newTestClient(fc) + defer func() { + r := fc.req + if r != nil { + t.Errorf("got error report, expected none") + } + }() + defer c.Catch(ctx) +} + +func commonChecks(t *testing.T, req *erpb.ReportErrorEventRequest, panickingFunction string) { + if req.Event.ServiceContext.Service != "myservice" { + t.Errorf("error report didn't contain service name") + } + if req.Event.ServiceContext.Version != "v1.000" { + t.Errorf("error report didn't contain version name") + } + if !strings.Contains(req.Event.Message, "hello, error") { + t.Errorf("error report didn't contain message") + } + if !strings.Contains(req.Event.Message, panickingFunction) { + t.Errorf("error report didn't contain stack trace") + } +} + +func TestCatchPanic(t *testing.T) { + fc := &fakeReportErrorsClient{} + c := newTestClient(fc) + defer func() { + r := fc.req + if r == nil { + 
t.Fatalf("got no error report, expected one") + } + commonChecks(t, r, "errors.TestCatchPanic") + if !strings.Contains(r.Event.Message, "divide by zero") { + t.Errorf("error report didn't contain recovered value") + } + }() + defer c.Catch(ctx, WithMessage("hello, error")) + var x int + x = x / x +} + +func TestCatchPanicNilClient(t *testing.T) { + buf := new(bytes.Buffer) + log.SetOutput(buf) + defer func() { + recover() + body := buf.String() + if !strings.Contains(body, "divide by zero") { + t.Errorf("error report didn't contain recovered value") + } + if !strings.Contains(body, "hello, error") { + t.Errorf("error report didn't contain message") + } + if !strings.Contains(body, "TestCatchPanicNilClient") { + t.Errorf("error report didn't contain recovered value") + } + }() + var c *Client + defer c.Catch(ctx, WithMessage("hello, error")) + var x int + x = x / x +} + +func TestLogFailedReports(t *testing.T) { + fc := &fakeReportErrorsClient{fail: true} + c := newTestClient(fc) + buf := new(bytes.Buffer) + log.SetOutput(buf) + defer func() { + recover() + body := buf.String() + if !strings.Contains(body, "hello, error") { + t.Errorf("error report didn't contain message") + } + if !strings.Contains(body, "errors.TestLogFailedReports") { + t.Errorf("error report didn't contain stack trace") + } + if !strings.Contains(body, "divide by zero") { + t.Errorf("error report didn't contain recovered value") + } + }() + defer c.Catch(ctx, WithMessage("hello, error")) + var x int + x = x / x +} + +func TestCatchNilPanic(t *testing.T) { + fc := &fakeReportErrorsClient{} + c := newTestClient(fc) + defer func() { + r := fc.req + if r == nil { + t.Fatalf("got no error report, expected one") + } + commonChecks(t, r, "errors.TestCatchNilPanic") + if !strings.Contains(r.Event.Message, "nil") { + t.Errorf("error report didn't contain recovered value") + } + }() + b := true + defer c.Catch(ctx, WithMessage("hello, error"), PanicFlag(&b)) + panic(nil) +} + +func TestNotCatchNilPanic(t 
*testing.T) { + fc := &fakeReportErrorsClient{} + c := newTestClient(fc) + defer func() { + r := fc.req + if r != nil { + t.Errorf("got error report, expected none") + } + }() + defer c.Catch(ctx, WithMessage("hello, error")) + panic(nil) +} + +func TestReport(t *testing.T) { + fc := &fakeReportErrorsClient{} + c := newTestClient(fc) + c.Report(ctx, nil, "hello, ", "error") + r := fc.req + if r == nil { + t.Fatalf("got no error report, expected one") + } + commonChecks(t, r, "errors.TestReport") +} + +func TestReportf(t *testing.T) { + fc := &fakeReportErrorsClient{} + c := newTestClient(fc) + c.Reportf(ctx, nil, "hello, error 2+%d=%d", 2, 2+2) + r := fc.req + if r == nil { + t.Fatalf("got no error report, expected one") + } + commonChecks(t, r, "errors.TestReportf") + if !strings.Contains(r.Event.Message, "2+2=4") { + t.Errorf("error report didn't contain formatted message") + } +} diff --git a/vendor/cloud.google.com/go/errors/stack_test.go b/vendor/cloud.google.com/go/errors/stack_test.go new file mode 100644 index 00000000..25cd4c60 --- /dev/null +++ b/vendor/cloud.google.com/go/errors/stack_test.go @@ -0,0 +1,118 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package errors + +import "testing" + +func TestChopStack(t *testing.T) { + for _, test := range []struct { + name string + in []byte + expected string + isPanic bool + }{ + { + name: "Catch", + in: []byte(`goroutine 20 [running]: +runtime/debug.Stack() + /gopath/src/runtime/debug/stack.go:24 +0x79 +cloud.google.com/go/errors.(*Client).logInternal() + /gopath/src/cloud.google.com/go/errors/errors.go:259 +0x18b +cloud.google.com/go/errors.(*Client).Catch() + /gopath/src/cloud.google.com/go/errors/errors.go:219 +0x6ed +panic() + /gopath/src/runtime/panic.go:458 +0x243 +cloud.google.com/go/errors_test.TestCatchPanic() + /gopath/src/cloud.google.com/go/errors/errors_test.go:93 +0x171 +testing.tRunner() + /gopath/src/testing/testing.go:610 +0x81 +created by testing.(*T).Run + /gopath/src/testing/testing.go:646 +0x2ec +`), + expected: `goroutine 20 [running]: +cloud.google.com/go/errors_test.TestCatchPanic() + /gopath/src/cloud.google.com/go/errors/errors_test.go:93 +0x171 +testing.tRunner() + /gopath/src/testing/testing.go:610 +0x81 +created by testing.(*T).Run + /gopath/src/testing/testing.go:646 +0x2ec +`, + isPanic: true, + }, + { + name: "function not found", + in: []byte(`goroutine 20 [running]: +runtime/debug.Stack() + /gopath/src/runtime/debug/stack.go:24 +0x79 +cloud.google.com/go/errors.(*Client).logInternal() + /gopath/src/cloud.google.com/go/errors/errors.go:259 +0x18b +cloud.google.com/go/errors.(*Client).Catch() + /gopath/src/cloud.google.com/go/errors/errors.go:219 +0x6ed +cloud.google.com/go/errors_test.TestCatchPanic() + /gopath/src/cloud.google.com/go/errors/errors_test.go:93 +0x171 +testing.tRunner() + /gopath/src/testing/testing.go:610 +0x81 +created by testing.(*T).Run + /gopath/src/testing/testing.go:646 +0x2ec +`), + expected: `goroutine 20 [running]: +runtime/debug.Stack() + /gopath/src/runtime/debug/stack.go:24 +0x79 +cloud.google.com/go/errors.(*Client).logInternal() + /gopath/src/cloud.google.com/go/errors/errors.go:259 +0x18b 
+cloud.google.com/go/errors.(*Client).Catch() + /gopath/src/cloud.google.com/go/errors/errors.go:219 +0x6ed +cloud.google.com/go/errors_test.TestCatchPanic() + /gopath/src/cloud.google.com/go/errors/errors_test.go:93 +0x171 +testing.tRunner() + /gopath/src/testing/testing.go:610 +0x81 +created by testing.(*T).Run + /gopath/src/testing/testing.go:646 +0x2ec +`, + isPanic: true, + }, + { + name: "Report", + in: []byte(` goroutine 39 [running]: +runtime/debug.Stack() + /gopath/runtime/debug/stack.go:24 +0x79 +cloud.google.com/go/errors.(*Client).logInternal() + /gopath/cloud.google.com/go/errors/errors.go:259 +0x18b +cloud.google.com/go/errors.(*Client).Report() + /gopath/cloud.google.com/go/errors/errors.go:248 +0x4ed +cloud.google.com/go/errors_test.TestReport() + /gopath/cloud.google.com/go/errors/errors_test.go:137 +0x2a1 +testing.tRunner() + /gopath/testing/testing.go:610 +0x81 +created by testing.(*T).Run + /gopath/testing/testing.go:646 +0x2ec +`), + expected: ` goroutine 39 [running]: +cloud.google.com/go/errors_test.TestReport() + /gopath/cloud.google.com/go/errors/errors_test.go:137 +0x2a1 +testing.tRunner() + /gopath/testing/testing.go:610 +0x81 +created by testing.(*T).Run + /gopath/testing/testing.go:646 +0x2ec +`, + isPanic: false, + }, + } { + out := chopStack(test.in, test.isPanic) + if out != test.expected { + t.Errorf("case %q: chopStack(%q, %t): got %q want %q", test.name, test.in, test.isPanic, out, test.expected) + } + } +} diff --git a/vendor/cloud.google.com/go/examples/bigquery/concat_table/main.go b/vendor/cloud.google.com/go/examples/bigquery/concat_table/main.go new file mode 100644 index 00000000..3d66639f --- /dev/null +++ b/vendor/cloud.google.com/go/examples/bigquery/concat_table/main.go @@ -0,0 +1,93 @@ +// Copyright 2015 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// concat_table is an example client of the bigquery client library. +// It concatenates two BigQuery tables and writes the result to another table. +package main + +import ( + "flag" + "fmt" + "log" + "os" + "time" + + "cloud.google.com/go/bigquery" + "golang.org/x/net/context" +) + +var ( + project = flag.String("project", "", "The ID of a Google Cloud Platform project") + dataset = flag.String("dataset", "", "The ID of a BigQuery dataset") + src1 = flag.String("src1", "", "The ID of the first BigQuery table to concatenate") + src2 = flag.String("src2", "", "The ID of the second BigQuery table to concatenate") + dest = flag.String("dest", "", "The ID of the BigQuery table to write the result to") + pollint = flag.Duration("pollint", 10*time.Second, "Polling interval for checking job status") +) + +func main() { + flag.Parse() + + flagsOk := true + for _, f := range []string{"project", "dataset", "src1", "src2", "dest"} { + if flag.Lookup(f).Value.String() == "" { + fmt.Fprintf(os.Stderr, "Flag --%s is required\n", f) + flagsOk = false + } + } + if !flagsOk { + os.Exit(1) + } + if *src1 == *src2 || *src1 == *dest || *src2 == *dest { + log.Fatalf("Different values must be supplied for each of --src1, --src2 and --dest") + } + + ctx := context.Background() + client, err := bigquery.NewClient(ctx, *project) + if err != nil { + log.Fatalf("Creating bigquery client: %v", err) + } + + s1 := client.Dataset(*dataset).Table(*src1) + s2 := client.Dataset(*dataset).Table(*src2) + d := client.Dataset(*dataset).Table(*dest) + + // Concatenate data. 
+ copier := d.CopierFrom(s1, s2) + copier.WriteDisposition = bigquery.WriteTruncate + job, err := copier.Run(ctx) + if err != nil { + log.Fatalf("Concatenating: %v", err) + } + + fmt.Printf("Job for concatenation operation: %+v\n", job) + fmt.Printf("Waiting for job to complete.\n") + + for range time.Tick(*pollint) { + status, err := job.Status(ctx) + if err != nil { + fmt.Printf("Failure determining status: %v", err) + break + } + if !status.Done() { + continue + } + if err := status.Err(); err == nil { + fmt.Printf("Success\n") + } else { + fmt.Printf("Failure: %+v\n", err) + } + break + } +} diff --git a/vendor/cloud.google.com/go/examples/bigquery/load/main.go b/vendor/cloud.google.com/go/examples/bigquery/load/main.go new file mode 100644 index 00000000..e8d51404 --- /dev/null +++ b/vendor/cloud.google.com/go/examples/bigquery/load/main.go @@ -0,0 +1,95 @@ +// Copyright 2015 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// load is an example client of the bigquery client library. +// It loads a file from Google Cloud Storage into a BigQuery table. 
+package main + +import ( + "flag" + "fmt" + "log" + "os" + "time" + + "cloud.google.com/go/bigquery" + "golang.org/x/net/context" +) + +var ( + project = flag.String("project", "", "The ID of a Google Cloud Platform project") + dataset = flag.String("dataset", "", "The ID of a BigQuery dataset") + table = flag.String("table", "", "The ID of a BigQuery table to load data into") + bucket = flag.String("bucket", "", "The name of a Google Cloud Storage bucket to load data from") + object = flag.String("object", "", "The name of a Google Cloud Storage object to load data from. Must exist within the bucket specified by --bucket") + skiprows = flag.Int64("skiprows", 0, "The number of rows of the source data to skip when loading") + pollint = flag.Duration("pollint", 10*time.Second, "Polling interval for checking job status") +) + +func main() { + flag.Parse() + + flagsOk := true + for _, f := range []string{"project", "dataset", "table", "bucket", "object"} { + if flag.Lookup(f).Value.String() == "" { + fmt.Fprintf(os.Stderr, "Flag --%s is required\n", f) + flagsOk = false + } + } + if !flagsOk { + os.Exit(1) + } + + ctx := context.Background() + client, err := bigquery.NewClient(ctx, *project) + if err != nil { + log.Fatalf("Creating bigquery client: %v", err) + } + + table := client.Dataset(*dataset).Table(*table) + + gcs := bigquery.NewGCSReference(fmt.Sprintf("gs://%s/%s", *bucket, *object)) + gcs.SkipLeadingRows = *skiprows + gcs.MaxBadRecords = 1 + gcs.AllowQuotedNewlines = true + + // Load data from Google Cloud Storage into a BigQuery table. 
+ loader := table.LoaderFrom(gcs) + loader.WriteDisposition = bigquery.WriteTruncate + job, err := loader.Run(ctx) + + if err != nil { + log.Fatalf("Loading data: %v", err) + } + + fmt.Printf("Job for data load operation: %+v\n", job) + fmt.Printf("Waiting for job to complete.\n") + + for range time.Tick(*pollint) { + status, err := job.Status(ctx) + if err != nil { + fmt.Printf("Failure determining status: %v", err) + break + } + if !status.Done() { + continue + } + if err := status.Err(); err == nil { + fmt.Printf("Success\n") + } else { + fmt.Printf("Failure: %+v\n", err) + } + break + } +} diff --git a/vendor/cloud.google.com/go/examples/bigquery/query/main.go b/vendor/cloud.google.com/go/examples/bigquery/query/main.go new file mode 100644 index 00000000..4ddcf4af --- /dev/null +++ b/vendor/cloud.google.com/go/examples/bigquery/query/main.go @@ -0,0 +1,98 @@ +// Copyright 2015 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// query is an example client of the bigquery client library. +// It submits a query and writes the result to a table. 
+package main + +import ( + "flag" + "fmt" + "log" + "os" + "time" + + "cloud.google.com/go/bigquery" + "golang.org/x/net/context" +) + +var ( + project = flag.String("project", "", "The ID of a Google Cloud Platform project") + dataset = flag.String("dataset", "", "The ID of a BigQuery dataset") + q = flag.String("q", "", "The query string") + dest = flag.String("dest", "", "The ID of the BigQuery table to write the result to. If unset, an ephemeral table ID will be generated.") + pollint = flag.Duration("pollint", 10*time.Second, "Polling interval for checking job status") + wait = flag.Bool("wait", false, "Whether to wait for the query job to complete.") +) + +func main() { + flag.Parse() + + flagsOk := true + for _, f := range []string{"project", "dataset", "q"} { + if flag.Lookup(f).Value.String() == "" { + fmt.Fprintf(os.Stderr, "Flag --%s is required\n", f) + flagsOk = false + } + } + if !flagsOk { + os.Exit(1) + } + + ctx := context.Background() + client, err := bigquery.NewClient(ctx, *project) + if err != nil { + log.Fatalf("Creating bigquery client: %v", err) + } + + query := client.Query(*q) + query.DefaultProjectID = *project + query.DefaultDatasetID = *dataset + query.WriteDisposition = bigquery.WriteTruncate + + if *dest != "" { + query.Dst = client.Dataset(*dataset).Table(*dest) + } + + // Query data. + job, err := query.Run(ctx) + + if err != nil { + log.Fatalf("Querying: %v", err) + } + + fmt.Printf("Submitted query. 
Job ID: %s\n", job.ID()) + if !*wait { + return + } + + fmt.Printf("Waiting for job to complete.\n") + + for range time.Tick(*pollint) { + status, err := job.Status(ctx) + if err != nil { + fmt.Printf("Failure determining status: %v", err) + break + } + if !status.Done() { + continue + } + if err := status.Err(); err == nil { + fmt.Printf("Success\n") + } else { + fmt.Printf("Failure: %+v\n", err) + } + break + } +} diff --git a/vendor/cloud.google.com/go/examples/bigquery/read/main.go b/vendor/cloud.google.com/go/examples/bigquery/read/main.go new file mode 100644 index 00000000..551e2e3a --- /dev/null +++ b/vendor/cloud.google.com/go/examples/bigquery/read/main.go @@ -0,0 +1,142 @@ +// Copyright 2015 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// read is an example client of the bigquery client library. +// It reads from a table, returning the data via an Iterator. 
+package main + +import ( + "flag" + "fmt" + "log" + "os" + "regexp" + "strings" + "text/tabwriter" + + "cloud.google.com/go/bigquery" + "golang.org/x/net/context" + "google.golang.org/api/iterator" +) + +var ( + project = flag.String("project", "", "The ID of a Google Cloud Platform project") + dataset = flag.String("dataset", "", "The ID of a BigQuery dataset") + table = flag.String("table", ".*", "A regular expression to match the IDs of tables to read.") + jobID = flag.String("jobid", "", "The ID of a query job that has already been submitted."+ + " If set, --dataset, --table will be ignored, and results will be read from the specified job.") +) + +func printValues(ctx context.Context, it *bigquery.RowIterator) { + // one-space padding. + tw := tabwriter.NewWriter(os.Stdout, 0, 0, 1, ' ', 0) + + for { + var vals []bigquery.Value + err := it.Next(&vals) + if err == iterator.Done { + break + } + if err != nil { + fmt.Printf("err calling get: %v\n", err) + } + sep := "" + for _, v := range vals { + fmt.Fprintf(tw, "%s%v", sep, v) + sep = "\t" + } + fmt.Fprintf(tw, "\n") + } + tw.Flush() + fmt.Println() +} + +func printTable(ctx context.Context, client *bigquery.Client, t *bigquery.Table) { + it := t.Read(ctx) + id := t.FullyQualifiedName() + fmt.Printf("%s\n%s\n", id, strings.Repeat("-", len(id))) + printValues(ctx, it) +} + +func printQueryResults(ctx context.Context, client *bigquery.Client, queryJobID string) { + job, err := client.JobFromID(ctx, queryJobID) + if err != nil { + log.Fatalf("Loading job: %v", err) + } + + it, err := job.Read(ctx) + if err != nil { + log.Fatalf("Reading: %v", err) + } + + // TODO: print schema. 
+ printValues(ctx, it) +} + +func main() { + flag.Parse() + + flagsOk := true + if flag.Lookup("project").Value.String() == "" { + fmt.Fprintf(os.Stderr, "Flag --project is required\n") + flagsOk = false + } + + var sourceFlagCount int + if flag.Lookup("dataset").Value.String() != "" { + sourceFlagCount++ + } + if flag.Lookup("jobid").Value.String() != "" { + sourceFlagCount++ + } + if sourceFlagCount != 1 { + fmt.Fprintf(os.Stderr, "Exactly one of --dataset or --jobid must be set\n") + flagsOk = false + } + + if !flagsOk { + os.Exit(1) + } + + ctx := context.Background() + tableRE, err := regexp.Compile(*table) + if err != nil { + fmt.Fprintf(os.Stderr, "--table is not a valid regular expression: %q\n", *table) + os.Exit(1) + } + + client, err := bigquery.NewClient(ctx, *project) + if err != nil { + log.Fatalf("Creating bigquery client: %v", err) + } + + if *jobID != "" { + printQueryResults(ctx, client, *jobID) + return + } + ds := client.Dataset(*dataset) + tableIter := ds.Tables(context.Background()) + for { + t, err := tableIter.Next() + if err == iterator.Done { + break + } + if err != nil { + log.Fatalf("Listing tables: %v", err) + } + if tableRE.MatchString(t.TableID) { + printTable(ctx, client, t) + } + } +} diff --git a/vendor/cloud.google.com/go/examples/bigtable/helloworld/README.md b/vendor/cloud.google.com/go/examples/bigtable/helloworld/README.md new file mode 100644 index 00000000..b96ba53c --- /dev/null +++ b/vendor/cloud.google.com/go/examples/bigtable/helloworld/README.md @@ -0,0 +1,46 @@ +# Cloud Bigtable Hello World in Go + +This is a simple application that demonstrates using the [Google Cloud APIs Go +Client Library](https://github.com/GoogleCloudPlatform/google-cloud-go) to connect +to and interact with Cloud Bigtable. + +## Prerequisites + +1. Set up Cloud Console. + 1. Go to the [Cloud Console](https://cloud.google.com/console) and create or select your project. + You will need the project ID later. + 1. 
Go to **Settings > Project Billing Settings** and enable billing. + 1. Select **APIs & Auth > APIs**. + 1. Enable the **Cloud Bigtable API** and the **Cloud Bigtable Admin API**. + (You may need to search for the API). +1. Set up gcloud. + 1. `gcloud components update` + 1. `gcloud auth login` + 1. `gcloud config set project PROJECT_ID` +1. Provision a Cloud Bigtable instance + 1. Follow the instructions in the [user +documentation](https://cloud.google.com/bigtable/docs/creating-instance) to +create a Google Cloud Platform project and Cloud Bigtable instance if necessary. + 1. You'll need to reference your project id and instance id to run the application. + +## Running + +1. From the hello_world example folder, `go run main.go -project PROJECT_ID -instance INSTANCE_ID`, substituting your project id and instance id. + +## Cleaning up + +To avoid incurring extra charges to your Google Cloud Platform account, remove +the resources created for this sample. + +1. Go to the Clusters page in the [Cloud + Console](https://console.cloud.google.com). + + [Go to the Clusters page](https://console.cloud.google.com/project/_/bigtable/clusters) + +1. Click the cluster name. + +1. Click **Delete**. + + ![Delete](https://cloud.google.com/bigtable/img/delete-quickstart-cluster.png) + +1. Type the cluster ID, then click **Delete** to delete the cluster. diff --git a/vendor/cloud.google.com/go/examples/bigtable/helloworld/main.go b/vendor/cloud.google.com/go/examples/bigtable/helloworld/main.go new file mode 100644 index 00000000..fa6692c7 --- /dev/null +++ b/vendor/cloud.google.com/go/examples/bigtable/helloworld/main.go @@ -0,0 +1,157 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Hello world is a sample program demonstrating use of the Bigtable client +// library to perform basic CRUD operations +package main + +import ( + "flag" + "fmt" + "log" + + "cloud.google.com/go/bigtable" + "golang.org/x/net/context" +) + +// User-provided constants. +const ( + tableName = "Hello-Bigtable" + columnFamilyName = "cf1" + columnName = "greeting" +) + +var greetings = []string{"Hello World!", "Hello Cloud Bigtable!", "Hello golang!"} + +// sliceContains reports whether the provided string is present in the given slice of strings. +func sliceContains(list []string, target string) bool { + for _, s := range list { + if s == target { + return true + } + } + return false +} + +func main() { + project := flag.String("project", "", "The Google Cloud Platform project ID. Required.") + instance := flag.String("instance", "", "The Google Cloud Bigtable instance ID. Required.") + flag.Parse() + + for _, f := range []string{"project", "instance"} { + if flag.Lookup(f).Value.String() == "" { + log.Fatalf("The %s flag is required.", f) + } + } + + ctx := context.Background() + + // Set up admin client, tables, and column families. + // NewAdminClient uses Application Default Credentials to authenticate. 
+ adminClient, err := bigtable.NewAdminClient(ctx, *project, *instance) + if err != nil { + log.Fatalf("Could not create admin client: %v", err) + } + + tables, err := adminClient.Tables(ctx) + if err != nil { + log.Fatalf("Could not fetch table list: %v", err) + } + + if !sliceContains(tables, tableName) { + log.Printf("Creating table %s", tableName) + if err := adminClient.CreateTable(ctx, tableName); err != nil { + log.Fatalf("Could not create table %s: %v", tableName, err) + } + } + + tblInfo, err := adminClient.TableInfo(ctx, tableName) + if err != nil { + log.Fatalf("Could not read info for table %s: %v", tableName, err) + } + + if !sliceContains(tblInfo.Families, columnFamilyName) { + if err := adminClient.CreateColumnFamily(ctx, tableName, columnFamilyName); err != nil { + log.Fatalf("Could not create column family %s: %v", columnFamilyName, err) + } + } + + // Set up Bigtable data operations client. + // NewClient uses Application Default Credentials to authenticate. + client, err := bigtable.NewClient(ctx, *project, *instance) + if err != nil { + log.Fatalf("Could not create data operations client: %v", err) + } + + tbl := client.Open(tableName) + muts := make([]*bigtable.Mutation, len(greetings)) + rowKeys := make([]string, len(greetings)) + + log.Printf("Writing greeting rows to table") + for i, greeting := range greetings { + muts[i] = bigtable.NewMutation() + muts[i].Set(columnFamilyName, columnName, bigtable.Now(), []byte(greeting)) + + // Each row has a unique row key. + // + // Note: This example uses sequential numeric IDs for simplicity, but + // this can result in poor performance in a production application. + // Since rows are stored in sorted order by key, sequential keys can + // result in poor distribution of operations across nodes. 
+ // + // For more information about how to design a Bigtable schema for the + // best performance, see the documentation: + // + // https://cloud.google.com/bigtable/docs/schema-design + rowKeys[i] = fmt.Sprintf("%s%d", columnName, i) + } + + rowErrs, err := tbl.ApplyBulk(ctx, rowKeys, muts) + if err != nil { + log.Fatalf("Could not apply bulk row mutation: %v", err) + } + if rowErrs != nil { + for _, rowErr := range rowErrs { + log.Printf("Error writing row: %v", rowErr) + } + log.Fatalf("Could not write some rows") + } + + log.Printf("Getting a single greeting by row key:") + row, err := tbl.ReadRow(ctx, rowKeys[0], bigtable.RowFilter(bigtable.ColumnFilter(columnName))) + if err != nil { + log.Fatalf("Could not read row with key %s: %v", rowKeys[0], err) + } + log.Printf("\t%s = %s\n", rowKeys[0], string(row[columnFamilyName][0].Value)) + + log.Printf("Reading all greeting rows:") + err = tbl.ReadRows(ctx, bigtable.PrefixRange(columnName), func(row bigtable.Row) bool { + item := row[columnFamilyName][0] + log.Printf("\t%s = %s\n", item.Row, string(item.Value)) + return true + }, bigtable.RowFilter(bigtable.ColumnFilter(columnName))) + + if err = client.Close(); err != nil { + log.Fatalf("Could not close data operations client: %v", err) + } + + log.Printf("Deleting the table") + if err = adminClient.DeleteTable(ctx, tableName); err != nil { + log.Fatalf("Could not delete table %s: %v", tableName, err) + } + + if err = adminClient.Close(); err != nil { + log.Fatalf("Could not close admin client: %v", err) + } +} diff --git a/vendor/cloud.google.com/go/examples/bigtable/search/search.go b/vendor/cloud.google.com/go/examples/bigtable/search/search.go new file mode 100644 index 00000000..2049e5fe --- /dev/null +++ b/vendor/cloud.google.com/go/examples/bigtable/search/search.go @@ -0,0 +1,453 @@ +/* +Copyright 2015 Google Inc. All Rights Reserved. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Search is a sample web server that uses Cloud Bigtable as the storage layer +// for a simple document-storage and full-text-search service. +// It has four functions: +// - Initialize and clear the table. +// - Add a document. This adds the content of a user-supplied document to the +// Bigtable, and adds references to the document to an index in the Bigtable. +// The document is indexed under each unique word in the document. +// - Search the index. This returns documents containing each word in a user +// query, with snippets and links to view the whole document. +// - Copy table. This copies the documents and index from another table and +// adds them to the current one. +package main + +import ( + "bytes" + "flag" + "fmt" + "html/template" + "io" + "log" + "net/http" + "strconv" + "strings" + "sync" + "time" + "unicode" + + "cloud.google.com/go/bigtable" + "golang.org/x/net/context" +) + +var ( + addTemplate = template.Must(template.New("").Parse(` +Added {{.Title}} +`)) + + contentTemplate = template.Must(template.New("").Parse(` +{{.Title}}

+{{.Content}} +`)) + + searchTemplate = template.Must(template.New("").Parse(` +Results for {{.Query}}:

+{{range .Results}} +{{.Title}}
+{{.Snippet}}

+{{end}} +`)) +) + +const ( + indexColumnFamily = "i" + contentColumnFamily = "c" + mainPage = ` + + + Document Search + + + Initialize and clear table: +
+
+ + + Search for documents: +
+
+
+ + + Add a document: +
+ Document name: +
+ Document text: +
+
+ + + Copy data from another table: +
+ Source table name: +
+
+ + + + ` +) + +func main() { + var ( + project = flag.String("project", "", "The name of the project.") + instance = flag.String("instance", "", "The name of the Cloud Bigtable instance.") + tableName = flag.String("table", "docindex", "The name of the table containing the documents and index.") + port = flag.Int("port", 8080, "TCP port for server.") + ) + flag.Parse() + + // Make an admin client. + adminClient, err := bigtable.NewAdminClient(context.Background(), *project, *instance) + if err != nil { + log.Fatal("Bigtable NewAdminClient:", err) + } + + // Make a regular client. + client, err := bigtable.NewClient(context.Background(), *project, *instance) + if err != nil { + log.Fatal("Bigtable NewClient:", err) + } + + // Open the table. + table := client.Open(*tableName) + + // Set up HTML handlers, and start the web server. + http.HandleFunc("/search", func(w http.ResponseWriter, r *http.Request) { handleSearch(w, r, table) }) + http.HandleFunc("/content", func(w http.ResponseWriter, r *http.Request) { handleContent(w, r, table) }) + http.HandleFunc("/add", func(w http.ResponseWriter, r *http.Request) { handleAddDoc(w, r, table) }) + http.HandleFunc("/reset", func(w http.ResponseWriter, r *http.Request) { handleReset(w, r, *tableName, adminClient) }) + http.HandleFunc("/copy", func(w http.ResponseWriter, r *http.Request) { handleCopy(w, r, *tableName, client, adminClient) }) + http.HandleFunc("/", handleMain) + log.Fatal(http.ListenAndServe(":"+strconv.Itoa(*port), nil)) +} + +// handleMain outputs the home page. +func handleMain(w http.ResponseWriter, r *http.Request) { + io.WriteString(w, mainPage) +} + +// tokenize splits a string into tokens. +// This is very simple, it's not a good tokenization function. 
+func tokenize(s string) []string { + wordMap := make(map[string]bool) + f := strings.FieldsFunc(s, func(r rune) bool { return !unicode.IsLetter(r) }) + for _, word := range f { + word = strings.ToLower(word) + wordMap[word] = true + } + words := make([]string, 0, len(wordMap)) + for word := range wordMap { + words = append(words, word) + } + return words +} + +// handleContent fetches the content of a document from the Bigtable and returns it. +func handleContent(w http.ResponseWriter, r *http.Request, table *bigtable.Table) { + ctx, _ := context.WithTimeout(context.Background(), 10*time.Second) + name := r.FormValue("name") + if len(name) == 0 { + http.Error(w, "No document name supplied.", http.StatusBadRequest) + return + } + + row, err := table.ReadRow(ctx, name) + if err != nil { + http.Error(w, "Error reading content: "+err.Error(), http.StatusInternalServerError) + return + } + content := row[contentColumnFamily] + if len(content) == 0 { + http.Error(w, "Document not found.", http.StatusNotFound) + return + } + var buf bytes.Buffer + if err := contentTemplate.ExecuteTemplate(&buf, "", struct{ Title, Content string }{name, string(content[0].Value)}); err != nil { + http.Error(w, "Error executing HTML template: "+err.Error(), http.StatusInternalServerError) + return + } + io.Copy(w, &buf) +} + +// handleSearch responds to search queries, returning links and snippets for matching documents. +func handleSearch(w http.ResponseWriter, r *http.Request, table *bigtable.Table) { + ctx, _ := context.WithTimeout(context.Background(), 10*time.Second) + query := r.FormValue("q") + // Split the query into words. + words := tokenize(query) + if len(words) == 0 { + http.Error(w, "Empty query.", http.StatusBadRequest) + return + } + + // readRows reads from many rows concurrently. 
+ readRows := func(rows []string) ([]bigtable.Row, error) { + results := make([]bigtable.Row, len(rows)) + errors := make([]error, len(rows)) + var wg sync.WaitGroup + for i, row := range rows { + wg.Add(1) + go func(i int, row string) { + defer wg.Done() + results[i], errors[i] = table.ReadRow(ctx, row, bigtable.RowFilter(bigtable.LatestNFilter(1))) + }(i, row) + } + wg.Wait() + for _, err := range errors { + if err != nil { + return nil, err + } + } + return results, nil + } + + // For each query word, get the list of documents containing it. + results, err := readRows(words) + if err != nil { + http.Error(w, "Error reading index: "+err.Error(), http.StatusInternalServerError) + return + } + + // Count how many of the query words each result contained. + hits := make(map[string]int) + for _, r := range results { + for _, r := range r[indexColumnFamily] { + hits[r.Column]++ + } + } + + // Build a slice of all the documents that matched every query word. + var matches []string + for doc, count := range hits { + if count == len(words) { + matches = append(matches, doc[len(indexColumnFamily+":"):]) + } + } + + // Fetch the content of those documents from the Bigtable. + content, err := readRows(matches) + if err != nil { + http.Error(w, "Error reading results: "+err.Error(), http.StatusInternalServerError) + return + } + + type result struct{ Title, Snippet string } + data := struct { + Query string + Results []result + }{query, nil} + + // Output links and snippets. + for i, doc := range matches { + var text string + c := content[i][contentColumnFamily] + if len(c) > 0 { + text = string(c[0].Value) + } + if len(text) > 100 { + text = text[:100] + "..." 
+ } + data.Results = append(data.Results, result{doc, text}) + } + var buf bytes.Buffer + if err := searchTemplate.ExecuteTemplate(&buf, "", data); err != nil { + http.Error(w, "Error executing HTML template: "+err.Error(), http.StatusInternalServerError) + return + } + io.Copy(w, &buf) +} + +// handleAddDoc adds a document to the index. +func handleAddDoc(w http.ResponseWriter, r *http.Request, table *bigtable.Table) { + if r.Method != "POST" { + http.Error(w, "POST requests only", http.StatusMethodNotAllowed) + return + } + + ctx, _ := context.WithTimeout(context.Background(), time.Minute) + + name := r.FormValue("name") + if len(name) == 0 { + http.Error(w, "Empty document name!", http.StatusBadRequest) + return + } + + content := r.FormValue("content") + if len(content) == 0 { + http.Error(w, "Empty document content!", http.StatusBadRequest) + return + } + + var ( + writeErr error // Set if any write fails. + mu sync.Mutex // Protects writeErr + wg sync.WaitGroup // Used to wait for all writes to finish. + ) + + // writeOneColumn writes one column in one row, updates err if there is an error, + // and signals wg that one operation has finished. + writeOneColumn := func(row, family, column, value string, ts bigtable.Timestamp) { + mut := bigtable.NewMutation() + mut.Set(family, column, ts, []byte(value)) + err := table.Apply(ctx, row, mut) + if err != nil { + mu.Lock() + writeErr = err + mu.Unlock() + } + } + + // Start a write to store the document content. + wg.Add(1) + go func() { + writeOneColumn(name, contentColumnFamily, "", content, bigtable.Now()) + wg.Done() + }() + + // Start writes to store the document name in the index for each word in the document. + words := tokenize(content) + for _, word := range words { + var ( + row = word + family = indexColumnFamily + column = name + value = "" + ts = bigtable.Now() + ) + wg.Add(1) + go func() { + // TODO: should use a semaphore to limit the number of concurrent writes. 
+ writeOneColumn(row, family, column, value, ts) + wg.Done() + }() + } + wg.Wait() + if writeErr != nil { + http.Error(w, "Error writing to Bigtable: "+writeErr.Error(), http.StatusInternalServerError) + return + } + var buf bytes.Buffer + if err := addTemplate.ExecuteTemplate(&buf, "", struct{ Title string }{name}); err != nil { + http.Error(w, "Error executing HTML template: "+err.Error(), http.StatusInternalServerError) + return + } + io.Copy(w, &buf) +} + +// handleReset deletes the table if it exists, creates it again, and creates its column families. +func handleReset(w http.ResponseWriter, r *http.Request, table string, adminClient *bigtable.AdminClient) { + if r.Method != "POST" { + http.Error(w, "POST requests only", http.StatusMethodNotAllowed) + return + } + ctx, _ := context.WithTimeout(context.Background(), 5*time.Minute) + adminClient.DeleteTable(ctx, table) + if err := adminClient.CreateTable(ctx, table); err != nil { + http.Error(w, "Error creating Bigtable: "+err.Error(), http.StatusInternalServerError) + return + } + time.Sleep(20 * time.Second) + // Create two column families, and set the GC policy for each one to keep one version. + for _, family := range []string{indexColumnFamily, contentColumnFamily} { + if err := adminClient.CreateColumnFamily(ctx, table, family); err != nil { + http.Error(w, "Error creating column family: "+err.Error(), http.StatusInternalServerError) + return + } + if err := adminClient.SetGCPolicy(ctx, table, family, bigtable.MaxVersionsPolicy(1)); err != nil { + http.Error(w, "Error setting GC policy: "+err.Error(), http.StatusInternalServerError) + return + } + } + w.Write([]byte("Done.")) + return +} + +// copyTable copies data from one table to another. +func copyTable(src, dst string, client *bigtable.Client, adminClient *bigtable.AdminClient) error { + if src == "" || src == dst { + return nil + } + ctx, _ := context.WithTimeout(context.Background(), time.Minute) + + // Open the source and destination tables. 
+ srcTable := client.Open(src) + dstTable := client.Open(dst) + + var ( + writeErr error // Set if any write fails. + mu sync.Mutex // Protects writeErr + wg sync.WaitGroup // Used to wait for all writes to finish. + ) + copyRowToTable := func(row bigtable.Row) bool { + mu.Lock() + failed := writeErr != nil + mu.Unlock() + if failed { + return false + } + mut := bigtable.NewMutation() + for family, items := range row { + for _, item := range items { + // Get the column name, excluding the column family name and ':' character. + columnWithoutFamily := item.Column[len(family)+1:] + mut.Set(family, columnWithoutFamily, bigtable.Now(), item.Value) + } + } + wg.Add(1) + go func() { + // TODO: should use a semaphore to limit the number of concurrent writes. + if err := dstTable.Apply(ctx, row.Key(), mut); err != nil { + mu.Lock() + writeErr = err + mu.Unlock() + } + wg.Done() + }() + return true + } + + // Create a filter that only accepts the column families we're interested in. + filter := bigtable.FamilyFilter(indexColumnFamily + "|" + contentColumnFamily) + // Read every row from srcTable, and call copyRowToTable to copy it to our table. + err := srcTable.ReadRows(ctx, bigtable.InfiniteRange(""), copyRowToTable, bigtable.RowFilter(filter)) + wg.Wait() + if err != nil { + return err + } + return writeErr +} + +// handleCopy copies data from one table to another. 
+func handleCopy(w http.ResponseWriter, r *http.Request, dst string, client *bigtable.Client, adminClient *bigtable.AdminClient) { + if r.Method != "POST" { + http.Error(w, "POST requests only", http.StatusMethodNotAllowed) + return + } + src := r.FormValue("name") + if src == "" { + http.Error(w, "No source table specified.", http.StatusBadRequest) + return + } + if err := copyTable(src, dst, client, adminClient); err != nil { + http.Error(w, "Failed to rebuild index: "+err.Error(), http.StatusInternalServerError) + return + } + fmt.Fprint(w, "Copied table.\n") +} diff --git a/vendor/cloud.google.com/go/examples/bigtable/usercounter/README.md b/vendor/cloud.google.com/go/examples/bigtable/usercounter/README.md new file mode 100644 index 00000000..57ba4be4 --- /dev/null +++ b/vendor/cloud.google.com/go/examples/bigtable/usercounter/README.md @@ -0,0 +1,29 @@ +# User Counter +# (Cloud Bigtable on Managed VMs using Go) + +This app counts how often each user visits. The app uses Cloud Bigtable to store the visit counts for each user. + +## Prerequisites + +1. Set up Cloud Console. + 1. Go to the [Cloud Console](https://cloud.google.com/console) and create or select your project. + You will need the project ID later. + 1. Go to **Settings > Project Billing Settings** and enable billing. + 1. Select **APIs & Auth > APIs**. + 1. Enable the **Cloud Bigtable API** and the **Cloud Bigtable Admin API**. + (You may need to search for the API). +1. Set up gcloud. + 1. `gcloud components update` + 1. `gcloud auth login` + 1. `gcloud config set project PROJECT_ID` +1. Download App Engine SDK for Go. + 1. `go get -u google.golang.org/appengine/...` +1. In main.go, change the `project` and `instance` constants. + +## Running locally + +1. From the sample project folder, `dev_appserver.py app.yaml`. + +## Deploying on Google App Engine flexible environment + +Follow the [deployment instructions](https://cloud.google.com/appengine/docs/flexible/go/testing-and-deploying-your-app). 
diff --git a/vendor/cloud.google.com/go/examples/bigtable/usercounter/app.yaml b/vendor/cloud.google.com/go/examples/bigtable/usercounter/app.yaml new file mode 100644 index 00000000..4f5fef0e --- /dev/null +++ b/vendor/cloud.google.com/go/examples/bigtable/usercounter/app.yaml @@ -0,0 +1,11 @@ +runtime: go +api_version: go1 +vm: true + +manual_scaling: + instances: 1 + +handlers: +# Serve only the web root. +- url: / + script: _go_app diff --git a/vendor/cloud.google.com/go/examples/bigtable/usercounter/main.go b/vendor/cloud.google.com/go/examples/bigtable/usercounter/main.go new file mode 100644 index 00000000..a898369b --- /dev/null +++ b/vendor/cloud.google.com/go/examples/bigtable/usercounter/main.go @@ -0,0 +1,180 @@ +// Copyright 2015 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/* +User counter is a program that tracks how often a user has visited the index page. + +This program demonstrates usage of the Cloud Bigtable API for App Engine flexible environment and Go. +Instructions for running this program are in the README.md. +*/ +package main + +import ( + "bytes" + "encoding/binary" + "html/template" + "log" + "net/http" + + "cloud.google.com/go/bigtable" + "golang.org/x/net/context" + "google.golang.org/appengine" + aelog "google.golang.org/appengine/log" + "google.golang.org/appengine/user" +) + +// User-provided constants. 
+const ( + project = "PROJECT_ID" + instance = "INSTANCE" +) + +var ( + tableName = "user-visit-counter" + familyName = "emails" + + // Client is initialized by main. + client *bigtable.Client +) + +func main() { + ctx := context.Background() + + // Set up admin client, tables, and column families. + // NewAdminClient uses Application Default Credentials to authenticate. + adminClient, err := bigtable.NewAdminClient(ctx, project, instance) + if err != nil { + log.Fatalf("Unable to create a table admin client. %v", err) + } + tables, err := adminClient.Tables(ctx) + if err != nil { + log.Fatalf("Unable to fetch table list. %v", err) + } + if !sliceContains(tables, tableName) { + if err := adminClient.CreateTable(ctx, tableName); err != nil { + log.Fatalf("Unable to create table: %v. %v", tableName, err) + } + } + tblInfo, err := adminClient.TableInfo(ctx, tableName) + if err != nil { + log.Fatalf("Unable to read info for table: %v. %v", tableName, err) + } + if !sliceContains(tblInfo.Families, familyName) { + if err := adminClient.CreateColumnFamily(ctx, tableName, familyName); err != nil { + log.Fatalf("Unable to create column family: %v. %v", familyName, err) + } + } + adminClient.Close() + + // Set up Bigtable data operations client. + // NewClient uses Application Default Credentials to authenticate. + client, err = bigtable.NewClient(ctx, project, instance) + if err != nil { + log.Fatalf("Unable to create data operations client. %v", err) + } + + http.Handle("/", appHandler(mainHandler)) + appengine.Main() // Never returns. +} + +// mainHandler tracks how many times each user has visited this page. 
+func mainHandler(w http.ResponseWriter, r *http.Request) *appError { + if r.URL.Path != "/" { + http.NotFound(w, r) + return nil + } + + ctx := appengine.NewContext(r) + u := user.Current(ctx) + if u == nil { + login, err := user.LoginURL(ctx, r.URL.String()) + if err != nil { + return &appError{err, "Error finding login URL", http.StatusInternalServerError} + } + http.Redirect(w, r, login, http.StatusFound) + return nil + } + logoutURL, err := user.LogoutURL(ctx, "/") + if err != nil { + return &appError{err, "Error finding logout URL", http.StatusInternalServerError} + } + + // Increment visit count for user. + tbl := client.Open(tableName) + rmw := bigtable.NewReadModifyWrite() + rmw.Increment(familyName, u.Email, 1) + row, err := tbl.ApplyReadModifyWrite(ctx, u.Email, rmw) + if err != nil { + return &appError{err, "Error applying ReadModifyWrite to row: " + u.Email, http.StatusInternalServerError} + } + data := struct { + Username, Logout string + Visits uint64 + }{ + Username: u.Email, + // Retrieve the most recently edited column. + Visits: binary.BigEndian.Uint64(row[familyName][0].Value), + Logout: logoutURL, + } + + // Display hello page. + var buf bytes.Buffer + if err := tmpl.Execute(&buf, data); err != nil { + return &appError{err, "Error writing template", http.StatusInternalServerError} + } + buf.WriteTo(w) + return nil +} + +var tmpl = template.Must(template.New("").Parse(` + + +

+{{with .Username}} Hello {{.}}{{end}} +{{with .Logout}}Sign out{{end}} + +

+ +

+You have visited {{.Visits}} +

+ +`)) + +// sliceContains reports whether the provided string is present in the given slice of strings. +func sliceContains(list []string, target string) bool { + for _, s := range list { + if s == target { + return true + } + } + return false +} + +// More info about this method of error handling can be found at: http://blog.golang.org/error-handling-and-go +type appHandler func(http.ResponseWriter, *http.Request) *appError + +type appError struct { + Error error + Message string + Code int +} + +func (fn appHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + if e := fn(w, r); e != nil { + ctx := appengine.NewContext(r) + aelog.Errorf(ctx, "%v", e.Error) + http.Error(w, e.Message, e.Code) + } +} diff --git a/vendor/cloud.google.com/go/examples/storage/appengine/app.go b/vendor/cloud.google.com/go/examples/storage/appengine/app.go new file mode 100644 index 00000000..f01758ef --- /dev/null +++ b/vendor/cloud.google.com/go/examples/storage/appengine/app.go @@ -0,0 +1,428 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//[START sample] +// Package gcsdemo is an example App Engine app using the Google Cloud Storage API. 
+package gcsdemo + +//[START imports] +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "net/http" + "strings" + + "cloud.google.com/go/storage" + "golang.org/x/net/context" + "google.golang.org/api/iterator" + "google.golang.org/appengine" + "google.golang.org/appengine/file" + "google.golang.org/appengine/log" +) + +//[END imports] + +func init() { + http.HandleFunc("/", handler) +} + +// demo struct holds information needed to run the various demo functions. +type demo struct { + client *storage.Client + bucketName string + bucket *storage.BucketHandle + + w io.Writer + ctx context.Context + // cleanUp is a list of filenames that need cleaning up at the end of the demo. + cleanUp []string + // failed indicates that one or more of the demo steps failed. + failed bool +} + +func (d *demo) errorf(format string, args ...interface{}) { + d.failed = true + fmt.Fprintln(d.w, fmt.Sprintf(format, args...)) + log.Errorf(d.ctx, format, args...) +} + +// handler is the main demo entry point that calls the GCS operations. +func handler(w http.ResponseWriter, r *http.Request) { + ctx := appengine.NewContext(r) + + if r.URL.Path != "/" { + http.NotFound(w, r) + return + } + + //[START get_default_bucket] + // Use `dev_appserver.py --default_gcs_bucket_name GCS_BUCKET_NAME` + // when running locally. 
+ bucket, err := file.DefaultBucketName(ctx) + if err != nil { + log.Errorf(ctx, "failed to get default GCS bucket name: %v", err) + } + //[END get_default_bucket] + + client, err := storage.NewClient(ctx) + if err != nil { + log.Errorf(ctx, "failed to create client: %v", err) + return + } + defer client.Close() + + w.Header().Set("Content-Type", "text/plain; charset=utf-8") + fmt.Fprintf(w, "Demo GCS Application running from Version: %v\n", appengine.VersionID(ctx)) + fmt.Fprintf(w, "Using bucket name: %v\n\n", bucket) + + buf := &bytes.Buffer{} + d := &demo{ + w: buf, + ctx: ctx, + client: client, + bucket: client.Bucket(bucket), + bucketName: bucket, + } + + n := "demo-testfile-go" + d.createFile(n) + d.readFile(n) + d.copyFile(n) + d.statFile(n) + d.createListFiles() + d.listBucket() + d.listBucketDirMode() + d.defaultACL() + d.putDefaultACLRule() + d.deleteDefaultACLRule() + d.bucketACL() + d.putBucketACLRule() + d.deleteBucketACLRule() + d.acl(n) + d.putACLRule(n) + d.deleteACLRule(n) + d.deleteFiles() + + if d.failed { + w.WriteHeader(http.StatusInternalServerError) + buf.WriteTo(w) + fmt.Fprintf(w, "\nDemo failed.\n") + } else { + w.WriteHeader(http.StatusOK) + buf.WriteTo(w) + fmt.Fprintf(w, "\nDemo succeeded.\n") + } +} + +//[START write] +// createFile creates a file in Google Cloud Storage. 
+func (d *demo) createFile(fileName string) { + fmt.Fprintf(d.w, "Creating file /%v/%v\n", d.bucketName, fileName) + + wc := d.bucket.Object(fileName).NewWriter(d.ctx) + wc.ContentType = "text/plain" + wc.Metadata = map[string]string{ + "x-goog-meta-foo": "foo", + "x-goog-meta-bar": "bar", + } + d.cleanUp = append(d.cleanUp, fileName) + + if _, err := wc.Write([]byte("abcde\n")); err != nil { + d.errorf("createFile: unable to write data to bucket %q, file %q: %v", d.bucketName, fileName, err) + return + } + if _, err := wc.Write([]byte(strings.Repeat("f", 1024*4) + "\n")); err != nil { + d.errorf("createFile: unable to write data to bucket %q, file %q: %v", d.bucketName, fileName, err) + return + } + if err := wc.Close(); err != nil { + d.errorf("createFile: unable to close bucket %q, file %q: %v", d.bucketName, fileName, err) + return + } +} + +//[END write] + +//[START read] +// readFile reads the named file in Google Cloud Storage. +func (d *demo) readFile(fileName string) { + io.WriteString(d.w, "\nAbbreviated file content (first line and last 1K):\n") + + rc, err := d.bucket.Object(fileName).NewReader(d.ctx) + if err != nil { + d.errorf("readFile: unable to open file from bucket %q, file %q: %v", d.bucketName, fileName, err) + return + } + defer rc.Close() + slurp, err := ioutil.ReadAll(rc) + if err != nil { + d.errorf("readFile: unable to read data from bucket %q, file %q: %v", d.bucketName, fileName, err) + return + } + + fmt.Fprintf(d.w, "%s\n", bytes.SplitN(slurp, []byte("\n"), 2)[0]) + if len(slurp) > 1024 { + fmt.Fprintf(d.w, "...%s\n", slurp[len(slurp)-1024:]) + } else { + fmt.Fprintf(d.w, "%s\n", slurp) + } +} + +//[END read] + +//[START copy] +// copyFile copies a file in Google Cloud Storage. 
+func (d *demo) copyFile(fileName string) { + copyName := fileName + "-copy" + fmt.Fprintf(d.w, "Copying file /%v/%v to /%v/%v:\n", d.bucketName, fileName, d.bucketName, copyName) + + obj, err := d.bucket.Object(copyName).CopierFrom(d.bucket.Object(fileName)).Run(d.ctx) + if err != nil { + d.errorf("copyFile: unable to copy /%v/%v to bucket %q, file %q: %v", d.bucketName, fileName, d.bucketName, copyName, err) + return + } + d.cleanUp = append(d.cleanUp, copyName) + + d.dumpStats(obj) +} + +//[END copy] + +func (d *demo) dumpStats(obj *storage.ObjectAttrs) { + fmt.Fprintf(d.w, "(filename: /%v/%v, ", obj.Bucket, obj.Name) + fmt.Fprintf(d.w, "ContentType: %q, ", obj.ContentType) + fmt.Fprintf(d.w, "ACL: %#v, ", obj.ACL) + fmt.Fprintf(d.w, "Owner: %v, ", obj.Owner) + fmt.Fprintf(d.w, "ContentEncoding: %q, ", obj.ContentEncoding) + fmt.Fprintf(d.w, "Size: %v, ", obj.Size) + fmt.Fprintf(d.w, "MD5: %q, ", obj.MD5) + fmt.Fprintf(d.w, "CRC32C: %q, ", obj.CRC32C) + fmt.Fprintf(d.w, "Metadata: %#v, ", obj.Metadata) + fmt.Fprintf(d.w, "MediaLink: %q, ", obj.MediaLink) + fmt.Fprintf(d.w, "StorageClass: %q, ", obj.StorageClass) + if !obj.Deleted.IsZero() { + fmt.Fprintf(d.w, "Deleted: %v, ", obj.Deleted) + } + fmt.Fprintf(d.w, "Updated: %v)\n", obj.Updated) +} + +//[START file_metadata] +// statFile reads the stats of the named file in Google Cloud Storage. +func (d *demo) statFile(fileName string) { + io.WriteString(d.w, "\nFile stat:\n") + + obj, err := d.bucket.Object(fileName).Attrs(d.ctx) + if err != nil { + d.errorf("statFile: unable to stat file from bucket %q, file %q: %v", d.bucketName, fileName, err) + return + } + + d.dumpStats(obj) +} + +//[END file_metadata] + +// createListFiles creates files that will be used by listBucket. 
+func (d *demo) createListFiles() { + io.WriteString(d.w, "\nCreating more files for listbucket...\n") + for _, n := range []string{"foo1", "foo2", "bar", "bar/1", "bar/2", "boo/"} { + d.createFile(n) + } +} + +//[START list_bucket] +// listBucket lists the contents of a bucket in Google Cloud Storage. +func (d *demo) listBucket() { + io.WriteString(d.w, "\nListbucket result:\n") + + query := &storage.Query{Prefix: "foo"} + it := d.bucket.Objects(d.ctx, query) + for { + obj, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + d.errorf("listBucket: unable to list bucket %q: %v", d.bucketName, err) + return + } + d.dumpStats(obj) + } +} + +//[END list_bucket] + +func (d *demo) listDir(name, indent string) { + query := &storage.Query{Prefix: name, Delimiter: "/"} + it := d.bucket.Objects(d.ctx, query) + for { + obj, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + d.errorf("listBucketDirMode: unable to list bucket %q: %v", d.bucketName, err) + return + } + if obj.Prefix == "" { + fmt.Fprint(d.w, indent) + d.dumpStats(obj) + continue + } + fmt.Fprintf(d.w, "%v(directory: /%v/%v)\n", indent, d.bucketName, obj.Prefix) + d.listDir(obj.Prefix, indent+" ") + } +} + +// listBucketDirMode lists the contents of a bucket in dir mode in Google Cloud Storage. +func (d *demo) listBucketDirMode() { + io.WriteString(d.w, "\nListbucket directory mode result:\n") + d.listDir("b", "") +} + +// dumpDefaultACL prints out the default object ACL for this bucket. +func (d *demo) dumpDefaultACL() { + acl, err := d.bucket.ACL().List(d.ctx) + if err != nil { + d.errorf("defaultACL: unable to list default object ACL for bucket %q: %v", d.bucketName, err) + return + } + for _, v := range acl { + fmt.Fprintf(d.w, "Scope: %q, Permission: %q\n", v.Entity, v.Role) + } +} + +// defaultACL displays the default object ACL for this bucket. 
+func (d *demo) defaultACL() { + io.WriteString(d.w, "\nDefault object ACL:\n") + d.dumpDefaultACL() +} + +// putDefaultACLRule adds the "allUsers" default object ACL rule for this bucket. +func (d *demo) putDefaultACLRule() { + io.WriteString(d.w, "\nPut Default object ACL Rule:\n") + err := d.bucket.DefaultObjectACL().Set(d.ctx, storage.AllUsers, storage.RoleReader) + if err != nil { + d.errorf("putDefaultACLRule: unable to save default object ACL rule for bucket %q: %v", d.bucketName, err) + return + } + d.dumpDefaultACL() +} + +// deleteDefaultACLRule deleted the "allUsers" default object ACL rule for this bucket. +func (d *demo) deleteDefaultACLRule() { + io.WriteString(d.w, "\nDelete Default object ACL Rule:\n") + err := d.bucket.DefaultObjectACL().Delete(d.ctx, storage.AllUsers) + if err != nil { + d.errorf("deleteDefaultACLRule: unable to delete default object ACL rule for bucket %q: %v", d.bucketName, err) + return + } + d.dumpDefaultACL() +} + +// dumpBucketACL prints out the bucket ACL. +func (d *demo) dumpBucketACL() { + acl, err := d.bucket.ACL().List(d.ctx) + if err != nil { + d.errorf("dumpBucketACL: unable to list bucket ACL for bucket %q: %v", d.bucketName, err) + return + } + for _, v := range acl { + fmt.Fprintf(d.w, "Scope: %q, Permission: %q\n", v.Entity, v.Role) + } +} + +// bucketACL displays the bucket ACL for this bucket. +func (d *demo) bucketACL() { + io.WriteString(d.w, "\nBucket ACL:\n") + d.dumpBucketACL() +} + +// putBucketACLRule adds the "allUsers" bucket ACL rule for this bucket. +func (d *demo) putBucketACLRule() { + io.WriteString(d.w, "\nPut Bucket ACL Rule:\n") + err := d.bucket.ACL().Set(d.ctx, storage.AllUsers, storage.RoleReader) + if err != nil { + d.errorf("putBucketACLRule: unable to save bucket ACL rule for bucket %q: %v", d.bucketName, err) + return + } + d.dumpBucketACL() +} + +// deleteBucketACLRule deleted the "allUsers" bucket ACL rule for this bucket. 
+func (d *demo) deleteBucketACLRule() { + io.WriteString(d.w, "\nDelete Bucket ACL Rule:\n") + err := d.bucket.ACL().Delete(d.ctx, storage.AllUsers) + if err != nil { + d.errorf("deleteBucketACLRule: unable to delete bucket ACL rule for bucket %q: %v", d.bucketName, err) + return + } + d.dumpBucketACL() +} + +// dumpACL prints out the ACL of the named file. +func (d *demo) dumpACL(fileName string) { + acl, err := d.bucket.Object(fileName).ACL().List(d.ctx) + if err != nil { + d.errorf("dumpACL: unable to list file ACL for bucket %q, file %q: %v", d.bucketName, fileName, err) + return + } + for _, v := range acl { + fmt.Fprintf(d.w, "Scope: %q, Permission: %q\n", v.Entity, v.Role) + } +} + +// acl displays the ACL for the named file. +func (d *demo) acl(fileName string) { + fmt.Fprintf(d.w, "\nACL for file %v:\n", fileName) + d.dumpACL(fileName) +} + +// putACLRule adds the "allUsers" ACL rule for the named file. +func (d *demo) putACLRule(fileName string) { + fmt.Fprintf(d.w, "\nPut ACL rule for file %v:\n", fileName) + err := d.bucket.Object(fileName).ACL().Set(d.ctx, storage.AllUsers, storage.RoleReader) + if err != nil { + d.errorf("putACLRule: unable to save ACL rule for bucket %q, file %q: %v", d.bucketName, fileName, err) + return + } + d.dumpACL(fileName) +} + +// deleteACLRule deleted the "allUsers" ACL rule for the named file. +func (d *demo) deleteACLRule(fileName string) { + fmt.Fprintf(d.w, "\nDelete ACL rule for file %v:\n", fileName) + err := d.bucket.Object(fileName).ACL().Delete(d.ctx, storage.AllUsers) + if err != nil { + d.errorf("deleteACLRule: unable to delete ACL rule for bucket %q, file %q: %v", d.bucketName, fileName, err) + return + } + d.dumpACL(fileName) +} + +// deleteFiles deletes all the temporary files from a bucket created by this demo. 
+func (d *demo) deleteFiles() { + io.WriteString(d.w, "\nDeleting files...\n") + for _, v := range d.cleanUp { + fmt.Fprintf(d.w, "Deleting file %v\n", v) + if err := d.bucket.Object(v).Delete(d.ctx); err != nil { + d.errorf("deleteFiles: unable to delete bucket %q, file %q: %v", d.bucketName, v, err) + return + } + } +} + +//[END sample] diff --git a/vendor/cloud.google.com/go/examples/storage/appengine/app.yaml b/vendor/cloud.google.com/go/examples/storage/appengine/app.yaml new file mode 100644 index 00000000..efdf66f2 --- /dev/null +++ b/vendor/cloud.google.com/go/examples/storage/appengine/app.yaml @@ -0,0 +1,8 @@ +application: your-app-id +version: v1 +runtime: go +api_version: go1 + +handlers: +- url: /.* + script: _go_app diff --git a/vendor/cloud.google.com/go/examples/storage/appenginevm/app.go b/vendor/cloud.google.com/go/examples/storage/appenginevm/app.go new file mode 100644 index 00000000..cdf99a97 --- /dev/null +++ b/vendor/cloud.google.com/go/examples/storage/appenginevm/app.go @@ -0,0 +1,432 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package main is an example Mananged VM app using the Google Cloud Storage API. 
+package main + +//[START imports] +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "net/http" + "strings" + + "cloud.google.com/go/storage" + "golang.org/x/net/context" + "google.golang.org/api/iterator" + "google.golang.org/appengine" + "google.golang.org/appengine/file" + "google.golang.org/appengine/log" +) + +//[END imports] + +func main() { + http.HandleFunc("/", handler) + appengine.Main() +} + +//[START bucket_struct] +// demo struct holds information needed to run the various demo functions. +type demo struct { + client *storage.Client + bucketName string + bucket *storage.BucketHandle + + w io.Writer + ctx context.Context + // cleanUp is a list of filenames that need cleaning up at the end of the demo. + cleanUp []string + // failed indicates that one or more of the demo steps failed. + failed bool +} + +//[END bucket_struct] + +func (d *demo) errorf(format string, args ...interface{}) { + d.failed = true + fmt.Fprintln(d.w, fmt.Sprintf(format, args...)) + log.Errorf(d.ctx, format, args...) +} + +// handler is the main demo entry point that calls the GCS operations. +func handler(w http.ResponseWriter, r *http.Request) { + ctx := appengine.NewContext(r) + + if r.URL.Path != "/" { + http.NotFound(w, r) + return + } + + //[START get_default_bucket] + // Use `dev_appserver.py --default_gcs_bucket_name GCS_BUCKET_NAME` + // when running locally. 
+ bucket, err := file.DefaultBucketName(ctx) + if err != nil { + log.Errorf(ctx, "failed to get default GCS bucket name: %v", err) + } + //[END get_default_bucket] + + client, err := storage.NewClient(ctx) + if err != nil { + log.Errorf(ctx, "failed to create client: %v", err) + return + } + defer client.Close() + + w.Header().Set("Content-Type", "text/plain; charset=utf-8") + fmt.Fprintf(w, "Demo GCS Application running from Version: %v\n", appengine.VersionID(ctx)) + fmt.Fprintf(w, "Using bucket name: %v\n\n", bucket) + + buf := &bytes.Buffer{} + d := &demo{ + w: buf, + ctx: ctx, + client: client, + bucket: client.Bucket(bucket), + bucketName: bucket, + } + + n := "demo-testfile-go" + d.createFile(n) + d.readFile(n) + d.copyFile(n) + d.statFile(n) + d.createListFiles() + d.listBucket() + d.listBucketDirMode() + d.defaultACL() + d.putDefaultACLRule() + d.deleteDefaultACLRule() + d.bucketACL() + d.putBucketACLRule() + d.deleteBucketACLRule() + d.acl(n) + d.putACLRule(n) + d.deleteACLRule(n) + d.deleteFiles() + + if d.failed { + w.WriteHeader(http.StatusInternalServerError) + buf.WriteTo(w) + fmt.Fprintf(w, "\nDemo failed.\n") + } else { + w.WriteHeader(http.StatusOK) + buf.WriteTo(w) + fmt.Fprintf(w, "\nDemo succeeded.\n") + } +} + +//[START write] +// createFile creates a file in Google Cloud Storage. 
+func (d *demo) createFile(fileName string) { + fmt.Fprintf(d.w, "Creating file /%v/%v\n", d.bucket, fileName) + + wc := d.bucket.Object(fileName).NewWriter(d.ctx) + wc.ContentType = "text/plain" + wc.Metadata = map[string]string{ + "x-goog-meta-foo": "foo", + "x-goog-meta-bar": "bar", + } + d.cleanUp = append(d.cleanUp, fileName) + + if _, err := wc.Write([]byte("abcde\n")); err != nil { + d.errorf("createFile: unable to write data to bucket %q, file %q: %v", d.bucketName, fileName, err) + return + } + if _, err := wc.Write([]byte(strings.Repeat("f", 1024*4) + "\n")); err != nil { + d.errorf("createFile: unable to write data to bucket %q, file %q: %v", d.bucketName, fileName, err) + return + } + if err := wc.Close(); err != nil { + d.errorf("createFile: unable to close bucket %q, file %q: %v", d.bucketName, fileName, err) + return + } +} + +//[END write] + +//[START read] +// readFile reads the named file in Google Cloud Storage. +func (d *demo) readFile(fileName string) { + io.WriteString(d.w, "\nAbbreviated file content (first line and last 1K):\n") + + rc, err := d.bucket.Object(fileName).NewReader(d.ctx) + if err != nil { + d.errorf("readFile: unable to open file from bucket %q, file %q: %v", d.bucketName, fileName, err) + return + } + defer rc.Close() + slurp, err := ioutil.ReadAll(rc) + if err != nil { + d.errorf("readFile: unable to read data from bucket %q, file %q: %v", d.bucketName, fileName, err) + return + } + + fmt.Fprintf(d.w, "%s\n", bytes.SplitN(slurp, []byte("\n"), 2)[0]) + if len(slurp) > 1024 { + fmt.Fprintf(d.w, "...%s\n", slurp[len(slurp)-1024:]) + } else { + fmt.Fprintf(d.w, "%s\n", slurp) + } +} + +//[END read] + +//[START copy] +// copyFile copies a file in Google Cloud Storage. 
+func (d *demo) copyFile(fileName string) { + copyName := fileName + "-copy" + fmt.Fprintf(d.w, "Copying file /%v/%v to /%v/%v:\n", d.bucket, fileName, d.bucketName, copyName) + + obj, err := d.bucket.Object(copyName).CopierFrom(d.bucket.Object(fileName)).Run(d.ctx) + if err != nil { + d.errorf("copyFile: unable to copy /%v/%v to bucket %q, file %q: %v", d.bucketName, fileName, d.bucketName, copyName, err) + return + } + d.cleanUp = append(d.cleanUp, copyName) + + d.dumpStats(obj) +} + +//[END copy] + +func (d *demo) dumpStats(obj *storage.ObjectAttrs) { + fmt.Fprintf(d.w, "(filename: /%v/%v, ", obj.Bucket, obj.Name) + fmt.Fprintf(d.w, "ContentType: %q, ", obj.ContentType) + fmt.Fprintf(d.w, "ACL: %#v, ", obj.ACL) + fmt.Fprintf(d.w, "Owner: %v, ", obj.Owner) + fmt.Fprintf(d.w, "ContentEncoding: %q, ", obj.ContentEncoding) + fmt.Fprintf(d.w, "Size: %v, ", obj.Size) + fmt.Fprintf(d.w, "MD5: %q, ", obj.MD5) + fmt.Fprintf(d.w, "CRC32C: %q, ", obj.CRC32C) + fmt.Fprintf(d.w, "Metadata: %#v, ", obj.Metadata) + fmt.Fprintf(d.w, "MediaLink: %q, ", obj.MediaLink) + fmt.Fprintf(d.w, "StorageClass: %q, ", obj.StorageClass) + if !obj.Deleted.IsZero() { + fmt.Fprintf(d.w, "Deleted: %v, ", obj.Deleted) + } + fmt.Fprintf(d.w, "Updated: %v)\n", obj.Updated) +} + +//[START file_metadata] +// statFile reads the stats of the named file in Google Cloud Storage. +func (d *demo) statFile(fileName string) { + io.WriteString(d.w, "\nFile stat:\n") + + obj, err := d.bucket.Object(fileName).Attrs(d.ctx) + if err != nil { + d.errorf("statFile: unable to stat file from bucket %q, file %q: %v", d.bucketName, fileName, err) + return + } + + d.dumpStats(obj) +} + +//[END file_metadata] + +// createListFiles creates files that will be used by listBucket. 
+func (d *demo) createListFiles() { + io.WriteString(d.w, "\nCreating more files for listbucket...\n") + for _, n := range []string{"foo1", "foo2", "bar", "bar/1", "bar/2", "boo/"} { + d.createFile(n) + } +} + +//[START list_bucket] +// listBucket lists the contents of a bucket in Google Cloud Storage. +func (d *demo) listBucket() { + io.WriteString(d.w, "\nListbucket result:\n") + + query := &storage.Query{Prefix: "foo"} + it := d.bucket.Objects(d.ctx, query) + for { + obj, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + d.errorf("listBucket: unable to list bucket %q: %v", d.bucketName, err) + return + } + d.dumpStats(obj) + } +} + +//[END list_bucket] + +func (d *demo) listDir(name, indent string) { + query := &storage.Query{Prefix: name, Delimiter: "/"} + it := d.bucket.Objects(d.ctx, query) + for { + obj, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + d.errorf("listBucketDirMode: unable to list bucket %q: %v", d.bucketName, err) + return + } + if obj.Prefix == "" { + fmt.Fprint(d.w, indent) + d.dumpStats(obj) + continue + } + fmt.Fprintf(d.w, "%v(directory: /%v/%v)\n", indent, d.bucketName, obj.Prefix) + d.listDir(obj.Prefix, indent+" ") + } +} + +// listBucketDirMode lists the contents of a bucket in dir mode in Google Cloud Storage. +func (d *demo) listBucketDirMode() { + io.WriteString(d.w, "\nListbucket directory mode result:\n") + d.listDir("b", "") +} + +// dumpDefaultACL prints out the default object ACL for this bucket. +func (d *demo) dumpDefaultACL() { + acl, err := d.bucket.ACL().List(d.ctx) + if err != nil { + d.errorf("defaultACL: unable to list default object ACL for bucket %q: %v", d.bucketName, err) + return + } + for _, v := range acl { + fmt.Fprintf(d.w, "Scope: %q, Permission: %q\n", v.Entity, v.Role) + } +} + +// defaultACL displays the default object ACL for this bucket. 
+func (d *demo) defaultACL() { + io.WriteString(d.w, "\nDefault object ACL:\n") + d.dumpDefaultACL() +} + +// putDefaultACLRule adds the "allUsers" default object ACL rule for this bucket. +func (d *demo) putDefaultACLRule() { + io.WriteString(d.w, "\nPut Default object ACL Rule:\n") + err := d.bucket.DefaultObjectACL().Set(d.ctx, storage.AllUsers, storage.RoleReader) + if err != nil { + d.errorf("putDefaultACLRule: unable to save default object ACL rule for bucket %q: %v", d.bucketName, err) + return + } + d.dumpDefaultACL() +} + +// deleteDefaultACLRule deleted the "allUsers" default object ACL rule for this bucket. +func (d *demo) deleteDefaultACLRule() { + io.WriteString(d.w, "\nDelete Default object ACL Rule:\n") + err := d.bucket.DefaultObjectACL().Delete(d.ctx, storage.AllUsers) + if err != nil { + d.errorf("deleteDefaultACLRule: unable to delete default object ACL rule for bucket %q: %v", d.bucketName, err) + return + } + d.dumpDefaultACL() +} + +// dumpBucketACL prints out the bucket ACL. +func (d *demo) dumpBucketACL() { + acl, err := d.bucket.ACL().List(d.ctx) + if err != nil { + d.errorf("dumpBucketACL: unable to list bucket ACL for bucket %q: %v", d.bucketName, err) + return + } + for _, v := range acl { + fmt.Fprintf(d.w, "Scope: %q, Permission: %q\n", v.Entity, v.Role) + } +} + +// bucketACL displays the bucket ACL for this bucket. +func (d *demo) bucketACL() { + io.WriteString(d.w, "\nBucket ACL:\n") + d.dumpBucketACL() +} + +// putBucketACLRule adds the "allUsers" bucket ACL rule for this bucket. +func (d *demo) putBucketACLRule() { + io.WriteString(d.w, "\nPut Bucket ACL Rule:\n") + err := d.bucket.ACL().Set(d.ctx, storage.AllUsers, storage.RoleReader) + if err != nil { + d.errorf("putBucketACLRule: unable to save bucket ACL rule for bucket %q: %v", d.bucketName, err) + return + } + d.dumpBucketACL() +} + +// deleteBucketACLRule deleted the "allUsers" bucket ACL rule for this bucket. 
+func (d *demo) deleteBucketACLRule() { + io.WriteString(d.w, "\nDelete Bucket ACL Rule:\n") + err := d.bucket.ACL().Delete(d.ctx, storage.AllUsers) + if err != nil { + d.errorf("deleteBucketACLRule: unable to delete bucket ACL rule for bucket %q: %v", d.bucketName, err) + return + } + d.dumpBucketACL() +} + +// dumpACL prints out the ACL of the named file. +func (d *demo) dumpACL(fileName string) { + acl, err := d.bucket.Object(fileName).ACL().List(d.ctx) + if err != nil { + d.errorf("dumpACL: unable to list file ACL for bucket %q, file %q: %v", d.bucketName, fileName, err) + return + } + for _, v := range acl { + fmt.Fprintf(d.w, "Scope: %q, Permission: %q\n", v.Entity, v.Role) + } +} + +// acl displays the ACL for the named file. +func (d *demo) acl(fileName string) { + fmt.Fprintf(d.w, "\nACL for file %v:\n", fileName) + d.dumpACL(fileName) +} + +// putACLRule adds the "allUsers" ACL rule for the named file. +func (d *demo) putACLRule(fileName string) { + fmt.Fprintf(d.w, "\nPut ACL rule for file %v:\n", fileName) + err := d.bucket.Object(fileName).ACL().Set(d.ctx, storage.AllUsers, storage.RoleReader) + if err != nil { + d.errorf("putACLRule: unable to save ACL rule for bucket %q, file %q: %v", d.bucketName, fileName, err) + return + } + d.dumpACL(fileName) +} + +// deleteACLRule deleted the "allUsers" ACL rule for the named file. +func (d *demo) deleteACLRule(fileName string) { + fmt.Fprintf(d.w, "\nDelete ACL rule for file %v:\n", fileName) + err := d.bucket.Object(fileName).ACL().Delete(d.ctx, storage.AllUsers) + if err != nil { + d.errorf("deleteACLRule: unable to delete ACL rule for bucket %q, file %q: %v", d.bucketName, fileName, err) + return + } + d.dumpACL(fileName) +} + +//[START delete] +// deleteFiles deletes all the temporary files from a bucket created by this demo. 
+func (d *demo) deleteFiles() { + io.WriteString(d.w, "\nDeleting files...\n") + for _, v := range d.cleanUp { + fmt.Fprintf(d.w, "Deleting file %v\n", v) + if err := d.bucket.Object(v).Delete(d.ctx); err != nil { + d.errorf("deleteFiles: unable to delete bucket %q, file %q: %v", d.bucketName, v, err) + return + } + } +} + +//[END delete] diff --git a/vendor/cloud.google.com/go/examples/storage/appenginevm/app.yaml b/vendor/cloud.google.com/go/examples/storage/appenginevm/app.yaml new file mode 100644 index 00000000..562055a6 --- /dev/null +++ b/vendor/cloud.google.com/go/examples/storage/appenginevm/app.yaml @@ -0,0 +1,10 @@ +runtime: go +api_version: 1 +vm: true + +manual_scaling: + instances: 1 + +handlers: +- url: /.* + script: _go_app diff --git a/vendor/cloud.google.com/go/iam/admin/apiv1/doc.go b/vendor/cloud.google.com/go/iam/admin/apiv1/doc.go new file mode 100644 index 00000000..a976ac09 --- /dev/null +++ b/vendor/cloud.google.com/go/iam/admin/apiv1/doc.go @@ -0,0 +1,35 @@ +// Copyright 2017, Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +// Package admin is an experimental, auto-generated package for the +// admin API. +// +// Manages identity and access control for Google Cloud Platform resources, +// including the creation of service accounts, which you can use to +// authenticate to Google and make API calls. 
+package admin // import "cloud.google.com/go/iam/admin/apiv1" + +import ( + "golang.org/x/net/context" + "google.golang.org/grpc/metadata" +) + +func insertXGoog(ctx context.Context, val string) context.Context { + md, _ := metadata.FromContext(ctx) + md = md.Copy() + md["x-goog-api-client"] = []string{val} + return metadata.NewContext(ctx, md) +} diff --git a/vendor/cloud.google.com/go/iam/admin/apiv1/iam_client.go b/vendor/cloud.google.com/go/iam/admin/apiv1/iam_client.go new file mode 100644 index 00000000..c743a58a --- /dev/null +++ b/vendor/cloud.google.com/go/iam/admin/apiv1/iam_client.go @@ -0,0 +1,490 @@ +// Copyright 2017, Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. 
+ +package admin + +import ( + "math" + "time" + + "cloud.google.com/go/internal/version" + gax "github.com/googleapis/gax-go" + "golang.org/x/net/context" + "google.golang.org/api/iterator" + "google.golang.org/api/option" + "google.golang.org/api/transport" + adminpb "google.golang.org/genproto/googleapis/iam/admin/v1" + iampb "google.golang.org/genproto/googleapis/iam/v1" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" +) + +var ( + iamProjectPathTemplate = gax.MustCompilePathTemplate("projects/{project}") + iamServiceAccountPathTemplate = gax.MustCompilePathTemplate("projects/{project}/serviceAccounts/{service_account}") + iamKeyPathTemplate = gax.MustCompilePathTemplate("projects/{project}/serviceAccounts/{service_account}/keys/{key}") +) + +// IamCallOptions contains the retry settings for each method of IamClient. +type IamCallOptions struct { + ListServiceAccounts []gax.CallOption + GetServiceAccount []gax.CallOption + CreateServiceAccount []gax.CallOption + UpdateServiceAccount []gax.CallOption + DeleteServiceAccount []gax.CallOption + ListServiceAccountKeys []gax.CallOption + GetServiceAccountKey []gax.CallOption + CreateServiceAccountKey []gax.CallOption + DeleteServiceAccountKey []gax.CallOption + SignBlob []gax.CallOption + GetIamPolicy []gax.CallOption + SetIamPolicy []gax.CallOption + TestIamPermissions []gax.CallOption + QueryGrantableRoles []gax.CallOption +} + +func defaultIamClientOptions() []option.ClientOption { + return []option.ClientOption{ + option.WithEndpoint("iam.googleapis.com:443"), + option.WithScopes( + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/iam", + ), + } +} + +func defaultIamCallOptions() *IamCallOptions { + retry := map[[2]string][]gax.CallOption{ + {"default", "idempotent"}: { + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * 
time.Millisecond, + Multiplier: 1.3, + }) + }), + }, + {"default", "non_idempotent"}: { + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.3, + }) + }), + }, + } + return &IamCallOptions{ + ListServiceAccounts: retry[[2]string{"default", "idempotent"}], + GetServiceAccount: retry[[2]string{"default", "idempotent"}], + CreateServiceAccount: retry[[2]string{"default", "non_idempotent"}], + UpdateServiceAccount: retry[[2]string{"default", "idempotent"}], + DeleteServiceAccount: retry[[2]string{"default", "idempotent"}], + ListServiceAccountKeys: retry[[2]string{"default", "idempotent"}], + GetServiceAccountKey: retry[[2]string{"default", "idempotent"}], + CreateServiceAccountKey: retry[[2]string{"default", "non_idempotent"}], + DeleteServiceAccountKey: retry[[2]string{"default", "idempotent"}], + SignBlob: retry[[2]string{"default", "non_idempotent"}], + GetIamPolicy: retry[[2]string{"default", "non_idempotent"}], + SetIamPolicy: retry[[2]string{"default", "non_idempotent"}], + TestIamPermissions: retry[[2]string{"default", "non_idempotent"}], + QueryGrantableRoles: retry[[2]string{"default", "non_idempotent"}], + } +} + +// IamClient is a client for interacting with Google Identity and Access Management (IAM) API. +type IamClient struct { + // The connection to the service. + conn *grpc.ClientConn + + // The gRPC API client. + iamClient adminpb.IAMClient + + // The call options for this service. + CallOptions *IamCallOptions + + // The metadata to be sent with each request. + xGoogHeader string +} + +// NewIamClient creates a new iam client. +// +// Creates and manages service account objects. +// +// Service account is an account that belongs to your project instead +// of to an individual end user. It is used to authenticate calls +// to a Google API. 
+// +// To create a service account, specify the `project_id` and `account_id` +// for the account. The `account_id` is unique within the project, and used +// to generate the service account email address and a stable +// `unique_id`. +// +// All other methods can identify accounts using the format +// `projects/{project}/serviceAccounts/{account}`. +// Using `-` as a wildcard for the project will infer the project from +// the account. The `account` value can be the `email` address or the +// `unique_id` of the service account. +func NewIamClient(ctx context.Context, opts ...option.ClientOption) (*IamClient, error) { + conn, err := transport.DialGRPC(ctx, append(defaultIamClientOptions(), opts...)...) + if err != nil { + return nil, err + } + c := &IamClient{ + conn: conn, + CallOptions: defaultIamCallOptions(), + + iamClient: adminpb.NewIAMClient(conn), + } + c.SetGoogleClientInfo() + return c, nil +} + +// Connection returns the client's connection to the API service. +func (c *IamClient) Connection() *grpc.ClientConn { + return c.conn +} + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *IamClient) Close() error { + return c.conn.Close() +} + +// SetGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *IamClient) SetGoogleClientInfo(keyval ...string) { + kv := append([]string{"gl-go", version.Go()}, keyval...) + kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", "") + c.xGoogHeader = gax.XGoogHeader(kv...) +} + +// IamProjectPath returns the path for the project resource. +func IamProjectPath(project string) string { + path, err := iamProjectPathTemplate.Render(map[string]string{ + "project": project, + }) + if err != nil { + panic(err) + } + return path +} + +// IamServiceAccountPath returns the path for the service account resource. 
+func IamServiceAccountPath(project, serviceAccount string) string { + path, err := iamServiceAccountPathTemplate.Render(map[string]string{ + "project": project, + "service_account": serviceAccount, + }) + if err != nil { + panic(err) + } + return path +} + +// IamKeyPath returns the path for the key resource. +func IamKeyPath(project, serviceAccount, key string) string { + path, err := iamKeyPathTemplate.Render(map[string]string{ + "project": project, + "service_account": serviceAccount, + "key": key, + }) + if err != nil { + panic(err) + } + return path +} + +// ListServiceAccounts lists [ServiceAccounts][google.iam.admin.v1.ServiceAccount] for a project. +func (c *IamClient) ListServiceAccounts(ctx context.Context, req *adminpb.ListServiceAccountsRequest) *ServiceAccountIterator { + ctx = insertXGoog(ctx, c.xGoogHeader) + it := &ServiceAccountIterator{} + it.InternalFetch = func(pageSize int, pageToken string) ([]*adminpb.ServiceAccount, string, error) { + var resp *adminpb.ListServiceAccountsResponse + req.PageToken = pageToken + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context) error { + var err error + resp, err = c.iamClient.ListServiceAccounts(ctx, req) + return err + }, c.CallOptions.ListServiceAccounts...) + if err != nil { + return nil, "", err + } + return resp.Accounts, resp.NextPageToken, nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + return it +} + +// GetServiceAccount gets a [ServiceAccount][google.iam.admin.v1.ServiceAccount]. 
+func (c *IamClient) GetServiceAccount(ctx context.Context, req *adminpb.GetServiceAccountRequest) (*adminpb.ServiceAccount, error) { + ctx = insertXGoog(ctx, c.xGoogHeader) + var resp *adminpb.ServiceAccount + err := gax.Invoke(ctx, func(ctx context.Context) error { + var err error + resp, err = c.iamClient.GetServiceAccount(ctx, req) + return err + }, c.CallOptions.GetServiceAccount...) + if err != nil { + return nil, err + } + return resp, nil +} + +// CreateServiceAccount creates a [ServiceAccount][google.iam.admin.v1.ServiceAccount] +// and returns it. +func (c *IamClient) CreateServiceAccount(ctx context.Context, req *adminpb.CreateServiceAccountRequest) (*adminpb.ServiceAccount, error) { + ctx = insertXGoog(ctx, c.xGoogHeader) + var resp *adminpb.ServiceAccount + err := gax.Invoke(ctx, func(ctx context.Context) error { + var err error + resp, err = c.iamClient.CreateServiceAccount(ctx, req) + return err + }, c.CallOptions.CreateServiceAccount...) + if err != nil { + return nil, err + } + return resp, nil +} + +// UpdateServiceAccount updates a [ServiceAccount][google.iam.admin.v1.ServiceAccount]. +// +// Currently, only the following fields are updatable: +// `display_name` . +// The `etag` is mandatory. +func (c *IamClient) UpdateServiceAccount(ctx context.Context, req *adminpb.ServiceAccount) (*adminpb.ServiceAccount, error) { + ctx = insertXGoog(ctx, c.xGoogHeader) + var resp *adminpb.ServiceAccount + err := gax.Invoke(ctx, func(ctx context.Context) error { + var err error + resp, err = c.iamClient.UpdateServiceAccount(ctx, req) + return err + }, c.CallOptions.UpdateServiceAccount...) + if err != nil { + return nil, err + } + return resp, nil +} + +// DeleteServiceAccount deletes a [ServiceAccount][google.iam.admin.v1.ServiceAccount]. 
+func (c *IamClient) DeleteServiceAccount(ctx context.Context, req *adminpb.DeleteServiceAccountRequest) error { + ctx = insertXGoog(ctx, c.xGoogHeader) + err := gax.Invoke(ctx, func(ctx context.Context) error { + var err error + _, err = c.iamClient.DeleteServiceAccount(ctx, req) + return err + }, c.CallOptions.DeleteServiceAccount...) + return err +} + +// ListServiceAccountKeys lists [ServiceAccountKeys][google.iam.admin.v1.ServiceAccountKey]. +func (c *IamClient) ListServiceAccountKeys(ctx context.Context, req *adminpb.ListServiceAccountKeysRequest) (*adminpb.ListServiceAccountKeysResponse, error) { + ctx = insertXGoog(ctx, c.xGoogHeader) + var resp *adminpb.ListServiceAccountKeysResponse + err := gax.Invoke(ctx, func(ctx context.Context) error { + var err error + resp, err = c.iamClient.ListServiceAccountKeys(ctx, req) + return err + }, c.CallOptions.ListServiceAccountKeys...) + if err != nil { + return nil, err + } + return resp, nil +} + +// GetServiceAccountKey gets the [ServiceAccountKey][google.iam.admin.v1.ServiceAccountKey] +// by key id. +func (c *IamClient) GetServiceAccountKey(ctx context.Context, req *adminpb.GetServiceAccountKeyRequest) (*adminpb.ServiceAccountKey, error) { + ctx = insertXGoog(ctx, c.xGoogHeader) + var resp *adminpb.ServiceAccountKey + err := gax.Invoke(ctx, func(ctx context.Context) error { + var err error + resp, err = c.iamClient.GetServiceAccountKey(ctx, req) + return err + }, c.CallOptions.GetServiceAccountKey...) + if err != nil { + return nil, err + } + return resp, nil +} + +// CreateServiceAccountKey creates a [ServiceAccountKey][google.iam.admin.v1.ServiceAccountKey] +// and returns it. 
+func (c *IamClient) CreateServiceAccountKey(ctx context.Context, req *adminpb.CreateServiceAccountKeyRequest) (*adminpb.ServiceAccountKey, error) { + ctx = insertXGoog(ctx, c.xGoogHeader) + var resp *adminpb.ServiceAccountKey + err := gax.Invoke(ctx, func(ctx context.Context) error { + var err error + resp, err = c.iamClient.CreateServiceAccountKey(ctx, req) + return err + }, c.CallOptions.CreateServiceAccountKey...) + if err != nil { + return nil, err + } + return resp, nil +} + +// DeleteServiceAccountKey deletes a [ServiceAccountKey][google.iam.admin.v1.ServiceAccountKey]. +func (c *IamClient) DeleteServiceAccountKey(ctx context.Context, req *adminpb.DeleteServiceAccountKeyRequest) error { + ctx = insertXGoog(ctx, c.xGoogHeader) + err := gax.Invoke(ctx, func(ctx context.Context) error { + var err error + _, err = c.iamClient.DeleteServiceAccountKey(ctx, req) + return err + }, c.CallOptions.DeleteServiceAccountKey...) + return err +} + +// SignBlob signs a blob using a service account's system-managed private key. +func (c *IamClient) SignBlob(ctx context.Context, req *adminpb.SignBlobRequest) (*adminpb.SignBlobResponse, error) { + ctx = insertXGoog(ctx, c.xGoogHeader) + var resp *adminpb.SignBlobResponse + err := gax.Invoke(ctx, func(ctx context.Context) error { + var err error + resp, err = c.iamClient.SignBlob(ctx, req) + return err + }, c.CallOptions.SignBlob...) + if err != nil { + return nil, err + } + return resp, nil +} + +// getIamPolicy returns the IAM access control policy for a +// [ServiceAccount][google.iam.admin.v1.ServiceAccount]. +func (c *IamClient) getIamPolicy(ctx context.Context, req *iampb.GetIamPolicyRequest) (*iampb.Policy, error) { + ctx = insertXGoog(ctx, c.xGoogHeader) + var resp *iampb.Policy + err := gax.Invoke(ctx, func(ctx context.Context) error { + var err error + resp, err = c.iamClient.GetIamPolicy(ctx, req) + return err + }, c.CallOptions.GetIamPolicy...) 
+ if err != nil { + return nil, err + } + return resp, nil +} + +// setIamPolicy sets the IAM access control policy for a +// [ServiceAccount][google.iam.admin.v1.ServiceAccount]. +func (c *IamClient) setIamPolicy(ctx context.Context, req *iampb.SetIamPolicyRequest) (*iampb.Policy, error) { + ctx = insertXGoog(ctx, c.xGoogHeader) + var resp *iampb.Policy + err := gax.Invoke(ctx, func(ctx context.Context) error { + var err error + resp, err = c.iamClient.SetIamPolicy(ctx, req) + return err + }, c.CallOptions.SetIamPolicy...) + if err != nil { + return nil, err + } + return resp, nil +} + +// TestIamPermissions tests the specified permissions against the IAM access control policy +// for a [ServiceAccount][google.iam.admin.v1.ServiceAccount]. +func (c *IamClient) TestIamPermissions(ctx context.Context, req *iampb.TestIamPermissionsRequest) (*iampb.TestIamPermissionsResponse, error) { + ctx = insertXGoog(ctx, c.xGoogHeader) + var resp *iampb.TestIamPermissionsResponse + err := gax.Invoke(ctx, func(ctx context.Context) error { + var err error + resp, err = c.iamClient.TestIamPermissions(ctx, req) + return err + }, c.CallOptions.TestIamPermissions...) + if err != nil { + return nil, err + } + return resp, nil +} + +// QueryGrantableRoles queries roles that can be granted on a particular resource. +// A role is grantable if it can be used as the role in a binding for a policy +// for that resource. +func (c *IamClient) QueryGrantableRoles(ctx context.Context, req *adminpb.QueryGrantableRolesRequest) (*adminpb.QueryGrantableRolesResponse, error) { + ctx = insertXGoog(ctx, c.xGoogHeader) + var resp *adminpb.QueryGrantableRolesResponse + err := gax.Invoke(ctx, func(ctx context.Context) error { + var err error + resp, err = c.iamClient.QueryGrantableRoles(ctx, req) + return err + }, c.CallOptions.QueryGrantableRoles...) + if err != nil { + return nil, err + } + return resp, nil +} + +// ServiceAccountIterator manages a stream of *adminpb.ServiceAccount. 
+type ServiceAccountIterator struct { + items []*adminpb.ServiceAccount + pageInfo *iterator.PageInfo + nextFunc func() error + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*adminpb.ServiceAccount, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *ServiceAccountIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. +func (it *ServiceAccountIterator) Next() (*adminpb.ServiceAccount, error) { + var item *adminpb.ServiceAccount + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *ServiceAccountIterator) bufLen() int { + return len(it.items) +} + +func (it *ServiceAccountIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} diff --git a/vendor/cloud.google.com/go/iam/admin/apiv1/iam_client_example_test.go b/vendor/cloud.google.com/go/iam/admin/apiv1/iam_client_example_test.go new file mode 100644 index 00000000..983d48e2 --- /dev/null +++ b/vendor/cloud.google.com/go/iam/admin/apiv1/iam_client_example_test.go @@ -0,0 +1,250 @@ +// Copyright 2017, Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package admin_test + +import ( + "cloud.google.com/go/iam/admin/apiv1" + "golang.org/x/net/context" + adminpb "google.golang.org/genproto/googleapis/iam/admin/v1" + iampb "google.golang.org/genproto/googleapis/iam/v1" +) + +func ExampleNewIamClient() { + ctx := context.Background() + c, err := admin.NewIamClient(ctx) + if err != nil { + // TODO: Handle error. + } + // TODO: Use client. + _ = c +} + +func ExampleIamClient_ListServiceAccounts() { + ctx := context.Background() + c, err := admin.NewIamClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &adminpb.ListServiceAccountsRequest{ + // TODO: Fill request struct fields. + } + it := c.ListServiceAccounts(ctx, req) + for { + resp, err := it.Next() + if err != nil { + // TODO: Handle error. + break + } + // TODO: Use resp. + _ = resp + } +} + +func ExampleIamClient_GetServiceAccount() { + ctx := context.Background() + c, err := admin.NewIamClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &adminpb.GetServiceAccountRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.GetServiceAccount(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleIamClient_CreateServiceAccount() { + ctx := context.Background() + c, err := admin.NewIamClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &adminpb.CreateServiceAccountRequest{ + // TODO: Fill request struct fields. 
+ } + resp, err := c.CreateServiceAccount(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleIamClient_UpdateServiceAccount() { + ctx := context.Background() + c, err := admin.NewIamClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &adminpb.ServiceAccount{ + // TODO: Fill request struct fields. + } + resp, err := c.UpdateServiceAccount(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleIamClient_DeleteServiceAccount() { + ctx := context.Background() + c, err := admin.NewIamClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &adminpb.DeleteServiceAccountRequest{ + // TODO: Fill request struct fields. + } + err = c.DeleteServiceAccount(ctx, req) + if err != nil { + // TODO: Handle error. + } +} + +func ExampleIamClient_ListServiceAccountKeys() { + ctx := context.Background() + c, err := admin.NewIamClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &adminpb.ListServiceAccountKeysRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.ListServiceAccountKeys(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleIamClient_GetServiceAccountKey() { + ctx := context.Background() + c, err := admin.NewIamClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &adminpb.GetServiceAccountKeyRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.GetServiceAccountKey(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleIamClient_CreateServiceAccountKey() { + ctx := context.Background() + c, err := admin.NewIamClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &adminpb.CreateServiceAccountKeyRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.CreateServiceAccountKey(ctx, req) + if err != nil { + // TODO: Handle error. 
+ } + // TODO: Use resp. + _ = resp +} + +func ExampleIamClient_DeleteServiceAccountKey() { + ctx := context.Background() + c, err := admin.NewIamClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &adminpb.DeleteServiceAccountKeyRequest{ + // TODO: Fill request struct fields. + } + err = c.DeleteServiceAccountKey(ctx, req) + if err != nil { + // TODO: Handle error. + } +} + +func ExampleIamClient_SignBlob() { + ctx := context.Background() + c, err := admin.NewIamClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &adminpb.SignBlobRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.SignBlob(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleIamClient_TestIamPermissions() { + ctx := context.Background() + c, err := admin.NewIamClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &iampb.TestIamPermissionsRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.TestIamPermissions(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleIamClient_QueryGrantableRoles() { + ctx := context.Background() + c, err := admin.NewIamClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &adminpb.QueryGrantableRolesRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.QueryGrantableRoles(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} diff --git a/vendor/cloud.google.com/go/iam/admin/apiv1/mock_test.go b/vendor/cloud.google.com/go/iam/admin/apiv1/mock_test.go new file mode 100644 index 00000000..5d8a1fd8 --- /dev/null +++ b/vendor/cloud.google.com/go/iam/admin/apiv1/mock_test.go @@ -0,0 +1,1055 @@ +// Copyright 2017, Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package admin + +import ( + google_protobuf "github.com/golang/protobuf/ptypes/empty" + adminpb "google.golang.org/genproto/googleapis/iam/admin/v1" + iampb "google.golang.org/genproto/googleapis/iam/v1" +) + +import ( + "flag" + "io" + "log" + "net" + "os" + "testing" + + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes" + "golang.org/x/net/context" + "google.golang.org/api/option" + status "google.golang.org/genproto/googleapis/rpc/status" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" +) + +var _ = io.EOF +var _ = ptypes.MarshalAny +var _ status.Status + +type mockIamServer struct { + // Embed for forward compatibility. + // Tests will keep working if more methods are added + // in the future. + adminpb.IAMServer + + reqs []proto.Message + + // If set, all calls return this error. 
+ err error + + // responses to return if err == nil + resps []proto.Message +} + +func (s *mockIamServer) ListServiceAccounts(_ context.Context, req *adminpb.ListServiceAccountsRequest) (*adminpb.ListServiceAccountsResponse, error) { + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*adminpb.ListServiceAccountsResponse), nil +} + +func (s *mockIamServer) GetServiceAccount(_ context.Context, req *adminpb.GetServiceAccountRequest) (*adminpb.ServiceAccount, error) { + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*adminpb.ServiceAccount), nil +} + +func (s *mockIamServer) CreateServiceAccount(_ context.Context, req *adminpb.CreateServiceAccountRequest) (*adminpb.ServiceAccount, error) { + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*adminpb.ServiceAccount), nil +} + +func (s *mockIamServer) UpdateServiceAccount(_ context.Context, req *adminpb.ServiceAccount) (*adminpb.ServiceAccount, error) { + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*adminpb.ServiceAccount), nil +} + +func (s *mockIamServer) DeleteServiceAccount(_ context.Context, req *adminpb.DeleteServiceAccountRequest) (*google_protobuf.Empty, error) { + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*google_protobuf.Empty), nil +} + +func (s *mockIamServer) ListServiceAccountKeys(_ context.Context, req *adminpb.ListServiceAccountKeysRequest) (*adminpb.ListServiceAccountKeysResponse, error) { + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*adminpb.ListServiceAccountKeysResponse), nil +} + +func (s *mockIamServer) GetServiceAccountKey(_ context.Context, req *adminpb.GetServiceAccountKeyRequest) (*adminpb.ServiceAccountKey, error) { + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return 
s.resps[0].(*adminpb.ServiceAccountKey), nil +} + +func (s *mockIamServer) CreateServiceAccountKey(_ context.Context, req *adminpb.CreateServiceAccountKeyRequest) (*adminpb.ServiceAccountKey, error) { + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*adminpb.ServiceAccountKey), nil +} + +func (s *mockIamServer) DeleteServiceAccountKey(_ context.Context, req *adminpb.DeleteServiceAccountKeyRequest) (*google_protobuf.Empty, error) { + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*google_protobuf.Empty), nil +} + +func (s *mockIamServer) SignBlob(_ context.Context, req *adminpb.SignBlobRequest) (*adminpb.SignBlobResponse, error) { + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*adminpb.SignBlobResponse), nil +} + +func (s *mockIamServer) GetIamPolicy(_ context.Context, req *iampb.GetIamPolicyRequest) (*iampb.Policy, error) { + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*iampb.Policy), nil +} + +func (s *mockIamServer) SetIamPolicy(_ context.Context, req *iampb.SetIamPolicyRequest) (*iampb.Policy, error) { + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*iampb.Policy), nil +} + +func (s *mockIamServer) TestIamPermissions(_ context.Context, req *iampb.TestIamPermissionsRequest) (*iampb.TestIamPermissionsResponse, error) { + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*iampb.TestIamPermissionsResponse), nil +} + +func (s *mockIamServer) QueryGrantableRoles(_ context.Context, req *adminpb.QueryGrantableRolesRequest) (*adminpb.QueryGrantableRolesResponse, error) { + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*adminpb.QueryGrantableRolesResponse), nil +} + +// clientOpt is the option tests should use to connect to the test server. 
+// It is initialized by TestMain. +var clientOpt option.ClientOption + +var ( + mockIam mockIamServer +) + +func TestMain(m *testing.M) { + flag.Parse() + + serv := grpc.NewServer() + adminpb.RegisterIAMServer(serv, &mockIam) + + lis, err := net.Listen("tcp", "localhost:0") + if err != nil { + log.Fatal(err) + } + go serv.Serve(lis) + + conn, err := grpc.Dial(lis.Addr().String(), grpc.WithInsecure()) + if err != nil { + log.Fatal(err) + } + clientOpt = option.WithGRPCConn(conn) + + os.Exit(m.Run()) +} + +func TestIamListServiceAccounts(t *testing.T) { + var nextPageToken string = "" + var accountsElement *adminpb.ServiceAccount = &adminpb.ServiceAccount{} + var accounts = []*adminpb.ServiceAccount{accountsElement} + var expectedResponse = &adminpb.ListServiceAccountsResponse{ + NextPageToken: nextPageToken, + Accounts: accounts, + } + + mockIam.err = nil + mockIam.reqs = nil + + mockIam.resps = append(mockIam.resps[:0], expectedResponse) + + var formattedName string = IamProjectPath("[PROJECT]") + var request = &adminpb.ListServiceAccountsRequest{ + Name: formattedName, + } + + c, err := NewIamClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListServiceAccounts(context.Background(), request).Next() + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockIam.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + want := (interface{})(expectedResponse.Accounts[0]) + got := (interface{})(resp) + var ok bool + + switch want := (want).(type) { + case proto.Message: + ok = proto.Equal(want, got.(proto.Message)) + default: + ok = want == got + } + if !ok { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestIamListServiceAccountsError(t *testing.T) { + errCode := codes.Internal + mockIam.err = grpc.Errorf(errCode, "test error") + + var formattedName string = IamProjectPath("[PROJECT]") + var request = &adminpb.ListServiceAccountsRequest{ + Name: 
formattedName, + } + + c, err := NewIamClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListServiceAccounts(context.Background(), request).Next() + + if c := grpc.Code(err); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestIamGetServiceAccount(t *testing.T) { + var name2 string = "name2-1052831874" + var projectId string = "projectId-1969970175" + var uniqueId string = "uniqueId-538310583" + var email string = "email96619420" + var displayName string = "displayName1615086568" + var etag []byte = []byte("21") + var oauth2ClientId string = "oauth2ClientId-1833466037" + var expectedResponse = &adminpb.ServiceAccount{ + Name: name2, + ProjectId: projectId, + UniqueId: uniqueId, + Email: email, + DisplayName: displayName, + Etag: etag, + Oauth2ClientId: oauth2ClientId, + } + + mockIam.err = nil + mockIam.reqs = nil + + mockIam.resps = append(mockIam.resps[:0], expectedResponse) + + var formattedName string = IamServiceAccountPath("[PROJECT]", "[SERVICE_ACCOUNT]") + var request = &adminpb.GetServiceAccountRequest{ + Name: formattedName, + } + + c, err := NewIamClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.GetServiceAccount(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockIam.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestIamGetServiceAccountError(t *testing.T) { + errCode := codes.Internal + mockIam.err = grpc.Errorf(errCode, "test error") + + var formattedName string = IamServiceAccountPath("[PROJECT]", "[SERVICE_ACCOUNT]") + var request = &adminpb.GetServiceAccountRequest{ + Name: formattedName, + } + + c, err := NewIamClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + 
} + + resp, err := c.GetServiceAccount(context.Background(), request) + + if c := grpc.Code(err); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestIamCreateServiceAccount(t *testing.T) { + var name2 string = "name2-1052831874" + var projectId string = "projectId-1969970175" + var uniqueId string = "uniqueId-538310583" + var email string = "email96619420" + var displayName string = "displayName1615086568" + var etag []byte = []byte("21") + var oauth2ClientId string = "oauth2ClientId-1833466037" + var expectedResponse = &adminpb.ServiceAccount{ + Name: name2, + ProjectId: projectId, + UniqueId: uniqueId, + Email: email, + DisplayName: displayName, + Etag: etag, + Oauth2ClientId: oauth2ClientId, + } + + mockIam.err = nil + mockIam.reqs = nil + + mockIam.resps = append(mockIam.resps[:0], expectedResponse) + + var formattedName string = IamProjectPath("[PROJECT]") + var accountId string = "accountId-803333011" + var request = &adminpb.CreateServiceAccountRequest{ + Name: formattedName, + AccountId: accountId, + } + + c, err := NewIamClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.CreateServiceAccount(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockIam.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestIamCreateServiceAccountError(t *testing.T) { + errCode := codes.Internal + mockIam.err = grpc.Errorf(errCode, "test error") + + var formattedName string = IamProjectPath("[PROJECT]") + var accountId string = "accountId-803333011" + var request = &adminpb.CreateServiceAccountRequest{ + Name: formattedName, + AccountId: accountId, + } + + c, err := NewIamClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := 
c.CreateServiceAccount(context.Background(), request) + + if c := grpc.Code(err); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestIamUpdateServiceAccount(t *testing.T) { + var name string = "name3373707" + var projectId string = "projectId-1969970175" + var uniqueId string = "uniqueId-538310583" + var email string = "email96619420" + var displayName string = "displayName1615086568" + var etag2 []byte = []byte("-120") + var oauth2ClientId string = "oauth2ClientId-1833466037" + var expectedResponse = &adminpb.ServiceAccount{ + Name: name, + ProjectId: projectId, + UniqueId: uniqueId, + Email: email, + DisplayName: displayName, + Etag: etag2, + Oauth2ClientId: oauth2ClientId, + } + + mockIam.err = nil + mockIam.reqs = nil + + mockIam.resps = append(mockIam.resps[:0], expectedResponse) + + var etag []byte = []byte("21") + var request = &adminpb.ServiceAccount{ + Etag: etag, + } + + c, err := NewIamClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.UpdateServiceAccount(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockIam.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestIamUpdateServiceAccountError(t *testing.T) { + errCode := codes.Internal + mockIam.err = grpc.Errorf(errCode, "test error") + + var etag []byte = []byte("21") + var request = &adminpb.ServiceAccount{ + Etag: etag, + } + + c, err := NewIamClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.UpdateServiceAccount(context.Background(), request) + + if c := grpc.Code(err); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestIamDeleteServiceAccount(t *testing.T) { + var expectedResponse 
*google_protobuf.Empty = &google_protobuf.Empty{} + + mockIam.err = nil + mockIam.reqs = nil + + mockIam.resps = append(mockIam.resps[:0], expectedResponse) + + var formattedName string = IamServiceAccountPath("[PROJECT]", "[SERVICE_ACCOUNT]") + var request = &adminpb.DeleteServiceAccountRequest{ + Name: formattedName, + } + + c, err := NewIamClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + err = c.DeleteServiceAccount(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockIam.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + +} + +func TestIamDeleteServiceAccountError(t *testing.T) { + errCode := codes.Internal + mockIam.err = grpc.Errorf(errCode, "test error") + + var formattedName string = IamServiceAccountPath("[PROJECT]", "[SERVICE_ACCOUNT]") + var request = &adminpb.DeleteServiceAccountRequest{ + Name: formattedName, + } + + c, err := NewIamClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + err = c.DeleteServiceAccount(context.Background(), request) + + if c := grpc.Code(err); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } +} +func TestIamListServiceAccountKeys(t *testing.T) { + var expectedResponse *adminpb.ListServiceAccountKeysResponse = &adminpb.ListServiceAccountKeysResponse{} + + mockIam.err = nil + mockIam.reqs = nil + + mockIam.resps = append(mockIam.resps[:0], expectedResponse) + + var formattedName string = IamServiceAccountPath("[PROJECT]", "[SERVICE_ACCOUNT]") + var request = &adminpb.ListServiceAccountKeysRequest{ + Name: formattedName, + } + + c, err := NewIamClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListServiceAccountKeys(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockIam.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + 
+ if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestIamListServiceAccountKeysError(t *testing.T) { + errCode := codes.Internal + mockIam.err = grpc.Errorf(errCode, "test error") + + var formattedName string = IamServiceAccountPath("[PROJECT]", "[SERVICE_ACCOUNT]") + var request = &adminpb.ListServiceAccountKeysRequest{ + Name: formattedName, + } + + c, err := NewIamClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListServiceAccountKeys(context.Background(), request) + + if c := grpc.Code(err); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestIamGetServiceAccountKey(t *testing.T) { + var name2 string = "name2-1052831874" + var privateKeyData []byte = []byte("-58") + var publicKeyData []byte = []byte("-96") + var expectedResponse = &adminpb.ServiceAccountKey{ + Name: name2, + PrivateKeyData: privateKeyData, + PublicKeyData: publicKeyData, + } + + mockIam.err = nil + mockIam.reqs = nil + + mockIam.resps = append(mockIam.resps[:0], expectedResponse) + + var formattedName string = IamKeyPath("[PROJECT]", "[SERVICE_ACCOUNT]", "[KEY]") + var request = &adminpb.GetServiceAccountKeyRequest{ + Name: formattedName, + } + + c, err := NewIamClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.GetServiceAccountKey(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockIam.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestIamGetServiceAccountKeyError(t *testing.T) { + errCode := codes.Internal + mockIam.err = grpc.Errorf(errCode, "test error") + + var formattedName string = IamKeyPath("[PROJECT]", "[SERVICE_ACCOUNT]", "[KEY]") + var 
request = &adminpb.GetServiceAccountKeyRequest{ + Name: formattedName, + } + + c, err := NewIamClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.GetServiceAccountKey(context.Background(), request) + + if c := grpc.Code(err); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestIamCreateServiceAccountKey(t *testing.T) { + var name2 string = "name2-1052831874" + var privateKeyData []byte = []byte("-58") + var publicKeyData []byte = []byte("-96") + var expectedResponse = &adminpb.ServiceAccountKey{ + Name: name2, + PrivateKeyData: privateKeyData, + PublicKeyData: publicKeyData, + } + + mockIam.err = nil + mockIam.reqs = nil + + mockIam.resps = append(mockIam.resps[:0], expectedResponse) + + var formattedName string = IamServiceAccountPath("[PROJECT]", "[SERVICE_ACCOUNT]") + var request = &adminpb.CreateServiceAccountKeyRequest{ + Name: formattedName, + } + + c, err := NewIamClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.CreateServiceAccountKey(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockIam.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestIamCreateServiceAccountKeyError(t *testing.T) { + errCode := codes.Internal + mockIam.err = grpc.Errorf(errCode, "test error") + + var formattedName string = IamServiceAccountPath("[PROJECT]", "[SERVICE_ACCOUNT]") + var request = &adminpb.CreateServiceAccountKeyRequest{ + Name: formattedName, + } + + c, err := NewIamClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.CreateServiceAccountKey(context.Background(), request) + + if c := grpc.Code(err); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + 
} + _ = resp +} +func TestIamDeleteServiceAccountKey(t *testing.T) { + var expectedResponse *google_protobuf.Empty = &google_protobuf.Empty{} + + mockIam.err = nil + mockIam.reqs = nil + + mockIam.resps = append(mockIam.resps[:0], expectedResponse) + + var formattedName string = IamKeyPath("[PROJECT]", "[SERVICE_ACCOUNT]", "[KEY]") + var request = &adminpb.DeleteServiceAccountKeyRequest{ + Name: formattedName, + } + + c, err := NewIamClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + err = c.DeleteServiceAccountKey(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockIam.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + +} + +func TestIamDeleteServiceAccountKeyError(t *testing.T) { + errCode := codes.Internal + mockIam.err = grpc.Errorf(errCode, "test error") + + var formattedName string = IamKeyPath("[PROJECT]", "[SERVICE_ACCOUNT]", "[KEY]") + var request = &adminpb.DeleteServiceAccountKeyRequest{ + Name: formattedName, + } + + c, err := NewIamClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + err = c.DeleteServiceAccountKey(context.Background(), request) + + if c := grpc.Code(err); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } +} +func TestIamSignBlob(t *testing.T) { + var keyId string = "keyId-1134673157" + var signature []byte = []byte("-72") + var expectedResponse = &adminpb.SignBlobResponse{ + KeyId: keyId, + Signature: signature, + } + + mockIam.err = nil + mockIam.reqs = nil + + mockIam.resps = append(mockIam.resps[:0], expectedResponse) + + var formattedName string = IamServiceAccountPath("[PROJECT]", "[SERVICE_ACCOUNT]") + var bytesToSign []byte = []byte("45") + var request = &adminpb.SignBlobRequest{ + Name: formattedName, + BytesToSign: bytesToSign, + } + + c, err := NewIamClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := 
c.SignBlob(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockIam.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestIamSignBlobError(t *testing.T) { + errCode := codes.Internal + mockIam.err = grpc.Errorf(errCode, "test error") + + var formattedName string = IamServiceAccountPath("[PROJECT]", "[SERVICE_ACCOUNT]") + var bytesToSign []byte = []byte("45") + var request = &adminpb.SignBlobRequest{ + Name: formattedName, + BytesToSign: bytesToSign, + } + + c, err := NewIamClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.SignBlob(context.Background(), request) + + if c := grpc.Code(err); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestIamGetIamPolicy(t *testing.T) { + var version int32 = 351608024 + var etag []byte = []byte("21") + var expectedResponse = &iampb.Policy{ + Version: version, + Etag: etag, + } + + mockIam.err = nil + mockIam.reqs = nil + + mockIam.resps = append(mockIam.resps[:0], expectedResponse) + + var formattedResource string = IamServiceAccountPath("[PROJECT]", "[SERVICE_ACCOUNT]") + var request = &iampb.GetIamPolicyRequest{ + Resource: formattedResource, + } + + c, err := NewIamClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.getIamPolicy(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockIam.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestIamGetIamPolicyError(t *testing.T) { + errCode := codes.Internal + mockIam.err = grpc.Errorf(errCode, "test 
error") + + var formattedResource string = IamServiceAccountPath("[PROJECT]", "[SERVICE_ACCOUNT]") + var request = &iampb.GetIamPolicyRequest{ + Resource: formattedResource, + } + + c, err := NewIamClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.getIamPolicy(context.Background(), request) + + if c := grpc.Code(err); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestIamSetIamPolicy(t *testing.T) { + var version int32 = 351608024 + var etag []byte = []byte("21") + var expectedResponse = &iampb.Policy{ + Version: version, + Etag: etag, + } + + mockIam.err = nil + mockIam.reqs = nil + + mockIam.resps = append(mockIam.resps[:0], expectedResponse) + + var formattedResource string = IamServiceAccountPath("[PROJECT]", "[SERVICE_ACCOUNT]") + var policy *iampb.Policy = &iampb.Policy{} + var request = &iampb.SetIamPolicyRequest{ + Resource: formattedResource, + Policy: policy, + } + + c, err := NewIamClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.setIamPolicy(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockIam.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestIamSetIamPolicyError(t *testing.T) { + errCode := codes.Internal + mockIam.err = grpc.Errorf(errCode, "test error") + + var formattedResource string = IamServiceAccountPath("[PROJECT]", "[SERVICE_ACCOUNT]") + var policy *iampb.Policy = &iampb.Policy{} + var request = &iampb.SetIamPolicyRequest{ + Resource: formattedResource, + Policy: policy, + } + + c, err := NewIamClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.setIamPolicy(context.Background(), request) + + if c := grpc.Code(err); c != errCode { + 
t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestIamTestIamPermissions(t *testing.T) { + var expectedResponse *iampb.TestIamPermissionsResponse = &iampb.TestIamPermissionsResponse{} + + mockIam.err = nil + mockIam.reqs = nil + + mockIam.resps = append(mockIam.resps[:0], expectedResponse) + + var formattedResource string = IamServiceAccountPath("[PROJECT]", "[SERVICE_ACCOUNT]") + var permissions []string = nil + var request = &iampb.TestIamPermissionsRequest{ + Resource: formattedResource, + Permissions: permissions, + } + + c, err := NewIamClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.TestIamPermissions(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockIam.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestIamTestIamPermissionsError(t *testing.T) { + errCode := codes.Internal + mockIam.err = grpc.Errorf(errCode, "test error") + + var formattedResource string = IamServiceAccountPath("[PROJECT]", "[SERVICE_ACCOUNT]") + var permissions []string = nil + var request = &iampb.TestIamPermissionsRequest{ + Resource: formattedResource, + Permissions: permissions, + } + + c, err := NewIamClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.TestIamPermissions(context.Background(), request) + + if c := grpc.Code(err); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestIamQueryGrantableRoles(t *testing.T) { + var expectedResponse *adminpb.QueryGrantableRolesResponse = &adminpb.QueryGrantableRolesResponse{} + + mockIam.err = nil + mockIam.reqs = nil + + mockIam.resps = append(mockIam.resps[:0], expectedResponse) + + var fullResourceName string = "fullResourceName1300993644" + var 
request = &adminpb.QueryGrantableRolesRequest{ + FullResourceName: fullResourceName, + } + + c, err := NewIamClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.QueryGrantableRoles(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockIam.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestIamQueryGrantableRolesError(t *testing.T) { + errCode := codes.Internal + mockIam.err = grpc.Errorf(errCode, "test error") + + var fullResourceName string = "fullResourceName1300993644" + var request = &adminpb.QueryGrantableRolesRequest{ + FullResourceName: fullResourceName, + } + + c, err := NewIamClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.QueryGrantableRoles(context.Background(), request) + + if c := grpc.Code(err); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} diff --git a/vendor/cloud.google.com/go/iam/admin/apiv1/policy_methods.go b/vendor/cloud.google.com/go/iam/admin/apiv1/policy_methods.go new file mode 100644 index 00000000..a3ff72e6 --- /dev/null +++ b/vendor/cloud.google.com/go/iam/admin/apiv1/policy_methods.go @@ -0,0 +1,52 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +// This is handwritten code. These methods are implemented by hand so they can use +// the iam.Policy type. + +package admin + +import ( + "cloud.google.com/go/iam" + "golang.org/x/net/context" + iampb "google.golang.org/genproto/googleapis/iam/v1" +) + +// GetIamPolicy returns the IAM access control policy for a ServiceAccount. +func (c *IamClient) GetIamPolicy(ctx context.Context, req *iampb.GetIamPolicyRequest) (*iam.Policy, error) { + policy, err := c.getIamPolicy(ctx, req) + if err != nil { + return nil, err + } + return &iam.Policy{InternalProto: policy}, nil +} + +// SetIamPolicyRequest is the request type for the SetIamPolicy method. +type SetIamPolicyRequest struct { + Resource string + Policy *iam.Policy +} + +// SetIamPolicy sets the IAM access control policy for a ServiceAccount. +func (c *IamClient) SetIamPolicy(ctx context.Context, req *SetIamPolicyRequest) (*iam.Policy, error) { + preq := &iampb.SetIamPolicyRequest{ + Resource: req.Resource, + Policy: req.Policy.InternalProto, + } + policy, err := c.setIamPolicy(ctx, preq) + if err != nil { + return nil, err + } + return &iam.Policy{InternalProto: policy}, nil +} diff --git a/vendor/cloud.google.com/go/iam/iam.go b/vendor/cloud.google.com/go/iam/iam.go new file mode 100644 index 00000000..9f4941d2 --- /dev/null +++ b/vendor/cloud.google.com/go/iam/iam.go @@ -0,0 +1,199 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +// Package iam supports the resource-specific operations of Google Cloud +// IAM (Identity and Access Management) for the Google Cloud Libraries. +// See https://cloud.google.com/iam for more about IAM. +// +// Users of the Google Cloud Libraries will typically not use this package +// directly. Instead they will begin with some resource that supports IAM, like +// a pubsub topic, and call its IAM method to get a Handle for that resource. +package iam + +import ( + "golang.org/x/net/context" + pb "google.golang.org/genproto/googleapis/iam/v1" + "google.golang.org/grpc" +) + +// A Handle provides IAM operations for a resource. +type Handle struct { + c pb.IAMPolicyClient + resource string +} + +// InternalNewHandle is for use by the Google Cloud Libraries only. +// +// InternalNewHandle returns a Handle for resource. +// The conn parameter refers to a server that must support the IAMPolicy service. +func InternalNewHandle(conn *grpc.ClientConn, resource string) *Handle { + return &Handle{ + c: pb.NewIAMPolicyClient(conn), + resource: resource, + } +} + +// Policy retrieves the IAM policy for the resource. +func (h *Handle) Policy(ctx context.Context) (*Policy, error) { + proto, err := h.c.GetIamPolicy(ctx, &pb.GetIamPolicyRequest{Resource: h.resource}) + if err != nil { + return nil, err + } + return &Policy{InternalProto: proto}, nil +} + +// SetPolicy replaces the resource's current policy with the supplied Policy. +// +// If policy was created from a prior call to Get, then the modification will +// only succeed if the policy has not changed since the Get. +func (h *Handle) SetPolicy(ctx context.Context, policy *Policy) error { + _, err := h.c.SetIamPolicy(ctx, &pb.SetIamPolicyRequest{ + Resource: h.resource, + Policy: policy.InternalProto, + }) + return err +} + +// TestPermissions returns the subset of permissions that the caller has on the resource. 
+func (h *Handle) TestPermissions(ctx context.Context, permissions []string) ([]string, error) { + res, err := h.c.TestIamPermissions(ctx, &pb.TestIamPermissionsRequest{ + Resource: h.resource, + Permissions: permissions, + }) + if err != nil { + return nil, err + } + return res.Permissions, nil +} + +// A RoleName is a name representing a collection of permissions. +type RoleName string + +// Common role names. +const ( + Owner RoleName = "roles/owner" + Editor RoleName = "roles/editor" + Viewer RoleName = "roles/viewer" +) + +const ( + // AllUsers is a special member that denotes all users, even unauthenticated ones. + AllUsers = "allUsers" + + // AllAuthenticatedUsers is a special member that denotes all authenticated users. + AllAuthenticatedUsers = "allAuthenticatedUsers" +) + +// A Policy is a list of Bindings representing roles +// granted to members. +// +// The zero Policy is a valid policy with no bindings. +type Policy struct { + // TODO(jba): when type aliases are available, put Policy into an internal package + // and provide an exported alias here. + + // This field is exported for use by the Google Cloud Libraries only. + // It may become unexported in a future release. + InternalProto *pb.Policy +} + +// Members returns the list of members with the supplied role. +// The return value should not be modified. Use Add and Remove +// to modify the members of a role. +func (p *Policy) Members(r RoleName) []string { + b := p.binding(r) + if b == nil { + return nil + } + return b.Members +} + +// HasRole reports whether member has role r. +func (p *Policy) HasRole(member string, r RoleName) bool { + return memberIndex(member, p.binding(r)) >= 0 +} + +// Add adds member member to role r if it is not already present. +// A new binding is created if there is no binding for the role. 
+func (p *Policy) Add(member string, r RoleName) { + b := p.binding(r) + if b == nil { + if p.InternalProto == nil { + p.InternalProto = &pb.Policy{} + } + p.InternalProto.Bindings = append(p.InternalProto.Bindings, &pb.Binding{ + Role: string(r), + Members: []string{member}, + }) + return + } + if memberIndex(member, b) < 0 { + b.Members = append(b.Members, member) + return + } +} + +// Remove removes member from role r if it is present. +func (p *Policy) Remove(member string, r RoleName) { + b := p.binding(r) + i := memberIndex(member, b) + if i < 0 { + return + } + // Order doesn't matter, so move the last member into the + // removed spot and shrink the slice. + // TODO(jba): worry about multiple copies of m? + last := len(b.Members) - 1 + b.Members[i] = b.Members[last] + b.Members[last] = "" + b.Members = b.Members[:last] +} + +// Roles returns the names of all the roles that appear in the Policy. +func (p *Policy) Roles() []RoleName { + if p.InternalProto == nil { + return nil + } + var rns []RoleName + for _, b := range p.InternalProto.Bindings { + rns = append(rns, RoleName(b.Role)) + } + return rns +} + +// binding returns the Binding for the suppied role, or nil if there isn't one. +func (p *Policy) binding(r RoleName) *pb.Binding { + if p.InternalProto == nil { + return nil + } + for _, b := range p.InternalProto.Bindings { + if b.Role == string(r) { + return b + } + } + return nil +} + +// memberIndex returns the index of m in b's Members, or -1 if not found. +func memberIndex(m string, b *pb.Binding) int { + if b == nil { + return -1 + } + for i, mm := range b.Members { + if mm == m { + return i + } + } + return -1 +} diff --git a/vendor/cloud.google.com/go/iam/iam_test.go b/vendor/cloud.google.com/go/iam/iam_test.go new file mode 100644 index 00000000..b5c8e576 --- /dev/null +++ b/vendor/cloud.google.com/go/iam/iam_test.go @@ -0,0 +1,86 @@ +// Copyright 2016 Google Inc. All Rights Reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package iam + +import ( + "fmt" + "reflect" + "sort" + "testing" +) + +func TestPolicy(t *testing.T) { + p := &Policy{} + + add := func(member string, role RoleName) { + p.Add(member, role) + } + remove := func(member string, role RoleName) { + p.Remove(member, role) + } + + if msg, ok := checkMembers(p, Owner, nil); !ok { + t.Fatal(msg) + } + add("m1", Owner) + if msg, ok := checkMembers(p, Owner, []string{"m1"}); !ok { + t.Fatal(msg) + } + add("m2", Owner) + if msg, ok := checkMembers(p, Owner, []string{"m1", "m2"}); !ok { + t.Fatal(msg) + } + add("m1", Owner) // duplicate adds ignored + if msg, ok := checkMembers(p, Owner, []string{"m1", "m2"}); !ok { + t.Fatal(msg) + } + // No other roles populated yet. + if msg, ok := checkMembers(p, Viewer, nil); !ok { + t.Fatal(msg) + } + remove("m1", Owner) + if msg, ok := checkMembers(p, Owner, []string{"m2"}); !ok { + t.Fatal(msg) + } + if msg, ok := checkMembers(p, Viewer, nil); !ok { + t.Fatal(msg) + } + remove("m3", Owner) // OK to remove non-existent member. 
+ if msg, ok := checkMembers(p, Owner, []string{"m2"}); !ok { + t.Fatal(msg) + } + remove("m2", Owner) + if msg, ok := checkMembers(p, Owner, []string{}); !ok { + t.Fatal(msg) + } + if got, want := p.Roles(), []RoleName{Owner}; !reflect.DeepEqual(got, want) { + t.Fatalf("roles: got %v, want %v", got, want) + } +} + +func checkMembers(p *Policy, role RoleName, wantMembers []string) (string, bool) { + gotMembers := p.Members(role) + sort.Strings(gotMembers) + sort.Strings(wantMembers) + if !reflect.DeepEqual(gotMembers, wantMembers) { + return fmt.Sprintf("got %v, want %v", gotMembers, wantMembers), false + } + for _, m := range wantMembers { + if !p.HasRole(m, role) { + return fmt.Sprintf("member %q should have role %s but does not", m, role), false + } + } + return "", true +} diff --git a/vendor/cloud.google.com/go/internal/atomiccache/atomiccache.go b/vendor/cloud.google.com/go/internal/atomiccache/atomiccache.go new file mode 100644 index 00000000..2bea8a15 --- /dev/null +++ b/vendor/cloud.google.com/go/internal/atomiccache/atomiccache.go @@ -0,0 +1,58 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package atomiccache provides a map-based cache that supports very fast +// reads. +package atomiccache + +import ( + "sync" + "sync/atomic" +) + +type mapType map[interface{}]interface{} + +// Cache is a map-based cache that supports fast reads via use of atomics. 
+// Writes are slow, requiring a copy of the entire cache. +// The zero Cache is an empty cache, ready for use. +type Cache struct { + val atomic.Value // mapType + mu sync.Mutex // used only by writers +} + +// Get returns the value of the cache at key. If there is no value, +// getter is called to provide one, and the cache is updated. +// The getter function may be called concurrently. It should be pure, +// returning the same value for every call. +func (c *Cache) Get(key interface{}, getter func() interface{}) interface{} { + mp, _ := c.val.Load().(mapType) + if v, ok := mp[key]; ok { + return v + } + + // Compute value without lock. + // Might duplicate effort but won't hold other computations back. + newV := getter() + + c.mu.Lock() + mp, _ = c.val.Load().(mapType) + newM := make(mapType, len(mp)+1) + for k, v := range mp { + newM[k] = v + } + newM[key] = newV + c.val.Store(newM) + c.mu.Unlock() + return newV +} diff --git a/vendor/cloud.google.com/go/internal/atomiccache/atomiccache_test.go b/vendor/cloud.google.com/go/internal/atomiccache/atomiccache_test.go new file mode 100644 index 00000000..33105b34 --- /dev/null +++ b/vendor/cloud.google.com/go/internal/atomiccache/atomiccache_test.go @@ -0,0 +1,46 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package atomiccache + +import ( + "fmt" + "testing" +) + +func TestGet(t *testing.T) { + var c Cache + called := false + get := func(k interface{}) interface{} { + return c.Get(k, func() interface{} { + called = true + return fmt.Sprintf("v%d", k) + }) + } + got := get(1) + if want := "v1"; got != want { + t.Errorf("got %v, want %v", got, want) + } + if !called { + t.Error("getter not called, expected a call") + } + called = false + got = get(1) + if want := "v1"; got != want { + t.Errorf("got %v, want %v", got, want) + } + if called { + t.Error("getter unexpectedly called") + } +} diff --git a/vendor/cloud.google.com/go/internal/cloud.go b/vendor/cloud.google.com/go/internal/cloud.go new file mode 100644 index 00000000..8e0c8f8e --- /dev/null +++ b/vendor/cloud.google.com/go/internal/cloud.go @@ -0,0 +1,64 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package internal provides support for the cloud packages. +// +// Users should not import this package directly. +package internal + +import ( + "fmt" + "net/http" +) + +const userAgent = "gcloud-golang/0.1" + +// Transport is an http.RoundTripper that appends Google Cloud client's +// user-agent to the original request's user-agent header. +type Transport struct { + // TODO(bradfitz): delete internal.Transport. It's too wrappy for what it does. + // Do User-Agent some other way. 
+ + // Base is the actual http.RoundTripper + // requests will use. It must not be nil. + Base http.RoundTripper +} + +// RoundTrip appends a user-agent to the existing user-agent +// header and delegates the request to the base http.RoundTripper. +func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) { + req = cloneRequest(req) + ua := req.Header.Get("User-Agent") + if ua == "" { + ua = userAgent + } else { + ua = fmt.Sprintf("%s %s", ua, userAgent) + } + req.Header.Set("User-Agent", ua) + return t.Base.RoundTrip(req) +} + +// cloneRequest returns a clone of the provided *http.Request. +// The clone is a shallow copy of the struct and its Header map. +func cloneRequest(r *http.Request) *http.Request { + // shallow copy of the struct + r2 := new(http.Request) + *r2 = *r + // deep copy of the Header + r2.Header = make(http.Header) + for k, s := range r.Header { + r2.Header[k] = s + } + return r2 +} diff --git a/vendor/cloud.google.com/go/internal/fields/fields.go b/vendor/cloud.google.com/go/internal/fields/fields.go new file mode 100644 index 00000000..4f5516ea --- /dev/null +++ b/vendor/cloud.google.com/go/internal/fields/fields.go @@ -0,0 +1,444 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package fields provides a view of the fields of a struct that follows the Go +// rules, amended to consider tags and case insensitivity. 
+// +// Usage +// +// First define a function that interprets tags: +// +// func parseTag(st reflect.StructTag) (name string, keep bool, other interface{}, err error) { ... } +// +// The function's return values describe whether to ignore the field +// completely or provide an alternate name, as well as other data from the +// parse that is stored to avoid re-parsing. +// +// Then define a function to validate the type: +// +// func validate(t reflect.Type) error { ... } +// +// Then, if necessary, define a function to specify leaf types - types +// which should be considered one field and not be recursed into: +// +// func isLeafType(t reflect.Type) bool { ... } +// +// eg: +// +// func isLeafType(t reflect.Type) bool { +// return t == reflect.TypeOf(time.Time{}) +// } +// +// Next, construct a Cache, passing your functions. As its name suggests, a +// Cache remembers validation and field information for a type, so subsequent +// calls with the same type are very fast. +// +// cache := fields.NewCache(parseTag, validate, isLeafType) +// +// To get the fields of a struct type as determined by the above rules, call +// the Fields method: +// +// fields, err := cache.Fields(reflect.TypeOf(MyStruct{})) +// +// The return value can be treated as a slice of Fields. +// +// Given a string, such as a key or column name obtained during unmarshalling, +// call Match on the list of fields to find a field whose name is the best +// match: +// +// field := fields.Match(name) +// +// Match looks for an exact match first, then falls back to a case-insensitive +// comparison. +package fields + +import ( + "bytes" + "reflect" + "sort" + + "cloud.google.com/go/internal/atomiccache" +) + +// A Field records information about a struct field. +type Field struct { + Name string // effective field name + NameFromTag bool // did Name come from a tag? 
+ Type reflect.Type // field type + Index []int // index sequence, for reflect.Value.FieldByIndex + ParsedTag interface{} // third return value of the parseTag function + + nameBytes []byte + equalFold func(s, t []byte) bool +} + +type ParseTagFunc func(reflect.StructTag) (name string, keep bool, other interface{}, err error) + +type ValidateFunc func(reflect.Type) error + +type LeafTypesFunc func(reflect.Type) bool + +// A Cache records information about the fields of struct types. +// +// A Cache is safe for use by multiple goroutines. +type Cache struct { + parseTag ParseTagFunc + validate ValidateFunc + leafTypes LeafTypesFunc + cache atomiccache.Cache // from reflect.Type to cacheValue +} + +// NewCache constructs a Cache. +// +// Its first argument should be a function that accepts +// a struct tag and returns four values: an alternative name for the field +// extracted from the tag, a boolean saying whether to keep the field or ignore +// it, additional data that is stored with the field information to avoid +// having to parse the tag again, and an error. +// +// Its second argument should be a function that accepts a reflect.Type and +// returns an error if the struct type is invalid in any way. For example, it +// may check that all of the struct field tags are valid, or that all fields +// are of an appropriate type. +func NewCache(parseTag ParseTagFunc, validate ValidateFunc, leafTypes LeafTypesFunc) *Cache { + if parseTag == nil { + parseTag = func(reflect.StructTag) (string, bool, interface{}, error) { + return "", true, nil, nil + } + } + if validate == nil { + validate = func(reflect.Type) error { + return nil + } + } + if leafTypes == nil { + leafTypes = func(reflect.Type) bool { + return false + } + } + + return &Cache{ + parseTag: parseTag, + validate: validate, + leafTypes: leafTypes, + } +} + +// A fieldScan represents an item on the fieldByNameFunc scan work list. 
+type fieldScan struct { + typ reflect.Type + index []int +} + +// Fields returns all the exported fields of t, which must be a struct type. It +// follows the standard Go rules for embedded fields, modified by the presence +// of tags. The result is sorted lexicographically by index. +// +// These rules apply in the absence of tags: +// Anonymous struct fields are treated as if their inner exported fields were +// fields in the outer struct (embedding). The result includes all fields that +// aren't shadowed by fields at higher level of embedding. If more than one +// field with the same name exists at the same level of embedding, it is +// excluded. An anonymous field that is not of struct type is treated as having +// its type as its name. +// +// Tags modify these rules as follows: +// A field's tag is used as its name. +// An anonymous struct field with a name given in its tag is treated as +// a field having that name, rather than an embedded struct (the struct's +// fields will not be returned). +// If more than one field with the same name exists at the same level of embedding, +// but exactly one of them is tagged, then the tagged field is reported and the others +// are ignored. +func (c *Cache) Fields(t reflect.Type) (List, error) { + if t.Kind() != reflect.Struct { + panic("fields: Fields of non-struct type") + } + return c.cachedTypeFields(t) +} + +// A List is a list of Fields. +type List []Field + +// Match returns the field in the list whose name best matches the supplied +// name, nor nil if no field does. If there is a field with the exact name, it +// is returned. Otherwise the first field (sorted by index) whose name matches +// case-insensitively is returned. +func (l List) Match(name string) *Field { + return l.MatchBytes([]byte(name)) +} + +// MatchBytes is identical to Match, except that the argument is a byte slice. 
+func (l List) MatchBytes(name []byte) *Field { + var f *Field + for i := range l { + ff := &l[i] + if bytes.Equal(ff.nameBytes, name) { + return ff + } + if f == nil && ff.equalFold(ff.nameBytes, name) { + f = ff + } + } + return f +} + +type cacheValue struct { + fields List + err error +} + +// cachedTypeFields is like typeFields but uses a cache to avoid repeated work. +// This code has been copied and modified from +// https://go.googlesource.com/go/+/go1.7.3/src/encoding/json/encode.go. +func (c *Cache) cachedTypeFields(t reflect.Type) (List, error) { + cv := c.cache.Get(t, func() interface{} { + if err := c.validate(t); err != nil { + return cacheValue{nil, err} + } + f, err := c.typeFields(t) + return cacheValue{List(f), err} + }).(cacheValue) + return cv.fields, cv.err +} + +func (c *Cache) typeFields(t reflect.Type) ([]Field, error) { + fields, err := c.listFields(t) + if err != nil { + return nil, err + } + sort.Sort(byName(fields)) + // Delete all fields that are hidden by the Go rules for embedded fields. + + // The fields are sorted in primary order of name, secondary order of field + // index length. So the first field with a given name is the dominant one. + var out []Field + for advance, i := 0, 0; i < len(fields); i += advance { + // One iteration per name. + // Find the sequence of fields with the name of this first field. + fi := fields[i] + name := fi.Name + for advance = 1; i+advance < len(fields); advance++ { + fj := fields[i+advance] + if fj.Name != name { + break + } + } + // Find the dominant field, if any, out of all fields that have the same name. + dominant, ok := dominantField(fields[i : i+advance]) + if ok { + out = append(out, dominant) + } + } + sort.Sort(byIndex(out)) + return out, nil +} + +func (c *Cache) listFields(t reflect.Type) ([]Field, error) { + // This uses the same condition that the Go language does: there must be a unique instance + // of the match at a given depth level. 
If there are multiple instances of a match at the + // same depth, they annihilate each other and inhibit any possible match at a lower level. + // The algorithm is breadth first search, one depth level at a time. + + // The current and next slices are work queues: + // current lists the fields to visit on this depth level, + // and next lists the fields on the next lower level. + current := []fieldScan{} + next := []fieldScan{{typ: t}} + + // nextCount records the number of times an embedded type has been + // encountered and considered for queueing in the 'next' slice. + // We only queue the first one, but we increment the count on each. + // If a struct type T can be reached more than once at a given depth level, + // then it annihilates itself and need not be considered at all when we + // process that next depth level. + var nextCount map[reflect.Type]int + + // visited records the structs that have been considered already. + // Embedded pointer fields can create cycles in the graph of + // reachable embedded types; visited avoids following those cycles. + // It also avoids duplicated effort: if we didn't find the field in an + // embedded type T at level 2, we won't find it in one at level 4 either. + visited := map[reflect.Type]bool{} + + var fields []Field // Fields found. + + for len(next) > 0 { + current, next = next, current[:0] + count := nextCount + nextCount = nil + + // Process all the fields at this depth, now listed in 'current'. + // The loop queues embedded fields found in 'next', for processing during the next + // iteration. The multiplicity of the 'current' field counts is recorded + // in 'count'; the multiplicity of the 'next' field counts is recorded in 'nextCount'. + for _, scan := range current { + t := scan.typ + if visited[t] { + // We've looked through this type before, at a higher level. + // That higher level would shadow the lower level we're now at, + // so this one can't be useful to us. Ignore it. 
+ continue + } + visited[t] = true + for i := 0; i < t.NumField(); i++ { + f := t.Field(i) + + exported := (f.PkgPath == "") + + // If a named field is unexported, ignore it. An anonymous + // unexported field is processed, because it may contain + // exported fields, which are visible. + if !exported && !f.Anonymous { + continue + } + + // Examine the tag. + tagName, keep, other, err := c.parseTag(f.Tag) + if err != nil { + return nil, err + } + if !keep { + continue + } + if c.leafTypes(f.Type) { + fields = append(fields, newField(f, tagName, other, scan.index, i)) + continue + } + + var ntyp reflect.Type + if f.Anonymous { + // Anonymous field of type T or *T. + ntyp = f.Type + if ntyp.Kind() == reflect.Ptr { + ntyp = ntyp.Elem() + } + } + + // Record fields with a tag name, non-anonymous fields, or + // anonymous non-struct fields. + if tagName != "" || ntyp == nil || ntyp.Kind() != reflect.Struct { + if !exported { + continue + } + fields = append(fields, newField(f, tagName, other, scan.index, i)) + if count[t] > 1 { + // If there were multiple instances, add a second, + // so that the annihilation code will see a duplicate. + fields = append(fields, fields[len(fields)-1]) + } + continue + } + + // Queue embedded struct fields for processing with next level, + // but only if the embedded types haven't already been queued. + if nextCount[ntyp] > 0 { + nextCount[ntyp] = 2 // exact multiple doesn't matter + continue + } + if nextCount == nil { + nextCount = map[reflect.Type]int{} + } + nextCount[ntyp] = 1 + if count[t] > 1 { + nextCount[ntyp] = 2 // exact multiple doesn't matter + } + var index []int + index = append(index, scan.index...) 
+ index = append(index, i) + next = append(next, fieldScan{ntyp, index}) + } + } + } + return fields, nil +} + +func newField(f reflect.StructField, tagName string, other interface{}, index []int, i int) Field { + name := tagName + if name == "" { + name = f.Name + } + sf := Field{ + Name: name, + NameFromTag: tagName != "", + Type: f.Type, + ParsedTag: other, + nameBytes: []byte(name), + } + sf.equalFold = foldFunc(sf.nameBytes) + sf.Index = append(sf.Index, index...) + sf.Index = append(sf.Index, i) + return sf +} + +// byName sorts fields using the following criteria, in order: +// 1. name +// 2. embedding depth +// 3. tag presence (preferring a tagged field) +// 4. index sequence. +type byName []Field + +func (x byName) Len() int { return len(x) } + +func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] } + +func (x byName) Less(i, j int) bool { + if x[i].Name != x[j].Name { + return x[i].Name < x[j].Name + } + if len(x[i].Index) != len(x[j].Index) { + return len(x[i].Index) < len(x[j].Index) + } + if x[i].NameFromTag != x[j].NameFromTag { + return x[i].NameFromTag + } + return byIndex(x).Less(i, j) +} + +// byIndex sorts field by index sequence. +type byIndex []Field + +func (x byIndex) Len() int { return len(x) } + +func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] } + +func (x byIndex) Less(i, j int) bool { + xi := x[i].Index + xj := x[j].Index + ln := len(xi) + if l := len(xj); l < ln { + ln = l + } + for k := 0; k < ln; k++ { + if xi[k] != xj[k] { + return xi[k] < xj[k] + } + } + return len(xi) < len(xj) +} + +// dominantField looks through the fields, all of which are known to have the +// same name, to find the single field that dominates the others using Go's +// embedding rules, modified by the presence of tags. If there are multiple +// top-level fields, the boolean will be false: This condition is an error in +// Go and we skip all the fields. 
+func dominantField(fs []Field) (Field, bool) { + // The fields are sorted in increasing index-length order, then by presence of tag. + // That means that the first field is the dominant one. We need only check + // for error cases: two fields at top level, either both tagged or neither tagged. + if len(fs) > 1 && len(fs[0].Index) == len(fs[1].Index) && fs[0].NameFromTag == fs[1].NameFromTag { + return Field{}, false + } + return fs[0], true +} diff --git a/vendor/cloud.google.com/go/internal/fields/fields_test.go b/vendor/cloud.google.com/go/internal/fields/fields_test.go new file mode 100644 index 00000000..904d8b85 --- /dev/null +++ b/vendor/cloud.google.com/go/internal/fields/fields_test.go @@ -0,0 +1,561 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package fields + +import ( + "encoding/json" + "errors" + "fmt" + "reflect" + "strings" + "testing" + "time" +) + +type embed1 struct { + Em1 int + Dup int // annihilates with embed2.Dup + Shadow int + embed3 +} + +type embed2 struct { + Dup int + embed3 + embed4 +} + +type embed3 struct { + Em3 int // annihilated because embed3 is in both embed1 and embed2 + embed5 +} + +type embed4 struct { + Em4 int + Dup int // annihilation of Dup in embed1, embed2 hides this Dup + *embed1 // ignored because it occurs at a higher level +} + +type embed5 struct { + x int +} + +type Anonymous int + +type S1 struct { + Exported int + unexported int + Shadow int // shadows S1.Shadow + embed1 + *embed2 + Anonymous +} + +type Time struct { + time.Time +} + +var intType = reflect.TypeOf(int(0)) + +func field(name string, tval interface{}, index ...int) *Field { + return &Field{ + Name: name, + Type: reflect.TypeOf(tval), + Index: index, + } +} + +func tfield(name string, tval interface{}, index ...int) *Field { + return &Field{ + Name: name, + Type: reflect.TypeOf(tval), + Index: index, + NameFromTag: true, + } +} + +func TestFieldsNoTags(t *testing.T) { + c := NewCache(nil, nil, nil) + got, err := c.Fields(reflect.TypeOf(S1{})) + if err != nil { + t.Fatal(err) + } + want := []*Field{ + field("Exported", int(0), 0), + field("Shadow", int(0), 2), + field("Em1", int(0), 3, 0), + field("Em4", int(0), 4, 2, 0), + field("Anonymous", Anonymous(0), 5), + } + if msg, ok := compareFields(got, want); !ok { + t.Error(msg) + } +} + +func TestAgainstJSONEncodingNoTags(t *testing.T) { + // Demonstrates that this package produces the same set of fields as encoding/json. 
+ s1 := S1{ + Exported: 1, + unexported: 2, + Shadow: 3, + embed1: embed1{ + Em1: 4, + Dup: 5, + Shadow: 6, + embed3: embed3{ + Em3: 7, + embed5: embed5{x: 8}, + }, + }, + embed2: &embed2{ + Dup: 9, + embed3: embed3{ + Em3: 10, + embed5: embed5{x: 11}, + }, + embed4: embed4{ + Em4: 12, + Dup: 13, + embed1: &embed1{Em1: 14}, + }, + }, + Anonymous: Anonymous(15), + } + var want S1 + jsonRoundTrip(t, s1, &want) + var got S1 + got.embed2 = &embed2{} // need this because reflection won't create it + fields, err := NewCache(nil, nil, nil).Fields(reflect.TypeOf(got)) + if err != nil { + t.Fatal(err) + } + setFields(fields, &got, s1) + if !reflect.DeepEqual(got, want) { + t.Errorf("got\n%+v\nwant\n%+v", got, want) + } +} + +// Tests use of LeafTypes parameter to NewCache +func TestAgainstJSONEncodingEmbeddedTime(t *testing.T) { + timeLeafFn := func(t reflect.Type) bool { + return t == reflect.TypeOf(time.Time{}) + } + // Demonstrates that this package can produce the same set of + // fields as encoding/json for a struct with an embedded time.Time. 
+ now := time.Now().UTC() + myt := Time{ + now, + } + var want Time + jsonRoundTrip(t, myt, &want) + var got Time + fields, err := NewCache(nil, nil, timeLeafFn).Fields(reflect.TypeOf(got)) + if err != nil { + t.Fatal(err) + } + setFields(fields, &got, myt) + if !reflect.DeepEqual(got, want) { + t.Errorf("got\n%+v\nwant\n%+v", got, want) + } +} + +type S2 struct { + NoTag int + XXX int `json:"tag"` // tag name takes precedence + Anonymous `json:"anon"` // anonymous non-structs also get their name from the tag + unexported int `json:"tag"` + Embed `json:"em"` // embedded structs with tags become fields + Tag int + YYY int `json:"Tag"` // tag takes precedence over untagged field of the same name + Empty int `json:""` // empty tag is noop + tEmbed1 + tEmbed2 +} + +type Embed struct { + Em int +} + +type tEmbed1 struct { + Dup int + X int `json:"Dup2"` +} + +type tEmbed2 struct { + Y int `json:"Dup"` // takes precedence over tEmbed1.Dup because it is tagged + Z int `json:"Dup2"` // same name as tEmbed1.X and both tagged, so ignored +} + +func jsonTagParser(t reflect.StructTag) (name string, keep bool, other interface{}, err error) { + s := t.Get("json") + parts := strings.Split(s, ",") + if parts[0] == "-" { + return "", false, nil, nil + } + if len(parts) > 1 { + other = parts[1:] + } + return parts[0], true, other, nil +} + +func validateFunc(t reflect.Type) (err error) { + if t.Kind() != reflect.Struct { + return errors.New("non-struct type used") + } + + for i := 0; i < t.NumField(); i++ { + if t.Field(i).Type.Kind() == reflect.Slice { + return fmt.Errorf("slice field found at field %s on struct %s", t.Field(i).Name, t.Name()) + } + } + + return nil +} + +func TestFieldsWithTags(t *testing.T) { + got, err := NewCache(jsonTagParser, nil, nil).Fields(reflect.TypeOf(S2{})) + if err != nil { + t.Fatal(err) + } + want := []*Field{ + field("NoTag", int(0), 0), + tfield("tag", int(0), 1), + tfield("anon", Anonymous(0), 2), + tfield("em", Embed{}, 4), + tfield("Tag", 
int(0), 6), + field("Empty", int(0), 7), + tfield("Dup", int(0), 8, 0), + } + if msg, ok := compareFields(got, want); !ok { + t.Error(msg) + } +} + +func TestAgainstJSONEncodingWithTags(t *testing.T) { + // Demonstrates that this package produces the same set of fields as encoding/json. + s2 := S2{ + NoTag: 1, + XXX: 2, + Anonymous: 3, + Embed: Embed{ + Em: 4, + }, + tEmbed1: tEmbed1{ + Dup: 5, + X: 6, + }, + tEmbed2: tEmbed2{ + Y: 7, + Z: 8, + }, + } + var want S2 + jsonRoundTrip(t, s2, &want) + var got S2 + fields, err := NewCache(jsonTagParser, nil, nil).Fields(reflect.TypeOf(got)) + if err != nil { + t.Fatal(err) + } + setFields(fields, &got, s2) + if !reflect.DeepEqual(got, want) { + t.Errorf("got\n%+v\nwant\n%+v", got, want) + } +} + +func TestUnexportedAnonymousNonStruct(t *testing.T) { + // An unexported anonymous non-struct field should not be recorded. + // This is currently a bug in encoding/json. + // https://github.com/golang/go/issues/18009 + type ( + u int + v int + S struct { + u + v `json:"x"` + int + } + ) + + got, err := NewCache(jsonTagParser, nil, nil).Fields(reflect.TypeOf(S{})) + if err != nil { + t.Fatal(err) + } + if len(got) != 0 { + t.Errorf("got %d fields, want 0", len(got)) + } +} + +func TestUnexportedAnonymousStruct(t *testing.T) { + // An unexported anonymous struct with a tag is ignored. + // This is currently a bug in encoding/json. + // https://github.com/golang/go/issues/18009 + type ( + s1 struct{ X int } + S2 struct { + s1 `json:"Y"` + } + ) + got, err := NewCache(jsonTagParser, nil, nil).Fields(reflect.TypeOf(S2{})) + if err != nil { + t.Fatal(err) + } + if len(got) != 0 { + t.Errorf("got %d fields, want 0", len(got)) + } +} + +func TestDominantField(t *testing.T) { + // With fields sorted by index length and then by tag presence, + // the dominant field is always the first. Make sure all error + // cases are caught. + for _, test := range []struct { + fields []Field + wantOK bool + }{ + // A single field is OK. 
+ {[]Field{{Index: []int{0}}}, true}, + {[]Field{{Index: []int{0}, NameFromTag: true}}, true}, + // A single field at top level is OK. + {[]Field{{Index: []int{0}}, {Index: []int{1, 0}}}, true}, + {[]Field{{Index: []int{0}}, {Index: []int{1, 0}, NameFromTag: true}}, true}, + {[]Field{{Index: []int{0}, NameFromTag: true}, {Index: []int{1, 0}, NameFromTag: true}}, true}, + // A single tagged field is OK. + {[]Field{{Index: []int{0}, NameFromTag: true}, {Index: []int{1}}}, true}, + // Two untagged fields at the same level is an error. + {[]Field{{Index: []int{0}}, {Index: []int{1}}}, false}, + // Two tagged fields at the same level is an error. + {[]Field{{Index: []int{0}, NameFromTag: true}, {Index: []int{1}, NameFromTag: true}}, false}, + } { + _, gotOK := dominantField(test.fields) + if gotOK != test.wantOK { + t.Errorf("%v: got %t, want %t", test.fields, gotOK, test.wantOK) + } + } +} + +func TestIgnore(t *testing.T) { + type S struct { + X int `json:"-"` + } + got, err := NewCache(jsonTagParser, nil, nil).Fields(reflect.TypeOf(S{})) + if err != nil { + t.Fatal(err) + } + if len(got) != 0 { + t.Errorf("got %d fields, want 0", len(got)) + } +} + +func TestParsedTag(t *testing.T) { + type S struct { + X int `json:"name,omitempty"` + } + got, err := NewCache(jsonTagParser, nil, nil).Fields(reflect.TypeOf(S{})) + if err != nil { + t.Fatal(err) + } + want := []*Field{ + {Name: "name", NameFromTag: true, Type: intType, + Index: []int{0}, ParsedTag: []string{"omitempty"}}, + } + if msg, ok := compareFields(got, want); !ok { + t.Error(msg) + } +} + +func TestValidateFunc(t *testing.T) { + type MyInvalidStruct struct { + A string + B []int + } + + _, err := NewCache(nil, validateFunc, nil).Fields(reflect.TypeOf(MyInvalidStruct{})) + if err == nil { + t.Fatal("expected error, got nil") + } + + type MyValidStruct struct { + A string + B int + } + _, err = NewCache(nil, validateFunc, nil).Fields(reflect.TypeOf(MyValidStruct{})) + if err != nil { + t.Fatalf("expected nil, got 
error: %s\n", err) + } +} + +func compareFields(got []Field, want []*Field) (msg string, ok bool) { + if len(got) != len(want) { + return fmt.Sprintf("got %d fields, want %d", len(got), len(want)), false + } + for i, g := range got { + w := *want[i] + if !fieldsEqual(&g, &w) { + return fmt.Sprintf("got %+v, want %+v", g, w), false + } + } + return "", true +} + +// Need this because Field contains a function, which cannot be compared even +// by reflect.DeepEqual. +func fieldsEqual(f1, f2 *Field) bool { + if f1 == nil || f2 == nil { + return f1 == f2 + } + return f1.Name == f2.Name && + f1.NameFromTag == f2.NameFromTag && + f1.Type == f2.Type && + reflect.DeepEqual(f1.ParsedTag, f2.ParsedTag) +} + +// Set the fields of dst from those of src. +// dst must be a pointer to a struct value. +// src must be a struct value. +func setFields(fields []Field, dst, src interface{}) { + vsrc := reflect.ValueOf(src) + vdst := reflect.ValueOf(dst).Elem() + for _, f := range fields { + fdst := vdst.FieldByIndex(f.Index) + fsrc := vsrc.FieldByIndex(f.Index) + fdst.Set(fsrc) + } +} + +func jsonRoundTrip(t *testing.T, in, out interface{}) { + bytes, err := json.Marshal(in) + if err != nil { + t.Fatal(err) + } + if err := json.Unmarshal(bytes, out); err != nil { + t.Fatal(err) + } +} + +type S3 struct { + S4 + Abc int + AbC int + Tag int + X int `json:"Tag"` + unexported int +} + +type S4 struct { + ABc int + Y int `json:"Abc"` // ignored because of top-level Abc +} + +func TestMatchingField(t *testing.T) { + fields, err := NewCache(jsonTagParser, nil, nil).Fields(reflect.TypeOf(S3{})) + if err != nil { + t.Fatal(err) + } + for _, test := range []struct { + name string + want *Field + }{ + // Exact match wins. + {"Abc", field("Abc", int(0), 1)}, + {"AbC", field("AbC", int(0), 2)}, + {"ABc", field("ABc", int(0), 0, 0)}, + // If there are multiple matches but no exact match or tag, + // the first field wins, lexicographically by index. 
+ // Here, "ABc" is at a deeper embedding level, but since S4 appears + // first in S3, its index precedes the other fields of S3. + {"abc", field("ABc", int(0), 0, 0)}, + // Tag name takes precedence over untagged field of the same name. + {"Tag", tfield("Tag", int(0), 4)}, + // Unexported fields disappear. + {"unexported", nil}, + // Untagged embedded structs disappear. + {"S4", nil}, + } { + if got := fields.Match(test.name); !fieldsEqual(got, test.want) { + t.Errorf("match %q:\ngot %+v\nwant %+v", test.name, got, test.want) + } + } +} + +func TestAgainstJSONMatchingField(t *testing.T) { + s3 := S3{ + S4: S4{ABc: 1, Y: 2}, + Abc: 3, + AbC: 4, + Tag: 5, + X: 6, + unexported: 7, + } + var want S3 + jsonRoundTrip(t, s3, &want) + v := reflect.ValueOf(want) + fields, err := NewCache(jsonTagParser, nil, nil).Fields(reflect.TypeOf(S3{})) + if err != nil { + t.Fatal(err) + } + for _, test := range []struct { + name string + got int + }{ + {"Abc", 3}, + {"AbC", 4}, + {"ABc", 1}, + {"abc", 1}, + {"Tag", 6}, + } { + f := fields.Match(test.name) + if f == nil { + t.Fatalf("%s: no match", test.name) + } + w := v.FieldByIndex(f.Index).Interface() + if test.got != w { + t.Errorf("%s: got %d, want %d", test.name, test.got, w) + } + } +} + +func TestTagErrors(t *testing.T) { + called := false + c := NewCache(func(t reflect.StructTag) (string, bool, interface{}, error) { + called = true + s := t.Get("f") + if s == "bad" { + return "", false, nil, errors.New("error") + } + return s, true, nil, nil + }, nil, nil) + + type T struct { + X int `f:"ok"` + Y int `f:"bad"` + } + + _, err := c.Fields(reflect.TypeOf(T{})) + if !called { + t.Fatal("tag parser not called") + } + if err == nil { + t.Error("want error, got nil") + } + // Second time, we should cache the error. 
+ called = false + _, err = c.Fields(reflect.TypeOf(T{})) + if called { + t.Fatal("tag parser called on second time") + } + if err == nil { + t.Error("want error, got nil") + } +} diff --git a/vendor/cloud.google.com/go/internal/fields/fold.go b/vendor/cloud.google.com/go/internal/fields/fold.go new file mode 100644 index 00000000..10a68189 --- /dev/null +++ b/vendor/cloud.google.com/go/internal/fields/fold.go @@ -0,0 +1,156 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package fields + +// This file was copied from https://go.googlesource.com/go/+/go1.7.3/src/encoding/json/fold.go. +// Only the license and package were changed. + +import ( + "bytes" + "unicode/utf8" +) + +const ( + caseMask = ^byte(0x20) // Mask to ignore case in ASCII. + kelvin = '\u212a' + smallLongEss = '\u017f' +) + +// foldFunc returns one of four different case folding equivalence +// functions, from most general (and slow) to fastest: +// +// 1) bytes.EqualFold, if the key s contains any non-ASCII UTF-8 +// 2) equalFoldRight, if s contains special folding ASCII ('k', 'K', 's', 'S') +// 3) asciiEqualFold, no special, but includes non-letters (including _) +// 4) simpleLetterEqualFold, no specials, no non-letters. 
+// +// The letters S and K are special because they map to 3 runes, not just 2: +// * S maps to s and to U+017F 'ſ' Latin small letter long s +// * k maps to K and to U+212A 'K' Kelvin sign +// See https://play.golang.org/p/tTxjOc0OGo +// +// The returned function is specialized for matching against s and +// should only be given s. It's not curried for performance reasons. +func foldFunc(s []byte) func(s, t []byte) bool { + nonLetter := false + special := false // special letter + for _, b := range s { + if b >= utf8.RuneSelf { + return bytes.EqualFold + } + upper := b & caseMask + if upper < 'A' || upper > 'Z' { + nonLetter = true + } else if upper == 'K' || upper == 'S' { + // See above for why these letters are special. + special = true + } + } + if special { + return equalFoldRight + } + if nonLetter { + return asciiEqualFold + } + return simpleLetterEqualFold +} + +// equalFoldRight is a specialization of bytes.EqualFold when s is +// known to be all ASCII (including punctuation), but contains an 's', +// 'S', 'k', or 'K', requiring a Unicode fold on the bytes in t. +// See comments on foldFunc. +func equalFoldRight(s, t []byte) bool { + for _, sb := range s { + if len(t) == 0 { + return false + } + tb := t[0] + if tb < utf8.RuneSelf { + if sb != tb { + sbUpper := sb & caseMask + if 'A' <= sbUpper && sbUpper <= 'Z' { + if sbUpper != tb&caseMask { + return false + } + } else { + return false + } + } + t = t[1:] + continue + } + // sb is ASCII and t is not. t must be either kelvin + // sign or long s; sb must be s, S, k, or K. + tr, size := utf8.DecodeRune(t) + switch sb { + case 's', 'S': + if tr != smallLongEss { + return false + } + case 'k', 'K': + if tr != kelvin { + return false + } + default: + return false + } + t = t[size:] + + } + if len(t) > 0 { + return false + } + return true +} + +// asciiEqualFold is a specialization of bytes.EqualFold for use when +// s is all ASCII (but may contain non-letters) and contains no +// special-folding letters. 
+// See comments on foldFunc. +func asciiEqualFold(s, t []byte) bool { + if len(s) != len(t) { + return false + } + for i, sb := range s { + tb := t[i] + if sb == tb { + continue + } + if ('a' <= sb && sb <= 'z') || ('A' <= sb && sb <= 'Z') { + if sb&caseMask != tb&caseMask { + return false + } + } else { + return false + } + } + return true +} + +// simpleLetterEqualFold is a specialization of bytes.EqualFold for +// use when s is all ASCII letters (no underscores, etc) and also +// doesn't contain 'k', 'K', 's', or 'S'. +// See comments on foldFunc. +func simpleLetterEqualFold(s, t []byte) bool { + if len(s) != len(t) { + return false + } + for i, b := range s { + if b&caseMask != t[i]&caseMask { + return false + } + } + return true +} diff --git a/vendor/cloud.google.com/go/internal/fields/fold_test.go b/vendor/cloud.google.com/go/internal/fields/fold_test.go new file mode 100644 index 00000000..eadded18 --- /dev/null +++ b/vendor/cloud.google.com/go/internal/fields/fold_test.go @@ -0,0 +1,129 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package fields + +// This file was copied from https://go.googlesource.com/go/+/go1.7.3/src/encoding/json/fold_test.go. +// Only the license and package were changed. 
+ +import ( + "bytes" + "strings" + "testing" + "unicode/utf8" +) + +var foldTests = []struct { + fn func(s, t []byte) bool + s, t string + want bool +}{ + {equalFoldRight, "", "", true}, + {equalFoldRight, "a", "a", true}, + {equalFoldRight, "", "a", false}, + {equalFoldRight, "a", "", false}, + {equalFoldRight, "a", "A", true}, + {equalFoldRight, "AB", "ab", true}, + {equalFoldRight, "AB", "ac", false}, + {equalFoldRight, "sbkKc", "ſbKKc", true}, + {equalFoldRight, "SbKkc", "ſbKKc", true}, + {equalFoldRight, "SbKkc", "ſbKK", false}, + {equalFoldRight, "e", "é", false}, + {equalFoldRight, "s", "S", true}, + + {simpleLetterEqualFold, "", "", true}, + {simpleLetterEqualFold, "abc", "abc", true}, + {simpleLetterEqualFold, "abc", "ABC", true}, + {simpleLetterEqualFold, "abc", "ABCD", false}, + {simpleLetterEqualFold, "abc", "xxx", false}, + + {asciiEqualFold, "a_B", "A_b", true}, + {asciiEqualFold, "aa@", "aa`", false}, // verify 0x40 and 0x60 aren't case-equivalent +} + +func TestFold(t *testing.T) { + for i, tt := range foldTests { + if got := tt.fn([]byte(tt.s), []byte(tt.t)); got != tt.want { + t.Errorf("%d. 
%q, %q = %v; want %v", i, tt.s, tt.t, got, tt.want) + } + truth := strings.EqualFold(tt.s, tt.t) + if truth != tt.want { + t.Errorf("strings.EqualFold doesn't agree with case %d", i) + } + } +} + +func TestFoldAgainstUnicode(t *testing.T) { + const bufSize = 5 + buf1 := make([]byte, 0, bufSize) + buf2 := make([]byte, 0, bufSize) + var runes []rune + for i := 0x20; i <= 0x7f; i++ { + runes = append(runes, rune(i)) + } + runes = append(runes, kelvin, smallLongEss) + + funcs := []struct { + name string + fold func(s, t []byte) bool + letter bool // must be ASCII letter + simple bool // must be simple ASCII letter (not 'S' or 'K') + }{ + { + name: "equalFoldRight", + fold: equalFoldRight, + }, + { + name: "asciiEqualFold", + fold: asciiEqualFold, + simple: true, + }, + { + name: "simpleLetterEqualFold", + fold: simpleLetterEqualFold, + simple: true, + letter: true, + }, + } + + for _, ff := range funcs { + for _, r := range runes { + if r >= utf8.RuneSelf { + continue + } + if ff.letter && !isASCIILetter(byte(r)) { + continue + } + if ff.simple && (r == 's' || r == 'S' || r == 'k' || r == 'K') { + continue + } + for _, r2 := range runes { + buf1 := append(buf1[:0], 'x') + buf2 := append(buf2[:0], 'x') + buf1 = buf1[:1+utf8.EncodeRune(buf1[1:bufSize], r)] + buf2 = buf2[:1+utf8.EncodeRune(buf2[1:bufSize], r2)] + buf1 = append(buf1, 'x') + buf2 = append(buf2, 'x') + want := bytes.EqualFold(buf1, buf2) + if got := ff.fold(buf1, buf2); got != want { + t.Errorf("%s(%q, %q) = %v; want %v", ff.name, buf1, buf2, got, want) + } + } + } + } +} + +func isASCIILetter(b byte) bool { + return ('A' <= b && b <= 'Z') || ('a' <= b && b <= 'z') +} diff --git a/vendor/cloud.google.com/go/internal/kokoro/build.sh b/vendor/cloud.google.com/go/internal/kokoro/build.sh new file mode 100755 index 00000000..9b93ac4c --- /dev/null +++ b/vendor/cloud.google.com/go/internal/kokoro/build.sh @@ -0,0 +1,41 @@ +#!/bin/bash + +# Fail on any error +set -eo pipefail + +# Display commands being run +set 
-x + +# cd to project dir on Kokoro instance +cd git/gocloud + +go version + +# Set $GOPATH +export GOPATH="$HOME/go" +GOCLOUD_HOME=$GOPATH/src/cloud.google.com/go +mkdir -p $GOCLOUD_HOME + +# Move code into $GOPATH and get dependencies +cp -R ./* $GOCLOUD_HOME +cd $GOCLOUD_HOME +go get -v ./... + +# # Don't run integration tests until we can protect against code from +# # untrusted forks reading and storing our service account key. +# cd internal/kokoro +# # Don't print out encryption keys, etc +# set +x +# key=$(cat $KOKORO_ARTIFACTS_DIR/keystore/*_encrypted_ba2d6f7723ed_key) +# iv=$(cat $KOKORO_ARTIFACTS_DIR/keystore/*_encrypted_ba2d6f7723ed_iv) +# pass=$(cat $KOKORO_ARTIFACTS_DIR/keystore/*_encrypted_ba2d6f7723ed_pass) + +# openssl aes-256-cbc -K $key -iv $iv -pass pass:$pass -in kokoro-key.json.enc -out key.json -d +# set -x + +# export GCLOUD_TESTS_GOLANG_KEY="$(pwd)/key.json" +# export GCLOUD_TESTS_GOLANG_PROJECT_ID="dulcet-port-762" +# cd $GOCLOUD_HOME + +# Run tests and tee output to log file, to be pushed to GCS as artifact. + go test -race -v -short ./... 
2>&1 | tee $KOKORO_ARTIFACTS_DIR/$KOKORO_GERRIT_REVISION.log \ No newline at end of file diff --git a/vendor/cloud.google.com/go/internal/kokoro/kokoro-key.json.enc b/vendor/cloud.google.com/go/internal/kokoro/kokoro-key.json.enc new file mode 100644 index 0000000000000000000000000000000000000000..b23885469fcf2d3e8fa42629fc8af94274761689 GIT binary patch literal 2448 zcmV;B32*jOVQh3|WM5y>qZ<-mbg%i}GwZgTCa$aR^oFv=WL!zlR6Ol+2_D~inxLR$(aw?a6VSu@6lC|T1CaNO1Whe+WipxV ztYtRwuSpO!F`P;TWYkxVqt7-N5*4dJwlgdJ$X=C+zb+T%#2vKK6J59^+u+x7px=6X zFxhgMjht1{C!jCCBs{8v)>2I4Byk!u>Xgp zIRTdo-~EGF-jF$vyZnIR*23b&(U(RjGkwKk=z~AgdCbW>tVOuBiB><-@q8lmM6nok zH4x30juG*7ouwkZR~^&gS4jVfms9Y^Rbx=8x7?Ivmkw{4YLG8E^!n+8nZF8I;n1U7 z{tuIkrpAD{$>XRF3&lq+$}arB6xPADnByt)U`@BUhvoEyGujn=d{srtZc8e)ZGs_R{=L8Y>Pz0KR6=P+81%gyF?ftViOmCy0MQbQU;;^ zC6*U&<2q*Q_@CVfH>qpn*%9kTj`jQ;W95-QwdMnE?Nls31&MmVwbcB?qikW}p`qD% zLXR&?2=v#Oa*i90r~;!i^~RBjiAoMXV{Sf=BiITEzC!HlBePZXW06DjojP(Ro{|Xg z^*owrfH;TYoHaFKy>X2%#Y?z?G2+ve)ZQTM7>HYuk=Y6o{*&{3hnnV&UhuLfCF8KpI@}gp>X5l-%&tc{N;!k^JjndK-%olcREx z!VhSjR;Y~w7Dw5KW5}&=-{(Ihya|LxgN~YZY^(XZ-~zs+%?%eaFpX&z*U3NZ$UvOo zA@JY!ah*7`*G|bhrweCYTT?wE2R8s?Rk^In{y$X;OW>Z+u2@|esL;zzTBFX+={i|n zX}I6MqfN>wog^$a0+0QUrA=RK0ssIl6<4trL9gO;Q`yJj`*G-2XRCb7$tqE8kvM-9 z-qK4pj5lA8*lMXH`kPe??d64%U}T^piys$=bw0_XWqgZoqcz84kEN<8XkC z2?e*+-P3yHm4MR&H=RhUX%_@U`W%+>l}N(+Hj|*~e~*I8hz5R9nG93QbuMik-B zAYCqEm7ib%?S_JEB57VZ#BV>))s*Qy>DmK8fU9@}odW8a`LdTX8G@%vsQ`6nEYF0u z@qDgz7u$>Y%8gFiUKS(&z}Vd7l7_kZ*f0$hwwc^XFH3BgZbodNnmmDH$*Bh0+Tip# zM;=6_pc`x#V-y)zCV}Uw@*w-IO;KV2XJz&HQ6D_KR8xF2LDjy!JU$$)?|rbxBt>q` zIySN>m<#O|{v4<7>upx=Aw$*_W*|l~FcJ(QzD%nW)*7F_`MRHt#KTao*xd8ciNI9m z!?%97f&ShW4QV3Fh`JQVx{R|j+~U*K?!mrxHPjZl^1l2$2{~dqyPfY+&BZ?&b{pfi zhI(22cx)l*feb!atqpx89vdvvr+eFk!vZEcZ&GD<8TePgz-0B?(`4-<7(wLbQ(!{o z%howZD$*xD5TdOoJ$g{@(-OXUZ5_rxU7c#)v_~_;%x}u5BDI@9z={O^npK}7Kl<*2 z#a_i@6zT#59p2BOgt;aKr09Ml{92_IlE6@SCkhVq6*{}q-5a;_LI{-~tpV#m9X-Kgz(S;O&T;P5;K-FbEeWPBPe!5F0Od~n{7w*7$+ 
zTlYg_u|eqSSu{dt0y;;-?(2Q5bY*th0WaWjIJMiTMLIY0BHn?~KUbE^0C}F2OFsWW zY-P}O%tcEnDrn{fg042LlT4{+hX3_!IsH9#7^@2P@YpZ6#&RyivXG0HudJtpUtrv% z7)0}UoA~Zl%`xc!u+3)S0 zQ<1>X79Ked2Y-qMngv#q)BEbF$Z(AwO3N`>7n0S;c33e5gB8o@u`|d4Nzi4z6XzU+ zA*$27gnJ7WMBJFgH_-S*d*~xI%ayOsR^rxe9E46>=ARc?y>mpkM=%-boVUAr+h(@y zXO;RF&9}@|Y5`Wwmh;LFdmq*}(`NP)^T9;YRHk=B4ydnZK~jur z$)D;YGn-WNv4>yo#i^5QW=o_YQ`~|v+gK5P1$)QcPr3Bs{3hLx6E8Br2(hNSslXhwF;1N(3?>DZ1F#bQy$K1F%5?M`fV zwNstwa=aZz&QB|^`Ce>z*5|;9@5oA^ZJ@MS;R7mBR3CAOA+8*t3oorKxTeau+)08) z2q~b{#_?dhcV$J@8#n+^7u7|i2#=(4Rxh+q4>-x)BL@qceqj$@PnQ9f;a;h5WR0eO z8m*WzbP0(#iyTU#=7|EkN({{Jif=_+9HEuKs*~1rYB 0 { + fmt.Fprintln(w) + for i := 0; i < v.Len(); i++ { + fprint(w, v.Index(i), state{ + level: s.level + 1, + prefix: "", + suffix: ",", + defaults: s.defaults, + }) + fmt.Fprintln(w) + } + } + fmt.Fprintf(w, "%s}%s", indent, s.suffix) + + case reflect.Map: + fmt.Fprintf(w, "%s{", v.Type()) + if v.Len() > 0 { + fmt.Fprintln(w) + keys := v.MapKeys() + maybeSort(keys, v.Type().Key()) + for _, key := range keys { + val := v.MapIndex(key) + if s.defaults || !isDefault(val) { + fprint(w, val, state{ + level: s.level + 1, + prefix: short(key) + ": ", + suffix: ",", + defaults: s.defaults, + }) + fmt.Fprintln(w) + } + } + } + fmt.Fprintf(w, "%s}%s", indent, s.suffix) + + case reflect.Struct: + t := v.Type() + fmt.Fprintf(w, "%s{\n", t) + for i := 0; i < t.NumField(); i++ { + f := v.Field(i) + if s.defaults || !isDefault(f) { + fprint(w, f, state{ + level: s.level + 1, + prefix: t.Field(i).Name + ": ", + suffix: ",", + defaults: s.defaults, + }) + fmt.Fprintln(w) + } + } + fmt.Fprintf(w, "%s}%s", indent, s.suffix) + } +} + +func isNil(v reflect.Value) bool { + if !v.IsValid() { + return true + } + switch v.Type().Kind() { + case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: + return v.IsNil() + default: + return false + } +} + +func isDefault(v reflect.Value) bool { + if !v.IsValid() { + return true + } + t := v.Type() + switch 
t.Kind() { + case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: + return v.IsNil() + default: + if !v.CanInterface() { + return false + } + return t.Comparable() && v.Interface() == reflect.Zero(t).Interface() + } +} + +// short returns a short, one-line string for v. +func short(v reflect.Value) string { + if !v.IsValid() { + return "nil" + } + if v.Type().Kind() == reflect.String { + return fmt.Sprintf("%q", v) + } + return fmt.Sprintf("%v", v) +} + +func indent(w io.Writer, level int) { + for i := 0; i < level; i++ { + io.WriteString(w, Indent) // ignore errors + } +} + +func maybeSort(vs []reflect.Value, t reflect.Type) { + if less := lessFunc(t); less != nil { + sort.Sort(&sorter{vs, less}) + } +} + +// lessFunc returns a function that implements the "<" operator +// for the given type, or nil if the type doesn't support "<" . +func lessFunc(t reflect.Type) func(v1, v2 interface{}) bool { + switch t.Kind() { + case reflect.String: + return func(v1, v2 interface{}) bool { return v1.(string) < v2.(string) } + case reflect.Int: + return func(v1, v2 interface{}) bool { return v1.(int) < v2.(int) } + case reflect.Int8: + return func(v1, v2 interface{}) bool { return v1.(int8) < v2.(int8) } + case reflect.Int16: + return func(v1, v2 interface{}) bool { return v1.(int16) < v2.(int16) } + case reflect.Int32: + return func(v1, v2 interface{}) bool { return v1.(int32) < v2.(int32) } + case reflect.Int64: + return func(v1, v2 interface{}) bool { return v1.(int64) < v2.(int64) } + case reflect.Uint: + return func(v1, v2 interface{}) bool { return v1.(uint) < v2.(uint) } + case reflect.Uint8: + return func(v1, v2 interface{}) bool { return v1.(uint8) < v2.(uint8) } + case reflect.Uint16: + return func(v1, v2 interface{}) bool { return v1.(uint16) < v2.(uint16) } + case reflect.Uint32: + return func(v1, v2 interface{}) bool { return v1.(uint32) < v2.(uint32) } + case reflect.Uint64: + return func(v1, v2 interface{}) bool { return 
v1.(uint64) < v2.(uint64) } + case reflect.Float32: + return func(v1, v2 interface{}) bool { return v1.(float32) < v2.(float32) } + case reflect.Float64: + return func(v1, v2 interface{}) bool { return v1.(float64) < v2.(float64) } + default: + return nil + } +} + +type sorter struct { + vs []reflect.Value + less func(v1, v2 interface{}) bool +} + +func (s *sorter) Len() int { return len(s.vs) } +func (s *sorter) Swap(i, j int) { s.vs[i], s.vs[j] = s.vs[j], s.vs[i] } +func (s *sorter) Less(i, j int) bool { return s.less(s.vs[i].Interface(), s.vs[j].Interface()) } diff --git a/vendor/cloud.google.com/go/internal/pretty/pretty_test.go b/vendor/cloud.google.com/go/internal/pretty/pretty_test.go new file mode 100644 index 00000000..6e2ff8b7 --- /dev/null +++ b/vendor/cloud.google.com/go/internal/pretty/pretty_test.go @@ -0,0 +1,105 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package pretty + +import ( + "fmt" + "strings" + "testing" +) + +type S struct { + X int + Y bool + z *string +} + +func TestSprint(t *testing.T) { + Indent = "~" + i := 17 + + for _, test := range []struct { + value interface{} + want string + }{ + // primitives and pointer + {nil, "nil"}, + {3, "3"}, + {9.8, "9.8"}, + {true, "true"}, + {"foo", `"foo"`}, + {&i, "&17"}, + // array and slice + {[3]int{1, 2, 3}, "[3]int{\n~1,\n~2,\n~3,\n}"}, + {[]int{1, 2, 3}, "[]int{\n~1,\n~2,\n~3,\n}"}, + {[]int{}, "[]int{}"}, + {[]string{"foo"}, "[]string{\n~\"foo\",\n}"}, + // map + {map[int]bool{}, "map[int]bool{}"}, + {map[int]bool{1: true, 2: false, 3: true}, + "map[int]bool{\n~1: true,\n~3: true,\n}"}, + // struct + {S{}, "pretty.S{\n}"}, + {S{3, true, ptr("foo")}, + "pretty.S{\n~X: 3,\n~Y: true,\n~z: &\"foo\",\n}"}, + // interface + {[]interface{}{&i}, "[]interface {}{\n~&17,\n}"}, + // nesting + {[]S{{1, false, ptr("a")}, {2, true, ptr("b")}}, + `[]pretty.S{ +~pretty.S{ +~~X: 1, +~~z: &"a", +~}, +~pretty.S{ +~~X: 2, +~~Y: true, +~~z: &"b", +~}, +}`}, + } { + got := fmt.Sprintf("%v", Value(test.value)) + if got != test.want { + t.Errorf("%v: got:\n%q\nwant:\n%q", test.value, got, test.want) + } + } +} + +func TestWithDefaults(t *testing.T) { + Indent = "~" + for _, test := range []struct { + value interface{} + want string + }{ + {map[int]bool{1: true, 2: false, 3: true}, + "map[int]bool{\n~1: true,\n~2: false,\n~3: true,\n}"}, + {S{}, "pretty.S{\n~X: 0,\n~Y: false,\n~z: nil,\n}"}, + } { + got := fmt.Sprintf("%+v", Value(test.value)) + if got != test.want { + t.Errorf("%v: got:\n%q\nwant:\n%q", test.value, got, test.want) + } + } +} + +func TestBadVerb(t *testing.T) { + got := fmt.Sprintf("%d", Value(8)) + want := "%!d(" + if !strings.HasPrefix(got, want) { + t.Errorf("got %q, want prefix %q", got, want) + } +} + +func ptr(s string) *string { return &s } diff --git a/vendor/cloud.google.com/go/internal/retry.go b/vendor/cloud.google.com/go/internal/retry.go new file mode 
100644 index 00000000..79995be4 --- /dev/null +++ b/vendor/cloud.google.com/go/internal/retry.go @@ -0,0 +1,55 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package internal + +import ( + "fmt" + "time" + + gax "github.com/googleapis/gax-go" + + "golang.org/x/net/context" +) + +// Retry calls the supplied function f repeatedly according to the provided +// backoff parameters. It returns when one of the following occurs: +// When f's first return value is true, Retry immediately returns with f's second +// return value. +// When the provided context is done, Retry returns with ctx.Err(). +func Retry(ctx context.Context, bo gax.Backoff, f func() (stop bool, err error)) error { + return retry(ctx, bo, f, gax.Sleep) +} + +func retry(ctx context.Context, bo gax.Backoff, f func() (stop bool, err error), + sleep func(context.Context, time.Duration) error) error { + var lastErr error + for { + stop, err := f() + if stop { + return err + } + // Remember the last "real" error from f. 
+ if err != nil && err != context.Canceled && err != context.DeadlineExceeded { + lastErr = err + } + p := bo.Pause() + if cerr := sleep(ctx, p); cerr != nil { + if lastErr != nil { + return fmt.Errorf("%v; last function err: %v", cerr, lastErr) + } + return cerr + } + } +} diff --git a/vendor/cloud.google.com/go/internal/retry_test.go b/vendor/cloud.google.com/go/internal/retry_test.go new file mode 100644 index 00000000..590b5550 --- /dev/null +++ b/vendor/cloud.google.com/go/internal/retry_test.go @@ -0,0 +1,64 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package internal + +import ( + "errors" + "testing" + "time" + + "golang.org/x/net/context" + + gax "github.com/googleapis/gax-go" +) + +func TestRetry(t *testing.T) { + ctx := context.Background() + // Without a context deadline, retry will run until the function + // says not to retry any more. + n := 0 + endRetry := errors.New("end retry") + err := retry(ctx, gax.Backoff{}, + func() (bool, error) { + n++ + if n < 10 { + return false, nil + } + return true, endRetry + }, + func(context.Context, time.Duration) error { return nil }) + if got, want := err, endRetry; got != want { + t.Errorf("got %v, want %v", err, endRetry) + } + if n != 10 { + t.Errorf("n: got %d, want %d", n, 10) + } + + // If the context has a deadline, sleep will return an error + // and end the function. 
+ n = 0 + err = retry(ctx, gax.Backoff{}, + func() (bool, error) { return false, nil }, + func(context.Context, time.Duration) error { + n++ + if n < 10 { + return nil + } + return context.DeadlineExceeded + }) + if err == nil { + t.Error("got nil, want error") + } +} diff --git a/vendor/cloud.google.com/go/internal/testutil/context.go b/vendor/cloud.google.com/go/internal/testutil/context.go new file mode 100644 index 00000000..cdb440ee --- /dev/null +++ b/vendor/cloud.google.com/go/internal/testutil/context.go @@ -0,0 +1,67 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package testutil contains helper functions for writing tests. +package testutil + +import ( + "io/ioutil" + "log" + "os" + + "golang.org/x/net/context" + "golang.org/x/oauth2" + "golang.org/x/oauth2/google" +) + +const ( + envProjID = "GCLOUD_TESTS_GOLANG_PROJECT_ID" + envPrivateKey = "GCLOUD_TESTS_GOLANG_KEY" +) + +// ProjID returns the project ID to use in integration tests, or the empty +// string if none is configured. +func ProjID() string { + projID := os.Getenv(envProjID) + if projID == "" { + return "" + } + return projID +} + +// TokenSource returns the OAuth2 token source to use in integration tests, +// or nil if none is configured. If the environment variable is unset, +// TokenSource will try to find 'Application Default Credentials'. Else, +// TokenSource will return nil. 
+// TokenSource will log.Fatal if the token source is specified but missing or invalid. +func TokenSource(ctx context.Context, scopes ...string) oauth2.TokenSource { + key := os.Getenv(envPrivateKey) + if key == "" { // Try for application default credentials. + ts, err := google.DefaultTokenSource(ctx, scopes...) + if err != nil { + log.Println("No 'Application Default Credentials' found.") + return nil + } + return ts + } + jsonKey, err := ioutil.ReadFile(key) + if err != nil { + log.Fatalf("Cannot read the JSON key file, err: %v", err) + } + conf, err := google.JWTConfigFromJSON(jsonKey, scopes...) + if err != nil { + log.Fatalf("google.JWTConfigFromJSON: %v", err) + } + return conf.TokenSource(ctx) +} diff --git a/vendor/cloud.google.com/go/internal/testutil/server.go b/vendor/cloud.google.com/go/internal/testutil/server.go new file mode 100644 index 00000000..287599f3 --- /dev/null +++ b/vendor/cloud.google.com/go/internal/testutil/server.go @@ -0,0 +1,73 @@ +/* +Copyright 2016 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package testutil + +import ( + "net" + + grpc "google.golang.org/grpc" +) + +// A Server is an in-process gRPC server, listening on a system-chosen port on +// the local loopback interface. Servers are for testing only and are not +// intended to be used in production code. +// +// To create a server, make a new Server, register your handlers, then call +// Start: +// +// srv, err := NewServer() +// ... 
+// mypb.RegisterMyServiceServer(srv.Gsrv, &myHandler) +// .... +// srv.Start() +// +// Clients should connect to the server with no security: +// +// conn, err := grpc.Dial(srv.Addr, grpc.WithInsecure()) +// ... +type Server struct { + Addr string + l net.Listener + Gsrv *grpc.Server +} + +// NewServer creates a new Server. The Server will be listening for gRPC connections +// at the address named by the Addr field, without TLS. +func NewServer() (*Server, error) { + l, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + return nil, err + } + s := &Server{ + Addr: l.Addr().String(), + l: l, + Gsrv: grpc.NewServer(), + } + return s, nil +} + +// Start causes the server to start accepting incoming connections. +// Call Start after registering handlers. +func (s *Server) Start() { + go s.Gsrv.Serve(s.l) +} + +// Close shuts down the server. +func (s *Server) Close() { + s.Gsrv.Stop() + s.l.Close() +} diff --git a/vendor/cloud.google.com/go/internal/testutil/server_test.go b/vendor/cloud.google.com/go/internal/testutil/server_test.go new file mode 100644 index 00000000..817ce4ee --- /dev/null +++ b/vendor/cloud.google.com/go/internal/testutil/server_test.go @@ -0,0 +1,35 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package testutil + +import ( + "testing" + + grpc "google.golang.org/grpc" +) + +func TestNewServer(t *testing.T) { + srv, err := NewServer() + if err != nil { + t.Fatal(err) + } + srv.Start() + conn, err := grpc.Dial(srv.Addr, grpc.WithInsecure()) + if err != nil { + t.Fatal(err) + } + conn.Close() + srv.Close() +} diff --git a/vendor/cloud.google.com/go/internal/version/update_version.sh b/vendor/cloud.google.com/go/internal/version/update_version.sh new file mode 100755 index 00000000..fecf1f03 --- /dev/null +++ b/vendor/cloud.google.com/go/internal/version/update_version.sh @@ -0,0 +1,6 @@ +#!/bin/bash + +today=$(date +%Y%m%d) + +sed -i -r -e 's/const Repo = "([0-9]{8})"/const Repo = "'$today'"/' $GOFILE + diff --git a/vendor/cloud.google.com/go/internal/version/version.go b/vendor/cloud.google.com/go/internal/version/version.go new file mode 100644 index 00000000..0d7f05d7 --- /dev/null +++ b/vendor/cloud.google.com/go/internal/version/version.go @@ -0,0 +1,49 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:generate ./update_version.sh + +// Package version contains version information for Google Cloud Client +// Libraries for Go, as reported in request headers. +package version + +import ( + "bytes" + "runtime" + "unicode" +) + +// Repo is the current version of the client libraries in this +// repo. It should be a date in YYYYMMDD format. 
+const Repo = "20170210" + +// Go returns the Go runtime version. The returned string +// has no whitespace. +func Go() string { + return goVersion +} + +var goVersion = removeWhitespace(runtime.Version()) + +func removeWhitespace(s string) string { + var buf bytes.Buffer + for _, r := range s { + if unicode.IsSpace(r) { + buf.WriteByte('_') + } else { + buf.WriteRune(r) + } + } + return buf.String() +} diff --git a/vendor/cloud.google.com/go/internal/version/version_test.go b/vendor/cloud.google.com/go/internal/version/version_test.go new file mode 100644 index 00000000..a7bc4db6 --- /dev/null +++ b/vendor/cloud.google.com/go/internal/version/version_test.go @@ -0,0 +1,47 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package version + +import ( + "regexp" + "testing" +) + +func TestGo(t *testing.T) { + got := Go() + want := `^\S+$` + match, err := regexp.MatchString(want, got) + if err != nil { + t.Fatal(err) + } + if !match { + t.Errorf("got %q, want match of regexp %q", got, want) + } +} + +func TestRemoveWhitespace(t *testing.T) { + for _, test := range []struct { + in, want string + }{ + {"", ""}, + {"go1.7", "go1.7"}, + {" a b c ", "_a_b_c_"}, + {"a\tb\t c\n", "a_b__c_"}, + } { + if got := removeWhitespace(test.in); got != test.want { + t.Errorf("removeWhitespace(%q) = %q, want %q", test.in, got, test.want) + } + } +} diff --git a/vendor/cloud.google.com/go/key.json.enc b/vendor/cloud.google.com/go/key.json.enc new file mode 100644 index 0000000000000000000000000000000000000000..2f673a84b143c71c53fb2b1619a9c96f48ce1f54 GIT binary patch literal 1248 zcmV<61Rwj0Zv%9Mm*y(^-mW4!3RUy2jtHzDzqBac1!ANM5s{p;?JXvx&2>S>?&XkF zbPIGkWM>T~87d^b%*>nJaAymr5U<`@59@t~4JOftxQT!nX^@`S7|o~~O+Q#D(Tea< z0C6)A8SLE!UVU{sAQSPn29g38Y+ zrV zh6d*@2I!vcc;^lf(cQ?Pz?3Ffe`floQ4ON~yPjBX#2YS)8P-#1)~jzFmH>!#k_@6* z?Pa|=)iAO~EZEsyQPlyI@pPP|aa_}tTnzQ9mri9C=?}A^Int5dTDS)n9ga#mJv#L$ zBY9m{rCPaM=sSjZ=a3+0`3^E(t4n8bT_bUS3MpJWBpL&r6h zJ%Xct#s$ZR(jRC3mjD0TPMY@#rsSEiCtk*Ko=v>Mh-?797)&q4y~SHHepsfE9xmSg z4n8vqb#TmGqZt~l>S+M!&90~(5}z8lX}09MudKf5L= zoQ7bN;+9yVdn_4$#-W-8a=%bwPC+qYzuI##;8LEi@-?!OqX9i;E%3&xZZ$lC!UKW0 ztI@XV?CzcLtFjWGYQ%v+Iu!2ou0&QhpI8MI0hRO^CO&sfNGho0#)!##9qVZs+e^Zn zAM>BcIsA(NAq5bM;1+QtF)fOjm;*qDcL^dv{jdk9uv>HE?a0GDBGXJLK#`lp-e;&o zjd}Qxu}rQ6d00k>F=`7~Y;Mfbj8Q+q+Lx(1k8^~qCv}{{z!kzo5=@e_E)7uoUE+8Hx*1ZStR~&B#Pa~ zJSQQ9U3v>5>e$B4bX1m8ykOK;!Ca5JLSVI-@PG=TTEUyz&H?6DLda~RTskwU z2NgP9h<51tLqz{r-|_ZmG$PUnKo>?$aw(m&R_rgQ%+~6fzd4b~_8Q{JATzF`8UL*a zx%$Q*4uJL|74$eDw`n>9I1NL&1U~&?N&_!HUB@zp?uwL$D6`!7m4f{o_+fh{>s7&^ z`UClq@1IQ8yiYQ{rh=X+;)yZS@p`NPA$jn#OuY5sSx#Z>MroeKFfb+uaa7p&G)VRN Kkn>IvykJ$=6K6R9 literal 0 HcmV?d00001 diff --git a/vendor/cloud.google.com/go/language/apiv1/doc.go 
b/vendor/cloud.google.com/go/language/apiv1/doc.go new file mode 100644 index 00000000..8c8cb2fc --- /dev/null +++ b/vendor/cloud.google.com/go/language/apiv1/doc.go @@ -0,0 +1,35 @@ +// Copyright 2017, Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +// Package language is an experimental, auto-generated package for the +// language API. +// +// Google Cloud Natural Language API provides natural language understanding +// technologies to developers. Examples include sentiment analysis, entity +// recognition, and text annotations. +package language // import "cloud.google.com/go/language/apiv1" + +import ( + "golang.org/x/net/context" + "google.golang.org/grpc/metadata" +) + +func insertXGoog(ctx context.Context, val string) context.Context { + md, _ := metadata.FromContext(ctx) + md = md.Copy() + md["x-goog-api-client"] = []string{val} + return metadata.NewContext(ctx, md) +} diff --git a/vendor/cloud.google.com/go/language/apiv1/language_client.go b/vendor/cloud.google.com/go/language/apiv1/language_client.go new file mode 100644 index 00000000..62716920 --- /dev/null +++ b/vendor/cloud.google.com/go/language/apiv1/language_client.go @@ -0,0 +1,188 @@ +// Copyright 2017, Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package language + +import ( + "time" + + "cloud.google.com/go/internal/version" + gax "github.com/googleapis/gax-go" + "golang.org/x/net/context" + "google.golang.org/api/option" + "google.golang.org/api/transport" + languagepb "google.golang.org/genproto/googleapis/cloud/language/v1" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" +) + +// CallOptions contains the retry settings for each method of Client. +type CallOptions struct { + AnalyzeSentiment []gax.CallOption + AnalyzeEntities []gax.CallOption + AnalyzeSyntax []gax.CallOption + AnnotateText []gax.CallOption +} + +func defaultClientOptions() []option.ClientOption { + return []option.ClientOption{ + option.WithEndpoint("language.googleapis.com:443"), + option.WithScopes( + "https://www.googleapis.com/auth/cloud-platform", + ), + } +} + +func defaultCallOptions() *CallOptions { + retry := map[[2]string][]gax.CallOption{ + {"default", "idempotent"}: { + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.3, + }) + }), + }, + } + return &CallOptions{ + AnalyzeSentiment: retry[[2]string{"default", "idempotent"}], + AnalyzeEntities: retry[[2]string{"default", "idempotent"}], + AnalyzeSyntax: retry[[2]string{"default", "idempotent"}], + AnnotateText: retry[[2]string{"default", "idempotent"}], + } +} + +// Client is a client for interacting with Google Cloud Natural Language API. 
+type Client struct { + // The connection to the service. + conn *grpc.ClientConn + + // The gRPC API client. + client languagepb.LanguageServiceClient + + // The call options for this service. + CallOptions *CallOptions + + // The metadata to be sent with each request. + xGoogHeader string +} + +// NewClient creates a new language service client. +// +// Provides text analysis operations such as sentiment analysis and entity +// recognition. +func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error) { + conn, err := transport.DialGRPC(ctx, append(defaultClientOptions(), opts...)...) + if err != nil { + return nil, err + } + c := &Client{ + conn: conn, + CallOptions: defaultCallOptions(), + + client: languagepb.NewLanguageServiceClient(conn), + } + c.SetGoogleClientInfo() + return c, nil +} + +// Connection returns the client's connection to the API service. +func (c *Client) Connection() *grpc.ClientConn { + return c.conn +} + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *Client) Close() error { + return c.conn.Close() +} + +// SetGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *Client) SetGoogleClientInfo(keyval ...string) { + kv := append([]string{"gl-go", version.Go()}, keyval...) + kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", "") + c.xGoogHeader = gax.XGoogHeader(kv...) +} + +// AnalyzeSentiment analyzes the sentiment of the provided text. 
+func (c *Client) AnalyzeSentiment(ctx context.Context, req *languagepb.AnalyzeSentimentRequest) (*languagepb.AnalyzeSentimentResponse, error) { + ctx = insertXGoog(ctx, c.xGoogHeader) + var resp *languagepb.AnalyzeSentimentResponse + err := gax.Invoke(ctx, func(ctx context.Context) error { + var err error + resp, err = c.client.AnalyzeSentiment(ctx, req) + return err + }, c.CallOptions.AnalyzeSentiment...) + if err != nil { + return nil, err + } + return resp, nil +} + +// AnalyzeEntities finds named entities (currently finds proper names) in the text, +// entity types, salience, mentions for each entity, and other properties. +func (c *Client) AnalyzeEntities(ctx context.Context, req *languagepb.AnalyzeEntitiesRequest) (*languagepb.AnalyzeEntitiesResponse, error) { + ctx = insertXGoog(ctx, c.xGoogHeader) + var resp *languagepb.AnalyzeEntitiesResponse + err := gax.Invoke(ctx, func(ctx context.Context) error { + var err error + resp, err = c.client.AnalyzeEntities(ctx, req) + return err + }, c.CallOptions.AnalyzeEntities...) + if err != nil { + return nil, err + } + return resp, nil +} + +// AnalyzeSyntax analyzes the syntax of the text and provides sentence boundaries and +// tokenization along with part of speech tags, dependency trees, and other +// properties. +func (c *Client) AnalyzeSyntax(ctx context.Context, req *languagepb.AnalyzeSyntaxRequest) (*languagepb.AnalyzeSyntaxResponse, error) { + ctx = insertXGoog(ctx, c.xGoogHeader) + var resp *languagepb.AnalyzeSyntaxResponse + err := gax.Invoke(ctx, func(ctx context.Context) error { + var err error + resp, err = c.client.AnalyzeSyntax(ctx, req) + return err + }, c.CallOptions.AnalyzeSyntax...) + if err != nil { + return nil, err + } + return resp, nil +} + +// AnnotateText a convenience method that provides all the features that analyzeSentiment, +// analyzeEntities, and analyzeSyntax provide in one call. 
+func (c *Client) AnnotateText(ctx context.Context, req *languagepb.AnnotateTextRequest) (*languagepb.AnnotateTextResponse, error) { + ctx = insertXGoog(ctx, c.xGoogHeader) + var resp *languagepb.AnnotateTextResponse + err := gax.Invoke(ctx, func(ctx context.Context) error { + var err error + resp, err = c.client.AnnotateText(ctx, req) + return err + }, c.CallOptions.AnnotateText...) + if err != nil { + return nil, err + } + return resp, nil +} diff --git a/vendor/cloud.google.com/go/language/apiv1/language_client_example_test.go b/vendor/cloud.google.com/go/language/apiv1/language_client_example_test.go new file mode 100644 index 00000000..f1fcf147 --- /dev/null +++ b/vendor/cloud.google.com/go/language/apiv1/language_client_example_test.go @@ -0,0 +1,105 @@ +// Copyright 2017, Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package language_test + +import ( + "cloud.google.com/go/language/apiv1" + "golang.org/x/net/context" + languagepb "google.golang.org/genproto/googleapis/cloud/language/v1" +) + +func ExampleNewClient() { + ctx := context.Background() + c, err := language.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + // TODO: Use client. + _ = c +} + +func ExampleClient_AnalyzeSentiment() { + ctx := context.Background() + c, err := language.NewClient(ctx) + if err != nil { + // TODO: Handle error. 
+ } + + req := &languagepb.AnalyzeSentimentRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.AnalyzeSentiment(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleClient_AnalyzeEntities() { + ctx := context.Background() + c, err := language.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &languagepb.AnalyzeEntitiesRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.AnalyzeEntities(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleClient_AnalyzeSyntax() { + ctx := context.Background() + c, err := language.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &languagepb.AnalyzeSyntaxRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.AnalyzeSyntax(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleClient_AnnotateText() { + ctx := context.Background() + c, err := language.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &languagepb.AnnotateTextRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.AnnotateText(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} diff --git a/vendor/cloud.google.com/go/language/apiv1/mock_test.go b/vendor/cloud.google.com/go/language/apiv1/mock_test.go new file mode 100644 index 00000000..adf11e34 --- /dev/null +++ b/vendor/cloud.google.com/go/language/apiv1/mock_test.go @@ -0,0 +1,363 @@ +// Copyright 2017, Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package language + +import ( + languagepb "google.golang.org/genproto/googleapis/cloud/language/v1" +) + +import ( + "flag" + "io" + "log" + "net" + "os" + "testing" + + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes" + "golang.org/x/net/context" + "google.golang.org/api/option" + status "google.golang.org/genproto/googleapis/rpc/status" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" +) + +var _ = io.EOF +var _ = ptypes.MarshalAny +var _ status.Status + +type mockLanguageServer struct { + // Embed for forward compatibility. + // Tests will keep working if more methods are added + // in the future. + languagepb.LanguageServiceServer + + reqs []proto.Message + + // If set, all calls return this error. 
+ err error + + // responses to return if err == nil + resps []proto.Message +} + +func (s *mockLanguageServer) AnalyzeSentiment(_ context.Context, req *languagepb.AnalyzeSentimentRequest) (*languagepb.AnalyzeSentimentResponse, error) { + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*languagepb.AnalyzeSentimentResponse), nil +} + +func (s *mockLanguageServer) AnalyzeEntities(_ context.Context, req *languagepb.AnalyzeEntitiesRequest) (*languagepb.AnalyzeEntitiesResponse, error) { + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*languagepb.AnalyzeEntitiesResponse), nil +} + +func (s *mockLanguageServer) AnalyzeSyntax(_ context.Context, req *languagepb.AnalyzeSyntaxRequest) (*languagepb.AnalyzeSyntaxResponse, error) { + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*languagepb.AnalyzeSyntaxResponse), nil +} + +func (s *mockLanguageServer) AnnotateText(_ context.Context, req *languagepb.AnnotateTextRequest) (*languagepb.AnnotateTextResponse, error) { + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*languagepb.AnnotateTextResponse), nil +} + +// clientOpt is the option tests should use to connect to the test server. +// It is initialized by TestMain. 
+var clientOpt option.ClientOption + +var ( + mockLanguage mockLanguageServer +) + +func TestMain(m *testing.M) { + flag.Parse() + + serv := grpc.NewServer() + languagepb.RegisterLanguageServiceServer(serv, &mockLanguage) + + lis, err := net.Listen("tcp", "localhost:0") + if err != nil { + log.Fatal(err) + } + go serv.Serve(lis) + + conn, err := grpc.Dial(lis.Addr().String(), grpc.WithInsecure()) + if err != nil { + log.Fatal(err) + } + clientOpt = option.WithGRPCConn(conn) + + os.Exit(m.Run()) +} + +func TestLanguageServiceAnalyzeSentiment(t *testing.T) { + var language string = "language-1613589672" + var expectedResponse = &languagepb.AnalyzeSentimentResponse{ + Language: language, + } + + mockLanguage.err = nil + mockLanguage.reqs = nil + + mockLanguage.resps = append(mockLanguage.resps[:0], expectedResponse) + + var document *languagepb.Document = &languagepb.Document{} + var request = &languagepb.AnalyzeSentimentRequest{ + Document: document, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.AnalyzeSentiment(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockLanguage.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestLanguageServiceAnalyzeSentimentError(t *testing.T) { + errCode := codes.Internal + mockLanguage.err = grpc.Errorf(errCode, "test error") + + var document *languagepb.Document = &languagepb.Document{} + var request = &languagepb.AnalyzeSentimentRequest{ + Document: document, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.AnalyzeSentiment(context.Background(), request) + + if c := grpc.Code(err); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} 
+func TestLanguageServiceAnalyzeEntities(t *testing.T) { + var language string = "language-1613589672" + var expectedResponse = &languagepb.AnalyzeEntitiesResponse{ + Language: language, + } + + mockLanguage.err = nil + mockLanguage.reqs = nil + + mockLanguage.resps = append(mockLanguage.resps[:0], expectedResponse) + + var document *languagepb.Document = &languagepb.Document{} + var encodingType languagepb.EncodingType = languagepb.EncodingType_NONE + var request = &languagepb.AnalyzeEntitiesRequest{ + Document: document, + EncodingType: encodingType, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.AnalyzeEntities(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockLanguage.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestLanguageServiceAnalyzeEntitiesError(t *testing.T) { + errCode := codes.Internal + mockLanguage.err = grpc.Errorf(errCode, "test error") + + var document *languagepb.Document = &languagepb.Document{} + var encodingType languagepb.EncodingType = languagepb.EncodingType_NONE + var request = &languagepb.AnalyzeEntitiesRequest{ + Document: document, + EncodingType: encodingType, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.AnalyzeEntities(context.Background(), request) + + if c := grpc.Code(err); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestLanguageServiceAnalyzeSyntax(t *testing.T) { + var language string = "language-1613589672" + var expectedResponse = &languagepb.AnalyzeSyntaxResponse{ + Language: language, + } + + mockLanguage.err = nil + mockLanguage.reqs = nil + + mockLanguage.resps = append(mockLanguage.resps[:0], 
expectedResponse) + + var document *languagepb.Document = &languagepb.Document{} + var encodingType languagepb.EncodingType = languagepb.EncodingType_NONE + var request = &languagepb.AnalyzeSyntaxRequest{ + Document: document, + EncodingType: encodingType, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.AnalyzeSyntax(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockLanguage.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestLanguageServiceAnalyzeSyntaxError(t *testing.T) { + errCode := codes.Internal + mockLanguage.err = grpc.Errorf(errCode, "test error") + + var document *languagepb.Document = &languagepb.Document{} + var encodingType languagepb.EncodingType = languagepb.EncodingType_NONE + var request = &languagepb.AnalyzeSyntaxRequest{ + Document: document, + EncodingType: encodingType, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.AnalyzeSyntax(context.Background(), request) + + if c := grpc.Code(err); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestLanguageServiceAnnotateText(t *testing.T) { + var language string = "language-1613589672" + var expectedResponse = &languagepb.AnnotateTextResponse{ + Language: language, + } + + mockLanguage.err = nil + mockLanguage.reqs = nil + + mockLanguage.resps = append(mockLanguage.resps[:0], expectedResponse) + + var document *languagepb.Document = &languagepb.Document{} + var features *languagepb.AnnotateTextRequest_Features = &languagepb.AnnotateTextRequest_Features{} + var encodingType languagepb.EncodingType = languagepb.EncodingType_NONE + var request = &languagepb.AnnotateTextRequest{ + Document: 
document, + Features: features, + EncodingType: encodingType, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.AnnotateText(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockLanguage.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestLanguageServiceAnnotateTextError(t *testing.T) { + errCode := codes.Internal + mockLanguage.err = grpc.Errorf(errCode, "test error") + + var document *languagepb.Document = &languagepb.Document{} + var features *languagepb.AnnotateTextRequest_Features = &languagepb.AnnotateTextRequest_Features{} + var encodingType languagepb.EncodingType = languagepb.EncodingType_NONE + var request = &languagepb.AnnotateTextRequest{ + Document: document, + Features: features, + EncodingType: encodingType, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.AnnotateText(context.Background(), request) + + if c := grpc.Code(err); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} diff --git a/vendor/cloud.google.com/go/license_test.go b/vendor/cloud.google.com/go/license_test.go new file mode 100644 index 00000000..4b87878a --- /dev/null +++ b/vendor/cloud.google.com/go/license_test.go @@ -0,0 +1,70 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package cloud + +import ( + "bytes" + "io/ioutil" + "os" + "path/filepath" + "strings" + "testing" +) + +var sentinels = []string{ + "Copyright", + "Google Inc", + `Licensed under the Apache License, Version 2.0 (the "License");`, +} + +func TestLicense(t *testing.T) { + err := filepath.Walk(".", func(path string, fi os.FileInfo, err error) error { + if err != nil { + return err + } + + if ext := filepath.Ext(path); ext != ".go" && ext != ".proto" { + return nil + } + if strings.HasSuffix(path, ".pb.go") { + // .pb.go files are generated from the proto files. + // .proto files must have license headers. + return nil + } + if path == "bigtable/cmd/cbt/cbtdoc.go" { + // Automatically generated. + return nil + } + + src, err := ioutil.ReadFile(path) + if err != nil { + return nil + } + src = src[:140] // Ensure all of the sentinel values are at the top of the file. + + // Find license + for _, sentinel := range sentinels { + if !bytes.Contains(src, []byte(sentinel)) { + t.Errorf("%v: license header not present. want %q", path, sentinel) + return nil + } + } + + return nil + }) + if err != nil { + t.Fatal(err) + } +} diff --git a/vendor/cloud.google.com/go/logging/apiv2/README.md b/vendor/cloud.google.com/go/logging/apiv2/README.md new file mode 100644 index 00000000..d2d9a176 --- /dev/null +++ b/vendor/cloud.google.com/go/logging/apiv2/README.md @@ -0,0 +1,11 @@ +Auto-generated logging v2 clients +================================= + +This package includes auto-generated clients for the logging v2 API. 
+ +Use the handwritten logging client (in the parent directory, +cloud.google.com/go/logging) in preference to this. + +This code is EXPERIMENTAL and subject to CHANGE AT ANY TIME. + + diff --git a/vendor/cloud.google.com/go/logging/apiv2/config_client.go b/vendor/cloud.google.com/go/logging/apiv2/config_client.go new file mode 100644 index 00000000..deb83f7f --- /dev/null +++ b/vendor/cloud.google.com/go/logging/apiv2/config_client.go @@ -0,0 +1,315 @@ +// Copyright 2017, Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package logging + +import ( + "math" + "time" + + "cloud.google.com/go/internal/version" + gax "github.com/googleapis/gax-go" + "golang.org/x/net/context" + "google.golang.org/api/iterator" + "google.golang.org/api/option" + "google.golang.org/api/transport" + loggingpb "google.golang.org/genproto/googleapis/logging/v2" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" +) + +var ( + configProjectPathTemplate = gax.MustCompilePathTemplate("projects/{project}") + configSinkPathTemplate = gax.MustCompilePathTemplate("projects/{project}/sinks/{sink}") +) + +// ConfigCallOptions contains the retry settings for each method of ConfigClient. 
+type ConfigCallOptions struct { + ListSinks []gax.CallOption + GetSink []gax.CallOption + CreateSink []gax.CallOption + UpdateSink []gax.CallOption + DeleteSink []gax.CallOption +} + +func defaultConfigClientOptions() []option.ClientOption { + return []option.ClientOption{ + option.WithEndpoint("logging.googleapis.com:443"), + option.WithScopes( + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/logging.admin", + "https://www.googleapis.com/auth/logging.read", + "https://www.googleapis.com/auth/logging.write", + ), + } +} + +func defaultConfigCallOptions() *ConfigCallOptions { + retry := map[[2]string][]gax.CallOption{ + {"default", "idempotent"}: { + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 1000 * time.Millisecond, + Multiplier: 1.2, + }) + }), + }, + {"default", "non_idempotent"}: { + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 1000 * time.Millisecond, + Multiplier: 1.2, + }) + }), + }, + } + return &ConfigCallOptions{ + ListSinks: retry[[2]string{"default", "idempotent"}], + GetSink: retry[[2]string{"default", "idempotent"}], + CreateSink: retry[[2]string{"default", "non_idempotent"}], + UpdateSink: retry[[2]string{"default", "non_idempotent"}], + DeleteSink: retry[[2]string{"default", "idempotent"}], + } +} + +// ConfigClient is a client for interacting with Stackdriver Logging API. +type ConfigClient struct { + // The connection to the service. + conn *grpc.ClientConn + + // The gRPC API client. + configClient loggingpb.ConfigServiceV2Client + + // The call options for this service. + CallOptions *ConfigCallOptions + + // The metadata to be sent with each request. 
+ xGoogHeader string +} + +// NewConfigClient creates a new config service v2 client. +// +// Service for configuring sinks used to export log entries outside of +// Stackdriver Logging. +func NewConfigClient(ctx context.Context, opts ...option.ClientOption) (*ConfigClient, error) { + conn, err := transport.DialGRPC(ctx, append(defaultConfigClientOptions(), opts...)...) + if err != nil { + return nil, err + } + c := &ConfigClient{ + conn: conn, + CallOptions: defaultConfigCallOptions(), + + configClient: loggingpb.NewConfigServiceV2Client(conn), + } + c.SetGoogleClientInfo() + return c, nil +} + +// Connection returns the client's connection to the API service. +func (c *ConfigClient) Connection() *grpc.ClientConn { + return c.conn +} + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *ConfigClient) Close() error { + return c.conn.Close() +} + +// SetGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *ConfigClient) SetGoogleClientInfo(keyval ...string) { + kv := append([]string{"gl-go", version.Go()}, keyval...) + kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", "") + c.xGoogHeader = gax.XGoogHeader(kv...) +} + +// ConfigProjectPath returns the path for the project resource. +func ConfigProjectPath(project string) string { + path, err := configProjectPathTemplate.Render(map[string]string{ + "project": project, + }) + if err != nil { + panic(err) + } + return path +} + +// ConfigSinkPath returns the path for the sink resource. +func ConfigSinkPath(project, sink string) string { + path, err := configSinkPathTemplate.Render(map[string]string{ + "project": project, + "sink": sink, + }) + if err != nil { + panic(err) + } + return path +} + +// ListSinks lists sinks. 
+func (c *ConfigClient) ListSinks(ctx context.Context, req *loggingpb.ListSinksRequest) *LogSinkIterator { + ctx = insertXGoog(ctx, c.xGoogHeader) + it := &LogSinkIterator{} + it.InternalFetch = func(pageSize int, pageToken string) ([]*loggingpb.LogSink, string, error) { + var resp *loggingpb.ListSinksResponse + req.PageToken = pageToken + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context) error { + var err error + resp, err = c.configClient.ListSinks(ctx, req) + return err + }, c.CallOptions.ListSinks...) + if err != nil { + return nil, "", err + } + return resp.Sinks, resp.NextPageToken, nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + return it +} + +// GetSink gets a sink. +func (c *ConfigClient) GetSink(ctx context.Context, req *loggingpb.GetSinkRequest) (*loggingpb.LogSink, error) { + ctx = insertXGoog(ctx, c.xGoogHeader) + var resp *loggingpb.LogSink + err := gax.Invoke(ctx, func(ctx context.Context) error { + var err error + resp, err = c.configClient.GetSink(ctx, req) + return err + }, c.CallOptions.GetSink...) + if err != nil { + return nil, err + } + return resp, nil +} + +// CreateSink creates a sink that exports specified log entries to a destination. The +// export of newly-ingested log entries begins immediately, unless the current +// time is outside the sink's start and end times or the sink's +// `writer_identity` is not permitted to write to the destination. A sink can +// export log entries only from the resource owning the sink. 
+func (c *ConfigClient) CreateSink(ctx context.Context, req *loggingpb.CreateSinkRequest) (*loggingpb.LogSink, error) { + ctx = insertXGoog(ctx, c.xGoogHeader) + var resp *loggingpb.LogSink + err := gax.Invoke(ctx, func(ctx context.Context) error { + var err error + resp, err = c.configClient.CreateSink(ctx, req) + return err + }, c.CallOptions.CreateSink...) + if err != nil { + return nil, err + } + return resp, nil +} + +// UpdateSink updates a sink. If the named sink doesn't exist, then this method is +// identical to +// [sinks.create](/logging/docs/api/reference/rest/v2/projects.sinks/create). +// If the named sink does exist, then this method replaces the following +// fields in the existing sink with values from the new sink: `destination`, +// `filter`, `output_version_format`, `start_time`, and `end_time`. +// The updated filter might also have a new `writer_identity`; see the +// `unique_writer_identity` field. +func (c *ConfigClient) UpdateSink(ctx context.Context, req *loggingpb.UpdateSinkRequest) (*loggingpb.LogSink, error) { + ctx = insertXGoog(ctx, c.xGoogHeader) + var resp *loggingpb.LogSink + err := gax.Invoke(ctx, func(ctx context.Context) error { + var err error + resp, err = c.configClient.UpdateSink(ctx, req) + return err + }, c.CallOptions.UpdateSink...) + if err != nil { + return nil, err + } + return resp, nil +} + +// DeleteSink deletes a sink. If the sink has a unique `writer_identity`, then that +// service account is also deleted. +func (c *ConfigClient) DeleteSink(ctx context.Context, req *loggingpb.DeleteSinkRequest) error { + ctx = insertXGoog(ctx, c.xGoogHeader) + err := gax.Invoke(ctx, func(ctx context.Context) error { + var err error + _, err = c.configClient.DeleteSink(ctx, req) + return err + }, c.CallOptions.DeleteSink...) + return err +} + +// LogSinkIterator manages a stream of *loggingpb.LogSink. 
+type LogSinkIterator struct { + items []*loggingpb.LogSink + pageInfo *iterator.PageInfo + nextFunc func() error + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*loggingpb.LogSink, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *LogSinkIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. +func (it *LogSinkIterator) Next() (*loggingpb.LogSink, error) { + var item *loggingpb.LogSink + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *LogSinkIterator) bufLen() int { + return len(it.items) +} + +func (it *LogSinkIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} diff --git a/vendor/cloud.google.com/go/logging/apiv2/config_client_example_test.go b/vendor/cloud.google.com/go/logging/apiv2/config_client_example_test.go new file mode 100644 index 00000000..620aa650 --- /dev/null +++ b/vendor/cloud.google.com/go/logging/apiv2/config_client_example_test.go @@ -0,0 +1,125 @@ +// Copyright 2017, Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package logging_test + +import ( + "cloud.google.com/go/logging/apiv2" + "golang.org/x/net/context" + loggingpb "google.golang.org/genproto/googleapis/logging/v2" +) + +func ExampleNewConfigClient() { + ctx := context.Background() + c, err := logging.NewConfigClient(ctx) + if err != nil { + // TODO: Handle error. + } + // TODO: Use client. + _ = c +} + +func ExampleConfigClient_ListSinks() { + ctx := context.Background() + c, err := logging.NewConfigClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &loggingpb.ListSinksRequest{ + // TODO: Fill request struct fields. + } + it := c.ListSinks(ctx, req) + for { + resp, err := it.Next() + if err != nil { + // TODO: Handle error. + break + } + // TODO: Use resp. + _ = resp + } +} + +func ExampleConfigClient_GetSink() { + ctx := context.Background() + c, err := logging.NewConfigClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &loggingpb.GetSinkRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.GetSink(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleConfigClient_CreateSink() { + ctx := context.Background() + c, err := logging.NewConfigClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &loggingpb.CreateSinkRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.CreateSink(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. 
+ _ = resp +} + +func ExampleConfigClient_UpdateSink() { + ctx := context.Background() + c, err := logging.NewConfigClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &loggingpb.UpdateSinkRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.UpdateSink(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleConfigClient_DeleteSink() { + ctx := context.Background() + c, err := logging.NewConfigClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &loggingpb.DeleteSinkRequest{ + // TODO: Fill request struct fields. + } + err = c.DeleteSink(ctx, req) + if err != nil { + // TODO: Handle error. + } +} diff --git a/vendor/cloud.google.com/go/logging/apiv2/doc.go b/vendor/cloud.google.com/go/logging/apiv2/doc.go new file mode 100644 index 00000000..c0016d46 --- /dev/null +++ b/vendor/cloud.google.com/go/logging/apiv2/doc.go @@ -0,0 +1,36 @@ +// Copyright 2017, Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +// Package logging is an experimental, auto-generated package for the +// logging API. +// +// The Stackdriver Logging API lets you write log entries and manage your +// logs, log sinks and logs-based metrics. +// +// Use the client at cloud.google.com/go/logging in preference to this. 
+package logging // import "cloud.google.com/go/logging/apiv2" + +import ( + "golang.org/x/net/context" + "google.golang.org/grpc/metadata" +) + +func insertXGoog(ctx context.Context, val string) context.Context { + md, _ := metadata.FromContext(ctx) + md = md.Copy() + md["x-goog-api-client"] = []string{val} + return metadata.NewContext(ctx, md) +} diff --git a/vendor/cloud.google.com/go/logging/apiv2/logging_client.go b/vendor/cloud.google.com/go/logging/apiv2/logging_client.go new file mode 100644 index 00000000..318c5e51 --- /dev/null +++ b/vendor/cloud.google.com/go/logging/apiv2/logging_client.go @@ -0,0 +1,443 @@ +// Copyright 2017, Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. 
+ +package logging + +import ( + "math" + "time" + + "cloud.google.com/go/internal/version" + gax "github.com/googleapis/gax-go" + "golang.org/x/net/context" + "google.golang.org/api/iterator" + "google.golang.org/api/option" + "google.golang.org/api/transport" + monitoredrespb "google.golang.org/genproto/googleapis/api/monitoredres" + loggingpb "google.golang.org/genproto/googleapis/logging/v2" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" +) + +var ( + loggingProjectPathTemplate = gax.MustCompilePathTemplate("projects/{project}") + loggingLogPathTemplate = gax.MustCompilePathTemplate("projects/{project}/logs/{log}") +) + +// CallOptions contains the retry settings for each method of Client. +type CallOptions struct { + DeleteLog []gax.CallOption + WriteLogEntries []gax.CallOption + ListLogEntries []gax.CallOption + ListMonitoredResourceDescriptors []gax.CallOption + ListLogs []gax.CallOption +} + +func defaultClientOptions() []option.ClientOption { + return []option.ClientOption{ + option.WithEndpoint("logging.googleapis.com:443"), + option.WithScopes( + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/logging.admin", + "https://www.googleapis.com/auth/logging.read", + "https://www.googleapis.com/auth/logging.write", + ), + } +} + +func defaultCallOptions() *CallOptions { + retry := map[[2]string][]gax.CallOption{ + {"default", "idempotent"}: { + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 1000 * time.Millisecond, + Multiplier: 1.2, + }) + }), + }, + {"default", "non_idempotent"}: { + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 1000 * time.Millisecond, + Multiplier: 1.2, + }) + }), + }, + {"list", "idempotent"}: { + 
gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 1000 * time.Millisecond, + Multiplier: 1.2, + }) + }), + }, + } + return &CallOptions{ + DeleteLog: retry[[2]string{"default", "idempotent"}], + WriteLogEntries: retry[[2]string{"default", "non_idempotent"}], + ListLogEntries: retry[[2]string{"list", "idempotent"}], + ListMonitoredResourceDescriptors: retry[[2]string{"default", "idempotent"}], + ListLogs: retry[[2]string{"default", "idempotent"}], + } +} + +// Client is a client for interacting with Stackdriver Logging API. +type Client struct { + // The connection to the service. + conn *grpc.ClientConn + + // The gRPC API client. + client loggingpb.LoggingServiceV2Client + + // The call options for this service. + CallOptions *CallOptions + + // The metadata to be sent with each request. + xGoogHeader string +} + +// NewClient creates a new logging service v2 client. +// +// Service for ingesting and querying logs. +func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error) { + conn, err := transport.DialGRPC(ctx, append(defaultClientOptions(), opts...)...) + if err != nil { + return nil, err + } + c := &Client{ + conn: conn, + CallOptions: defaultCallOptions(), + + client: loggingpb.NewLoggingServiceV2Client(conn), + } + c.SetGoogleClientInfo() + return c, nil +} + +// Connection returns the client's connection to the API service. +func (c *Client) Connection() *grpc.ClientConn { + return c.conn +} + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *Client) Close() error { + return c.conn.Close() +} + +// SetGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. 
+func (c *Client) SetGoogleClientInfo(keyval ...string) { + kv := append([]string{"gl-go", version.Go()}, keyval...) + kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", "") + c.xGoogHeader = gax.XGoogHeader(kv...) +} + +// LoggingProjectPath returns the path for the project resource. +func LoggingProjectPath(project string) string { + path, err := loggingProjectPathTemplate.Render(map[string]string{ + "project": project, + }) + if err != nil { + panic(err) + } + return path +} + +// LoggingLogPath returns the path for the log resource. +func LoggingLogPath(project, log string) string { + path, err := loggingLogPathTemplate.Render(map[string]string{ + "project": project, + "log": log, + }) + if err != nil { + panic(err) + } + return path +} + +// DeleteLog deletes all the log entries in a log. +// The log reappears if it receives new entries. +func (c *Client) DeleteLog(ctx context.Context, req *loggingpb.DeleteLogRequest) error { + ctx = insertXGoog(ctx, c.xGoogHeader) + err := gax.Invoke(ctx, func(ctx context.Context) error { + var err error + _, err = c.client.DeleteLog(ctx, req) + return err + }, c.CallOptions.DeleteLog...) + return err +} + +// WriteLogEntries writes log entries to Stackdriver Logging. All log entries are +// written by this method. +func (c *Client) WriteLogEntries(ctx context.Context, req *loggingpb.WriteLogEntriesRequest) (*loggingpb.WriteLogEntriesResponse, error) { + ctx = insertXGoog(ctx, c.xGoogHeader) + var resp *loggingpb.WriteLogEntriesResponse + err := gax.Invoke(ctx, func(ctx context.Context) error { + var err error + resp, err = c.client.WriteLogEntries(ctx, req) + return err + }, c.CallOptions.WriteLogEntries...) + if err != nil { + return nil, err + } + return resp, nil +} + +// ListLogEntries lists log entries. Use this method to retrieve log entries from +// Stackdriver Logging. For ways to export log entries, see +// [Exporting Logs](/logging/docs/export). 
+func (c *Client) ListLogEntries(ctx context.Context, req *loggingpb.ListLogEntriesRequest) *LogEntryIterator { + ctx = insertXGoog(ctx, c.xGoogHeader) + it := &LogEntryIterator{} + it.InternalFetch = func(pageSize int, pageToken string) ([]*loggingpb.LogEntry, string, error) { + var resp *loggingpb.ListLogEntriesResponse + req.PageToken = pageToken + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context) error { + var err error + resp, err = c.client.ListLogEntries(ctx, req) + return err + }, c.CallOptions.ListLogEntries...) + if err != nil { + return nil, "", err + } + return resp.Entries, resp.NextPageToken, nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + return it +} + +// ListMonitoredResourceDescriptors lists the descriptors for monitored resource types used by Stackdriver +// Logging. +func (c *Client) ListMonitoredResourceDescriptors(ctx context.Context, req *loggingpb.ListMonitoredResourceDescriptorsRequest) *MonitoredResourceDescriptorIterator { + ctx = insertXGoog(ctx, c.xGoogHeader) + it := &MonitoredResourceDescriptorIterator{} + it.InternalFetch = func(pageSize int, pageToken string) ([]*monitoredrespb.MonitoredResourceDescriptor, string, error) { + var resp *loggingpb.ListMonitoredResourceDescriptorsResponse + req.PageToken = pageToken + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context) error { + var err error + resp, err = c.client.ListMonitoredResourceDescriptors(ctx, req) + return err + }, c.CallOptions.ListMonitoredResourceDescriptors...) 
+ if err != nil { + return nil, "", err + } + return resp.ResourceDescriptors, resp.NextPageToken, nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + return it +} + +// ListLogs lists the logs in projects or organizations. +// Only logs that have entries are listed. +func (c *Client) ListLogs(ctx context.Context, req *loggingpb.ListLogsRequest) *StringIterator { + ctx = insertXGoog(ctx, c.xGoogHeader) + it := &StringIterator{} + it.InternalFetch = func(pageSize int, pageToken string) ([]string, string, error) { + var resp *loggingpb.ListLogsResponse + req.PageToken = pageToken + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context) error { + var err error + resp, err = c.client.ListLogs(ctx, req) + return err + }, c.CallOptions.ListLogs...) + if err != nil { + return nil, "", err + } + return resp.LogNames, resp.NextPageToken, nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + return it +} + +// LogEntryIterator manages a stream of *loggingpb.LogEntry. +type LogEntryIterator struct { + items []*loggingpb.LogEntry + pageInfo *iterator.PageInfo + nextFunc func() error + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. 
+ // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*loggingpb.LogEntry, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *LogEntryIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. +func (it *LogEntryIterator) Next() (*loggingpb.LogEntry, error) { + var item *loggingpb.LogEntry + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *LogEntryIterator) bufLen() int { + return len(it.items) +} + +func (it *LogEntryIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} + +// MonitoredResourceDescriptorIterator manages a stream of *monitoredrespb.MonitoredResourceDescriptor. +type MonitoredResourceDescriptorIterator struct { + items []*monitoredrespb.MonitoredResourceDescriptor + pageInfo *iterator.PageInfo + nextFunc func() error + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*monitoredrespb.MonitoredResourceDescriptor, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *MonitoredResourceDescriptorIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. 
Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. +func (it *MonitoredResourceDescriptorIterator) Next() (*monitoredrespb.MonitoredResourceDescriptor, error) { + var item *monitoredrespb.MonitoredResourceDescriptor + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *MonitoredResourceDescriptorIterator) bufLen() int { + return len(it.items) +} + +func (it *MonitoredResourceDescriptorIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} + +// StringIterator manages a stream of string. +type StringIterator struct { + items []string + pageInfo *iterator.PageInfo + nextFunc func() error + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []string, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *StringIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. 
+func (it *StringIterator) Next() (string, error) { + var item string + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *StringIterator) bufLen() int { + return len(it.items) +} + +func (it *StringIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} diff --git a/vendor/cloud.google.com/go/logging/apiv2/logging_client_example_test.go b/vendor/cloud.google.com/go/logging/apiv2/logging_client_example_test.go new file mode 100644 index 00000000..6dc537f5 --- /dev/null +++ b/vendor/cloud.google.com/go/logging/apiv2/logging_client_example_test.go @@ -0,0 +1,133 @@ +// Copyright 2017, Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package logging_test + +import ( + "cloud.google.com/go/logging/apiv2" + "golang.org/x/net/context" + loggingpb "google.golang.org/genproto/googleapis/logging/v2" +) + +func ExampleNewClient() { + ctx := context.Background() + c, err := logging.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + // TODO: Use client. + _ = c +} + +func ExampleClient_DeleteLog() { + ctx := context.Background() + c, err := logging.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &loggingpb.DeleteLogRequest{ + // TODO: Fill request struct fields. + } + err = c.DeleteLog(ctx, req) + if err != nil { + // TODO: Handle error. 
+ } +} + +func ExampleClient_WriteLogEntries() { + ctx := context.Background() + c, err := logging.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &loggingpb.WriteLogEntriesRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.WriteLogEntries(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleClient_ListLogEntries() { + ctx := context.Background() + c, err := logging.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &loggingpb.ListLogEntriesRequest{ + // TODO: Fill request struct fields. + } + it := c.ListLogEntries(ctx, req) + for { + resp, err := it.Next() + if err != nil { + // TODO: Handle error. + break + } + // TODO: Use resp. + _ = resp + } +} + +func ExampleClient_ListMonitoredResourceDescriptors() { + ctx := context.Background() + c, err := logging.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &loggingpb.ListMonitoredResourceDescriptorsRequest{ + // TODO: Fill request struct fields. + } + it := c.ListMonitoredResourceDescriptors(ctx, req) + for { + resp, err := it.Next() + if err != nil { + // TODO: Handle error. + break + } + // TODO: Use resp. + _ = resp + } +} + +func ExampleClient_ListLogs() { + ctx := context.Background() + c, err := logging.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &loggingpb.ListLogsRequest{ + // TODO: Fill request struct fields. + } + it := c.ListLogs(ctx, req) + for { + resp, err := it.Next() + if err != nil { + // TODO: Handle error. + break + } + // TODO: Use resp. + _ = resp + } +} diff --git a/vendor/cloud.google.com/go/logging/apiv2/metrics_client.go b/vendor/cloud.google.com/go/logging/apiv2/metrics_client.go new file mode 100644 index 00000000..cfb3113f --- /dev/null +++ b/vendor/cloud.google.com/go/logging/apiv2/metrics_client.go @@ -0,0 +1,302 @@ +// Copyright 2017, Google Inc. All rights reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package logging + +import ( + "math" + "time" + + "cloud.google.com/go/internal/version" + gax "github.com/googleapis/gax-go" + "golang.org/x/net/context" + "google.golang.org/api/iterator" + "google.golang.org/api/option" + "google.golang.org/api/transport" + loggingpb "google.golang.org/genproto/googleapis/logging/v2" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" +) + +var ( + metricsProjectPathTemplate = gax.MustCompilePathTemplate("projects/{project}") + metricsMetricPathTemplate = gax.MustCompilePathTemplate("projects/{project}/metrics/{metric}") +) + +// MetricsCallOptions contains the retry settings for each method of MetricsClient. 
+type MetricsCallOptions struct { + ListLogMetrics []gax.CallOption + GetLogMetric []gax.CallOption + CreateLogMetric []gax.CallOption + UpdateLogMetric []gax.CallOption + DeleteLogMetric []gax.CallOption +} + +func defaultMetricsClientOptions() []option.ClientOption { + return []option.ClientOption{ + option.WithEndpoint("logging.googleapis.com:443"), + option.WithScopes( + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/logging.admin", + "https://www.googleapis.com/auth/logging.read", + "https://www.googleapis.com/auth/logging.write", + ), + } +} + +func defaultMetricsCallOptions() *MetricsCallOptions { + retry := map[[2]string][]gax.CallOption{ + {"default", "idempotent"}: { + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 1000 * time.Millisecond, + Multiplier: 1.2, + }) + }), + }, + {"default", "non_idempotent"}: { + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 1000 * time.Millisecond, + Multiplier: 1.2, + }) + }), + }, + } + return &MetricsCallOptions{ + ListLogMetrics: retry[[2]string{"default", "idempotent"}], + GetLogMetric: retry[[2]string{"default", "idempotent"}], + CreateLogMetric: retry[[2]string{"default", "non_idempotent"}], + UpdateLogMetric: retry[[2]string{"default", "non_idempotent"}], + DeleteLogMetric: retry[[2]string{"default", "idempotent"}], + } +} + +// MetricsClient is a client for interacting with Stackdriver Logging API. +type MetricsClient struct { + // The connection to the service. + conn *grpc.ClientConn + + // The gRPC API client. + metricsClient loggingpb.MetricsServiceV2Client + + // The call options for this service. + CallOptions *MetricsCallOptions + + // The metadata to be sent with each request. 
+ xGoogHeader string +} + +// NewMetricsClient creates a new metrics service v2 client. +// +// Service for configuring logs-based metrics. +func NewMetricsClient(ctx context.Context, opts ...option.ClientOption) (*MetricsClient, error) { + conn, err := transport.DialGRPC(ctx, append(defaultMetricsClientOptions(), opts...)...) + if err != nil { + return nil, err + } + c := &MetricsClient{ + conn: conn, + CallOptions: defaultMetricsCallOptions(), + + metricsClient: loggingpb.NewMetricsServiceV2Client(conn), + } + c.SetGoogleClientInfo() + return c, nil +} + +// Connection returns the client's connection to the API service. +func (c *MetricsClient) Connection() *grpc.ClientConn { + return c.conn +} + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *MetricsClient) Close() error { + return c.conn.Close() +} + +// SetGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *MetricsClient) SetGoogleClientInfo(keyval ...string) { + kv := append([]string{"gl-go", version.Go()}, keyval...) + kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", "") + c.xGoogHeader = gax.XGoogHeader(kv...) +} + +// MetricsProjectPath returns the path for the project resource. +func MetricsProjectPath(project string) string { + path, err := metricsProjectPathTemplate.Render(map[string]string{ + "project": project, + }) + if err != nil { + panic(err) + } + return path +} + +// MetricsMetricPath returns the path for the metric resource. +func MetricsMetricPath(project, metric string) string { + path, err := metricsMetricPathTemplate.Render(map[string]string{ + "project": project, + "metric": metric, + }) + if err != nil { + panic(err) + } + return path +} + +// ListLogMetrics lists logs-based metrics. 
+func (c *MetricsClient) ListLogMetrics(ctx context.Context, req *loggingpb.ListLogMetricsRequest) *LogMetricIterator { + ctx = insertXGoog(ctx, c.xGoogHeader) + it := &LogMetricIterator{} + it.InternalFetch = func(pageSize int, pageToken string) ([]*loggingpb.LogMetric, string, error) { + var resp *loggingpb.ListLogMetricsResponse + req.PageToken = pageToken + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context) error { + var err error + resp, err = c.metricsClient.ListLogMetrics(ctx, req) + return err + }, c.CallOptions.ListLogMetrics...) + if err != nil { + return nil, "", err + } + return resp.Metrics, resp.NextPageToken, nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + return it +} + +// GetLogMetric gets a logs-based metric. +func (c *MetricsClient) GetLogMetric(ctx context.Context, req *loggingpb.GetLogMetricRequest) (*loggingpb.LogMetric, error) { + ctx = insertXGoog(ctx, c.xGoogHeader) + var resp *loggingpb.LogMetric + err := gax.Invoke(ctx, func(ctx context.Context) error { + var err error + resp, err = c.metricsClient.GetLogMetric(ctx, req) + return err + }, c.CallOptions.GetLogMetric...) + if err != nil { + return nil, err + } + return resp, nil +} + +// CreateLogMetric creates a logs-based metric. 
+func (c *MetricsClient) CreateLogMetric(ctx context.Context, req *loggingpb.CreateLogMetricRequest) (*loggingpb.LogMetric, error) { + ctx = insertXGoog(ctx, c.xGoogHeader) + var resp *loggingpb.LogMetric + err := gax.Invoke(ctx, func(ctx context.Context) error { + var err error + resp, err = c.metricsClient.CreateLogMetric(ctx, req) + return err + }, c.CallOptions.CreateLogMetric...) + if err != nil { + return nil, err + } + return resp, nil +} + +// UpdateLogMetric creates or updates a logs-based metric. +func (c *MetricsClient) UpdateLogMetric(ctx context.Context, req *loggingpb.UpdateLogMetricRequest) (*loggingpb.LogMetric, error) { + ctx = insertXGoog(ctx, c.xGoogHeader) + var resp *loggingpb.LogMetric + err := gax.Invoke(ctx, func(ctx context.Context) error { + var err error + resp, err = c.metricsClient.UpdateLogMetric(ctx, req) + return err + }, c.CallOptions.UpdateLogMetric...) + if err != nil { + return nil, err + } + return resp, nil +} + +// DeleteLogMetric deletes a logs-based metric. +func (c *MetricsClient) DeleteLogMetric(ctx context.Context, req *loggingpb.DeleteLogMetricRequest) error { + ctx = insertXGoog(ctx, c.xGoogHeader) + err := gax.Invoke(ctx, func(ctx context.Context) error { + var err error + _, err = c.metricsClient.DeleteLogMetric(ctx, req) + return err + }, c.CallOptions.DeleteLogMetric...) + return err +} + +// LogMetricIterator manages a stream of *loggingpb.LogMetric. +type LogMetricIterator struct { + items []*loggingpb.LogMetric + pageInfo *iterator.PageInfo + nextFunc func() error + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. 
+ InternalFetch func(pageSize int, pageToken string) (results []*loggingpb.LogMetric, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *LogMetricIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. +func (it *LogMetricIterator) Next() (*loggingpb.LogMetric, error) { + var item *loggingpb.LogMetric + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *LogMetricIterator) bufLen() int { + return len(it.items) +} + +func (it *LogMetricIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} diff --git a/vendor/cloud.google.com/go/logging/apiv2/metrics_client_example_test.go b/vendor/cloud.google.com/go/logging/apiv2/metrics_client_example_test.go new file mode 100644 index 00000000..c9241204 --- /dev/null +++ b/vendor/cloud.google.com/go/logging/apiv2/metrics_client_example_test.go @@ -0,0 +1,125 @@ +// Copyright 2017, Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. 
+ +package logging_test + +import ( + "cloud.google.com/go/logging/apiv2" + "golang.org/x/net/context" + loggingpb "google.golang.org/genproto/googleapis/logging/v2" +) + +func ExampleNewMetricsClient() { + ctx := context.Background() + c, err := logging.NewMetricsClient(ctx) + if err != nil { + // TODO: Handle error. + } + // TODO: Use client. + _ = c +} + +func ExampleMetricsClient_ListLogMetrics() { + ctx := context.Background() + c, err := logging.NewMetricsClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &loggingpb.ListLogMetricsRequest{ + // TODO: Fill request struct fields. + } + it := c.ListLogMetrics(ctx, req) + for { + resp, err := it.Next() + if err != nil { + // TODO: Handle error. + break + } + // TODO: Use resp. + _ = resp + } +} + +func ExampleMetricsClient_GetLogMetric() { + ctx := context.Background() + c, err := logging.NewMetricsClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &loggingpb.GetLogMetricRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.GetLogMetric(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleMetricsClient_CreateLogMetric() { + ctx := context.Background() + c, err := logging.NewMetricsClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &loggingpb.CreateLogMetricRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.CreateLogMetric(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleMetricsClient_UpdateLogMetric() { + ctx := context.Background() + c, err := logging.NewMetricsClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &loggingpb.UpdateLogMetricRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.UpdateLogMetric(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. 
+ _ = resp +} + +func ExampleMetricsClient_DeleteLogMetric() { + ctx := context.Background() + c, err := logging.NewMetricsClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &loggingpb.DeleteLogMetricRequest{ + // TODO: Fill request struct fields. + } + err = c.DeleteLogMetric(ctx, req) + if err != nil { + // TODO: Handle error. + } +} diff --git a/vendor/cloud.google.com/go/logging/apiv2/mock_test.go b/vendor/cloud.google.com/go/logging/apiv2/mock_test.go new file mode 100644 index 00000000..032982c3 --- /dev/null +++ b/vendor/cloud.google.com/go/logging/apiv2/mock_test.go @@ -0,0 +1,1179 @@ +// Copyright 2017, Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package logging + +import ( + google_protobuf "github.com/golang/protobuf/ptypes/empty" + monitoredrespb "google.golang.org/genproto/googleapis/api/monitoredres" + loggingpb "google.golang.org/genproto/googleapis/logging/v2" +) + +import ( + "flag" + "io" + "log" + "net" + "os" + "testing" + + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes" + "golang.org/x/net/context" + "google.golang.org/api/option" + status "google.golang.org/genproto/googleapis/rpc/status" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" +) + +var _ = io.EOF +var _ = ptypes.MarshalAny +var _ status.Status + +type mockLoggingServer struct { + // Embed for forward compatibility. 
+ // Tests will keep working if more methods are added + // in the future. + loggingpb.LoggingServiceV2Server + + reqs []proto.Message + + // If set, all calls return this error. + err error + + // responses to return if err == nil + resps []proto.Message +} + +func (s *mockLoggingServer) DeleteLog(_ context.Context, req *loggingpb.DeleteLogRequest) (*google_protobuf.Empty, error) { + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*google_protobuf.Empty), nil +} + +func (s *mockLoggingServer) WriteLogEntries(_ context.Context, req *loggingpb.WriteLogEntriesRequest) (*loggingpb.WriteLogEntriesResponse, error) { + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*loggingpb.WriteLogEntriesResponse), nil +} + +func (s *mockLoggingServer) ListLogEntries(_ context.Context, req *loggingpb.ListLogEntriesRequest) (*loggingpb.ListLogEntriesResponse, error) { + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*loggingpb.ListLogEntriesResponse), nil +} + +func (s *mockLoggingServer) ListMonitoredResourceDescriptors(_ context.Context, req *loggingpb.ListMonitoredResourceDescriptorsRequest) (*loggingpb.ListMonitoredResourceDescriptorsResponse, error) { + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*loggingpb.ListMonitoredResourceDescriptorsResponse), nil +} + +func (s *mockLoggingServer) ListLogs(_ context.Context, req *loggingpb.ListLogsRequest) (*loggingpb.ListLogsResponse, error) { + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*loggingpb.ListLogsResponse), nil +} + +type mockConfigServer struct { + // Embed for forward compatibility. + // Tests will keep working if more methods are added + // in the future. + loggingpb.ConfigServiceV2Server + + reqs []proto.Message + + // If set, all calls return this error. 
+ err error + + // responses to return if err == nil + resps []proto.Message +} + +func (s *mockConfigServer) ListSinks(_ context.Context, req *loggingpb.ListSinksRequest) (*loggingpb.ListSinksResponse, error) { + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*loggingpb.ListSinksResponse), nil +} + +func (s *mockConfigServer) GetSink(_ context.Context, req *loggingpb.GetSinkRequest) (*loggingpb.LogSink, error) { + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*loggingpb.LogSink), nil +} + +func (s *mockConfigServer) CreateSink(_ context.Context, req *loggingpb.CreateSinkRequest) (*loggingpb.LogSink, error) { + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*loggingpb.LogSink), nil +} + +func (s *mockConfigServer) UpdateSink(_ context.Context, req *loggingpb.UpdateSinkRequest) (*loggingpb.LogSink, error) { + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*loggingpb.LogSink), nil +} + +func (s *mockConfigServer) DeleteSink(_ context.Context, req *loggingpb.DeleteSinkRequest) (*google_protobuf.Empty, error) { + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*google_protobuf.Empty), nil +} + +type mockMetricsServer struct { + // Embed for forward compatibility. + // Tests will keep working if more methods are added + // in the future. + loggingpb.MetricsServiceV2Server + + reqs []proto.Message + + // If set, all calls return this error. 
+ err error + + // responses to return if err == nil + resps []proto.Message +} + +func (s *mockMetricsServer) ListLogMetrics(_ context.Context, req *loggingpb.ListLogMetricsRequest) (*loggingpb.ListLogMetricsResponse, error) { + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*loggingpb.ListLogMetricsResponse), nil +} + +func (s *mockMetricsServer) GetLogMetric(_ context.Context, req *loggingpb.GetLogMetricRequest) (*loggingpb.LogMetric, error) { + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*loggingpb.LogMetric), nil +} + +func (s *mockMetricsServer) CreateLogMetric(_ context.Context, req *loggingpb.CreateLogMetricRequest) (*loggingpb.LogMetric, error) { + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*loggingpb.LogMetric), nil +} + +func (s *mockMetricsServer) UpdateLogMetric(_ context.Context, req *loggingpb.UpdateLogMetricRequest) (*loggingpb.LogMetric, error) { + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*loggingpb.LogMetric), nil +} + +func (s *mockMetricsServer) DeleteLogMetric(_ context.Context, req *loggingpb.DeleteLogMetricRequest) (*google_protobuf.Empty, error) { + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*google_protobuf.Empty), nil +} + +// clientOpt is the option tests should use to connect to the test server. +// It is initialized by TestMain. 
+var clientOpt option.ClientOption + +var ( + mockLogging mockLoggingServer + mockConfig mockConfigServer + mockMetrics mockMetricsServer +) + +func TestMain(m *testing.M) { + flag.Parse() + + serv := grpc.NewServer() + loggingpb.RegisterLoggingServiceV2Server(serv, &mockLogging) + loggingpb.RegisterConfigServiceV2Server(serv, &mockConfig) + loggingpb.RegisterMetricsServiceV2Server(serv, &mockMetrics) + + lis, err := net.Listen("tcp", "localhost:0") + if err != nil { + log.Fatal(err) + } + go serv.Serve(lis) + + conn, err := grpc.Dial(lis.Addr().String(), grpc.WithInsecure()) + if err != nil { + log.Fatal(err) + } + clientOpt = option.WithGRPCConn(conn) + + os.Exit(m.Run()) +} + +func TestLoggingServiceV2DeleteLog(t *testing.T) { + var expectedResponse *google_protobuf.Empty = &google_protobuf.Empty{} + + mockLogging.err = nil + mockLogging.reqs = nil + + mockLogging.resps = append(mockLogging.resps[:0], expectedResponse) + + var formattedLogName string = LoggingLogPath("[PROJECT]", "[LOG]") + var request = &loggingpb.DeleteLogRequest{ + LogName: formattedLogName, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + err = c.DeleteLog(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockLogging.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + +} + +func TestLoggingServiceV2DeleteLogError(t *testing.T) { + errCode := codes.Internal + mockLogging.err = grpc.Errorf(errCode, "test error") + + var formattedLogName string = LoggingLogPath("[PROJECT]", "[LOG]") + var request = &loggingpb.DeleteLogRequest{ + LogName: formattedLogName, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + err = c.DeleteLog(context.Background(), request) + + if c := grpc.Code(err); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } +} +func TestLoggingServiceV2WriteLogEntries(t 
*testing.T) { + var expectedResponse *loggingpb.WriteLogEntriesResponse = &loggingpb.WriteLogEntriesResponse{} + + mockLogging.err = nil + mockLogging.reqs = nil + + mockLogging.resps = append(mockLogging.resps[:0], expectedResponse) + + var entries []*loggingpb.LogEntry = nil + var request = &loggingpb.WriteLogEntriesRequest{ + Entries: entries, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.WriteLogEntries(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockLogging.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestLoggingServiceV2WriteLogEntriesError(t *testing.T) { + errCode := codes.Internal + mockLogging.err = grpc.Errorf(errCode, "test error") + + var entries []*loggingpb.LogEntry = nil + var request = &loggingpb.WriteLogEntriesRequest{ + Entries: entries, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.WriteLogEntries(context.Background(), request) + + if c := grpc.Code(err); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestLoggingServiceV2ListLogEntries(t *testing.T) { + var nextPageToken string = "" + var entriesElement *loggingpb.LogEntry = &loggingpb.LogEntry{} + var entries = []*loggingpb.LogEntry{entriesElement} + var expectedResponse = &loggingpb.ListLogEntriesResponse{ + NextPageToken: nextPageToken, + Entries: entries, + } + + mockLogging.err = nil + mockLogging.reqs = nil + + mockLogging.resps = append(mockLogging.resps[:0], expectedResponse) + + var resourceNames []string = nil + var request = &loggingpb.ListLogEntriesRequest{ + ResourceNames: resourceNames, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { 
+ t.Fatal(err) + } + + resp, err := c.ListLogEntries(context.Background(), request).Next() + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockLogging.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + want := (interface{})(expectedResponse.Entries[0]) + got := (interface{})(resp) + var ok bool + + switch want := (want).(type) { + case proto.Message: + ok = proto.Equal(want, got.(proto.Message)) + default: + ok = want == got + } + if !ok { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestLoggingServiceV2ListLogEntriesError(t *testing.T) { + errCode := codes.Internal + mockLogging.err = grpc.Errorf(errCode, "test error") + + var resourceNames []string = nil + var request = &loggingpb.ListLogEntriesRequest{ + ResourceNames: resourceNames, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListLogEntries(context.Background(), request).Next() + + if c := grpc.Code(err); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestLoggingServiceV2ListMonitoredResourceDescriptors(t *testing.T) { + var nextPageToken string = "" + var resourceDescriptorsElement *monitoredrespb.MonitoredResourceDescriptor = &monitoredrespb.MonitoredResourceDescriptor{} + var resourceDescriptors = []*monitoredrespb.MonitoredResourceDescriptor{resourceDescriptorsElement} + var expectedResponse = &loggingpb.ListMonitoredResourceDescriptorsResponse{ + NextPageToken: nextPageToken, + ResourceDescriptors: resourceDescriptors, + } + + mockLogging.err = nil + mockLogging.reqs = nil + + mockLogging.resps = append(mockLogging.resps[:0], expectedResponse) + + var request *loggingpb.ListMonitoredResourceDescriptorsRequest = &loggingpb.ListMonitoredResourceDescriptorsRequest{} + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := 
c.ListMonitoredResourceDescriptors(context.Background(), request).Next() + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockLogging.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + want := (interface{})(expectedResponse.ResourceDescriptors[0]) + got := (interface{})(resp) + var ok bool + + switch want := (want).(type) { + case proto.Message: + ok = proto.Equal(want, got.(proto.Message)) + default: + ok = want == got + } + if !ok { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestLoggingServiceV2ListMonitoredResourceDescriptorsError(t *testing.T) { + errCode := codes.Internal + mockLogging.err = grpc.Errorf(errCode, "test error") + + var request *loggingpb.ListMonitoredResourceDescriptorsRequest = &loggingpb.ListMonitoredResourceDescriptorsRequest{} + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListMonitoredResourceDescriptors(context.Background(), request).Next() + + if c := grpc.Code(err); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestLoggingServiceV2ListLogs(t *testing.T) { + var nextPageToken string = "" + var logNamesElement string = "logNamesElement-1079688374" + var logNames = []string{logNamesElement} + var expectedResponse = &loggingpb.ListLogsResponse{ + NextPageToken: nextPageToken, + LogNames: logNames, + } + + mockLogging.err = nil + mockLogging.reqs = nil + + mockLogging.resps = append(mockLogging.resps[:0], expectedResponse) + + var formattedParent string = LoggingProjectPath("[PROJECT]") + var request = &loggingpb.ListLogsRequest{ + Parent: formattedParent, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListLogs(context.Background(), request).Next() + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockLogging.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong 
request %q, want %q", got, want) + } + + want := (interface{})(expectedResponse.LogNames[0]) + got := (interface{})(resp) + var ok bool + + switch want := (want).(type) { + case proto.Message: + ok = proto.Equal(want, got.(proto.Message)) + default: + ok = want == got + } + if !ok { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestLoggingServiceV2ListLogsError(t *testing.T) { + errCode := codes.Internal + mockLogging.err = grpc.Errorf(errCode, "test error") + + var formattedParent string = LoggingProjectPath("[PROJECT]") + var request = &loggingpb.ListLogsRequest{ + Parent: formattedParent, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListLogs(context.Background(), request).Next() + + if c := grpc.Code(err); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestConfigServiceV2ListSinks(t *testing.T) { + var nextPageToken string = "" + var sinksElement *loggingpb.LogSink = &loggingpb.LogSink{} + var sinks = []*loggingpb.LogSink{sinksElement} + var expectedResponse = &loggingpb.ListSinksResponse{ + NextPageToken: nextPageToken, + Sinks: sinks, + } + + mockConfig.err = nil + mockConfig.reqs = nil + + mockConfig.resps = append(mockConfig.resps[:0], expectedResponse) + + var formattedParent string = ConfigProjectPath("[PROJECT]") + var request = &loggingpb.ListSinksRequest{ + Parent: formattedParent, + } + + c, err := NewConfigClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListSinks(context.Background(), request).Next() + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockConfig.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + want := (interface{})(expectedResponse.Sinks[0]) + got := (interface{})(resp) + var ok bool + + switch want := (want).(type) { + case proto.Message: + ok = proto.Equal(want, got.(proto.Message)) + 
default: + ok = want == got + } + if !ok { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestConfigServiceV2ListSinksError(t *testing.T) { + errCode := codes.Internal + mockConfig.err = grpc.Errorf(errCode, "test error") + + var formattedParent string = ConfigProjectPath("[PROJECT]") + var request = &loggingpb.ListSinksRequest{ + Parent: formattedParent, + } + + c, err := NewConfigClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListSinks(context.Background(), request).Next() + + if c := grpc.Code(err); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestConfigServiceV2GetSink(t *testing.T) { + var name string = "name3373707" + var destination string = "destination-1429847026" + var filter string = "filter-1274492040" + var writerIdentity string = "writerIdentity775638794" + var expectedResponse = &loggingpb.LogSink{ + Name: name, + Destination: destination, + Filter: filter, + WriterIdentity: writerIdentity, + } + + mockConfig.err = nil + mockConfig.reqs = nil + + mockConfig.resps = append(mockConfig.resps[:0], expectedResponse) + + var formattedSinkName string = ConfigSinkPath("[PROJECT]", "[SINK]") + var request = &loggingpb.GetSinkRequest{ + SinkName: formattedSinkName, + } + + c, err := NewConfigClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.GetSink(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockConfig.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestConfigServiceV2GetSinkError(t *testing.T) { + errCode := codes.Internal + mockConfig.err = grpc.Errorf(errCode, "test error") + + var formattedSinkName string = ConfigSinkPath("[PROJECT]", "[SINK]") + var request = 
&loggingpb.GetSinkRequest{ + SinkName: formattedSinkName, + } + + c, err := NewConfigClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.GetSink(context.Background(), request) + + if c := grpc.Code(err); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestConfigServiceV2CreateSink(t *testing.T) { + var name string = "name3373707" + var destination string = "destination-1429847026" + var filter string = "filter-1274492040" + var writerIdentity string = "writerIdentity775638794" + var expectedResponse = &loggingpb.LogSink{ + Name: name, + Destination: destination, + Filter: filter, + WriterIdentity: writerIdentity, + } + + mockConfig.err = nil + mockConfig.reqs = nil + + mockConfig.resps = append(mockConfig.resps[:0], expectedResponse) + + var formattedParent string = ConfigProjectPath("[PROJECT]") + var sink *loggingpb.LogSink = &loggingpb.LogSink{} + var request = &loggingpb.CreateSinkRequest{ + Parent: formattedParent, + Sink: sink, + } + + c, err := NewConfigClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.CreateSink(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockConfig.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestConfigServiceV2CreateSinkError(t *testing.T) { + errCode := codes.Internal + mockConfig.err = grpc.Errorf(errCode, "test error") + + var formattedParent string = ConfigProjectPath("[PROJECT]") + var sink *loggingpb.LogSink = &loggingpb.LogSink{} + var request = &loggingpb.CreateSinkRequest{ + Parent: formattedParent, + Sink: sink, + } + + c, err := NewConfigClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := 
c.CreateSink(context.Background(), request) + + if c := grpc.Code(err); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestConfigServiceV2UpdateSink(t *testing.T) { + var name string = "name3373707" + var destination string = "destination-1429847026" + var filter string = "filter-1274492040" + var writerIdentity string = "writerIdentity775638794" + var expectedResponse = &loggingpb.LogSink{ + Name: name, + Destination: destination, + Filter: filter, + WriterIdentity: writerIdentity, + } + + mockConfig.err = nil + mockConfig.reqs = nil + + mockConfig.resps = append(mockConfig.resps[:0], expectedResponse) + + var formattedSinkName string = ConfigSinkPath("[PROJECT]", "[SINK]") + var sink *loggingpb.LogSink = &loggingpb.LogSink{} + var request = &loggingpb.UpdateSinkRequest{ + SinkName: formattedSinkName, + Sink: sink, + } + + c, err := NewConfigClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.UpdateSink(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockConfig.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestConfigServiceV2UpdateSinkError(t *testing.T) { + errCode := codes.Internal + mockConfig.err = grpc.Errorf(errCode, "test error") + + var formattedSinkName string = ConfigSinkPath("[PROJECT]", "[SINK]") + var sink *loggingpb.LogSink = &loggingpb.LogSink{} + var request = &loggingpb.UpdateSinkRequest{ + SinkName: formattedSinkName, + Sink: sink, + } + + c, err := NewConfigClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.UpdateSink(context.Background(), request) + + if c := grpc.Code(err); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func 
TestConfigServiceV2DeleteSink(t *testing.T) { + var expectedResponse *google_protobuf.Empty = &google_protobuf.Empty{} + + mockConfig.err = nil + mockConfig.reqs = nil + + mockConfig.resps = append(mockConfig.resps[:0], expectedResponse) + + var formattedSinkName string = ConfigSinkPath("[PROJECT]", "[SINK]") + var request = &loggingpb.DeleteSinkRequest{ + SinkName: formattedSinkName, + } + + c, err := NewConfigClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + err = c.DeleteSink(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockConfig.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + +} + +func TestConfigServiceV2DeleteSinkError(t *testing.T) { + errCode := codes.Internal + mockConfig.err = grpc.Errorf(errCode, "test error") + + var formattedSinkName string = ConfigSinkPath("[PROJECT]", "[SINK]") + var request = &loggingpb.DeleteSinkRequest{ + SinkName: formattedSinkName, + } + + c, err := NewConfigClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + err = c.DeleteSink(context.Background(), request) + + if c := grpc.Code(err); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } +} +func TestMetricsServiceV2ListLogMetrics(t *testing.T) { + var nextPageToken string = "" + var metricsElement *loggingpb.LogMetric = &loggingpb.LogMetric{} + var metrics = []*loggingpb.LogMetric{metricsElement} + var expectedResponse = &loggingpb.ListLogMetricsResponse{ + NextPageToken: nextPageToken, + Metrics: metrics, + } + + mockMetrics.err = nil + mockMetrics.reqs = nil + + mockMetrics.resps = append(mockMetrics.resps[:0], expectedResponse) + + var formattedParent string = MetricsProjectPath("[PROJECT]") + var request = &loggingpb.ListLogMetricsRequest{ + Parent: formattedParent, + } + + c, err := NewMetricsClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := 
c.ListLogMetrics(context.Background(), request).Next() + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockMetrics.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + want := (interface{})(expectedResponse.Metrics[0]) + got := (interface{})(resp) + var ok bool + + switch want := (want).(type) { + case proto.Message: + ok = proto.Equal(want, got.(proto.Message)) + default: + ok = want == got + } + if !ok { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestMetricsServiceV2ListLogMetricsError(t *testing.T) { + errCode := codes.Internal + mockMetrics.err = grpc.Errorf(errCode, "test error") + + var formattedParent string = MetricsProjectPath("[PROJECT]") + var request = &loggingpb.ListLogMetricsRequest{ + Parent: formattedParent, + } + + c, err := NewMetricsClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListLogMetrics(context.Background(), request).Next() + + if c := grpc.Code(err); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestMetricsServiceV2GetLogMetric(t *testing.T) { + var name string = "name3373707" + var description string = "description-1724546052" + var filter string = "filter-1274492040" + var expectedResponse = &loggingpb.LogMetric{ + Name: name, + Description: description, + Filter: filter, + } + + mockMetrics.err = nil + mockMetrics.reqs = nil + + mockMetrics.resps = append(mockMetrics.resps[:0], expectedResponse) + + var formattedMetricName string = MetricsMetricPath("[PROJECT]", "[METRIC]") + var request = &loggingpb.GetLogMetricRequest{ + MetricName: formattedMetricName, + } + + c, err := NewMetricsClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.GetLogMetric(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockMetrics.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request 
%q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestMetricsServiceV2GetLogMetricError(t *testing.T) { + errCode := codes.Internal + mockMetrics.err = grpc.Errorf(errCode, "test error") + + var formattedMetricName string = MetricsMetricPath("[PROJECT]", "[METRIC]") + var request = &loggingpb.GetLogMetricRequest{ + MetricName: formattedMetricName, + } + + c, err := NewMetricsClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.GetLogMetric(context.Background(), request) + + if c := grpc.Code(err); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestMetricsServiceV2CreateLogMetric(t *testing.T) { + var name string = "name3373707" + var description string = "description-1724546052" + var filter string = "filter-1274492040" + var expectedResponse = &loggingpb.LogMetric{ + Name: name, + Description: description, + Filter: filter, + } + + mockMetrics.err = nil + mockMetrics.reqs = nil + + mockMetrics.resps = append(mockMetrics.resps[:0], expectedResponse) + + var formattedParent string = MetricsProjectPath("[PROJECT]") + var metric *loggingpb.LogMetric = &loggingpb.LogMetric{} + var request = &loggingpb.CreateLogMetricRequest{ + Parent: formattedParent, + Metric: metric, + } + + c, err := NewMetricsClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.CreateLogMetric(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockMetrics.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestMetricsServiceV2CreateLogMetricError(t *testing.T) { + errCode := codes.Internal + mockMetrics.err = grpc.Errorf(errCode, "test 
error") + + var formattedParent string = MetricsProjectPath("[PROJECT]") + var metric *loggingpb.LogMetric = &loggingpb.LogMetric{} + var request = &loggingpb.CreateLogMetricRequest{ + Parent: formattedParent, + Metric: metric, + } + + c, err := NewMetricsClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.CreateLogMetric(context.Background(), request) + + if c := grpc.Code(err); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestMetricsServiceV2UpdateLogMetric(t *testing.T) { + var name string = "name3373707" + var description string = "description-1724546052" + var filter string = "filter-1274492040" + var expectedResponse = &loggingpb.LogMetric{ + Name: name, + Description: description, + Filter: filter, + } + + mockMetrics.err = nil + mockMetrics.reqs = nil + + mockMetrics.resps = append(mockMetrics.resps[:0], expectedResponse) + + var formattedMetricName string = MetricsMetricPath("[PROJECT]", "[METRIC]") + var metric *loggingpb.LogMetric = &loggingpb.LogMetric{} + var request = &loggingpb.UpdateLogMetricRequest{ + MetricName: formattedMetricName, + Metric: metric, + } + + c, err := NewMetricsClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.UpdateLogMetric(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockMetrics.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestMetricsServiceV2UpdateLogMetricError(t *testing.T) { + errCode := codes.Internal + mockMetrics.err = grpc.Errorf(errCode, "test error") + + var formattedMetricName string = MetricsMetricPath("[PROJECT]", "[METRIC]") + var metric *loggingpb.LogMetric = &loggingpb.LogMetric{} + var request = &loggingpb.UpdateLogMetricRequest{ + MetricName: 
formattedMetricName, + Metric: metric, + } + + c, err := NewMetricsClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.UpdateLogMetric(context.Background(), request) + + if c := grpc.Code(err); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestMetricsServiceV2DeleteLogMetric(t *testing.T) { + var expectedResponse *google_protobuf.Empty = &google_protobuf.Empty{} + + mockMetrics.err = nil + mockMetrics.reqs = nil + + mockMetrics.resps = append(mockMetrics.resps[:0], expectedResponse) + + var formattedMetricName string = MetricsMetricPath("[PROJECT]", "[METRIC]") + var request = &loggingpb.DeleteLogMetricRequest{ + MetricName: formattedMetricName, + } + + c, err := NewMetricsClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + err = c.DeleteLogMetric(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockMetrics.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + +} + +func TestMetricsServiceV2DeleteLogMetricError(t *testing.T) { + errCode := codes.Internal + mockMetrics.err = grpc.Errorf(errCode, "test error") + + var formattedMetricName string = MetricsMetricPath("[PROJECT]", "[METRIC]") + var request = &loggingpb.DeleteLogMetricRequest{ + MetricName: formattedMetricName, + } + + c, err := NewMetricsClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + err = c.DeleteLogMetric(context.Background(), request) + + if c := grpc.Code(err); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } +} diff --git a/vendor/cloud.google.com/go/logging/doc.go b/vendor/cloud.google.com/go/logging/doc.go new file mode 100644 index 00000000..32ca717f --- /dev/null +++ b/vendor/cloud.google.com/go/logging/doc.go @@ -0,0 +1,90 @@ +// Copyright 2016 Google Inc. All Rights Reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/* +Package logging contains a Stackdriver Logging client suitable for writing logs. +For reading logs, and working with sinks, metrics and monitored resources, +see package cloud.google.com/go/logging/logadmin. + +This client uses Logging API v2. +See https://cloud.google.com/logging/docs/api/v2/ for an introduction to the API. + + +Note: This package is in beta. Some backwards-incompatible changes may occur. + + +Creating a Client + +Use a Client to interact with the Stackdriver Logging API. + + // Create a Client + ctx := context.Background() + client, err := logging.NewClient(ctx, "my-project") + if err != nil { + // TODO: Handle error. + } + + +Basic Usage + +For most use-cases, you'll want to add log entries to a buffer to be periodically +flushed (automatically and asynchronously) to the Stackdriver Logging service. + + // Initialize a logger + lg := client.Logger("my-log") + + // Add entry to log buffer + lg.Log(logging.Entry{Payload: "something happened!"}) + + +Closing your Client + +You should call Client.Close before your program exits to flush any buffered log entries to the Stackdriver Logging service. + + // Close the client when finished. + err = client.Close() + if err != nil { + // TODO: Handle error. + } + + +Synchronous Logging + +For critical errors, you may want to send your log entries immediately. 
+LogSync is slow and will block until the log entry has been sent, so it is +not recommended for basic use. + + lg.LogSync(ctx, logging.Entry{Payload: "ALERT! Something critical happened!"}) + + +The Standard Logger Interface + +You may want use a standard log.Logger in your program. + + // stdlg implements log.Logger + stdlg := lg.StandardLogger(logging.Info) + stdlg.Println("some info") + + +Log Levels + +An Entry may have one of a number of severity levels associated with it. + + logging.Entry{ + Payload: "something terrible happened!", + Severity: logging.Critical, + } + +*/ +package logging // import "cloud.google.com/go/logging" diff --git a/vendor/cloud.google.com/go/logging/examples_test.go b/vendor/cloud.google.com/go/logging/examples_test.go new file mode 100644 index 00000000..167458da --- /dev/null +++ b/vendor/cloud.google.com/go/logging/examples_test.go @@ -0,0 +1,125 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package logging_test + +import ( + "fmt" + "os" + + "cloud.google.com/go/logging" + "golang.org/x/net/context" +) + +func ExampleNewClient() { + ctx := context.Background() + client, err := logging.NewClient(ctx, "my-project") + if err != nil { + // TODO: Handle error. + } + // Use client to manage logs, metrics and sinks. + // Close the client when finished. + if err := client.Close(); err != nil { + // TODO: Handle error. 
+ } +} + +func ExampleClient_Ping() { + ctx := context.Background() + client, err := logging.NewClient(ctx, "my-project") + if err != nil { + // TODO: Handle error. + } + if err := client.Ping(ctx); err != nil { + // TODO: Handle error. + } +} + +func ExampleNewClient_errorFunc() { + ctx := context.Background() + client, err := logging.NewClient(ctx, "my-project") + if err != nil { + // TODO: Handle error. + } + // Print all errors to stdout. + client.OnError = func(e error) { + fmt.Fprintf(os.Stdout, "logging: %v", e) + } + // Use client to manage logs, metrics and sinks. + // Close the client when finished. + if err := client.Close(); err != nil { + // TODO: Handle error. + } +} + +func ExampleClient_Logger() { + ctx := context.Background() + client, err := logging.NewClient(ctx, "my-project") + if err != nil { + // TODO: Handle error. + } + lg := client.Logger("my-log") + _ = lg // TODO: use the Logger. +} + +func ExampleLogger_LogSync() { + ctx := context.Background() + client, err := logging.NewClient(ctx, "my-project") + if err != nil { + // TODO: Handle error. + } + lg := client.Logger("my-log") + err = lg.LogSync(ctx, logging.Entry{Payload: "red alert"}) + if err != nil { + // TODO: Handle error. + } +} + +func ExampleLogger_Log() { + ctx := context.Background() + client, err := logging.NewClient(ctx, "my-project") + if err != nil { + // TODO: Handle error. + } + lg := client.Logger("my-log") + lg.Log(logging.Entry{Payload: "something happened"}) +} + +func ExampleLogger_Flush() { + ctx := context.Background() + client, err := logging.NewClient(ctx, "my-project") + if err != nil { + // TODO: Handle error. + } + lg := client.Logger("my-log") + lg.Log(logging.Entry{Payload: "something happened"}) + lg.Flush() +} + +func ExampleLogger_StandardLogger() { + ctx := context.Background() + client, err := logging.NewClient(ctx, "my-project") + if err != nil { + // TODO: Handle error. 
+ } + lg := client.Logger("my-log") + slg := lg.StandardLogger(logging.Info) + slg.Println("an informative message") +} + +func ExampleParseSeverity() { + sev := logging.ParseSeverity("ALERT") + fmt.Println(sev) + // Output: Alert +} diff --git a/vendor/cloud.google.com/go/logging/internal/common.go b/vendor/cloud.google.com/go/logging/internal/common.go new file mode 100644 index 00000000..7d8ece09 --- /dev/null +++ b/vendor/cloud.google.com/go/logging/internal/common.go @@ -0,0 +1,30 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package internal + +import ( + "fmt" + "strings" +) + +const ( + ProdAddr = "logging.googleapis.com:443" + Version = "0.2.0" +) + +func LogPath(parent, logID string) string { + logID = strings.Replace(logID, "/", "%2F", -1) + return fmt.Sprintf("%s/logs/%s", parent, logID) +} diff --git a/vendor/cloud.google.com/go/logging/internal/testing/fake.go b/vendor/cloud.google.com/go/logging/internal/testing/fake.go new file mode 100644 index 00000000..f7b702e7 --- /dev/null +++ b/vendor/cloud.google.com/go/logging/internal/testing/fake.go @@ -0,0 +1,408 @@ +/* +Copyright 2016 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package testing provides support for testing the logging client. +package testing + +import ( + "errors" + "fmt" + "regexp" + "sort" + "strconv" + "strings" + "sync" + "time" + + emptypb "github.com/golang/protobuf/ptypes/empty" + tspb "github.com/golang/protobuf/ptypes/timestamp" + + "cloud.google.com/go/internal/testutil" + context "golang.org/x/net/context" + lpb "google.golang.org/genproto/googleapis/api/label" + mrpb "google.golang.org/genproto/googleapis/api/monitoredres" + logpb "google.golang.org/genproto/googleapis/logging/v2" +) + +type loggingHandler struct { + logpb.LoggingServiceV2Server + + mu sync.Mutex + logs map[string][]*logpb.LogEntry // indexed by log name +} + +type configHandler struct { + logpb.ConfigServiceV2Server + + mu sync.Mutex + sinks map[string]*logpb.LogSink // indexed by (full) sink name +} + +type metricHandler struct { + logpb.MetricsServiceV2Server + + mu sync.Mutex + metrics map[string]*logpb.LogMetric // indexed by (full) metric name +} + +// NewServer creates a new in-memory fake server implementing the logging service. +// It returns the address of the server. 
+func NewServer() (string, error) { + srv, err := testutil.NewServer() + if err != nil { + return "", err + } + logpb.RegisterLoggingServiceV2Server(srv.Gsrv, &loggingHandler{ + logs: make(map[string][]*logpb.LogEntry), + }) + logpb.RegisterConfigServiceV2Server(srv.Gsrv, &configHandler{ + sinks: make(map[string]*logpb.LogSink), + }) + logpb.RegisterMetricsServiceV2Server(srv.Gsrv, &metricHandler{ + metrics: make(map[string]*logpb.LogMetric), + }) + srv.Start() + return srv.Addr, nil +} + +// DeleteLog deletes a log and all its log entries. The log will reappear if it +// receives new entries. +func (h *loggingHandler) DeleteLog(_ context.Context, req *logpb.DeleteLogRequest) (*emptypb.Empty, error) { + // TODO(jba): return NotFound if log isn't there? + h.mu.Lock() + defer h.mu.Unlock() + delete(h.logs, req.LogName) + return &emptypb.Empty{}, nil +} + +// The only project ID that WriteLogEntries will accept. +// Important for testing Ping. +const validProjectID = "PROJECT_ID" + +// WriteLogEntries writes log entries to Stackdriver Logging. All log entries in +// Stackdriver Logging are written by this method. +func (h *loggingHandler) WriteLogEntries(_ context.Context, req *logpb.WriteLogEntriesRequest) (*logpb.WriteLogEntriesResponse, error) { + if !strings.HasPrefix(req.LogName, "projects/"+validProjectID+"/") { + return nil, fmt.Errorf("bad project ID: %q", req.LogName) + } + // TODO(jba): support insertId? + h.mu.Lock() + defer h.mu.Unlock() + for _, e := range req.Entries { + // Assign timestamp if missing. + if e.Timestamp == nil { + e.Timestamp = &tspb.Timestamp{Seconds: time.Now().Unix(), Nanos: 0} + } + // Fill from common fields in request. + if e.LogName == "" { + e.LogName = req.LogName + } + if e.Resource == nil { + // TODO(jba): use a global one if nil? + e.Resource = req.Resource + } + for k, v := range req.Labels { + if _, ok := e.Labels[k]; !ok { + e.Labels[k] = v + } + } + + // Store by log name. 
+ h.logs[e.LogName] = append(h.logs[e.LogName], e) + } + return &logpb.WriteLogEntriesResponse{}, nil +} + +// ListLogEntries lists log entries. Use this method to retrieve log entries +// from Stackdriver Logging. +// +// This fake implementation ignores project IDs. It does not support full filtering, only +// expressions of the form "logName = NAME". +func (h *loggingHandler) ListLogEntries(_ context.Context, req *logpb.ListLogEntriesRequest) (*logpb.ListLogEntriesResponse, error) { + h.mu.Lock() + defer h.mu.Unlock() + entries, err := h.filterEntries(req.Filter) + if err != nil { + return nil, err + } + if err = sortEntries(entries, req.OrderBy); err != nil { + return nil, err + } + + from, to, nextPageToken, err := getPage(int(req.PageSize), req.PageToken, len(entries)) + if err != nil { + return nil, err + } + return &logpb.ListLogEntriesResponse{ + Entries: entries[from:to], + NextPageToken: nextPageToken, + }, nil +} + +// getPage converts an incoming page size and token from an RPC request into +// slice bounds and the outgoing next-page token. +// +// getPage assumes that the complete, unpaginated list of items exists as a +// single slice. In addition to the page size and token, getPage needs the +// length of that slice. +// +// getPage's first two return values should be used to construct a sub-slice of +// the complete, unpaginated slice. E.g. if the complete slice is s, then +// s[from:to] is the desired page. Its third return value should be set as the +// NextPageToken field of the RPC response. 
+func getPage(pageSize int, pageToken string, length int) (from, to int, nextPageToken string, err error) { + from, to = 0, length + if pageToken != "" { + from, err = strconv.Atoi(pageToken) + if err != nil { + return 0, 0, "", invalidArgument("bad page token") + } + if from >= length { + return length, length, "", nil + } + } + if pageSize > 0 && from+pageSize < length { + to = from + pageSize + nextPageToken = strconv.Itoa(to) + } + return from, to, nextPageToken, nil +} + +func (h *loggingHandler) filterEntries(filter string) ([]*logpb.LogEntry, error) { + logName, err := parseFilter(filter) + if err != nil { + return nil, err + } + if logName != "" { + return h.logs[logName], nil + } + var entries []*logpb.LogEntry + for _, es := range h.logs { + entries = append(entries, es...) + } + return entries, nil +} + +var filterRegexp = regexp.MustCompile(`^logName\s*=\s*"?([-_/.%\w]+)"?$`) + +// returns the log name, or "" for the empty filter +func parseFilter(filter string) (string, error) { + if filter == "" { + return "", nil + } + subs := filterRegexp.FindStringSubmatch(filter) + if subs == nil { + return "", invalidArgument("bad filter") + } + return subs[1], nil // cannot panic by construction of regexp +} + +func sortEntries(entries []*logpb.LogEntry, orderBy string) error { + switch orderBy { + case "", "timestamp asc": + sort.Sort(byTimestamp(entries)) + return nil + + case "timestamp desc": + sort.Sort(sort.Reverse(byTimestamp(entries))) + return nil + + default: + return invalidArgument("bad order_by") + } +} + +type byTimestamp []*logpb.LogEntry + +func (s byTimestamp) Len() int { return len(s) } +func (s byTimestamp) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s byTimestamp) Less(i, j int) bool { + c := compareTimestamps(s[i].Timestamp, s[j].Timestamp) + switch { + case c < 0: + return true + case c > 0: + return false + default: + return s[i].InsertId < s[j].InsertId + } +} + +func compareTimestamps(ts1, ts2 *tspb.Timestamp) int64 { + if 
ts1.Seconds != ts2.Seconds { + return ts1.Seconds - ts2.Seconds + } + return int64(ts1.Nanos - ts2.Nanos) +} + +// Lists monitored resource descriptors that are used by Stackdriver Logging. +func (h *loggingHandler) ListMonitoredResourceDescriptors(context.Context, *logpb.ListMonitoredResourceDescriptorsRequest) (*logpb.ListMonitoredResourceDescriptorsResponse, error) { + return &logpb.ListMonitoredResourceDescriptorsResponse{ + ResourceDescriptors: []*mrpb.MonitoredResourceDescriptor{ + { + Type: "global", + DisplayName: "Global", + Description: "... a log is not associated with any specific resource.", + Labels: []*lpb.LabelDescriptor{ + {Key: "project_id", Description: "The identifier of the GCP project..."}, + }, + }, + }, + }, nil +} + +// Gets a sink. +func (h *configHandler) GetSink(_ context.Context, req *logpb.GetSinkRequest) (*logpb.LogSink, error) { + h.mu.Lock() + defer h.mu.Unlock() + if s, ok := h.sinks[req.SinkName]; ok { + return s, nil + } + // TODO(jba): use error codes + return nil, fmt.Errorf("sink %q not found", req.SinkName) +} + +// Creates a sink. +func (h *configHandler) CreateSink(_ context.Context, req *logpb.CreateSinkRequest) (*logpb.LogSink, error) { + h.mu.Lock() + defer h.mu.Unlock() + fullName := fmt.Sprintf("%s/sinks/%s", req.Parent, req.Sink.Name) + if _, ok := h.sinks[fullName]; ok { + return nil, fmt.Errorf("sink with name %q already exists", fullName) + } + h.sinks[fullName] = req.Sink + return req.Sink, nil +} + +// Creates or updates a sink. +func (h *configHandler) UpdateSink(_ context.Context, req *logpb.UpdateSinkRequest) (*logpb.LogSink, error) { + h.mu.Lock() + defer h.mu.Unlock() + // Update of a non-existent sink will create it. + h.sinks[req.SinkName] = req.Sink + return req.Sink, nil +} + +// Deletes a sink. 
+func (h *configHandler) DeleteSink(_ context.Context, req *logpb.DeleteSinkRequest) (*emptypb.Empty, error) {
+	h.mu.Lock()
+	defer h.mu.Unlock()
+	delete(h.sinks, req.SinkName)
+	return &emptypb.Empty{}, nil
+}
+
+// Lists sinks. This fake implementation ignores the Parent field of
+// ListSinksRequest. All sinks are listed, regardless of their project.
+func (h *configHandler) ListSinks(_ context.Context, req *logpb.ListSinksRequest) (*logpb.ListSinksResponse, error) {
+	h.mu.Lock()
+	var sinks []*logpb.LogSink
+	for _, s := range h.sinks {
+		sinks = append(sinks, s)
+	}
+	h.mu.Unlock() // safe because no *logpb.LogSink is ever modified
+	// Since map iteration varies, sort the sinks.
+	sort.Sort(sinksByName(sinks))
+	from, to, nextPageToken, err := getPage(int(req.PageSize), req.PageToken, len(sinks))
+	if err != nil {
+		return nil, err
+	}
+	return &logpb.ListSinksResponse{
+		Sinks:         sinks[from:to],
+		NextPageToken: nextPageToken,
+	}, nil
+	// NOTE(review): an unreachable trailing "return nil, nil" (dead code after
+	// the return above) was removed; line kept blank to preserve hunk counts.
+}
+
+type sinksByName []*logpb.LogSink
+
+func (s sinksByName) Len() int           { return len(s) }
+func (s sinksByName) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
+func (s sinksByName) Less(i, j int) bool { return s[i].Name < s[j].Name }
+
+// Gets a metric.
+func (h *metricHandler) GetLogMetric(_ context.Context, req *logpb.GetLogMetricRequest) (*logpb.LogMetric, error) {
+	h.mu.Lock()
+	defer h.mu.Unlock()
+	if s, ok := h.metrics[req.MetricName]; ok {
+		return s, nil
+	}
+	// TODO(jba): use error codes
+	return nil, fmt.Errorf("metric %q not found", req.MetricName)
+}
+
+// Creates a metric.
+func (h *metricHandler) CreateLogMetric(_ context.Context, req *logpb.CreateLogMetricRequest) (*logpb.LogMetric, error) { + h.mu.Lock() + defer h.mu.Unlock() + fullName := fmt.Sprintf("%s/metrics/%s", req.Parent, req.Metric.Name) + if _, ok := h.metrics[fullName]; ok { + return nil, fmt.Errorf("metric with name %q already exists", fullName) + } + h.metrics[fullName] = req.Metric + return req.Metric, nil +} + +// Creates or updates a metric. +func (h *metricHandler) UpdateLogMetric(_ context.Context, req *logpb.UpdateLogMetricRequest) (*logpb.LogMetric, error) { + h.mu.Lock() + defer h.mu.Unlock() + // Update of a non-existent metric will create it. + h.metrics[req.MetricName] = req.Metric + return req.Metric, nil +} + +// Deletes a metric. +func (h *metricHandler) DeleteLogMetric(_ context.Context, req *logpb.DeleteLogMetricRequest) (*emptypb.Empty, error) { + h.mu.Lock() + defer h.mu.Unlock() + delete(h.metrics, req.MetricName) + return &emptypb.Empty{}, nil +} + +// Lists metrics. This fake implementation ignores the Parent field of +// ListMetricsRequest. All metrics are listed, regardless of their project. +func (h *metricHandler) ListLogMetrics(_ context.Context, req *logpb.ListLogMetricsRequest) (*logpb.ListLogMetricsResponse, error) { + h.mu.Lock() + var metrics []*logpb.LogMetric + for _, s := range h.metrics { + metrics = append(metrics, s) + } + h.mu.Unlock() // safe because no *logpb.LogMetric is ever modified + // Since map iteration varies, sort the metrics. 
+	sort.Sort(metricsByName(metrics))
+	from, to, nextPageToken, err := getPage(int(req.PageSize), req.PageToken, len(metrics))
+	if err != nil {
+		return nil, err
+	}
+	return &logpb.ListLogMetricsResponse{
+		Metrics:       metrics[from:to],
+		NextPageToken: nextPageToken,
+	}, nil
+	// NOTE(review): an unreachable trailing "return nil, nil" (dead code after
+	// the return above) was removed; line kept blank to preserve hunk counts.
+}
+
+type metricsByName []*logpb.LogMetric
+
+func (s metricsByName) Len() int           { return len(s) }
+func (s metricsByName) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
+func (s metricsByName) Less(i, j int) bool { return s[i].Name < s[j].Name }
+
+func invalidArgument(msg string) error {
+	// TODO(jba): status codes
+	return errors.New(msg)
+}
diff --git a/vendor/cloud.google.com/go/logging/internal/testing/fake_test.go b/vendor/cloud.google.com/go/logging/internal/testing/fake_test.go
new file mode 100644
index 00000000..b1267cd3
--- /dev/null
+++ b/vendor/cloud.google.com/go/logging/internal/testing/fake_test.go
@@ -0,0 +1,110 @@
+/*
+Copyright 2016 Google Inc. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// This file contains only basic checks. The fake is effectively tested by the
+// logging client unit tests.
+
+package testing
+
+import (
+	"reflect"
+	"testing"
+	"time"
+
+	tspb "github.com/golang/protobuf/ptypes/timestamp"
+	logpb "google.golang.org/genproto/googleapis/logging/v2"
+	grpc "google.golang.org/grpc"
+)
+
+func TestNewServer(t *testing.T) {
+	// Confirm that we can create and use a working gRPC server.
+ addr, err := NewServer() + if err != nil { + t.Fatal(err) + } + conn, err := grpc.Dial(addr, grpc.WithInsecure()) + if err != nil { + t.Fatal(err) + } + // Avoid "connection is closing; please retry" message from gRPC. + time.Sleep(300 * time.Millisecond) + conn.Close() +} + +func TestParseFilter(t *testing.T) { + for _, test := range []struct { + filter string + want string + wantErr bool + }{ + {"", "", false}, + {"logName = syslog", "syslog", false}, + {"logname = syslog", "", true}, + {"logName = 'syslog'", "", true}, + {"logName == syslog", "", true}, + } { + got, err := parseFilter(test.filter) + if err != nil { + if !test.wantErr { + t.Errorf("%q: got %v, want no error", test.filter, err) + } + continue + } + if test.wantErr { + t.Errorf("%q: got no error, want one", test.filter) + continue + } + if got != test.want { + t.Errorf("%q: got %q, want %q", test.filter, got, test.want) + } + } +} + +func TestSortEntries(t *testing.T) { + entries := []*logpb.LogEntry{ + /* 0 */ {Timestamp: &tspb.Timestamp{Seconds: 30}}, + /* 1 */ {Timestamp: &tspb.Timestamp{Seconds: 10}}, + /* 2 */ {Timestamp: &tspb.Timestamp{Seconds: 20}, InsertId: "b"}, + /* 3 */ {Timestamp: &tspb.Timestamp{Seconds: 20}, InsertId: "a"}, + /* 4 */ {Timestamp: &tspb.Timestamp{Seconds: 20}, InsertId: "c"}, + } + for _, test := range []struct { + orderBy string + want []int // slice of index into entries; nil == error + }{ + {"", []int{1, 3, 2, 4, 0}}, + {"timestamp asc", []int{1, 3, 2, 4, 0}}, + {"timestamp desc", []int{0, 4, 2, 3, 1}}, + {"something else", nil}, + } { + got := make([]*logpb.LogEntry, len(entries)) + copy(got, entries) + err := sortEntries(got, test.orderBy) + if err != nil { + if test.want != nil { + t.Errorf("%q: got %v, want nil error", test.orderBy, err) + } + continue + } + want := make([]*logpb.LogEntry, len(entries)) + for i, j := range test.want { + want[i] = entries[j] + } + if !reflect.DeepEqual(got, want) { + t.Errorf("%q: got %v, want %v", test.orderBy, got, want) + } 
+ } +} diff --git a/vendor/cloud.google.com/go/logging/internal/testing/unique.go b/vendor/cloud.google.com/go/logging/internal/testing/unique.go new file mode 100644 index 00000000..3e6136bc --- /dev/null +++ b/vendor/cloud.google.com/go/logging/internal/testing/unique.go @@ -0,0 +1,73 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// This file supports generating unique IDs so that multiple test executions +// don't interfere with each other, and cleaning up old entities that may +// remain if tests exit early. + +package testing + +import ( + "fmt" + "strconv" + "strings" + "time" +) + +var ( + startTime = time.Now() + uniqueIDCounter int + // Items older than expiredAge are remnants from previous tests and can be deleted. + expiredAge = 24 * time.Hour +) + +// UniqueID generates unique IDs so tests don't interfere with each other. +// All unique IDs generated in the same test execution will have the same timestamp. +func UniqueID(prefix string) string { + uniqueIDCounter++ + // Zero-pad the counter for lexical sort order. + return fmt.Sprintf("%s-t%d-%04d", prefix, startTime.UnixNano(), uniqueIDCounter) +} + +// ExpiredUniqueIDs returns a subset of ids that are unique IDs as generated by +// UniqueID(prefix) and are older than expiredAge. 
+func ExpiredUniqueIDs(ids []string, prefix string) []string { + var expired []string + for _, id := range ids { + t, ok := extractTime(id, prefix) + if ok && time.Since(t) > expiredAge { + expired = append(expired, id) + } + } + return expired +} + +// extractTime extracts the timestamp of s, which must begin with prefix and +// match the form generated by uniqueID. The second return value is true on +// success, false if there was a problem. +func extractTime(s, prefix string) (time.Time, bool) { + if !strings.HasPrefix(s, prefix+"-t") { + return time.Time{}, false + } + s = s[len(prefix)+2:] + i := strings.Index(s, "-") + if i < 0 { + return time.Time{}, false + } + nanos, err := strconv.ParseInt(s[:i], 10, 64) + if err != nil { + return time.Time{}, false + } + return time.Unix(0, nanos), true +} diff --git a/vendor/cloud.google.com/go/logging/internal/testing/unique_test.go b/vendor/cloud.google.com/go/logging/internal/testing/unique_test.go new file mode 100644 index 00000000..087cdb96 --- /dev/null +++ b/vendor/cloud.google.com/go/logging/internal/testing/unique_test.go @@ -0,0 +1,72 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// This file supports generating unique IDs so that multiple test executions +// don't interfere with each other, and cleaning up old entities that may +// remain if tests exit early. 
+ +package testing + +import ( + "reflect" + "testing" + "time" +) + +func TestExtractTime(t *testing.T) { + uid := UniqueID("unique-ID") + got, ok := extractTime(uid, "unique-ID") + if !ok { + t.Fatal("got ok = false, want true") + } + if !startTime.Equal(got) { + t.Errorf("got %s, want %s", got, startTime) + } + + got, ok = extractTime("p-t0-doesnotmatter", "p") + if !ok { + t.Fatal("got false, want true") + } + if want := time.Unix(0, 0); !want.Equal(got) { + t.Errorf("got %s, want %s", got, want) + } + if _, ok = extractTime("invalid-time-1234", "invalid"); ok { + t.Error("got true, want false") + } +} + +func TestExpiredUniqueIDs(t *testing.T) { + const prefix = "uid" + // The freshly unique IDs will have startTime as their timestamp. + uids := []string{UniqueID(prefix), "uid-tinvalid-1234", UniqueID(prefix), "uid-t0-1111"} + + // This test hasn't been running for very long, so only the last ID is expired. + got := ExpiredUniqueIDs(uids, prefix) + want := []string{uids[3]} + if !reflect.DeepEqual(got, want) { + t.Errorf("got %v, want %v", got, want) + } + + time.Sleep(100 * time.Millisecond) + + prev := expiredAge + expiredAge = 10 * time.Millisecond + defer func() { expiredAge = prev }() + // This test has been running for at least 10ms, so all but the invalid ID have expired. + got = ExpiredUniqueIDs(uids, prefix) + want = []string{uids[0], uids[2], uids[3]} + if !reflect.DeepEqual(got, want) { + t.Errorf("got %v, want %v", got, want) + } +} diff --git a/vendor/cloud.google.com/go/logging/logadmin/example_entry_iterator_test.go b/vendor/cloud.google.com/go/logging/logadmin/example_entry_iterator_test.go new file mode 100644 index 00000000..39e6f575 --- /dev/null +++ b/vendor/cloud.google.com/go/logging/logadmin/example_entry_iterator_test.go @@ -0,0 +1,66 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package logadmin_test + +import ( + "fmt" + "time" + + "cloud.google.com/go/logging/logadmin" + "golang.org/x/net/context" + "google.golang.org/api/iterator" +) + +func ExampleClient_Entries() { + ctx := context.Background() + client, err := logadmin.NewClient(ctx, "my-project") + if err != nil { + // TODO: Handle error. + } + it := client.Entries(ctx, logadmin.Filter(`logName = "projects/my-project/logs/my-log"`)) + _ = it // TODO: iterate using Next or iterator.Pager. +} + +func ExampleFilter_timestamp() { + // This example demonstrates how to list the last 24 hours of log entries. + ctx := context.Background() + client, err := logadmin.NewClient(ctx, "my-project") + if err != nil { + // TODO: Handle error. + } + oneDayAgo := time.Now().Add(-24 * time.Hour) + t := oneDayAgo.Format(time.RFC3339) // Logging API wants timestamps in RFC 3339 format. + it := client.Entries(ctx, logadmin.Filter(fmt.Sprintf(`timestamp > "%s"`, t))) + _ = it // TODO: iterate using Next or iterator.Pager. +} + +func ExampleEntryIterator_Next() { + ctx := context.Background() + client, err := logadmin.NewClient(ctx, "my-project") + if err != nil { + // TODO: Handle error. + } + it := client.Entries(ctx) + for { + entry, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + // TODO: Handle error. 
+ } + fmt.Println(entry) + } +} diff --git a/vendor/cloud.google.com/go/logging/logadmin/example_metric_iterator_test.go b/vendor/cloud.google.com/go/logging/logadmin/example_metric_iterator_test.go new file mode 100644 index 00000000..2e876e9f --- /dev/null +++ b/vendor/cloud.google.com/go/logging/logadmin/example_metric_iterator_test.go @@ -0,0 +1,52 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package logadmin_test + +import ( + "fmt" + + "cloud.google.com/go/logging/logadmin" + "golang.org/x/net/context" + "google.golang.org/api/iterator" +) + +func ExampleClient_Metrics() { + ctx := context.Background() + client, err := logadmin.NewClient(ctx, "my-project") + if err != nil { + // TODO: Handle error. + } + it := client.Metrics(ctx) + _ = it // TODO: iterate using Next or iterator.Pager. +} + +func ExampleMetricIterator_Next() { + ctx := context.Background() + client, err := logadmin.NewClient(ctx, "my-project") + if err != nil { + // TODO: Handle error. + } + it := client.Metrics(ctx) + for { + metric, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + // TODO: Handle error. 
+ } + fmt.Println(metric) + } +} diff --git a/vendor/cloud.google.com/go/logging/logadmin/example_paging_test.go b/vendor/cloud.google.com/go/logging/logadmin/example_paging_test.go new file mode 100644 index 00000000..036eeeb9 --- /dev/null +++ b/vendor/cloud.google.com/go/logging/logadmin/example_paging_test.go @@ -0,0 +1,92 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package logadmin_test + +import ( + "bytes" + "flag" + "fmt" + "html/template" + "log" + "net/http" + + "cloud.google.com/go/logging" + "cloud.google.com/go/logging/logadmin" + "golang.org/x/net/context" + "google.golang.org/api/iterator" +) + +var ( + client *logadmin.Client + projectID = flag.String("project-id", "", "ID of the project to use") +) + +func ExampleClient_Entries_pagination() { + // This example demonstrates how to iterate through items a page at a time + // even if each successive page is fetched by a different process. It is a + // complete web server that displays pages of log entries. To run it as a + // standalone program, rename both the package and this function to "main". 
+ ctx := context.Background() + flag.Parse() + if *projectID == "" { + log.Fatal("-project-id missing") + } + var err error + client, err = logadmin.NewClient(ctx, *projectID) + if err != nil { + log.Fatalf("creating logging client: %v", err) + } + + http.HandleFunc("/entries", handleEntries) + log.Print("listening on 8080") + log.Fatal(http.ListenAndServe(":8080", nil)) +} + +var pageTemplate = template.Must(template.New("").Parse(` +
+ {{range .Entries}} + + {{end}} +
{{.}}
+{{if .Next}} + Next Page +{{end}} +`)) + +func handleEntries(w http.ResponseWriter, r *http.Request) { + ctx := context.Background() + filter := fmt.Sprintf(`logName = "projects/%s/logs/testlog"`, *projectID) + it := client.Entries(ctx, logadmin.Filter(filter)) + var entries []*logging.Entry + nextTok, err := iterator.NewPager(it, 5, r.URL.Query().Get("pageToken")).NextPage(&entries) + if err != nil { + http.Error(w, fmt.Sprintf("problem getting the next page: %v", err), http.StatusInternalServerError) + return + } + data := struct { + Entries []*logging.Entry + Next string + }{ + entries, + nextTok, + } + var buf bytes.Buffer + if err := pageTemplate.Execute(&buf, data); err != nil { + http.Error(w, fmt.Sprintf("problem executing page template: %v", err), http.StatusInternalServerError) + } + if _, err := buf.WriteTo(w); err != nil { + log.Printf("writing response: %v", err) + } +} diff --git a/vendor/cloud.google.com/go/logging/logadmin/example_resource_iterator_test.go b/vendor/cloud.google.com/go/logging/logadmin/example_resource_iterator_test.go new file mode 100644 index 00000000..fe67e233 --- /dev/null +++ b/vendor/cloud.google.com/go/logging/logadmin/example_resource_iterator_test.go @@ -0,0 +1,52 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package logadmin_test + +import ( + "fmt" + + "cloud.google.com/go/logging/logadmin" + "golang.org/x/net/context" + "google.golang.org/api/iterator" +) + +func ExampleClient_ResourceDescriptors() { + ctx := context.Background() + client, err := logadmin.NewClient(ctx, "my-project") + if err != nil { + // TODO: Handle error. + } + it := client.ResourceDescriptors(ctx) + _ = it // TODO: iterate using Next or iterator.Pager. +} + +func ExampleResourceDescriptorIterator_Next() { + ctx := context.Background() + client, err := logadmin.NewClient(ctx, "my-project") + if err != nil { + // TODO: Handle error. + } + it := client.ResourceDescriptors(ctx) + for { + rdesc, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + // TODO: Handle error. + } + fmt.Println(rdesc) + } +} diff --git a/vendor/cloud.google.com/go/logging/logadmin/example_sink_iterator_test.go b/vendor/cloud.google.com/go/logging/logadmin/example_sink_iterator_test.go new file mode 100644 index 00000000..918fd9ff --- /dev/null +++ b/vendor/cloud.google.com/go/logging/logadmin/example_sink_iterator_test.go @@ -0,0 +1,52 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package logadmin_test + +import ( + "fmt" + + "cloud.google.com/go/logging/logadmin" + "golang.org/x/net/context" + "google.golang.org/api/iterator" +) + +func ExampleClient_Sinks() { + ctx := context.Background() + client, err := logadmin.NewClient(ctx, "my-project") + if err != nil { + // TODO: Handle error. + } + it := client.Sinks(ctx) + _ = it // TODO: iterate using Next or iterator.Pager. +} + +func ExampleSinkIterator_Next() { + ctx := context.Background() + client, err := logadmin.NewClient(ctx, "my-project") + if err != nil { + // TODO: Handle error. + } + it := client.Sinks(ctx) + for { + sink, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + // TODO: Handle error. + } + fmt.Println(sink) + } +} diff --git a/vendor/cloud.google.com/go/logging/logadmin/examples_test.go b/vendor/cloud.google.com/go/logging/logadmin/examples_test.go new file mode 100644 index 00000000..0926dd5f --- /dev/null +++ b/vendor/cloud.google.com/go/logging/logadmin/examples_test.go @@ -0,0 +1,161 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package logadmin_test + +import ( + "fmt" + + "cloud.google.com/go/logging/logadmin" + "golang.org/x/net/context" +) + +func ExampleNewClient() { + ctx := context.Background() + client, err := logadmin.NewClient(ctx, "my-project") + if err != nil { + // TODO: Handle error. + } + // Use client to manage logs, metrics and sinks. 
+ // Close the client when finished. + if err := client.Close(); err != nil { + // TODO: Handle error. + } +} + +func ExampleClient_DeleteLog() { + ctx := context.Background() + client, err := logadmin.NewClient(ctx, "my-project") + if err != nil { + // TODO: Handle error. + } + err = client.DeleteLog(ctx, "my-log") + if err != nil { + // TODO: Handle error. + } +} + +func ExampleClient_CreateMetric() { + ctx := context.Background() + client, err := logadmin.NewClient(ctx, "my-project") + if err != nil { + // TODO: Handle error. + } + err = client.CreateMetric(ctx, &logadmin.Metric{ + ID: "severe-errors", + Description: "entries at ERROR or higher severities", + Filter: "severity >= ERROR", + }) + if err != nil { + // TODO: Handle error. + } +} + +func ExampleClient_DeleteMetric() { + ctx := context.Background() + client, err := logadmin.NewClient(ctx, "my-project") + if err != nil { + // TODO: Handle error. + } + if err := client.DeleteMetric(ctx, "severe-errors"); err != nil { + // TODO: Handle error. + } +} + +func ExampleClient_Metric() { + ctx := context.Background() + client, err := logadmin.NewClient(ctx, "my-project") + if err != nil { + // TODO: Handle error. + } + m, err := client.Metric(ctx, "severe-errors") + if err != nil { + // TODO: Handle error. + } + fmt.Println(m) +} + +func ExampleClient_UpdateMetric() { + ctx := context.Background() + client, err := logadmin.NewClient(ctx, "my-project") + if err != nil { + // TODO: Handle error. + } + err = client.UpdateMetric(ctx, &logadmin.Metric{ + ID: "severe-errors", + Description: "entries at high severities", + Filter: "severity > ERROR", + }) + if err != nil { + // TODO: Handle error. + } +} + +func ExampleClient_CreateSink() { + ctx := context.Background() + client, err := logadmin.NewClient(ctx, "my-project") + if err != nil { + // TODO: Handle error. 
+ } + sink, err := client.CreateSink(ctx, &logadmin.Sink{ + ID: "severe-errors-to-gcs", + Destination: "storage.googleapis.com/my-bucket", + Filter: "severity >= ERROR", + }) + if err != nil { + // TODO: Handle error. + } + fmt.Println(sink) +} + +func ExampleClient_DeleteSink() { + ctx := context.Background() + client, err := logadmin.NewClient(ctx, "my-project") + if err != nil { + // TODO: Handle error. + } + if err := client.DeleteSink(ctx, "severe-errors-to-gcs"); err != nil { + // TODO: Handle error. + } +} + +func ExampleClient_Sink() { + ctx := context.Background() + client, err := logadmin.NewClient(ctx, "my-project") + if err != nil { + // TODO: Handle error. + } + s, err := client.Sink(ctx, "severe-errors-to-gcs") + if err != nil { + // TODO: Handle error. + } + fmt.Println(s) +} + +func ExampleClient_UpdateSink() { + ctx := context.Background() + client, err := logadmin.NewClient(ctx, "my-project") + if err != nil { + // TODO: Handle error. + } + sink, err := client.UpdateSink(ctx, &logadmin.Sink{ + ID: "severe-errors-to-gcs", + Destination: "storage.googleapis.com/my-other-bucket", + Filter: "severity >= ERROR", + }) + if err != nil { + // TODO: Handle error. + } + fmt.Println(sink) +} diff --git a/vendor/cloud.google.com/go/logging/logadmin/logadmin.go b/vendor/cloud.google.com/go/logging/logadmin/logadmin.go new file mode 100644 index 00000000..037aa9be --- /dev/null +++ b/vendor/cloud.google.com/go/logging/logadmin/logadmin.go @@ -0,0 +1,347 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +// These features are missing now, but will likely be added: +// - There is no way to specify CallOptions. + +// Package logadmin contains a Stackdriver Logging client that can be used +// for reading logs and working with sinks, metrics and monitored resources. +// For a client that can write logs, see package cloud.google.com/go/logging. +// +// The client uses Logging API v2. +// See https://cloud.google.com/logging/docs/api/v2/ for an introduction to the API. +// +// Note: This package is in beta. Some backwards-incompatible changes may occur. +package logadmin // import "cloud.google.com/go/logging/logadmin" + +import ( + "errors" + "fmt" + "math" + "net/http" + "net/url" + "strings" + "time" + + "cloud.google.com/go/internal/version" + "cloud.google.com/go/logging" + vkit "cloud.google.com/go/logging/apiv2" + "cloud.google.com/go/logging/internal" + "github.com/golang/protobuf/ptypes" + gax "github.com/googleapis/gax-go" + "golang.org/x/net/context" + "google.golang.org/api/iterator" + "google.golang.org/api/option" + logtypepb "google.golang.org/genproto/googleapis/logging/type" + logpb "google.golang.org/genproto/googleapis/logging/v2" + "google.golang.org/grpc/codes" + // Import the following so EntryIterator can unmarshal log protos. + _ "google.golang.org/genproto/googleapis/cloud/audit" +) + +// Client is a Logging client. A Client is associated with a single Cloud project. +type Client struct { + lClient *vkit.Client // logging client + sClient *vkit.ConfigClient // sink client + mClient *vkit.MetricsClient // metric client + projectID string + closed bool +} + +// NewClient returns a new logging client associated with the provided project ID. +// +// By default NewClient uses AdminScope. To use a different scope, call +// NewClient using a WithScopes option (see https://godoc.org/google.golang.org/api/option#WithScopes). 
+func NewClient(ctx context.Context, projectID string, opts ...option.ClientOption) (*Client, error) { + // Check for '/' in project ID to reserve the ability to support various owning resources, + // in the form "{Collection}/{Name}", for instance "organizations/my-org". + if strings.ContainsRune(projectID, '/') { + return nil, errors.New("logging: project ID contains '/'") + } + opts = append([]option.ClientOption{ + option.WithEndpoint(internal.ProdAddr), + option.WithScopes(logging.AdminScope), + }, opts...) + lc, err := vkit.NewClient(ctx, opts...) + if err != nil { + return nil, err + } + // TODO(jba): pass along any client options that should be provided to all clients. + sc, err := vkit.NewConfigClient(ctx, option.WithGRPCConn(lc.Connection())) + if err != nil { + return nil, err + } + mc, err := vkit.NewMetricsClient(ctx, option.WithGRPCConn(lc.Connection())) + if err != nil { + return nil, err + } + // Retry some non-idempotent methods on INTERNAL, because it happens sometimes + // and in all observed cases the operation did not complete. + retryerOnInternal := func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Internal, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 1000 * time.Millisecond, + Multiplier: 1.2, + }) + } + mc.CallOptions.CreateLogMetric = []gax.CallOption{gax.WithRetry(retryerOnInternal)} + mc.CallOptions.UpdateLogMetric = []gax.CallOption{gax.WithRetry(retryerOnInternal)} + + lc.SetGoogleClientInfo("gccl", version.Repo) + sc.SetGoogleClientInfo("gccl", version.Repo) + mc.SetGoogleClientInfo("gccl", version.Repo) + client := &Client{ + lClient: lc, + sClient: sc, + mClient: mc, + projectID: projectID, + } + return client, nil +} + +// parent returns the string used in many RPCs to denote the parent resource of the log. +func (c *Client) parent() string { + return "projects/" + c.projectID +} + +// Close closes the client. 
+func (c *Client) Close() error { + if c.closed { + return nil + } + // Return only the first error. Since all clients share an underlying connection, + // Closes after the first always report a "connection is closing" error. + err := c.lClient.Close() + _ = c.sClient.Close() + _ = c.mClient.Close() + c.closed = true + return err +} + +// DeleteLog deletes a log and all its log entries. The log will reappear if it receives new entries. +// logID identifies the log within the project. An example log ID is "syslog". Requires AdminScope. +func (c *Client) DeleteLog(ctx context.Context, logID string) error { + return c.lClient.DeleteLog(ctx, &logpb.DeleteLogRequest{ + LogName: internal.LogPath(c.parent(), logID), + }) +} + +func toHTTPRequest(p *logtypepb.HttpRequest) (*logging.HTTPRequest, error) { + if p == nil { + return nil, nil + } + u, err := url.Parse(p.RequestUrl) + if err != nil { + return nil, err + } + var dur time.Duration + if p.Latency != nil { + dur, err = ptypes.Duration(p.Latency) + if err != nil { + return nil, err + } + } + hr := &http.Request{ + Method: p.RequestMethod, + URL: u, + Header: map[string][]string{}, + } + if p.UserAgent != "" { + hr.Header.Set("User-Agent", p.UserAgent) + } + if p.Referer != "" { + hr.Header.Set("Referer", p.Referer) + } + return &logging.HTTPRequest{ + Request: hr, + RequestSize: p.RequestSize, + Status: int(p.Status), + ResponseSize: p.ResponseSize, + Latency: dur, + RemoteIP: p.RemoteIp, + CacheHit: p.CacheHit, + CacheValidatedWithOriginServer: p.CacheValidatedWithOriginServer, + }, nil +} + +// An EntriesOption is an option for listing log entries. +type EntriesOption interface { + set(*logpb.ListLogEntriesRequest) +} + +// ProjectIDs sets the project IDs or project numbers from which to retrieve +// log entries. Examples of a project ID: "my-project-1A", "1234567890". 
+func ProjectIDs(pids []string) EntriesOption { return projectIDs(pids) } + +type projectIDs []string + +func (p projectIDs) set(r *logpb.ListLogEntriesRequest) { r.ProjectIds = []string(p) } + +// Filter sets an advanced logs filter for listing log entries (see +// https://cloud.google.com/logging/docs/view/advanced_filters). The filter is +// compared against all log entries in the projects specified by ProjectIDs. +// Only entries that match the filter are retrieved. An empty filter (the +// default) matches all log entries. +// +// In the filter string, log names must be written in their full form, as +// "projects/PROJECT-ID/logs/LOG-ID". Forward slashes in LOG-ID must be +// replaced by %2F before calling Filter. +// +// Timestamps in the filter string must be written in RFC 3339 format. See the +// timestamp example. +func Filter(f string) EntriesOption { return filter(f) } + +type filter string + +func (f filter) set(r *logpb.ListLogEntriesRequest) { r.Filter = string(f) } + +// NewestFirst causes log entries to be listed from most recent (newest) to +// least recent (oldest). By default, they are listed from oldest to newest. +func NewestFirst() EntriesOption { return newestFirst{} } + +type newestFirst struct{} + +func (newestFirst) set(r *logpb.ListLogEntriesRequest) { r.OrderBy = "timestamp desc" } + +// Entries returns an EntryIterator for iterating over log entries. By default, +// the log entries will be restricted to those from the project passed to +// NewClient. This may be overridden by passing a ProjectIDs option. Requires ReadScope or AdminScope. 
+func (c *Client) Entries(ctx context.Context, opts ...EntriesOption) *EntryIterator { + it := &EntryIterator{ + it: c.lClient.ListLogEntries(ctx, listLogEntriesRequest(c.projectID, opts)), + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo( + it.fetch, + func() int { return len(it.items) }, + func() interface{} { b := it.items; it.items = nil; return b }) + return it +} + +func listLogEntriesRequest(projectID string, opts []EntriesOption) *logpb.ListLogEntriesRequest { + req := &logpb.ListLogEntriesRequest{ + ProjectIds: []string{projectID}, + } + for _, opt := range opts { + opt.set(req) + } + return req +} + +// An EntryIterator iterates over log entries. +type EntryIterator struct { + it *vkit.LogEntryIterator + pageInfo *iterator.PageInfo + nextFunc func() error + items []*logging.Entry +} + +// PageInfo supports pagination. See https://godoc.org/google.golang.org/api/iterator package for details. +func (it *EntryIterator) PageInfo() *iterator.PageInfo { return it.pageInfo } + +// Next returns the next result. Its second return value is iterator.Done +// (https://godoc.org/google.golang.org/api/iterator) if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. 
+func (it *EntryIterator) Next() (*logging.Entry, error) { + if err := it.nextFunc(); err != nil { + return nil, err + } + item := it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *EntryIterator) fetch(pageSize int, pageToken string) (string, error) { + return iterFetch(pageSize, pageToken, it.it.PageInfo(), func() error { + item, err := it.it.Next() + if err != nil { + return err + } + e, err := fromLogEntry(item) + if err != nil { + return err + } + it.items = append(it.items, e) + return nil + }) +} + +func trunc32(i int) int32 { + if i > math.MaxInt32 { + i = math.MaxInt32 + } + return int32(i) +} + +var slashUnescaper = strings.NewReplacer("%2F", "/", "%2f", "/") + +func fromLogEntry(le *logpb.LogEntry) (*logging.Entry, error) { + time, err := ptypes.Timestamp(le.Timestamp) + if err != nil { + return nil, err + } + var payload interface{} + switch x := le.Payload.(type) { + case *logpb.LogEntry_TextPayload: + payload = x.TextPayload + + case *logpb.LogEntry_ProtoPayload: + var d ptypes.DynamicAny + if err := ptypes.UnmarshalAny(x.ProtoPayload, &d); err != nil { + return nil, fmt.Errorf("logging: unmarshalling proto payload: %v", err) + } + payload = d.Message + + case *logpb.LogEntry_JsonPayload: + // Leave this as a Struct. + // TODO(jba): convert to map[string]interface{}? + payload = x.JsonPayload + + default: + return nil, fmt.Errorf("logging: unknown payload type: %T", le.Payload) + } + hr, err := toHTTPRequest(le.HttpRequest) + if err != nil { + return nil, err + } + return &logging.Entry{ + Timestamp: time, + Severity: logging.Severity(le.Severity), + Payload: payload, + Labels: le.Labels, + InsertID: le.InsertId, + HTTPRequest: hr, + Operation: le.Operation, + LogName: slashUnescaper.Replace(le.LogName), + Resource: le.Resource, + }, nil +} + +// Common fetch code for iterators that are backed by vkit iterators. 
+func iterFetch(pageSize int, pageToken string, pi *iterator.PageInfo, next func() error) (string, error) { + pi.MaxSize = pageSize + pi.Token = pageToken + // Get one item, which will fill the buffer. + if err := next(); err != nil { + return "", err + } + // Collect the rest of the buffer. + for pi.Remaining() > 0 { + if err := next(); err != nil { + return "", err + } + } + return pi.Token, nil +} diff --git a/vendor/cloud.google.com/go/logging/logadmin/logadmin_test.go b/vendor/cloud.google.com/go/logging/logadmin/logadmin_test.go new file mode 100644 index 00000000..6a75e4c8 --- /dev/null +++ b/vendor/cloud.google.com/go/logging/logadmin/logadmin_test.go @@ -0,0 +1,274 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// TODO(jba): test that OnError is getting called appropriately. 
+ +package logadmin + +import ( + "flag" + "log" + "net/http" + "net/url" + "os" + "reflect" + "testing" + "time" + + "cloud.google.com/go/internal/testutil" + "cloud.google.com/go/logging" + ltesting "cloud.google.com/go/logging/internal/testing" + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes" + durpb "github.com/golang/protobuf/ptypes/duration" + structpb "github.com/golang/protobuf/ptypes/struct" + "golang.org/x/net/context" + "google.golang.org/api/option" + mrpb "google.golang.org/genproto/googleapis/api/monitoredres" + audit "google.golang.org/genproto/googleapis/cloud/audit" + logtypepb "google.golang.org/genproto/googleapis/logging/type" + logpb "google.golang.org/genproto/googleapis/logging/v2" + "google.golang.org/grpc" +) + +var ( + client *Client + testProjectID string +) + +var ( + // If true, this test is using the production service, not a fake. + integrationTest bool + + newClient func(ctx context.Context, projectID string) *Client +) + +func TestMain(m *testing.M) { + flag.Parse() // needed for testing.Short() + ctx := context.Background() + testProjectID = testutil.ProjID() + if testProjectID == "" || testing.Short() { + integrationTest = false + if testProjectID != "" { + log.Print("Integration tests skipped in short mode (using fake instead)") + } + testProjectID = "PROJECT_ID" + addr, err := ltesting.NewServer() + if err != nil { + log.Fatalf("creating fake server: %v", err) + } + newClient = func(ctx context.Context, projectID string) *Client { + conn, err := grpc.Dial(addr, grpc.WithInsecure(), grpc.WithBlock()) + if err != nil { + log.Fatalf("dialing %q: %v", addr, err) + } + c, err := NewClient(ctx, projectID, option.WithGRPCConn(conn)) + if err != nil { + log.Fatalf("creating client for fake at %q: %v", addr, err) + } + return c + } + } else { + integrationTest = true + ts := testutil.TokenSource(ctx, logging.AdminScope) + if ts == nil { + log.Fatal("The project key must be set. 
See CONTRIBUTING.md for details") + } + log.Printf("running integration tests with project %s", testProjectID) + newClient = func(ctx context.Context, projectID string) *Client { + c, err := NewClient(ctx, projectID, option.WithTokenSource(ts), + option.WithGRPCDialOption(grpc.WithBlock())) + if err != nil { + log.Fatalf("creating prod client: %v", err) + } + return c + } + } + client = newClient(ctx, testProjectID) + initMetrics(ctx) + cleanup := initSinks(ctx) + exit := m.Run() + cleanup() + client.Close() + os.Exit(exit) +} + +// EntryIterator and DeleteLog are tested in the logging package. + +func TestClientClose(t *testing.T) { + c := newClient(context.Background(), testProjectID) + if err := c.Close(); err != nil { + t.Errorf("want got %v, want nil", err) + } +} + +func TestFromLogEntry(t *testing.T) { + now := time.Now() + res := &mrpb.MonitoredResource{Type: "global"} + ts, err := ptypes.TimestampProto(now) + if err != nil { + t.Fatal(err) + } + logEntry := logpb.LogEntry{ + LogName: "projects/PROJECT_ID/logs/LOG_ID", + Resource: res, + Payload: &logpb.LogEntry_TextPayload{"hello"}, + Timestamp: ts, + Severity: logtypepb.LogSeverity_INFO, + InsertId: "123", + HttpRequest: &logtypepb.HttpRequest{ + RequestMethod: "GET", + RequestUrl: "http:://example.com/path?q=1", + RequestSize: 100, + Status: 200, + ResponseSize: 25, + Latency: &durpb.Duration{Seconds: 100}, + UserAgent: "user-agent", + RemoteIp: "127.0.0.1", + Referer: "referer", + CacheHit: true, + CacheValidatedWithOriginServer: true, + }, + Labels: map[string]string{ + "a": "1", + "b": "two", + "c": "true", + }, + } + u, err := url.Parse("http:://example.com/path?q=1") + if err != nil { + t.Fatal(err) + } + want := &logging.Entry{ + LogName: "projects/PROJECT_ID/logs/LOG_ID", + Resource: res, + Timestamp: now.In(time.UTC), + Severity: logging.Info, + Payload: "hello", + Labels: map[string]string{ + "a": "1", + "b": "two", + "c": "true", + }, + InsertID: "123", + HTTPRequest: &logging.HTTPRequest{ + 
Request: &http.Request{ + Method: "GET", + URL: u, + Header: map[string][]string{ + "User-Agent": []string{"user-agent"}, + "Referer": []string{"referer"}, + }, + }, + RequestSize: 100, + Status: 200, + ResponseSize: 25, + Latency: 100 * time.Second, + RemoteIP: "127.0.0.1", + CacheHit: true, + CacheValidatedWithOriginServer: true, + }, + } + got, err := fromLogEntry(&logEntry) + if err != nil { + t.Fatal(err) + } + // Test sub-values separately because %+v and %#v do not follow pointers. + // TODO(jba): use a differ or pretty-printer. + if !reflect.DeepEqual(got.HTTPRequest.Request, want.HTTPRequest.Request) { + t.Fatalf("HTTPRequest.Request:\ngot %+v\nwant %+v", got.HTTPRequest.Request, want.HTTPRequest.Request) + } + if !reflect.DeepEqual(got.HTTPRequest, want.HTTPRequest) { + t.Fatalf("HTTPRequest:\ngot %+v\nwant %+v", got.HTTPRequest, want.HTTPRequest) + } + if !reflect.DeepEqual(got, want) { + t.Errorf("FullEntry:\ngot %+v\nwant %+v", got, want) + } + + // Proto payload. + alog := &audit.AuditLog{ + ServiceName: "svc", + MethodName: "method", + ResourceName: "shelves/S/books/B", + } + any, err := ptypes.MarshalAny(alog) + if err != nil { + t.Fatal(err) + } + logEntry = logpb.LogEntry{ + LogName: "projects/PROJECT_ID/logs/LOG_ID", + Resource: res, + Timestamp: ts, + Payload: &logpb.LogEntry_ProtoPayload{any}, + } + got, err = fromLogEntry(&logEntry) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(got.Payload, alog) { + t.Errorf("got %+v, want %+v", got.Payload, alog) + } + + // JSON payload. 
+ jstruct := &structpb.Struct{map[string]*structpb.Value{ + "f": &structpb.Value{&structpb.Value_NumberValue{3.1}}, + }} + logEntry = logpb.LogEntry{ + LogName: "projects/PROJECT_ID/logs/LOG_ID", + Resource: res, + Timestamp: ts, + Payload: &logpb.LogEntry_JsonPayload{jstruct}, + } + got, err = fromLogEntry(&logEntry) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(got.Payload, jstruct) { + t.Errorf("got %+v, want %+v", got.Payload, jstruct) + } +} + +func TestListLogEntriesRequest(t *testing.T) { + for _, test := range []struct { + opts []EntriesOption + projectIDs []string + filter string + orderBy string + }{ + // Default is client's project ID, empty filter and orderBy. + {nil, + []string{"PROJECT_ID"}, "", ""}, + {[]EntriesOption{NewestFirst(), Filter("f")}, + []string{"PROJECT_ID"}, "f", "timestamp desc"}, + {[]EntriesOption{ProjectIDs([]string{"foo"})}, + []string{"foo"}, "", ""}, + {[]EntriesOption{NewestFirst(), Filter("f"), ProjectIDs([]string{"foo"})}, + []string{"foo"}, "f", "timestamp desc"}, + {[]EntriesOption{NewestFirst(), Filter("f"), ProjectIDs([]string{"foo"})}, + []string{"foo"}, "f", "timestamp desc"}, + // If there are repeats, last one wins. + {[]EntriesOption{NewestFirst(), Filter("no"), ProjectIDs([]string{"foo"}), Filter("f")}, + []string{"foo"}, "f", "timestamp desc"}, + } { + got := listLogEntriesRequest("PROJECT_ID", test.opts) + want := &logpb.ListLogEntriesRequest{ + ProjectIds: test.projectIDs, + Filter: test.filter, + OrderBy: test.orderBy, + } + if !proto.Equal(got, want) { + t.Errorf("%v:\ngot %v\nwant %v", test.opts, got, want) + } + } +} diff --git a/vendor/cloud.google.com/go/logging/logadmin/metrics.go b/vendor/cloud.google.com/go/logging/logadmin/metrics.go new file mode 100644 index 00000000..9374ac46 --- /dev/null +++ b/vendor/cloud.google.com/go/logging/logadmin/metrics.go @@ -0,0 +1,154 @@ +// Copyright 2016 Google Inc. All Rights Reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package logadmin + +import ( + "fmt" + + vkit "cloud.google.com/go/logging/apiv2" + "golang.org/x/net/context" + "google.golang.org/api/iterator" + logpb "google.golang.org/genproto/googleapis/logging/v2" +) + +// Metric describes a logs-based metric. The value of the metric is the +// number of log entries that match a logs filter. +// +// Metrics are a feature of Stackdriver Monitoring. +// See https://cloud.google.com/monitoring/api/v3/metrics for more about them. +type Metric struct { + // ID is a client-assigned metric identifier. Example: + // "severe_errors". Metric identifiers are limited to 1000 + // characters and can include only the following characters: A-Z, + // a-z, 0-9, and the special characters _-.,+!*',()%/\. The + // forward-slash character (/) denotes a hierarchy of name pieces, + // and it cannot be the first character of the name. + ID string + + // Description describes this metric. It is used in documentation. + Description string + + // Filter is an advanced logs filter (see + // https://cloud.google.com/logging/docs/view/advanced_filters). + // Example: "logName:syslog AND severity>=ERROR". + Filter string +} + +// CreateMetric creates a logs-based metric. 
+func (c *Client) CreateMetric(ctx context.Context, m *Metric) error { + _, err := c.mClient.CreateLogMetric(ctx, &logpb.CreateLogMetricRequest{ + Parent: c.parent(), + Metric: toLogMetric(m), + }) + return err +} + +// DeleteMetric deletes a log-based metric. +// The provided metric ID is the metric identifier. For example, "severe_errors". +func (c *Client) DeleteMetric(ctx context.Context, metricID string) error { + return c.mClient.DeleteLogMetric(ctx, &logpb.DeleteLogMetricRequest{ + MetricName: c.metricPath(metricID), + }) +} + +// Metric gets a logs-based metric. +// The provided metric ID is the metric identifier. For example, "severe_errors". +// Requires ReadScope or AdminScope. +func (c *Client) Metric(ctx context.Context, metricID string) (*Metric, error) { + lm, err := c.mClient.GetLogMetric(ctx, &logpb.GetLogMetricRequest{ + MetricName: c.metricPath(metricID), + }) + if err != nil { + return nil, err + } + return fromLogMetric(lm), nil +} + +// UpdateMetric creates a logs-based metric if it does not exist, or updates an +// existing one. +func (c *Client) UpdateMetric(ctx context.Context, m *Metric) error { + _, err := c.mClient.UpdateLogMetric(ctx, &logpb.UpdateLogMetricRequest{ + MetricName: c.metricPath(m.ID), + Metric: toLogMetric(m), + }) + return err +} + +func (c *Client) metricPath(metricID string) string { + return fmt.Sprintf("%s/metrics/%s", c.parent(), metricID) +} + +// Metrics returns a MetricIterator for iterating over all Metrics in the Client's project. +// Requires ReadScope or AdminScope. +func (c *Client) Metrics(ctx context.Context) *MetricIterator { + it := &MetricIterator{ + it: c.mClient.ListLogMetrics(ctx, &logpb.ListLogMetricsRequest{Parent: c.parent()}), + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo( + it.fetch, + func() int { return len(it.items) }, + func() interface{} { b := it.items; it.items = nil; return b }) + return it +} + +// A MetricIterator iterates over Metrics. 
+type MetricIterator struct { + it *vkit.LogMetricIterator + pageInfo *iterator.PageInfo + nextFunc func() error + items []*Metric +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *MetricIterator) PageInfo() *iterator.PageInfo { return it.pageInfo } + +// Next returns the next result. Its second return value is Done if there are +// no more results. Once Next returns Done, all subsequent calls will return +// Done. +func (it *MetricIterator) Next() (*Metric, error) { + if err := it.nextFunc(); err != nil { + return nil, err + } + item := it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *MetricIterator) fetch(pageSize int, pageToken string) (string, error) { + return iterFetch(pageSize, pageToken, it.it.PageInfo(), func() error { + item, err := it.it.Next() + if err != nil { + return err + } + it.items = append(it.items, fromLogMetric(item)) + return nil + }) +} + +func toLogMetric(m *Metric) *logpb.LogMetric { + return &logpb.LogMetric{ + Name: m.ID, + Description: m.Description, + Filter: m.Filter, + } +} + +func fromLogMetric(lm *logpb.LogMetric) *Metric { + return &Metric{ + ID: lm.Name, + Description: lm.Description, + Filter: lm.Filter, + } +} diff --git a/vendor/cloud.google.com/go/logging/logadmin/metrics_test.go b/vendor/cloud.google.com/go/logging/logadmin/metrics_test.go new file mode 100644 index 00000000..2b3e3153 --- /dev/null +++ b/vendor/cloud.google.com/go/logging/logadmin/metrics_test.go @@ -0,0 +1,141 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package logadmin + +import ( + "log" + "reflect" + "testing" + + ltesting "cloud.google.com/go/logging/internal/testing" + "golang.org/x/net/context" + "google.golang.org/api/iterator" + itesting "google.golang.org/api/iterator/testing" +) + +const testMetricIDPrefix = "GO-CLIENT-TEST-METRIC" + +// Initializes the tests before they run. +func initMetrics(ctx context.Context) { + // Clean up from aborted tests. + var IDs []string + it := client.Metrics(ctx) +loop: + for { + m, err := it.Next() + switch err { + case nil: + IDs = append(IDs, m.ID) + case iterator.Done: + break loop + default: + log.Printf("cleanupMetrics: %v", err) + return + } + } + for _, mID := range ltesting.ExpiredUniqueIDs(IDs, testMetricIDPrefix) { + client.DeleteMetric(ctx, mID) + } +} + +func TestCreateDeleteMetric(t *testing.T) { + ctx := context.Background() + metric := &Metric{ + ID: ltesting.UniqueID(testMetricIDPrefix), + Description: "DESC", + Filter: "FILTER", + } + if err := client.CreateMetric(ctx, metric); err != nil { + t.Fatal(err) + } + defer client.DeleteMetric(ctx, metric.ID) + + got, err := client.Metric(ctx, metric.ID) + if err != nil { + t.Fatal(err) + } + if want := metric; !reflect.DeepEqual(got, want) { + t.Errorf("got %+v, want %+v", got, want) + } + + if err := client.DeleteMetric(ctx, metric.ID); err != nil { + t.Fatal(err) + } + + if _, err := client.Metric(ctx, metric.ID); err == nil { + t.Fatal("got no error, expected one") + } +} + +func TestUpdateMetric(t *testing.T) { + ctx := context.Background() + metric := &Metric{ + ID: 
ltesting.UniqueID(testMetricIDPrefix), + Description: "DESC", + Filter: "FILTER", + } + + // Updating a non-existent metric creates a new one. + if err := client.UpdateMetric(ctx, metric); err != nil { + t.Fatal(err) + } + defer client.DeleteMetric(ctx, metric.ID) + got, err := client.Metric(ctx, metric.ID) + if err != nil { + t.Fatal(err) + } + if want := metric; !reflect.DeepEqual(got, want) { + t.Errorf("got %+v, want %+v", got, want) + } + + // Updating an existing metric changes it. + metric.Description = "CHANGED" + if err := client.UpdateMetric(ctx, metric); err != nil { + t.Fatal(err) + } + got, err = client.Metric(ctx, metric.ID) + if err != nil { + t.Fatal(err) + } + if want := metric; !reflect.DeepEqual(got, want) { + t.Errorf("got %+v, want %+v", got, want) + } +} + +func TestListMetrics(t *testing.T) { + ctx := context.Background() + + var metrics []*Metric + for i := 0; i < 10; i++ { + metrics = append(metrics, &Metric{ + ID: ltesting.UniqueID(testMetricIDPrefix), + Description: "DESC", + Filter: "FILTER", + }) + } + for _, m := range metrics { + if err := client.CreateMetric(ctx, m); err != nil { + t.Fatalf("Create(%q): %v", m.ID, err) + } + defer client.DeleteMetric(ctx, m.ID) + } + + msg, ok := itesting.TestIterator(metrics, + func() interface{} { return client.Metrics(ctx) }, + func(it interface{}) (interface{}, error) { return it.(*MetricIterator).Next() }) + if !ok { + t.Fatal(msg) + } +} diff --git a/vendor/cloud.google.com/go/logging/logadmin/resources.go b/vendor/cloud.google.com/go/logging/logadmin/resources.go new file mode 100644 index 00000000..79e8fdbc --- /dev/null +++ b/vendor/cloud.google.com/go/logging/logadmin/resources.go @@ -0,0 +1,74 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package logadmin

import (
	vkit "cloud.google.com/go/logging/apiv2"
	"golang.org/x/net/context"
	"google.golang.org/api/iterator"
	mrpb "google.golang.org/genproto/googleapis/api/monitoredres"
	logpb "google.golang.org/genproto/googleapis/logging/v2"
)

// ResourceDescriptors returns a ResourceDescriptorIterator
// for iterating over MonitoredResourceDescriptors. Requires ReadScope or AdminScope.
// See https://cloud.google.com/logging/docs/api/v2/#monitored-resources for an explanation of
// monitored resources.
// See https://cloud.google.com/logging/docs/api/v2/resource-list for a list of monitored resources.
func (c *Client) ResourceDescriptors(ctx context.Context) *ResourceDescriptorIterator {
	it := &ResourceDescriptorIterator{
		// An empty request lists descriptors for all resource types.
		it: c.lClient.ListMonitoredResourceDescriptors(ctx,
			&logpb.ListMonitoredResourceDescriptorsRequest{}),
	}
	// Hook into the standard pagination machinery: fetch pulls one page into
	// it.items; the closures report buffered count and hand off the buffer.
	it.pageInfo, it.nextFunc = iterator.NewPageInfo(
		it.fetch,
		func() int { return len(it.items) },
		func() interface{} { b := it.items; it.items = nil; return b })
	return it
}

// ResourceDescriptorIterator is an iterator over MonitoredResourceDescriptors.
type ResourceDescriptorIterator struct {
	it       *vkit.MonitoredResourceDescriptorIterator // underlying RPC iterator
	pageInfo *iterator.PageInfo
	nextFunc func() error
	items    []*mrpb.MonitoredResourceDescriptor // buffered results not yet returned
}

// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
func (it *ResourceDescriptorIterator) PageInfo() *iterator.PageInfo { return it.pageInfo }

// Next returns the next result. Its second return value is Done if there are
// no more results. Once Next returns Done, all subsequent calls will return
// Done.
func (it *ResourceDescriptorIterator) Next() (*mrpb.MonitoredResourceDescriptor, error) {
	if err := it.nextFunc(); err != nil {
		return nil, err
	}
	item := it.items[0]
	it.items = it.items[1:]
	return item, nil
}

// fetch retrieves one page of descriptors from the underlying iterator.
// Descriptors are buffered as-is; no conversion is needed.
func (it *ResourceDescriptorIterator) fetch(pageSize int, pageToken string) (string, error) {
	return iterFetch(pageSize, pageToken, it.it.PageInfo(), func() error {
		item, err := it.it.Next()
		if err != nil {
			return err
		}
		it.items = append(it.items, item)
		return nil
	})
}

// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package logadmin

import (
	"testing"

	"golang.org/x/net/context"
	"google.golang.org/api/iterator"
)

func TestMonitoredResourceDescriptors(t *testing.T) {
	// We can't create MonitoredResourceDescriptors, and there is no guarantee
	// about what the service will return. So we just check that the result is
	// non-empty.
+ it := client.ResourceDescriptors(context.Background()) + n := 0 +loop: + for { + _, err := it.Next() + switch err { + case nil: + n++ + case iterator.Done: + break loop + default: + t.Fatal(err) + } + } + if n == 0 { + t.Fatal("Next: got no MetricResourceDescriptors, expected at least one") + } + // TODO(jba) test pagination. +} diff --git a/vendor/cloud.google.com/go/logging/logadmin/sinks.go b/vendor/cloud.google.com/go/logging/logadmin/sinks.go new file mode 100644 index 00000000..588c7afd --- /dev/null +++ b/vendor/cloud.google.com/go/logging/logadmin/sinks.go @@ -0,0 +1,169 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package logadmin + +import ( + "fmt" + + vkit "cloud.google.com/go/logging/apiv2" + "golang.org/x/net/context" + "google.golang.org/api/iterator" + logpb "google.golang.org/genproto/googleapis/logging/v2" +) + +// Sink describes a sink used to export log entries outside Stackdriver +// Logging. Incoming log entries matching a filter are exported to a +// destination (a Cloud Storage bucket, BigQuery dataset or Cloud Pub/Sub +// topic). +// +// For more information, see https://cloud.google.com/logging/docs/export/using_exported_logs. +// (The Sinks in this package are what the documentation refers to as "project sinks".) +type Sink struct { + // ID is a client-assigned sink identifier. Example: + // "my-severe-errors-to-pubsub". 
+ // Sink identifiers are limited to 1000 characters + // and can include only the following characters: A-Z, a-z, + // 0-9, and the special characters "_-.". + ID string + + // Destination is the export destination. See + // https://cloud.google.com/logging/docs/api/tasks/exporting-logs. + // Examples: "storage.googleapis.com/a-bucket", + // "bigquery.googleapis.com/projects/a-project-id/datasets/a-dataset". + Destination string + + // Filter optionally specifies an advanced logs filter (see + // https://cloud.google.com/logging/docs/view/advanced_filters) that + // defines the log entries to be exported. Example: "logName:syslog AND + // severity>=ERROR". If omitted, all entries are returned. + Filter string +} + +// CreateSink creates a Sink. It returns an error if the Sink already exists. +// Requires AdminScope. +func (c *Client) CreateSink(ctx context.Context, sink *Sink) (*Sink, error) { + ls, err := c.sClient.CreateSink(ctx, &logpb.CreateSinkRequest{ + Parent: c.parent(), + Sink: toLogSink(sink), + }) + if err != nil { + fmt.Printf("Sink: %+v\n", toLogSink(sink)) + return nil, err + } + return fromLogSink(ls), nil +} + +// DeleteSink deletes a sink. The provided sinkID is the sink's identifier, such as +// "my-severe-errors-to-pubsub". +// Requires AdminScope. +func (c *Client) DeleteSink(ctx context.Context, sinkID string) error { + return c.sClient.DeleteSink(ctx, &logpb.DeleteSinkRequest{ + SinkName: c.sinkPath(sinkID), + }) +} + +// Sink gets a sink. The provided sinkID is the sink's identifier, such as +// "my-severe-errors-to-pubsub". +// Requires ReadScope or AdminScope. +func (c *Client) Sink(ctx context.Context, sinkID string) (*Sink, error) { + ls, err := c.sClient.GetSink(ctx, &logpb.GetSinkRequest{ + SinkName: c.sinkPath(sinkID), + }) + if err != nil { + return nil, err + } + return fromLogSink(ls), nil +} + +// UpdateSink updates an existing Sink, or creates a new one if the Sink doesn't exist. +// Requires AdminScope. 
+func (c *Client) UpdateSink(ctx context.Context, sink *Sink) (*Sink, error) { + ls, err := c.sClient.UpdateSink(ctx, &logpb.UpdateSinkRequest{ + SinkName: c.sinkPath(sink.ID), + Sink: toLogSink(sink), + }) + if err != nil { + return nil, err + } + return fromLogSink(ls), err +} + +func (c *Client) sinkPath(sinkID string) string { + return fmt.Sprintf("%s/sinks/%s", c.parent(), sinkID) +} + +// Sinks returns a SinkIterator for iterating over all Sinks in the Client's project. +// Requires ReadScope or AdminScope. +func (c *Client) Sinks(ctx context.Context) *SinkIterator { + it := &SinkIterator{ + it: c.sClient.ListSinks(ctx, &logpb.ListSinksRequest{Parent: c.parent()}), + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo( + it.fetch, + func() int { return len(it.items) }, + func() interface{} { b := it.items; it.items = nil; return b }) + return it +} + +// A SinkIterator iterates over Sinks. +type SinkIterator struct { + it *vkit.LogSinkIterator + pageInfo *iterator.PageInfo + nextFunc func() error + items []*Sink +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *SinkIterator) PageInfo() *iterator.PageInfo { return it.pageInfo } + +// Next returns the next result. Its second return value is Done if there are +// no more results. Once Next returns Done, all subsequent calls will return +// Done. 
+func (it *SinkIterator) Next() (*Sink, error) { + if err := it.nextFunc(); err != nil { + return nil, err + } + item := it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *SinkIterator) fetch(pageSize int, pageToken string) (string, error) { + return iterFetch(pageSize, pageToken, it.it.PageInfo(), func() error { + item, err := it.it.Next() + if err != nil { + return err + } + it.items = append(it.items, fromLogSink(item)) + return nil + }) +} + +func toLogSink(s *Sink) *logpb.LogSink { + return &logpb.LogSink{ + Name: s.ID, + Destination: s.Destination, + Filter: s.Filter, + OutputVersionFormat: logpb.LogSink_V2, + } +} + +func fromLogSink(ls *logpb.LogSink) *Sink { + return &Sink{ + ID: ls.Name, + Destination: ls.Destination, + Filter: ls.Filter, + } +} diff --git a/vendor/cloud.google.com/go/logging/logadmin/sinks_test.go b/vendor/cloud.google.com/go/logging/logadmin/sinks_test.go new file mode 100644 index 00000000..64869ec6 --- /dev/null +++ b/vendor/cloud.google.com/go/logging/logadmin/sinks_test.go @@ -0,0 +1,218 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// TODO(jba): document in CONTRIBUTING.md that service account must be given "Logs Configuration Writer" IAM role for sink tests to pass. +// TODO(jba): [cont] (1) From top left menu, go to IAM & Admin. (2) In Roles dropdown for acct, select Logging > Logs Configuration Writer. (3) Save. 
// TODO(jba): Also, cloud-logs@google.com must have Owner permission on the GCS bucket named for the test project.

package logadmin

import (
	"log"
	"reflect"
	"testing"

	"cloud.google.com/go/internal/testutil"
	ltesting "cloud.google.com/go/logging/internal/testing"
	"cloud.google.com/go/storage"
	"golang.org/x/net/context"
	"google.golang.org/api/iterator"
	itesting "google.golang.org/api/iterator/testing"
	"google.golang.org/api/option"
)

const testSinkIDPrefix = "GO-CLIENT-TEST-SINK"

// testFilter is empty so test sinks match all log entries.
const testFilter = ""

// testSinkDestination is the shared GCS destination for all sink tests;
// it is assigned by initSinks before the tests run.
var testSinkDestination string

// Called just before TestMain calls m.Run.
// Returns a cleanup function to be called after the tests finish.
func initSinks(ctx context.Context) func() {
	// Create a unique GCS bucket so concurrent tests don't interfere with each other.
	testBucketPrefix := testProjectID + "-log-sink"
	testBucket := ltesting.UniqueID(testBucketPrefix)
	testSinkDestination = "storage.googleapis.com/" + testBucket
	var storageClient *storage.Client
	if integrationTest {
		// Create a unique bucket as a sink destination, and give the cloud logging account
		// owner right.
		ts := testutil.TokenSource(ctx, storage.ScopeFullControl)
		var err error
		storageClient, err = storage.NewClient(ctx, option.WithTokenSource(ts))
		if err != nil {
			log.Fatalf("new storage client: %v", err)
		}
		bucket := storageClient.Bucket(testBucket)
		if err := bucket.Create(ctx, testProjectID, nil); err != nil {
			log.Fatalf("creating storage bucket %q: %v", testBucket, err)
		}
		// The logging service exports through this account, so it must own the bucket.
		if err := bucket.ACL().Set(ctx, "group-cloud-logs@google.com", storage.RoleOwner); err != nil {
			log.Fatalf("setting owner role: %v", err)
		}
	}
	// Clean up from aborted tests.
	for _, sID := range ltesting.ExpiredUniqueIDs(sinkIDs(ctx), testSinkIDPrefix) {
		client.DeleteSink(ctx, sID) // ignore error
	}
	if integrationTest {
		for _, bn := range ltesting.ExpiredUniqueIDs(bucketNames(ctx, storageClient), testBucketPrefix) {
			storageClient.Bucket(bn).Delete(ctx) // ignore error
		}
		// The returned cleanup deletes this run's bucket and closes the storage client.
		return func() {
			if err := storageClient.Bucket(testBucket).Delete(ctx); err != nil {
				log.Printf("deleting %q: %v", testBucket, err)
			}
			storageClient.Close()
		}
	}
	return func() {}
}

// Collect all sink IDs for the test project.
func sinkIDs(ctx context.Context) []string {
	var IDs []string
	it := client.Sinks(ctx)
loop:
	for {
		s, err := it.Next()
		switch err {
		case nil:
			IDs = append(IDs, s.ID)
		case iterator.Done:
			break loop
		default:
			// Listing failures are logged, not fatal: cleanup is best-effort.
			log.Printf("listing sinks: %v", err)
			break loop
		}
	}
	return IDs
}

// Collect the name of all buckets for the test project.
func bucketNames(ctx context.Context, client *storage.Client) []string {
	var names []string
	it := client.Buckets(ctx, testProjectID)
loop:
	for {
		b, err := it.Next()
		switch err {
		case nil:
			names = append(names, b.Name)
		case iterator.Done:
			break loop
		default:
			// Listing failures are logged, not fatal: cleanup is best-effort.
			log.Printf("listing buckets: %v", err)
			break loop
		}
	}
	return names
}

// TestCreateDeleteSink exercises the create/get/delete lifecycle of a sink.
func TestCreateDeleteSink(t *testing.T) {
	ctx := context.Background()
	sink := &Sink{
		ID:          ltesting.UniqueID(testSinkIDPrefix),
		Destination: testSinkDestination,
		Filter:      testFilter,
	}
	got, err := client.CreateSink(ctx, sink)
	if err != nil {
		t.Fatal(err)
	}
	defer client.DeleteSink(ctx, sink.ID)
	if want := sink; !reflect.DeepEqual(got, want) {
		t.Errorf("got %+v, want %+v", got, want)
	}
	got, err = client.Sink(ctx, sink.ID)
	if err != nil {
		t.Fatal(err)
	}
	if want := sink; !reflect.DeepEqual(got, want) {
		t.Errorf("got %+v, want %+v", got, want)
	}

	if err := client.DeleteSink(ctx, sink.ID); err != nil {
		t.Fatal(err)
	}

	if _, err := client.Sink(ctx, sink.ID); err == nil {
		t.Fatal("got no error, expected one")
	}
}

// TestUpdateSink checks that UpdateSink both creates and modifies sinks.
func TestUpdateSink(t *testing.T) {
	ctx := context.Background()
	sink := &Sink{
		ID:          ltesting.UniqueID(testSinkIDPrefix),
		Destination: testSinkDestination,
		Filter:      testFilter,
	}

	// Updating a non-existent sink creates a new one.
	got, err := client.UpdateSink(ctx, sink)
	if err != nil {
		t.Fatal(err)
	}
	defer client.DeleteSink(ctx, sink.ID)
	if want := sink; !reflect.DeepEqual(got, want) {
		t.Errorf("got %+v, want %+v", got, want)
	}
	got, err = client.Sink(ctx, sink.ID)
	if err != nil {
		t.Fatal(err)
	}
	if want := sink; !reflect.DeepEqual(got, want) {
		t.Errorf("got %+v, want %+v", got, want)
	}

	// Updating an existing sink changes it.
	sink.Filter = ""
	if _, err := client.UpdateSink(ctx, sink); err != nil {
		t.Fatal(err)
	}
	got, err = client.Sink(ctx, sink.ID)
	if err != nil {
		t.Fatal(err)
	}
	if want := sink; !reflect.DeepEqual(got, want) {
		t.Errorf("got %+v, want %+v", got, want)
	}
}

// TestListSinks creates several sinks and verifies iteration over them.
func TestListSinks(t *testing.T) {
	ctx := context.Background()
	var sinks []*Sink
	for i := 0; i < 4; i++ {
		sinks = append(sinks, &Sink{
			ID:          ltesting.UniqueID(testSinkIDPrefix),
			Destination: testSinkDestination,
			Filter:      testFilter,
		})
	}
	for _, s := range sinks {
		if _, err := client.CreateSink(ctx, s); err != nil {
			t.Fatalf("Create(%q): %v", s.ID, err)
		}
		defer client.DeleteSink(ctx, s.ID)
	}

	msg, ok := itesting.TestIterator(sinks,
		func() interface{} { return client.Sinks(ctx) },
		func(it interface{}) (interface{}, error) { return it.(*SinkIterator).Next() })
	if !ok {
		t.Fatal(msg)
	}
}

// Copyright 2016 Google Inc. All Rights Reserved.
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// API/gRPC features intentionally missing from this client: +// - You cannot have the server pick the time of the entry. This client +// always sends a time. +// - There is no way to provide a protocol buffer payload. +// - No support for the "partial success" feature when writing log entries. + +// TODO(jba): test whether forward-slash characters in the log ID must be URL-encoded. +// These features are missing now, but will likely be added: +// - There is no way to specify CallOptions. + +package logging + +import ( + "encoding/json" + "errors" + "fmt" + "log" + "math" + "net/http" + "strconv" + "strings" + "sync" + "time" + + "cloud.google.com/go/internal/version" + vkit "cloud.google.com/go/logging/apiv2" + "cloud.google.com/go/logging/internal" + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes" + structpb "github.com/golang/protobuf/ptypes/struct" + tspb "github.com/golang/protobuf/ptypes/timestamp" + "golang.org/x/net/context" + "google.golang.org/api/option" + "google.golang.org/api/support/bundler" + mrpb "google.golang.org/genproto/googleapis/api/monitoredres" + logtypepb "google.golang.org/genproto/googleapis/logging/type" + logpb "google.golang.org/genproto/googleapis/logging/v2" +) + +const ( + // Scope for reading from the logging service. + ReadScope = "https://www.googleapis.com/auth/logging.read" + + // Scope for writing to the logging service. 
	// WriteScope is the OAuth2 scope for writing to the logging service.
	WriteScope = "https://www.googleapis.com/auth/logging.write"

	// AdminScope is the OAuth2 scope for administrative actions on the logging service.
	AdminScope = "https://www.googleapis.com/auth/logging.admin"
)

const (
	// defaultErrorCapacity is the capacity of the channel used to deliver
	// errors to the OnError function.
	defaultErrorCapacity = 10

	// DefaultDelayThreshold is the default value for the DelayThreshold LoggerOption.
	DefaultDelayThreshold = time.Second

	// DefaultEntryCountThreshold is the default value for the EntryCountThreshold LoggerOption.
	DefaultEntryCountThreshold = 1000

	// DefaultEntryByteThreshold is the default value for the EntryByteThreshold LoggerOption.
	DefaultEntryByteThreshold = 1 << 20 // 1MiB

	// DefaultBufferedByteLimit is the default value for the BufferedByteLimit LoggerOption.
	DefaultBufferedByteLimit = 1 << 30 // 1GiB
)

// now is a hook for testing: tests can substitute a fake clock.
var now = time.Now

// ErrOverflow signals that the number of buffered entries for a Logger
// exceeds its BufferLimit.
var ErrOverflow = errors.New("logging: log entry overflowed buffer limits")

// Client is a Logging client. A Client is associated with a single Cloud project.
type Client struct {
	client    *vkit.Client   // client for the logging service
	projectID string         // project this client writes to
	errc      chan error     // should be buffered to minimize dropped errors
	donec     chan struct{}  // closed on Client.Close to close Logger bundlers
	loggers   sync.WaitGroup // so we can wait for loggers to close
	closed    bool

	// OnError is called when an error occurs in a call to Log or Flush. The
	// error may be due to an invalid Entry, an overflow because BufferLimit
	// was reached (in which case the error will be ErrOverflow) or an error
	// communicating with the logging service. OnError is called with errors
	// from all Loggers. It is never called concurrently. OnError is expected
	// to return quickly; if errors occur while OnError is running, some may
	// not be reported. The default behavior is to call log.Printf.
	//
	// This field should be set only once, before any method of Client is called.
	OnError func(err error)
}

// NewClient returns a new logging client associated with the provided project ID.
//
// By default NewClient uses WriteScope. To use a different scope, call
// NewClient using a WithScopes option (see https://godoc.org/google.golang.org/api/option#WithScopes).
func NewClient(ctx context.Context, projectID string, opts ...option.ClientOption) (*Client, error) {
	// Check for '/' in project ID to reserve the ability to support various owning resources,
	// in the form "{Collection}/{Name}", for instance "organizations/my-org".
	if strings.ContainsRune(projectID, '/') {
		return nil, errors.New("logging: project ID contains '/'")
	}
	// Defaults are prepended so that caller-supplied options take precedence.
	opts = append([]option.ClientOption{
		option.WithEndpoint(internal.ProdAddr),
		option.WithScopes(WriteScope),
	}, opts...)
	c, err := vkit.NewClient(ctx, opts...)
	if err != nil {
		return nil, err
	}
	c.SetGoogleClientInfo("gccl", version.Repo)
	client := &Client{
		client:    c,
		projectID: projectID,
		errc:      make(chan error, defaultErrorCapacity), // create a small buffer for errors
		donec:     make(chan struct{}),
		OnError:   func(e error) { log.Printf("logging client: %v", e) },
	}
	// Deliver errors to OnError from a single goroutine so the user's
	// function is never called concurrently.
	go func() {
		for err := range client.errc {
			// This reference to OnError is memory-safe if the user sets OnError before
			// calling any client methods. The reference happens before the first read from
			// client.errc, which happens before the first write to client.errc, which
			// happens before any call, which happens before the user sets OnError.
			if fn := client.OnError; fn != nil {
				fn(err)
			} else {
				log.Printf("logging (project ID %q): %v", projectID, err)
			}
		}
	}()
	return client, nil
}

// parent returns the string used in many RPCs to denote the parent resource of the log.
+func (c *Client) parent() string { + return "projects/" + c.projectID +} + +var unixZeroTimestamp *tspb.Timestamp + +func init() { + var err error + unixZeroTimestamp, err = ptypes.TimestampProto(time.Unix(0, 0)) + if err != nil { + panic(err) + } +} + +// Ping reports whether the client's connection to the logging service and the +// authentication configuration are valid. To accomplish this, Ping writes a +// log entry "ping" to a log named "ping". +func (c *Client) Ping(ctx context.Context) error { + ent := &logpb.LogEntry{ + Payload: &logpb.LogEntry_TextPayload{"ping"}, + Timestamp: unixZeroTimestamp, // Identical timestamps and insert IDs are both + InsertId: "ping", // necessary for the service to dedup these entries. + } + _, err := c.client.WriteLogEntries(ctx, &logpb.WriteLogEntriesRequest{ + LogName: internal.LogPath(c.parent(), "ping"), + Resource: &mrpb.MonitoredResource{Type: "global"}, + Entries: []*logpb.LogEntry{ent}, + }) + return err +} + +// A Logger is used to write log messages to a single log. It can be configured +// with a log ID, common monitored resource, and a set of common labels. +type Logger struct { + client *Client + logName string // "projects/{projectID}/logs/{logID}" + stdLoggers map[Severity]*log.Logger + bundler *bundler.Bundler + + // Options + commonResource *mrpb.MonitoredResource + commonLabels map[string]string +} + +// A LoggerOption is a configuration option for a Logger. +type LoggerOption interface { + set(*Logger) +} + +// CommonResource sets the monitored resource associated with all log entries +// written from a Logger. If not provided, a resource of type "global" is used. +// This value can be overridden by setting an Entry's Resource field. 
+func CommonResource(r *mrpb.MonitoredResource) LoggerOption { return commonResource{r} } + +type commonResource struct{ *mrpb.MonitoredResource } + +func (r commonResource) set(l *Logger) { l.commonResource = r.MonitoredResource } + +// CommonLabels are labels that apply to all log entries written from a Logger, +// so that you don't have to repeat them in each log entry's Labels field. If +// any of the log entries contains a (key, value) with the same key that is in +// CommonLabels, then the entry's (key, value) overrides the one in +// CommonLabels. +func CommonLabels(m map[string]string) LoggerOption { return commonLabels(m) } + +type commonLabels map[string]string + +func (c commonLabels) set(l *Logger) { l.commonLabels = c } + +// DelayThreshold is the maximum amount of time that an entry should remain +// buffered in memory before a call to the logging service is triggered. Larger +// values of DelayThreshold will generally result in fewer calls to the logging +// service, while increasing the risk that log entries will be lost if the +// process crashes. +// The default is DefaultDelayThreshold. +func DelayThreshold(d time.Duration) LoggerOption { return delayThreshold(d) } + +type delayThreshold time.Duration + +func (d delayThreshold) set(l *Logger) { l.bundler.DelayThreshold = time.Duration(d) } + +// EntryCountThreshold is the maximum number of entries that will be buffered +// in memory before a call to the logging service is triggered. Larger values +// will generally result in fewer calls to the logging service, while +// increasing both memory consumption and the risk that log entries will be +// lost if the process crashes. +// The default is DefaultEntryCountThreshold. 
+func EntryCountThreshold(n int) LoggerOption { return entryCountThreshold(n) } + +type entryCountThreshold int + +func (e entryCountThreshold) set(l *Logger) { l.bundler.BundleCountThreshold = int(e) } + +// EntryByteThreshold is the maximum number of bytes of entries that will be +// buffered in memory before a call to the logging service is triggered. See +// EntryCountThreshold for a discussion of the tradeoffs involved in setting +// this option. +// The default is DefaultEntryByteThreshold. +func EntryByteThreshold(n int) LoggerOption { return entryByteThreshold(n) } + +type entryByteThreshold int + +func (e entryByteThreshold) set(l *Logger) { l.bundler.BundleByteThreshold = int(e) } + +// EntryByteLimit is the maximum number of bytes of entries that will be sent +// in a single call to the logging service. This option limits the size of a +// single RPC payload, to account for network or service issues with large +// RPCs. If EntryByteLimit is smaller than EntryByteThreshold, the latter has +// no effect. +// The default is zero, meaning there is no limit. +func EntryByteLimit(n int) LoggerOption { return entryByteLimit(n) } + +type entryByteLimit int + +func (e entryByteLimit) set(l *Logger) { l.bundler.BundleByteLimit = int(e) } + +// BufferedByteLimit is the maximum number of bytes that the Logger will keep +// in memory before returning ErrOverflow. This option limits the total memory +// consumption of the Logger (but note that each Logger has its own, separate +// limit). It is possible to reach BufferedByteLimit even if it is larger than +// EntryByteThreshold or EntryByteLimit, because calls triggered by the latter +// two options may be enqueued (and hence occupying memory) while new log +// entries are being added. +// The default is DefaultBufferedByteLimit. 
+func BufferedByteLimit(n int) LoggerOption { return bufferedByteLimit(n) } + +type bufferedByteLimit int + +func (b bufferedByteLimit) set(l *Logger) { l.bundler.BufferedByteLimit = int(b) } + +// Logger returns a Logger that will write entries with the given log ID, such as +// "syslog". A log ID must be less than 512 characters long and can only +// include the following characters: upper and lower case alphanumeric +// characters: [A-Za-z0-9]; and punctuation characters: forward-slash, +// underscore, hyphen, and period. +func (c *Client) Logger(logID string, opts ...LoggerOption) *Logger { + l := &Logger{ + client: c, + logName: internal.LogPath(c.parent(), logID), + commonResource: &mrpb.MonitoredResource{Type: "global"}, + } + // TODO(jba): determine the right context for the bundle handler. + ctx := context.TODO() + l.bundler = bundler.NewBundler(&logpb.LogEntry{}, func(entries interface{}) { + l.writeLogEntries(ctx, entries.([]*logpb.LogEntry)) + }) + l.bundler.DelayThreshold = DefaultDelayThreshold + l.bundler.BundleCountThreshold = DefaultEntryCountThreshold + l.bundler.BundleByteThreshold = DefaultEntryByteThreshold + l.bundler.BufferedByteLimit = DefaultBufferedByteLimit + for _, opt := range opts { + opt.set(l) + } + + l.stdLoggers = map[Severity]*log.Logger{} + for s := range severityName { + l.stdLoggers[s] = log.New(severityWriter{l, s}, "", 0) + } + c.loggers.Add(1) + go func() { + defer c.loggers.Done() + <-c.donec + l.bundler.Stop() + }() + return l +} + +type severityWriter struct { + l *Logger + s Severity +} + +func (w severityWriter) Write(p []byte) (n int, err error) { + w.l.Log(Entry{ + Severity: w.s, + Payload: string(p), + }) + return len(p), nil +} + +// Close closes the client. +func (c *Client) Close() error { + if c.closed { + return nil + } + close(c.donec) // close Logger bundlers + c.loggers.Wait() // wait for all bundlers to flush and close + // Now there can be no more errors. 
+ close(c.errc) // terminate error goroutine + // Return only the first error. Since all clients share an underlying connection, + // Closes after the first always report a "connection is closing" error. + err := c.client.Close() + c.closed = true + return err +} + +// Severity is the severity of the event described in a log entry. These +// guideline severity levels are ordered, with numerically smaller levels +// treated as less severe than numerically larger levels. +type Severity int + +const ( + // Default means the log entry has no assigned severity level. + Default = Severity(logtypepb.LogSeverity_DEFAULT) + // Debug means debug or trace information. + Debug = Severity(logtypepb.LogSeverity_DEBUG) + // Info means routine information, such as ongoing status or performance. + Info = Severity(logtypepb.LogSeverity_INFO) + // Notice means normal but significant events, such as start up, shut down, or configuration. + Notice = Severity(logtypepb.LogSeverity_NOTICE) + // Warning means events that might cause problems. + Warning = Severity(logtypepb.LogSeverity_WARNING) + // Error means events that are likely to cause problems. + Error = Severity(logtypepb.LogSeverity_ERROR) + // Critical means events that cause more severe problems or brief outages. + Critical = Severity(logtypepb.LogSeverity_CRITICAL) + // Alert means a person must take an action immediately. + Alert = Severity(logtypepb.LogSeverity_ALERT) + // Emergency means one or more systems are unusable. + Emergency = Severity(logtypepb.LogSeverity_EMERGENCY) +) + +var severityName = map[Severity]string{ + Default: "Default", + Debug: "Debug", + Info: "Info", + Notice: "Notice", + Warning: "Warning", + Error: "Error", + Critical: "Critical", + Alert: "Alert", + Emergency: "Emergency", +} + +// String converts a severity level to a string. 
+func (v Severity) String() string { + // same as proto.EnumName + s, ok := severityName[v] + if ok { + return s + } + return strconv.Itoa(int(v)) +} + +// ParseSeverity returns the Severity whose name equals s, ignoring case. It +// returns Default if no Severity matches. +func ParseSeverity(s string) Severity { + sl := strings.ToLower(s) + for sev, name := range severityName { + if strings.ToLower(name) == sl { + return sev + } + } + return Default +} + +// Entry is a log entry. +// See https://cloud.google.com/logging/docs/view/logs_index for more about entries. +type Entry struct { + // Timestamp is the time of the entry. If zero, the current time is used. + Timestamp time.Time + + // Severity is the entry's severity level. + // The zero value is Default. + Severity Severity + + // Payload must be either a string or something that + // marshals via the encoding/json package to a JSON object + // (and not any other type of JSON value). + Payload interface{} + + // Labels optionally specifies key/value labels for the log entry. + // The Logger.Log method takes ownership of this map. See Logger.CommonLabels + // for more about labels. + Labels map[string]string + + // InsertID is a unique ID for the log entry. If you provide this field, + // the logging service considers other log entries in the same log with the + // same ID as duplicates which can be removed. If omitted, the logging + // service will generate a unique ID for this log entry. Note that because + // this client retries RPCs automatically, it is possible (though unlikely) + // that an Entry without an InsertID will be written more than once. + InsertID string + + // HTTPRequest optionally specifies metadata about the HTTP request + // associated with this log entry, if applicable. It is optional. + HTTPRequest *HTTPRequest + + // Operation optionally provides information about an operation associated + // with the log entry, if applicable. 
+ Operation *logpb.LogEntryOperation + + // LogName is the full log name, in the form + // "projects/{ProjectID}/logs/{LogID}". It is set by the client when + // reading entries. It is an error to set it when writing entries. + LogName string + + // Resource is the monitored resource associated with the entry. It is set + // by the client when reading entries. It is an error to set it when + // writing entries. + Resource *mrpb.MonitoredResource +} + +// HTTPRequest contains an http.Request as well as additional +// information about the request and its response. +type HTTPRequest struct { + // Request is the http.Request passed to the handler. + Request *http.Request + + // RequestSize is the size of the HTTP request message in bytes, including + // the request headers and the request body. + RequestSize int64 + + // Status is the response code indicating the status of the response. + // Examples: 200, 404. + Status int + + // ResponseSize is the size of the HTTP response message sent back to the client, in bytes, + // including the response headers and the response body. + ResponseSize int64 + + // Latency is the request processing latency on the server, from the time the request was + // received until the response was sent. + Latency time.Duration + + // RemoteIP is the IP address (IPv4 or IPv6) of the client that issued the + // HTTP request. Examples: "192.168.1.1", "FE80::0202:B3FF:FE1E:8329". + RemoteIP string + + // CacheHit reports whether an entity was served from cache (with or without + // validation). + CacheHit bool + + // CacheValidatedWithOriginServer reports whether the response was + // validated with the origin server before being served from cache. This + // field is only meaningful if CacheHit is true. 
+ CacheValidatedWithOriginServer bool +} + +func fromHTTPRequest(r *HTTPRequest) *logtypepb.HttpRequest { + if r == nil { + return nil + } + if r.Request == nil { + panic("HTTPRequest must have a non-nil Request") + } + u := *r.Request.URL + u.Fragment = "" + pb := &logtypepb.HttpRequest{ + RequestMethod: r.Request.Method, + RequestUrl: u.String(), + RequestSize: r.RequestSize, + Status: int32(r.Status), + ResponseSize: r.ResponseSize, + UserAgent: r.Request.UserAgent(), + RemoteIp: r.RemoteIP, // TODO(jba): attempt to parse http.Request.RemoteAddr? + Referer: r.Request.Referer(), + CacheHit: r.CacheHit, + CacheValidatedWithOriginServer: r.CacheValidatedWithOriginServer, + } + if r.Latency != 0 { + pb.Latency = ptypes.DurationProto(r.Latency) + } + return pb +} + +// toProtoStruct converts v, which must marshal into a JSON object, +// into a Google Struct proto. +func toProtoStruct(v interface{}) (*structpb.Struct, error) { + // Fast path: if v is already a *structpb.Struct, nothing to do. + if s, ok := v.(*structpb.Struct); ok { + return s, nil + } + // v is a Go struct that supports JSON marshalling. We want a Struct + // protobuf. Some day we may have a more direct way to get there, but right + // now the only way is to marshal the Go struct to JSON, unmarshal into a + // map, and then build the Struct proto from the map. 
+ jb, err := json.Marshal(v) + if err != nil { + return nil, fmt.Errorf("logging: json.Marshal: %v", err) + } + var m map[string]interface{} + err = json.Unmarshal(jb, &m) + if err != nil { + return nil, fmt.Errorf("logging: json.Unmarshal: %v", err) + } + return jsonMapToProtoStruct(m), nil +} + +func jsonMapToProtoStruct(m map[string]interface{}) *structpb.Struct { + fields := map[string]*structpb.Value{} + for k, v := range m { + fields[k] = jsonValueToStructValue(v) + } + return &structpb.Struct{Fields: fields} +} + +func jsonValueToStructValue(v interface{}) *structpb.Value { + switch x := v.(type) { + case bool: + return &structpb.Value{Kind: &structpb.Value_BoolValue{x}} + case float64: + return &structpb.Value{Kind: &structpb.Value_NumberValue{x}} + case string: + return &structpb.Value{Kind: &structpb.Value_StringValue{x}} + case nil: + return &structpb.Value{Kind: &structpb.Value_NullValue{}} + case map[string]interface{}: + return &structpb.Value{Kind: &structpb.Value_StructValue{jsonMapToProtoStruct(x)}} + case []interface{}: + var vals []*structpb.Value + for _, e := range x { + vals = append(vals, jsonValueToStructValue(e)) + } + return &structpb.Value{Kind: &structpb.Value_ListValue{&structpb.ListValue{vals}}} + default: + panic(fmt.Sprintf("bad type %T for JSON value", v)) + } +} + +// LogSync logs the Entry synchronously without any buffering. Because LogSync is slow +// and will block, it is intended primarily for debugging or critical errors. +// Prefer Log for most uses. +// TODO(jba): come up with a better name (LogNow?) or eliminate. +func (l *Logger) LogSync(ctx context.Context, e Entry) error { + ent, err := toLogEntry(e) + if err != nil { + return err + } + _, err = l.client.client.WriteLogEntries(ctx, &logpb.WriteLogEntriesRequest{ + LogName: l.logName, + Resource: l.commonResource, + Labels: l.commonLabels, + Entries: []*logpb.LogEntry{ent}, + }) + return err +} + +// Log buffers the Entry for output to the logging service. 
It never blocks. +func (l *Logger) Log(e Entry) { + ent, err := toLogEntry(e) + if err != nil { + l.error(err) + return + } + if err := l.bundler.Add(ent, proto.Size(ent)); err != nil { + l.error(err) + } +} + +// Flush blocks until all currently buffered log entries are sent. +func (l *Logger) Flush() { + l.bundler.Flush() +} + +func (l *Logger) writeLogEntries(ctx context.Context, entries []*logpb.LogEntry) { + req := &logpb.WriteLogEntriesRequest{ + LogName: l.logName, + Resource: l.commonResource, + Labels: l.commonLabels, + Entries: entries, + } + _, err := l.client.client.WriteLogEntries(ctx, req) + if err != nil { + l.error(err) + } +} + +// error puts the error on the client's error channel +// without blocking. +func (l *Logger) error(err error) { + select { + case l.client.errc <- err: + default: + } +} + +// StandardLogger returns a *log.Logger for the provided severity. +// +// This method is cheap. A single log.Logger is pre-allocated for each +// severity level in each Logger. Callers may mutate the returned log.Logger +// (for example by calling SetFlags or SetPrefix). 
+func (l *Logger) StandardLogger(s Severity) *log.Logger { return l.stdLoggers[s] } + +func trunc32(i int) int32 { + if i > math.MaxInt32 { + i = math.MaxInt32 + } + return int32(i) +} + +func toLogEntry(e Entry) (*logpb.LogEntry, error) { + if e.LogName != "" { + return nil, errors.New("logging: Entry.LogName should be not be set when writing") + } + t := e.Timestamp + if t.IsZero() { + t = now() + } + ts, err := ptypes.TimestampProto(t) + if err != nil { + return nil, err + } + ent := &logpb.LogEntry{ + Timestamp: ts, + Severity: logtypepb.LogSeverity(e.Severity), + InsertId: e.InsertID, + HttpRequest: fromHTTPRequest(e.HTTPRequest), + Operation: e.Operation, + Labels: e.Labels, + } + + switch p := e.Payload.(type) { + case string: + ent.Payload = &logpb.LogEntry_TextPayload{p} + default: + s, err := toProtoStruct(p) + if err != nil { + return nil, err + } + ent.Payload = &logpb.LogEntry_JsonPayload{s} + } + return ent, nil +} diff --git a/vendor/cloud.google.com/go/logging/logging_test.go b/vendor/cloud.google.com/go/logging/logging_test.go new file mode 100644 index 00000000..2b4e5b03 --- /dev/null +++ b/vendor/cloud.google.com/go/logging/logging_test.go @@ -0,0 +1,509 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// TODO(jba): test that OnError is getting called appropriately. 
+ +package logging_test + +import ( + "flag" + "fmt" + "log" + "os" + "reflect" + "strings" + "testing" + "time" + + gax "github.com/googleapis/gax-go" + + cinternal "cloud.google.com/go/internal" + "cloud.google.com/go/internal/testutil" + "cloud.google.com/go/logging" + "cloud.google.com/go/logging/internal" + ltesting "cloud.google.com/go/logging/internal/testing" + "cloud.google.com/go/logging/logadmin" + "golang.org/x/net/context" + "golang.org/x/oauth2" + "google.golang.org/api/iterator" + "google.golang.org/api/option" + mrpb "google.golang.org/genproto/googleapis/api/monitoredres" + "google.golang.org/grpc" +) + +const testLogIDPrefix = "GO-LOGGING-CLIENT/TEST-LOG" + +var ( + client *logging.Client + aclient *logadmin.Client + testProjectID string + testLogID string + testFilter string + errorc chan error + ctx context.Context + + // Adjust the fields of a FullEntry received from the production service + // before comparing it with the expected result. We can't correctly + // compare certain fields, like times or server-generated IDs. + clean func(*logging.Entry) + + // Create a new client with the given project ID. + newClients func(ctx context.Context, projectID string) (*logging.Client, *logadmin.Client) +) + +func testNow() time.Time { + return time.Unix(1000, 0) +} + +// If true, this test is using the production service, not a fake. +var integrationTest bool + +func TestMain(m *testing.M) { + flag.Parse() // needed for testing.Short() + ctx = context.Background() + testProjectID = testutil.ProjID() + errorc = make(chan error, 100) + if testProjectID == "" || testing.Short() { + integrationTest = false + if testProjectID != "" { + log.Print("Integration tests skipped in short mode (using fake instead)") + } + testProjectID = "PROJECT_ID" + clean = func(e *logging.Entry) { + // Remove the insert ID for consistency with the integration test. 
+ e.InsertID = "" + } + + addr, err := ltesting.NewServer() + if err != nil { + log.Fatalf("creating fake server: %v", err) + } + logging.SetNow(testNow) + + newClients = func(ctx context.Context, projectID string) (*logging.Client, *logadmin.Client) { + conn, err := grpc.Dial(addr, grpc.WithInsecure()) + if err != nil { + log.Fatalf("dialing %q: %v", addr, err) + } + c, err := logging.NewClient(ctx, projectID, option.WithGRPCConn(conn)) + if err != nil { + log.Fatalf("creating client for fake at %q: %v", addr, err) + } + ac, err := logadmin.NewClient(ctx, projectID, option.WithGRPCConn(conn)) + if err != nil { + log.Fatalf("creating client for fake at %q: %v", addr, err) + } + return c, ac + } + + } else { + integrationTest = true + clean = func(e *logging.Entry) { + // We cannot compare timestamps, so set them to the test time. + // Also, remove the insert ID added by the service. + e.Timestamp = testNow().UTC() + e.InsertID = "" + } + ts := testutil.TokenSource(ctx, logging.AdminScope) + if ts == nil { + log.Fatal("The project key must be set. 
See CONTRIBUTING.md for details") + } + log.Printf("running integration tests with project %s", testProjectID) + newClients = func(ctx context.Context, projectID string) (*logging.Client, *logadmin.Client) { + c, err := logging.NewClient(ctx, projectID, option.WithTokenSource(ts)) + if err != nil { + log.Fatalf("creating prod client: %v", err) + } + ac, err := logadmin.NewClient(ctx, projectID, option.WithTokenSource(ts)) + if err != nil { + log.Fatalf("creating prod client: %v", err) + } + return c, ac + } + + } + client, aclient = newClients(ctx, testProjectID) + client.OnError = func(e error) { errorc <- e } + + exit := m.Run() + client.Close() + os.Exit(exit) +} + +func initLogs(ctx context.Context) { + testLogID = ltesting.UniqueID(testLogIDPrefix) + testFilter = fmt.Sprintf(`logName = "projects/%s/logs/%s"`, testProjectID, + strings.Replace(testLogID, "/", "%2F", -1)) + // TODO(jba): Clean up from previous aborted tests by deleting old logs; requires ListLogs RPC. +} + +// Testing of Logger.Log is done in logadmin_test.go, TestEntries. + +func TestLogSync(t *testing.T) { + initLogs(ctx) // Generate new testLogID + ctx := context.Background() + lg := client.Logger(testLogID) + err := lg.LogSync(ctx, logging.Entry{Payload: "hello"}) + if err != nil { + t.Fatal(err) + } + err = lg.LogSync(ctx, logging.Entry{Payload: "goodbye"}) + if err != nil { + t.Fatal(err) + } + // Allow overriding the MonitoredResource. 
+ err = lg.LogSync(ctx, logging.Entry{Payload: "mr", Resource: &mrpb.MonitoredResource{Type: "global"}}) + if err != nil { + t.Fatal(err) + } + + want := []*logging.Entry{ + entryForTesting("hello"), + entryForTesting("goodbye"), + entryForTesting("mr"), + } + var got []*logging.Entry + ok := waitFor(func() bool { + got, err = allTestLogEntries(ctx) + if err != nil { + t.Log("fetching log entries: ", err) + return false + } + return len(got) == len(want) + }) + if !ok { + t.Fatalf("timed out; got: %d, want: %d\n", len(got), len(want)) + } + if msg, ok := compareEntries(got, want); !ok { + t.Error(msg) + } +} + +func TestLogAndEntries(t *testing.T) { + initLogs(ctx) // Generate new testLogID + ctx := context.Background() + payloads := []string{"p1", "p2", "p3", "p4", "p5"} + lg := client.Logger(testLogID) + for _, p := range payloads { + // Use the insert ID to guarantee iteration order. + lg.Log(logging.Entry{Payload: p, InsertID: p}) + } + lg.Flush() + var want []*logging.Entry + for _, p := range payloads { + want = append(want, entryForTesting(p)) + } + var got []*logging.Entry + ok := waitFor(func() bool { + var err error + got, err = allTestLogEntries(ctx) + if err != nil { + t.Log("fetching log entries: ", err) + return false + } + return len(got) == len(want) + }) + if !ok { + t.Fatalf("timed out; got: %d, want: %d\n", len(got), len(want)) + } + if msg, ok := compareEntries(got, want); !ok { + t.Error(msg) + } +} + +// compareEntries compares most fields list of Entries against expected. 
compareEntries does not compare: +// - HTTPRequest +// - Operation +// - Resource +func compareEntries(got, want []*logging.Entry) (string, bool) { + if len(got) != len(want) { + return fmt.Sprintf("got %d entries, want %d", len(got), len(want)), false + } + for i := range got { + if !compareEntry(got[i], want[i]) { + return fmt.Sprintf("#%d:\ngot %+v\nwant %+v", i, got[i], want[i]), false + } + } + return "", true +} + +func compareEntry(got, want *logging.Entry) bool { + if got.Timestamp.Unix() != want.Timestamp.Unix() { + return false + } + + if got.Severity != want.Severity { + return false + } + + if !reflect.DeepEqual(got.Payload, want.Payload) { + return false + } + + if !reflect.DeepEqual(got.Labels, want.Labels) { + return false + } + + if got.InsertID != want.InsertID { + return false + } + + if got.LogName != want.LogName { + return false + } + + return true +} + +func entryForTesting(payload interface{}) *logging.Entry { + return &logging.Entry{ + Timestamp: testNow().UTC(), + Payload: payload, + LogName: "projects/" + testProjectID + "/logs/" + testLogID, + Resource: &mrpb.MonitoredResource{Type: "global", Labels: map[string]string{"project_id": testProjectID}}, + } +} + +func countLogEntries(ctx context.Context, filter string) int { + it := aclient.Entries(ctx, logadmin.Filter(filter)) + n := 0 + for { + _, err := it.Next() + if err == iterator.Done { + return n + } + if err != nil { + log.Fatalf("counting log entries: %v", err) + } + n++ + } +} + +func allTestLogEntries(ctx context.Context) ([]*logging.Entry, error) { + var es []*logging.Entry + it := aclient.Entries(ctx, logadmin.Filter(testFilter)) + for { + e, err := cleanNext(it) + switch err { + case nil: + es = append(es, e) + case iterator.Done: + return es, nil + default: + return nil, err + } + } +} + +func cleanNext(it *logadmin.EntryIterator) (*logging.Entry, error) { + e, err := it.Next() + if err != nil { + return nil, err + } + clean(e) + return e, nil +} + +func TestStandardLogger(t 
*testing.T) { + initLogs(ctx) // Generate new testLogID + ctx := context.Background() + lg := client.Logger(testLogID) + slg := lg.StandardLogger(logging.Info) + + if slg != lg.StandardLogger(logging.Info) { + t.Error("There should be only one standard logger at each severity.") + } + if slg == lg.StandardLogger(logging.Debug) { + t.Error("There should be a different standard logger for each severity.") + } + + slg.Print("info") + lg.Flush() + var got []*logging.Entry + ok := waitFor(func() bool { + var err error + got, err = allTestLogEntries(ctx) + if err != nil { + t.Log("fetching log entries: ", err) + return false + } + return len(got) == 1 + }) + if !ok { + t.Fatalf("timed out; got: %d, want: %d\n", len(got), 1) + } + if len(got) != 1 { + t.Fatalf("expected non-nil request with one entry; got:\n%+v", got) + } + if got, want := got[0].Payload.(string), "info\n"; got != want { + t.Errorf("payload: got %q, want %q", got, want) + } + if got, want := logging.Severity(got[0].Severity), logging.Info; got != want { + t.Errorf("severity: got %s, want %s", got, want) + } +} + +func TestSeverity(t *testing.T) { + if got, want := logging.Info.String(), "Info"; got != want { + t.Errorf("got %q, want %q", got, want) + } + if got, want := logging.Severity(-99).String(), "-99"; got != want { + t.Errorf("got %q, want %q", got, want) + } +} + +func TestParseSeverity(t *testing.T) { + for _, test := range []struct { + in string + want logging.Severity + }{ + {"", logging.Default}, + {"whatever", logging.Default}, + {"Default", logging.Default}, + {"ERROR", logging.Error}, + {"Error", logging.Error}, + {"error", logging.Error}, + } { + got := logging.ParseSeverity(test.in) + if got != test.want { + t.Errorf("%q: got %s, want %s\n", test.in, got, test.want) + } + } +} + +func TestErrors(t *testing.T) { + initLogs(ctx) // Generate new testLogID + // Drain errors already seen. 
+loop: + for { + select { + case <-errorc: + default: + break loop + } + } + // Try to log something that can't be JSON-marshalled. + lg := client.Logger(testLogID) + lg.Log(logging.Entry{Payload: func() {}}) + // Expect an error. + select { + case <-errorc: // pass + case <-time.After(100 * time.Millisecond): + t.Fatal("expected an error but timed out") + } +} + +type badTokenSource struct{} + +func (badTokenSource) Token() (*oauth2.Token, error) { + return &oauth2.Token{}, nil +} + +func TestPing(t *testing.T) { + // Ping twice, in case the service's InsertID logic messes with the error code. + ctx := context.Background() + // The global client should be valid. + if err := client.Ping(ctx); err != nil { + t.Errorf("project %s: got %v, expected nil", testProjectID, err) + } + if err := client.Ping(ctx); err != nil { + t.Errorf("project %s, #2: got %v, expected nil", testProjectID, err) + } + // nonexistent project + c, _ := newClients(ctx, testProjectID+"-BAD") + if err := c.Ping(ctx); err == nil { + t.Errorf("nonexistent project: want error pinging logging api, got nil") + } + if err := c.Ping(ctx); err == nil { + t.Errorf("nonexistent project, #2: want error pinging logging api, got nil") + } + + // Bad creds. We cannot test this with the fake, since it doesn't do auth. + if integrationTest { + c, err := logging.NewClient(ctx, testProjectID, option.WithTokenSource(badTokenSource{})) + if err != nil { + t.Fatal(err) + } + if err := c.Ping(ctx); err == nil { + t.Errorf("bad creds: want error pinging logging api, got nil") + } + if err := c.Ping(ctx); err == nil { + t.Errorf("bad creds, #2: want error pinging logging api, got nil") + } + if err := c.Close(); err != nil { + t.Fatalf("error closing client: %v", err) + } + } +} + +func TestDeleteLog(t *testing.T) { + initLogs(ctx) // Generate new testLogID + // Write some log entries. 
+ ctx := context.Background() + payloads := []string{"p1", "p2"} + lg := client.Logger(testLogID) + for _, p := range payloads { + // Use the insert ID to guarantee iteration order. + lg.Log(logging.Entry{Payload: p, InsertID: p}) + } + lg.Flush() + + var got []*logging.Entry + ok := waitFor(func() bool { + var err error + got, err = allTestLogEntries(ctx) + if err != nil { + t.Log("fetching log entries: ", err) + return false + } + return len(got) == 2 + }) + if !ok { + t.Fatalf("timed out; got: %d, want: %d\n", len(got), 2) + } + + // Sleep. + // Write timestamp uses client-provided timestamp, delete uses server + // timestamp. We sleep to reduce the possibility that the logs are never + // "deleted" because of clock skew. + // This is the recommended approach by Stackdriver team. + time.Sleep(3 * time.Second) + + // Delete the log + err := aclient.DeleteLog(ctx, testLogID) + if err != nil { + log.Fatalf("error deleting log: %v", err) + } + + // DeleteLog can take some time to happen, so we wait for the log to + // disappear. There is no direct way to determine if a log exists, so we + // just wait until there are no log entries associated with the ID. + filter := fmt.Sprintf(`logName = "%s"`, internal.LogPath("projects/"+testProjectID, testLogID)) + ok = waitFor(func() bool { return countLogEntries(ctx, filter) == 0 }) + if !ok { + t.Fatalf("timed out waiting for log entries to be deleted") + } +} + +// waitFor calls f repeatedly with exponential backoff, blocking until it returns true. +// It returns false after a while (if it times out). +func waitFor(f func() bool) bool { + // TODO(shadams): Find a better way to deflake these tests. 
+ ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute) + defer cancel() + err := cinternal.Retry(ctx, + gax.Backoff{Initial: time.Second, Multiplier: 2}, + func() (bool, error) { return f(), nil }) + return err == nil +} diff --git a/vendor/cloud.google.com/go/logging/logging_unexported_test.go b/vendor/cloud.google.com/go/logging/logging_unexported_test.go new file mode 100644 index 00000000..9481e00b --- /dev/null +++ b/vendor/cloud.google.com/go/logging/logging_unexported_test.go @@ -0,0 +1,197 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Tests that require access to unexported names of the logging package. 
+ +package logging + +import ( + "net/http" + "net/url" + "reflect" + "testing" + "time" + + "github.com/golang/protobuf/proto" + durpb "github.com/golang/protobuf/ptypes/duration" + structpb "github.com/golang/protobuf/ptypes/struct" + "google.golang.org/api/support/bundler" + mrpb "google.golang.org/genproto/googleapis/api/monitoredres" + logtypepb "google.golang.org/genproto/googleapis/logging/type" +) + +func TestLoggerCreation(t *testing.T) { + const logID = "testing" + c := &Client{projectID: "PROJECT_ID"} + defaultResource := &mrpb.MonitoredResource{Type: "global"} + defaultBundler := &bundler.Bundler{ + DelayThreshold: DefaultDelayThreshold, + BundleCountThreshold: DefaultEntryCountThreshold, + BundleByteThreshold: DefaultEntryByteThreshold, + BundleByteLimit: 0, + BufferedByteLimit: DefaultBufferedByteLimit, + } + for _, test := range []struct { + options []LoggerOption + wantLogger *Logger + wantBundler *bundler.Bundler + }{ + {nil, &Logger{commonResource: defaultResource}, defaultBundler}, + { + []LoggerOption{CommonResource(nil), CommonLabels(map[string]string{"a": "1"})}, + &Logger{commonResource: nil, commonLabels: map[string]string{"a": "1"}}, + defaultBundler, + }, + { + []LoggerOption{DelayThreshold(time.Minute), EntryCountThreshold(99), + EntryByteThreshold(17), EntryByteLimit(18), BufferedByteLimit(19)}, + &Logger{commonResource: defaultResource}, + &bundler.Bundler{ + DelayThreshold: time.Minute, + BundleCountThreshold: 99, + BundleByteThreshold: 17, + BundleByteLimit: 18, + BufferedByteLimit: 19, + }, + }, + } { + gotLogger := c.Logger(logID, test.options...) 
+ if got, want := gotLogger.commonResource, test.wantLogger.commonResource; !reflect.DeepEqual(got, want) { + t.Errorf("%v: resource: got %v, want %v", test.options, got, want) + } + if got, want := gotLogger.commonLabels, test.wantLogger.commonLabels; !reflect.DeepEqual(got, want) { + t.Errorf("%v: commonLabels: got %v, want %v", test.options, got, want) + } + if got, want := gotLogger.bundler.DelayThreshold, test.wantBundler.DelayThreshold; got != want { + t.Errorf("%v: DelayThreshold: got %v, want %v", test.options, got, want) + } + if got, want := gotLogger.bundler.BundleCountThreshold, test.wantBundler.BundleCountThreshold; got != want { + t.Errorf("%v: BundleCountThreshold: got %v, want %v", test.options, got, want) + } + if got, want := gotLogger.bundler.BundleByteThreshold, test.wantBundler.BundleByteThreshold; got != want { + t.Errorf("%v: BundleByteThreshold: got %v, want %v", test.options, got, want) + } + if got, want := gotLogger.bundler.BundleByteLimit, test.wantBundler.BundleByteLimit; got != want { + t.Errorf("%v: BundleByteLimit: got %v, want %v", test.options, got, want) + } + if got, want := gotLogger.bundler.BufferedByteLimit, test.wantBundler.BufferedByteLimit; got != want { + t.Errorf("%v: BufferedByteLimit: got %v, want %v", test.options, got, want) + } + } +} + +func TestToProtoStruct(t *testing.T) { + v := struct { + Foo string `json:"foo"` + Bar int `json:"bar,omitempty"` + Baz []float64 `json:"baz"` + Moo map[string]interface{} `json:"moo"` + }{ + Foo: "foovalue", + Baz: []float64{1.1}, + Moo: map[string]interface{}{ + "a": 1, + "b": "two", + "c": true, + }, + } + + got, err := toProtoStruct(v) + if err != nil { + t.Fatal(err) + } + want := &structpb.Struct{ + Fields: map[string]*structpb.Value{ + "foo": {Kind: &structpb.Value_StringValue{v.Foo}}, + "baz": {Kind: &structpb.Value_ListValue{&structpb.ListValue{ + []*structpb.Value{{Kind: &structpb.Value_NumberValue{1.1}}}}}}, + "moo": {Kind: &structpb.Value_StructValue{ + &structpb.Struct{ 
+ Fields: map[string]*structpb.Value{ + "a": {Kind: &structpb.Value_NumberValue{1}}, + "b": {Kind: &structpb.Value_StringValue{"two"}}, + "c": {Kind: &structpb.Value_BoolValue{true}}, + }, + }, + }}, + }, + } + if !proto.Equal(got, want) { + t.Errorf("got %+v\nwant %+v", got, want) + } + + // Non-structs should fail to convert. + for v := range []interface{}{3, "foo", []int{1, 2, 3}} { + _, err := toProtoStruct(v) + if err == nil { + t.Errorf("%v: got nil, want error", v) + } + } + + // Test fast path. + got, err = toProtoStruct(want) + if err != nil { + t.Fatal(err) + } + if got != want { + t.Error("got and want should be identical, but are not") + } +} + +func TestFromHTTPRequest(t *testing.T) { + const testURL = "http:://example.com/path?q=1" + u, err := url.Parse(testURL) + if err != nil { + t.Fatal(err) + } + req := &HTTPRequest{ + Request: &http.Request{ + Method: "GET", + URL: u, + Header: map[string][]string{ + "User-Agent": []string{"user-agent"}, + "Referer": []string{"referer"}, + }, + }, + RequestSize: 100, + Status: 200, + ResponseSize: 25, + Latency: 100 * time.Second, + RemoteIP: "127.0.0.1", + CacheHit: true, + CacheValidatedWithOriginServer: true, + } + got := fromHTTPRequest(req) + want := &logtypepb.HttpRequest{ + RequestMethod: "GET", + RequestUrl: testURL, + RequestSize: 100, + Status: 200, + ResponseSize: 25, + Latency: &durpb.Duration{Seconds: 100}, + UserAgent: "user-agent", + RemoteIp: "127.0.0.1", + Referer: "referer", + CacheHit: true, + CacheValidatedWithOriginServer: true, + } + if !reflect.DeepEqual(got, want) { + t.Errorf("got %+v\nwant %+v", got, want) + } +} + +// Used by the tests in logging_test. 
+func SetNow(f func() time.Time) { + now = f +} diff --git a/vendor/cloud.google.com/go/longrunning/example_test.go b/vendor/cloud.google.com/go/longrunning/example_test.go new file mode 100644 index 00000000..c7b52ac5 --- /dev/null +++ b/vendor/cloud.google.com/go/longrunning/example_test.go @@ -0,0 +1,116 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package longrunning + +import ( + "fmt" + "time" + + "github.com/golang/protobuf/ptypes" + "github.com/golang/protobuf/ptypes/duration" + "github.com/golang/protobuf/ptypes/timestamp" + "golang.org/x/net/context" + pb "google.golang.org/genproto/googleapis/longrunning" +) + +func bestMomentInHistory() (*Operation, error) { + t, err := time.Parse("2006-01-02 15:04:05.999999999 -0700 MST", "2009-11-10 23:00:00 +0000 UTC") + if err != nil { + return nil, err + } + resp, err := ptypes.TimestampProto(t) + if err != nil { + return nil, err + } + respAny, err := ptypes.MarshalAny(resp) + if err != nil { + return nil, err + } + metaAny, err := ptypes.MarshalAny(ptypes.DurationProto(1 * time.Hour)) + return &Operation{ + proto: &pb.Operation{ + Name: "best-moment", + Done: true, + Metadata: metaAny, + Result: &pb.Operation_Response{ + Response: respAny, + }, + }, + }, err +} + +func ExampleOperation_Wait() { + // Complex computation, might take a long time. + op, err := bestMomentInHistory() + if err != nil { + // TODO: Handle err. 
+ } + var ts timestamp.Timestamp + err = op.Wait(context.TODO(), &ts) + if err != nil && !op.Done() { + fmt.Println("failed to fetch operation status", err) + } else if err != nil && op.Done() { + fmt.Println("operation completed with error", err) + } else { + fmt.Println(ptypes.TimestampString(&ts)) + } + // Output: + // 2009-11-10T23:00:00Z +} + +func ExampleOperation_Metadata() { + op, err := bestMomentInHistory() + if err != nil { + // TODO: Handle err. + } + + // The operation might contain metadata. + // In this example, the metadata contains the estimated length of time + // the operation might take to complete. + var meta duration.Duration + if err := op.Metadata(&meta); err != nil { + // TODO: Handle err. + } + d, err := ptypes.Duration(&meta) + if err == ErrNoMetadata { + fmt.Println("no metadata") + } else if err != nil { + // TODO: Handle err. + } else { + fmt.Println(d) + } + // Output: + // 1h0m0s +} + +func ExampleOperation_Cancel() { + op, err := bestMomentInHistory() + if err != nil { + // TODO: Handle err. + } + if err := op.Cancel(context.Background()); err != nil { + // TODO: Handle err. + } +} + +func ExampleOperation_Delete() { + op, err := bestMomentInHistory() + if err != nil { + // TODO: Handle err. + } + if err := op.Delete(context.Background()); err != nil { + // TODO: Handle err. + } +} diff --git a/vendor/cloud.google.com/go/longrunning/longrunning.go b/vendor/cloud.google.com/go/longrunning/longrunning.go new file mode 100644 index 00000000..7133c8fb --- /dev/null +++ b/vendor/cloud.google.com/go/longrunning/longrunning.go @@ -0,0 +1,163 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package longrunning supports Long Running Operations for the Google Cloud Libraries. +// See google.golang.org/genproto/googleapis/longrunning for its service definition. +// +// Users of the Google Cloud Libraries will typically not use this package directly. +// Instead they will call functions returning Operations and call their methods. +// +// This package is still experimental and subject to change. +package longrunning // import "cloud.google.com/go/longrunning" + +import ( + "errors" + "fmt" + "time" + + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes" + gax "github.com/googleapis/gax-go" + + "golang.org/x/net/context" + + pb "google.golang.org/genproto/googleapis/longrunning" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" +) + +// ErrNoMetadata is the error returned by Metadata if the operation contains no metadata. +var ErrNoMetadata = errors.New("operation contains no metadata") + +// Operation represents the result of an API call that may not be ready yet. +type Operation struct { + c pb.OperationsClient + proto *pb.Operation +} + +// InternalNewOperation is for use by the google Cloud Libraries only. +// +// InternalNewOperation returns an long-running operation, abstracting the raw pb.Operation. +// The conn parameter refers to a server that proto was received from. +func InternalNewOperation(conn *grpc.ClientConn, proto *pb.Operation) *Operation { + return &Operation{ + c: pb.NewOperationsClient(conn), + proto: proto, + } +} + +// Name returns the name of the long-running operation. 
+// The name is assigned by the server and is unique within the service +// from which the operation is created. +func (op *Operation) Name() string { + return op.proto.Name +} + +// Done reports whether the long-running operation has completed. +func (op *Operation) Done() bool { + return op.proto.Done +} + +// Metadata unmarshals op's metadata into meta. +// If op does not contain any metadata, Metadata returns ErrNoMetadata and meta is unmodified. +func (op *Operation) Metadata(meta proto.Message) error { + if m := op.proto.Metadata; m != nil { + return ptypes.UnmarshalAny(m, meta) + } + return ErrNoMetadata +} + +// Poll fetches the latest state of a long-running operation. +// +// If Poll fails, the error is returned and op is unmodified. +// If Poll succeeds and the operation has completed with failure, +// the error is returned and op.Done will return true. +// If Poll succeeds and the operation has completed successfully, +// op.Done will return true; if resp != nil, the response of the operation +// is stored in resp. +func (op *Operation) Poll(ctx context.Context, resp proto.Message) error { + if !op.Done() { + p, err := op.c.GetOperation(ctx, &pb.GetOperationRequest{Name: op.Name()}) + if err != nil { + return err + } + op.proto = p + } + if !op.Done() { + return nil + } + + switch r := op.proto.Result.(type) { + case *pb.Operation_Error: + // TODO (pongad): r.Details may contain further information + return grpc.Errorf(codes.Code(r.Error.Code), "%s", r.Error.Message) + case *pb.Operation_Response: + if resp == nil { + return nil + } + return ptypes.UnmarshalAny(r.Response, resp) + default: + return fmt.Errorf("unsupported result type %[1]T: %[1]v", r) + } +} + +// Wait blocks until the operation is completed. +// If resp != nil, Wait stores the response in resp. +// +// See documentation of Poll for error-handling information. 
+func (op *Operation) Wait(ctx context.Context, resp proto.Message) error { + bo := gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 10 * time.Second, + } + return op.wait(ctx, resp, &bo, gax.Sleep) +} + +type sleeper func(context.Context, time.Duration) error + +// wait implements Wait, taking exponentialBackoff and sleeper arguments for testing. +func (op *Operation) wait(ctx context.Context, resp proto.Message, bo *gax.Backoff, sl sleeper) error { + for { + if err := op.Poll(ctx, resp); err != nil { + return err + } + if op.Done() { + return nil + } + if err := sl(ctx, bo.Pause()); err != nil { + return err + } + } +} + +// Cancel starts asynchronous cancellation on a long-running operation. The server +// makes a best effort to cancel the operation, but success is not +// guaranteed. If the server doesn't support this method, it returns +// grpc.Code(error) == codes.Unimplemented. Clients can use +// Poll or other methods to check whether the cancellation succeeded or whether the +// operation completed despite cancellation. On successful cancellation, +// the operation is not deleted; instead, op.Poll returns an error +// with code Canceled. +func (op *Operation) Cancel(ctx context.Context) error { + _, err := op.c.CancelOperation(ctx, &pb.CancelOperationRequest{Name: op.Name()}) + return err +} + +// Delete deletes a long-running operation. This method indicates that the client is +// no longer interested in the operation result. It does not cancel the +// operation. If the server doesn't support this method, grpc.Code(error) == codes.Unimplemented. 
+func (op *Operation) Delete(ctx context.Context) error { + _, err := op.c.DeleteOperation(ctx, &pb.DeleteOperationRequest{Name: op.Name()}) + return err +} diff --git a/vendor/cloud.google.com/go/longrunning/longrunning_test.go b/vendor/cloud.google.com/go/longrunning/longrunning_test.go new file mode 100644 index 00000000..5f8c0897 --- /dev/null +++ b/vendor/cloud.google.com/go/longrunning/longrunning_test.go @@ -0,0 +1,216 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package lro supports Long Running Operations for the Google Cloud Libraries. +// +// This package is still experimental and subject to change. +package longrunning + +import ( + "errors" + "testing" + "time" + + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes" + "github.com/golang/protobuf/ptypes/duration" + "github.com/golang/protobuf/ptypes/empty" + gax "github.com/googleapis/gax-go" + + "golang.org/x/net/context" + + pb "google.golang.org/genproto/googleapis/longrunning" + status "google.golang.org/genproto/googleapis/rpc/status" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" +) + +type getterService struct { + pb.OperationsClient + + // clock represents the fake current time of the service. + // It is the running sum of the of the duration we have slept. + clock time.Duration + + // getTimes records the the times at which GetOperation is called. 
+ getTimes []time.Duration + + // results are the fake results that GetOperation should return. + results []*pb.Operation +} + +func (s *getterService) GetOperation(context.Context, *pb.GetOperationRequest, ...grpc.CallOption) (*pb.Operation, error) { + i := len(s.getTimes) + s.getTimes = append(s.getTimes, s.clock) + if i >= len(s.results) { + return nil, errors.New("unexpected call") + } + return s.results[i], nil +} + +func (s *getterService) sleeper() sleeper { + return func(_ context.Context, d time.Duration) error { + s.clock += d + return nil + } +} + +func TestWait(t *testing.T) { + responseDur := ptypes.DurationProto(42 * time.Second) + responseAny, err := ptypes.MarshalAny(responseDur) + if err != nil { + t.Fatal(err) + } + + s := &getterService{ + results: []*pb.Operation{ + {Name: "foo"}, + {Name: "foo"}, + {Name: "foo"}, + {Name: "foo"}, + {Name: "foo"}, + { + Name: "foo", + Done: true, + Result: &pb.Operation_Response{ + Response: responseAny, + }, + }, + }, + } + op := &Operation{ + c: s, + proto: &pb.Operation{Name: "foo"}, + } + if op.Done() { + t.Fatal("operation should not have completed yet") + } + + var resp duration.Duration + bo := gax.Backoff{ + Initial: 1 * time.Second, + Max: 3 * time.Second, + } + if err := op.wait(context.Background(), &resp, &bo, s.sleeper()); err != nil { + t.Fatal(err) + } + if !proto.Equal(&resp, responseDur) { + t.Errorf("response, got %v, want %v", resp, responseDur) + } + if !op.Done() { + t.Errorf("operation should have completed") + } + + maxWait := []time.Duration{ + 1 * time.Second, + 2 * time.Second, + 3 * time.Second, + 3 * time.Second, + 3 * time.Second, + } + for i := 0; i < len(s.getTimes)-1; i++ { + w := s.getTimes[i+1] - s.getTimes[i] + if mw := maxWait[i]; w > mw { + t.Errorf("backoff, waited %s, max %s", w, mw) + } + } +} + +func TestPollRequestError(t *testing.T) { + const opName = "foo" + + // All calls error. 
+ s := &getterService{} + op := &Operation{ + c: s, + proto: &pb.Operation{Name: opName}, + } + if err := op.Poll(context.Background(), nil); err == nil { + t.Fatalf("Poll should error") + } + if n := op.Name(); n != opName { + t.Errorf("operation name, got %q, want %q", n, opName) + } + if op.Done() { + t.Errorf("operation should not have completed; we failed to fetch state") + } +} + +func TestPollErrorResult(t *testing.T) { + const ( + errCode = codes.NotFound + errMsg = "my error" + ) + op := &Operation{ + proto: &pb.Operation{ + Name: "foo", + Done: true, + Result: &pb.Operation_Error{ + Error: &status.Status{ + Code: int32(errCode), + Message: errMsg, + }, + }, + }, + } + err := op.Poll(context.Background(), nil) + if got := grpc.Code(err); got != errCode { + t.Errorf("error code, want %s, got %s", errCode, got) + } + if got := grpc.ErrorDesc(err); got != errMsg { + t.Errorf("error code, want %s, got %s", errMsg, got) + } + if !op.Done() { + t.Errorf("operation should have completed") + } +} + +type errService struct { + pb.OperationsClient + errCancel, errDelete error +} + +func (s *errService) CancelOperation(context.Context, *pb.CancelOperationRequest, ...grpc.CallOption) (*empty.Empty, error) { + return nil, s.errCancel +} + +func (s *errService) DeleteOperation(context.Context, *pb.DeleteOperationRequest, ...grpc.CallOption) (*empty.Empty, error) { + return nil, s.errDelete +} + +func TestCancelReturnsError(t *testing.T) { + s := &errService{ + errCancel: errors.New("cancel error"), + } + op := &Operation{ + c: s, + proto: &pb.Operation{Name: "foo"}, + } + if got, want := op.Cancel(context.Background()), s.errCancel; got != want { + t.Errorf("cancel, got error %s, want %s", got, want) + } +} + +func TestDeleteReturnsError(t *testing.T) { + s := &errService{ + errDelete: errors.New("delete error"), + } + op := &Operation{ + c: s, + proto: &pb.Operation{Name: "foo"}, + } + if got, want := op.Delete(context.Background()), s.errDelete; got != want { + 
t.Errorf("cancel, got error %s, want %s", got, want) + } +} diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/doc.go b/vendor/cloud.google.com/go/monitoring/apiv3/doc.go new file mode 100644 index 00000000..3fa72a35 --- /dev/null +++ b/vendor/cloud.google.com/go/monitoring/apiv3/doc.go @@ -0,0 +1,35 @@ +// Copyright 2017, Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +// Package monitoring is an experimental, auto-generated package for the +// monitoring API. +// +// Manages your Stackdriver Monitoring data and configurations. Most projects +// must be associated with a Stackdriver account, with a few exceptions as +// noted on the individual method pages. +package monitoring // import "cloud.google.com/go/monitoring/apiv3" + +import ( + "golang.org/x/net/context" + "google.golang.org/grpc/metadata" +) + +func insertXGoog(ctx context.Context, val string) context.Context { + md, _ := metadata.FromContext(ctx) + md = md.Copy() + md["x-goog-api-client"] = []string{val} + return metadata.NewContext(ctx, md) +} diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/group_client.go b/vendor/cloud.google.com/go/monitoring/apiv3/group_client.go new file mode 100644 index 00000000..32604fdf --- /dev/null +++ b/vendor/cloud.google.com/go/monitoring/apiv3/group_client.go @@ -0,0 +1,392 @@ +// Copyright 2017, Google Inc. All rights reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package monitoring + +import ( + "math" + "time" + + "cloud.google.com/go/internal/version" + gax "github.com/googleapis/gax-go" + "golang.org/x/net/context" + "google.golang.org/api/iterator" + "google.golang.org/api/option" + "google.golang.org/api/transport" + monitoredrespb "google.golang.org/genproto/googleapis/api/monitoredres" + monitoringpb "google.golang.org/genproto/googleapis/monitoring/v3" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" +) + +var ( + groupProjectPathTemplate = gax.MustCompilePathTemplate("projects/{project}") + groupGroupPathTemplate = gax.MustCompilePathTemplate("projects/{project}/groups/{group}") +) + +// GroupCallOptions contains the retry settings for each method of GroupClient. 
+type GroupCallOptions struct { + ListGroups []gax.CallOption + GetGroup []gax.CallOption + CreateGroup []gax.CallOption + UpdateGroup []gax.CallOption + DeleteGroup []gax.CallOption + ListGroupMembers []gax.CallOption +} + +func defaultGroupClientOptions() []option.ClientOption { + return []option.ClientOption{ + option.WithEndpoint("monitoring.googleapis.com:443"), + option.WithScopes( + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/monitoring", + "https://www.googleapis.com/auth/monitoring.read", + "https://www.googleapis.com/auth/monitoring.write", + ), + } +} + +func defaultGroupCallOptions() *GroupCallOptions { + retry := map[[2]string][]gax.CallOption{ + {"default", "idempotent"}: { + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.3, + }) + }), + }, + {"default", "non_idempotent"}: { + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.3, + }) + }), + }, + } + return &GroupCallOptions{ + ListGroups: retry[[2]string{"default", "idempotent"}], + GetGroup: retry[[2]string{"default", "idempotent"}], + CreateGroup: retry[[2]string{"default", "non_idempotent"}], + UpdateGroup: retry[[2]string{"default", "idempotent"}], + DeleteGroup: retry[[2]string{"default", "idempotent"}], + ListGroupMembers: retry[[2]string{"default", "idempotent"}], + } +} + +// GroupClient is a client for interacting with Stackdriver Monitoring API. +type GroupClient struct { + // The connection to the service. + conn *grpc.ClientConn + + // The gRPC API client. + groupClient monitoringpb.GroupServiceClient + + // The call options for this service. + CallOptions *GroupCallOptions + + // The metadata to be sent with each request. 
+ xGoogHeader string +} + +// NewGroupClient creates a new group service client. +// +// The Group API lets you inspect and manage your +// [groups](google.monitoring.v3.Group). +// +// A group is a named filter that is used to identify +// a collection of monitored resources. Groups are typically used to +// mirror the physical and/or logical topology of the environment. +// Because group membership is computed dynamically, monitored +// resources that are started in the future are automatically placed +// in matching groups. By using a group to name monitored resources in, +// for example, an alert policy, the target of that alert policy is +// updated automatically as monitored resources are added and removed +// from the infrastructure. +func NewGroupClient(ctx context.Context, opts ...option.ClientOption) (*GroupClient, error) { + conn, err := transport.DialGRPC(ctx, append(defaultGroupClientOptions(), opts...)...) + if err != nil { + return nil, err + } + c := &GroupClient{ + conn: conn, + CallOptions: defaultGroupCallOptions(), + + groupClient: monitoringpb.NewGroupServiceClient(conn), + } + c.SetGoogleClientInfo() + return c, nil +} + +// Connection returns the client's connection to the API service. +func (c *GroupClient) Connection() *grpc.ClientConn { + return c.conn +} + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *GroupClient) Close() error { + return c.conn.Close() +} + +// SetGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *GroupClient) SetGoogleClientInfo(keyval ...string) { + kv := append([]string{"gl-go", version.Go()}, keyval...) + kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", "") + c.xGoogHeader = gax.XGoogHeader(kv...) +} + +// GroupProjectPath returns the path for the project resource. 
+func GroupProjectPath(project string) string { + path, err := groupProjectPathTemplate.Render(map[string]string{ + "project": project, + }) + if err != nil { + panic(err) + } + return path +} + +// GroupGroupPath returns the path for the group resource. +func GroupGroupPath(project, group string) string { + path, err := groupGroupPathTemplate.Render(map[string]string{ + "project": project, + "group": group, + }) + if err != nil { + panic(err) + } + return path +} + +// ListGroups lists the existing groups. +func (c *GroupClient) ListGroups(ctx context.Context, req *monitoringpb.ListGroupsRequest) *GroupIterator { + ctx = insertXGoog(ctx, c.xGoogHeader) + it := &GroupIterator{} + it.InternalFetch = func(pageSize int, pageToken string) ([]*monitoringpb.Group, string, error) { + var resp *monitoringpb.ListGroupsResponse + req.PageToken = pageToken + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context) error { + var err error + resp, err = c.groupClient.ListGroups(ctx, req) + return err + }, c.CallOptions.ListGroups...) + if err != nil { + return nil, "", err + } + return resp.Group, resp.NextPageToken, nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + return it +} + +// GetGroup gets a single group. +func (c *GroupClient) GetGroup(ctx context.Context, req *monitoringpb.GetGroupRequest) (*monitoringpb.Group, error) { + ctx = insertXGoog(ctx, c.xGoogHeader) + var resp *monitoringpb.Group + err := gax.Invoke(ctx, func(ctx context.Context) error { + var err error + resp, err = c.groupClient.GetGroup(ctx, req) + return err + }, c.CallOptions.GetGroup...) 
+ if err != nil { + return nil, err + } + return resp, nil +} + +// CreateGroup creates a new group. +func (c *GroupClient) CreateGroup(ctx context.Context, req *monitoringpb.CreateGroupRequest) (*monitoringpb.Group, error) { + ctx = insertXGoog(ctx, c.xGoogHeader) + var resp *monitoringpb.Group + err := gax.Invoke(ctx, func(ctx context.Context) error { + var err error + resp, err = c.groupClient.CreateGroup(ctx, req) + return err + }, c.CallOptions.CreateGroup...) + if err != nil { + return nil, err + } + return resp, nil +} + +// UpdateGroup updates an existing group. +// You can change any group attributes except `name`. +func (c *GroupClient) UpdateGroup(ctx context.Context, req *monitoringpb.UpdateGroupRequest) (*monitoringpb.Group, error) { + ctx = insertXGoog(ctx, c.xGoogHeader) + var resp *monitoringpb.Group + err := gax.Invoke(ctx, func(ctx context.Context) error { + var err error + resp, err = c.groupClient.UpdateGroup(ctx, req) + return err + }, c.CallOptions.UpdateGroup...) + if err != nil { + return nil, err + } + return resp, nil +} + +// DeleteGroup deletes an existing group. +func (c *GroupClient) DeleteGroup(ctx context.Context, req *monitoringpb.DeleteGroupRequest) error { + ctx = insertXGoog(ctx, c.xGoogHeader) + err := gax.Invoke(ctx, func(ctx context.Context) error { + var err error + _, err = c.groupClient.DeleteGroup(ctx, req) + return err + }, c.CallOptions.DeleteGroup...) + return err +} + +// ListGroupMembers lists the monitored resources that are members of a group. 
+func (c *GroupClient) ListGroupMembers(ctx context.Context, req *monitoringpb.ListGroupMembersRequest) *MonitoredResourceIterator { + ctx = insertXGoog(ctx, c.xGoogHeader) + it := &MonitoredResourceIterator{} + it.InternalFetch = func(pageSize int, pageToken string) ([]*monitoredrespb.MonitoredResource, string, error) { + var resp *monitoringpb.ListGroupMembersResponse + req.PageToken = pageToken + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context) error { + var err error + resp, err = c.groupClient.ListGroupMembers(ctx, req) + return err + }, c.CallOptions.ListGroupMembers...) + if err != nil { + return nil, "", err + } + return resp.Members, resp.NextPageToken, nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + return it +} + +// GroupIterator manages a stream of *monitoringpb.Group. +type GroupIterator struct { + items []*monitoringpb.Group + pageInfo *iterator.PageInfo + nextFunc func() error + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*monitoringpb.Group, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *GroupIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. 
Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. +func (it *GroupIterator) Next() (*monitoringpb.Group, error) { + var item *monitoringpb.Group + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *GroupIterator) bufLen() int { + return len(it.items) +} + +func (it *GroupIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} + +// MonitoredResourceIterator manages a stream of *monitoredrespb.MonitoredResource. +type MonitoredResourceIterator struct { + items []*monitoredrespb.MonitoredResource + pageInfo *iterator.PageInfo + nextFunc func() error + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*monitoredrespb.MonitoredResource, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *MonitoredResourceIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. 
+func (it *MonitoredResourceIterator) Next() (*monitoredrespb.MonitoredResource, error) { + var item *monitoredrespb.MonitoredResource + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *MonitoredResourceIterator) bufLen() int { + return len(it.items) +} + +func (it *MonitoredResourceIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/group_client_example_test.go b/vendor/cloud.google.com/go/monitoring/apiv3/group_client_example_test.go new file mode 100644 index 00000000..095661a7 --- /dev/null +++ b/vendor/cloud.google.com/go/monitoring/apiv3/group_client_example_test.go @@ -0,0 +1,147 @@ +// Copyright 2017, Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package monitoring_test + +import ( + "cloud.google.com/go/monitoring/apiv3" + "golang.org/x/net/context" + monitoringpb "google.golang.org/genproto/googleapis/monitoring/v3" +) + +func ExampleNewGroupClient() { + ctx := context.Background() + c, err := monitoring.NewGroupClient(ctx) + if err != nil { + // TODO: Handle error. + } + // TODO: Use client. + _ = c +} + +func ExampleGroupClient_ListGroups() { + ctx := context.Background() + c, err := monitoring.NewGroupClient(ctx) + if err != nil { + // TODO: Handle error. 
+ } + + req := &monitoringpb.ListGroupsRequest{ + // TODO: Fill request struct fields. + } + it := c.ListGroups(ctx, req) + for { + resp, err := it.Next() + if err != nil { + // TODO: Handle error. + break + } + // TODO: Use resp. + _ = resp + } +} + +func ExampleGroupClient_GetGroup() { + ctx := context.Background() + c, err := monitoring.NewGroupClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &monitoringpb.GetGroupRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.GetGroup(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleGroupClient_CreateGroup() { + ctx := context.Background() + c, err := monitoring.NewGroupClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &monitoringpb.CreateGroupRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.CreateGroup(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleGroupClient_UpdateGroup() { + ctx := context.Background() + c, err := monitoring.NewGroupClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &monitoringpb.UpdateGroupRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.UpdateGroup(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleGroupClient_DeleteGroup() { + ctx := context.Background() + c, err := monitoring.NewGroupClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &monitoringpb.DeleteGroupRequest{ + // TODO: Fill request struct fields. + } + err = c.DeleteGroup(ctx, req) + if err != nil { + // TODO: Handle error. + } +} + +func ExampleGroupClient_ListGroupMembers() { + ctx := context.Background() + c, err := monitoring.NewGroupClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &monitoringpb.ListGroupMembersRequest{ + // TODO: Fill request struct fields. 
+ } + it := c.ListGroupMembers(ctx, req) + for { + resp, err := it.Next() + if err != nil { + // TODO: Handle error. + break + } + // TODO: Use resp. + _ = resp + } +} diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/metric_client.go b/vendor/cloud.google.com/go/monitoring/apiv3/metric_client.go new file mode 100644 index 00000000..852598c3 --- /dev/null +++ b/vendor/cloud.google.com/go/monitoring/apiv3/metric_client.go @@ -0,0 +1,492 @@ +// Copyright 2017, Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. 
+ +package monitoring + +import ( + "math" + "time" + + "cloud.google.com/go/internal/version" + gax "github.com/googleapis/gax-go" + "golang.org/x/net/context" + "google.golang.org/api/iterator" + "google.golang.org/api/option" + "google.golang.org/api/transport" + metricpb "google.golang.org/genproto/googleapis/api/metric" + monitoredrespb "google.golang.org/genproto/googleapis/api/monitoredres" + monitoringpb "google.golang.org/genproto/googleapis/monitoring/v3" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" +) + +var ( + metricProjectPathTemplate = gax.MustCompilePathTemplate("projects/{project}") + metricMetricDescriptorPathTemplate = gax.MustCompilePathTemplate("projects/{project}/metricDescriptors/{metric_descriptor=**}") + metricMonitoredResourceDescriptorPathTemplate = gax.MustCompilePathTemplate("projects/{project}/monitoredResourceDescriptors/{monitored_resource_descriptor}") +) + +// MetricCallOptions contains the retry settings for each method of MetricClient. +type MetricCallOptions struct { + ListMonitoredResourceDescriptors []gax.CallOption + GetMonitoredResourceDescriptor []gax.CallOption + ListMetricDescriptors []gax.CallOption + GetMetricDescriptor []gax.CallOption + CreateMetricDescriptor []gax.CallOption + DeleteMetricDescriptor []gax.CallOption + ListTimeSeries []gax.CallOption + CreateTimeSeries []gax.CallOption +} + +func defaultMetricClientOptions() []option.ClientOption { + return []option.ClientOption{ + option.WithEndpoint("monitoring.googleapis.com:443"), + option.WithScopes( + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/monitoring", + "https://www.googleapis.com/auth/monitoring.read", + "https://www.googleapis.com/auth/monitoring.write", + ), + } +} + +func defaultMetricCallOptions() *MetricCallOptions { + retry := map[[2]string][]gax.CallOption{ + {"default", "idempotent"}: { + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + 
codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.3, + }) + }), + }, + {"default", "non_idempotent"}: { + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.3, + }) + }), + }, + } + return &MetricCallOptions{ + ListMonitoredResourceDescriptors: retry[[2]string{"default", "idempotent"}], + GetMonitoredResourceDescriptor: retry[[2]string{"default", "idempotent"}], + ListMetricDescriptors: retry[[2]string{"default", "idempotent"}], + GetMetricDescriptor: retry[[2]string{"default", "idempotent"}], + CreateMetricDescriptor: retry[[2]string{"default", "non_idempotent"}], + DeleteMetricDescriptor: retry[[2]string{"default", "idempotent"}], + ListTimeSeries: retry[[2]string{"default", "idempotent"}], + CreateTimeSeries: retry[[2]string{"default", "non_idempotent"}], + } +} + +// MetricClient is a client for interacting with Stackdriver Monitoring API. +type MetricClient struct { + // The connection to the service. + conn *grpc.ClientConn + + // The gRPC API client. + metricClient monitoringpb.MetricServiceClient + + // The call options for this service. + CallOptions *MetricCallOptions + + // The metadata to be sent with each request. + xGoogHeader string +} + +// NewMetricClient creates a new metric service client. +// +// Manages metric descriptors, monitored resource descriptors, and +// time series data. +func NewMetricClient(ctx context.Context, opts ...option.ClientOption) (*MetricClient, error) { + conn, err := transport.DialGRPC(ctx, append(defaultMetricClientOptions(), opts...)...) 
+ if err != nil { + return nil, err + } + c := &MetricClient{ + conn: conn, + CallOptions: defaultMetricCallOptions(), + + metricClient: monitoringpb.NewMetricServiceClient(conn), + } + c.SetGoogleClientInfo() + return c, nil +} + +// Connection returns the client's connection to the API service. +func (c *MetricClient) Connection() *grpc.ClientConn { + return c.conn +} + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *MetricClient) Close() error { + return c.conn.Close() +} + +// SetGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *MetricClient) SetGoogleClientInfo(keyval ...string) { + kv := append([]string{"gl-go", version.Go()}, keyval...) + kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", "") + c.xGoogHeader = gax.XGoogHeader(kv...) +} + +// MetricProjectPath returns the path for the project resource. +func MetricProjectPath(project string) string { + path, err := metricProjectPathTemplate.Render(map[string]string{ + "project": project, + }) + if err != nil { + panic(err) + } + return path +} + +// MetricMetricDescriptorPath returns the path for the metric descriptor resource. +func MetricMetricDescriptorPath(project, metricDescriptor string) string { + path, err := metricMetricDescriptorPathTemplate.Render(map[string]string{ + "project": project, + "metric_descriptor": metricDescriptor, + }) + if err != nil { + panic(err) + } + return path +} + +// MetricMonitoredResourceDescriptorPath returns the path for the monitored resource descriptor resource. 
+func MetricMonitoredResourceDescriptorPath(project, monitoredResourceDescriptor string) string { + path, err := metricMonitoredResourceDescriptorPathTemplate.Render(map[string]string{ + "project": project, + "monitored_resource_descriptor": monitoredResourceDescriptor, + }) + if err != nil { + panic(err) + } + return path +} + +// ListMonitoredResourceDescriptors lists monitored resource descriptors that match a filter. This method does not require a Stackdriver account. +func (c *MetricClient) ListMonitoredResourceDescriptors(ctx context.Context, req *monitoringpb.ListMonitoredResourceDescriptorsRequest) *MonitoredResourceDescriptorIterator { + ctx = insertXGoog(ctx, c.xGoogHeader) + it := &MonitoredResourceDescriptorIterator{} + it.InternalFetch = func(pageSize int, pageToken string) ([]*monitoredrespb.MonitoredResourceDescriptor, string, error) { + var resp *monitoringpb.ListMonitoredResourceDescriptorsResponse + req.PageToken = pageToken + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context) error { + var err error + resp, err = c.metricClient.ListMonitoredResourceDescriptors(ctx, req) + return err + }, c.CallOptions.ListMonitoredResourceDescriptors...) + if err != nil { + return nil, "", err + } + return resp.ResourceDescriptors, resp.NextPageToken, nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + return it +} + +// GetMonitoredResourceDescriptor gets a single monitored resource descriptor. This method does not require a Stackdriver account. 
+func (c *MetricClient) GetMonitoredResourceDescriptor(ctx context.Context, req *monitoringpb.GetMonitoredResourceDescriptorRequest) (*monitoredrespb.MonitoredResourceDescriptor, error) { + ctx = insertXGoog(ctx, c.xGoogHeader) + var resp *monitoredrespb.MonitoredResourceDescriptor + err := gax.Invoke(ctx, func(ctx context.Context) error { + var err error + resp, err = c.metricClient.GetMonitoredResourceDescriptor(ctx, req) + return err + }, c.CallOptions.GetMonitoredResourceDescriptor...) + if err != nil { + return nil, err + } + return resp, nil +} + +// ListMetricDescriptors lists metric descriptors that match a filter. This method does not require a Stackdriver account. +func (c *MetricClient) ListMetricDescriptors(ctx context.Context, req *monitoringpb.ListMetricDescriptorsRequest) *MetricDescriptorIterator { + ctx = insertXGoog(ctx, c.xGoogHeader) + it := &MetricDescriptorIterator{} + it.InternalFetch = func(pageSize int, pageToken string) ([]*metricpb.MetricDescriptor, string, error) { + var resp *monitoringpb.ListMetricDescriptorsResponse + req.PageToken = pageToken + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context) error { + var err error + resp, err = c.metricClient.ListMetricDescriptors(ctx, req) + return err + }, c.CallOptions.ListMetricDescriptors...) + if err != nil { + return nil, "", err + } + return resp.MetricDescriptors, resp.NextPageToken, nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + return it +} + +// GetMetricDescriptor gets a single metric descriptor. This method does not require a Stackdriver account. 
+func (c *MetricClient) GetMetricDescriptor(ctx context.Context, req *monitoringpb.GetMetricDescriptorRequest) (*metricpb.MetricDescriptor, error) { + ctx = insertXGoog(ctx, c.xGoogHeader) + var resp *metricpb.MetricDescriptor + err := gax.Invoke(ctx, func(ctx context.Context) error { + var err error + resp, err = c.metricClient.GetMetricDescriptor(ctx, req) + return err + }, c.CallOptions.GetMetricDescriptor...) + if err != nil { + return nil, err + } + return resp, nil +} + +// CreateMetricDescriptor creates a new metric descriptor. +// User-created metric descriptors define +// [custom metrics](/monitoring/custom-metrics). +func (c *MetricClient) CreateMetricDescriptor(ctx context.Context, req *monitoringpb.CreateMetricDescriptorRequest) (*metricpb.MetricDescriptor, error) { + ctx = insertXGoog(ctx, c.xGoogHeader) + var resp *metricpb.MetricDescriptor + err := gax.Invoke(ctx, func(ctx context.Context) error { + var err error + resp, err = c.metricClient.CreateMetricDescriptor(ctx, req) + return err + }, c.CallOptions.CreateMetricDescriptor...) + if err != nil { + return nil, err + } + return resp, nil +} + +// DeleteMetricDescriptor deletes a metric descriptor. Only user-created +// [custom metrics](/monitoring/custom-metrics) can be deleted. +func (c *MetricClient) DeleteMetricDescriptor(ctx context.Context, req *monitoringpb.DeleteMetricDescriptorRequest) error { + ctx = insertXGoog(ctx, c.xGoogHeader) + err := gax.Invoke(ctx, func(ctx context.Context) error { + var err error + _, err = c.metricClient.DeleteMetricDescriptor(ctx, req) + return err + }, c.CallOptions.DeleteMetricDescriptor...) + return err +} + +// ListTimeSeries lists time series that match a filter. This method does not require a Stackdriver account. 
+func (c *MetricClient) ListTimeSeries(ctx context.Context, req *monitoringpb.ListTimeSeriesRequest) *TimeSeriesIterator { + ctx = insertXGoog(ctx, c.xGoogHeader) + it := &TimeSeriesIterator{} + it.InternalFetch = func(pageSize int, pageToken string) ([]*monitoringpb.TimeSeries, string, error) { + var resp *monitoringpb.ListTimeSeriesResponse + req.PageToken = pageToken + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context) error { + var err error + resp, err = c.metricClient.ListTimeSeries(ctx, req) + return err + }, c.CallOptions.ListTimeSeries...) + if err != nil { + return nil, "", err + } + return resp.TimeSeries, resp.NextPageToken, nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + return it +} + +// CreateTimeSeries creates or adds data to one or more time series. +// The response is empty if all time series in the request were written. +// If any time series could not be written, a corresponding failure message is +// included in the error response. +func (c *MetricClient) CreateTimeSeries(ctx context.Context, req *monitoringpb.CreateTimeSeriesRequest) error { + ctx = insertXGoog(ctx, c.xGoogHeader) + err := gax.Invoke(ctx, func(ctx context.Context) error { + var err error + _, err = c.metricClient.CreateTimeSeries(ctx, req) + return err + }, c.CallOptions.CreateTimeSeries...) + return err +} + +// MetricDescriptorIterator manages a stream of *metricpb.MetricDescriptor. +type MetricDescriptorIterator struct { + items []*metricpb.MetricDescriptor + pageInfo *iterator.PageInfo + nextFunc func() error + + // InternalFetch is for use by the Google Cloud Libraries only. 
+ // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*metricpb.MetricDescriptor, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *MetricDescriptorIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. +func (it *MetricDescriptorIterator) Next() (*metricpb.MetricDescriptor, error) { + var item *metricpb.MetricDescriptor + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *MetricDescriptorIterator) bufLen() int { + return len(it.items) +} + +func (it *MetricDescriptorIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} + +// MonitoredResourceDescriptorIterator manages a stream of *monitoredrespb.MonitoredResourceDescriptor. +type MonitoredResourceDescriptorIterator struct { + items []*monitoredrespb.MonitoredResourceDescriptor + pageInfo *iterator.PageInfo + nextFunc func() error + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*monitoredrespb.MonitoredResourceDescriptor, nextPageToken string, err error) +} + +// PageInfo supports pagination. 
See the google.golang.org/api/iterator package for details. +func (it *MonitoredResourceDescriptorIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. +func (it *MonitoredResourceDescriptorIterator) Next() (*monitoredrespb.MonitoredResourceDescriptor, error) { + var item *monitoredrespb.MonitoredResourceDescriptor + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *MonitoredResourceDescriptorIterator) bufLen() int { + return len(it.items) +} + +func (it *MonitoredResourceDescriptorIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} + +// TimeSeriesIterator manages a stream of *monitoringpb.TimeSeries. +type TimeSeriesIterator struct { + items []*monitoringpb.TimeSeries + pageInfo *iterator.PageInfo + nextFunc func() error + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*monitoringpb.TimeSeries, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *TimeSeriesIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. 
+func (it *TimeSeriesIterator) Next() (*monitoringpb.TimeSeries, error) { + var item *monitoringpb.TimeSeries + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *TimeSeriesIterator) bufLen() int { + return len(it.items) +} + +func (it *TimeSeriesIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/metric_client_example_test.go b/vendor/cloud.google.com/go/monitoring/apiv3/metric_client_example_test.go new file mode 100644 index 00000000..5dbb5efd --- /dev/null +++ b/vendor/cloud.google.com/go/monitoring/apiv3/metric_client_example_test.go @@ -0,0 +1,185 @@ +// Copyright 2017, Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package monitoring_test + +import ( + "cloud.google.com/go/monitoring/apiv3" + "golang.org/x/net/context" + monitoringpb "google.golang.org/genproto/googleapis/monitoring/v3" +) + +func ExampleNewMetricClient() { + ctx := context.Background() + c, err := monitoring.NewMetricClient(ctx) + if err != nil { + // TODO: Handle error. + } + // TODO: Use client. + _ = c +} + +func ExampleMetricClient_ListMonitoredResourceDescriptors() { + ctx := context.Background() + c, err := monitoring.NewMetricClient(ctx) + if err != nil { + // TODO: Handle error. 
+ } + + req := &monitoringpb.ListMonitoredResourceDescriptorsRequest{ + // TODO: Fill request struct fields. + } + it := c.ListMonitoredResourceDescriptors(ctx, req) + for { + resp, err := it.Next() + if err != nil { + // TODO: Handle error. + break + } + // TODO: Use resp. + _ = resp + } +} + +func ExampleMetricClient_GetMonitoredResourceDescriptor() { + ctx := context.Background() + c, err := monitoring.NewMetricClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &monitoringpb.GetMonitoredResourceDescriptorRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.GetMonitoredResourceDescriptor(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleMetricClient_ListMetricDescriptors() { + ctx := context.Background() + c, err := monitoring.NewMetricClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &monitoringpb.ListMetricDescriptorsRequest{ + // TODO: Fill request struct fields. + } + it := c.ListMetricDescriptors(ctx, req) + for { + resp, err := it.Next() + if err != nil { + // TODO: Handle error. + break + } + // TODO: Use resp. + _ = resp + } +} + +func ExampleMetricClient_GetMetricDescriptor() { + ctx := context.Background() + c, err := monitoring.NewMetricClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &monitoringpb.GetMetricDescriptorRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.GetMetricDescriptor(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleMetricClient_CreateMetricDescriptor() { + ctx := context.Background() + c, err := monitoring.NewMetricClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &monitoringpb.CreateMetricDescriptorRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.CreateMetricDescriptor(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. 
+ _ = resp +} + +func ExampleMetricClient_DeleteMetricDescriptor() { + ctx := context.Background() + c, err := monitoring.NewMetricClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &monitoringpb.DeleteMetricDescriptorRequest{ + // TODO: Fill request struct fields. + } + err = c.DeleteMetricDescriptor(ctx, req) + if err != nil { + // TODO: Handle error. + } +} + +func ExampleMetricClient_ListTimeSeries() { + ctx := context.Background() + c, err := monitoring.NewMetricClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &monitoringpb.ListTimeSeriesRequest{ + // TODO: Fill request struct fields. + } + it := c.ListTimeSeries(ctx, req) + for { + resp, err := it.Next() + if err != nil { + // TODO: Handle error. + break + } + // TODO: Use resp. + _ = resp + } +} + +func ExampleMetricClient_CreateTimeSeries() { + ctx := context.Background() + c, err := monitoring.NewMetricClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &monitoringpb.CreateTimeSeriesRequest{ + // TODO: Fill request struct fields. + } + err = c.CreateTimeSeries(ctx, req) + if err != nil { + // TODO: Handle error. + } +} diff --git a/vendor/cloud.google.com/go/monitoring/apiv3/mock_test.go b/vendor/cloud.google.com/go/monitoring/apiv3/mock_test.go new file mode 100644 index 00000000..82d738e7 --- /dev/null +++ b/vendor/cloud.google.com/go/monitoring/apiv3/mock_test.go @@ -0,0 +1,1133 @@ +// Copyright 2017, Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package monitoring + +import ( + google_protobuf "github.com/golang/protobuf/ptypes/empty" + metricpb "google.golang.org/genproto/googleapis/api/metric" + monitoredrespb "google.golang.org/genproto/googleapis/api/monitoredres" + monitoringpb "google.golang.org/genproto/googleapis/monitoring/v3" +) + +import ( + "flag" + "io" + "log" + "net" + "os" + "testing" + + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes" + "golang.org/x/net/context" + "google.golang.org/api/option" + status "google.golang.org/genproto/googleapis/rpc/status" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" +) + +var _ = io.EOF +var _ = ptypes.MarshalAny +var _ status.Status + +type mockGroupServer struct { + // Embed for forward compatibility. + // Tests will keep working if more methods are added + // in the future. + monitoringpb.GroupServiceServer + + reqs []proto.Message + + // If set, all calls return this error. 
+ err error + + // responses to return if err == nil + resps []proto.Message +} + +func (s *mockGroupServer) ListGroups(_ context.Context, req *monitoringpb.ListGroupsRequest) (*monitoringpb.ListGroupsResponse, error) { + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*monitoringpb.ListGroupsResponse), nil +} + +func (s *mockGroupServer) GetGroup(_ context.Context, req *monitoringpb.GetGroupRequest) (*monitoringpb.Group, error) { + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*monitoringpb.Group), nil +} + +func (s *mockGroupServer) CreateGroup(_ context.Context, req *monitoringpb.CreateGroupRequest) (*monitoringpb.Group, error) { + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*monitoringpb.Group), nil +} + +func (s *mockGroupServer) UpdateGroup(_ context.Context, req *monitoringpb.UpdateGroupRequest) (*monitoringpb.Group, error) { + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*monitoringpb.Group), nil +} + +func (s *mockGroupServer) DeleteGroup(_ context.Context, req *monitoringpb.DeleteGroupRequest) (*google_protobuf.Empty, error) { + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*google_protobuf.Empty), nil +} + +func (s *mockGroupServer) ListGroupMembers(_ context.Context, req *monitoringpb.ListGroupMembersRequest) (*monitoringpb.ListGroupMembersResponse, error) { + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*monitoringpb.ListGroupMembersResponse), nil +} + +type mockMetricServer struct { + // Embed for forward compatibility. + // Tests will keep working if more methods are added + // in the future. + monitoringpb.MetricServiceServer + + reqs []proto.Message + + // If set, all calls return this error. 
+ err error + + // responses to return if err == nil + resps []proto.Message +} + +func (s *mockMetricServer) ListMonitoredResourceDescriptors(_ context.Context, req *monitoringpb.ListMonitoredResourceDescriptorsRequest) (*monitoringpb.ListMonitoredResourceDescriptorsResponse, error) { + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*monitoringpb.ListMonitoredResourceDescriptorsResponse), nil +} + +func (s *mockMetricServer) GetMonitoredResourceDescriptor(_ context.Context, req *monitoringpb.GetMonitoredResourceDescriptorRequest) (*monitoredrespb.MonitoredResourceDescriptor, error) { + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*monitoredrespb.MonitoredResourceDescriptor), nil +} + +func (s *mockMetricServer) ListMetricDescriptors(_ context.Context, req *monitoringpb.ListMetricDescriptorsRequest) (*monitoringpb.ListMetricDescriptorsResponse, error) { + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*monitoringpb.ListMetricDescriptorsResponse), nil +} + +func (s *mockMetricServer) GetMetricDescriptor(_ context.Context, req *monitoringpb.GetMetricDescriptorRequest) (*metricpb.MetricDescriptor, error) { + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*metricpb.MetricDescriptor), nil +} + +func (s *mockMetricServer) CreateMetricDescriptor(_ context.Context, req *monitoringpb.CreateMetricDescriptorRequest) (*metricpb.MetricDescriptor, error) { + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*metricpb.MetricDescriptor), nil +} + +func (s *mockMetricServer) DeleteMetricDescriptor(_ context.Context, req *monitoringpb.DeleteMetricDescriptorRequest) (*google_protobuf.Empty, error) { + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*google_protobuf.Empty), nil +} + +func (s *mockMetricServer) 
ListTimeSeries(_ context.Context, req *monitoringpb.ListTimeSeriesRequest) (*monitoringpb.ListTimeSeriesResponse, error) { + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*monitoringpb.ListTimeSeriesResponse), nil +} + +func (s *mockMetricServer) CreateTimeSeries(_ context.Context, req *monitoringpb.CreateTimeSeriesRequest) (*google_protobuf.Empty, error) { + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*google_protobuf.Empty), nil +} + +// clientOpt is the option tests should use to connect to the test server. +// It is initialized by TestMain. +var clientOpt option.ClientOption + +var ( + mockGroup mockGroupServer + mockMetric mockMetricServer +) + +func TestMain(m *testing.M) { + flag.Parse() + + serv := grpc.NewServer() + monitoringpb.RegisterGroupServiceServer(serv, &mockGroup) + monitoringpb.RegisterMetricServiceServer(serv, &mockMetric) + + lis, err := net.Listen("tcp", "localhost:0") + if err != nil { + log.Fatal(err) + } + go serv.Serve(lis) + + conn, err := grpc.Dial(lis.Addr().String(), grpc.WithInsecure()) + if err != nil { + log.Fatal(err) + } + clientOpt = option.WithGRPCConn(conn) + + os.Exit(m.Run()) +} + +func TestGroupServiceListGroups(t *testing.T) { + var nextPageToken string = "" + var groupElement *monitoringpb.Group = &monitoringpb.Group{} + var group = []*monitoringpb.Group{groupElement} + var expectedResponse = &monitoringpb.ListGroupsResponse{ + NextPageToken: nextPageToken, + Group: group, + } + + mockGroup.err = nil + mockGroup.reqs = nil + + mockGroup.resps = append(mockGroup.resps[:0], expectedResponse) + + var formattedName string = GroupProjectPath("[PROJECT]") + var request = &monitoringpb.ListGroupsRequest{ + Name: formattedName, + } + + c, err := NewGroupClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListGroups(context.Background(), request).Next() + + if err != nil { + t.Fatal(err) + } 
+ + if want, got := request, mockGroup.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + want := (interface{})(expectedResponse.Group[0]) + got := (interface{})(resp) + var ok bool + + switch want := (want).(type) { + case proto.Message: + ok = proto.Equal(want, got.(proto.Message)) + default: + ok = want == got + } + if !ok { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestGroupServiceListGroupsError(t *testing.T) { + errCode := codes.Internal + mockGroup.err = grpc.Errorf(errCode, "test error") + + var formattedName string = GroupProjectPath("[PROJECT]") + var request = &monitoringpb.ListGroupsRequest{ + Name: formattedName, + } + + c, err := NewGroupClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListGroups(context.Background(), request).Next() + + if c := grpc.Code(err); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestGroupServiceGetGroup(t *testing.T) { + var name2 string = "name2-1052831874" + var displayName string = "displayName1615086568" + var parentName string = "parentName1015022848" + var filter string = "filter-1274492040" + var isCluster bool = false + var expectedResponse = &monitoringpb.Group{ + Name: name2, + DisplayName: displayName, + ParentName: parentName, + Filter: filter, + IsCluster: isCluster, + } + + mockGroup.err = nil + mockGroup.reqs = nil + + mockGroup.resps = append(mockGroup.resps[:0], expectedResponse) + + var formattedName string = GroupGroupPath("[PROJECT]", "[GROUP]") + var request = &monitoringpb.GetGroupRequest{ + Name: formattedName, + } + + c, err := NewGroupClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.GetGroup(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockGroup.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if 
want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestGroupServiceGetGroupError(t *testing.T) { + errCode := codes.Internal + mockGroup.err = grpc.Errorf(errCode, "test error") + + var formattedName string = GroupGroupPath("[PROJECT]", "[GROUP]") + var request = &monitoringpb.GetGroupRequest{ + Name: formattedName, + } + + c, err := NewGroupClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.GetGroup(context.Background(), request) + + if c := grpc.Code(err); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestGroupServiceCreateGroup(t *testing.T) { + var name2 string = "name2-1052831874" + var displayName string = "displayName1615086568" + var parentName string = "parentName1015022848" + var filter string = "filter-1274492040" + var isCluster bool = false + var expectedResponse = &monitoringpb.Group{ + Name: name2, + DisplayName: displayName, + ParentName: parentName, + Filter: filter, + IsCluster: isCluster, + } + + mockGroup.err = nil + mockGroup.reqs = nil + + mockGroup.resps = append(mockGroup.resps[:0], expectedResponse) + + var formattedName string = GroupProjectPath("[PROJECT]") + var group *monitoringpb.Group = &monitoringpb.Group{} + var request = &monitoringpb.CreateGroupRequest{ + Name: formattedName, + Group: group, + } + + c, err := NewGroupClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.CreateGroup(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockGroup.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestGroupServiceCreateGroupError(t *testing.T) { + errCode := codes.Internal + mockGroup.err = 
grpc.Errorf(errCode, "test error") + + var formattedName string = GroupProjectPath("[PROJECT]") + var group *monitoringpb.Group = &monitoringpb.Group{} + var request = &monitoringpb.CreateGroupRequest{ + Name: formattedName, + Group: group, + } + + c, err := NewGroupClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.CreateGroup(context.Background(), request) + + if c := grpc.Code(err); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestGroupServiceUpdateGroup(t *testing.T) { + var name string = "name3373707" + var displayName string = "displayName1615086568" + var parentName string = "parentName1015022848" + var filter string = "filter-1274492040" + var isCluster bool = false + var expectedResponse = &monitoringpb.Group{ + Name: name, + DisplayName: displayName, + ParentName: parentName, + Filter: filter, + IsCluster: isCluster, + } + + mockGroup.err = nil + mockGroup.reqs = nil + + mockGroup.resps = append(mockGroup.resps[:0], expectedResponse) + + var group *monitoringpb.Group = &monitoringpb.Group{} + var request = &monitoringpb.UpdateGroupRequest{ + Group: group, + } + + c, err := NewGroupClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.UpdateGroup(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockGroup.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestGroupServiceUpdateGroupError(t *testing.T) { + errCode := codes.Internal + mockGroup.err = grpc.Errorf(errCode, "test error") + + var group *monitoringpb.Group = &monitoringpb.Group{} + var request = &monitoringpb.UpdateGroupRequest{ + Group: group, + } + + c, err := NewGroupClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + 
} + + resp, err := c.UpdateGroup(context.Background(), request) + + if c := grpc.Code(err); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestGroupServiceDeleteGroup(t *testing.T) { + var expectedResponse *google_protobuf.Empty = &google_protobuf.Empty{} + + mockGroup.err = nil + mockGroup.reqs = nil + + mockGroup.resps = append(mockGroup.resps[:0], expectedResponse) + + var formattedName string = GroupGroupPath("[PROJECT]", "[GROUP]") + var request = &monitoringpb.DeleteGroupRequest{ + Name: formattedName, + } + + c, err := NewGroupClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + err = c.DeleteGroup(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockGroup.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + +} + +func TestGroupServiceDeleteGroupError(t *testing.T) { + errCode := codes.Internal + mockGroup.err = grpc.Errorf(errCode, "test error") + + var formattedName string = GroupGroupPath("[PROJECT]", "[GROUP]") + var request = &monitoringpb.DeleteGroupRequest{ + Name: formattedName, + } + + c, err := NewGroupClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + err = c.DeleteGroup(context.Background(), request) + + if c := grpc.Code(err); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } +} +func TestGroupServiceListGroupMembers(t *testing.T) { + var nextPageToken string = "" + var totalSize int32 = -705419236 + var membersElement *monitoredrespb.MonitoredResource = &monitoredrespb.MonitoredResource{} + var members = []*monitoredrespb.MonitoredResource{membersElement} + var expectedResponse = &monitoringpb.ListGroupMembersResponse{ + NextPageToken: nextPageToken, + TotalSize: totalSize, + Members: members, + } + + mockGroup.err = nil + mockGroup.reqs = nil + + mockGroup.resps = append(mockGroup.resps[:0], expectedResponse) + + var formattedName 
string = GroupGroupPath("[PROJECT]", "[GROUP]") + var request = &monitoringpb.ListGroupMembersRequest{ + Name: formattedName, + } + + c, err := NewGroupClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListGroupMembers(context.Background(), request).Next() + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockGroup.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + want := (interface{})(expectedResponse.Members[0]) + got := (interface{})(resp) + var ok bool + + switch want := (want).(type) { + case proto.Message: + ok = proto.Equal(want, got.(proto.Message)) + default: + ok = want == got + } + if !ok { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestGroupServiceListGroupMembersError(t *testing.T) { + errCode := codes.Internal + mockGroup.err = grpc.Errorf(errCode, "test error") + + var formattedName string = GroupGroupPath("[PROJECT]", "[GROUP]") + var request = &monitoringpb.ListGroupMembersRequest{ + Name: formattedName, + } + + c, err := NewGroupClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListGroupMembers(context.Background(), request).Next() + + if c := grpc.Code(err); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestMetricServiceListMonitoredResourceDescriptors(t *testing.T) { + var nextPageToken string = "" + var resourceDescriptorsElement *monitoredrespb.MonitoredResourceDescriptor = &monitoredrespb.MonitoredResourceDescriptor{} + var resourceDescriptors = []*monitoredrespb.MonitoredResourceDescriptor{resourceDescriptorsElement} + var expectedResponse = &monitoringpb.ListMonitoredResourceDescriptorsResponse{ + NextPageToken: nextPageToken, + ResourceDescriptors: resourceDescriptors, + } + + mockMetric.err = nil + mockMetric.reqs = nil + + mockMetric.resps = append(mockMetric.resps[:0], expectedResponse) + + var formattedName string 
= MetricProjectPath("[PROJECT]") + var request = &monitoringpb.ListMonitoredResourceDescriptorsRequest{ + Name: formattedName, + } + + c, err := NewMetricClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListMonitoredResourceDescriptors(context.Background(), request).Next() + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockMetric.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + want := (interface{})(expectedResponse.ResourceDescriptors[0]) + got := (interface{})(resp) + var ok bool + + switch want := (want).(type) { + case proto.Message: + ok = proto.Equal(want, got.(proto.Message)) + default: + ok = want == got + } + if !ok { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestMetricServiceListMonitoredResourceDescriptorsError(t *testing.T) { + errCode := codes.Internal + mockMetric.err = grpc.Errorf(errCode, "test error") + + var formattedName string = MetricProjectPath("[PROJECT]") + var request = &monitoringpb.ListMonitoredResourceDescriptorsRequest{ + Name: formattedName, + } + + c, err := NewMetricClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListMonitoredResourceDescriptors(context.Background(), request).Next() + + if c := grpc.Code(err); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestMetricServiceGetMonitoredResourceDescriptor(t *testing.T) { + var name2 string = "name2-1052831874" + var type_ string = "type3575610" + var displayName string = "displayName1615086568" + var description string = "description-1724546052" + var expectedResponse = &monitoredrespb.MonitoredResourceDescriptor{ + Name: name2, + Type: type_, + DisplayName: displayName, + Description: description, + } + + mockMetric.err = nil + mockMetric.reqs = nil + + mockMetric.resps = append(mockMetric.resps[:0], expectedResponse) + + var formattedName string = 
MetricMonitoredResourceDescriptorPath("[PROJECT]", "[MONITORED_RESOURCE_DESCRIPTOR]") + var request = &monitoringpb.GetMonitoredResourceDescriptorRequest{ + Name: formattedName, + } + + c, err := NewMetricClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.GetMonitoredResourceDescriptor(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockMetric.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestMetricServiceGetMonitoredResourceDescriptorError(t *testing.T) { + errCode := codes.Internal + mockMetric.err = grpc.Errorf(errCode, "test error") + + var formattedName string = MetricMonitoredResourceDescriptorPath("[PROJECT]", "[MONITORED_RESOURCE_DESCRIPTOR]") + var request = &monitoringpb.GetMonitoredResourceDescriptorRequest{ + Name: formattedName, + } + + c, err := NewMetricClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.GetMonitoredResourceDescriptor(context.Background(), request) + + if c := grpc.Code(err); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestMetricServiceListMetricDescriptors(t *testing.T) { + var nextPageToken string = "" + var metricDescriptorsElement *metricpb.MetricDescriptor = &metricpb.MetricDescriptor{} + var metricDescriptors = []*metricpb.MetricDescriptor{metricDescriptorsElement} + var expectedResponse = &monitoringpb.ListMetricDescriptorsResponse{ + NextPageToken: nextPageToken, + MetricDescriptors: metricDescriptors, + } + + mockMetric.err = nil + mockMetric.reqs = nil + + mockMetric.resps = append(mockMetric.resps[:0], expectedResponse) + + var formattedName string = MetricProjectPath("[PROJECT]") + var request = &monitoringpb.ListMetricDescriptorsRequest{ + Name: 
formattedName, + } + + c, err := NewMetricClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListMetricDescriptors(context.Background(), request).Next() + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockMetric.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + want := (interface{})(expectedResponse.MetricDescriptors[0]) + got := (interface{})(resp) + var ok bool + + switch want := (want).(type) { + case proto.Message: + ok = proto.Equal(want, got.(proto.Message)) + default: + ok = want == got + } + if !ok { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestMetricServiceListMetricDescriptorsError(t *testing.T) { + errCode := codes.Internal + mockMetric.err = grpc.Errorf(errCode, "test error") + + var formattedName string = MetricProjectPath("[PROJECT]") + var request = &monitoringpb.ListMetricDescriptorsRequest{ + Name: formattedName, + } + + c, err := NewMetricClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListMetricDescriptors(context.Background(), request).Next() + + if c := grpc.Code(err); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestMetricServiceGetMetricDescriptor(t *testing.T) { + var name2 string = "name2-1052831874" + var type_ string = "type3575610" + var unit string = "unit3594628" + var description string = "description-1724546052" + var displayName string = "displayName1615086568" + var expectedResponse = &metricpb.MetricDescriptor{ + Name: name2, + Type: type_, + Unit: unit, + Description: description, + DisplayName: displayName, + } + + mockMetric.err = nil + mockMetric.reqs = nil + + mockMetric.resps = append(mockMetric.resps[:0], expectedResponse) + + var formattedName string = MetricMetricDescriptorPath("[PROJECT]", "[METRIC_DESCRIPTOR]") + var request = &monitoringpb.GetMetricDescriptorRequest{ + Name: formattedName, + 
} + + c, err := NewMetricClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.GetMetricDescriptor(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockMetric.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestMetricServiceGetMetricDescriptorError(t *testing.T) { + errCode := codes.Internal + mockMetric.err = grpc.Errorf(errCode, "test error") + + var formattedName string = MetricMetricDescriptorPath("[PROJECT]", "[METRIC_DESCRIPTOR]") + var request = &monitoringpb.GetMetricDescriptorRequest{ + Name: formattedName, + } + + c, err := NewMetricClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.GetMetricDescriptor(context.Background(), request) + + if c := grpc.Code(err); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestMetricServiceCreateMetricDescriptor(t *testing.T) { + var name2 string = "name2-1052831874" + var type_ string = "type3575610" + var unit string = "unit3594628" + var description string = "description-1724546052" + var displayName string = "displayName1615086568" + var expectedResponse = &metricpb.MetricDescriptor{ + Name: name2, + Type: type_, + Unit: unit, + Description: description, + DisplayName: displayName, + } + + mockMetric.err = nil + mockMetric.reqs = nil + + mockMetric.resps = append(mockMetric.resps[:0], expectedResponse) + + var formattedName string = MetricProjectPath("[PROJECT]") + var metricDescriptor *metricpb.MetricDescriptor = &metricpb.MetricDescriptor{} + var request = &monitoringpb.CreateMetricDescriptorRequest{ + Name: formattedName, + MetricDescriptor: metricDescriptor, + } + + c, err := NewMetricClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + 
} + + resp, err := c.CreateMetricDescriptor(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockMetric.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestMetricServiceCreateMetricDescriptorError(t *testing.T) { + errCode := codes.Internal + mockMetric.err = grpc.Errorf(errCode, "test error") + + var formattedName string = MetricProjectPath("[PROJECT]") + var metricDescriptor *metricpb.MetricDescriptor = &metricpb.MetricDescriptor{} + var request = &monitoringpb.CreateMetricDescriptorRequest{ + Name: formattedName, + MetricDescriptor: metricDescriptor, + } + + c, err := NewMetricClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.CreateMetricDescriptor(context.Background(), request) + + if c := grpc.Code(err); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestMetricServiceDeleteMetricDescriptor(t *testing.T) { + var expectedResponse *google_protobuf.Empty = &google_protobuf.Empty{} + + mockMetric.err = nil + mockMetric.reqs = nil + + mockMetric.resps = append(mockMetric.resps[:0], expectedResponse) + + var formattedName string = MetricMetricDescriptorPath("[PROJECT]", "[METRIC_DESCRIPTOR]") + var request = &monitoringpb.DeleteMetricDescriptorRequest{ + Name: formattedName, + } + + c, err := NewMetricClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + err = c.DeleteMetricDescriptor(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockMetric.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + +} + +func TestMetricServiceDeleteMetricDescriptorError(t *testing.T) { + errCode := codes.Internal + mockMetric.err = grpc.Errorf(errCode, "test 
error") + + var formattedName string = MetricMetricDescriptorPath("[PROJECT]", "[METRIC_DESCRIPTOR]") + var request = &monitoringpb.DeleteMetricDescriptorRequest{ + Name: formattedName, + } + + c, err := NewMetricClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + err = c.DeleteMetricDescriptor(context.Background(), request) + + if c := grpc.Code(err); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } +} +func TestMetricServiceListTimeSeries(t *testing.T) { + var nextPageToken string = "" + var timeSeriesElement *monitoringpb.TimeSeries = &monitoringpb.TimeSeries{} + var timeSeries = []*monitoringpb.TimeSeries{timeSeriesElement} + var expectedResponse = &monitoringpb.ListTimeSeriesResponse{ + NextPageToken: nextPageToken, + TimeSeries: timeSeries, + } + + mockMetric.err = nil + mockMetric.reqs = nil + + mockMetric.resps = append(mockMetric.resps[:0], expectedResponse) + + var formattedName string = MetricProjectPath("[PROJECT]") + var filter string = "filter-1274492040" + var interval *monitoringpb.TimeInterval = &monitoringpb.TimeInterval{} + var view monitoringpb.ListTimeSeriesRequest_TimeSeriesView = monitoringpb.ListTimeSeriesRequest_FULL + var request = &monitoringpb.ListTimeSeriesRequest{ + Name: formattedName, + Filter: filter, + Interval: interval, + View: view, + } + + c, err := NewMetricClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListTimeSeries(context.Background(), request).Next() + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockMetric.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + want := (interface{})(expectedResponse.TimeSeries[0]) + got := (interface{})(resp) + var ok bool + + switch want := (want).(type) { + case proto.Message: + ok = proto.Equal(want, got.(proto.Message)) + default: + ok = want == got + } + if !ok { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + 
+func TestMetricServiceListTimeSeriesError(t *testing.T) { + errCode := codes.Internal + mockMetric.err = grpc.Errorf(errCode, "test error") + + var formattedName string = MetricProjectPath("[PROJECT]") + var filter string = "filter-1274492040" + var interval *monitoringpb.TimeInterval = &monitoringpb.TimeInterval{} + var view monitoringpb.ListTimeSeriesRequest_TimeSeriesView = monitoringpb.ListTimeSeriesRequest_FULL + var request = &monitoringpb.ListTimeSeriesRequest{ + Name: formattedName, + Filter: filter, + Interval: interval, + View: view, + } + + c, err := NewMetricClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListTimeSeries(context.Background(), request).Next() + + if c := grpc.Code(err); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestMetricServiceCreateTimeSeries(t *testing.T) { + var expectedResponse *google_protobuf.Empty = &google_protobuf.Empty{} + + mockMetric.err = nil + mockMetric.reqs = nil + + mockMetric.resps = append(mockMetric.resps[:0], expectedResponse) + + var formattedName string = MetricProjectPath("[PROJECT]") + var timeSeries []*monitoringpb.TimeSeries = nil + var request = &monitoringpb.CreateTimeSeriesRequest{ + Name: formattedName, + TimeSeries: timeSeries, + } + + c, err := NewMetricClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + err = c.CreateTimeSeries(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockMetric.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + +} + +func TestMetricServiceCreateTimeSeriesError(t *testing.T) { + errCode := codes.Internal + mockMetric.err = grpc.Errorf(errCode, "test error") + + var formattedName string = MetricProjectPath("[PROJECT]") + var timeSeries []*monitoringpb.TimeSeries = nil + var request = &monitoringpb.CreateTimeSeriesRequest{ + Name: formattedName, + TimeSeries: 
timeSeries, + } + + c, err := NewMetricClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + err = c.CreateTimeSeries(context.Background(), request) + + if c := grpc.Code(err); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } +} diff --git a/vendor/cloud.google.com/go/old-news.md b/vendor/cloud.google.com/go/old-news.md new file mode 100644 index 00000000..718dd00e --- /dev/null +++ b/vendor/cloud.google.com/go/old-news.md @@ -0,0 +1,312 @@ +_October 19, 2016_ + +Breaking changes to cloud.google.com/go/bigquery: + +* Client.Table and Client.OpenTable have been removed. + Replace + ```go + client.OpenTable("project", "dataset", "table") + ``` + with + ```go + client.DatasetInProject("project", "dataset").Table("table") + ``` + +* Client.CreateTable has been removed. + Replace + ```go + client.CreateTable(ctx, "project", "dataset", "table") + ``` + with + ```go + client.DatasetInProject("project", "dataset").Table("table").Create(ctx) + ``` + +* Dataset.ListTables have been replaced with Dataset.Tables. + Replace + ```go + tables, err := ds.ListTables(ctx) + ``` + with + ```go + it := ds.Tables(ctx) + for { + table, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + // TODO: Handle error. + } + // TODO: use table. + } + ``` + +* Client.Read has been replaced with Job.Read, Table.Read and Query.Read. + Replace + ```go + it, err := client.Read(ctx, job) + ``` + with + ```go + it, err := job.Read(ctx) + ``` + and similarly for reading from tables or queries. + +* The iterator returned from the Read methods is now named RowIterator. Its + behavior is closer to the other iterators in these libraries. It no longer + supports the Schema method; see the next item. + Replace + ```go + for it.Next(ctx) { + var vals ValueList + if err := it.Get(&vals); err != nil { + // TODO: Handle error. + } + // TODO: use vals. + } + if err := it.Err(); err != nil { + // TODO: Handle error. 
+ } + ``` + with + ``` + for { + var vals ValueList + err := it.Next(&vals) + if err == iterator.Done { + break + } + if err != nil { + // TODO: Handle error. + } + // TODO: use vals. + } + ``` + Instead of the `RecordsPerRequest(n)` option, write + ```go + it.PageInfo().MaxSize = n + ``` + Instead of the `StartIndex(i)` option, write + ```go + it.StartIndex = i + ``` + +* ValueLoader.Load now takes a Schema in addition to a slice of Values. + Replace + ```go + func (vl *myValueLoader) Load(v []bigquery.Value) + ``` + with + ```go + func (vl *myValueLoader) Load(v []bigquery.Value, s bigquery.Schema) + ``` + + +* Table.Patch is replace by Table.Update. + Replace + ```go + p := table.Patch() + p.Description("new description") + metadata, err := p.Apply(ctx) + ``` + with + ```go + metadata, err := table.Update(ctx, bigquery.TableMetadataToUpdate{ + Description: "new description", + }) + ``` + +* Client.Copy is replaced by separate methods for each of its four functions. + All options have been replaced by struct fields. + + * To load data from Google Cloud Storage into a table, use Table.LoaderFrom. + + Replace + ```go + client.Copy(ctx, table, gcsRef) + ``` + with + ```go + table.LoaderFrom(gcsRef).Run(ctx) + ``` + Instead of passing options to Copy, set fields on the Loader: + ```go + loader := table.LoaderFrom(gcsRef) + loader.WriteDisposition = bigquery.WriteTruncate + ``` + + * To extract data from a table into Google Cloud Storage, use + Table.ExtractorTo. Set fields on the returned Extractor instead of + passing options. + + Replace + ```go + client.Copy(ctx, gcsRef, table) + ``` + with + ```go + table.ExtractorTo(gcsRef).Run(ctx) + ``` + + * To copy data into a table from one or more other tables, use + Table.CopierFrom. Set fields on the returned Copier instead of passing options. 
+ + Replace + ```go + client.Copy(ctx, dstTable, srcTable) + ``` + with + ```go + dst.Table.CopierFrom(srcTable).Run(ctx) + ``` + + * To start a query job, create a Query and call its Run method. Set fields + on the query instead of passing options. + + Replace + ```go + client.Copy(ctx, table, query) + ``` + with + ```go + query.Run(ctx) + ``` + +* Table.NewUploader has been renamed to Table.Uploader. Instead of options, + configure an Uploader by setting its fields. + Replace + ```go + u := table.NewUploader(bigquery.UploadIgnoreUnknownValues()) + ``` + with + ```go + u := table.NewUploader(bigquery.UploadIgnoreUnknownValues()) + u.IgnoreUnknownValues = true + ``` + +_October 10, 2016_ + +Breaking changes to cloud.google.com/go/storage: + +* AdminClient replaced by methods on Client. + Replace + ```go + adminClient.CreateBucket(ctx, bucketName, attrs) + ``` + with + ```go + client.Bucket(bucketName).Create(ctx, projectID, attrs) + ``` + +* BucketHandle.List replaced by BucketHandle.Objects. + Replace + ```go + for query != nil { + objs, err := bucket.List(d.ctx, query) + if err != nil { ... } + query = objs.Next + for _, obj := range objs.Results { + fmt.Println(obj) + } + } + ``` + with + ```go + iter := bucket.Objects(d.ctx, query) + for { + obj, err := iter.Next() + if err == iterator.Done { + break + } + if err != nil { ... } + fmt.Println(obj) + } + ``` + (The `iterator` package is at `google.golang.org/api/iterator`.) + + Replace `Query.Cursor` with `ObjectIterator.PageInfo().Token`. + + Replace `Query.MaxResults` with `ObjectIterator.PageInfo().MaxSize`. + + +* ObjectHandle.CopyTo replaced by ObjectHandle.CopierFrom. 
+ Replace + ```go + attrs, err := src.CopyTo(ctx, dst, nil) + ``` + with + ```go + attrs, err := dst.CopierFrom(src).Run(ctx) + ``` + + Replace + ```go + attrs, err := src.CopyTo(ctx, dst, &storage.ObjectAttrs{ContextType: "text/html"}) + ``` + with + ```go + c := dst.CopierFrom(src) + c.ContextType = "text/html" + attrs, err := c.Run(ctx) + ``` + +* ObjectHandle.ComposeFrom replaced by ObjectHandle.ComposerFrom. + Replace + ```go + attrs, err := dst.ComposeFrom(ctx, []*storage.ObjectHandle{src1, src2}, nil) + ``` + with + ```go + attrs, err := dst.ComposerFrom(src1, src2).Run(ctx) + ``` + +* ObjectHandle.Update's ObjectAttrs argument replaced by ObjectAttrsToUpdate. + Replace + ```go + attrs, err := obj.Update(ctx, &storage.ObjectAttrs{ContextType: "text/html"}) + ``` + with + ```go + attrs, err := obj.Update(ctx, storage.ObjectAttrsToUpdate{ContextType: "text/html"}) + ``` + +* ObjectHandle.WithConditions replaced by ObjectHandle.If. + Replace + ```go + obj.WithConditions(storage.Generation(gen), storage.IfMetaGenerationMatch(mgen)) + ``` + with + ```go + obj.Generation(gen).If(storage.Conditions{MetagenerationMatch: mgen}) + ``` + + Replace + ```go + obj.WithConditions(storage.IfGenerationMatch(0)) + ``` + with + ```go + obj.If(storage.Conditions{DoesNotExist: true}) + ``` + +* `storage.Done` replaced by `iterator.Done` (from package `google.golang.org/api/iterator`). + +_October 6, 2016_ + +Package preview/logging deleted. Use logging instead. + +_September 27, 2016_ + +Logging client replaced with preview version (see below). + +_September 8, 2016_ + +* New clients for some of Google's Machine Learning APIs: Vision, Speech, and +Natural Language. + +* Preview version of a new [Stackdriver Logging][cloud-logging] client in +[`cloud.google.com/go/preview/logging`](https://godoc.org/cloud.google.com/go/preview/logging). +This client uses gRPC as its transport layer, and supports log reading, sinks +and metrics. 
It will replace the current client at `cloud.google.com/go/logging` shortly. + diff --git a/vendor/cloud.google.com/go/pubsub/acker.go b/vendor/cloud.google.com/go/pubsub/acker.go new file mode 100644 index 00000000..088ed79c --- /dev/null +++ b/vendor/cloud.google.com/go/pubsub/acker.go @@ -0,0 +1,159 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package pubsub + +import ( + "sync" + "time" + + "golang.org/x/net/context" +) + +// ackBuffer stores the pending ack IDs and notifies the Dirty channel when it becomes non-empty. +type ackBuffer struct { + Dirty chan struct{} + // Close done when ackBuffer is no longer needed. + Done chan struct{} + + mu sync.Mutex + pending []string + send bool +} + +// Add adds ackID to the buffer. +func (buf *ackBuffer) Add(ackID string) { + buf.mu.Lock() + defer buf.mu.Unlock() + buf.pending = append(buf.pending, ackID) + + // If we are transitioning into a non-empty notification state. + if buf.send && len(buf.pending) == 1 { + buf.notify() + } +} + +// RemoveAll removes all ackIDs from the buffer and returns them. +func (buf *ackBuffer) RemoveAll() []string { + buf.mu.Lock() + defer buf.mu.Unlock() + + ret := buf.pending + buf.pending = nil + return ret +} + +// SendNotifications enables sending dirty notification on empty -> non-empty transitions. +// If the buffer is already non-empty, a notification will be sent immediately. 
+func (buf *ackBuffer) SendNotifications() { + buf.mu.Lock() + defer buf.mu.Unlock() + + buf.send = true + // If we are transitioning into a non-empty notification state. + if len(buf.pending) > 0 { + buf.notify() + } +} + +func (buf *ackBuffer) notify() { + go func() { + select { + case buf.Dirty <- struct{}{}: + case <-buf.Done: + } + }() +} + +// acker acks messages in batches. +type acker struct { + s service + Ctx context.Context // The context to use when acknowledging messages. + Sub string // The full name of the subscription. + AckTick <-chan time.Time // AckTick supplies the frequency with which to make ack requests. + + // Notify is called with an ack ID after the message with that ack ID + // has been processed. An ackID is considered to have been processed + // if at least one attempt has been made to acknowledge it. + Notify func(string) + + ackBuffer + + wg sync.WaitGroup + done chan struct{} +} + +// Start intiates processing of ackIDs which are added via Add. +// Notify is called with each ackID once it has been processed. +func (a *acker) Start() { + a.done = make(chan struct{}) + a.ackBuffer.Dirty = make(chan struct{}) + a.ackBuffer.Done = a.done + + a.wg.Add(1) + go func() { + defer a.wg.Done() + for { + select { + case <-a.ackBuffer.Dirty: + a.ack(a.ackBuffer.RemoveAll()) + case <-a.AckTick: + a.ack(a.ackBuffer.RemoveAll()) + case <-a.done: + return + } + } + + }() +} + +// Ack adds an ack id to be acked in the next batch. +func (a *acker) Ack(ackID string) { + a.ackBuffer.Add(ackID) +} + +// FastMode switches acker into a mode which acks messages as they arrive, rather than waiting +// for a.AckTick. +func (a *acker) FastMode() { + a.ackBuffer.SendNotifications() +} + +// Stop drops all pending messages, and releases resources before returning. +func (a *acker) Stop() { + close(a.done) + a.wg.Wait() +} + +const maxAckAttempts = 2 + +// ack acknowledges the supplied ackIDs. 
+// After the acknowledgement request has completed (regardless of its success +// or failure), ids will be passed to a.Notify. +func (a *acker) ack(ids []string) { + head, tail := a.s.splitAckIDs(ids) + for len(head) > 0 { + for i := 0; i < maxAckAttempts; i++ { + if a.s.acknowledge(a.Ctx, a.Sub, head) == nil { + break + } + } + // NOTE: if retry gives up and returns an error, we simply drop + // those ack IDs. The messages will be redelivered and this is + // a documented behaviour of the API. + head, tail = a.s.splitAckIDs(tail) + } + for _, id := range ids { + a.Notify(id) + } +} diff --git a/vendor/cloud.google.com/go/pubsub/acker_test.go b/vendor/cloud.google.com/go/pubsub/acker_test.go new file mode 100644 index 00000000..9e283baf --- /dev/null +++ b/vendor/cloud.google.com/go/pubsub/acker_test.go @@ -0,0 +1,262 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package pubsub + +import ( + "errors" + "reflect" + "sort" + "testing" + "time" + + "golang.org/x/net/context" +) + +func TestAcker(t *testing.T) { + tick := make(chan time.Time) + s := &testService{acknowledgeCalled: make(chan acknowledgeCall)} + + processed := make(chan string, 10) + acker := &acker{ + s: s, + Ctx: context.Background(), + Sub: "subname", + AckTick: tick, + Notify: func(ackID string) { processed <- ackID }, + } + acker.Start() + + checkAckProcessed := func(ackIDs []string) { + got := <-s.acknowledgeCalled + sort.Strings(got.ackIDs) + + want := acknowledgeCall{ + subName: "subname", + ackIDs: ackIDs, + } + + if !reflect.DeepEqual(got, want) { + t.Errorf("acknowledge: got:\n%v\nwant:\n%v", got, want) + } + } + + acker.Ack("a") + acker.Ack("b") + tick <- time.Time{} + checkAckProcessed([]string{"a", "b"}) + acker.Ack("c") + tick <- time.Time{} + checkAckProcessed([]string{"c"}) + acker.Stop() + + // all IDS should have been sent to processed. + close(processed) + processedIDs := []string{} + for id := range processed { + processedIDs = append(processedIDs, id) + } + sort.Strings(processedIDs) + want := []string{"a", "b", "c"} + if !reflect.DeepEqual(processedIDs, want) { + t.Errorf("acker processed: got:\n%v\nwant:\n%v", processedIDs, want) + } +} + +func TestAckerFastMode(t *testing.T) { + tick := make(chan time.Time) + s := &testService{acknowledgeCalled: make(chan acknowledgeCall)} + + processed := make(chan string, 10) + acker := &acker{ + s: s, + Ctx: context.Background(), + Sub: "subname", + AckTick: tick, + Notify: func(ackID string) { processed <- ackID }, + } + acker.Start() + + checkAckProcessed := func(ackIDs []string) { + got := <-s.acknowledgeCalled + sort.Strings(got.ackIDs) + + want := acknowledgeCall{ + subName: "subname", + ackIDs: ackIDs, + } + + if !reflect.DeepEqual(got, want) { + t.Errorf("acknowledge: got:\n%v\nwant:\n%v", got, want) + } + } + // No ticks are sent; fast mode doesn't need them. 
+ acker.Ack("a") + acker.Ack("b") + acker.FastMode() + checkAckProcessed([]string{"a", "b"}) + acker.Ack("c") + checkAckProcessed([]string{"c"}) + acker.Stop() + + // all IDS should have been sent to processed. + close(processed) + processedIDs := []string{} + for id := range processed { + processedIDs = append(processedIDs, id) + } + sort.Strings(processedIDs) + want := []string{"a", "b", "c"} + if !reflect.DeepEqual(processedIDs, want) { + t.Errorf("acker processed: got:\n%v\nwant:\n%v", processedIDs, want) + } +} + +// TestAckerStop checks that Stop returns immediately. +func TestAckerStop(t *testing.T) { + tick := make(chan time.Time) + s := &testService{acknowledgeCalled: make(chan acknowledgeCall, 10)} + + processed := make(chan string) + acker := &acker{ + s: s, + Ctx: context.Background(), + Sub: "subname", + AckTick: tick, + Notify: func(ackID string) { processed <- ackID }, + } + + acker.Start() + + stopped := make(chan struct{}) + + acker.Ack("a") + + go func() { + acker.Stop() + stopped <- struct{}{} + }() + + // Stopped should have been written to by the time this sleep completes. + time.Sleep(time.Millisecond) + + // Receiving from processed should cause Stop to subsequently return, + // so it should never be possible to read from stopped before + // processed. + select { + case <-stopped: + case <-processed: + t.Errorf("acker.Stop processed an ack id before returning") + case <-time.After(time.Millisecond): + t.Errorf("acker.Stop never returned") + } +} + +type ackCallResult struct { + ackIDs []string + err error +} + +type ackService struct { + service + + calls []ackCallResult + + t *testing.T // used for error logging. 
+} + +func (as *ackService) acknowledge(ctx context.Context, subName string, ackIDs []string) error { + if len(as.calls) == 0 { + as.t.Fatalf("unexpected call to acknowledge: ackIDs: %v", ackIDs) + } + call := as.calls[0] + as.calls = as.calls[1:] + + if got, want := ackIDs, call.ackIDs; !reflect.DeepEqual(got, want) { + as.t.Errorf("unexpected arguments to acknowledge: got: %v ; want: %v", got, want) + } + return call.err +} + +// Test implementation returns the first 2 elements as head, and the rest as tail. +func (as *ackService) splitAckIDs(ids []string) ([]string, []string) { + if len(ids) < 2 { + return ids, nil + } + return ids[:2], ids[2:] +} + +func TestAckerSplitsBatches(t *testing.T) { + type testCase struct { + calls []ackCallResult + } + for _, tc := range []testCase{ + { + calls: []ackCallResult{ + { + ackIDs: []string{"a", "b"}, + }, + { + ackIDs: []string{"c", "d"}, + }, + { + ackIDs: []string{"e", "f"}, + }, + }, + }, + { + calls: []ackCallResult{ + { + ackIDs: []string{"a", "b"}, + err: errors.New("bang"), + }, + // On error we retry once. + { + ackIDs: []string{"a", "b"}, + err: errors.New("bang"), + }, + // We give up after failing twice, so we move on to the next set, "c" and "d" + { + ackIDs: []string{"c", "d"}, + err: errors.New("bang"), + }, + // Again, we retry once. 
+ { + ackIDs: []string{"c", "d"}, + }, + { + ackIDs: []string{"e", "f"}, + }, + }, + }, + } { + s := &ackService{ + t: t, + calls: tc.calls, + } + + acker := &acker{ + s: s, + Ctx: context.Background(), + Sub: "subname", + Notify: func(string) {}, + } + + acker.ack([]string{"a", "b", "c", "d", "e", "f"}) + + if len(s.calls) != 0 { + t.Errorf("expected ack calls did not occur: %v", s.calls) + } + } +} diff --git a/vendor/cloud.google.com/go/pubsub/apiv1/README.md b/vendor/cloud.google.com/go/pubsub/apiv1/README.md new file mode 100644 index 00000000..b5967ab9 --- /dev/null +++ b/vendor/cloud.google.com/go/pubsub/apiv1/README.md @@ -0,0 +1,9 @@ +Auto-generated pubsub v1 clients +================================= + +This package includes auto-generated clients for the pubsub v1 API. + +Use the handwritten client (in the parent directory, +cloud.google.com/go/pubsub) in preference to this. + +This code is EXPERIMENTAL and subject to CHANGE AT ANY TIME. diff --git a/vendor/cloud.google.com/go/pubsub/apiv1/doc.go b/vendor/cloud.google.com/go/pubsub/apiv1/doc.go new file mode 100644 index 00000000..cf9769a3 --- /dev/null +++ b/vendor/cloud.google.com/go/pubsub/apiv1/doc.go @@ -0,0 +1,36 @@ +// Copyright 2017, Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +// Package pubsub is an experimental, auto-generated package for the +// pubsub API. 
+// +// Provides reliable, many-to-many, asynchronous messaging between +// applications. +// +// Use the client at cloud.google.com/go/pubsub in preference to this. +package pubsub // import "cloud.google.com/go/pubsub/apiv1" + +import ( + "golang.org/x/net/context" + "google.golang.org/grpc/metadata" +) + +func insertXGoog(ctx context.Context, val string) context.Context { + md, _ := metadata.FromContext(ctx) + md = md.Copy() + md["x-goog-api-client"] = []string{val} + return metadata.NewContext(ctx, md) +} diff --git a/vendor/cloud.google.com/go/pubsub/apiv1/mock_test.go b/vendor/cloud.google.com/go/pubsub/apiv1/mock_test.go new file mode 100644 index 00000000..f0005ad4 --- /dev/null +++ b/vendor/cloud.google.com/go/pubsub/apiv1/mock_test.go @@ -0,0 +1,1202 @@ +// Copyright 2017, Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. 
+ +package pubsub + +import ( + google_protobuf "github.com/golang/protobuf/ptypes/empty" + iampb "google.golang.org/genproto/googleapis/iam/v1" + pubsubpb "google.golang.org/genproto/googleapis/pubsub/v1" +) + +import ( + "flag" + "io" + "log" + "net" + "os" + "testing" + + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes" + "golang.org/x/net/context" + "google.golang.org/api/option" + status "google.golang.org/genproto/googleapis/rpc/status" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" +) + +var _ = io.EOF +var _ = ptypes.MarshalAny +var _ status.Status + +type mockPublisherServer struct { + // Embed for forward compatibility. + // Tests will keep working if more methods are added + // in the future. + pubsubpb.PublisherServer + + reqs []proto.Message + + // If set, all calls return this error. + err error + + // responses to return if err == nil + resps []proto.Message +} + +func (s *mockPublisherServer) CreateTopic(_ context.Context, req *pubsubpb.Topic) (*pubsubpb.Topic, error) { + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*pubsubpb.Topic), nil +} + +func (s *mockPublisherServer) Publish(_ context.Context, req *pubsubpb.PublishRequest) (*pubsubpb.PublishResponse, error) { + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*pubsubpb.PublishResponse), nil +} + +func (s *mockPublisherServer) GetTopic(_ context.Context, req *pubsubpb.GetTopicRequest) (*pubsubpb.Topic, error) { + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*pubsubpb.Topic), nil +} + +func (s *mockPublisherServer) ListTopics(_ context.Context, req *pubsubpb.ListTopicsRequest) (*pubsubpb.ListTopicsResponse, error) { + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*pubsubpb.ListTopicsResponse), nil +} + +func (s *mockPublisherServer) ListTopicSubscriptions(_ 
context.Context, req *pubsubpb.ListTopicSubscriptionsRequest) (*pubsubpb.ListTopicSubscriptionsResponse, error) { + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*pubsubpb.ListTopicSubscriptionsResponse), nil +} + +func (s *mockPublisherServer) DeleteTopic(_ context.Context, req *pubsubpb.DeleteTopicRequest) (*google_protobuf.Empty, error) { + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*google_protobuf.Empty), nil +} + +type mockIamPolicyServer struct { + // Embed for forward compatibility. + // Tests will keep working if more methods are added + // in the future. + iampb.IAMPolicyServer + + reqs []proto.Message + + // If set, all calls return this error. + err error + + // responses to return if err == nil + resps []proto.Message +} + +func (s *mockIamPolicyServer) SetIamPolicy(_ context.Context, req *iampb.SetIamPolicyRequest) (*iampb.Policy, error) { + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*iampb.Policy), nil +} + +func (s *mockIamPolicyServer) GetIamPolicy(_ context.Context, req *iampb.GetIamPolicyRequest) (*iampb.Policy, error) { + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*iampb.Policy), nil +} + +func (s *mockIamPolicyServer) TestIamPermissions(_ context.Context, req *iampb.TestIamPermissionsRequest) (*iampb.TestIamPermissionsResponse, error) { + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*iampb.TestIamPermissionsResponse), nil +} + +type mockSubscriberServer struct { + // Embed for forward compatibility. + // Tests will keep working if more methods are added + // in the future. + pubsubpb.SubscriberServer + + reqs []proto.Message + + // If set, all calls return this error. 
+ err error + + // responses to return if err == nil + resps []proto.Message +} + +func (s *mockSubscriberServer) CreateSubscription(_ context.Context, req *pubsubpb.Subscription) (*pubsubpb.Subscription, error) { + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*pubsubpb.Subscription), nil +} + +func (s *mockSubscriberServer) GetSubscription(_ context.Context, req *pubsubpb.GetSubscriptionRequest) (*pubsubpb.Subscription, error) { + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*pubsubpb.Subscription), nil +} + +func (s *mockSubscriberServer) ListSubscriptions(_ context.Context, req *pubsubpb.ListSubscriptionsRequest) (*pubsubpb.ListSubscriptionsResponse, error) { + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*pubsubpb.ListSubscriptionsResponse), nil +} + +func (s *mockSubscriberServer) DeleteSubscription(_ context.Context, req *pubsubpb.DeleteSubscriptionRequest) (*google_protobuf.Empty, error) { + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*google_protobuf.Empty), nil +} + +func (s *mockSubscriberServer) ModifyAckDeadline(_ context.Context, req *pubsubpb.ModifyAckDeadlineRequest) (*google_protobuf.Empty, error) { + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*google_protobuf.Empty), nil +} + +func (s *mockSubscriberServer) Acknowledge(_ context.Context, req *pubsubpb.AcknowledgeRequest) (*google_protobuf.Empty, error) { + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*google_protobuf.Empty), nil +} + +func (s *mockSubscriberServer) Pull(_ context.Context, req *pubsubpb.PullRequest) (*pubsubpb.PullResponse, error) { + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*pubsubpb.PullResponse), nil +} + +func (s *mockSubscriberServer) 
StreamingPull(stream pubsubpb.Subscriber_StreamingPullServer) error { + for { + if req, err := stream.Recv(); err == io.EOF { + break + } else if err != nil { + return err + } else { + s.reqs = append(s.reqs, req) + } + } + if s.err != nil { + return s.err + } + for _, v := range s.resps { + if err := stream.Send(v.(*pubsubpb.StreamingPullResponse)); err != nil { + return err + } + } + return nil +} + +func (s *mockSubscriberServer) ModifyPushConfig(_ context.Context, req *pubsubpb.ModifyPushConfigRequest) (*google_protobuf.Empty, error) { + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*google_protobuf.Empty), nil +} + +// clientOpt is the option tests should use to connect to the test server. +// It is initialized by TestMain. +var clientOpt option.ClientOption + +var ( + mockPublisher mockPublisherServer + mockIamPolicy mockIamPolicyServer + mockSubscriber mockSubscriberServer +) + +func TestMain(m *testing.M) { + flag.Parse() + + serv := grpc.NewServer() + pubsubpb.RegisterPublisherServer(serv, &mockPublisher) + iampb.RegisterIAMPolicyServer(serv, &mockIamPolicy) + pubsubpb.RegisterSubscriberServer(serv, &mockSubscriber) + + lis, err := net.Listen("tcp", "localhost:0") + if err != nil { + log.Fatal(err) + } + go serv.Serve(lis) + + conn, err := grpc.Dial(lis.Addr().String(), grpc.WithInsecure()) + if err != nil { + log.Fatal(err) + } + clientOpt = option.WithGRPCConn(conn) + + os.Exit(m.Run()) +} + +func TestPublisherCreateTopic(t *testing.T) { + var name2 string = "name2-1052831874" + var expectedResponse = &pubsubpb.Topic{ + Name: name2, + } + + mockPublisher.err = nil + mockPublisher.reqs = nil + + mockPublisher.resps = append(mockPublisher.resps[:0], expectedResponse) + + var formattedName string = PublisherTopicPath("[PROJECT]", "[TOPIC]") + var request = &pubsubpb.Topic{ + Name: formattedName, + } + + c, err := NewPublisherClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, 
err := c.CreateTopic(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockPublisher.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestPublisherCreateTopicError(t *testing.T) { + errCode := codes.Internal + mockPublisher.err = grpc.Errorf(errCode, "test error") + + var formattedName string = PublisherTopicPath("[PROJECT]", "[TOPIC]") + var request = &pubsubpb.Topic{ + Name: formattedName, + } + + c, err := NewPublisherClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.CreateTopic(context.Background(), request) + + if c := grpc.Code(err); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestPublisherPublish(t *testing.T) { + var messageIdsElement string = "messageIdsElement-744837059" + var messageIds = []string{messageIdsElement} + var expectedResponse = &pubsubpb.PublishResponse{ + MessageIds: messageIds, + } + + mockPublisher.err = nil + mockPublisher.reqs = nil + + mockPublisher.resps = append(mockPublisher.resps[:0], expectedResponse) + + var formattedTopic string = PublisherTopicPath("[PROJECT]", "[TOPIC]") + var data []byte = []byte("-86") + var messagesElement = &pubsubpb.PubsubMessage{ + Data: data, + } + var messages = []*pubsubpb.PubsubMessage{messagesElement} + var request = &pubsubpb.PublishRequest{ + Topic: formattedTopic, + Messages: messages, + } + + c, err := NewPublisherClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.Publish(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockPublisher.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, 
got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestPublisherPublishError(t *testing.T) { + errCode := codes.Internal + mockPublisher.err = grpc.Errorf(errCode, "test error") + + var formattedTopic string = PublisherTopicPath("[PROJECT]", "[TOPIC]") + var data []byte = []byte("-86") + var messagesElement = &pubsubpb.PubsubMessage{ + Data: data, + } + var messages = []*pubsubpb.PubsubMessage{messagesElement} + var request = &pubsubpb.PublishRequest{ + Topic: formattedTopic, + Messages: messages, + } + + c, err := NewPublisherClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.Publish(context.Background(), request) + + if c := grpc.Code(err); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestPublisherGetTopic(t *testing.T) { + var name string = "name3373707" + var expectedResponse = &pubsubpb.Topic{ + Name: name, + } + + mockPublisher.err = nil + mockPublisher.reqs = nil + + mockPublisher.resps = append(mockPublisher.resps[:0], expectedResponse) + + var formattedTopic string = PublisherTopicPath("[PROJECT]", "[TOPIC]") + var request = &pubsubpb.GetTopicRequest{ + Topic: formattedTopic, + } + + c, err := NewPublisherClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.GetTopic(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockPublisher.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestPublisherGetTopicError(t *testing.T) { + errCode := codes.Internal + mockPublisher.err = grpc.Errorf(errCode, "test error") + + var formattedTopic string = PublisherTopicPath("[PROJECT]", "[TOPIC]") + var request = &pubsubpb.GetTopicRequest{ + Topic: formattedTopic, + } + + c, err := 
NewPublisherClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.GetTopic(context.Background(), request) + + if c := grpc.Code(err); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestPublisherListTopics(t *testing.T) { + var nextPageToken string = "" + var topicsElement *pubsubpb.Topic = &pubsubpb.Topic{} + var topics = []*pubsubpb.Topic{topicsElement} + var expectedResponse = &pubsubpb.ListTopicsResponse{ + NextPageToken: nextPageToken, + Topics: topics, + } + + mockPublisher.err = nil + mockPublisher.reqs = nil + + mockPublisher.resps = append(mockPublisher.resps[:0], expectedResponse) + + var formattedProject string = PublisherProjectPath("[PROJECT]") + var request = &pubsubpb.ListTopicsRequest{ + Project: formattedProject, + } + + c, err := NewPublisherClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListTopics(context.Background(), request).Next() + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockPublisher.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + want := (interface{})(expectedResponse.Topics[0]) + got := (interface{})(resp) + var ok bool + + switch want := (want).(type) { + case proto.Message: + ok = proto.Equal(want, got.(proto.Message)) + default: + ok = want == got + } + if !ok { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestPublisherListTopicsError(t *testing.T) { + errCode := codes.Internal + mockPublisher.err = grpc.Errorf(errCode, "test error") + + var formattedProject string = PublisherProjectPath("[PROJECT]") + var request = &pubsubpb.ListTopicsRequest{ + Project: formattedProject, + } + + c, err := NewPublisherClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListTopics(context.Background(), request).Next() + + if c := grpc.Code(err); c != errCode { + t.Errorf("got error 
code %q, want %q", c, errCode) + } + _ = resp +} +func TestPublisherListTopicSubscriptions(t *testing.T) { + var nextPageToken string = "" + var subscriptionsElement string = "subscriptionsElement1698708147" + var subscriptions = []string{subscriptionsElement} + var expectedResponse = &pubsubpb.ListTopicSubscriptionsResponse{ + NextPageToken: nextPageToken, + Subscriptions: subscriptions, + } + + mockPublisher.err = nil + mockPublisher.reqs = nil + + mockPublisher.resps = append(mockPublisher.resps[:0], expectedResponse) + + var formattedTopic string = PublisherTopicPath("[PROJECT]", "[TOPIC]") + var request = &pubsubpb.ListTopicSubscriptionsRequest{ + Topic: formattedTopic, + } + + c, err := NewPublisherClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListTopicSubscriptions(context.Background(), request).Next() + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockPublisher.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + want := (interface{})(expectedResponse.Subscriptions[0]) + got := (interface{})(resp) + var ok bool + + switch want := (want).(type) { + case proto.Message: + ok = proto.Equal(want, got.(proto.Message)) + default: + ok = want == got + } + if !ok { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestPublisherListTopicSubscriptionsError(t *testing.T) { + errCode := codes.Internal + mockPublisher.err = grpc.Errorf(errCode, "test error") + + var formattedTopic string = PublisherTopicPath("[PROJECT]", "[TOPIC]") + var request = &pubsubpb.ListTopicSubscriptionsRequest{ + Topic: formattedTopic, + } + + c, err := NewPublisherClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListTopicSubscriptions(context.Background(), request).Next() + + if c := grpc.Code(err); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestPublisherDeleteTopic(t 
*testing.T) { + var expectedResponse *google_protobuf.Empty = &google_protobuf.Empty{} + + mockPublisher.err = nil + mockPublisher.reqs = nil + + mockPublisher.resps = append(mockPublisher.resps[:0], expectedResponse) + + var formattedTopic string = PublisherTopicPath("[PROJECT]", "[TOPIC]") + var request = &pubsubpb.DeleteTopicRequest{ + Topic: formattedTopic, + } + + c, err := NewPublisherClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + err = c.DeleteTopic(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockPublisher.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + +} + +func TestPublisherDeleteTopicError(t *testing.T) { + errCode := codes.Internal + mockPublisher.err = grpc.Errorf(errCode, "test error") + + var formattedTopic string = PublisherTopicPath("[PROJECT]", "[TOPIC]") + var request = &pubsubpb.DeleteTopicRequest{ + Topic: formattedTopic, + } + + c, err := NewPublisherClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + err = c.DeleteTopic(context.Background(), request) + + if c := grpc.Code(err); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } +} +func TestSubscriberCreateSubscription(t *testing.T) { + var name2 string = "name2-1052831874" + var topic2 string = "topic2-1139259102" + var ackDeadlineSeconds int32 = 2135351438 + var expectedResponse = &pubsubpb.Subscription{ + Name: name2, + Topic: topic2, + AckDeadlineSeconds: ackDeadlineSeconds, + } + + mockSubscriber.err = nil + mockSubscriber.reqs = nil + + mockSubscriber.resps = append(mockSubscriber.resps[:0], expectedResponse) + + var formattedName string = SubscriberSubscriptionPath("[PROJECT]", "[SUBSCRIPTION]") + var formattedTopic string = SubscriberTopicPath("[PROJECT]", "[TOPIC]") + var request = &pubsubpb.Subscription{ + Name: formattedName, + Topic: formattedTopic, + } + + c, err := 
NewSubscriberClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.CreateSubscription(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockSubscriber.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestSubscriberCreateSubscriptionError(t *testing.T) { + errCode := codes.Internal + mockSubscriber.err = grpc.Errorf(errCode, "test error") + + var formattedName string = SubscriberSubscriptionPath("[PROJECT]", "[SUBSCRIPTION]") + var formattedTopic string = SubscriberTopicPath("[PROJECT]", "[TOPIC]") + var request = &pubsubpb.Subscription{ + Name: formattedName, + Topic: formattedTopic, + } + + c, err := NewSubscriberClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.CreateSubscription(context.Background(), request) + + if c := grpc.Code(err); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestSubscriberGetSubscription(t *testing.T) { + var name string = "name3373707" + var topic string = "topic110546223" + var ackDeadlineSeconds int32 = 2135351438 + var expectedResponse = &pubsubpb.Subscription{ + Name: name, + Topic: topic, + AckDeadlineSeconds: ackDeadlineSeconds, + } + + mockSubscriber.err = nil + mockSubscriber.reqs = nil + + mockSubscriber.resps = append(mockSubscriber.resps[:0], expectedResponse) + + var formattedSubscription string = SubscriberSubscriptionPath("[PROJECT]", "[SUBSCRIPTION]") + var request = &pubsubpb.GetSubscriptionRequest{ + Subscription: formattedSubscription, + } + + c, err := NewSubscriberClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.GetSubscription(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := 
request, mockSubscriber.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestSubscriberGetSubscriptionError(t *testing.T) { + errCode := codes.Internal + mockSubscriber.err = grpc.Errorf(errCode, "test error") + + var formattedSubscription string = SubscriberSubscriptionPath("[PROJECT]", "[SUBSCRIPTION]") + var request = &pubsubpb.GetSubscriptionRequest{ + Subscription: formattedSubscription, + } + + c, err := NewSubscriberClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.GetSubscription(context.Background(), request) + + if c := grpc.Code(err); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestSubscriberListSubscriptions(t *testing.T) { + var nextPageToken string = "" + var subscriptionsElement *pubsubpb.Subscription = &pubsubpb.Subscription{} + var subscriptions = []*pubsubpb.Subscription{subscriptionsElement} + var expectedResponse = &pubsubpb.ListSubscriptionsResponse{ + NextPageToken: nextPageToken, + Subscriptions: subscriptions, + } + + mockSubscriber.err = nil + mockSubscriber.reqs = nil + + mockSubscriber.resps = append(mockSubscriber.resps[:0], expectedResponse) + + var formattedProject string = SubscriberProjectPath("[PROJECT]") + var request = &pubsubpb.ListSubscriptionsRequest{ + Project: formattedProject, + } + + c, err := NewSubscriberClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListSubscriptions(context.Background(), request).Next() + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockSubscriber.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + want := (interface{})(expectedResponse.Subscriptions[0]) + got := (interface{})(resp) + var ok bool + + switch want := 
(want).(type) { + case proto.Message: + ok = proto.Equal(want, got.(proto.Message)) + default: + ok = want == got + } + if !ok { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestSubscriberListSubscriptionsError(t *testing.T) { + errCode := codes.Internal + mockSubscriber.err = grpc.Errorf(errCode, "test error") + + var formattedProject string = SubscriberProjectPath("[PROJECT]") + var request = &pubsubpb.ListSubscriptionsRequest{ + Project: formattedProject, + } + + c, err := NewSubscriberClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListSubscriptions(context.Background(), request).Next() + + if c := grpc.Code(err); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestSubscriberDeleteSubscription(t *testing.T) { + var expectedResponse *google_protobuf.Empty = &google_protobuf.Empty{} + + mockSubscriber.err = nil + mockSubscriber.reqs = nil + + mockSubscriber.resps = append(mockSubscriber.resps[:0], expectedResponse) + + var formattedSubscription string = SubscriberSubscriptionPath("[PROJECT]", "[SUBSCRIPTION]") + var request = &pubsubpb.DeleteSubscriptionRequest{ + Subscription: formattedSubscription, + } + + c, err := NewSubscriberClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + err = c.DeleteSubscription(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockSubscriber.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + +} + +func TestSubscriberDeleteSubscriptionError(t *testing.T) { + errCode := codes.Internal + mockSubscriber.err = grpc.Errorf(errCode, "test error") + + var formattedSubscription string = SubscriberSubscriptionPath("[PROJECT]", "[SUBSCRIPTION]") + var request = &pubsubpb.DeleteSubscriptionRequest{ + Subscription: formattedSubscription, + } + + c, err := NewSubscriberClient(context.Background(), clientOpt) + if err 
!= nil { + t.Fatal(err) + } + + err = c.DeleteSubscription(context.Background(), request) + + if c := grpc.Code(err); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } +} +func TestSubscriberModifyAckDeadline(t *testing.T) { + var expectedResponse *google_protobuf.Empty = &google_protobuf.Empty{} + + mockSubscriber.err = nil + mockSubscriber.reqs = nil + + mockSubscriber.resps = append(mockSubscriber.resps[:0], expectedResponse) + + var formattedSubscription string = SubscriberSubscriptionPath("[PROJECT]", "[SUBSCRIPTION]") + var ackIds []string = nil + var ackDeadlineSeconds int32 = 2135351438 + var request = &pubsubpb.ModifyAckDeadlineRequest{ + Subscription: formattedSubscription, + AckIds: ackIds, + AckDeadlineSeconds: ackDeadlineSeconds, + } + + c, err := NewSubscriberClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + err = c.ModifyAckDeadline(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockSubscriber.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + +} + +func TestSubscriberModifyAckDeadlineError(t *testing.T) { + errCode := codes.Internal + mockSubscriber.err = grpc.Errorf(errCode, "test error") + + var formattedSubscription string = SubscriberSubscriptionPath("[PROJECT]", "[SUBSCRIPTION]") + var ackIds []string = nil + var ackDeadlineSeconds int32 = 2135351438 + var request = &pubsubpb.ModifyAckDeadlineRequest{ + Subscription: formattedSubscription, + AckIds: ackIds, + AckDeadlineSeconds: ackDeadlineSeconds, + } + + c, err := NewSubscriberClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + err = c.ModifyAckDeadline(context.Background(), request) + + if c := grpc.Code(err); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } +} +func TestSubscriberAcknowledge(t *testing.T) { + var expectedResponse *google_protobuf.Empty = &google_protobuf.Empty{} + + 
mockSubscriber.err = nil + mockSubscriber.reqs = nil + + mockSubscriber.resps = append(mockSubscriber.resps[:0], expectedResponse) + + var formattedSubscription string = SubscriberSubscriptionPath("[PROJECT]", "[SUBSCRIPTION]") + var ackIds []string = nil + var request = &pubsubpb.AcknowledgeRequest{ + Subscription: formattedSubscription, + AckIds: ackIds, + } + + c, err := NewSubscriberClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + err = c.Acknowledge(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockSubscriber.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + +} + +func TestSubscriberAcknowledgeError(t *testing.T) { + errCode := codes.Internal + mockSubscriber.err = grpc.Errorf(errCode, "test error") + + var formattedSubscription string = SubscriberSubscriptionPath("[PROJECT]", "[SUBSCRIPTION]") + var ackIds []string = nil + var request = &pubsubpb.AcknowledgeRequest{ + Subscription: formattedSubscription, + AckIds: ackIds, + } + + c, err := NewSubscriberClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + err = c.Acknowledge(context.Background(), request) + + if c := grpc.Code(err); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } +} +func TestSubscriberPull(t *testing.T) { + var expectedResponse *pubsubpb.PullResponse = &pubsubpb.PullResponse{} + + mockSubscriber.err = nil + mockSubscriber.reqs = nil + + mockSubscriber.resps = append(mockSubscriber.resps[:0], expectedResponse) + + var formattedSubscription string = SubscriberSubscriptionPath("[PROJECT]", "[SUBSCRIPTION]") + var maxMessages int32 = 496131527 + var request = &pubsubpb.PullRequest{ + Subscription: formattedSubscription, + MaxMessages: maxMessages, + } + + c, err := NewSubscriberClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.Pull(context.Background(), request) + + if err != 
nil { + t.Fatal(err) + } + + if want, got := request, mockSubscriber.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestSubscriberPullError(t *testing.T) { + errCode := codes.Internal + mockSubscriber.err = grpc.Errorf(errCode, "test error") + + var formattedSubscription string = SubscriberSubscriptionPath("[PROJECT]", "[SUBSCRIPTION]") + var maxMessages int32 = 496131527 + var request = &pubsubpb.PullRequest{ + Subscription: formattedSubscription, + MaxMessages: maxMessages, + } + + c, err := NewSubscriberClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.Pull(context.Background(), request) + + if c := grpc.Code(err); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestSubscriberStreamingPull(t *testing.T) { + var expectedResponse *pubsubpb.StreamingPullResponse = &pubsubpb.StreamingPullResponse{} + + mockSubscriber.err = nil + mockSubscriber.reqs = nil + + mockSubscriber.resps = append(mockSubscriber.resps[:0], expectedResponse) + + var formattedSubscription string = SubscriberSubscriptionPath("[PROJECT]", "[SUBSCRIPTION]") + var streamAckDeadlineSeconds int32 = 1875467245 + var request = &pubsubpb.StreamingPullRequest{ + Subscription: formattedSubscription, + StreamAckDeadlineSeconds: streamAckDeadlineSeconds, + } + + c, err := NewSubscriberClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + stream, err := c.StreamingPull(context.Background()) + if err != nil { + t.Fatal(err) + } + if err := stream.Send(request); err != nil { + t.Fatal(err) + } + if err := stream.CloseSend(); err != nil { + t.Fatal(err) + } + resp, err := stream.Recv() + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockSubscriber.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong 
request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestSubscriberStreamingPullError(t *testing.T) { + errCode := codes.Internal + mockSubscriber.err = grpc.Errorf(errCode, "test error") + + var formattedSubscription string = SubscriberSubscriptionPath("[PROJECT]", "[SUBSCRIPTION]") + var streamAckDeadlineSeconds int32 = 1875467245 + var request = &pubsubpb.StreamingPullRequest{ + Subscription: formattedSubscription, + StreamAckDeadlineSeconds: streamAckDeadlineSeconds, + } + + c, err := NewSubscriberClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + stream, err := c.StreamingPull(context.Background()) + if err != nil { + t.Fatal(err) + } + if err := stream.Send(request); err != nil { + t.Fatal(err) + } + if err := stream.CloseSend(); err != nil { + t.Fatal(err) + } + resp, err := stream.Recv() + + if c := grpc.Code(err); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestSubscriberModifyPushConfig(t *testing.T) { + var expectedResponse *google_protobuf.Empty = &google_protobuf.Empty{} + + mockSubscriber.err = nil + mockSubscriber.reqs = nil + + mockSubscriber.resps = append(mockSubscriber.resps[:0], expectedResponse) + + var formattedSubscription string = SubscriberSubscriptionPath("[PROJECT]", "[SUBSCRIPTION]") + var pushConfig *pubsubpb.PushConfig = &pubsubpb.PushConfig{} + var request = &pubsubpb.ModifyPushConfigRequest{ + Subscription: formattedSubscription, + PushConfig: pushConfig, + } + + c, err := NewSubscriberClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + err = c.ModifyPushConfig(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockSubscriber.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + +} + +func 
TestSubscriberModifyPushConfigError(t *testing.T) { + errCode := codes.Internal + mockSubscriber.err = grpc.Errorf(errCode, "test error") + + var formattedSubscription string = SubscriberSubscriptionPath("[PROJECT]", "[SUBSCRIPTION]") + var pushConfig *pubsubpb.PushConfig = &pubsubpb.PushConfig{} + var request = &pubsubpb.ModifyPushConfigRequest{ + Subscription: formattedSubscription, + PushConfig: pushConfig, + } + + c, err := NewSubscriberClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + err = c.ModifyPushConfig(context.Background(), request) + + if c := grpc.Code(err); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } +} diff --git a/vendor/cloud.google.com/go/pubsub/apiv1/publisher_client.go b/vendor/cloud.google.com/go/pubsub/apiv1/publisher_client.go new file mode 100644 index 00000000..ec8c6abf --- /dev/null +++ b/vendor/cloud.google.com/go/pubsub/apiv1/publisher_client.go @@ -0,0 +1,394 @@ +// Copyright 2017, Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. 
+ +package pubsub + +import ( + "math" + "time" + + "cloud.google.com/go/iam" + "cloud.google.com/go/internal/version" + gax "github.com/googleapis/gax-go" + "golang.org/x/net/context" + "google.golang.org/api/iterator" + "google.golang.org/api/option" + "google.golang.org/api/transport" + pubsubpb "google.golang.org/genproto/googleapis/pubsub/v1" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" +) + +var ( + publisherProjectPathTemplate = gax.MustCompilePathTemplate("projects/{project}") + publisherTopicPathTemplate = gax.MustCompilePathTemplate("projects/{project}/topics/{topic}") +) + +// PublisherCallOptions contains the retry settings for each method of PublisherClient. +type PublisherCallOptions struct { + CreateTopic []gax.CallOption + Publish []gax.CallOption + GetTopic []gax.CallOption + ListTopics []gax.CallOption + ListTopicSubscriptions []gax.CallOption + DeleteTopic []gax.CallOption +} + +func defaultPublisherClientOptions() []option.ClientOption { + return []option.ClientOption{ + option.WithEndpoint("pubsub.googleapis.com:443"), + option.WithScopes( + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/pubsub", + ), + } +} + +func defaultPublisherCallOptions() *PublisherCallOptions { + retry := map[[2]string][]gax.CallOption{ + {"default", "idempotent"}: { + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.3, + }) + }), + }, + {"messaging", "one_plus_delivery"}: { + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.3, + }) + }), + }, + } + return &PublisherCallOptions{ + CreateTopic: retry[[2]string{"default", "idempotent"}], + Publish: retry[[2]string{"messaging", 
"one_plus_delivery"}], + GetTopic: retry[[2]string{"default", "idempotent"}], + ListTopics: retry[[2]string{"default", "idempotent"}], + ListTopicSubscriptions: retry[[2]string{"default", "idempotent"}], + DeleteTopic: retry[[2]string{"default", "idempotent"}], + } +} + +// PublisherClient is a client for interacting with Google Cloud Pub/Sub API. +type PublisherClient struct { + // The connection to the service. + conn *grpc.ClientConn + + // The gRPC API client. + publisherClient pubsubpb.PublisherClient + + // The call options for this service. + CallOptions *PublisherCallOptions + + // The metadata to be sent with each request. + xGoogHeader string +} + +// NewPublisherClient creates a new publisher client. +// +// The service that an application uses to manipulate topics, and to send +// messages to a topic. +func NewPublisherClient(ctx context.Context, opts ...option.ClientOption) (*PublisherClient, error) { + conn, err := transport.DialGRPC(ctx, append(defaultPublisherClientOptions(), opts...)...) + if err != nil { + return nil, err + } + c := &PublisherClient{ + conn: conn, + CallOptions: defaultPublisherCallOptions(), + + publisherClient: pubsubpb.NewPublisherClient(conn), + } + c.SetGoogleClientInfo() + return c, nil +} + +// Connection returns the client's connection to the API service. +func (c *PublisherClient) Connection() *grpc.ClientConn { + return c.conn +} + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *PublisherClient) Close() error { + return c.conn.Close() +} + +// SetGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *PublisherClient) SetGoogleClientInfo(keyval ...string) { + kv := append([]string{"gl-go", version.Go()}, keyval...) 
+ kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", "") + c.xGoogHeader = gax.XGoogHeader(kv...) +} + +// PublisherProjectPath returns the path for the project resource. +func PublisherProjectPath(project string) string { + path, err := publisherProjectPathTemplate.Render(map[string]string{ + "project": project, + }) + if err != nil { + panic(err) + } + return path +} + +// PublisherTopicPath returns the path for the topic resource. +func PublisherTopicPath(project, topic string) string { + path, err := publisherTopicPathTemplate.Render(map[string]string{ + "project": project, + "topic": topic, + }) + if err != nil { + panic(err) + } + return path +} + +func (c *PublisherClient) SubscriptionIAM(subscription *pubsubpb.Subscription) *iam.Handle { + return iam.InternalNewHandle(c.Connection(), subscription.Name) +} + +func (c *PublisherClient) TopicIAM(topic *pubsubpb.Topic) *iam.Handle { + return iam.InternalNewHandle(c.Connection(), topic.Name) +} + +// CreateTopic creates the given topic with the given name. +func (c *PublisherClient) CreateTopic(ctx context.Context, req *pubsubpb.Topic) (*pubsubpb.Topic, error) { + ctx = insertXGoog(ctx, c.xGoogHeader) + var resp *pubsubpb.Topic + err := gax.Invoke(ctx, func(ctx context.Context) error { + var err error + resp, err = c.publisherClient.CreateTopic(ctx, req) + return err + }, c.CallOptions.CreateTopic...) + if err != nil { + return nil, err + } + return resp, nil +} + +// Publish adds one or more messages to the topic. Returns `NOT_FOUND` if the topic +// does not exist. The message payload must not be empty; it must contain +// either a non-empty data field, or at least one attribute. 
+func (c *PublisherClient) Publish(ctx context.Context, req *pubsubpb.PublishRequest) (*pubsubpb.PublishResponse, error) { + ctx = insertXGoog(ctx, c.xGoogHeader) + var resp *pubsubpb.PublishResponse + err := gax.Invoke(ctx, func(ctx context.Context) error { + var err error + resp, err = c.publisherClient.Publish(ctx, req) + return err + }, c.CallOptions.Publish...) + if err != nil { + return nil, err + } + return resp, nil +} + +// GetTopic gets the configuration of a topic. +func (c *PublisherClient) GetTopic(ctx context.Context, req *pubsubpb.GetTopicRequest) (*pubsubpb.Topic, error) { + ctx = insertXGoog(ctx, c.xGoogHeader) + var resp *pubsubpb.Topic + err := gax.Invoke(ctx, func(ctx context.Context) error { + var err error + resp, err = c.publisherClient.GetTopic(ctx, req) + return err + }, c.CallOptions.GetTopic...) + if err != nil { + return nil, err + } + return resp, nil +} + +// ListTopics lists matching topics. +func (c *PublisherClient) ListTopics(ctx context.Context, req *pubsubpb.ListTopicsRequest) *TopicIterator { + ctx = insertXGoog(ctx, c.xGoogHeader) + it := &TopicIterator{} + it.InternalFetch = func(pageSize int, pageToken string) ([]*pubsubpb.Topic, string, error) { + var resp *pubsubpb.ListTopicsResponse + req.PageToken = pageToken + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context) error { + var err error + resp, err = c.publisherClient.ListTopics(ctx, req) + return err + }, c.CallOptions.ListTopics...) + if err != nil { + return nil, "", err + } + return resp.Topics, resp.NextPageToken, nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) 
+ return nextPageToken, nil + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + return it +} + +// ListTopicSubscriptions lists the name of the subscriptions for this topic. +func (c *PublisherClient) ListTopicSubscriptions(ctx context.Context, req *pubsubpb.ListTopicSubscriptionsRequest) *StringIterator { + ctx = insertXGoog(ctx, c.xGoogHeader) + it := &StringIterator{} + it.InternalFetch = func(pageSize int, pageToken string) ([]string, string, error) { + var resp *pubsubpb.ListTopicSubscriptionsResponse + req.PageToken = pageToken + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context) error { + var err error + resp, err = c.publisherClient.ListTopicSubscriptions(ctx, req) + return err + }, c.CallOptions.ListTopicSubscriptions...) + if err != nil { + return nil, "", err + } + return resp.Subscriptions, resp.NextPageToken, nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + return it +} + +// DeleteTopic deletes the topic with the given name. Returns `NOT_FOUND` if the topic +// does not exist. After a topic is deleted, a new topic may be created with +// the same name; this is an entirely new topic with none of the old +// configuration or subscriptions. Existing subscriptions to this topic are +// not deleted, but their `topic` field is set to `_deleted-topic_`. 
+func (c *PublisherClient) DeleteTopic(ctx context.Context, req *pubsubpb.DeleteTopicRequest) error { + ctx = insertXGoog(ctx, c.xGoogHeader) + err := gax.Invoke(ctx, func(ctx context.Context) error { + var err error + _, err = c.publisherClient.DeleteTopic(ctx, req) + return err + }, c.CallOptions.DeleteTopic...) + return err +} + +// StringIterator manages a stream of string. +type StringIterator struct { + items []string + pageInfo *iterator.PageInfo + nextFunc func() error + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []string, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *StringIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. +func (it *StringIterator) Next() (string, error) { + var item string + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *StringIterator) bufLen() int { + return len(it.items) +} + +func (it *StringIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} + +// TopicIterator manages a stream of *pubsubpb.Topic. +type TopicIterator struct { + items []*pubsubpb.Topic + pageInfo *iterator.PageInfo + nextFunc func() error + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. 
+ // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*pubsubpb.Topic, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *TopicIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. +func (it *TopicIterator) Next() (*pubsubpb.Topic, error) { + var item *pubsubpb.Topic + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *TopicIterator) bufLen() int { + return len(it.items) +} + +func (it *TopicIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} diff --git a/vendor/cloud.google.com/go/pubsub/apiv1/publisher_client_example_test.go b/vendor/cloud.google.com/go/pubsub/apiv1/publisher_client_example_test.go new file mode 100644 index 00000000..6e63f80e --- /dev/null +++ b/vendor/cloud.google.com/go/pubsub/apiv1/publisher_client_example_test.go @@ -0,0 +1,181 @@ +// Copyright 2017, Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package pubsub_test + +import ( + "cloud.google.com/go/pubsub/apiv1" + "golang.org/x/net/context" + pubsubpb "google.golang.org/genproto/googleapis/pubsub/v1" +) + +func ExampleNewPublisherClient() { + ctx := context.Background() + c, err := pubsub.NewPublisherClient(ctx) + if err != nil { + // TODO: Handle error. + } + // TODO: Use client. + _ = c +} + +func ExamplePublisherClient_SubscriptionIAM() { + ctx := context.Background() + c, err := pubsub.NewPublisherClient(ctx) + if err != nil { + // TODO: Handle error. + } + + subscription := &pubsubpb.Subscription{} + h := c.SubscriptionIAM(subscription) + policy, err := h.Policy(ctx) + if err != nil { + // TODO: Handle error. + } + //TODO: Use the IAM policy + _ = policy +} + +func ExamplePublisherClient_TopicIAM() { + ctx := context.Background() + c, err := pubsub.NewPublisherClient(ctx) + if err != nil { + // TODO: Handle error. + } + + topic := &pubsubpb.Topic{} + h := c.TopicIAM(topic) + policy, err := h.Policy(ctx) + if err != nil { + // TODO: Handle error. + } + //TODO: Use the IAM policy + _ = policy +} + +func ExamplePublisherClient_CreateTopic() { + ctx := context.Background() + c, err := pubsub.NewPublisherClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &pubsubpb.Topic{ + // TODO: Fill request struct fields. + } + resp, err := c.CreateTopic(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExamplePublisherClient_Publish() { + ctx := context.Background() + c, err := pubsub.NewPublisherClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &pubsubpb.PublishRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.Publish(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. 
+ _ = resp +} + +func ExamplePublisherClient_GetTopic() { + ctx := context.Background() + c, err := pubsub.NewPublisherClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &pubsubpb.GetTopicRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.GetTopic(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExamplePublisherClient_ListTopics() { + ctx := context.Background() + c, err := pubsub.NewPublisherClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &pubsubpb.ListTopicsRequest{ + // TODO: Fill request struct fields. + } + it := c.ListTopics(ctx, req) + for { + resp, err := it.Next() + if err != nil { + // TODO: Handle error. + break + } + // TODO: Use resp. + _ = resp + } +} + +func ExamplePublisherClient_ListTopicSubscriptions() { + ctx := context.Background() + c, err := pubsub.NewPublisherClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &pubsubpb.ListTopicSubscriptionsRequest{ + // TODO: Fill request struct fields. + } + it := c.ListTopicSubscriptions(ctx, req) + for { + resp, err := it.Next() + if err != nil { + // TODO: Handle error. + break + } + // TODO: Use resp. + _ = resp + } +} + +func ExamplePublisherClient_DeleteTopic() { + ctx := context.Background() + c, err := pubsub.NewPublisherClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &pubsubpb.DeleteTopicRequest{ + // TODO: Fill request struct fields. + } + err = c.DeleteTopic(ctx, req) + if err != nil { + // TODO: Handle error. + } +} diff --git a/vendor/cloud.google.com/go/pubsub/apiv1/subscriber_client.go b/vendor/cloud.google.com/go/pubsub/apiv1/subscriber_client.go new file mode 100644 index 00000000..c3938a32 --- /dev/null +++ b/vendor/cloud.google.com/go/pubsub/apiv1/subscriber_client.go @@ -0,0 +1,409 @@ +// Copyright 2017, Google Inc. All rights reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package pubsub + +import ( + "math" + "time" + + "cloud.google.com/go/iam" + "cloud.google.com/go/internal/version" + gax "github.com/googleapis/gax-go" + "golang.org/x/net/context" + "google.golang.org/api/iterator" + "google.golang.org/api/option" + "google.golang.org/api/transport" + pubsubpb "google.golang.org/genproto/googleapis/pubsub/v1" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" +) + +var ( + subscriberProjectPathTemplate = gax.MustCompilePathTemplate("projects/{project}") + subscriberSubscriptionPathTemplate = gax.MustCompilePathTemplate("projects/{project}/subscriptions/{subscription}") + subscriberTopicPathTemplate = gax.MustCompilePathTemplate("projects/{project}/topics/{topic}") +) + +// SubscriberCallOptions contains the retry settings for each method of SubscriberClient. 
+type SubscriberCallOptions struct { + CreateSubscription []gax.CallOption + GetSubscription []gax.CallOption + ListSubscriptions []gax.CallOption + DeleteSubscription []gax.CallOption + ModifyAckDeadline []gax.CallOption + Acknowledge []gax.CallOption + Pull []gax.CallOption + StreamingPull []gax.CallOption + ModifyPushConfig []gax.CallOption +} + +func defaultSubscriberClientOptions() []option.ClientOption { + return []option.ClientOption{ + option.WithEndpoint("pubsub.googleapis.com:443"), + option.WithScopes( + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/pubsub", + ), + } +} + +func defaultSubscriberCallOptions() *SubscriberCallOptions { + retry := map[[2]string][]gax.CallOption{ + {"default", "idempotent"}: { + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.3, + }) + }), + }, + } + return &SubscriberCallOptions{ + CreateSubscription: retry[[2]string{"default", "idempotent"}], + GetSubscription: retry[[2]string{"default", "idempotent"}], + ListSubscriptions: retry[[2]string{"default", "idempotent"}], + DeleteSubscription: retry[[2]string{"default", "idempotent"}], + ModifyAckDeadline: retry[[2]string{"default", "non_idempotent"}], + Acknowledge: retry[[2]string{"messaging", "non_idempotent"}], + Pull: retry[[2]string{"messaging", "non_idempotent"}], + StreamingPull: retry[[2]string{"messaging", "non_idempotent"}], + ModifyPushConfig: retry[[2]string{"default", "non_idempotent"}], + } +} + +// SubscriberClient is a client for interacting with Google Cloud Pub/Sub API. +type SubscriberClient struct { + // The connection to the service. + conn *grpc.ClientConn + + // The gRPC API client. + subscriberClient pubsubpb.SubscriberClient + + // The call options for this service. 
+ CallOptions *SubscriberCallOptions + + // The metadata to be sent with each request. + xGoogHeader string +} + +// NewSubscriberClient creates a new subscriber client. +// +// The service that an application uses to manipulate subscriptions and to +// consume messages from a subscription via the `Pull` method. +func NewSubscriberClient(ctx context.Context, opts ...option.ClientOption) (*SubscriberClient, error) { + conn, err := transport.DialGRPC(ctx, append(defaultSubscriberClientOptions(), opts...)...) + if err != nil { + return nil, err + } + c := &SubscriberClient{ + conn: conn, + CallOptions: defaultSubscriberCallOptions(), + + subscriberClient: pubsubpb.NewSubscriberClient(conn), + } + c.SetGoogleClientInfo() + return c, nil +} + +// Connection returns the client's connection to the API service. +func (c *SubscriberClient) Connection() *grpc.ClientConn { + return c.conn +} + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *SubscriberClient) Close() error { + return c.conn.Close() +} + +// SetGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *SubscriberClient) SetGoogleClientInfo(keyval ...string) { + kv := append([]string{"gl-go", version.Go()}, keyval...) + kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", "") + c.xGoogHeader = gax.XGoogHeader(kv...) +} + +// SubscriberProjectPath returns the path for the project resource. +func SubscriberProjectPath(project string) string { + path, err := subscriberProjectPathTemplate.Render(map[string]string{ + "project": project, + }) + if err != nil { + panic(err) + } + return path +} + +// SubscriberSubscriptionPath returns the path for the subscription resource. 
+func SubscriberSubscriptionPath(project, subscription string) string { + path, err := subscriberSubscriptionPathTemplate.Render(map[string]string{ + "project": project, + "subscription": subscription, + }) + if err != nil { + panic(err) + } + return path +} + +// SubscriberTopicPath returns the path for the topic resource. +func SubscriberTopicPath(project, topic string) string { + path, err := subscriberTopicPathTemplate.Render(map[string]string{ + "project": project, + "topic": topic, + }) + if err != nil { + panic(err) + } + return path +} + +func (c *SubscriberClient) SubscriptionIAM(subscription *pubsubpb.Subscription) *iam.Handle { + return iam.InternalNewHandle(c.Connection(), subscription.Name) +} + +func (c *SubscriberClient) TopicIAM(topic *pubsubpb.Topic) *iam.Handle { + return iam.InternalNewHandle(c.Connection(), topic.Name) +} + +// CreateSubscription creates a subscription to a given topic. +// If the subscription already exists, returns `ALREADY_EXISTS`. +// If the corresponding topic doesn't exist, returns `NOT_FOUND`. +// +// If the name is not provided in the request, the server will assign a random +// name for this subscription on the same project as the topic, conforming +// to the +// [resource name format](https://cloud.google.com/pubsub/docs/overview#names). +// The generated name is populated in the returned Subscription object. +// Note that for REST API requests, you must specify a name in the request. +func (c *SubscriberClient) CreateSubscription(ctx context.Context, req *pubsubpb.Subscription) (*pubsubpb.Subscription, error) { + ctx = insertXGoog(ctx, c.xGoogHeader) + var resp *pubsubpb.Subscription + err := gax.Invoke(ctx, func(ctx context.Context) error { + var err error + resp, err = c.subscriberClient.CreateSubscription(ctx, req) + return err + }, c.CallOptions.CreateSubscription...) + if err != nil { + return nil, err + } + return resp, nil +} + +// GetSubscription gets the configuration details of a subscription. 
+func (c *SubscriberClient) GetSubscription(ctx context.Context, req *pubsubpb.GetSubscriptionRequest) (*pubsubpb.Subscription, error) { + ctx = insertXGoog(ctx, c.xGoogHeader) + var resp *pubsubpb.Subscription + err := gax.Invoke(ctx, func(ctx context.Context) error { + var err error + resp, err = c.subscriberClient.GetSubscription(ctx, req) + return err + }, c.CallOptions.GetSubscription...) + if err != nil { + return nil, err + } + return resp, nil +} + +// ListSubscriptions lists matching subscriptions. +func (c *SubscriberClient) ListSubscriptions(ctx context.Context, req *pubsubpb.ListSubscriptionsRequest) *SubscriptionIterator { + ctx = insertXGoog(ctx, c.xGoogHeader) + it := &SubscriptionIterator{} + it.InternalFetch = func(pageSize int, pageToken string) ([]*pubsubpb.Subscription, string, error) { + var resp *pubsubpb.ListSubscriptionsResponse + req.PageToken = pageToken + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context) error { + var err error + resp, err = c.subscriberClient.ListSubscriptions(ctx, req) + return err + }, c.CallOptions.ListSubscriptions...) + if err != nil { + return nil, "", err + } + return resp.Subscriptions, resp.NextPageToken, nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + return it +} + +// DeleteSubscription deletes an existing subscription. All messages retained in the subscription +// are immediately dropped. Calls to `Pull` after deletion will return +// `NOT_FOUND`. 
After a subscription is deleted, a new one may be created with +// the same name, but the new one has no association with the old +// subscription or its topic unless the same topic is specified. +func (c *SubscriberClient) DeleteSubscription(ctx context.Context, req *pubsubpb.DeleteSubscriptionRequest) error { + ctx = insertXGoog(ctx, c.xGoogHeader) + err := gax.Invoke(ctx, func(ctx context.Context) error { + var err error + _, err = c.subscriberClient.DeleteSubscription(ctx, req) + return err + }, c.CallOptions.DeleteSubscription...) + return err +} + +// ModifyAckDeadline modifies the ack deadline for a specific message. This method is useful +// to indicate that more time is needed to process a message by the +// subscriber, or to make the message available for redelivery if the +// processing was interrupted. Note that this does not modify the +// subscription-level `ackDeadlineSeconds` used for subsequent messages. +func (c *SubscriberClient) ModifyAckDeadline(ctx context.Context, req *pubsubpb.ModifyAckDeadlineRequest) error { + ctx = insertXGoog(ctx, c.xGoogHeader) + err := gax.Invoke(ctx, func(ctx context.Context) error { + var err error + _, err = c.subscriberClient.ModifyAckDeadline(ctx, req) + return err + }, c.CallOptions.ModifyAckDeadline...) + return err +} + +// Acknowledge acknowledges the messages associated with the `ack_ids` in the +// `AcknowledgeRequest`. The Pub/Sub system can remove the relevant messages +// from the subscription. +// +// Acknowledging a message whose ack deadline has expired may succeed, +// but such a message may be redelivered later. Acknowledging a message more +// than once will not result in an error. +func (c *SubscriberClient) Acknowledge(ctx context.Context, req *pubsubpb.AcknowledgeRequest) error { + ctx = insertXGoog(ctx, c.xGoogHeader) + err := gax.Invoke(ctx, func(ctx context.Context) error { + var err error + _, err = c.subscriberClient.Acknowledge(ctx, req) + return err + }, c.CallOptions.Acknowledge...) 
+ return err +} + +// Pull pulls messages from the server. Returns an empty list if there are no +// messages available in the backlog. The server may return `UNAVAILABLE` if +// there are too many concurrent pull requests pending for the given +// subscription. +func (c *SubscriberClient) Pull(ctx context.Context, req *pubsubpb.PullRequest) (*pubsubpb.PullResponse, error) { + ctx = insertXGoog(ctx, c.xGoogHeader) + var resp *pubsubpb.PullResponse + err := gax.Invoke(ctx, func(ctx context.Context) error { + var err error + resp, err = c.subscriberClient.Pull(ctx, req) + return err + }, c.CallOptions.Pull...) + if err != nil { + return nil, err + } + return resp, nil +} + +// StreamingPull (EXPERIMENTAL) StreamingPull is an experimental feature. This RPC will +// respond with UNIMPLEMENTED errors unless you have been invited to test +// this feature. Contact cloud-pubsub@google.com with any questions. +// +// Establishes a stream with the server, which sends messages down to the +// client. The client streams acknowledgements and ack deadline modifications +// back to the server. The server will close the stream and return the status +// on any error. The server may close the stream with status `OK` to reassign +// server-side resources, in which case, the client should re-establish the +// stream. `UNAVAILABLE` may also be returned in the case of a transient error +// (e.g., a server restart). These should also be retried by the client. Flow +// control can be achieved by configuring the underlying RPC channel. +func (c *SubscriberClient) StreamingPull(ctx context.Context) (pubsubpb.Subscriber_StreamingPullClient, error) { + ctx = insertXGoog(ctx, c.xGoogHeader) + var resp pubsubpb.Subscriber_StreamingPullClient + err := gax.Invoke(ctx, func(ctx context.Context) error { + var err error + resp, err = c.subscriberClient.StreamingPull(ctx) + return err + }, c.CallOptions.StreamingPull...) 
+ if err != nil { + return nil, err + } + return resp, nil +} + +// ModifyPushConfig modifies the `PushConfig` for a specified subscription. +// +// This may be used to change a push subscription to a pull one (signified by +// an empty `PushConfig`) or vice versa, or change the endpoint URL and other +// attributes of a push subscription. Messages will accumulate for delivery +// continuously through the call regardless of changes to the `PushConfig`. +func (c *SubscriberClient) ModifyPushConfig(ctx context.Context, req *pubsubpb.ModifyPushConfigRequest) error { + ctx = insertXGoog(ctx, c.xGoogHeader) + err := gax.Invoke(ctx, func(ctx context.Context) error { + var err error + _, err = c.subscriberClient.ModifyPushConfig(ctx, req) + return err + }, c.CallOptions.ModifyPushConfig...) + return err +} + +// SubscriptionIterator manages a stream of *pubsubpb.Subscription. +type SubscriptionIterator struct { + items []*pubsubpb.Subscription + pageInfo *iterator.PageInfo + nextFunc func() error + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*pubsubpb.Subscription, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *SubscriptionIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. 
+func (it *SubscriptionIterator) Next() (*pubsubpb.Subscription, error) { + var item *pubsubpb.Subscription + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *SubscriptionIterator) bufLen() int { + return len(it.items) +} + +func (it *SubscriptionIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} diff --git a/vendor/cloud.google.com/go/pubsub/apiv1/subscriber_client_example_test.go b/vendor/cloud.google.com/go/pubsub/apiv1/subscriber_client_example_test.go new file mode 100644 index 00000000..28492e3a --- /dev/null +++ b/vendor/cloud.google.com/go/pubsub/apiv1/subscriber_client_example_test.go @@ -0,0 +1,243 @@ +// Copyright 2017, Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package pubsub_test + +import ( + "io" + + "cloud.google.com/go/pubsub/apiv1" + "golang.org/x/net/context" + pubsubpb "google.golang.org/genproto/googleapis/pubsub/v1" +) + +func ExampleNewSubscriberClient() { + ctx := context.Background() + c, err := pubsub.NewSubscriberClient(ctx) + if err != nil { + // TODO: Handle error. + } + // TODO: Use client. + _ = c +} + +func ExampleSubscriberClient_SubscriptionIAM() { + ctx := context.Background() + c, err := pubsub.NewSubscriberClient(ctx) + if err != nil { + // TODO: Handle error. 
+ } + + subscription := &pubsubpb.Subscription{} + h := c.SubscriptionIAM(subscription) + policy, err := h.Policy(ctx) + if err != nil { + // TODO: Handle error. + } + //TODO: Use the IAM policy + _ = policy +} + +func ExampleSubscriberClient_TopicIAM() { + ctx := context.Background() + c, err := pubsub.NewSubscriberClient(ctx) + if err != nil { + // TODO: Handle error. + } + + topic := &pubsubpb.Topic{} + h := c.TopicIAM(topic) + policy, err := h.Policy(ctx) + if err != nil { + // TODO: Handle error. + } + //TODO: Use the IAM policy + _ = policy +} + +func ExampleSubscriberClient_CreateSubscription() { + ctx := context.Background() + c, err := pubsub.NewSubscriberClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &pubsubpb.Subscription{ + // TODO: Fill request struct fields. + } + resp, err := c.CreateSubscription(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleSubscriberClient_GetSubscription() { + ctx := context.Background() + c, err := pubsub.NewSubscriberClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &pubsubpb.GetSubscriptionRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.GetSubscription(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleSubscriberClient_ListSubscriptions() { + ctx := context.Background() + c, err := pubsub.NewSubscriberClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &pubsubpb.ListSubscriptionsRequest{ + // TODO: Fill request struct fields. + } + it := c.ListSubscriptions(ctx, req) + for { + resp, err := it.Next() + if err != nil { + // TODO: Handle error. + break + } + // TODO: Use resp. + _ = resp + } +} + +func ExampleSubscriberClient_DeleteSubscription() { + ctx := context.Background() + c, err := pubsub.NewSubscriberClient(ctx) + if err != nil { + // TODO: Handle error. 
+ } + + req := &pubsubpb.DeleteSubscriptionRequest{ + // TODO: Fill request struct fields. + } + err = c.DeleteSubscription(ctx, req) + if err != nil { + // TODO: Handle error. + } +} + +func ExampleSubscriberClient_ModifyAckDeadline() { + ctx := context.Background() + c, err := pubsub.NewSubscriberClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &pubsubpb.ModifyAckDeadlineRequest{ + // TODO: Fill request struct fields. + } + err = c.ModifyAckDeadline(ctx, req) + if err != nil { + // TODO: Handle error. + } +} + +func ExampleSubscriberClient_Acknowledge() { + ctx := context.Background() + c, err := pubsub.NewSubscriberClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &pubsubpb.AcknowledgeRequest{ + // TODO: Fill request struct fields. + } + err = c.Acknowledge(ctx, req) + if err != nil { + // TODO: Handle error. + } +} + +func ExampleSubscriberClient_Pull() { + ctx := context.Background() + c, err := pubsub.NewSubscriberClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &pubsubpb.PullRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.Pull(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleSubscriberClient_StreamingPull() { + ctx := context.Background() + c, err := pubsub.NewSubscriberClient(ctx) + if err != nil { + // TODO: Handle error. + } + stream, err := c.StreamingPull(ctx) + if err != nil { + // TODO: Handle error. + } + go func() { + reqs := []*pubsubpb.StreamingPullRequest{ + // TODO: Create requests. + } + for _, req := range reqs { + if err := stream.Send(req); err != nil { + // TODO: Handle error. + } + } + stream.CloseSend() + }() + for { + resp, err := stream.Recv() + if err == io.EOF { + break + } + if err != nil { + // TODO: handle error. + } + // TODO: Use resp. 
+ _ = resp + } +} + +func ExampleSubscriberClient_ModifyPushConfig() { + ctx := context.Background() + c, err := pubsub.NewSubscriberClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &pubsubpb.ModifyPushConfigRequest{ + // TODO: Fill request struct fields. + } + err = c.ModifyPushConfig(ctx, req) + if err != nil { + // TODO: Handle error. + } +} diff --git a/vendor/cloud.google.com/go/pubsub/doc.go b/vendor/cloud.google.com/go/pubsub/doc.go new file mode 100644 index 00000000..159469a5 --- /dev/null +++ b/vendor/cloud.google.com/go/pubsub/doc.go @@ -0,0 +1,120 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/* +Package pubsub provides an easy way to publish and receive Google Cloud Pub/Sub +messages, hiding the the details of the underlying server RPCs. Google Cloud +Pub/Sub is a many-to-many, asynchronous messaging system that decouples senders +and receivers. + +Note: This package is experimental and may make backwards-incompatible changes. + +More information about Google Cloud Pub/Sub is available at +https://cloud.google.com/pubsub/docs + +Publishing + +Google Cloud Pub/Sub messages are published to topics. 
Topics may be created +using the pubsub package like so: + + topic, err := pubsubClient.CreateTopic(context.Background(), "topic-name") + +Messages may then be published to a topic: + + msgIDs, err := topic.Publish(ctx, &pubsub.Message{ + Data: []byte("payload"), + }) + +Receiving + +To receive messages published to a topic, clients create subscriptions +to the topic. There may be more than one subscription per topic; each message +that is published to the topic will be delivered to all of its subscriptions. + +Subsciptions may be created like so: + + sub, err := pubsubClient.CreateSubscription(context.Background(), "sub-name", topic, 0, nil) + +Messages are then consumed from a subscription via an iterator: + + // Construct the iterator + it, err := sub.Pull(context.Background()) + if err != nil { + // handle err ... + } + defer it.Stop() + + // Consume N messages + for i := 0; i < N; i++ { + msg, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + // handle err ... + break + } + + log.Print("got message: ", string(msg.Data)) + msg.Done(true) + } + +The message iterator returns messages one at a time, fetching batches of +messages behind the scenes as needed. Once client code has processed the +message, it must call Message.Done, otherwise the message will eventually be +redelivered. For more information and configuration options, see "Deadlines" +below. + +Note: It is possible for Messages to be redelivered, even if Message.Done has +been called. Client code must be robust to multiple deliveries of messages. + +Deadlines + +The default pubsub deadlines are suitable for most use cases, but may be +overridden. This section describes the tradeoffs that should be considered +when overriding the defaults. + +Behind the scenes, each message returned by the Pub/Sub server has an +associated lease, known as an "ACK deadline". 
+Unless a message is acknowledged within the ACK deadline, or the client requests that +the ACK deadline be extended, the message will become elegible for redelivery. +As a convenience, the pubsub package will automatically extend deadlines until +either: + * Message.Done is called, or + * the "MaxExtension" period elapses from the time the message is fetched from the server. + +The initial ACK deadline given to each messages defaults to 10 seconds, but may +be overridden during subscription creation. Selecting an ACK deadline is a +tradeoff between message redelivery latency and RPC volume. If the pubsub +package fails to acknowledge or extend a message (e.g. due to unexpected +termination of the process), a shorter ACK deadline will generally result in +faster message redelivery by the Pub/Sub system. However, a short ACK deadline +may also increase the number of deadline extension RPCs that the pubsub package +sends to the server. + +The default max extension period is DefaultMaxExtension, and can be overridden +by passing a MaxExtension option to Subscription.Pull. Selecting a max +extension period is a tradeoff between the speed at which client code must +process messages, and the redelivery delay if messages fail to be acknowledged +(e.g. because client code neglects to do so). Using a large MaxExtension +increases the available time for client code to process messages. However, if +the client code neglects to call Message.Done, a large MaxExtension will +increase the delay before the message is redelivered. + +Authentication + +See examples of authorization and authentication at +https://godoc.org/cloud.google.com/go#pkg-examples. 
+*/ +package pubsub // import "cloud.google.com/go/pubsub" diff --git a/vendor/cloud.google.com/go/pubsub/endtoend_test.go b/vendor/cloud.google.com/go/pubsub/endtoend_test.go new file mode 100644 index 00000000..6efab649 --- /dev/null +++ b/vendor/cloud.google.com/go/pubsub/endtoend_test.go @@ -0,0 +1,324 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package pubsub + +import ( + "fmt" + "math/rand" + "reflect" + "sync" + "testing" + "time" + + "golang.org/x/net/context" + + "cloud.google.com/go/internal/testutil" + "google.golang.org/api/iterator" + "google.golang.org/api/option" +) + +const timeout = time.Minute * 10 +const ackDeadline = time.Second * 10 + +const batchSize = 100 +const batches = 100 + +// messageCounter keeps track of how many times a given message has been received. +type messageCounter struct { + mu sync.Mutex + counts map[string]int + // A value is sent to recv each time Inc is called. + recv chan struct{} +} + +func (mc *messageCounter) Inc(msgID string) { + mc.mu.Lock() + mc.counts[msgID] += 1 + mc.mu.Unlock() + mc.recv <- struct{}{} +} + +// process pulls messages from an iterator and records them in mc. 
+func process(t *testing.T, it *MessageIterator, mc *messageCounter) { + for { + m, err := it.Next() + if err == iterator.Done { + return + } + + if err != nil { + t.Errorf("unexpected err from iterator: %v", err) + return + } + mc.Inc(m.ID) + // Simulate time taken to process m, while continuing to process more messages. + go func() { + // Some messages will need to have their ack deadline extended due to this delay. + delay := rand.Intn(int(ackDeadline * 3)) + time.After(time.Duration(delay)) + m.Done(true) + }() + } +} + +// newIter constructs a new MessageIterator. +func newIter(t *testing.T, ctx context.Context, sub *Subscription) *MessageIterator { + it, err := sub.Pull(ctx) + if err != nil { + t.Fatalf("error constructing iterator: %v", err) + } + return it +} + +// launchIter launches a number of goroutines to pull from the supplied MessageIterator. +func launchIter(t *testing.T, ctx context.Context, it *MessageIterator, mc *messageCounter, n int, wg *sync.WaitGroup) { + for j := 0; j < n; j++ { + wg.Add(1) + go func() { + defer wg.Done() + process(t, it, mc) + }() + } +} + +// iteratorLifetime controls how long iterators live for before they are stopped. +type iteratorLifetimes interface { + // lifetimeChan should be called when an iterator is started. The + // returned channel will send when the iterator should be stopped. + lifetimeChan() <-chan time.Time +} + +var immortal = &explicitLifetimes{} + +// explicitLifetimes implements iteratorLifetime with hard-coded lifetimes, falling back +// to indefinite lifetimes when no explicit lifetimes remain. +type explicitLifetimes struct { + mu sync.Mutex + lifetimes []time.Duration +} + +func (el *explicitLifetimes) lifetimeChan() <-chan time.Time { + el.mu.Lock() + defer el.mu.Unlock() + if len(el.lifetimes) == 0 { + return nil + } + lifetime := el.lifetimes[0] + el.lifetimes = el.lifetimes[1:] + return time.After(lifetime) +} + +// consumer consumes messages according to its configuration. 
+type consumer struct { + // How many goroutines should pull from the subscription. + iteratorsInFlight int + // How many goroutines should pull from each iterator. + concurrencyPerIterator int + + lifetimes iteratorLifetimes +} + +// consume reads messages from a subscription, and keeps track of what it receives in mc. +// After consume returns, the caller should wait on wg to ensure that no more updates to mc will be made. +func (c *consumer) consume(t *testing.T, ctx context.Context, sub *Subscription, mc *messageCounter, wg *sync.WaitGroup, stop <-chan struct{}) { + for i := 0; i < c.iteratorsInFlight; i++ { + wg.Add(1) + go func() { + defer wg.Done() + for { + it := newIter(t, ctx, sub) + launchIter(t, ctx, it, mc, c.concurrencyPerIterator, wg) + + select { + case <-c.lifetimes.lifetimeChan(): + it.Stop() + case <-stop: + it.Stop() + return + } + } + + }() + } +} + +// publish publishes many messages to topic, and returns the published message ids. +func publish(t *testing.T, ctx context.Context, topic *Topic) []string { + var published []string + msgs := make([]*Message, batchSize) + for i := 0; i < batches; i++ { + for j := 0; j < batchSize; j++ { + text := fmt.Sprintf("msg %02d-%02d", i, j) + msgs[j] = &Message{Data: []byte(text)} + } + ids, err := topic.Publish(ctx, msgs...) + if err != nil { + t.Errorf("Publish error: %v", err) + } + published = append(published, ids...) + } + return published +} + +// diff returns counts of the differences between got and want. 
+func diff(got, want map[string]int) map[string]int {
+	ids := make(map[string]struct{})
+	for k := range got {
+		ids[k] = struct{}{}
+	}
+	for k := range want {
+		ids[k] = struct{}{}
+	}
+
+	gotWantCount := make(map[string]int)
+	for k := range ids {
+		if got[k] == want[k] {
+			continue
+		}
+		desc := fmt.Sprintf("<got: %v ; want: %v>", got[k], want[k])
+		gotWantCount[desc] += 1
+	}
+	return gotWantCount
+}
+
+// TestEndToEnd pumps many messages into a topic and tests that they are all delivered to each subscription for the topic.
+// It also tests that messages are not unexpectedly redelivered.
+func TestEndToEnd(t *testing.T) {
+	if testing.Short() {
+		t.Skip("Integration tests skipped in short mode")
+	}
+	ctx := context.Background()
+	ts := testutil.TokenSource(ctx, ScopePubSub, ScopeCloudPlatform)
+	if ts == nil {
+		t.Skip("Integration tests skipped. See CONTRIBUTING.md for details")
+	}
+
+	now := time.Now()
+	topicName := fmt.Sprintf("endtoend-%d", now.Unix())
+	subPrefix := fmt.Sprintf("endtoend-%d", now.Unix())
+
+	client, err := NewClient(ctx, testutil.ProjID(), option.WithTokenSource(ts))
+	if err != nil {
+		t.Fatalf("Creating client error: %v", err)
+	}
+
+	var topic *Topic
+	if topic, err = client.CreateTopic(ctx, topicName); err != nil {
+		t.Fatalf("CreateTopic error: %v", err)
+	}
+	defer topic.Delete(ctx)
+
+	// Three subscriptions to the same topic.
+	var subA, subB, subC *Subscription
+	if subA, err = client.CreateSubscription(ctx, subPrefix+"-a", topic, ackDeadline, nil); err != nil {
+		t.Fatalf("CreateSub error: %v", err)
+	}
+	defer subA.Delete(ctx)
+
+	if subB, err = client.CreateSubscription(ctx, subPrefix+"-b", topic, ackDeadline, nil); err != nil {
+		t.Fatalf("CreateSub error: %v", err)
+	}
+	defer subB.Delete(ctx)
+
+	if subC, err = client.CreateSubscription(ctx, subPrefix+"-c", topic, ackDeadline, nil); err != nil {
+		t.Fatalf("CreateSub error: %v", err)
+	}
+	defer subC.Delete(ctx)
+
+	expectedCounts := make(map[string]int)
+	for _, id := range publish(t, ctx, topic) {
+		expectedCounts[id] = 1
+	}
+
+	// recv provides an indication that messages are still arriving.
+	recv := make(chan struct{})
+
+	// Keep track of the number of times each message (by message id) was
+	// seen from each subscription.
+	mcA := &messageCounter{counts: make(map[string]int), recv: recv}
+	mcB := &messageCounter{counts: make(map[string]int), recv: recv}
+	mcC := &messageCounter{counts: make(map[string]int), recv: recv}
+
+	stopC := make(chan struct{})
+
+	// We have three subscriptions to our topic.
+	// Each subscription will get a copy of each published message.
+	//
+	// subA has just one iterator, while subB has two. The subB iterators
+	// will each process roughly half of the messages for subB. All of
+	// these iterators live until all messages have been consumed. subC is
+	// processed by a series of short-lived iterators.
+ + var wg sync.WaitGroup + + con := &consumer{ + concurrencyPerIterator: 1, + iteratorsInFlight: 2, + lifetimes: immortal, + } + con.consume(t, ctx, subA, mcA, &wg, stopC) + + con = &consumer{ + concurrencyPerIterator: 1, + iteratorsInFlight: 2, + lifetimes: immortal, + } + con.consume(t, ctx, subB, mcB, &wg, stopC) + + con = &consumer{ + concurrencyPerIterator: 1, + iteratorsInFlight: 2, + lifetimes: &explicitLifetimes{ + lifetimes: []time.Duration{ackDeadline, ackDeadline, ackDeadline / 2, ackDeadline / 2}, + }, + } + con.consume(t, ctx, subC, mcC, &wg, stopC) + + go func() { + timeoutC := time.After(timeout) + // Every time this ticker ticks, we will check if we have received any + // messages since the last time it ticked. We check less frequently + // than the ack deadline, so that we can detect if messages are + // redelivered after having their ack deadline extended. + checkQuiescence := time.NewTicker(ackDeadline * 3) + defer checkQuiescence.Stop() + + var received bool + for { + select { + case <-recv: + received = true + case <-checkQuiescence.C: + if received { + received = false + } else { + close(stopC) + return + } + case <-timeoutC: + t.Errorf("timed out") + close(stopC) + return + } + } + }() + wg.Wait() + + for _, mc := range []*messageCounter{mcA, mcB, mcC} { + if got, want := mc.counts, expectedCounts; !reflect.DeepEqual(got, want) { + t.Errorf("message counts: %v\n", diff(got, want)) + } + } +} diff --git a/vendor/cloud.google.com/go/pubsub/example_subscription_iterator_test.go b/vendor/cloud.google.com/go/pubsub/example_subscription_iterator_test.go new file mode 100644 index 00000000..da74b1b6 --- /dev/null +++ b/vendor/cloud.google.com/go/pubsub/example_subscription_iterator_test.go @@ -0,0 +1,54 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package pubsub_test + +import ( + "fmt" + + "cloud.google.com/go/pubsub" + "golang.org/x/net/context" + "google.golang.org/api/iterator" +) + +func ExampleClient_Subscriptions() { + ctx := context.Background() + client, err := pubsub.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + // List all subscriptions of the project. + it := client.Subscriptions(ctx) + _ = it // TODO: iterate using Next. +} + +func ExampleSubscriptionIterator_Next() { + ctx := context.Background() + client, err := pubsub.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + // List all subscriptions of the project. + it := client.Subscriptions(ctx) + for { + sub, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + // TODO: Handle error. + } + fmt.Println(sub) + } +} diff --git a/vendor/cloud.google.com/go/pubsub/example_test.go b/vendor/cloud.google.com/go/pubsub/example_test.go new file mode 100644 index 00000000..ecfd5a20 --- /dev/null +++ b/vendor/cloud.google.com/go/pubsub/example_test.go @@ -0,0 +1,299 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package pubsub_test + +import ( + "fmt" + "time" + + "cloud.google.com/go/pubsub" + "golang.org/x/net/context" + "google.golang.org/api/iterator" +) + +func ExampleNewClient() { + ctx := context.Background() + _, err := pubsub.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + + // See the other examples to learn how to use the Client. +} + +func ExampleClient_CreateTopic() { + ctx := context.Background() + client, err := pubsub.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + + // Create a new topic with the given name. + topic, err := client.CreateTopic(ctx, "topicName") + if err != nil { + // TODO: Handle error. + } + + _ = topic // TODO: use the topic. +} + +func ExampleClient_CreateSubscription() { + ctx := context.Background() + client, err := pubsub.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + + // Create a new topic with the given name. + topic, err := client.CreateTopic(ctx, "topicName") + if err != nil { + // TODO: Handle error. + } + + // Create a new subscription to the previously created topic + // with the given name. + sub, err := client.CreateSubscription(ctx, "subName", topic, 10*time.Second, nil) + if err != nil { + // TODO: Handle error. + } + + _ = sub // TODO: use the subscription. +} + +func ExampleTopic_Delete() { + ctx := context.Background() + client, err := pubsub.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. 
+ } + + topic := client.Topic("topicName") + if err := topic.Delete(ctx); err != nil { + // TODO: Handle error. + } +} + +func ExampleTopic_Exists() { + ctx := context.Background() + client, err := pubsub.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + + topic := client.Topic("topicName") + ok, err := topic.Exists(ctx) + if err != nil { + // TODO: Handle error. + } + if !ok { + // Topic doesn't exist. + } +} + +func ExampleTopic_Publish() { + ctx := context.Background() + client, err := pubsub.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + + topic := client.Topic("topicName") + msgIDs, err := topic.Publish(ctx, &pubsub.Message{ + Data: []byte("hello world"), + }) + if err != nil { + // TODO: Handle error. + } + fmt.Printf("Published a message with a message ID: %s\n", msgIDs[0]) +} + +func ExampleTopic_Subscriptions() { + ctx := context.Background() + client, err := pubsub.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + topic := client.Topic("topic-name") + // List all subscriptions of the topic (maybe of multiple projects). + for subs := topic.Subscriptions(ctx); ; { + sub, err := subs.Next() + if err == iterator.Done { + break + } + if err != nil { + // TODO: Handle error. + } + _ = sub // TODO: use the subscription. + } +} + +func ExampleSubscription_Delete() { + ctx := context.Background() + client, err := pubsub.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + + sub := client.Subscription("subName") + if err := sub.Delete(ctx); err != nil { + // TODO: Handle error. + } +} + +func ExampleSubscription_Exists() { + ctx := context.Background() + client, err := pubsub.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + + sub := client.Subscription("subName") + ok, err := sub.Exists(ctx) + if err != nil { + // TODO: Handle error. + } + if !ok { + // Subscription doesn't exist. 
+ } +} + +func ExampleSubscription_Config() { + ctx := context.Background() + client, err := pubsub.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + sub := client.Subscription("subName") + config, err := sub.Config(ctx) + if err != nil { + // TODO: Handle error. + } + fmt.Println(config) +} + +func ExampleSubscription_Pull() { + ctx := context.Background() + client, err := pubsub.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + it, err := client.Subscription("subName").Pull(ctx) + if err != nil { + // TODO: Handle error. + } + // Ensure that the iterator is closed down cleanly. + defer it.Stop() +} + +func ExampleSubscription_Pull_options() { + ctx := context.Background() + client, err := pubsub.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + sub := client.Subscription("subName") + // This program is expected to process and acknowledge messages + // in 5 seconds. If not, Pub/Sub API will assume the message is not + // acknowledged. + it, err := sub.Pull(ctx, pubsub.MaxExtension(5*time.Second)) + if err != nil { + // TODO: Handle error. + } + // Ensure that the iterator is closed down cleanly. + defer it.Stop() +} + +func ExampleSubscription_ModifyPushConfig() { + ctx := context.Background() + client, err := pubsub.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + sub := client.Subscription("subName") + if err := sub.ModifyPushConfig(ctx, &pubsub.PushConfig{Endpoint: "https://example.com/push"}); err != nil { + // TODO: Handle error. + } +} + +func ExampleMessageIterator_Next() { + ctx := context.Background() + client, err := pubsub.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + it, err := client.Subscription("subName").Pull(ctx) + if err != nil { + // TODO: Handle error. + } + // Ensure that the iterator is closed down cleanly. + defer it.Stop() + // Consume 10 messages. 
+ for i := 0; i < 10; i++ { + m, err := it.Next() + if err == iterator.Done { + // There are no more messages. This will happen if it.Stop is called. + break + } + if err != nil { + // TODO: Handle error. + break + } + fmt.Printf("message %d: %s\n", i, m.Data) + + // Acknowledge the message. + m.Done(true) + } +} + +func ExampleMessageIterator_Stop_defer() { + // If all uses of the iterator occur within the lifetime of a single + // function, stop it with defer. + ctx := context.Background() + client, err := pubsub.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + it, err := client.Subscription("subName").Pull(ctx) + if err != nil { + // TODO: Handle error. + } + + // Ensure that the iterator is closed down cleanly. + defer it.Stop() + + // TODO: Use the iterator (see the example for MessageIterator.Next). +} + +func ExampleMessageIterator_Stop_goroutine() *pubsub.MessageIterator { + // If you use the iterator outside the lifetime of a single function, you + // must still stop it. + // This (contrived) example returns an iterator that will yield messages + // for ten seconds, and then stop. + ctx := context.Background() + client, err := pubsub.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + it, err := client.Subscription("subName").Pull(ctx) + if err != nil { + // TODO: Handle error. + } + // Stop the iterator after receiving messages for ten seconds. + go func() { + time.Sleep(10 * time.Second) + it.Stop() + }() + return it +} diff --git a/vendor/cloud.google.com/go/pubsub/example_topic_iterator_test.go b/vendor/cloud.google.com/go/pubsub/example_topic_iterator_test.go new file mode 100644 index 00000000..0c227edd --- /dev/null +++ b/vendor/cloud.google.com/go/pubsub/example_topic_iterator_test.go @@ -0,0 +1,53 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package pubsub_test + +import ( + "fmt" + + "cloud.google.com/go/pubsub" + "golang.org/x/net/context" + "google.golang.org/api/iterator" +) + +func ExampleClient_Topics() { + ctx := context.Background() + client, err := pubsub.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + it := client.Topics(ctx) + _ = it // TODO: iterate using Next. +} + +func ExampleTopicIterator_Next() { + ctx := context.Background() + client, err := pubsub.NewClient(ctx, "project-id") + if err != nil { + // TODO: Handle error. + } + // List all topics. + it := client.Topics(ctx) + for { + t, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + // TODO: Handle error. + } + fmt.Println(t) + } +} diff --git a/vendor/cloud.google.com/go/pubsub/fake_test.go b/vendor/cloud.google.com/go/pubsub/fake_test.go new file mode 100644 index 00000000..2d444a8b --- /dev/null +++ b/vendor/cloud.google.com/go/pubsub/fake_test.go @@ -0,0 +1,148 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package pubsub + +// This file provides a fake/mock in-memory pubsub server. +// (Really just a mock at the moment, but we hope to turn it into +// more of a fake.) + +import ( + "io" + "sync" + "time" + + "golang.org/x/net/context" + + "cloud.google.com/go/internal/testutil" + pb "google.golang.org/genproto/googleapis/pubsub/v1" +) + +type fakeServer struct { + pb.PublisherServer + pb.SubscriberServer + + Addr string + Acked map[string]bool // acked message IDs + Deadlines map[string]int32 // deadlines by message ID + pullResponses []*pullResponse + wg sync.WaitGroup +} + +type pullResponse struct { + msgs []*pb.ReceivedMessage + err error +} + +func newFakeServer() (*fakeServer, error) { + srv, err := testutil.NewServer() + if err != nil { + return nil, err + } + fake := &fakeServer{ + Addr: srv.Addr, + Acked: map[string]bool{}, + Deadlines: map[string]int32{}, + } + pb.RegisterPublisherServer(srv.Gsrv, fake) + pb.RegisterSubscriberServer(srv.Gsrv, fake) + srv.Start() + return fake, nil +} + +// Each call to addStreamingPullMessages results in one StreamingPullResponse. +func (s *fakeServer) addStreamingPullMessages(msgs []*pb.ReceivedMessage) { + s.pullResponses = append(s.pullResponses, &pullResponse{msgs, nil}) +} + +func (s *fakeServer) addStreamingPullError(err error) { + s.pullResponses = append(s.pullResponses, &pullResponse{nil, err}) +} + +func (s *fakeServer) wait() { + s.wg.Wait() +} + +func (s *fakeServer) StreamingPull(stream pb.Subscriber_StreamingPullServer) error { + // Receive initial request. + _, err := stream.Recv() + if err == io.EOF { + return nil + } + if err != nil { + return err + } + // Consume and ignore subsequent requests. 
+ errc := make(chan error, 1) + s.wg.Add(1) + go func() { + defer s.wg.Done() + for { + req, err := stream.Recv() + if err != nil { + errc <- err + return + } + for _, id := range req.AckIds { + s.Acked[id] = true + } + for i, id := range req.ModifyDeadlineAckIds { + s.Deadlines[id] = req.ModifyDeadlineSeconds[i] + } + } + }() + // Send responses. + for { + if len(s.pullResponses) == 0 { + // Nothing to send, so wait for the client to shut down the stream. + err := <-errc // a real error, or at least EOF + if err == io.EOF { + return nil + } + return err + } + pr := s.pullResponses[0] + s.pullResponses = s.pullResponses[1:] + if pr.err != nil { + // Add a slight delay to ensure the server receives any + // messages en route from the client before shutting down the stream. + // This reduces flakiness of tests involving retry. + time.Sleep(100 * time.Millisecond) + } + if pr.err == io.EOF { + return nil + } + if pr.err != nil { + return pr.err + } + // Return any error from Recv. + select { + case err := <-errc: + return err + default: + } + res := &pb.StreamingPullResponse{ReceivedMessages: pr.msgs} + if err := stream.Send(res); err != nil { + return err + } + } +} + +func (s *fakeServer) GetSubscription(ctx context.Context, req *pb.GetSubscriptionRequest) (*pb.Subscription, error) { + return &pb.Subscription{ + Name: req.Subscription, + AckDeadlineSeconds: 10, + PushConfig: &pb.PushConfig{}, + }, nil +} diff --git a/vendor/cloud.google.com/go/pubsub/integration_test.go b/vendor/cloud.google.com/go/pubsub/integration_test.go new file mode 100644 index 00000000..01279eef --- /dev/null +++ b/vendor/cloud.google.com/go/pubsub/integration_test.go @@ -0,0 +1,232 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package pubsub + +import ( + "fmt" + "reflect" + "testing" + "time" + + "golang.org/x/net/context" + + "cloud.google.com/go/iam" + "cloud.google.com/go/internal/testutil" + "google.golang.org/api/option" +) + +// messageData is used to hold the contents of a message so that it can be compared against the contents +// of another message without regard to irrelevant fields. +type messageData struct { + ID string + Data []byte + Attributes map[string]string +} + +func extractMessageData(m *Message) *messageData { + return &messageData{ + ID: m.ID, + Data: m.Data, + Attributes: m.Attributes, + } +} + +func TestAll(t *testing.T) { + if testing.Short() { + t.Skip("Integration tests skipped in short mode") + } + ctx := context.Background() + ts := testutil.TokenSource(ctx, ScopePubSub, ScopeCloudPlatform) + if ts == nil { + t.Skip("Integration tests skipped. 
See CONTRIBUTING.md for details") + } + + now := time.Now() + topicName := fmt.Sprintf("topic-%d", now.Unix()) + subName := fmt.Sprintf("subscription-%d", now.Unix()) + + client, err := NewClient(ctx, testutil.ProjID(), option.WithTokenSource(ts)) + if err != nil { + t.Fatalf("Creating client error: %v", err) + } + defer client.Close() + + var topic *Topic + if topic, err = client.CreateTopic(ctx, topicName); err != nil { + t.Errorf("CreateTopic error: %v", err) + } + + var sub *Subscription + if sub, err = client.CreateSubscription(ctx, subName, topic, 0, nil); err != nil { + t.Errorf("CreateSub error: %v", err) + } + + exists, err := topic.Exists(ctx) + if err != nil { + t.Fatalf("TopicExists error: %v", err) + } + if !exists { + t.Errorf("topic %s should exist, but it doesn't", topic) + } + + exists, err = sub.Exists(ctx) + if err != nil { + t.Fatalf("SubExists error: %v", err) + } + if !exists { + t.Errorf("subscription %s should exist, but it doesn't", subName) + } + + msgs := []*Message{} + for i := 0; i < 10; i++ { + text := fmt.Sprintf("a message with an index %d", i) + attrs := make(map[string]string) + attrs["foo"] = "bar" + msgs = append(msgs, &Message{ + Data: []byte(text), + Attributes: attrs, + }) + } + + ids, err := topic.Publish(ctx, msgs...) + if err != nil { + t.Fatalf("Publish (1) error: %v", err) + } + + if len(ids) != len(msgs) { + t.Errorf("unexpected number of message IDs received; %d, want %d", len(ids), len(msgs)) + } + + want := make(map[string]*messageData) + for i, m := range msgs { + md := extractMessageData(m) + md.ID = ids[i] + want[md.ID] = md + } + + // Use a timeout to ensure that Pull does not block indefinitely if there are unexpectedly few messages available. 
+ timeoutCtx, _ := context.WithTimeout(ctx, time.Minute) + it, err := sub.Pull(timeoutCtx) + if err != nil { + t.Fatalf("error constructing iterator: %v", err) + } + defer it.Stop() + got := make(map[string]*messageData) + for i := 0; i < len(want); i++ { + m, err := it.Next() + if err != nil { + t.Fatalf("error getting next message: %v", err) + } + md := extractMessageData(m) + got[md.ID] = md + m.Done(true) + } + + if !reflect.DeepEqual(got, want) { + t.Errorf("messages: got: %v ; want: %v", got, want) + } + + // base64 test + data := "=@~" + _, err = topic.Publish(ctx, &Message{Data: []byte(data)}) + if err != nil { + t.Fatalf("Publish error: %v", err) + } + + m, err := it.Next() + if err != nil { + t.Fatalf("Pull error: %v", err) + } + + if string(m.Data) != data { + t.Errorf("unexpected message received; %s, want %s", string(m.Data), data) + } + m.Done(true) + + if msg, ok := testIAM(ctx, topic.IAM(), "pubsub.topics.get"); !ok { + t.Errorf("topic IAM: %s", msg) + } + if msg, ok := testIAM(ctx, sub.IAM(), "pubsub.subscriptions.get"); !ok { + t.Errorf("sub IAM: %s", msg) + } + + err = sub.Delete(ctx) + if err != nil { + t.Errorf("DeleteSub error: %v", err) + } + + err = topic.Delete(ctx) + if err != nil { + t.Errorf("DeleteTopic error: %v", err) + } +} + +// IAM tests. +// NOTE: for these to succeed, the test runner identity must have the Pub/Sub Admin or Owner roles. +// To set, visit https://console.developers.google.com, select "IAM & Admin" from the top-left +// menu, choose the account, click the Roles dropdown, and select "Pub/Sub > Pub/Sub Admin". +// TODO(jba): move this to a testing package within cloud.google.com/iam, so we can re-use it. +func testIAM(ctx context.Context, h *iam.Handle, permission string) (msg string, ok bool) { + // Attempting to add an non-existent identity (e.g. "alice@example.com") causes the service + // to return an internal error, so use a real identity. 
+ const member = "domain:google.com" + + var policy *iam.Policy + var err error + + if policy, err = h.Policy(ctx); err != nil { + return fmt.Sprintf("Policy: %v", err), false + } + // The resource is new, so the policy should be empty. + if got := policy.Roles(); len(got) > 0 { + return fmt.Sprintf("initially: got roles %v, want none", got), false + } + // Add a member, set the policy, then check that the member is present. + policy.Add(member, iam.Viewer) + if err := h.SetPolicy(ctx, policy); err != nil { + return fmt.Sprintf("SetPolicy: %v", err), false + } + if policy, err = h.Policy(ctx); err != nil { + return fmt.Sprintf("Policy: %v", err), false + } + if got, want := policy.Members(iam.Viewer), []string{member}; !reflect.DeepEqual(got, want) { + return fmt.Sprintf("after Add: got %v, want %v", got, want), false + } + // Now remove that member, set the policy, and check that it's empty again. + policy.Remove(member, iam.Viewer) + if err := h.SetPolicy(ctx, policy); err != nil { + return fmt.Sprintf("SetPolicy: %v", err), false + } + if policy, err = h.Policy(ctx); err != nil { + return fmt.Sprintf("Policy: %v", err), false + } + if got := policy.Roles(); len(got) > 0 { + return fmt.Sprintf("after Remove: got roles %v, want none", got), false + } + // Call TestPermissions. + // Because this user is an admin, it has all the permissions on the + // resource type. Note: the service fails if we ask for inapplicable + // permissions (e.g. a subscription permission on a topic, or a topic + // create permission on a topic rather than its parent). 
+ wantPerms := []string{permission} + gotPerms, err := h.TestPermissions(ctx, wantPerms) + if err != nil { + return fmt.Sprintf("TestPermissions: %v", err), false + } + if !reflect.DeepEqual(gotPerms, wantPerms) { + return fmt.Sprintf("TestPermissions: got %v, want %v", gotPerms, wantPerms), false + } + return "", true +} diff --git a/vendor/cloud.google.com/go/pubsub/iterator.go b/vendor/cloud.google.com/go/pubsub/iterator.go new file mode 100644 index 00000000..4da7ca38 --- /dev/null +++ b/vendor/cloud.google.com/go/pubsub/iterator.go @@ -0,0 +1,527 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package pubsub + +import ( + "log" + "sync" + "time" + + "golang.org/x/net/context" + "google.golang.org/api/iterator" + "google.golang.org/api/support/bundler" + pb "google.golang.org/genproto/googleapis/pubsub/v1" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" +) + +type MessageIterator struct { + impl interface { + next() (*Message, error) + stop() + } +} + +type pollingMessageIterator struct { + // kaTicker controls how often we send an ack deadline extension request. + kaTicker *time.Ticker + // ackTicker controls how often we acknowledge a batch of messages. + ackTicker *time.Ticker + + ka *keepAlive + acker *acker + nacker *bundler.Bundler + puller *puller + + // mu ensures that cleanup only happens once, and concurrent Stop + // invocations block until cleanup completes. 
+ mu sync.Mutex + + // closed is used to signal that Stop has been called. + closed chan struct{} +} + +var useStreamingPull = false + +// newMessageIterator starts a new MessageIterator. Stop must be called on the MessageIterator +// when it is no longer needed. +// subName is the full name of the subscription to pull messages from. +// ctx is the context to use for acking messages and extending message deadlines. +func newMessageIterator(ctx context.Context, s service, subName string, po *pullOptions) *MessageIterator { + if !useStreamingPull { + return &MessageIterator{ + impl: newPollingMessageIterator(ctx, s, subName, po), + } + } + sp := s.newStreamingPuller(ctx, subName, int32(po.ackDeadline.Seconds())) + err := sp.open() + if grpc.Code(err) == codes.Unimplemented { + log.Println("pubsub: streaming pull unimplemented; falling back to legacy pull") + return &MessageIterator{ + impl: newPollingMessageIterator(ctx, s, subName, po), + } + } + // TODO(jba): handle other non-nil error? + log.Println("using streaming pull") + return &MessageIterator{ + impl: newStreamingMessageIterator(ctx, sp, po), + } +} + +func newPollingMessageIterator(ctx context.Context, s service, subName string, po *pullOptions) *pollingMessageIterator { + // TODO: make kaTicker frequency more configurable. + // (ackDeadline - 5s) is a reasonable default for now, because the minimum ack period is 10s. This gives us 5s grace. + keepAlivePeriod := po.ackDeadline - 5*time.Second + kaTicker := time.NewTicker(keepAlivePeriod) // Stopped in it.Stop + + // TODO: make ackTicker more configurable. Something less than + // kaTicker is a reasonable default (there's no point extending + // messages when they could be acked instead). 
+ ackTicker := time.NewTicker(keepAlivePeriod / 2) // Stopped in it.Stop + + ka := &keepAlive{ + s: s, + Ctx: ctx, + Sub: subName, + ExtensionTick: kaTicker.C, + Deadline: po.ackDeadline, + MaxExtension: po.maxExtension, + } + + ack := &acker{ + s: s, + Ctx: ctx, + Sub: subName, + AckTick: ackTicker.C, + Notify: ka.Remove, + } + + nacker := bundler.NewBundler("", func(ackIDs interface{}) { + // NACK by setting the ack deadline to zero, to make the message + // immediately available for redelivery. + // + // If the RPC fails, nothing we can do about it. In the worst case, the + // deadline for these messages will expire and they will still get + // redelivered. + _ = s.modifyAckDeadline(ctx, subName, 0, ackIDs.([]string)) + }) + nacker.DelayThreshold = keepAlivePeriod / 10 // nack promptly + nacker.BundleCountThreshold = 10 + + pull := newPuller(s, subName, ctx, po.maxPrefetch, ka.Add, ka.Remove) + + ka.Start() + ack.Start() + return &pollingMessageIterator{ + kaTicker: kaTicker, + ackTicker: ackTicker, + ka: ka, + acker: ack, + nacker: nacker, + puller: pull, + closed: make(chan struct{}), + } +} + +// Next returns the next Message to be processed. The caller must call +// Message.Done when finished with it. +// Once Stop has been called, calls to Next will return iterator.Done. +func (it *MessageIterator) Next() (*Message, error) { + return it.impl.next() +} + +func (it *pollingMessageIterator) next() (*Message, error) { + m, err := it.puller.Next() + if err == nil { + m.done = it.done + return m, nil + } + + select { + // If Stop has been called, we return Done regardless the value of err. + case <-it.closed: + return nil, iterator.Done + default: + return nil, err + } +} + +// Client code must call Stop on a MessageIterator when finished with it. +// Stop will block until Done has been called on all Messages that have been +// returned by Next, or until the context with which the MessageIterator was created +// is cancelled or exceeds its deadline. 
+// Stop need only be called once, but may be called multiple times from +// multiple goroutines. +func (it *MessageIterator) Stop() { + it.impl.stop() +} + +func (it *pollingMessageIterator) stop() { + it.mu.Lock() + defer it.mu.Unlock() + + select { + case <-it.closed: + // Cleanup has already been performed. + return + default: + } + + // We close this channel before calling it.puller.Stop to ensure that we + // reliably return iterator.Done from Next. + close(it.closed) + + // Stop the puller. Once this completes, no more messages will be added + // to it.ka. + it.puller.Stop() + + // Start acking messages as they arrive, ignoring ackTicker. This will + // result in it.ka.Stop, below, returning as soon as possible. + it.acker.FastMode() + + // This will block until + // (a) it.ka.Ctx is done, or + // (b) all messages have been removed from keepAlive. + // (b) will happen once all outstanding messages have been either ACKed or NACKed. + it.ka.Stop() + + // There are no more live messages, so kill off the acker. + it.acker.Stop() + it.nacker.Stop() + it.kaTicker.Stop() + it.ackTicker.Stop() +} + +func (it *pollingMessageIterator) done(ackID string, ack bool) { + if ack { + it.acker.Ack(ackID) + // There's no need to call it.ka.Remove here, as acker will + // call it via its Notify function. 
+ } else { + it.ka.Remove(ackID) + _ = it.nacker.Add(ackID, len(ackID)) // ignore error; this is just an optimization + } +} + +type streamingMessageIterator struct { + ctx context.Context + po *pullOptions + sp *streamingPuller + kaTicker *time.Ticker // keep-alive (deadline extensions) + ackTicker *time.Ticker // message acks + nackTicker *time.Ticker // message nacks (more frequent than acks) + failed chan struct{} // closed on stream error + stopped chan struct{} // closed when Stop is called + drained chan struct{} // closed when stopped && no more pending messages + msgc chan *Message + wg sync.WaitGroup + + mu sync.Mutex + keepAliveDeadlines map[string]time.Time + pendingReq *pb.StreamingPullRequest + err error // error from stream failure +} + +const messageBufferSize = 1000 + +func newStreamingMessageIterator(ctx context.Context, sp *streamingPuller, po *pullOptions) *streamingMessageIterator { + // TODO: make kaTicker frequency more configurable. (ackDeadline - 5s) is a + // reasonable default for now, because the minimum ack period is 10s. This + // gives us 5s grace. + keepAlivePeriod := po.ackDeadline - 5*time.Second + kaTicker := time.NewTicker(keepAlivePeriod) + + // TODO: make ackTicker more configurable. Something less than + // kaTicker is a reasonable default (there's no point extending + // messages when they could be acked instead). 
+ ackTicker := time.NewTicker(keepAlivePeriod / 2) + nackTicker := time.NewTicker(keepAlivePeriod / 10) + it := &streamingMessageIterator{ + ctx: ctx, + sp: sp, + po: po, + kaTicker: kaTicker, + ackTicker: ackTicker, + nackTicker: nackTicker, + failed: make(chan struct{}), + stopped: make(chan struct{}), + drained: make(chan struct{}), + msgc: make(chan *Message, messageBufferSize), + keepAliveDeadlines: map[string]time.Time{}, + pendingReq: &pb.StreamingPullRequest{}, + } + it.wg.Add(2) + go it.receiver() + go it.sender() + return it +} + +func (it *streamingMessageIterator) next() (*Message, error) { + // If ctx has been cancelled or the iterator is done, return straight + // away (even if there are buffered messages available). + select { + case <-it.ctx.Done(): + return nil, it.ctx.Err() + + case <-it.failed: + break + + case <-it.stopped: + break + + default: + // Wait for a message, but also for one of the above conditions. + select { + case msg := <-it.msgc: + // Since active select cases are chosen at random, this can return + // nil (from the channel close) even if it.failed or it.stopped is + // closed. + if msg == nil { + break + } + msg.done = it.done + return msg, nil + + case <-it.ctx.Done(): + return nil, it.ctx.Err() + + case <-it.failed: + break + + case <-it.stopped: + break + } + } + // Here if the iterator is done. + it.mu.Lock() + defer it.mu.Unlock() + return nil, it.err +} + +func (it *streamingMessageIterator) stop() { + it.mu.Lock() + select { + case <-it.stopped: + it.mu.Unlock() + it.wg.Wait() + return + default: + close(it.stopped) + } + if it.err == nil { + it.err = iterator.Done + } + // Before reading from the channel, see if we're already drained. + it.checkDrained() + it.mu.Unlock() + // Nack all the pending messages. + // Grab the lock separately for each message to allow the receiver + // and sender goroutines to make progress. 
+ // Why this will eventually terminate: + // - If the receiver is not blocked on a stream Recv, then + // it will write all the messages it has received to the channel, + // then exit, closing the channel. + // - If the receiver is blocked, then this loop will eventually + // nack all the messages in the channel. Once done is called + // on the remaining messages, the iterator will be marked as drained, + // which will trigger the sender to terminate. When it does, it + // performs a CloseSend on the stream, which will result in the blocked + // stream Recv returning. + for m := range it.msgc { + it.mu.Lock() + delete(it.keepAliveDeadlines, m.ackID) + it.addDeadlineMod(m.ackID, 0) + it.checkDrained() + it.mu.Unlock() + } + it.wg.Wait() +} + +// checkDrained closes the drained channel if the iterator has been stopped and all +// pending messages have either been n/acked or expired. +// +// Called with the lock held. +func (it *streamingMessageIterator) checkDrained() { + select { + case <-it.drained: + return + default: + } + select { + case <-it.stopped: + if len(it.keepAliveDeadlines) == 0 { + close(it.drained) + } + default: + } +} + +// Called when a message is acked/nacked. +func (it *streamingMessageIterator) done(ackID string, ack bool) { + it.mu.Lock() + defer it.mu.Unlock() + delete(it.keepAliveDeadlines, ackID) + if ack { + it.pendingReq.AckIds = append(it.pendingReq.AckIds, ackID) + } else { + it.addDeadlineMod(ackID, 0) // Nack indicated by modifying the deadline to zero. + } + it.checkDrained() +} + +// addDeadlineMod adds the ack ID to the pending request with the given deadline. +// +// Called with the lock held. +func (it *streamingMessageIterator) addDeadlineMod(ackID string, deadlineSecs int32) { + pr := it.pendingReq + pr.ModifyDeadlineAckIds = append(pr.ModifyDeadlineAckIds, ackID) + pr.ModifyDeadlineSeconds = append(pr.ModifyDeadlineSeconds, deadlineSecs) +} + +// fail is called when a stream method returns a permanent error. 
+func (it *streamingMessageIterator) fail(err error) { + it.mu.Lock() + if it.err == nil { + it.err = err + close(it.failed) + } + it.mu.Unlock() +} + +// receiver runs in a goroutine and handles all receives from the stream. +func (it *streamingMessageIterator) receiver() { + defer it.wg.Done() + defer close(it.msgc) + + for { + // Stop retrieving messages if the context is done, the stream + // failed, or the iterator's Stop method was called. + select { + case <-it.ctx.Done(): + return + case <-it.failed: + return + case <-it.stopped: + return + default: + } + // Receive messages from stream. This may block indefinitely. + msgs, err := it.sp.fetchMessages() + + // The streamingPuller handles retries, so any error here + // is fatal to the iterator. + if err != nil { + it.fail(err) + return + } + // We received some messages. Remember them so we can + // keep them alive. + deadline := time.Now().Add(it.po.maxExtension) + it.mu.Lock() + for _, m := range msgs { + it.keepAliveDeadlines[m.ackID] = deadline + } + it.mu.Unlock() + // Deliver the messages to the channel. + for _, m := range msgs { + select { + case <-it.ctx.Done(): + return + case <-it.failed: + return + // Don't return if stopped. We want to send the remaining + // messages on the channel, where they will be nacked. + case it.msgc <- m: + } + } + } +} + +// sender runs in a goroutine and handles all sends to the stream. +func (it *streamingMessageIterator) sender() { + defer it.wg.Done() + defer it.kaTicker.Stop() + defer it.ackTicker.Stop() + defer it.nackTicker.Stop() + defer it.sp.closeSend() + + done := false + for !done { + send := false + select { + case <-it.ctx.Done(): + // Context canceled or timed out: stop immediately, without + // another RPC. + return + + case <-it.failed: + // Stream failed: nothing to do, so stop immediately. + return + + case <-it.drained: + // All outstanding messages have been marked done: + // nothing left to do except send the final request. 
+ it.mu.Lock() + send = (len(it.pendingReq.AckIds) > 0 || len(it.pendingReq.ModifyDeadlineAckIds) > 0) + done = true + + case <-it.kaTicker.C: + it.mu.Lock() + send = it.handleKeepAlives() + + case <-it.nackTicker.C: + it.mu.Lock() + send = (len(it.pendingReq.ModifyDeadlineAckIds) > 0) + + case <-it.ackTicker.C: + it.mu.Lock() + send = (len(it.pendingReq.AckIds) > 0) + + } + // Lock is held here. + if send { + req := it.pendingReq + it.pendingReq = &pb.StreamingPullRequest{} + it.mu.Unlock() + err := it.sp.send(req) + if err != nil { + // The streamingPuller handles retries, so any error here + // is fatal to the iterator. + it.fail(err) + return + } + } else { + it.mu.Unlock() + } + } +} + +// handleKeepAlives modifies the pending request to include deadline extensions +// for live messages. It also purges expired messages. It reports whether +// there were any live messages. +// +// Called with the lock held. +func (it *streamingMessageIterator) handleKeepAlives() bool { + live, expired := getKeepAliveAckIDs(it.keepAliveDeadlines) + for _, e := range expired { + delete(it.keepAliveDeadlines, e) + } + dl := trunc32(int64(it.po.ackDeadline.Seconds())) + for _, m := range live { + it.addDeadlineMod(m, dl) + } + it.checkDrained() + return len(live) > 0 +} diff --git a/vendor/cloud.google.com/go/pubsub/iterator_test.go b/vendor/cloud.google.com/go/pubsub/iterator_test.go new file mode 100644 index 00000000..b631b4fb --- /dev/null +++ b/vendor/cloud.google.com/go/pubsub/iterator_test.go @@ -0,0 +1,324 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package pubsub + +import ( + "fmt" + "reflect" + "testing" + "time" + + "golang.org/x/net/context" + + "google.golang.org/api/iterator" +) + +func TestReturnsDoneOnStop(t *testing.T) { + type testCase struct { + abort func(*MessageIterator, context.CancelFunc) + want error + } + + for _, tc := range []testCase{ + { + abort: func(it *MessageIterator, cancel context.CancelFunc) { + it.Stop() + }, + want: iterator.Done, + }, + { + abort: func(it *MessageIterator, cancel context.CancelFunc) { + cancel() + }, + want: context.Canceled, + }, + { + abort: func(it *MessageIterator, cancel context.CancelFunc) { + it.Stop() + cancel() + }, + want: iterator.Done, + }, + { + abort: func(it *MessageIterator, cancel context.CancelFunc) { + cancel() + it.Stop() + }, + want: iterator.Done, + }, + } { + s := &blockingFetch{} + ctx, cancel := context.WithCancel(context.Background()) + it := newMessageIterator(ctx, s, "subname", &pullOptions{ackDeadline: time.Second * 10, maxExtension: time.Hour}) + defer it.Stop() + tc.abort(it, cancel) + + _, err := it.Next() + if err != tc.want { + t.Errorf("iterator Next error after abort: got:\n%v\nwant:\n%v", err, tc.want) + } + } +} + +// blockingFetch implements message fetching by not returning until its context is cancelled. 
+type blockingFetch struct { + service +} + +func (s *blockingFetch) fetchMessages(ctx context.Context, subName string, maxMessages int32) ([]*Message, error) { + <-ctx.Done() + return nil, ctx.Err() +} + +func (s *blockingFetch) newStreamingPuller(ctx context.Context, subName string, ackDeadline int32) *streamingPuller { + return nil +} + +// justInTimeFetch simulates the situation where the iterator is aborted just after the fetch RPC +// succeeds, so the rest of puller.Next will continue to execute and return sucessfully. +type justInTimeFetch struct { + service +} + +func (s *justInTimeFetch) fetchMessages(ctx context.Context, subName string, maxMessages int32) ([]*Message, error) { + <-ctx.Done() + // The context was cancelled, but let's pretend that this happend just after our RPC returned. + + var result []*Message + for i := 0; i < int(maxMessages); i++ { + val := fmt.Sprintf("msg%v", i) + result = append(result, &Message{Data: []byte(val), ackID: val}) + } + return result, nil +} + +func (s *justInTimeFetch) splitAckIDs(ids []string) ([]string, []string) { + return nil, nil +} + +func (s *justInTimeFetch) modifyAckDeadline(ctx context.Context, subName string, deadline time.Duration, ackIDs []string) error { + return nil +} + +func (s *justInTimeFetch) newStreamingPuller(ctx context.Context, subName string, ackDeadline int32) *streamingPuller { + return nil +} + +func TestAfterAbortReturnsNoMoreThanOneMessage(t *testing.T) { + // Each test case is excercised by making two concurrent blocking calls on a + // MessageIterator, and then aborting the iterator. + // The result should be one call to Next returning a message, and the other returning an error. + type testCase struct { + abort func(*MessageIterator, context.CancelFunc) + // want is the error that should be returned from one Next invocation. 
+ want error + } + for n := 1; n < 3; n++ { + for _, tc := range []testCase{ + { + abort: func(it *MessageIterator, cancel context.CancelFunc) { + it.Stop() + }, + want: iterator.Done, + }, + { + abort: func(it *MessageIterator, cancel context.CancelFunc) { + cancel() + }, + want: context.Canceled, + }, + { + abort: func(it *MessageIterator, cancel context.CancelFunc) { + it.Stop() + cancel() + }, + want: iterator.Done, + }, + { + abort: func(it *MessageIterator, cancel context.CancelFunc) { + cancel() + it.Stop() + }, + want: iterator.Done, + }, + } { + s := &justInTimeFetch{} + ctx, cancel := context.WithCancel(context.Background()) + + // if maxPrefetch == 1, there will be no messages in the puller buffer when Next is invoked the second time. + // if maxPrefetch == 2, there will be 1 message in the puller buffer when Next is invoked the second time. + po := &pullOptions{ + ackDeadline: time.Second * 10, + maxExtension: time.Hour, + maxPrefetch: int32(n), + } + it := newMessageIterator(ctx, s, "subname", po) + defer it.Stop() + + type result struct { + m *Message + err error + } + results := make(chan *result, 2) + + for i := 0; i < 2; i++ { + go func() { + m, err := it.Next() + results <- &result{m, err} + if err == nil { + m.Done(false) + } + }() + } + // Wait for goroutines to block on it.Next(). + time.Sleep(time.Millisecond) + tc.abort(it, cancel) + + result1 := <-results + result2 := <-results + + // There should be one error result, and one non-error result. + // Make result1 be the non-error result. 
+ if result1.err != nil { + result1, result2 = result2, result1 + } + + if string(result1.m.Data) != "msg0" { + t.Errorf("After abort, got message: %v, want %v", result1.m.Data, "msg0") + } + if result1.err != nil { + t.Errorf("After abort, got : %v, want nil", result1.err) + } + if result2.m != nil { + t.Errorf("After abort, got message: %v, want nil", result2.m) + } + if result2.err != tc.want { + t.Errorf("After abort, got err: %v, want %v", result2.err, tc.want) + } + } + } +} + +type fetcherServiceWithModifyAckDeadline struct { + fetcherService + events chan string +} + +func (f *fetcherServiceWithModifyAckDeadline) modifyAckDeadline(_ context.Context, _ string, d time.Duration, ids []string) error { + // Different versions of Go use different representations for time.Duration(0). + var ds string + if d == 0 { + ds = "0s" + } else { + ds = d.String() + } + f.events <- fmt.Sprintf("modAck(%v, %s)", ids, ds) + return nil +} + +func (f *fetcherServiceWithModifyAckDeadline) splitAckIDs(ackIDs []string) ([]string, []string) { + return ackIDs, nil +} + +func (f *fetcherServiceWithModifyAckDeadline) newStreamingPuller(ctx context.Context, subName string, ackDeadline int32) *streamingPuller { + return nil +} + +func TestMultipleStopCallsBlockUntilMessageDone(t *testing.T) { + events := make(chan string, 3) + s := &fetcherServiceWithModifyAckDeadline{ + fetcherService{ + results: []fetchResult{ + { + msgs: []*Message{{ackID: "a"}, {ackID: "b"}}, + }, + }, + }, + events, + } + + ctx := context.Background() + it := newMessageIterator(ctx, s, "subname", &pullOptions{ackDeadline: time.Second * 10, maxExtension: 0}) + + m, err := it.Next() + if err != nil { + t.Errorf("error calling Next: %v", err) + } + + go func() { + it.Stop() + events <- "stopped" + }() + go func() { + it.Stop() + events <- "stopped" + }() + + time.Sleep(10 * time.Millisecond) + m.Done(false) + + got := []string{<-events, <-events, <-events} + want := []string{"modAck([a], 0s)", "stopped", "stopped"} + 
if !reflect.DeepEqual(got, want) { + t.Errorf("stopping iterator, got: %v ; want: %v", got, want) + } + + // The iterator is stopped, so should not return another message. + m, err = it.Next() + if m != nil { + t.Errorf("message got: %v ; want: nil", m) + } + if err != iterator.Done { + t.Errorf("err got: %v ; want: %v", err, iterator.Done) + } +} + +func TestFastNack(t *testing.T) { + events := make(chan string, 3) + s := &fetcherServiceWithModifyAckDeadline{ + fetcherService{ + results: []fetchResult{ + { + msgs: []*Message{{ackID: "a"}, {ackID: "b"}}, + }, + }, + }, + events, + } + + ctx := context.Background() + it := newMessageIterator(ctx, s, "subname", &pullOptions{ + ackDeadline: time.Second * 6, + maxExtension: time.Second * 10, + }) + // Get both messages. + _, err := it.Next() + if err != nil { + t.Errorf("error calling Next: %v", err) + } + m2, err := it.Next() + if err != nil { + t.Errorf("error calling Next: %v", err) + } + // Ignore the first, nack the second. + m2.Done(false) + + got := []string{<-events, <-events} + // The nack should happen before the deadline extension. + want := []string{"modAck([b], 0s)", "modAck([a], 6s)"} + if !reflect.DeepEqual(got, want) { + t.Errorf("got: %v ; want: %v", got, want) + } +} diff --git a/vendor/cloud.google.com/go/pubsub/keepalive.go b/vendor/cloud.google.com/go/pubsub/keepalive.go new file mode 100644 index 00000000..f57c3831 --- /dev/null +++ b/vendor/cloud.google.com/go/pubsub/keepalive.go @@ -0,0 +1,182 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package pubsub + +import ( + "sync" + "time" + + "golang.org/x/net/context" +) + +// keepAlive keeps track of which Messages need to have their deadline extended, and +// periodically extends them. +// Messages are tracked by Ack ID. +type keepAlive struct { + s service + Ctx context.Context // The context to use when extending deadlines. + Sub string // The full name of the subscription. + ExtensionTick <-chan time.Time // ExtensionTick supplies the frequency with which to make extension requests. + Deadline time.Duration // How long to extend messages for each time they are extended. Should be greater than ExtensionTick frequency. + MaxExtension time.Duration // How long to keep extending each message's ack deadline before automatically removing it. + + mu sync.Mutex + // key: ackID; value: time at which ack deadline extension should cease. + items map[string]time.Time + dr drain + + wg sync.WaitGroup +} + +// Start initiates the deadline extension loop. Stop must be called once keepAlive is no longer needed. +func (ka *keepAlive) Start() { + ka.items = make(map[string]time.Time) + ka.dr = drain{Drained: make(chan struct{})} + ka.wg.Add(1) + go func() { + defer ka.wg.Done() + for { + select { + case <-ka.Ctx.Done(): + // Don't bother waiting for items to be removed: we can't extend them any more. 
+ return + case <-ka.dr.Drained: + return + case <-ka.ExtensionTick: + live, expired := ka.getAckIDs() + ka.wg.Add(1) + go func() { + defer ka.wg.Done() + ka.extendDeadlines(live) + }() + + for _, id := range expired { + ka.Remove(id) + } + } + } + }() +} + +// Add adds an ack id to be kept alive. +// It should not be called after Stop. +func (ka *keepAlive) Add(ackID string) { + ka.mu.Lock() + defer ka.mu.Unlock() + + ka.items[ackID] = time.Now().Add(ka.MaxExtension) + ka.dr.SetPending(true) +} + +// Remove removes ackID from the list to be kept alive. +func (ka *keepAlive) Remove(ackID string) { + ka.mu.Lock() + defer ka.mu.Unlock() + + // Note: If users NACKs a message after it has been removed due to + // expiring, Remove will be called twice with same ack id. This is OK. + delete(ka.items, ackID) + ka.dr.SetPending(len(ka.items) != 0) +} + +// Stop waits until all added ackIDs have been removed, and cleans up resources. +// Stop may only be called once. +func (ka *keepAlive) Stop() { + ka.mu.Lock() + ka.dr.Drain() + ka.mu.Unlock() + + ka.wg.Wait() +} + +// getAckIDs returns the set of ackIDs that are being kept alive. +// The set is divided into two lists: one with IDs that should continue to be kept alive, +// and the other with IDs that should be dropped. 
+func (ka *keepAlive) getAckIDs() (live, expired []string) { + ka.mu.Lock() + defer ka.mu.Unlock() + return getKeepAliveAckIDs(ka.items) +} + +func getKeepAliveAckIDs(items map[string]time.Time) (live, expired []string) { + now := time.Now() + for id, expiry := range items { + if expiry.Before(now) { + expired = append(expired, id) + } else { + live = append(live, id) + } + } + return live, expired +} + +const maxExtensionAttempts = 2 + +func (ka *keepAlive) extendDeadlines(ackIDs []string) { + head, tail := ka.s.splitAckIDs(ackIDs) + for len(head) > 0 { + for i := 0; i < maxExtensionAttempts; i++ { + if ka.s.modifyAckDeadline(ka.Ctx, ka.Sub, ka.Deadline, head) == nil { + break + } + } + // NOTE: Messages whose deadlines we fail to extend will + // eventually be redelivered and this is a documented behaviour + // of the API. + // + // NOTE: If we fail to extend deadlines here, this + // implementation will continue to attempt extending the + // deadlines for those ack IDs the next time the extension + // ticker ticks. By then the deadline will have expired. + // Re-extending them is harmless, however. + // + // TODO: call Remove for ids which fail to be extended. + + head, tail = ka.s.splitAckIDs(tail) + } +} + +// A drain (once started) indicates via a channel when there is no work pending. +type drain struct { + started bool + pending bool + + // Drained is closed once there are no items outstanding if Drain has been called. + Drained chan struct{} +} + +// Drain starts the drain process. This cannot be undone. +func (d *drain) Drain() { + d.started = true + d.closeIfDrained() +} + +// SetPending sets whether there is work pending or not. It may be called multiple times before or after Drain. +func (d *drain) SetPending(pending bool) { + d.pending = pending + d.closeIfDrained() +} + +func (d *drain) closeIfDrained() { + if !d.pending && d.started { + // Check to see if d.Drained is closed before closing it. 
+ // This allows SetPending(false) to be safely called multiple times. + select { + case <-d.Drained: + default: + close(d.Drained) + } + } +} diff --git a/vendor/cloud.google.com/go/pubsub/keepalive_test.go b/vendor/cloud.google.com/go/pubsub/keepalive_test.go new file mode 100644 index 00000000..0128afe6 --- /dev/null +++ b/vendor/cloud.google.com/go/pubsub/keepalive_test.go @@ -0,0 +1,319 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package pubsub + +import ( + "errors" + "reflect" + "sort" + "testing" + "time" + + "golang.org/x/net/context" +) + +func TestKeepAliveExtendsDeadline(t *testing.T) { + ticker := make(chan time.Time) + deadline := time.Nanosecond * 15 + s := &testService{modDeadlineCalled: make(chan modDeadlineCall)} + + checkModDeadlineCall := func(ackIDs []string) { + got := <-s.modDeadlineCalled + sort.Strings(got.ackIDs) + + want := modDeadlineCall{ + subName: "subname", + deadline: deadline, + ackIDs: ackIDs, + } + + if !reflect.DeepEqual(got, want) { + t.Errorf("keepalive: got:\n%v\nwant:\n%v", got, want) + } + } + + ka := &keepAlive{ + s: s, + Ctx: context.Background(), + Sub: "subname", + ExtensionTick: ticker, + Deadline: deadline, + MaxExtension: time.Hour, + } + ka.Start() + + ka.Add("a") + ka.Add("b") + ticker <- time.Time{} + checkModDeadlineCall([]string{"a", "b"}) + ka.Add("c") + ka.Remove("b") + ticker <- time.Time{} + checkModDeadlineCall([]string{"a", "c"}) + ka.Remove("a") + ka.Remove("c") + ka.Add("d") + ticker <- time.Time{} + checkModDeadlineCall([]string{"d"}) + + ka.Remove("d") + ka.Stop() +} + +func TestKeepAliveStopsWhenNoItem(t *testing.T) { + ticker := make(chan time.Time) + stopped := make(chan bool) + s := &testService{modDeadlineCalled: make(chan modDeadlineCall, 3)} + ka := &keepAlive{ + s: s, + Ctx: context.Background(), + ExtensionTick: ticker, + } + + ka.Start() + + // There should be no call to modifyAckDeadline since there is no item. 
+ ticker <- time.Time{} + + go func() { + ka.Stop() // No items; should not block + if len(s.modDeadlineCalled) > 0 { + t.Errorf("unexpected extension to non-existent items: %v", <-s.modDeadlineCalled) + } + close(stopped) + }() + + select { + case <-stopped: + case <-time.After(time.Second): + t.Errorf("keepAlive timed out waiting for stop") + } +} + +func TestKeepAliveStopsWhenItemsExpired(t *testing.T) { + ticker := make(chan time.Time) + stopped := make(chan bool) + s := &testService{modDeadlineCalled: make(chan modDeadlineCall, 2)} + ka := &keepAlive{ + s: s, + Ctx: context.Background(), + ExtensionTick: ticker, + MaxExtension: time.Duration(0), // Should expire items at the first tick. + } + + ka.Start() + ka.Add("a") + ka.Add("b") + + // Wait until the clock advances. Without this loop, this test fails on + // Windows because the clock doesn't advance at all between ka.Add and the + // expiration check after the tick is received. + begin := time.Now() + for time.Now().Equal(begin) { + time.Sleep(time.Millisecond) + } + + // There should be no call to modifyAckDeadline since both items are expired. + ticker <- time.Time{} + + go func() { + ka.Stop() // No live items; should not block. + if len(s.modDeadlineCalled) > 0 { + t.Errorf("unexpected extension to expired items") + } + close(stopped) + }() + + select { + case <-stopped: + case <-time.After(time.Second): + t.Errorf("timed out waiting for stop") + } +} + +func TestKeepAliveBlocksUntilAllItemsRemoved(t *testing.T) { + ticker := make(chan time.Time) + eventc := make(chan string, 3) + s := &testService{modDeadlineCalled: make(chan modDeadlineCall)} + ka := &keepAlive{ + s: s, + Ctx: context.Background(), + ExtensionTick: ticker, + MaxExtension: time.Hour, // Should not expire. + } + + ka.Start() + ka.Add("a") + ka.Add("b") + + go func() { + ticker <- time.Time{} + + // We expect a call since both items should be extended. 
+ select { + case args := <-s.modDeadlineCalled: + sort.Strings(args.ackIDs) + got := args.ackIDs + want := []string{"a", "b"} + if !reflect.DeepEqual(got, want) { + t.Errorf("mismatching IDs:\ngot %v\nwant %v", got, want) + } + case <-time.After(time.Second): + t.Errorf("timed out waiting for deadline extend call") + } + + time.Sleep(10 * time.Millisecond) + + eventc <- "pre-remove-b" + // Remove one item, Stop should still be waiting. + ka.Remove("b") + + ticker <- time.Time{} + + // We expect a call since the item is still alive. + select { + case args := <-s.modDeadlineCalled: + got := args.ackIDs + want := []string{"a"} + if !reflect.DeepEqual(got, want) { + t.Errorf("mismatching IDs:\ngot %v\nwant %v", got, want) + } + case <-time.After(time.Second): + t.Errorf("timed out waiting for deadline extend call") + } + + time.Sleep(10 * time.Millisecond) + + eventc <- "pre-remove-a" + // Remove the last item so that Stop can proceed. + ka.Remove("a") + }() + + go func() { + ka.Stop() // Should block all item are removed. + eventc <- "post-stop" + }() + + for i, want := range []string{"pre-remove-b", "pre-remove-a", "post-stop"} { + select { + case got := <-eventc: + if got != want { + t.Errorf("event #%d:\ngot %v\nwant %v", i, got, want) + } + case <-time.After(time.Second): + t.Errorf("time out waiting for #%d event: want %v", i, want) + } + } +} + +// extendCallResult contains a list of ackIDs which are expected in an ackID +// extension request, along with the result that should be returned. +type extendCallResult struct { + ackIDs []string + err error +} + +// extendService implements modifyAckDeadline using a hard-coded list of extendCallResults. +type extendService struct { + service + + calls []extendCallResult + + t *testing.T // used for error logging. 
+} + +func (es *extendService) modifyAckDeadline(ctx context.Context, subName string, deadline time.Duration, ackIDs []string) error { + if len(es.calls) == 0 { + es.t.Fatalf("unexpected call to modifyAckDeadline: ackIDs: %v", ackIDs) + } + call := es.calls[0] + es.calls = es.calls[1:] + + if got, want := ackIDs, call.ackIDs; !reflect.DeepEqual(got, want) { + es.t.Errorf("unexpected arguments to modifyAckDeadline: got: %v ; want: %v", got, want) + } + return call.err +} + +// Test implementation returns the first 2 elements as head, and the rest as tail. +func (es *extendService) splitAckIDs(ids []string) ([]string, []string) { + if len(ids) < 2 { + return ids, nil + } + return ids[:2], ids[2:] +} + +func TestKeepAliveSplitsBatches(t *testing.T) { + type testCase struct { + calls []extendCallResult + } + for _, tc := range []testCase{ + { + calls: []extendCallResult{ + { + ackIDs: []string{"a", "b"}, + }, + { + ackIDs: []string{"c", "d"}, + }, + { + ackIDs: []string{"e", "f"}, + }, + }, + }, + { + calls: []extendCallResult{ + { + ackIDs: []string{"a", "b"}, + err: errors.New("bang"), + }, + // On error we retry once. + { + ackIDs: []string{"a", "b"}, + err: errors.New("bang"), + }, + // We give up after failing twice, so we move on to the next set, "c" and "d". + { + ackIDs: []string{"c", "d"}, + err: errors.New("bang"), + }, + // Again, we retry once. 
+ { + ackIDs: []string{"c", "d"}, + }, + { + ackIDs: []string{"e", "f"}, + }, + }, + }, + } { + s := &extendService{ + t: t, + calls: tc.calls, + } + + ka := &keepAlive{ + s: s, + Ctx: context.Background(), + Sub: "subname", + } + + ka.extendDeadlines([]string{"a", "b", "c", "d", "e", "f"}) + + if len(s.calls) != 0 { + t.Errorf("expected extend calls did not occur: %v", s.calls) + } + } +} diff --git a/vendor/cloud.google.com/go/pubsub/message.go b/vendor/cloud.google.com/go/pubsub/message.go new file mode 100644 index 00000000..2ecc86c5 --- /dev/null +++ b/vendor/cloud.google.com/go/pubsub/message.go @@ -0,0 +1,84 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package pubsub + +import ( + "time" + + "github.com/golang/protobuf/ptypes" + pb "google.golang.org/genproto/googleapis/pubsub/v1" +) + +// Message represents a Pub/Sub message. +type Message struct { + // ID identifies this message. + // This ID is assigned by the server and is populated for Messages obtained from a subscription. + // This field is read-only. + ID string + + // Data is the actual data in the message. + Data []byte + + // Attributes represents the key-value pairs the current message + // is labelled with. + Attributes map[string]string + + // ackID is the identifier to acknowledge this message. + ackID string + + // The time at which the message was published. 
+ // This is populated by the server for Messages obtained from a subscription. + // This field is read-only. + PublishTime time.Time + + calledDone bool + + // The done method of the iterator that created this Message. + done func(string, bool) +} + +func toMessage(resp *pb.ReceivedMessage) (*Message, error) { + if resp.Message == nil { + return &Message{ackID: resp.AckId}, nil + } + + pubTime, err := ptypes.Timestamp(resp.Message.PublishTime) + if err != nil { + return nil, err + } + return &Message{ + ackID: resp.AckId, + Data: resp.Message.Data, + Attributes: resp.Message.Attributes, + ID: resp.Message.MessageId, + PublishTime: pubTime, + }, nil +} + +// Done completes the processing of a Message that was returned from a MessageIterator. +// ack indicates whether the message should be acknowledged. +// Client code must call Done when finished for each Message returned by an iterator. +// Done may only be called on Messages returned by a MessageIterator. +// If message acknowledgement fails, the Message will be redelivered. +// Calls to Done have no effect after the first call. +// +// See MessageIterator.Next for an example. +func (m *Message) Done(ack bool) { + if m.calledDone { + return + } + m.calledDone = true + m.done(m.ackID, ack) +} diff --git a/vendor/cloud.google.com/go/pubsub/pubsub.go b/vendor/cloud.google.com/go/pubsub/pubsub.go new file mode 100644 index 00000000..0aab2ba8 --- /dev/null +++ b/vendor/cloud.google.com/go/pubsub/pubsub.go @@ -0,0 +1,136 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package pubsub // import "cloud.google.com/go/pubsub" + +import ( + "fmt" + "os" + + "google.golang.org/api/iterator" + "google.golang.org/api/option" + "google.golang.org/grpc" + + "golang.org/x/net/context" +) + +const ( + // ScopePubSub grants permissions to view and manage Pub/Sub + // topics and subscriptions. + ScopePubSub = "https://www.googleapis.com/auth/pubsub" + + // ScopeCloudPlatform grants permissions to view and manage your data + // across Google Cloud Platform services. + ScopeCloudPlatform = "https://www.googleapis.com/auth/cloud-platform" +) + +const prodAddr = "https://pubsub.googleapis.com/" +const userAgent = "gcloud-golang-pubsub/20160927" + +// Client is a Google Pub/Sub client scoped to a single project. +// +// Clients should be reused rather than being created as needed. +// A Client may be shared by multiple goroutines. +type Client struct { + projectID string + s service +} + +// NewClient creates a new PubSub client. +func NewClient(ctx context.Context, projectID string, opts ...option.ClientOption) (*Client, error) { + var o []option.ClientOption + // Environment variables for gcloud emulator: + // https://cloud.google.com/sdk/gcloud/reference/beta/emulators/pubsub/ + if addr := os.Getenv("PUBSUB_EMULATOR_HOST"); addr != "" { + conn, err := grpc.Dial(addr, grpc.WithInsecure()) + if err != nil { + return nil, fmt.Errorf("grpc.Dial: %v", err) + } + o = []option.ClientOption{option.WithGRPCConn(conn)} + } else { + o = []option.ClientOption{option.WithUserAgent(userAgent)} + } + o = append(o, opts...) 
+ s, err := newPubSubService(ctx, o) + if err != nil { + return nil, fmt.Errorf("constructing pubsub client: %v", err) + } + + c := &Client{ + projectID: projectID, + s: s, + } + + return c, nil +} + +// Close closes any resources held by the client. +// +// Close need not be called at program exit. +func (c *Client) Close() error { + return c.s.close() +} + +func (c *Client) fullyQualifiedProjectName() string { + return fmt.Sprintf("projects/%s", c.projectID) +} + +// pageToken stores the next page token for a server response which is split over multiple pages. +type pageToken struct { + tok string + explicit bool +} + +func (pt *pageToken) set(tok string) { + pt.tok = tok + pt.explicit = true +} + +func (pt *pageToken) get() string { + return pt.tok +} + +// more returns whether further pages should be fetched from the server. +func (pt *pageToken) more() bool { + return pt.tok != "" || !pt.explicit +} + +// stringsIterator provides an iterator API for a sequence of API page fetches that return lists of strings. +type stringsIterator struct { + ctx context.Context + strings []string + token pageToken + fetch func(ctx context.Context, tok string) (*stringsPage, error) +} + +// Next returns the next string. If there are no more strings, iterator.Done will be returned. +func (si *stringsIterator) Next() (string, error) { + for len(si.strings) == 0 && si.token.more() { + page, err := si.fetch(si.ctx, si.token.get()) + if err != nil { + return "", err + } + si.token.set(page.tok) + si.strings = page.strings + } + + if len(si.strings) == 0 { + return "", iterator.Done + } + + s := si.strings[0] + si.strings = si.strings[1:] + + return s, nil +} diff --git a/vendor/cloud.google.com/go/pubsub/puller.go b/vendor/cloud.google.com/go/pubsub/puller.go new file mode 100644 index 00000000..f3ffa3e1 --- /dev/null +++ b/vendor/cloud.google.com/go/pubsub/puller.go @@ -0,0 +1,115 @@ +// Copyright 2016 Google Inc. All Rights Reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package pubsub + +import ( + "sync" + + "golang.org/x/net/context" +) + +// puller fetches messages from the server in a batch. +type puller struct { + ctx context.Context + cancel context.CancelFunc + + // keepAlive takes ownership of the lifetime of the message identified + // by ackID, ensuring that its ack deadline does not expire. It should + // be called each time a new message is fetched from the server, even + // if it is not yet returned from Next. + keepAlive func(ackID string) + + // abandon should be called for each message which has previously been + // passed to keepAlive, but will never be returned by Next. + abandon func(ackID string) + + // fetch fetches a batch of messages from the server. + fetch func() ([]*Message, error) + + mu sync.Mutex + buf []*Message +} + +// newPuller constructs a new puller. +// batchSize is the maximum number of messages to fetch at once. +// No more than batchSize messages will be outstanding at any time. 
+func newPuller(s service, subName string, ctx context.Context, batchSize int32, keepAlive, abandon func(ackID string)) *puller { + ctx, cancel := context.WithCancel(ctx) + return &puller{ + cancel: cancel, + keepAlive: keepAlive, + abandon: abandon, + ctx: ctx, + fetch: func() ([]*Message, error) { return s.fetchMessages(ctx, subName, batchSize) }, + } +} + +const maxPullAttempts = 2 + +// Next returns the next message from the server, fetching a new batch if necessary. +// keepAlive is called with the ackIDs of newly fetched messages. +// If p.Ctx has already been cancelled before Next is called, no new messages +// will be fetched. +func (p *puller) Next() (*Message, error) { + p.mu.Lock() + defer p.mu.Unlock() + + // If ctx has been cancelled, return straight away (even if there are buffered messages available). + select { + case <-p.ctx.Done(): + return nil, p.ctx.Err() + default: + } + + for len(p.buf) == 0 { + var buf []*Message + var err error + + for i := 0; i < maxPullAttempts; i++ { + // Once Stop has completed, all future calls to Next will immediately fail at this point. + buf, err = p.fetch() + if err == nil || err == context.Canceled || err == context.DeadlineExceeded { + break + } + } + if err != nil { + return nil, err + } + + for _, m := range buf { + p.keepAlive(m.ackID) + } + p.buf = buf + } + + m := p.buf[0] + p.buf = p.buf[1:] + return m, nil +} + +// Stop aborts any pending calls to Next, and prevents any future ones from succeeding. +// Stop also abandons any messages that have been pre-fetched. +// Once Stop completes, no calls to Next will succeed. +func (p *puller) Stop() { + // Next may be executing in another goroutine. Cancel it, and then wait until it terminates. 
+ p.cancel() + p.mu.Lock() + defer p.mu.Unlock() + + for _, m := range p.buf { + p.abandon(m.ackID) + } + p.buf = nil +} diff --git a/vendor/cloud.google.com/go/pubsub/puller_test.go b/vendor/cloud.google.com/go/pubsub/puller_test.go new file mode 100644 index 00000000..2cc9ce03 --- /dev/null +++ b/vendor/cloud.google.com/go/pubsub/puller_test.go @@ -0,0 +1,154 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package pubsub + +import ( + "errors" + "reflect" + "testing" + + "golang.org/x/net/context" +) + +type fetchResult struct { + msgs []*Message + err error +} + +type fetcherService struct { + service + results []fetchResult + unexpectedCall bool +} + +func (s *fetcherService) fetchMessages(ctx context.Context, subName string, maxMessages int32) ([]*Message, error) { + if len(s.results) == 0 { + s.unexpectedCall = true + return nil, errors.New("bang") + } + ret := s.results[0] + s.results = s.results[1:] + return ret.msgs, ret.err +} + +func TestPuller(t *testing.T) { + s := &fetcherService{ + results: []fetchResult{ + { + msgs: []*Message{{ackID: "a"}, {ackID: "b"}}, + }, + {}, + { + msgs: []*Message{{ackID: "c"}, {ackID: "d"}}, + }, + { + msgs: []*Message{{ackID: "e"}}, + }, + }, + } + + pulled := make(chan string, 10) + + pull := newPuller(s, "subname", context.Background(), 2, func(ackID string) { pulled <- ackID }, func(string) {}) + + got := []string{} + for i := 0; i < 5; i++ { + m, err := 
pull.Next() + got = append(got, m.ackID) + if err != nil { + t.Errorf("unexpected err from pull.Next: %v", err) + } + } + _, err := pull.Next() + if err == nil { + t.Errorf("unexpected err from pull.Next: %v", err) + } + + want := []string{"a", "b", "c", "d", "e"} + if !reflect.DeepEqual(got, want) { + t.Errorf("pulled ack ids: got: %v ; want: %v", got, want) + } +} + +func TestPullerAddsToKeepAlive(t *testing.T) { + s := &fetcherService{ + results: []fetchResult{ + { + msgs: []*Message{{ackID: "a"}, {ackID: "b"}}, + }, + { + msgs: []*Message{{ackID: "c"}, {ackID: "d"}}, + }, + }, + } + + pulled := make(chan string, 10) + + pull := newPuller(s, "subname", context.Background(), 2, func(ackID string) { pulled <- ackID }, func(string) {}) + + got := []string{} + for i := 0; i < 3; i++ { + m, err := pull.Next() + got = append(got, m.ackID) + if err != nil { + t.Errorf("unexpected err from pull.Next: %v", err) + } + } + + want := []string{"a", "b", "c"} + if !reflect.DeepEqual(got, want) { + t.Errorf("pulled ack ids: got: %v ; want: %v", got, want) + } + + close(pulled) + // We should have seen "d" written to the channel too, even though it hasn't been returned yet. 
+ pulledIDs := []string{} + for id := range pulled { + pulledIDs = append(pulledIDs, id) + } + + want = append(want, "d") + if !reflect.DeepEqual(pulledIDs, want) { + t.Errorf("pulled ack ids: got: %v ; want: %v", pulledIDs, want) + } +} + +func TestPullerRetriesOnce(t *testing.T) { + bang := errors.New("bang") + s := &fetcherService{ + results: []fetchResult{ + { + err: bang, + }, + { + err: bang, + }, + }, + } + + pull := newPuller(s, "subname", context.Background(), 2, func(string) {}, func(string) {}) + + _, err := pull.Next() + if err != bang { + t.Errorf("pull.Next err got: %v, want: %v", err, bang) + } + + if s.unexpectedCall { + t.Errorf("unexpected retry") + } + if len(s.results) != 0 { + t.Errorf("outstanding calls: got: %v, want: 0", len(s.results)) + } +} diff --git a/vendor/cloud.google.com/go/pubsub/service.go b/vendor/cloud.google.com/go/pubsub/service.go new file mode 100644 index 00000000..328fe48d --- /dev/null +++ b/vendor/cloud.google.com/go/pubsub/service.go @@ -0,0 +1,485 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package pubsub + +import ( + "fmt" + "io" + "math" + "sync" + "time" + + "cloud.google.com/go/iam" + "cloud.google.com/go/internal/version" + vkit "cloud.google.com/go/pubsub/apiv1" + "golang.org/x/net/context" + "google.golang.org/api/option" + pb "google.golang.org/genproto/googleapis/pubsub/v1" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" +) + +type nextStringFunc func() (string, error) + +// service provides an internal abstraction to isolate the generated +// PubSub API; most of this package uses this interface instead. +// The single implementation, *apiService, contains all the knowledge +// of the generated PubSub API (except for that present in legacy code). +type service interface { + createSubscription(ctx context.Context, topicName, subName string, ackDeadline time.Duration, pushConfig *PushConfig) error + getSubscriptionConfig(ctx context.Context, subName string) (*SubscriptionConfig, string, error) + listProjectSubscriptions(ctx context.Context, projName string) nextStringFunc + deleteSubscription(ctx context.Context, name string) error + subscriptionExists(ctx context.Context, name string) (bool, error) + modifyPushConfig(ctx context.Context, subName string, conf *PushConfig) error + + createTopic(ctx context.Context, name string) error + deleteTopic(ctx context.Context, name string) error + topicExists(ctx context.Context, name string) (bool, error) + listProjectTopics(ctx context.Context, projName string) nextStringFunc + listTopicSubscriptions(ctx context.Context, topicName string) nextStringFunc + + modifyAckDeadline(ctx context.Context, subName string, deadline time.Duration, ackIDs []string) error + fetchMessages(ctx context.Context, subName string, maxMessages int32) ([]*Message, error) + publishMessages(ctx context.Context, topicName string, msgs []*Message) ([]string, error) + + // splitAckIDs divides ackIDs into + // * a batch of a size which is suitable for passing to acknowledge or + // modifyAckDeadline, and + // * the 
rest. + splitAckIDs(ackIDs []string) ([]string, []string) + + // acknowledge ACKs the IDs in ackIDs. + acknowledge(ctx context.Context, subName string, ackIDs []string) error + + iamHandle(resourceName string) *iam.Handle + + newStreamingPuller(ctx context.Context, subName string, ackDeadline int32) *streamingPuller + + close() error +} + +type apiService struct { + pubc *vkit.PublisherClient + subc *vkit.SubscriberClient +} + +func newPubSubService(ctx context.Context, opts []option.ClientOption) (*apiService, error) { + pubc, err := vkit.NewPublisherClient(ctx, opts...) + if err != nil { + return nil, err + } + subc, err := vkit.NewSubscriberClient(ctx, option.WithGRPCConn(pubc.Connection())) + if err != nil { + _ = pubc.Close() // ignore error + return nil, err + } + pubc.SetGoogleClientInfo("gccl", version.Repo) + subc.SetGoogleClientInfo("gccl", version.Repo) + return &apiService{pubc: pubc, subc: subc}, nil +} + +func (s *apiService) close() error { + // Return the first error, because the first call closes the connection. 
+ err := s.pubc.Close() + _ = s.subc.Close() + return err +} + +func (s *apiService) createSubscription(ctx context.Context, topicName, subName string, ackDeadline time.Duration, pushConfig *PushConfig) error { + var rawPushConfig *pb.PushConfig + if pushConfig != nil { + rawPushConfig = &pb.PushConfig{ + Attributes: pushConfig.Attributes, + PushEndpoint: pushConfig.Endpoint, + } + } + _, err := s.subc.CreateSubscription(ctx, &pb.Subscription{ + Name: subName, + Topic: topicName, + PushConfig: rawPushConfig, + AckDeadlineSeconds: trunc32(int64(ackDeadline.Seconds())), + }) + return err +} + +func (s *apiService) getSubscriptionConfig(ctx context.Context, subName string) (*SubscriptionConfig, string, error) { + rawSub, err := s.subc.GetSubscription(ctx, &pb.GetSubscriptionRequest{Subscription: subName}) + if err != nil { + return nil, "", err + } + sub := &SubscriptionConfig{ + AckDeadline: time.Second * time.Duration(rawSub.AckDeadlineSeconds), + PushConfig: PushConfig{ + Endpoint: rawSub.PushConfig.PushEndpoint, + Attributes: rawSub.PushConfig.Attributes, + }, + } + return sub, rawSub.Topic, nil +} + +// stringsPage contains a list of strings and a token for fetching the next page. 
+type stringsPage struct { + strings []string + tok string +} + +func (s *apiService) listProjectSubscriptions(ctx context.Context, projName string) nextStringFunc { + it := s.subc.ListSubscriptions(ctx, &pb.ListSubscriptionsRequest{ + Project: projName, + }) + return func() (string, error) { + sub, err := it.Next() + if err != nil { + return "", err + } + return sub.Name, nil + } +} + +func (s *apiService) deleteSubscription(ctx context.Context, name string) error { + return s.subc.DeleteSubscription(ctx, &pb.DeleteSubscriptionRequest{Subscription: name}) +} + +func (s *apiService) subscriptionExists(ctx context.Context, name string) (bool, error) { + _, err := s.subc.GetSubscription(ctx, &pb.GetSubscriptionRequest{Subscription: name}) + if err == nil { + return true, nil + } + if grpc.Code(err) == codes.NotFound { + return false, nil + } + return false, err +} + +func (s *apiService) createTopic(ctx context.Context, name string) error { + _, err := s.pubc.CreateTopic(ctx, &pb.Topic{Name: name}) + return err +} + +func (s *apiService) listProjectTopics(ctx context.Context, projName string) nextStringFunc { + it := s.pubc.ListTopics(ctx, &pb.ListTopicsRequest{ + Project: projName, + }) + return func() (string, error) { + topic, err := it.Next() + if err != nil { + return "", err + } + return topic.Name, nil + } +} + +func (s *apiService) deleteTopic(ctx context.Context, name string) error { + return s.pubc.DeleteTopic(ctx, &pb.DeleteTopicRequest{Topic: name}) +} + +func (s *apiService) topicExists(ctx context.Context, name string) (bool, error) { + _, err := s.pubc.GetTopic(ctx, &pb.GetTopicRequest{Topic: name}) + if err == nil { + return true, nil + } + if grpc.Code(err) == codes.NotFound { + return false, nil + } + return false, err +} + +func (s *apiService) listTopicSubscriptions(ctx context.Context, topicName string) nextStringFunc { + it := s.pubc.ListTopicSubscriptions(ctx, &pb.ListTopicSubscriptionsRequest{ + Topic: topicName, + }) + return it.Next +} + 
+func (s *apiService) modifyAckDeadline(ctx context.Context, subName string, deadline time.Duration, ackIDs []string) error { + return s.subc.ModifyAckDeadline(ctx, &pb.ModifyAckDeadlineRequest{ + Subscription: subName, + AckIds: ackIDs, + AckDeadlineSeconds: trunc32(int64(deadline.Seconds())), + }) +} + +// maxPayload is the maximum number of bytes to devote to actual ids in +// acknowledgement or modifyAckDeadline requests. A serialized +// AcknowledgeRequest proto has a small constant overhead, plus the size of the +// subscription name, plus 3 bytes per ID (a tag byte and two size bytes). A +// ModifyAckDeadlineRequest has an additional few bytes for the deadline. We +// don't know the subscription name here, so we just assume the size exclusive +// of ids is 100 bytes. +// +// With gRPC there is no way for the client to know the server's max message size (it is +// configurable on the server). We know from experience that it +// it 512K. +const ( + maxPayload = 512 * 1024 + reqFixedOverhead = 100 + overheadPerID = 3 +) + +// splitAckIDs splits ids into two slices, the first of which contains at most maxPayload bytes of ackID data. 
+func (s *apiService) splitAckIDs(ids []string) ([]string, []string) { + total := reqFixedOverhead + for i, id := range ids { + total += len(id) + overheadPerID + if total > maxPayload { + return ids[:i], ids[i:] + } + } + return ids, nil +} + +func (s *apiService) acknowledge(ctx context.Context, subName string, ackIDs []string) error { + return s.subc.Acknowledge(ctx, &pb.AcknowledgeRequest{ + Subscription: subName, + AckIds: ackIDs, + }) +} + +func (s *apiService) fetchMessages(ctx context.Context, subName string, maxMessages int32) ([]*Message, error) { + resp, err := s.subc.Pull(ctx, &pb.PullRequest{ + Subscription: subName, + MaxMessages: maxMessages, + }) + if err != nil { + return nil, err + } + return convertMessages(resp.ReceivedMessages) +} + +func convertMessages(rms []*pb.ReceivedMessage) ([]*Message, error) { + msgs := make([]*Message, 0, len(rms)) + for i, m := range rms { + msg, err := toMessage(m) + if err != nil { + return nil, fmt.Errorf("pubsub: cannot decode the retrieved message at index: %d, message: %+v", i, m) + } + msgs = append(msgs, msg) + } + return msgs, nil +} + +func (s *apiService) publishMessages(ctx context.Context, topicName string, msgs []*Message) ([]string, error) { + rawMsgs := make([]*pb.PubsubMessage, len(msgs)) + for i, msg := range msgs { + rawMsgs[i] = &pb.PubsubMessage{ + Data: msg.Data, + Attributes: msg.Attributes, + } + } + resp, err := s.pubc.Publish(ctx, &pb.PublishRequest{ + Topic: topicName, + Messages: rawMsgs, + }) + if err != nil { + return nil, err + } + return resp.MessageIds, nil +} + +func (s *apiService) modifyPushConfig(ctx context.Context, subName string, conf *PushConfig) error { + return s.subc.ModifyPushConfig(ctx, &pb.ModifyPushConfigRequest{ + Subscription: subName, + PushConfig: &pb.PushConfig{ + Attributes: conf.Attributes, + PushEndpoint: conf.Endpoint, + }, + }) +} + +func (s *apiService) iamHandle(resourceName string) *iam.Handle { + return iam.InternalNewHandle(s.pubc.Connection(), 
resourceName) +} + +func trunc32(i int64) int32 { + if i > math.MaxInt32 { + i = math.MaxInt32 + } + return int32(i) +} + +func (s *apiService) newStreamingPuller(ctx context.Context, subName string, ackDeadlineSecs int32) *streamingPuller { + p := &streamingPuller{ + ctx: ctx, + subName: subName, + ackDeadlineSecs: ackDeadlineSecs, + subc: s.subc, + } + p.c = sync.NewCond(&p.mu) + return p +} + +type streamingPuller struct { + ctx context.Context + subName string + ackDeadlineSecs int32 + subc *vkit.SubscriberClient + + mu sync.Mutex + c *sync.Cond + inFlight bool + closed bool // set after CloseSend called + spc pb.Subscriber_StreamingPullClient + err error +} + +// open establishes (or re-establishes) a stream for pulling messages. +// It takes care that only one RPC is in flight at a time. +func (p *streamingPuller) open() error { + p.c.L.Lock() + defer p.c.L.Unlock() + p.openLocked() + return p.err +} + +func (p *streamingPuller) openLocked() { + if p.inFlight { + // Another goroutine is opening; wait for it. + for p.inFlight { + p.c.Wait() + } + return + } + // No opens in flight; start one. + p.inFlight = true + p.c.L.Unlock() + spc, err := p.subc.StreamingPull(p.ctx) + if err == nil { + err = spc.Send(&pb.StreamingPullRequest{ + Subscription: p.subName, + StreamAckDeadlineSeconds: p.ackDeadlineSecs, + }) + } + p.c.L.Lock() + p.spc = spc + p.err = err + p.inFlight = false + p.c.Broadcast() +} + +func (p *streamingPuller) call(f func(pb.Subscriber_StreamingPullClient) error) error { + p.c.L.Lock() + defer p.c.L.Unlock() + // Wait for an open in flight. + for p.inFlight { + p.c.Wait() + } + // TODO(jba): better retry strategy. + var err error + for i := 0; i < 3; i++ { + if p.err != nil { + return p.err + } + spc := p.spc + // Do not call f with the lock held. Only one goroutine calls Send + // (streamingMessageIterator.sender) and only one calls Recv + // (streamingMessageIterator.receiver). 
If we locked, then a + // blocked Recv would prevent a Send from happening. + p.c.L.Unlock() + err = f(spc) + p.c.L.Lock() + if !p.closed && (err == io.EOF || grpc.Code(err) == codes.Unavailable) { + time.Sleep(500 * time.Millisecond) + p.openLocked() + continue + } + // Not a retry-able error; fail permanently. + // TODO(jba): for some errors, should we retry f (the Send or Recv) + // but not re-open the stream? + p.err = err + return err + } + p.err = fmt.Errorf("retry exceeded; last error was %v", err) + return p.err +} + +func (p *streamingPuller) fetchMessages() ([]*Message, error) { + var res *pb.StreamingPullResponse + err := p.call(func(spc pb.Subscriber_StreamingPullClient) error { + var err error + res, err = spc.Recv() + return err + }) + if err != nil { + return nil, err + } + return convertMessages(res.ReceivedMessages) +} + +func (p *streamingPuller) send(req *pb.StreamingPullRequest) error { + // Note: len(modAckIDs) == len(modSecs) + var rest *pb.StreamingPullRequest + for len(req.AckIds) > 0 || len(req.ModifyDeadlineAckIds) > 0 { + req, rest = splitRequest(req, maxPayload) + err := p.call(func(spc pb.Subscriber_StreamingPullClient) error { + x := spc.Send(req) + return x + }) + if err != nil { + return err + } + req = rest + } + return nil +} + +func (p *streamingPuller) closeSend() { + p.mu.Lock() + p.closed = true + p.mu.Unlock() + p.spc.CloseSend() +} + +// Split req into a prefix that is smaller than maxSize, and a remainder. +func splitRequest(req *pb.StreamingPullRequest, maxSize int) (prefix, remainder *pb.StreamingPullRequest) { + const int32Bytes = 4 + + // Copy all fields before splitting the variable-sized ones. + remainder = &pb.StreamingPullRequest{} + *remainder = *req + // Split message so it isn't too big. 
+ size := reqFixedOverhead + i := 0 + for size < maxSize && (i < len(req.AckIds) || i < len(req.ModifyDeadlineAckIds)) { + if i < len(req.AckIds) { + size += overheadPerID + len(req.AckIds[i]) + } + if i < len(req.ModifyDeadlineAckIds) { + size += overheadPerID + len(req.ModifyDeadlineAckIds[i]) + int32Bytes + } + i++ + } + + min := func(a, b int) int { + if a < b { + return a + } + return b + } + + j := i + if size > maxSize { + j-- + } + k := min(j, len(req.AckIds)) + remainder.AckIds = req.AckIds[k:] + req.AckIds = req.AckIds[:k] + k = min(j, len(req.ModifyDeadlineAckIds)) + remainder.ModifyDeadlineAckIds = req.ModifyDeadlineAckIds[k:] + remainder.ModifyDeadlineSeconds = req.ModifyDeadlineSeconds[k:] + req.ModifyDeadlineAckIds = req.ModifyDeadlineAckIds[:k] + req.ModifyDeadlineSeconds = req.ModifyDeadlineSeconds[:k] + return req, remainder +} diff --git a/vendor/cloud.google.com/go/pubsub/service_test.go b/vendor/cloud.google.com/go/pubsub/service_test.go new file mode 100644 index 00000000..e8a9b0a1 --- /dev/null +++ b/vendor/cloud.google.com/go/pubsub/service_test.go @@ -0,0 +1,68 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package pubsub + +import ( + "reflect" + "testing" + + pb "google.golang.org/genproto/googleapis/pubsub/v1" +) + +func TestSplitRequest(t *testing.T) { + split := func(a []string, i int) ([]string, []string) { + if len(a) < i { + return a, nil + } + return a[:i], a[i:] + } + ackIDs := []string{"aaaa", "bbbb", "cccc", "dddd", "eeee"} + modDeadlines := []int32{1, 2, 3, 4, 5} + for i, test := range []struct { + ackIDs []string + modAckIDs []string + splitIndex int + }{ + {ackIDs, ackIDs, 2}, + {nil, ackIDs, 3}, + {ackIDs, nil, 5}, + {nil, ackIDs[:1], 1}, + } { + req := &pb.StreamingPullRequest{ + AckIds: test.ackIDs, + ModifyDeadlineAckIds: test.modAckIDs, + ModifyDeadlineSeconds: modDeadlines[:len(test.modAckIDs)], + } + a1, a2 := split(test.ackIDs, test.splitIndex) + m1, m2 := split(test.modAckIDs, test.splitIndex) + want1 := &pb.StreamingPullRequest{ + AckIds: a1, + ModifyDeadlineAckIds: m1, + ModifyDeadlineSeconds: modDeadlines[:len(m1)], + } + want2 := &pb.StreamingPullRequest{ + AckIds: a2, + ModifyDeadlineAckIds: m2, + ModifyDeadlineSeconds: modDeadlines[len(m1) : len(m1)+len(m2)], + } + got1, got2 := splitRequest(req, reqFixedOverhead+40) + if !reflect.DeepEqual(got1, want1) { + t.Errorf("#%d: first:\ngot %+v\nwant %+v", i, got1, want1) + } + if !reflect.DeepEqual(got2, want2) { + t.Errorf("#%d: second:\ngot %+v\nwant %+v", i, got2, want2) + } + } +} diff --git a/vendor/cloud.google.com/go/pubsub/streaming_pull_test.go b/vendor/cloud.google.com/go/pubsub/streaming_pull_test.go new file mode 100644 index 00000000..c47adc1c --- /dev/null +++ b/vendor/cloud.google.com/go/pubsub/streaming_pull_test.go @@ -0,0 +1,277 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package pubsub + +// TODO(jba): test keepalive +// TODO(jba): test that expired messages are not kept alive +// TODO(jba): test that when all messages expire, Stop returns. + +import ( + "io" + "reflect" + "strconv" + "testing" + "time" + + tspb "github.com/golang/protobuf/ptypes/timestamp" + "golang.org/x/net/context" + "google.golang.org/api/iterator" + "google.golang.org/api/option" + pb "google.golang.org/genproto/googleapis/pubsub/v1" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" +) + +var ( + timestamp = &tspb.Timestamp{} + testMessages = []*pb.ReceivedMessage{ + {AckId: "1", Message: &pb.PubsubMessage{Data: []byte{1}, PublishTime: timestamp}}, + {AckId: "2", Message: &pb.PubsubMessage{Data: []byte{2}, PublishTime: timestamp}}, + {AckId: "3", Message: &pb.PubsubMessage{Data: []byte{3}, PublishTime: timestamp}}, + } +) + +func TestStreamingPullBasic(t *testing.T) { + client, server := newFake(t) + server.addStreamingPullMessages(testMessages) + testStreamingPullIteration(t, client, server, testMessages) +} + +func TestStreamingPullMultipleFetches(t *testing.T) { + client, server := newFake(t) + server.addStreamingPullMessages(testMessages[:1]) + server.addStreamingPullMessages(testMessages[1:]) + testStreamingPullIteration(t, client, server, testMessages) +} + +func testStreamingPullIteration(t *testing.T, client *Client, server *fakeServer, msgs []*pb.ReceivedMessage) { + if !useStreamingPull { + t.SkipNow() + } + sub := client.Subscription("s") + iter, err := sub.Pull(context.Background()) + if err != nil { + t.Fatal(err) + 
} + for i := 0; i < len(msgs); i++ { + got, err := iter.Next() + if err != nil { + t.Fatal(err) + } + got.Done(i%2 == 0) // ack evens, nack odds + want, err := toMessage(msgs[i]) + if err != nil { + t.Fatal(err) + } + want.calledDone = true + // Don't compare done; it's a function. + got.done = nil + if !reflect.DeepEqual(got, want) { + t.Errorf("%d: got\n%#v\nwant\n%#v", i, got, want) + } + + } + iter.Stop() + server.wait() + for i := 0; i < len(msgs); i++ { + id := msgs[i].AckId + if i%2 == 0 { + if !server.Acked[id] { + t.Errorf("msg %q should have been acked but wasn't", id) + } + } else { + if dl, ok := server.Deadlines[id]; !ok || dl != 0 { + t.Errorf("msg %q should have been nacked but wasn't", id) + } + } + } +} + +func TestStreamingPullStop(t *testing.T) { + if !useStreamingPull { + t.SkipNow() + } + // After Stop is called, Next returns iterator.Done. + client, server := newFake(t) + server.addStreamingPullMessages(testMessages) + sub := client.Subscription("s") + iter, err := sub.Pull(context.Background()) + if err != nil { + t.Fatal(err) + } + msg, err := iter.Next() + if err != nil { + t.Fatal(err) + } + msg.Done(true) + iter.Stop() + // Next should always return the same error. + for i := 0; i < 3; i++ { + _, err = iter.Next() + if want := iterator.Done; err != want { + t.Fatalf("got <%v> %p, want <%v> %p", err, err, want, want) + } + } +} + +func TestStreamingPullError(t *testing.T) { + if !useStreamingPull { + t.SkipNow() + } + client, server := newFake(t) + server.addStreamingPullError(grpc.Errorf(codes.Internal, "")) + sub := client.Subscription("s") + iter, err := sub.Pull(context.Background()) + if err != nil { + t.Fatal(err) + } + // Next should always return the same error. 
+ for i := 0; i < 3; i++ { + _, err = iter.Next() + if want := codes.Internal; grpc.Code(err) != want { + t.Fatalf("got <%v>, want code %v", err, want) + } + } +} + +func TestStreamingPullCancel(t *testing.T) { + if !useStreamingPull { + t.SkipNow() + } + // Test that canceling the iterator's context behaves correctly. + client, server := newFake(t) + server.addStreamingPullMessages(testMessages) + sub := client.Subscription("s") + ctx, cancel := context.WithCancel(context.Background()) + iter, err := sub.Pull(ctx) + if err != nil { + t.Fatal(err) + } + _, err = iter.Next() + if err != nil { + t.Fatal(err) + } + // Here we have one message read (but not acked), and two + // in the iterator's buffer. + cancel() + // Further calls to Next will return Canceled. + _, err = iter.Next() + if got, want := err, context.Canceled; got != want { + t.Errorf("got %v, want %v", got, want) + } + // Despite the unacked message, Stop will still return promptly. + done := make(chan struct{}) + go func() { + iter.Stop() + close(done) + }() + select { + case <-done: + case <-time.After(1 * time.Second): + t.Fatal("iter.Stop timed out") + } +} + +func TestStreamingPullRetry(t *testing.T) { + if !useStreamingPull { + t.SkipNow() + } + // Check that we retry on io.EOF or Unavailable. 
+ client, server := newFake(t) + server.addStreamingPullMessages(testMessages[:1]) + server.addStreamingPullError(io.EOF) + server.addStreamingPullError(io.EOF) + server.addStreamingPullMessages(testMessages[1:2]) + server.addStreamingPullError(grpc.Errorf(codes.Unavailable, "")) + server.addStreamingPullError(grpc.Errorf(codes.Unavailable, "")) + server.addStreamingPullMessages(testMessages[2:]) + testStreamingPullIteration(t, client, server, testMessages) +} + +func TestStreamingPullConcurrent(t *testing.T) { + if !useStreamingPull { + t.SkipNow() + } + newMsg := func(i int) *pb.ReceivedMessage { + return &pb.ReceivedMessage{ + AckId: strconv.Itoa(i), + Message: &pb.PubsubMessage{Data: []byte{byte(i)}, PublishTime: timestamp}, + } + } + + // Multiple goroutines should be able to read from the same iterator. + client, server := newFake(t) + // Add a lot of messages, a few at a time, to make sure both threads get a chance. + nMessages := 100 + for i := 0; i < nMessages; i += 2 { + server.addStreamingPullMessages([]*pb.ReceivedMessage{newMsg(i), newMsg(i + 1)}) + } + sub := client.Subscription("s") + iter, err := sub.Pull(context.Background()) + if err != nil { + t.Fatal(err) + } + seenc := make(chan string) + errc := make(chan error, 2) + for i := 0; i < 2; i++ { + go func() { + for { + msg, err := iter.Next() + if err == iterator.Done { + return + } + if err != nil { + errc <- err + return + } + // Must ack before sending to channel, or Stop may hang. 
+ msg.Done(true) + seenc <- msg.ackID + } + }() + } + seen := map[string]bool{} + for i := 0; i < nMessages; i++ { + select { + case err := <-errc: + t.Fatal(err) + case id := <-seenc: + if seen[id] { + t.Fatalf("duplicate ID %q", id) + } + seen[id] = true + } + } + iter.Stop() + if len(seen) != nMessages { + t.Fatalf("got %d messages, want %d", len(seen), nMessages) + } +} + +func newFake(t *testing.T) (*Client, *fakeServer) { + srv, err := newFakeServer() + if err != nil { + t.Fatal(err) + } + conn, err := grpc.Dial(srv.Addr, grpc.WithInsecure()) + if err != nil { + t.Fatal(err) + } + client, err := NewClient(context.Background(), "projectID", option.WithGRPCConn(conn)) + if err != nil { + t.Fatal(err) + } + return client, srv +} diff --git a/vendor/cloud.google.com/go/pubsub/subscription.go b/vendor/cloud.google.com/go/pubsub/subscription.go new file mode 100644 index 00000000..74f45898 --- /dev/null +++ b/vendor/cloud.google.com/go/pubsub/subscription.go @@ -0,0 +1,265 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package pubsub + +import ( + "errors" + "fmt" + "strings" + "time" + + "cloud.google.com/go/iam" + "golang.org/x/net/context" +) + +// The default period for which to automatically extend Message acknowledgement deadlines. +const DefaultMaxExtension = 10 * time.Minute + +// The default maximum number of messages that are prefetched from the server. 
+const DefaultMaxPrefetch = 100 + +// Subscription is a reference to a PubSub subscription. +type Subscription struct { + s service + + // The fully qualified identifier for the subscription, in the format "projects//subscriptions/" + name string +} + +// Subscription creates a reference to a subscription. +func (c *Client) Subscription(id string) *Subscription { + return &Subscription{ + s: c.s, + name: fmt.Sprintf("projects/%s/subscriptions/%s", c.projectID, id), + } +} + +// String returns the globally unique printable name of the subscription. +func (s *Subscription) String() string { + return s.name +} + +// ID returns the unique identifier of the subscription within its project. +func (s *Subscription) ID() string { + slash := strings.LastIndex(s.name, "/") + if slash == -1 { + // name is not a fully-qualified name. + panic("bad subscription name") + } + return s.name[slash+1:] +} + +// Subscriptions returns an iterator which returns all of the subscriptions for the client's project. +func (c *Client) Subscriptions(ctx context.Context) *SubscriptionIterator { + return &SubscriptionIterator{ + s: c.s, + next: c.s.listProjectSubscriptions(ctx, c.fullyQualifiedProjectName()), + } +} + +// SubscriptionIterator is an iterator that returns a series of subscriptions. +type SubscriptionIterator struct { + s service + next nextStringFunc +} + +// Next returns the next subscription. If there are no more subscriptions, iterator.Done will be returned. +func (subs *SubscriptionIterator) Next() (*Subscription, error) { + subName, err := subs.next() + if err != nil { + return nil, err + } + return &Subscription{s: subs.s, name: subName}, nil +} + +// PushConfig contains configuration for subscriptions that operate in push mode. +type PushConfig struct { + // A URL locating the endpoint to which messages should be pushed. + Endpoint string + + // Endpoint configuration attributes. 
See https://cloud.google.com/pubsub/docs/reference/rest/v1/projects.subscriptions#pushconfig for more details. + Attributes map[string]string +} + +// Subscription config contains the configuration of a subscription. +type SubscriptionConfig struct { + Topic *Topic + PushConfig PushConfig + + // The default maximum time after a subscriber receives a message before + // the subscriber should acknowledge the message. Note: messages which are + // obtained via a MessageIterator need not be acknowledged within this + // deadline, as the deadline will be automatically extended. + AckDeadline time.Duration +} + +// Delete deletes the subscription. +func (s *Subscription) Delete(ctx context.Context) error { + return s.s.deleteSubscription(ctx, s.name) +} + +// Exists reports whether the subscription exists on the server. +func (s *Subscription) Exists(ctx context.Context) (bool, error) { + return s.s.subscriptionExists(ctx, s.name) +} + +// Config fetches the current configuration for the subscription. +func (s *Subscription) Config(ctx context.Context) (*SubscriptionConfig, error) { + conf, topicName, err := s.s.getSubscriptionConfig(ctx, s.name) + if err != nil { + return nil, err + } + conf.Topic = &Topic{ + s: s.s, + name: topicName, + } + return conf, nil +} + +// Pull returns a MessageIterator that can be used to fetch Messages. The MessageIterator +// will automatically extend the ack deadline of all fetched Messages, for the +// period specified by DefaultMaxExtension. This may be overridden by supplying +// a MaxExtension pull option. +// +// If ctx is cancelled or exceeds its deadline, outstanding acks or deadline +// extensions will fail. +// +// The caller must call Stop on the MessageIterator once finished with it. 
+func (s *Subscription) Pull(ctx context.Context, opts ...PullOption) (*MessageIterator, error) {
+	config, err := s.Config(ctx)
+	if err != nil {
+		return nil, err
+	}
+	po := processPullOptions(opts)
+	po.ackDeadline = config.AckDeadline
+	return newMessageIterator(ctx, s.s, s.name, po), nil
+}
+
+// ModifyPushConfig updates the endpoint URL and other attributes of a push subscription.
+func (s *Subscription) ModifyPushConfig(ctx context.Context, conf *PushConfig) error {
+	if conf == nil {
+		return errors.New("must supply non-nil PushConfig")
+	}
+
+	return s.s.modifyPushConfig(ctx, s.name, conf)
+}
+
+func (s *Subscription) IAM() *iam.Handle {
+	return s.s.iamHandle(s.name)
+}
+
+// A PullOption is an optional argument to Subscription.Pull.
+type PullOption interface {
+	setOptions(o *pullOptions)
+}
+
+type pullOptions struct {
+	// maxExtension is the maximum period for which the iterator should
+	// automatically extend the ack deadline for each message.
+	maxExtension time.Duration
+
+	// maxPrefetch is the maximum number of Messages to have in flight, to
+	// be returned by MessageIterator.Next.
+	maxPrefetch int32
+
+	// ackDeadline is the default ack deadline for the subscription. Not
+	// configurable via a PullOption.
+	ackDeadline time.Duration
+}
+
+func processPullOptions(opts []PullOption) *pullOptions {
+	po := &pullOptions{
+		maxExtension: DefaultMaxExtension,
+		maxPrefetch:  DefaultMaxPrefetch,
+	}
+
+	for _, o := range opts {
+		o.setOptions(po)
+	}
+
+	return po
+}
+
+type maxPrefetch int32
+
+func (max maxPrefetch) setOptions(o *pullOptions) {
+	if o.maxPrefetch = int32(max); o.maxPrefetch < 1 {
+		o.maxPrefetch = 1
+	}
+}
+
+// MaxPrefetch returns a PullOption that limits Message prefetching.
+//
+// For performance reasons, the pubsub library may prefetch a pool of Messages
+// to be returned serially from MessageIterator.Next. MaxPrefetch is used to limit
+// the size of this pool.
+// +// If num is less than 1, it will be treated as if it were 1. +func MaxPrefetch(num int) PullOption { + return maxPrefetch(trunc32(int64(num))) +} + +type maxExtension time.Duration + +func (max maxExtension) setOptions(o *pullOptions) { + if o.maxExtension = time.Duration(max); o.maxExtension < 0 { + o.maxExtension = 0 + } +} + +// MaxExtension returns a PullOption that limits how long acks deadlines are +// extended for. +// +// A MessageIterator will automatically extend the ack deadline of all fetched +// Messages for the duration specified. Automatic deadline extension may be +// disabled by specifying a duration of 0. +func MaxExtension(duration time.Duration) PullOption { + return maxExtension(duration) +} + +// CreateSubscription creates a new subscription on a topic. +// +// name is the name of the subscription to create. It must start with a letter, +// and contain only letters ([A-Za-z]), numbers ([0-9]), dashes (-), +// underscores (_), periods (.), tildes (~), plus (+) or percent signs (%). It +// must be between 3 and 255 characters in length, and must not start with +// "goog". +// +// topic is the topic from which the subscription should receive messages. It +// need not belong to the same project as the subscription. +// +// ackDeadline is the maximum time after a subscriber receives a message before +// the subscriber should acknowledge the message. It must be between 10 and 600 +// seconds (inclusive), and is rounded down to the nearest second. If the +// provided ackDeadline is 0, then the default value of 10 seconds is used. +// Note: messages which are obtained via a MessageIterator need not be +// acknowledged within this deadline, as the deadline will be automatically +// extended. +// +// pushConfig may be set to configure this subscription for push delivery. +// +// If the subscription already exists an error will be returned. 
+func (c *Client) CreateSubscription(ctx context.Context, id string, topic *Topic, ackDeadline time.Duration, pushConfig *PushConfig) (*Subscription, error) { + if ackDeadline == 0 { + ackDeadline = 10 * time.Second + } + if d := ackDeadline.Seconds(); d < 10 || d > 600 { + return nil, fmt.Errorf("ack deadline must be between 10 and 600 seconds; got: %v", d) + } + + sub := c.Subscription(id) + err := c.s.createSubscription(ctx, topic.name, sub.name, ackDeadline, pushConfig) + return sub, err +} diff --git a/vendor/cloud.google.com/go/pubsub/subscription_test.go b/vendor/cloud.google.com/go/pubsub/subscription_test.go new file mode 100644 index 00000000..2c1c8978 --- /dev/null +++ b/vendor/cloud.google.com/go/pubsub/subscription_test.go @@ -0,0 +1,151 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package pubsub + +import ( + "reflect" + "testing" + + "golang.org/x/net/context" + + "google.golang.org/api/iterator" +) + +type subListService struct { + service + subs []string + err error + + t *testing.T // for error logging. 
+} + +func (s *subListService) newNextStringFunc() nextStringFunc { + return func() (string, error) { + if len(s.subs) == 0 { + return "", iterator.Done + } + sn := s.subs[0] + s.subs = s.subs[1:] + return sn, s.err + } +} + +func (s *subListService) listProjectSubscriptions(ctx context.Context, projName string) nextStringFunc { + if projName != "projects/projid" { + s.t.Fatalf("unexpected call: projName: %q", projName) + return nil + } + return s.newNextStringFunc() +} + +func (s *subListService) listTopicSubscriptions(ctx context.Context, topicName string) nextStringFunc { + if topicName != "projects/projid/topics/topic" { + s.t.Fatalf("unexpected call: topicName: %q", topicName) + return nil + } + return s.newNextStringFunc() +} + +// All returns the remaining subscriptions from this iterator. +func slurpSubs(it *SubscriptionIterator) ([]*Subscription, error) { + var subs []*Subscription + for { + switch sub, err := it.Next(); err { + case nil: + subs = append(subs, sub) + case iterator.Done: + return subs, nil + default: + return nil, err + } + } +} + +func TestSubscriptionID(t *testing.T) { + const id = "id" + serv := &subListService{ + subs: []string{"projects/projid/subscriptions/s1", "projects/projid/subscriptions/s2"}, + t: t, + } + c := &Client{projectID: "projid", s: serv} + s := c.Subscription(id) + if got, want := s.ID(), id; got != want { + t.Errorf("Subscription.ID() = %q; want %q", got, want) + } + want := []string{"s1", "s2"} + subs, err := slurpSubs(c.Subscriptions(context.Background())) + if err != nil { + t.Errorf("error listing subscriptions: %v", err) + } + for i, s := range subs { + if got, want := s.ID(), want[i]; got != want { + t.Errorf("Subscription.ID() = %q; want %q", got, want) + } + } +} + +func TestListProjectSubscriptions(t *testing.T) { + snames := []string{"projects/projid/subscriptions/s1", "projects/projid/subscriptions/s2", + "projects/projid/subscriptions/s3"} + s := &subListService{subs: snames, t: t} + c := 
&Client{projectID: "projid", s: s} + subs, err := slurpSubs(c.Subscriptions(context.Background())) + if err != nil { + t.Errorf("error listing subscriptions: %v", err) + } + got := subNames(subs) + want := []string{ + "projects/projid/subscriptions/s1", + "projects/projid/subscriptions/s2", + "projects/projid/subscriptions/s3"} + if !reflect.DeepEqual(got, want) { + t.Errorf("sub list: got: %v, want: %v", got, want) + } + if len(s.subs) != 0 { + t.Errorf("outstanding subs: %v", s.subs) + } +} + +func TestListTopicSubscriptions(t *testing.T) { + snames := []string{"projects/projid/subscriptions/s1", "projects/projid/subscriptions/s2", + "projects/projid/subscriptions/s3"} + s := &subListService{subs: snames, t: t} + c := &Client{projectID: "projid", s: s} + subs, err := slurpSubs(c.Topic("topic").Subscriptions(context.Background())) + if err != nil { + t.Errorf("error listing subscriptions: %v", err) + } + got := subNames(subs) + want := []string{ + "projects/projid/subscriptions/s1", + "projects/projid/subscriptions/s2", + "projects/projid/subscriptions/s3"} + if !reflect.DeepEqual(got, want) { + t.Errorf("sub list: got: %v, want: %v", got, want) + } + if len(s.subs) != 0 { + t.Errorf("outstanding subs: %v", s.subs) + } +} + +func subNames(subs []*Subscription) []string { + var names []string + + for _, sub := range subs { + names = append(names, sub.name) + + } + return names +} diff --git a/vendor/cloud.google.com/go/pubsub/topic.go b/vendor/cloud.google.com/go/pubsub/topic.go new file mode 100644 index 00000000..6d6bc1a3 --- /dev/null +++ b/vendor/cloud.google.com/go/pubsub/topic.go @@ -0,0 +1,132 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package pubsub + +import ( + "fmt" + "strings" + + "cloud.google.com/go/iam" + "golang.org/x/net/context" +) + +const MaxPublishBatchSize = 1000 + +// Topic is a reference to a PubSub topic. +type Topic struct { + s service + + // The fully qualified identifier for the topic, in the format "projects//topics/" + name string +} + +// CreateTopic creates a new topic. +// The specified topic ID must start with a letter, and contain only letters +// ([A-Za-z]), numbers ([0-9]), dashes (-), underscores (_), periods (.), +// tildes (~), plus (+) or percent signs (%). It must be between 3 and 255 +// characters in length, and must not start with "goog". +// If the topic already exists an error will be returned. +func (c *Client) CreateTopic(ctx context.Context, id string) (*Topic, error) { + t := c.Topic(id) + err := c.s.createTopic(ctx, t.name) + return t, err +} + +// Topic creates a reference to a topic. +func (c *Client) Topic(id string) *Topic { + return &Topic{ + s: c.s, + name: fmt.Sprintf("projects/%s/topics/%s", c.projectID, id), + } +} + +// Topics returns an iterator which returns all of the topics for the client's project. +func (c *Client) Topics(ctx context.Context) *TopicIterator { + return &TopicIterator{ + s: c.s, + next: c.s.listProjectTopics(ctx, c.fullyQualifiedProjectName()), + } +} + +// TopicIterator is an iterator that returns a series of topics. +type TopicIterator struct { + s service + next nextStringFunc +} + +// Next returns the next topic. If there are no more topics, iterator.Done will be returned. 
+func (tps *TopicIterator) Next() (*Topic, error) {
+	topicName, err := tps.next()
+	if err != nil {
+		return nil, err
+	}
+	return &Topic{s: tps.s, name: topicName}, nil
+}
+
+// ID returns the unique identifier of the topic within its project.
+func (t *Topic) ID() string {
+	slash := strings.LastIndex(t.name, "/")
+	if slash == -1 {
+		// name is not a fully-qualified name.
+		panic("bad topic name")
+	}
+	return t.name[slash+1:]
+}
+
+// String returns the printable globally unique name for the topic.
+func (t *Topic) String() string {
+	return t.name
+}
+
+// Delete deletes the topic.
+func (t *Topic) Delete(ctx context.Context) error {
+	return t.s.deleteTopic(ctx, t.name)
+}
+
+// Exists reports whether the topic exists on the server.
+func (t *Topic) Exists(ctx context.Context) (bool, error) {
+	if t.name == "_deleted-topic_" {
+		return false, nil
+	}
+
+	return t.s.topicExists(ctx, t.name)
+}
+
+// Subscriptions returns an iterator which returns the subscriptions for this topic.
+func (t *Topic) Subscriptions(ctx context.Context) *SubscriptionIterator {
+	// NOTE: zero or more Subscriptions that are ultimately returned by this
+	// Subscriptions iterator may belong to a different project to t.
+	return &SubscriptionIterator{
+		s:    t.s,
+		next: t.s.listTopicSubscriptions(ctx, t.name),
+	}
+}
+
+// Publish publishes the supplied Messages to the topic.
+// If successful, the server-assigned message IDs are returned in the same order as the supplied Messages.
+// At most MaxPublishBatchSize messages may be supplied.
+func (t *Topic) Publish(ctx context.Context, msgs ...*Message) ([]string, error) { + if len(msgs) == 0 { + return nil, nil + } + if len(msgs) > MaxPublishBatchSize { + return nil, fmt.Errorf("pubsub: got %d messages, but maximum batch size is %d", len(msgs), MaxPublishBatchSize) + } + return t.s.publishMessages(ctx, t.name, msgs) +} + +func (t *Topic) IAM() *iam.Handle { + return t.s.iamHandle(t.name) +} diff --git a/vendor/cloud.google.com/go/pubsub/topic_test.go b/vendor/cloud.google.com/go/pubsub/topic_test.go new file mode 100644 index 00000000..32e4c9d6 --- /dev/null +++ b/vendor/cloud.google.com/go/pubsub/topic_test.go @@ -0,0 +1,127 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package pubsub + +import ( + "reflect" + "testing" + + "golang.org/x/net/context" + + "google.golang.org/api/iterator" +) + +type topicListService struct { + service + topics []string + err error + t *testing.T // for error logging. 
+} + +func (s *topicListService) newNextStringFunc() nextStringFunc { + return func() (string, error) { + if len(s.topics) == 0 { + return "", iterator.Done + } + tn := s.topics[0] + s.topics = s.topics[1:] + return tn, s.err + } +} + +func (s *topicListService) listProjectTopics(ctx context.Context, projName string) nextStringFunc { + if projName != "projects/projid" { + s.t.Fatalf("unexpected call: projName: %q", projName) + return nil + } + return s.newNextStringFunc() +} + +func checkTopicListing(t *testing.T, want []string) { + s := &topicListService{topics: want, t: t} + c := &Client{projectID: "projid", s: s} + topics, err := slurpTopics(c.Topics(context.Background())) + if err != nil { + t.Errorf("error listing topics: %v", err) + } + got := topicNames(topics) + if !reflect.DeepEqual(got, want) { + t.Errorf("topic list: got: %v, want: %v", got, want) + } + if len(s.topics) != 0 { + t.Errorf("outstanding topics: %v", s.topics) + } +} + +// All returns the remaining topics from this iterator. 
+func slurpTopics(it *TopicIterator) ([]*Topic, error) { + var topics []*Topic + for { + switch topic, err := it.Next(); err { + case nil: + topics = append(topics, topic) + case iterator.Done: + return topics, nil + default: + return nil, err + } + } +} + +func TestTopicID(t *testing.T) { + const id = "id" + serv := &topicListService{ + topics: []string{"projects/projid/topics/t1", "projects/projid/topics/t2"}, + t: t, + } + c := &Client{projectID: "projid", s: serv} + s := c.Topic(id) + if got, want := s.ID(), id; got != want { + t.Errorf("Token.ID() = %q; want %q", got, want) + } + want := []string{"t1", "t2"} + topics, err := slurpTopics(c.Topics(context.Background())) + if err != nil { + t.Errorf("error listing topics: %v", err) + } + for i, topic := range topics { + if got, want := topic.ID(), want[i]; got != want { + t.Errorf("Token.ID() = %q; want %q", got, want) + } + } +} + +func TestListTopics(t *testing.T) { + checkTopicListing(t, []string{ + "projects/projid/topics/t1", + "projects/projid/topics/t2", + "projects/projid/topics/t3", + "projects/projid/topics/t4"}) +} + +func TestListCompletelyEmptyTopics(t *testing.T) { + var want []string + checkTopicListing(t, want) +} + +func topicNames(topics []*Topic) []string { + var names []string + + for _, topic := range topics { + names = append(names, topic.name) + + } + return names +} diff --git a/vendor/cloud.google.com/go/pubsub/utils_test.go b/vendor/cloud.google.com/go/pubsub/utils_test.go new file mode 100644 index 00000000..d0b54202 --- /dev/null +++ b/vendor/cloud.google.com/go/pubsub/utils_test.go @@ -0,0 +1,63 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package pubsub
+
+import (
+	"time"
+
+	"golang.org/x/net/context"
+)
+
+type modDeadlineCall struct {
+	subName  string
+	deadline time.Duration
+	ackIDs   []string
+}
+
+type acknowledgeCall struct {
+	subName string
+	ackIDs  []string
+}
+
+type testService struct {
+	service
+
+	// The arguments of each call to modifyAckDeadline are written to this channel.
+	modDeadlineCalled chan modDeadlineCall
+
+	// The arguments of each call to acknowledge are written to this channel.
+	acknowledgeCalled chan acknowledgeCall
+}
+
+func (s *testService) modifyAckDeadline(ctx context.Context, subName string, deadline time.Duration, ackIDs []string) error {
+	s.modDeadlineCalled <- modDeadlineCall{
+		subName:  subName,
+		deadline: deadline,
+		ackIDs:   ackIDs,
+	}
+	return nil
+}
+
+func (s *testService) acknowledge(ctx context.Context, subName string, ackIDs []string) error {
+	s.acknowledgeCalled <- acknowledgeCall{
+		subName: subName,
+		ackIDs:  ackIDs,
+	}
+	return nil
+}
+
+func (s *testService) splitAckIDs(ids []string) ([]string, []string) {
+	return ids, nil
+}
diff --git a/vendor/cloud.google.com/go/spanner/admin/database/apiv1/database_admin_client.go b/vendor/cloud.google.com/go/spanner/admin/database/apiv1/database_admin_client.go
new file mode 100644
index 00000000..7c508757
--- /dev/null
+++ b/vendor/cloud.google.com/go/spanner/admin/database/apiv1/database_admin_client.go
@@ -0,0 +1,533 @@
+// Copyright 2017, Google Inc. All rights reserved.
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package database + +import ( + "math" + "time" + + "cloud.google.com/go/internal/version" + "cloud.google.com/go/longrunning" + gax "github.com/googleapis/gax-go" + "golang.org/x/net/context" + "google.golang.org/api/iterator" + "google.golang.org/api/option" + "google.golang.org/api/transport" + iampb "google.golang.org/genproto/googleapis/iam/v1" + longrunningpb "google.golang.org/genproto/googleapis/longrunning" + databasepb "google.golang.org/genproto/googleapis/spanner/admin/database/v1" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" +) + +var ( + databaseAdminInstancePathTemplate = gax.MustCompilePathTemplate("projects/{project}/instances/{instance}") + databaseAdminDatabasePathTemplate = gax.MustCompilePathTemplate("projects/{project}/instances/{instance}/databases/{database}") +) + +// DatabaseAdminCallOptions contains the retry settings for each method of DatabaseAdminClient. 
+type DatabaseAdminCallOptions struct { + ListDatabases []gax.CallOption + CreateDatabase []gax.CallOption + GetDatabase []gax.CallOption + UpdateDatabaseDdl []gax.CallOption + DropDatabase []gax.CallOption + GetDatabaseDdl []gax.CallOption + SetIamPolicy []gax.CallOption + GetIamPolicy []gax.CallOption + TestIamPermissions []gax.CallOption +} + +func defaultDatabaseAdminClientOptions() []option.ClientOption { + return []option.ClientOption{ + option.WithEndpoint("spanner.googleapis.com:443"), + option.WithScopes( + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/spanner.admin", + ), + } +} + +func defaultDatabaseAdminCallOptions() *DatabaseAdminCallOptions { + retry := map[[2]string][]gax.CallOption{ + {"default", "idempotent"}: { + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 1000 * time.Millisecond, + Max: 32000 * time.Millisecond, + Multiplier: 1.3, + }) + }), + }, + {"default", "non_idempotent"}: { + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unavailable, + }, gax.Backoff{ + Initial: 1000 * time.Millisecond, + Max: 32000 * time.Millisecond, + Multiplier: 1.3, + }) + }), + }, + } + return &DatabaseAdminCallOptions{ + ListDatabases: retry[[2]string{"default", "idempotent"}], + CreateDatabase: retry[[2]string{"default", "non_idempotent"}], + GetDatabase: retry[[2]string{"default", "idempotent"}], + UpdateDatabaseDdl: retry[[2]string{"default", "idempotent"}], + DropDatabase: retry[[2]string{"default", "idempotent"}], + GetDatabaseDdl: retry[[2]string{"default", "idempotent"}], + SetIamPolicy: retry[[2]string{"default", "non_idempotent"}], + GetIamPolicy: retry[[2]string{"default", "idempotent"}], + TestIamPermissions: retry[[2]string{"default", "non_idempotent"}], + } +} + +// DatabaseAdminClient is a client for interacting with Cloud Spanner Database Admin API. 
+type DatabaseAdminClient struct { + // The connection to the service. + conn *grpc.ClientConn + + // The gRPC API client. + databaseAdminClient databasepb.DatabaseAdminClient + + // The call options for this service. + CallOptions *DatabaseAdminCallOptions + + // The metadata to be sent with each request. + xGoogHeader string +} + +// NewDatabaseAdminClient creates a new database admin client. +// +// Cloud Spanner Database Admin API +// +// The Cloud Spanner Database Admin API can be used to create, drop, and +// list databases. It also enables updating the schema of pre-existing +// databases. +func NewDatabaseAdminClient(ctx context.Context, opts ...option.ClientOption) (*DatabaseAdminClient, error) { + conn, err := transport.DialGRPC(ctx, append(defaultDatabaseAdminClientOptions(), opts...)...) + if err != nil { + return nil, err + } + c := &DatabaseAdminClient{ + conn: conn, + CallOptions: defaultDatabaseAdminCallOptions(), + + databaseAdminClient: databasepb.NewDatabaseAdminClient(conn), + } + c.SetGoogleClientInfo() + return c, nil +} + +// Connection returns the client's connection to the API service. +func (c *DatabaseAdminClient) Connection() *grpc.ClientConn { + return c.conn +} + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *DatabaseAdminClient) Close() error { + return c.conn.Close() +} + +// SetGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *DatabaseAdminClient) SetGoogleClientInfo(keyval ...string) { + kv := append([]string{"gl-go", version.Go()}, keyval...) + kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", "") + c.xGoogHeader = gax.XGoogHeader(kv...) +} + +// DatabaseAdminInstancePath returns the path for the instance resource. 
+func DatabaseAdminInstancePath(project, instance string) string { + path, err := databaseAdminInstancePathTemplate.Render(map[string]string{ + "project": project, + "instance": instance, + }) + if err != nil { + panic(err) + } + return path +} + +// DatabaseAdminDatabasePath returns the path for the database resource. +func DatabaseAdminDatabasePath(project, instance, database string) string { + path, err := databaseAdminDatabasePathTemplate.Render(map[string]string{ + "project": project, + "instance": instance, + "database": database, + }) + if err != nil { + panic(err) + } + return path +} + +// ListDatabases lists Cloud Spanner databases. +func (c *DatabaseAdminClient) ListDatabases(ctx context.Context, req *databasepb.ListDatabasesRequest) *DatabaseIterator { + ctx = insertXGoog(ctx, c.xGoogHeader) + it := &DatabaseIterator{} + it.InternalFetch = func(pageSize int, pageToken string) ([]*databasepb.Database, string, error) { + var resp *databasepb.ListDatabasesResponse + req.PageToken = pageToken + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context) error { + var err error + resp, err = c.databaseAdminClient.ListDatabases(ctx, req) + return err + }, c.CallOptions.ListDatabases...) + if err != nil { + return nil, "", err + } + return resp.Databases, resp.NextPageToken, nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + return it +} + +// CreateDatabase creates a new Cloud Spanner database and starts to prepare it for serving. 
+// The returned [long-running operation][google.longrunning.Operation] will +// have a name of the format `/operations/` and +// can be used to track preparation of the database. The +// [metadata][google.longrunning.Operation.metadata] field type is +// [CreateDatabaseMetadata][google.spanner.admin.database.v1.CreateDatabaseMetadata]. The +// [response][google.longrunning.Operation.response] field type is +// [Database][google.spanner.admin.database.v1.Database], if successful. +func (c *DatabaseAdminClient) CreateDatabase(ctx context.Context, req *databasepb.CreateDatabaseRequest) (*DatabaseOperation, error) { + ctx = insertXGoog(ctx, c.xGoogHeader) + var resp *longrunningpb.Operation + err := gax.Invoke(ctx, func(ctx context.Context) error { + var err error + resp, err = c.databaseAdminClient.CreateDatabase(ctx, req) + return err + }, c.CallOptions.CreateDatabase...) + if err != nil { + return nil, err + } + return &DatabaseOperation{ + lro: longrunning.InternalNewOperation(c.Connection(), resp), + }, nil +} + +// GetDatabase gets the state of a Cloud Spanner database. +func (c *DatabaseAdminClient) GetDatabase(ctx context.Context, req *databasepb.GetDatabaseRequest) (*databasepb.Database, error) { + ctx = insertXGoog(ctx, c.xGoogHeader) + var resp *databasepb.Database + err := gax.Invoke(ctx, func(ctx context.Context) error { + var err error + resp, err = c.databaseAdminClient.GetDatabase(ctx, req) + return err + }, c.CallOptions.GetDatabase...) + if err != nil { + return nil, err + } + return resp, nil +} + +// UpdateDatabaseDdl updates the schema of a Cloud Spanner database by +// creating/altering/dropping tables, columns, indexes, etc. The returned +// [long-running operation][google.longrunning.Operation] will have a name of +// the format `/operations/` and can be used to +// track execution of the schema change(s). 
The +// [metadata][google.longrunning.Operation.metadata] field type is +// [UpdateDatabaseDdlMetadata][google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata]. The operation has no response. +func (c *DatabaseAdminClient) UpdateDatabaseDdl(ctx context.Context, req *databasepb.UpdateDatabaseDdlRequest) (*EmptyOperation, error) { + ctx = insertXGoog(ctx, c.xGoogHeader) + var resp *longrunningpb.Operation + err := gax.Invoke(ctx, func(ctx context.Context) error { + var err error + resp, err = c.databaseAdminClient.UpdateDatabaseDdl(ctx, req) + return err + }, c.CallOptions.UpdateDatabaseDdl...) + if err != nil { + return nil, err + } + return &EmptyOperation{ + lro: longrunning.InternalNewOperation(c.Connection(), resp), + }, nil +} + +// DropDatabase drops (aka deletes) a Cloud Spanner database. +func (c *DatabaseAdminClient) DropDatabase(ctx context.Context, req *databasepb.DropDatabaseRequest) error { + ctx = insertXGoog(ctx, c.xGoogHeader) + err := gax.Invoke(ctx, func(ctx context.Context) error { + var err error + _, err = c.databaseAdminClient.DropDatabase(ctx, req) + return err + }, c.CallOptions.DropDatabase...) + return err +} + +// GetDatabaseDdl returns the schema of a Cloud Spanner database as a list of formatted +// DDL statements. This method does not show pending schema updates, those may +// be queried using the [Operations][google.longrunning.Operations] API. +func (c *DatabaseAdminClient) GetDatabaseDdl(ctx context.Context, req *databasepb.GetDatabaseDdlRequest) (*databasepb.GetDatabaseDdlResponse, error) { + ctx = insertXGoog(ctx, c.xGoogHeader) + var resp *databasepb.GetDatabaseDdlResponse + err := gax.Invoke(ctx, func(ctx context.Context) error { + var err error + resp, err = c.databaseAdminClient.GetDatabaseDdl(ctx, req) + return err + }, c.CallOptions.GetDatabaseDdl...) + if err != nil { + return nil, err + } + return resp, nil +} + +// SetIamPolicy sets the access control policy on a database resource. Replaces any +// existing policy. 
+// +// Authorization requires `spanner.databases.setIamPolicy` permission on +// [resource][google.iam.v1.SetIamPolicyRequest.resource]. +func (c *DatabaseAdminClient) SetIamPolicy(ctx context.Context, req *iampb.SetIamPolicyRequest) (*iampb.Policy, error) { + ctx = insertXGoog(ctx, c.xGoogHeader) + var resp *iampb.Policy + err := gax.Invoke(ctx, func(ctx context.Context) error { + var err error + resp, err = c.databaseAdminClient.SetIamPolicy(ctx, req) + return err + }, c.CallOptions.SetIamPolicy...) + if err != nil { + return nil, err + } + return resp, nil +} + +// GetIamPolicy gets the access control policy for a database resource. Returns an empty +// policy if a database exists but does not have a policy set. +// +// Authorization requires `spanner.databases.getIamPolicy` permission on +// [resource][google.iam.v1.GetIamPolicyRequest.resource]. +func (c *DatabaseAdminClient) GetIamPolicy(ctx context.Context, req *iampb.GetIamPolicyRequest) (*iampb.Policy, error) { + ctx = insertXGoog(ctx, c.xGoogHeader) + var resp *iampb.Policy + err := gax.Invoke(ctx, func(ctx context.Context) error { + var err error + resp, err = c.databaseAdminClient.GetIamPolicy(ctx, req) + return err + }, c.CallOptions.GetIamPolicy...) + if err != nil { + return nil, err + } + return resp, nil +} + +// TestIamPermissions returns permissions that the caller has on the specified database resource. +// +// Attempting this RPC on a non-existent Cloud Spanner database will result in +// a NOT_FOUND error if the user has `spanner.databases.list` permission on +// the containing Cloud Spanner instance. Otherwise returns an empty set of +// permissions. 
+func (c *DatabaseAdminClient) TestIamPermissions(ctx context.Context, req *iampb.TestIamPermissionsRequest) (*iampb.TestIamPermissionsResponse, error) { + ctx = insertXGoog(ctx, c.xGoogHeader) + var resp *iampb.TestIamPermissionsResponse + err := gax.Invoke(ctx, func(ctx context.Context) error { + var err error + resp, err = c.databaseAdminClient.TestIamPermissions(ctx, req) + return err + }, c.CallOptions.TestIamPermissions...) + if err != nil { + return nil, err + } + return resp, nil +} + +// DatabaseIterator manages a stream of *databasepb.Database. +type DatabaseIterator struct { + items []*databasepb.Database + pageInfo *iterator.PageInfo + nextFunc func() error + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*databasepb.Database, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *DatabaseIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. +func (it *DatabaseIterator) Next() (*databasepb.Database, error) { + var item *databasepb.Database + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *DatabaseIterator) bufLen() int { + return len(it.items) +} + +func (it *DatabaseIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} + +// DatabaseOperation manages a long-running operation yielding databasepb.Database. 
+type DatabaseOperation struct { + lro *longrunning.Operation +} + +// DatabaseOperation returns a new DatabaseOperation from a given name. +// The name must be that of a previously created DatabaseOperation, possibly from a different process. +func (c *DatabaseAdminClient) DatabaseOperation(name string) *DatabaseOperation { + return &DatabaseOperation{ + lro: longrunning.InternalNewOperation(c.Connection(), &longrunningpb.Operation{Name: name}), + } +} + +// Wait blocks until the long-running operation is completed, returning the response and any errors encountered. +// +// See documentation of Poll for error-handling information. +func (op *DatabaseOperation) Wait(ctx context.Context) (*databasepb.Database, error) { + var resp databasepb.Database + if err := op.lro.Wait(ctx, &resp); err != nil { + return nil, err + } + return &resp, nil +} + +// Poll fetches the latest state of the long-running operation. +// +// Poll also fetches the latest metadata, which can be retrieved by Metadata. +// +// If Poll fails, the error is returned and op is unmodified. If Poll succeeds and +// the operation has completed with failure, the error is returned and op.Done will return true. +// If Poll succeeds and the operation has completed successfully, +// op.Done will return true, and the response of the operation is returned. +// If Poll succeeds and the operation has not completed, the returned response and error are both nil. +func (op *DatabaseOperation) Poll(ctx context.Context) (*databasepb.Database, error) { + var resp databasepb.Database + if err := op.lro.Poll(ctx, &resp); err != nil { + return nil, err + } + if !op.Done() { + return nil, nil + } + return &resp, nil +} + +// Metadata returns metadata associated with the long-running operation. +// Metadata itself does not contact the server, but Poll does. +// To get the latest metadata, call this method after a successful call to Poll. +// If the metadata is not available, the returned metadata and error are both nil. 
+func (op *DatabaseOperation) Metadata() (*databasepb.CreateDatabaseMetadata, error) { + var meta databasepb.CreateDatabaseMetadata + if err := op.lro.Metadata(&meta); err == longrunning.ErrNoMetadata { + return nil, nil + } else if err != nil { + return nil, err + } + return &meta, nil +} + +// Done reports whether the long-running operation has completed. +func (op *DatabaseOperation) Done() bool { + return op.lro.Done() +} + +// Name returns the name of the long-running operation. +// The name is assigned by the server and is unique within the service from which the operation is created. +func (op *DatabaseOperation) Name() string { + return op.lro.Name() +} + +// EmptyOperation manages a long-running operation with no result. +type EmptyOperation struct { + lro *longrunning.Operation +} + +// EmptyOperation returns a new EmptyOperation from a given name. +// The name must be that of a previously created EmptyOperation, possibly from a different process. +func (c *DatabaseAdminClient) EmptyOperation(name string) *EmptyOperation { + return &EmptyOperation{ + lro: longrunning.InternalNewOperation(c.Connection(), &longrunningpb.Operation{Name: name}), + } +} + +// Wait blocks until the long-running operation is completed, returning any error encountered. +// +// See documentation of Poll for error-handling information. +func (op *EmptyOperation) Wait(ctx context.Context) error { + return op.lro.Wait(ctx, nil) +} + +// Poll fetches the latest state of the long-running operation. +// +// Poll also fetches the latest metadata, which can be retrieved by Metadata. +// +// If Poll fails, the error is returned and op is unmodified. If Poll succeeds and +// the operation has completed with failure, the error is returned and op.Done will return true. +// If Poll succeeds and the operation has completed successfully, op.Done will return true. 
+func (op *EmptyOperation) Poll(ctx context.Context) error { + return op.lro.Poll(ctx, nil) +} + +// Metadata returns metadata associated with the long-running operation. +// Metadata itself does not contact the server, but Poll does. +// To get the latest metadata, call this method after a successful call to Poll. +// If the metadata is not available, the returned metadata and error are both nil. +func (op *EmptyOperation) Metadata() (*databasepb.UpdateDatabaseDdlMetadata, error) { + var meta databasepb.UpdateDatabaseDdlMetadata + if err := op.lro.Metadata(&meta); err == longrunning.ErrNoMetadata { + return nil, nil + } else if err != nil { + return nil, err + } + return &meta, nil +} + +// Done reports whether the long-running operation has completed. +func (op *EmptyOperation) Done() bool { + return op.lro.Done() +} + +// Name returns the name of the long-running operation. +// The name is assigned by the server and is unique within the service from which the operation is created. +func (op *EmptyOperation) Name() string { + return op.lro.Name() +} diff --git a/vendor/cloud.google.com/go/spanner/admin/database/apiv1/database_admin_client_example_test.go b/vendor/cloud.google.com/go/spanner/admin/database/apiv1/database_admin_client_example_test.go new file mode 100644 index 00000000..0769d119 --- /dev/null +++ b/vendor/cloud.google.com/go/spanner/admin/database/apiv1/database_admin_client_example_test.go @@ -0,0 +1,204 @@ +// Copyright 2017, Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package database_test + +import ( + "cloud.google.com/go/spanner/admin/database/apiv1" + "golang.org/x/net/context" + iampb "google.golang.org/genproto/googleapis/iam/v1" + databasepb "google.golang.org/genproto/googleapis/spanner/admin/database/v1" +) + +func ExampleNewDatabaseAdminClient() { + ctx := context.Background() + c, err := database.NewDatabaseAdminClient(ctx) + if err != nil { + // TODO: Handle error. + } + // TODO: Use client. + _ = c +} + +func ExampleDatabaseAdminClient_ListDatabases() { + ctx := context.Background() + c, err := database.NewDatabaseAdminClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &databasepb.ListDatabasesRequest{ + // TODO: Fill request struct fields. + } + it := c.ListDatabases(ctx, req) + for { + resp, err := it.Next() + if err != nil { + // TODO: Handle error. + break + } + // TODO: Use resp. + _ = resp + } +} + +func ExampleDatabaseAdminClient_CreateDatabase() { + ctx := context.Background() + c, err := database.NewDatabaseAdminClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &databasepb.CreateDatabaseRequest{ + // TODO: Fill request struct fields. + } + op, err := c.CreateDatabase(ctx, req) + if err != nil { + // TODO: Handle error. + } + + resp, err := op.Wait(ctx) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleDatabaseAdminClient_GetDatabase() { + ctx := context.Background() + c, err := database.NewDatabaseAdminClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &databasepb.GetDatabaseRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.GetDatabase(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. 
+ _ = resp +} + +func ExampleDatabaseAdminClient_UpdateDatabaseDdl() { + ctx := context.Background() + c, err := database.NewDatabaseAdminClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &databasepb.UpdateDatabaseDdlRequest{ + // TODO: Fill request struct fields. + } + op, err := c.UpdateDatabaseDdl(ctx, req) + if err != nil { + // TODO: Handle error. + } + + err = op.Wait(ctx) + // TODO: Handle error. +} + +func ExampleDatabaseAdminClient_DropDatabase() { + ctx := context.Background() + c, err := database.NewDatabaseAdminClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &databasepb.DropDatabaseRequest{ + // TODO: Fill request struct fields. + } + err = c.DropDatabase(ctx, req) + if err != nil { + // TODO: Handle error. + } +} + +func ExampleDatabaseAdminClient_GetDatabaseDdl() { + ctx := context.Background() + c, err := database.NewDatabaseAdminClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &databasepb.GetDatabaseDdlRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.GetDatabaseDdl(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleDatabaseAdminClient_SetIamPolicy() { + ctx := context.Background() + c, err := database.NewDatabaseAdminClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &iampb.SetIamPolicyRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.SetIamPolicy(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleDatabaseAdminClient_GetIamPolicy() { + ctx := context.Background() + c, err := database.NewDatabaseAdminClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &iampb.GetIamPolicyRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.GetIamPolicy(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. 
+ _ = resp +} + +func ExampleDatabaseAdminClient_TestIamPermissions() { + ctx := context.Background() + c, err := database.NewDatabaseAdminClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &iampb.TestIamPermissionsRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.TestIamPermissions(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} diff --git a/vendor/cloud.google.com/go/spanner/admin/database/apiv1/doc.go b/vendor/cloud.google.com/go/spanner/admin/database/apiv1/doc.go new file mode 100644 index 00000000..b0ef0f3d --- /dev/null +++ b/vendor/cloud.google.com/go/spanner/admin/database/apiv1/doc.go @@ -0,0 +1,32 @@ +// Copyright 2017, Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +// Package database is an experimental, auto-generated package for the +// database API. 
+// +package database // import "cloud.google.com/go/spanner/admin/database/apiv1" + +import ( + "golang.org/x/net/context" + "google.golang.org/grpc/metadata" +) + +func insertXGoog(ctx context.Context, val string) context.Context { + md, _ := metadata.FromContext(ctx) + md = md.Copy() + md["x-goog-api-client"] = []string{val} + return metadata.NewContext(ctx, md) +} diff --git a/vendor/cloud.google.com/go/spanner/admin/database/apiv1/mock_test.go b/vendor/cloud.google.com/go/spanner/admin/database/apiv1/mock_test.go new file mode 100644 index 00000000..37bc0a33 --- /dev/null +++ b/vendor/cloud.google.com/go/spanner/admin/database/apiv1/mock_test.go @@ -0,0 +1,740 @@ +// Copyright 2017, Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. 
+ +package database + +import ( + google_protobuf "github.com/golang/protobuf/ptypes/empty" + iampb "google.golang.org/genproto/googleapis/iam/v1" + longrunningpb "google.golang.org/genproto/googleapis/longrunning" + databasepb "google.golang.org/genproto/googleapis/spanner/admin/database/v1" +) + +import ( + "flag" + "io" + "log" + "net" + "os" + "testing" + + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes" + "golang.org/x/net/context" + "google.golang.org/api/option" + status "google.golang.org/genproto/googleapis/rpc/status" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" +) + +var _ = io.EOF +var _ = ptypes.MarshalAny +var _ status.Status + +type mockDatabaseAdminServer struct { + // Embed for forward compatibility. + // Tests will keep working if more methods are added + // in the future. + databasepb.DatabaseAdminServer + + reqs []proto.Message + + // If set, all calls return this error. + err error + + // responses to return if err == nil + resps []proto.Message +} + +func (s *mockDatabaseAdminServer) ListDatabases(_ context.Context, req *databasepb.ListDatabasesRequest) (*databasepb.ListDatabasesResponse, error) { + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*databasepb.ListDatabasesResponse), nil +} + +func (s *mockDatabaseAdminServer) CreateDatabase(_ context.Context, req *databasepb.CreateDatabaseRequest) (*longrunningpb.Operation, error) { + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*longrunningpb.Operation), nil +} + +func (s *mockDatabaseAdminServer) GetDatabase(_ context.Context, req *databasepb.GetDatabaseRequest) (*databasepb.Database, error) { + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*databasepb.Database), nil +} + +func (s *mockDatabaseAdminServer) UpdateDatabaseDdl(_ context.Context, req *databasepb.UpdateDatabaseDdlRequest) (*longrunningpb.Operation, 
error) { + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*longrunningpb.Operation), nil +} + +func (s *mockDatabaseAdminServer) DropDatabase(_ context.Context, req *databasepb.DropDatabaseRequest) (*google_protobuf.Empty, error) { + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*google_protobuf.Empty), nil +} + +func (s *mockDatabaseAdminServer) GetDatabaseDdl(_ context.Context, req *databasepb.GetDatabaseDdlRequest) (*databasepb.GetDatabaseDdlResponse, error) { + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*databasepb.GetDatabaseDdlResponse), nil +} + +func (s *mockDatabaseAdminServer) SetIamPolicy(_ context.Context, req *iampb.SetIamPolicyRequest) (*iampb.Policy, error) { + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*iampb.Policy), nil +} + +func (s *mockDatabaseAdminServer) GetIamPolicy(_ context.Context, req *iampb.GetIamPolicyRequest) (*iampb.Policy, error) { + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*iampb.Policy), nil +} + +func (s *mockDatabaseAdminServer) TestIamPermissions(_ context.Context, req *iampb.TestIamPermissionsRequest) (*iampb.TestIamPermissionsResponse, error) { + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*iampb.TestIamPermissionsResponse), nil +} + +// clientOpt is the option tests should use to connect to the test server. +// It is initialized by TestMain. 
+var clientOpt option.ClientOption + +var ( + mockDatabaseAdmin mockDatabaseAdminServer +) + +func TestMain(m *testing.M) { + flag.Parse() + + serv := grpc.NewServer() + databasepb.RegisterDatabaseAdminServer(serv, &mockDatabaseAdmin) + + lis, err := net.Listen("tcp", "localhost:0") + if err != nil { + log.Fatal(err) + } + go serv.Serve(lis) + + conn, err := grpc.Dial(lis.Addr().String(), grpc.WithInsecure()) + if err != nil { + log.Fatal(err) + } + clientOpt = option.WithGRPCConn(conn) + + os.Exit(m.Run()) +} + +func TestDatabaseAdminListDatabases(t *testing.T) { + var nextPageToken string = "" + var databasesElement *databasepb.Database = &databasepb.Database{} + var databases = []*databasepb.Database{databasesElement} + var expectedResponse = &databasepb.ListDatabasesResponse{ + NextPageToken: nextPageToken, + Databases: databases, + } + + mockDatabaseAdmin.err = nil + mockDatabaseAdmin.reqs = nil + + mockDatabaseAdmin.resps = append(mockDatabaseAdmin.resps[:0], expectedResponse) + + var formattedParent string = DatabaseAdminInstancePath("[PROJECT]", "[INSTANCE]") + var request = &databasepb.ListDatabasesRequest{ + Parent: formattedParent, + } + + c, err := NewDatabaseAdminClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListDatabases(context.Background(), request).Next() + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockDatabaseAdmin.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + want := (interface{})(expectedResponse.Databases[0]) + got := (interface{})(resp) + var ok bool + + switch want := (want).(type) { + case proto.Message: + ok = proto.Equal(want, got.(proto.Message)) + default: + ok = want == got + } + if !ok { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestDatabaseAdminListDatabasesError(t *testing.T) { + errCode := codes.Internal + mockDatabaseAdmin.err = grpc.Errorf(errCode, "test error") + + var formattedParent 
string = DatabaseAdminInstancePath("[PROJECT]", "[INSTANCE]") + var request = &databasepb.ListDatabasesRequest{ + Parent: formattedParent, + } + + c, err := NewDatabaseAdminClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListDatabases(context.Background(), request).Next() + + if c := grpc.Code(err); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestDatabaseAdminCreateDatabase(t *testing.T) { + var name string = "name3373707" + var expectedResponse = &databasepb.Database{ + Name: name, + } + + mockDatabaseAdmin.err = nil + mockDatabaseAdmin.reqs = nil + + any, err := ptypes.MarshalAny(expectedResponse) + if err != nil { + t.Fatal(err) + } + mockDatabaseAdmin.resps = append(mockDatabaseAdmin.resps[:0], &longrunningpb.Operation{ + Name: "longrunning-test", + Done: true, + Result: &longrunningpb.Operation_Response{Response: any}, + }) + + var formattedParent string = DatabaseAdminInstancePath("[PROJECT]", "[INSTANCE]") + var createStatement string = "createStatement552974828" + var request = &databasepb.CreateDatabaseRequest{ + Parent: formattedParent, + CreateStatement: createStatement, + } + + c, err := NewDatabaseAdminClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + respLRO, err := c.CreateDatabase(context.Background(), request) + if err != nil { + t.Fatal(err) + } + resp, err := respLRO.Wait(context.Background()) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockDatabaseAdmin.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestDatabaseAdminCreateDatabaseError(t *testing.T) { + errCode := codes.Internal + mockDatabaseAdmin.err = nil + mockDatabaseAdmin.resps = append(mockDatabaseAdmin.resps[:0], &longrunningpb.Operation{ + Name: 
"longrunning-test", + Done: true, + Result: &longrunningpb.Operation_Error{ + Error: &status.Status{ + Code: int32(errCode), + Message: "test error", + }, + }, + }) + + var formattedParent string = DatabaseAdminInstancePath("[PROJECT]", "[INSTANCE]") + var createStatement string = "createStatement552974828" + var request = &databasepb.CreateDatabaseRequest{ + Parent: formattedParent, + CreateStatement: createStatement, + } + + c, err := NewDatabaseAdminClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + respLRO, err := c.CreateDatabase(context.Background(), request) + if err != nil { + t.Fatal(err) + } + resp, err := respLRO.Wait(context.Background()) + + if c := grpc.Code(err); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestDatabaseAdminGetDatabase(t *testing.T) { + var name2 string = "name2-1052831874" + var expectedResponse = &databasepb.Database{ + Name: name2, + } + + mockDatabaseAdmin.err = nil + mockDatabaseAdmin.reqs = nil + + mockDatabaseAdmin.resps = append(mockDatabaseAdmin.resps[:0], expectedResponse) + + var formattedName string = DatabaseAdminDatabasePath("[PROJECT]", "[INSTANCE]", "[DATABASE]") + var request = &databasepb.GetDatabaseRequest{ + Name: formattedName, + } + + c, err := NewDatabaseAdminClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.GetDatabase(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockDatabaseAdmin.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestDatabaseAdminGetDatabaseError(t *testing.T) { + errCode := codes.Internal + mockDatabaseAdmin.err = grpc.Errorf(errCode, "test error") + + var formattedName string = DatabaseAdminDatabasePath("[PROJECT]", "[INSTANCE]", "[DATABASE]") 
+ var request = &databasepb.GetDatabaseRequest{ + Name: formattedName, + } + + c, err := NewDatabaseAdminClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.GetDatabase(context.Background(), request) + + if c := grpc.Code(err); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestDatabaseAdminUpdateDatabaseDdl(t *testing.T) { + var expectedResponse *google_protobuf.Empty = &google_protobuf.Empty{} + + mockDatabaseAdmin.err = nil + mockDatabaseAdmin.reqs = nil + + any, err := ptypes.MarshalAny(expectedResponse) + if err != nil { + t.Fatal(err) + } + mockDatabaseAdmin.resps = append(mockDatabaseAdmin.resps[:0], &longrunningpb.Operation{ + Name: "longrunning-test", + Done: true, + Result: &longrunningpb.Operation_Response{Response: any}, + }) + + var formattedDatabase string = DatabaseAdminDatabasePath("[PROJECT]", "[INSTANCE]", "[DATABASE]") + var statements []string = nil + var request = &databasepb.UpdateDatabaseDdlRequest{ + Database: formattedDatabase, + Statements: statements, + } + + c, err := NewDatabaseAdminClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + respLRO, err := c.UpdateDatabaseDdl(context.Background(), request) + if err != nil { + t.Fatal(err) + } + err = respLRO.Wait(context.Background()) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockDatabaseAdmin.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + +} + +func TestDatabaseAdminUpdateDatabaseDdlError(t *testing.T) { + errCode := codes.Internal + mockDatabaseAdmin.err = nil + mockDatabaseAdmin.resps = append(mockDatabaseAdmin.resps[:0], &longrunningpb.Operation{ + Name: "longrunning-test", + Done: true, + Result: &longrunningpb.Operation_Error{ + Error: &status.Status{ + Code: int32(errCode), + Message: "test error", + }, + }, + }) + + var formattedDatabase string = DatabaseAdminDatabasePath("[PROJECT]", "[INSTANCE]", 
"[DATABASE]") + var statements []string = nil + var request = &databasepb.UpdateDatabaseDdlRequest{ + Database: formattedDatabase, + Statements: statements, + } + + c, err := NewDatabaseAdminClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + respLRO, err := c.UpdateDatabaseDdl(context.Background(), request) + if err != nil { + t.Fatal(err) + } + err = respLRO.Wait(context.Background()) + + if c := grpc.Code(err); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } +} +func TestDatabaseAdminDropDatabase(t *testing.T) { + var expectedResponse *google_protobuf.Empty = &google_protobuf.Empty{} + + mockDatabaseAdmin.err = nil + mockDatabaseAdmin.reqs = nil + + mockDatabaseAdmin.resps = append(mockDatabaseAdmin.resps[:0], expectedResponse) + + var formattedDatabase string = DatabaseAdminDatabasePath("[PROJECT]", "[INSTANCE]", "[DATABASE]") + var request = &databasepb.DropDatabaseRequest{ + Database: formattedDatabase, + } + + c, err := NewDatabaseAdminClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + err = c.DropDatabase(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockDatabaseAdmin.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + +} + +func TestDatabaseAdminDropDatabaseError(t *testing.T) { + errCode := codes.Internal + mockDatabaseAdmin.err = grpc.Errorf(errCode, "test error") + + var formattedDatabase string = DatabaseAdminDatabasePath("[PROJECT]", "[INSTANCE]", "[DATABASE]") + var request = &databasepb.DropDatabaseRequest{ + Database: formattedDatabase, + } + + c, err := NewDatabaseAdminClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + err = c.DropDatabase(context.Background(), request) + + if c := grpc.Code(err); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } +} +func TestDatabaseAdminGetDatabaseDdl(t *testing.T) { + var 
expectedResponse *databasepb.GetDatabaseDdlResponse = &databasepb.GetDatabaseDdlResponse{} + + mockDatabaseAdmin.err = nil + mockDatabaseAdmin.reqs = nil + + mockDatabaseAdmin.resps = append(mockDatabaseAdmin.resps[:0], expectedResponse) + + var formattedDatabase string = DatabaseAdminDatabasePath("[PROJECT]", "[INSTANCE]", "[DATABASE]") + var request = &databasepb.GetDatabaseDdlRequest{ + Database: formattedDatabase, + } + + c, err := NewDatabaseAdminClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.GetDatabaseDdl(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockDatabaseAdmin.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestDatabaseAdminGetDatabaseDdlError(t *testing.T) { + errCode := codes.Internal + mockDatabaseAdmin.err = grpc.Errorf(errCode, "test error") + + var formattedDatabase string = DatabaseAdminDatabasePath("[PROJECT]", "[INSTANCE]", "[DATABASE]") + var request = &databasepb.GetDatabaseDdlRequest{ + Database: formattedDatabase, + } + + c, err := NewDatabaseAdminClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.GetDatabaseDdl(context.Background(), request) + + if c := grpc.Code(err); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestDatabaseAdminSetIamPolicy(t *testing.T) { + var version int32 = 351608024 + var etag []byte = []byte("21") + var expectedResponse = &iampb.Policy{ + Version: version, + Etag: etag, + } + + mockDatabaseAdmin.err = nil + mockDatabaseAdmin.reqs = nil + + mockDatabaseAdmin.resps = append(mockDatabaseAdmin.resps[:0], expectedResponse) + + var formattedResource string = DatabaseAdminDatabasePath("[PROJECT]", "[INSTANCE]", "[DATABASE]") + var policy *iampb.Policy 
= &iampb.Policy{} + var request = &iampb.SetIamPolicyRequest{ + Resource: formattedResource, + Policy: policy, + } + + c, err := NewDatabaseAdminClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.SetIamPolicy(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockDatabaseAdmin.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestDatabaseAdminSetIamPolicyError(t *testing.T) { + errCode := codes.Internal + mockDatabaseAdmin.err = grpc.Errorf(errCode, "test error") + + var formattedResource string = DatabaseAdminDatabasePath("[PROJECT]", "[INSTANCE]", "[DATABASE]") + var policy *iampb.Policy = &iampb.Policy{} + var request = &iampb.SetIamPolicyRequest{ + Resource: formattedResource, + Policy: policy, + } + + c, err := NewDatabaseAdminClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.SetIamPolicy(context.Background(), request) + + if c := grpc.Code(err); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestDatabaseAdminGetIamPolicy(t *testing.T) { + var version int32 = 351608024 + var etag []byte = []byte("21") + var expectedResponse = &iampb.Policy{ + Version: version, + Etag: etag, + } + + mockDatabaseAdmin.err = nil + mockDatabaseAdmin.reqs = nil + + mockDatabaseAdmin.resps = append(mockDatabaseAdmin.resps[:0], expectedResponse) + + var formattedResource string = DatabaseAdminDatabasePath("[PROJECT]", "[INSTANCE]", "[DATABASE]") + var request = &iampb.GetIamPolicyRequest{ + Resource: formattedResource, + } + + c, err := NewDatabaseAdminClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.GetIamPolicy(context.Background(), request) + + if err != nil { + t.Fatal(err) + } 
+ + if want, got := request, mockDatabaseAdmin.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestDatabaseAdminGetIamPolicyError(t *testing.T) { + errCode := codes.Internal + mockDatabaseAdmin.err = grpc.Errorf(errCode, "test error") + + var formattedResource string = DatabaseAdminDatabasePath("[PROJECT]", "[INSTANCE]", "[DATABASE]") + var request = &iampb.GetIamPolicyRequest{ + Resource: formattedResource, + } + + c, err := NewDatabaseAdminClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.GetIamPolicy(context.Background(), request) + + if c := grpc.Code(err); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestDatabaseAdminTestIamPermissions(t *testing.T) { + var expectedResponse *iampb.TestIamPermissionsResponse = &iampb.TestIamPermissionsResponse{} + + mockDatabaseAdmin.err = nil + mockDatabaseAdmin.reqs = nil + + mockDatabaseAdmin.resps = append(mockDatabaseAdmin.resps[:0], expectedResponse) + + var formattedResource string = DatabaseAdminDatabasePath("[PROJECT]", "[INSTANCE]", "[DATABASE]") + var permissions []string = nil + var request = &iampb.TestIamPermissionsRequest{ + Resource: formattedResource, + Permissions: permissions, + } + + c, err := NewDatabaseAdminClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.TestIamPermissions(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockDatabaseAdmin.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestDatabaseAdminTestIamPermissionsError(t *testing.T) { + errCode := 
codes.Internal + mockDatabaseAdmin.err = grpc.Errorf(errCode, "test error") + + var formattedResource string = DatabaseAdminDatabasePath("[PROJECT]", "[INSTANCE]", "[DATABASE]") + var permissions []string = nil + var request = &iampb.TestIamPermissionsRequest{ + Resource: formattedResource, + Permissions: permissions, + } + + c, err := NewDatabaseAdminClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.TestIamPermissions(context.Background(), request) + + if c := grpc.Code(err); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} diff --git a/vendor/cloud.google.com/go/spanner/admin/instance/apiv1/doc.go b/vendor/cloud.google.com/go/spanner/admin/instance/apiv1/doc.go new file mode 100644 index 00000000..db4986e6 --- /dev/null +++ b/vendor/cloud.google.com/go/spanner/admin/instance/apiv1/doc.go @@ -0,0 +1,32 @@ +// Copyright 2017, Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +// Package instance is an experimental, auto-generated package for the +// instance API. 
+// +package instance // import "cloud.google.com/go/spanner/admin/instance/apiv1" + +import ( + "golang.org/x/net/context" + "google.golang.org/grpc/metadata" +) + +func insertXGoog(ctx context.Context, val string) context.Context { + md, _ := metadata.FromContext(ctx) + md = md.Copy() + md["x-goog-api-client"] = []string{val} + return metadata.NewContext(ctx, md) +} diff --git a/vendor/cloud.google.com/go/spanner/admin/instance/apiv1/instance_admin_client.go b/vendor/cloud.google.com/go/spanner/admin/instance/apiv1/instance_admin_client.go new file mode 100644 index 00000000..7dec1bb3 --- /dev/null +++ b/vendor/cloud.google.com/go/spanner/admin/instance/apiv1/instance_admin_client.go @@ -0,0 +1,649 @@ +// Copyright 2017, Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. 
+ +package instance + +import ( + "math" + "time" + + "cloud.google.com/go/internal/version" + "cloud.google.com/go/longrunning" + gax "github.com/googleapis/gax-go" + "golang.org/x/net/context" + "google.golang.org/api/iterator" + "google.golang.org/api/option" + "google.golang.org/api/transport" + iampb "google.golang.org/genproto/googleapis/iam/v1" + longrunningpb "google.golang.org/genproto/googleapis/longrunning" + instancepb "google.golang.org/genproto/googleapis/spanner/admin/instance/v1" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" +) + +var ( + instanceAdminProjectPathTemplate = gax.MustCompilePathTemplate("projects/{project}") + instanceAdminInstanceConfigPathTemplate = gax.MustCompilePathTemplate("projects/{project}/instanceConfigs/{instance_config}") + instanceAdminInstancePathTemplate = gax.MustCompilePathTemplate("projects/{project}/instances/{instance}") +) + +// InstanceAdminCallOptions contains the retry settings for each method of InstanceAdminClient. +type InstanceAdminCallOptions struct { + ListInstanceConfigs []gax.CallOption + GetInstanceConfig []gax.CallOption + ListInstances []gax.CallOption + GetInstance []gax.CallOption + CreateInstance []gax.CallOption + UpdateInstance []gax.CallOption + DeleteInstance []gax.CallOption + SetIamPolicy []gax.CallOption + GetIamPolicy []gax.CallOption + TestIamPermissions []gax.CallOption +} + +func defaultInstanceAdminClientOptions() []option.ClientOption { + return []option.ClientOption{ + option.WithEndpoint("spanner.googleapis.com:443"), + option.WithScopes( + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/spanner.admin", + ), + } +} + +func defaultInstanceAdminCallOptions() *InstanceAdminCallOptions { + retry := map[[2]string][]gax.CallOption{ + {"default", "idempotent"}: { + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 1000 * time.Millisecond, + Max: 
32000 * time.Millisecond, + Multiplier: 1.3, + }) + }), + }, + {"default", "non_idempotent"}: { + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unavailable, + }, gax.Backoff{ + Initial: 1000 * time.Millisecond, + Max: 32000 * time.Millisecond, + Multiplier: 1.3, + }) + }), + }, + } + return &InstanceAdminCallOptions{ + ListInstanceConfigs: retry[[2]string{"default", "idempotent"}], + GetInstanceConfig: retry[[2]string{"default", "idempotent"}], + ListInstances: retry[[2]string{"default", "idempotent"}], + GetInstance: retry[[2]string{"default", "idempotent"}], + CreateInstance: retry[[2]string{"default", "non_idempotent"}], + UpdateInstance: retry[[2]string{"default", "non_idempotent"}], + DeleteInstance: retry[[2]string{"default", "idempotent"}], + SetIamPolicy: retry[[2]string{"default", "non_idempotent"}], + GetIamPolicy: retry[[2]string{"default", "idempotent"}], + TestIamPermissions: retry[[2]string{"default", "non_idempotent"}], + } +} + +// InstanceAdminClient is a client for interacting with Cloud Spanner Instance Admin API. +type InstanceAdminClient struct { + // The connection to the service. + conn *grpc.ClientConn + + // The gRPC API client. + instanceAdminClient instancepb.InstanceAdminClient + + // The call options for this service. + CallOptions *InstanceAdminCallOptions + + // The metadata to be sent with each request. + xGoogHeader string +} + +// NewInstanceAdminClient creates a new instance admin client. +// +// Cloud Spanner Instance Admin API +// +// The Cloud Spanner Instance Admin API can be used to create, delete, +// modify and list instances. Instances are dedicated Cloud Spanner serving +// and storage resources to be used by Cloud Spanner databases. +// +// Each instance has a "configuration", which dictates where the +// serving resources for the Cloud Spanner instance are located (e.g., +// US-central, Europe). Configurations are created by Google based on +// resource availability. 
+// +// Cloud Spanner billing is based on the instances that exist and their +// sizes. After an instance exists, there are no additional +// per-database or per-operation charges for use of the instance +// (though there may be additional network bandwidth charges). +// Instances offer isolation: problems with databases in one instance +// will not affect other instances. However, within an instance +// databases can affect each other. For example, if one database in an +// instance receives a lot of requests and consumes most of the +// instance resources, fewer resources are available for other +// databases in that instance, and their performance may suffer. +func NewInstanceAdminClient(ctx context.Context, opts ...option.ClientOption) (*InstanceAdminClient, error) { + conn, err := transport.DialGRPC(ctx, append(defaultInstanceAdminClientOptions(), opts...)...) + if err != nil { + return nil, err + } + c := &InstanceAdminClient{ + conn: conn, + CallOptions: defaultInstanceAdminCallOptions(), + + instanceAdminClient: instancepb.NewInstanceAdminClient(conn), + } + c.SetGoogleClientInfo() + return c, nil +} + +// Connection returns the client's connection to the API service. +func (c *InstanceAdminClient) Connection() *grpc.ClientConn { + return c.conn +} + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *InstanceAdminClient) Close() error { + return c.conn.Close() +} + +// SetGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *InstanceAdminClient) SetGoogleClientInfo(keyval ...string) { + kv := append([]string{"gl-go", version.Go()}, keyval...) + kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", "") + c.xGoogHeader = gax.XGoogHeader(kv...) +} + +// InstanceAdminProjectPath returns the path for the project resource. 
+func InstanceAdminProjectPath(project string) string { + path, err := instanceAdminProjectPathTemplate.Render(map[string]string{ + "project": project, + }) + if err != nil { + panic(err) + } + return path +} + +// InstanceAdminInstanceConfigPath returns the path for the instance config resource. +func InstanceAdminInstanceConfigPath(project, instanceConfig string) string { + path, err := instanceAdminInstanceConfigPathTemplate.Render(map[string]string{ + "project": project, + "instance_config": instanceConfig, + }) + if err != nil { + panic(err) + } + return path +} + +// InstanceAdminInstancePath returns the path for the instance resource. +func InstanceAdminInstancePath(project, instance string) string { + path, err := instanceAdminInstancePathTemplate.Render(map[string]string{ + "project": project, + "instance": instance, + }) + if err != nil { + panic(err) + } + return path +} + +// ListInstanceConfigs lists the supported instance configurations for a given project. +func (c *InstanceAdminClient) ListInstanceConfigs(ctx context.Context, req *instancepb.ListInstanceConfigsRequest) *InstanceConfigIterator { + ctx = insertXGoog(ctx, c.xGoogHeader) + it := &InstanceConfigIterator{} + it.InternalFetch = func(pageSize int, pageToken string) ([]*instancepb.InstanceConfig, string, error) { + var resp *instancepb.ListInstanceConfigsResponse + req.PageToken = pageToken + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context) error { + var err error + resp, err = c.instanceAdminClient.ListInstanceConfigs(ctx, req) + return err + }, c.CallOptions.ListInstanceConfigs...) 
+ if err != nil { + return nil, "", err + } + return resp.InstanceConfigs, resp.NextPageToken, nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + return it +} + +// GetInstanceConfig gets information about a particular instance configuration. +func (c *InstanceAdminClient) GetInstanceConfig(ctx context.Context, req *instancepb.GetInstanceConfigRequest) (*instancepb.InstanceConfig, error) { + ctx = insertXGoog(ctx, c.xGoogHeader) + var resp *instancepb.InstanceConfig + err := gax.Invoke(ctx, func(ctx context.Context) error { + var err error + resp, err = c.instanceAdminClient.GetInstanceConfig(ctx, req) + return err + }, c.CallOptions.GetInstanceConfig...) + if err != nil { + return nil, err + } + return resp, nil +} + +// ListInstances lists all instances in the given project. +func (c *InstanceAdminClient) ListInstances(ctx context.Context, req *instancepb.ListInstancesRequest) *InstanceIterator { + ctx = insertXGoog(ctx, c.xGoogHeader) + it := &InstanceIterator{} + it.InternalFetch = func(pageSize int, pageToken string) ([]*instancepb.Instance, string, error) { + var resp *instancepb.ListInstancesResponse + req.PageToken = pageToken + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context) error { + var err error + resp, err = c.instanceAdminClient.ListInstances(ctx, req) + return err + }, c.CallOptions.ListInstances...) 
+ if err != nil { + return nil, "", err + } + return resp.Instances, resp.NextPageToken, nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + return it +} + +// GetInstance gets information about a particular instance. +func (c *InstanceAdminClient) GetInstance(ctx context.Context, req *instancepb.GetInstanceRequest) (*instancepb.Instance, error) { + ctx = insertXGoog(ctx, c.xGoogHeader) + var resp *instancepb.Instance + err := gax.Invoke(ctx, func(ctx context.Context) error { + var err error + resp, err = c.instanceAdminClient.GetInstance(ctx, req) + return err + }, c.CallOptions.GetInstance...) + if err != nil { + return nil, err + } + return resp, nil +} + +// CreateInstance creates an instance and begins preparing it to begin serving. The +// returned [long-running operation][google.longrunning.Operation] +// can be used to track the progress of preparing the new +// instance. The instance name is assigned by the caller. If the +// named instance already exists, `CreateInstance` returns +// `ALREADY_EXISTS`. +// +// Immediately upon completion of this request: +// +// * The instance is readable via the API, with all requested attributes +// but no allocated resources. Its state is `CREATING`. +// +// Until completion of the returned operation: +// +// * Cancelling the operation renders the instance immediately unreadable +// via the API. +// * The instance can be deleted. +// * All other attempts to modify the instance are rejected. +// +// Upon completion of the returned operation: +// +// * Billing for all successfully-allocated resources begins (some types +// may have lower than the requested levels). +// * Databases can be created in the instance. 
+// * The instance's allocated resource levels are readable via the API. +// * The instance's state becomes `READY`. +// +// The returned [long-running operation][google.longrunning.Operation] will +// have a name of the format `/operations/` and +// can be used to track creation of the instance. The +// [metadata][google.longrunning.Operation.metadata] field type is +// [CreateInstanceMetadata][google.spanner.admin.instance.v1.CreateInstanceMetadata]. +// The [response][google.longrunning.Operation.response] field type is +// [Instance][google.spanner.admin.instance.v1.Instance], if successful. +func (c *InstanceAdminClient) CreateInstance(ctx context.Context, req *instancepb.CreateInstanceRequest) (*InstanceOperation, error) { + ctx = insertXGoog(ctx, c.xGoogHeader) + var resp *longrunningpb.Operation + err := gax.Invoke(ctx, func(ctx context.Context) error { + var err error + resp, err = c.instanceAdminClient.CreateInstance(ctx, req) + return err + }, c.CallOptions.CreateInstance...) + if err != nil { + return nil, err + } + return &InstanceOperation{ + lro: longrunning.InternalNewOperation(c.Connection(), resp), + }, nil +} + +// UpdateInstance updates an instance, and begins allocating or releasing resources +// as requested. The returned [long-running +// operation][google.longrunning.Operation] can be used to track the +// progress of updating the instance. If the named instance does not +// exist, returns `NOT_FOUND`. +// +// Immediately upon completion of this request: +// +// * For resource types for which a decrease in the instance's allocation +// has been requested, billing is based on the newly-requested level. +// +// Until completion of the returned operation: +// +// * Cancelling the operation sets its metadata's +// [cancel_time][google.spanner.admin.instance.v1.UpdateInstanceMetadata.cancel_time], and begins +// restoring resources to their pre-request values. 
The operation +// is guaranteed to succeed at undoing all resource changes, +// after which point it terminates with a `CANCELLED` status. +// * All other attempts to modify the instance are rejected. +// * Reading the instance via the API continues to give the pre-request +// resource levels. +// +// Upon completion of the returned operation: +// +// * Billing begins for all successfully-allocated resources (some types +// may have lower than the requested levels). +// * All newly-reserved resources are available for serving the instance's +// tables. +// * The instance's new resource levels are readable via the API. +// +// The returned [long-running operation][google.longrunning.Operation] will +// have a name of the format `/operations/` and +// can be used to track the instance modification. The +// [metadata][google.longrunning.Operation.metadata] field type is +// [UpdateInstanceMetadata][google.spanner.admin.instance.v1.UpdateInstanceMetadata]. +// The [response][google.longrunning.Operation.response] field type is +// [Instance][google.spanner.admin.instance.v1.Instance], if successful. +// +// Authorization requires `spanner.instances.update` permission on +// resource [name][google.spanner.admin.instance.v1.Instance.name]. +func (c *InstanceAdminClient) UpdateInstance(ctx context.Context, req *instancepb.UpdateInstanceRequest) (*InstanceOperation, error) { + ctx = insertXGoog(ctx, c.xGoogHeader) + var resp *longrunningpb.Operation + err := gax.Invoke(ctx, func(ctx context.Context) error { + var err error + resp, err = c.instanceAdminClient.UpdateInstance(ctx, req) + return err + }, c.CallOptions.UpdateInstance...) + if err != nil { + return nil, err + } + return &InstanceOperation{ + lro: longrunning.InternalNewOperation(c.Connection(), resp), + }, nil +} + +// DeleteInstance deletes an instance. +// +// Immediately upon completion of the request: +// +// * Billing ceases for all of the instance's reserved resources. 
+// +// Soon afterward: +// +// * The instance and *all of its databases* immediately and +// irrevocably disappear from the API. All data in the databases +// is permanently deleted. +func (c *InstanceAdminClient) DeleteInstance(ctx context.Context, req *instancepb.DeleteInstanceRequest) error { + ctx = insertXGoog(ctx, c.xGoogHeader) + err := gax.Invoke(ctx, func(ctx context.Context) error { + var err error + _, err = c.instanceAdminClient.DeleteInstance(ctx, req) + return err + }, c.CallOptions.DeleteInstance...) + return err +} + +// SetIamPolicy sets the access control policy on an instance resource. Replaces any +// existing policy. +// +// Authorization requires `spanner.instances.setIamPolicy` on +// [resource][google.iam.v1.SetIamPolicyRequest.resource]. +func (c *InstanceAdminClient) SetIamPolicy(ctx context.Context, req *iampb.SetIamPolicyRequest) (*iampb.Policy, error) { + ctx = insertXGoog(ctx, c.xGoogHeader) + var resp *iampb.Policy + err := gax.Invoke(ctx, func(ctx context.Context) error { + var err error + resp, err = c.instanceAdminClient.SetIamPolicy(ctx, req) + return err + }, c.CallOptions.SetIamPolicy...) + if err != nil { + return nil, err + } + return resp, nil +} + +// GetIamPolicy gets the access control policy for an instance resource. Returns an empty +// policy if an instance exists but does not have a policy set. +// +// Authorization requires `spanner.instances.getIamPolicy` on +// [resource][google.iam.v1.GetIamPolicyRequest.resource]. +func (c *InstanceAdminClient) GetIamPolicy(ctx context.Context, req *iampb.GetIamPolicyRequest) (*iampb.Policy, error) { + ctx = insertXGoog(ctx, c.xGoogHeader) + var resp *iampb.Policy + err := gax.Invoke(ctx, func(ctx context.Context) error { + var err error + resp, err = c.instanceAdminClient.GetIamPolicy(ctx, req) + return err + }, c.CallOptions.GetIamPolicy...) 
+ if err != nil { + return nil, err + } + return resp, nil +} + +// TestIamPermissions returns permissions that the caller has on the specified instance resource. +// +// Attempting this RPC on a non-existent Cloud Spanner instance resource will +// result in a NOT_FOUND error if the user has `spanner.instances.list` +// permission on the containing Google Cloud Project. Otherwise returns an +// empty set of permissions. +func (c *InstanceAdminClient) TestIamPermissions(ctx context.Context, req *iampb.TestIamPermissionsRequest) (*iampb.TestIamPermissionsResponse, error) { + ctx = insertXGoog(ctx, c.xGoogHeader) + var resp *iampb.TestIamPermissionsResponse + err := gax.Invoke(ctx, func(ctx context.Context) error { + var err error + resp, err = c.instanceAdminClient.TestIamPermissions(ctx, req) + return err + }, c.CallOptions.TestIamPermissions...) + if err != nil { + return nil, err + } + return resp, nil +} + +// InstanceConfigIterator manages a stream of *instancepb.InstanceConfig. +type InstanceConfigIterator struct { + items []*instancepb.InstanceConfig + pageInfo *iterator.PageInfo + nextFunc func() error + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*instancepb.InstanceConfig, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *InstanceConfigIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. 
+func (it *InstanceConfigIterator) Next() (*instancepb.InstanceConfig, error) { + var item *instancepb.InstanceConfig + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *InstanceConfigIterator) bufLen() int { + return len(it.items) +} + +func (it *InstanceConfigIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} + +// InstanceIterator manages a stream of *instancepb.Instance. +type InstanceIterator struct { + items []*instancepb.Instance + pageInfo *iterator.PageInfo + nextFunc func() error + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*instancepb.Instance, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *InstanceIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. +func (it *InstanceIterator) Next() (*instancepb.Instance, error) { + var item *instancepb.Instance + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *InstanceIterator) bufLen() int { + return len(it.items) +} + +func (it *InstanceIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} + +// InstanceOperation manages a long-running operation yielding instancepb.Instance. 
+type InstanceOperation struct { + lro *longrunning.Operation +} + +// InstanceOperation returns a new InstanceOperation from a given name. +// The name must be that of a previously created InstanceOperation, possibly from a different process. +func (c *InstanceAdminClient) InstanceOperation(name string) *InstanceOperation { + return &InstanceOperation{ + lro: longrunning.InternalNewOperation(c.Connection(), &longrunningpb.Operation{Name: name}), + } +} + +// Wait blocks until the long-running operation is completed, returning the response and any errors encountered. +// +// See documentation of Poll for error-handling information. +func (op *InstanceOperation) Wait(ctx context.Context) (*instancepb.Instance, error) { + var resp instancepb.Instance + if err := op.lro.Wait(ctx, &resp); err != nil { + return nil, err + } + return &resp, nil +} + +// Poll fetches the latest state of the long-running operation. +// +// Poll also fetches the latest metadata, which can be retrieved by Metadata. +// +// If Poll fails, the error is returned and op is unmodified. If Poll succeeds and +// the operation has completed with failure, the error is returned and op.Done will return true. +// If Poll succeeds and the operation has completed successfully, +// op.Done will return true, and the response of the operation is returned. +// If Poll succeeds and the operation has not completed, the returned response and error are both nil. +func (op *InstanceOperation) Poll(ctx context.Context) (*instancepb.Instance, error) { + var resp instancepb.Instance + if err := op.lro.Poll(ctx, &resp); err != nil { + return nil, err + } + if !op.Done() { + return nil, nil + } + return &resp, nil +} + +// Metadata returns metadata associated with the long-running operation. +// Metadata itself does not contact the server, but Poll does. +// To get the latest metadata, call this method after a successful call to Poll. +// If the metadata is not available, the returned metadata and error are both nil. 
+func (op *InstanceOperation) Metadata() (*instancepb.UpdateInstanceMetadata, error) { + var meta instancepb.UpdateInstanceMetadata + if err := op.lro.Metadata(&meta); err == longrunning.ErrNoMetadata { + return nil, nil + } else if err != nil { + return nil, err + } + return &meta, nil +} + +// Done reports whether the long-running operation has completed. +func (op *InstanceOperation) Done() bool { + return op.lro.Done() +} + +// Name returns the name of the long-running operation. +// The name is assigned by the server and is unique within the service from which the operation is created. +func (op *InstanceOperation) Name() string { + return op.lro.Name() +} diff --git a/vendor/cloud.google.com/go/spanner/admin/instance/apiv1/instance_admin_client_example_test.go b/vendor/cloud.google.com/go/spanner/admin/instance/apiv1/instance_admin_client_example_test.go new file mode 100644 index 00000000..ee807fdb --- /dev/null +++ b/vendor/cloud.google.com/go/spanner/admin/instance/apiv1/instance_admin_client_example_test.go @@ -0,0 +1,230 @@ +// Copyright 2017, Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. 
+ +package instance_test + +import ( + "cloud.google.com/go/spanner/admin/instance/apiv1" + "golang.org/x/net/context" + iampb "google.golang.org/genproto/googleapis/iam/v1" + instancepb "google.golang.org/genproto/googleapis/spanner/admin/instance/v1" +) + +func ExampleNewInstanceAdminClient() { + ctx := context.Background() + c, err := instance.NewInstanceAdminClient(ctx) + if err != nil { + // TODO: Handle error. + } + // TODO: Use client. + _ = c +} + +func ExampleInstanceAdminClient_ListInstanceConfigs() { + ctx := context.Background() + c, err := instance.NewInstanceAdminClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &instancepb.ListInstanceConfigsRequest{ + // TODO: Fill request struct fields. + } + it := c.ListInstanceConfigs(ctx, req) + for { + resp, err := it.Next() + if err != nil { + // TODO: Handle error. + break + } + // TODO: Use resp. + _ = resp + } +} + +func ExampleInstanceAdminClient_GetInstanceConfig() { + ctx := context.Background() + c, err := instance.NewInstanceAdminClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &instancepb.GetInstanceConfigRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.GetInstanceConfig(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleInstanceAdminClient_ListInstances() { + ctx := context.Background() + c, err := instance.NewInstanceAdminClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &instancepb.ListInstancesRequest{ + // TODO: Fill request struct fields. + } + it := c.ListInstances(ctx, req) + for { + resp, err := it.Next() + if err != nil { + // TODO: Handle error. + break + } + // TODO: Use resp. + _ = resp + } +} + +func ExampleInstanceAdminClient_GetInstance() { + ctx := context.Background() + c, err := instance.NewInstanceAdminClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &instancepb.GetInstanceRequest{ + // TODO: Fill request struct fields. 
+ } + resp, err := c.GetInstance(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleInstanceAdminClient_CreateInstance() { + ctx := context.Background() + c, err := instance.NewInstanceAdminClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &instancepb.CreateInstanceRequest{ + // TODO: Fill request struct fields. + } + op, err := c.CreateInstance(ctx, req) + if err != nil { + // TODO: Handle error. + } + + resp, err := op.Wait(ctx) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleInstanceAdminClient_UpdateInstance() { + ctx := context.Background() + c, err := instance.NewInstanceAdminClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &instancepb.UpdateInstanceRequest{ + // TODO: Fill request struct fields. + } + op, err := c.UpdateInstance(ctx, req) + if err != nil { + // TODO: Handle error. + } + + resp, err := op.Wait(ctx) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleInstanceAdminClient_DeleteInstance() { + ctx := context.Background() + c, err := instance.NewInstanceAdminClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &instancepb.DeleteInstanceRequest{ + // TODO: Fill request struct fields. + } + err = c.DeleteInstance(ctx, req) + if err != nil { + // TODO: Handle error. + } +} + +func ExampleInstanceAdminClient_SetIamPolicy() { + ctx := context.Background() + c, err := instance.NewInstanceAdminClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &iampb.SetIamPolicyRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.SetIamPolicy(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleInstanceAdminClient_GetIamPolicy() { + ctx := context.Background() + c, err := instance.NewInstanceAdminClient(ctx) + if err != nil { + // TODO: Handle error. 
+ } + + req := &iampb.GetIamPolicyRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.GetIamPolicy(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleInstanceAdminClient_TestIamPermissions() { + ctx := context.Background() + c, err := instance.NewInstanceAdminClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &iampb.TestIamPermissionsRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.TestIamPermissions(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} diff --git a/vendor/cloud.google.com/go/spanner/admin/instance/apiv1/mock_test.go b/vendor/cloud.google.com/go/spanner/admin/instance/apiv1/mock_test.go new file mode 100644 index 00000000..0c7023e8 --- /dev/null +++ b/vendor/cloud.google.com/go/spanner/admin/instance/apiv1/mock_test.go @@ -0,0 +1,853 @@ +// Copyright 2017, Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. 
+ +package instance + +import ( + google_protobuf "github.com/golang/protobuf/ptypes/empty" + iampb "google.golang.org/genproto/googleapis/iam/v1" + longrunningpb "google.golang.org/genproto/googleapis/longrunning" + instancepb "google.golang.org/genproto/googleapis/spanner/admin/instance/v1" + field_maskpb "google.golang.org/genproto/protobuf/field_mask" +) + +import ( + "flag" + "io" + "log" + "net" + "os" + "testing" + + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes" + "golang.org/x/net/context" + "google.golang.org/api/option" + status "google.golang.org/genproto/googleapis/rpc/status" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" +) + +var _ = io.EOF +var _ = ptypes.MarshalAny +var _ status.Status + +type mockInstanceAdminServer struct { + // Embed for forward compatibility. + // Tests will keep working if more methods are added + // in the future. + instancepb.InstanceAdminServer + + reqs []proto.Message + + // If set, all calls return this error. 
+ err error + + // responses to return if err == nil + resps []proto.Message +} + +func (s *mockInstanceAdminServer) ListInstanceConfigs(_ context.Context, req *instancepb.ListInstanceConfigsRequest) (*instancepb.ListInstanceConfigsResponse, error) { + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*instancepb.ListInstanceConfigsResponse), nil +} + +func (s *mockInstanceAdminServer) GetInstanceConfig(_ context.Context, req *instancepb.GetInstanceConfigRequest) (*instancepb.InstanceConfig, error) { + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*instancepb.InstanceConfig), nil +} + +func (s *mockInstanceAdminServer) ListInstances(_ context.Context, req *instancepb.ListInstancesRequest) (*instancepb.ListInstancesResponse, error) { + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*instancepb.ListInstancesResponse), nil +} + +func (s *mockInstanceAdminServer) GetInstance(_ context.Context, req *instancepb.GetInstanceRequest) (*instancepb.Instance, error) { + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*instancepb.Instance), nil +} + +func (s *mockInstanceAdminServer) CreateInstance(_ context.Context, req *instancepb.CreateInstanceRequest) (*longrunningpb.Operation, error) { + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*longrunningpb.Operation), nil +} + +func (s *mockInstanceAdminServer) UpdateInstance(_ context.Context, req *instancepb.UpdateInstanceRequest) (*longrunningpb.Operation, error) { + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*longrunningpb.Operation), nil +} + +func (s *mockInstanceAdminServer) DeleteInstance(_ context.Context, req *instancepb.DeleteInstanceRequest) (*google_protobuf.Empty, error) { + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, 
s.err + } + return s.resps[0].(*google_protobuf.Empty), nil +} + +func (s *mockInstanceAdminServer) SetIamPolicy(_ context.Context, req *iampb.SetIamPolicyRequest) (*iampb.Policy, error) { + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*iampb.Policy), nil +} + +func (s *mockInstanceAdminServer) GetIamPolicy(_ context.Context, req *iampb.GetIamPolicyRequest) (*iampb.Policy, error) { + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*iampb.Policy), nil +} + +func (s *mockInstanceAdminServer) TestIamPermissions(_ context.Context, req *iampb.TestIamPermissionsRequest) (*iampb.TestIamPermissionsResponse, error) { + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*iampb.TestIamPermissionsResponse), nil +} + +// clientOpt is the option tests should use to connect to the test server. +// It is initialized by TestMain. +var clientOpt option.ClientOption + +var ( + mockInstanceAdmin mockInstanceAdminServer +) + +func TestMain(m *testing.M) { + flag.Parse() + + serv := grpc.NewServer() + instancepb.RegisterInstanceAdminServer(serv, &mockInstanceAdmin) + + lis, err := net.Listen("tcp", "localhost:0") + if err != nil { + log.Fatal(err) + } + go serv.Serve(lis) + + conn, err := grpc.Dial(lis.Addr().String(), grpc.WithInsecure()) + if err != nil { + log.Fatal(err) + } + clientOpt = option.WithGRPCConn(conn) + + os.Exit(m.Run()) +} + +func TestInstanceAdminListInstanceConfigs(t *testing.T) { + var nextPageToken string = "" + var instanceConfigsElement *instancepb.InstanceConfig = &instancepb.InstanceConfig{} + var instanceConfigs = []*instancepb.InstanceConfig{instanceConfigsElement} + var expectedResponse = &instancepb.ListInstanceConfigsResponse{ + NextPageToken: nextPageToken, + InstanceConfigs: instanceConfigs, + } + + mockInstanceAdmin.err = nil + mockInstanceAdmin.reqs = nil + + mockInstanceAdmin.resps = 
append(mockInstanceAdmin.resps[:0], expectedResponse) + + var formattedParent string = InstanceAdminProjectPath("[PROJECT]") + var request = &instancepb.ListInstanceConfigsRequest{ + Parent: formattedParent, + } + + c, err := NewInstanceAdminClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListInstanceConfigs(context.Background(), request).Next() + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockInstanceAdmin.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + want := (interface{})(expectedResponse.InstanceConfigs[0]) + got := (interface{})(resp) + var ok bool + + switch want := (want).(type) { + case proto.Message: + ok = proto.Equal(want, got.(proto.Message)) + default: + ok = want == got + } + if !ok { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestInstanceAdminListInstanceConfigsError(t *testing.T) { + errCode := codes.Internal + mockInstanceAdmin.err = grpc.Errorf(errCode, "test error") + + var formattedParent string = InstanceAdminProjectPath("[PROJECT]") + var request = &instancepb.ListInstanceConfigsRequest{ + Parent: formattedParent, + } + + c, err := NewInstanceAdminClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListInstanceConfigs(context.Background(), request).Next() + + if c := grpc.Code(err); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestInstanceAdminGetInstanceConfig(t *testing.T) { + var name2 string = "name2-1052831874" + var displayName string = "displayName1615086568" + var expectedResponse = &instancepb.InstanceConfig{ + Name: name2, + DisplayName: displayName, + } + + mockInstanceAdmin.err = nil + mockInstanceAdmin.reqs = nil + + mockInstanceAdmin.resps = append(mockInstanceAdmin.resps[:0], expectedResponse) + + var formattedName string = InstanceAdminInstanceConfigPath("[PROJECT]", "[INSTANCE_CONFIG]") + var 
request = &instancepb.GetInstanceConfigRequest{ + Name: formattedName, + } + + c, err := NewInstanceAdminClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.GetInstanceConfig(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockInstanceAdmin.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestInstanceAdminGetInstanceConfigError(t *testing.T) { + errCode := codes.Internal + mockInstanceAdmin.err = grpc.Errorf(errCode, "test error") + + var formattedName string = InstanceAdminInstanceConfigPath("[PROJECT]", "[INSTANCE_CONFIG]") + var request = &instancepb.GetInstanceConfigRequest{ + Name: formattedName, + } + + c, err := NewInstanceAdminClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.GetInstanceConfig(context.Background(), request) + + if c := grpc.Code(err); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestInstanceAdminListInstances(t *testing.T) { + var nextPageToken string = "" + var instancesElement *instancepb.Instance = &instancepb.Instance{} + var instances = []*instancepb.Instance{instancesElement} + var expectedResponse = &instancepb.ListInstancesResponse{ + NextPageToken: nextPageToken, + Instances: instances, + } + + mockInstanceAdmin.err = nil + mockInstanceAdmin.reqs = nil + + mockInstanceAdmin.resps = append(mockInstanceAdmin.resps[:0], expectedResponse) + + var formattedParent string = InstanceAdminProjectPath("[PROJECT]") + var request = &instancepb.ListInstancesRequest{ + Parent: formattedParent, + } + + c, err := NewInstanceAdminClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListInstances(context.Background(), request).Next() + + if err != 
nil { + t.Fatal(err) + } + + if want, got := request, mockInstanceAdmin.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + want := (interface{})(expectedResponse.Instances[0]) + got := (interface{})(resp) + var ok bool + + switch want := (want).(type) { + case proto.Message: + ok = proto.Equal(want, got.(proto.Message)) + default: + ok = want == got + } + if !ok { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestInstanceAdminListInstancesError(t *testing.T) { + errCode := codes.Internal + mockInstanceAdmin.err = grpc.Errorf(errCode, "test error") + + var formattedParent string = InstanceAdminProjectPath("[PROJECT]") + var request = &instancepb.ListInstancesRequest{ + Parent: formattedParent, + } + + c, err := NewInstanceAdminClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListInstances(context.Background(), request).Next() + + if c := grpc.Code(err); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestInstanceAdminGetInstance(t *testing.T) { + var name2 string = "name2-1052831874" + var config string = "config-1354792126" + var displayName string = "displayName1615086568" + var nodeCount int32 = 1539922066 + var expectedResponse = &instancepb.Instance{ + Name: name2, + Config: config, + DisplayName: displayName, + NodeCount: nodeCount, + } + + mockInstanceAdmin.err = nil + mockInstanceAdmin.reqs = nil + + mockInstanceAdmin.resps = append(mockInstanceAdmin.resps[:0], expectedResponse) + + var formattedName string = InstanceAdminInstancePath("[PROJECT]", "[INSTANCE]") + var request = &instancepb.GetInstanceRequest{ + Name: formattedName, + } + + c, err := NewInstanceAdminClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.GetInstance(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockInstanceAdmin.reqs[0]; 
!proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestInstanceAdminGetInstanceError(t *testing.T) { + errCode := codes.Internal + mockInstanceAdmin.err = grpc.Errorf(errCode, "test error") + + var formattedName string = InstanceAdminInstancePath("[PROJECT]", "[INSTANCE]") + var request = &instancepb.GetInstanceRequest{ + Name: formattedName, + } + + c, err := NewInstanceAdminClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.GetInstance(context.Background(), request) + + if c := grpc.Code(err); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestInstanceAdminCreateInstance(t *testing.T) { + var name string = "name3373707" + var config string = "config-1354792126" + var displayName string = "displayName1615086568" + var nodeCount int32 = 1539922066 + var expectedResponse = &instancepb.Instance{ + Name: name, + Config: config, + DisplayName: displayName, + NodeCount: nodeCount, + } + + mockInstanceAdmin.err = nil + mockInstanceAdmin.reqs = nil + + any, err := ptypes.MarshalAny(expectedResponse) + if err != nil { + t.Fatal(err) + } + mockInstanceAdmin.resps = append(mockInstanceAdmin.resps[:0], &longrunningpb.Operation{ + Name: "longrunning-test", + Done: true, + Result: &longrunningpb.Operation_Response{Response: any}, + }) + + var formattedParent string = InstanceAdminProjectPath("[PROJECT]") + var instanceId string = "instanceId-2101995259" + var instance *instancepb.Instance = &instancepb.Instance{} + var request = &instancepb.CreateInstanceRequest{ + Parent: formattedParent, + InstanceId: instanceId, + Instance: instance, + } + + c, err := NewInstanceAdminClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + respLRO, err := c.CreateInstance(context.Background(), request) + if err != 
nil { + t.Fatal(err) + } + resp, err := respLRO.Wait(context.Background()) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockInstanceAdmin.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestInstanceAdminCreateInstanceError(t *testing.T) { + errCode := codes.Internal + mockInstanceAdmin.err = nil + mockInstanceAdmin.resps = append(mockInstanceAdmin.resps[:0], &longrunningpb.Operation{ + Name: "longrunning-test", + Done: true, + Result: &longrunningpb.Operation_Error{ + Error: &status.Status{ + Code: int32(errCode), + Message: "test error", + }, + }, + }) + + var formattedParent string = InstanceAdminProjectPath("[PROJECT]") + var instanceId string = "instanceId-2101995259" + var instance *instancepb.Instance = &instancepb.Instance{} + var request = &instancepb.CreateInstanceRequest{ + Parent: formattedParent, + InstanceId: instanceId, + Instance: instance, + } + + c, err := NewInstanceAdminClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + respLRO, err := c.CreateInstance(context.Background(), request) + if err != nil { + t.Fatal(err) + } + resp, err := respLRO.Wait(context.Background()) + + if c := grpc.Code(err); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestInstanceAdminUpdateInstance(t *testing.T) { + var name string = "name3373707" + var config string = "config-1354792126" + var displayName string = "displayName1615086568" + var nodeCount int32 = 1539922066 + var expectedResponse = &instancepb.Instance{ + Name: name, + Config: config, + DisplayName: displayName, + NodeCount: nodeCount, + } + + mockInstanceAdmin.err = nil + mockInstanceAdmin.reqs = nil + + any, err := ptypes.MarshalAny(expectedResponse) + if err != nil { + t.Fatal(err) + } + mockInstanceAdmin.resps = 
append(mockInstanceAdmin.resps[:0], &longrunningpb.Operation{ + Name: "longrunning-test", + Done: true, + Result: &longrunningpb.Operation_Response{Response: any}, + }) + + var instance *instancepb.Instance = &instancepb.Instance{} + var fieldMask *field_maskpb.FieldMask = &field_maskpb.FieldMask{} + var request = &instancepb.UpdateInstanceRequest{ + Instance: instance, + FieldMask: fieldMask, + } + + c, err := NewInstanceAdminClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + respLRO, err := c.UpdateInstance(context.Background(), request) + if err != nil { + t.Fatal(err) + } + resp, err := respLRO.Wait(context.Background()) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockInstanceAdmin.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestInstanceAdminUpdateInstanceError(t *testing.T) { + errCode := codes.Internal + mockInstanceAdmin.err = nil + mockInstanceAdmin.resps = append(mockInstanceAdmin.resps[:0], &longrunningpb.Operation{ + Name: "longrunning-test", + Done: true, + Result: &longrunningpb.Operation_Error{ + Error: &status.Status{ + Code: int32(errCode), + Message: "test error", + }, + }, + }) + + var instance *instancepb.Instance = &instancepb.Instance{} + var fieldMask *field_maskpb.FieldMask = &field_maskpb.FieldMask{} + var request = &instancepb.UpdateInstanceRequest{ + Instance: instance, + FieldMask: fieldMask, + } + + c, err := NewInstanceAdminClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + respLRO, err := c.UpdateInstance(context.Background(), request) + if err != nil { + t.Fatal(err) + } + resp, err := respLRO.Wait(context.Background()) + + if c := grpc.Code(err); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestInstanceAdminDeleteInstance(t 
*testing.T) { + var expectedResponse *google_protobuf.Empty = &google_protobuf.Empty{} + + mockInstanceAdmin.err = nil + mockInstanceAdmin.reqs = nil + + mockInstanceAdmin.resps = append(mockInstanceAdmin.resps[:0], expectedResponse) + + var formattedName string = InstanceAdminInstancePath("[PROJECT]", "[INSTANCE]") + var request = &instancepb.DeleteInstanceRequest{ + Name: formattedName, + } + + c, err := NewInstanceAdminClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + err = c.DeleteInstance(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockInstanceAdmin.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + +} + +func TestInstanceAdminDeleteInstanceError(t *testing.T) { + errCode := codes.Internal + mockInstanceAdmin.err = grpc.Errorf(errCode, "test error") + + var formattedName string = InstanceAdminInstancePath("[PROJECT]", "[INSTANCE]") + var request = &instancepb.DeleteInstanceRequest{ + Name: formattedName, + } + + c, err := NewInstanceAdminClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + err = c.DeleteInstance(context.Background(), request) + + if c := grpc.Code(err); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } +} +func TestInstanceAdminSetIamPolicy(t *testing.T) { + var version int32 = 351608024 + var etag []byte = []byte("21") + var expectedResponse = &iampb.Policy{ + Version: version, + Etag: etag, + } + + mockInstanceAdmin.err = nil + mockInstanceAdmin.reqs = nil + + mockInstanceAdmin.resps = append(mockInstanceAdmin.resps[:0], expectedResponse) + + var formattedResource string = InstanceAdminInstancePath("[PROJECT]", "[INSTANCE]") + var policy *iampb.Policy = &iampb.Policy{} + var request = &iampb.SetIamPolicyRequest{ + Resource: formattedResource, + Policy: policy, + } + + c, err := NewInstanceAdminClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + 
} + + resp, err := c.SetIamPolicy(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockInstanceAdmin.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestInstanceAdminSetIamPolicyError(t *testing.T) { + errCode := codes.Internal + mockInstanceAdmin.err = grpc.Errorf(errCode, "test error") + + var formattedResource string = InstanceAdminInstancePath("[PROJECT]", "[INSTANCE]") + var policy *iampb.Policy = &iampb.Policy{} + var request = &iampb.SetIamPolicyRequest{ + Resource: formattedResource, + Policy: policy, + } + + c, err := NewInstanceAdminClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.SetIamPolicy(context.Background(), request) + + if c := grpc.Code(err); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestInstanceAdminGetIamPolicy(t *testing.T) { + var version int32 = 351608024 + var etag []byte = []byte("21") + var expectedResponse = &iampb.Policy{ + Version: version, + Etag: etag, + } + + mockInstanceAdmin.err = nil + mockInstanceAdmin.reqs = nil + + mockInstanceAdmin.resps = append(mockInstanceAdmin.resps[:0], expectedResponse) + + var formattedResource string = InstanceAdminInstancePath("[PROJECT]", "[INSTANCE]") + var request = &iampb.GetIamPolicyRequest{ + Resource: formattedResource, + } + + c, err := NewInstanceAdminClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.GetIamPolicy(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockInstanceAdmin.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", 
got, want) + } +} + +func TestInstanceAdminGetIamPolicyError(t *testing.T) { + errCode := codes.Internal + mockInstanceAdmin.err = grpc.Errorf(errCode, "test error") + + var formattedResource string = InstanceAdminInstancePath("[PROJECT]", "[INSTANCE]") + var request = &iampb.GetIamPolicyRequest{ + Resource: formattedResource, + } + + c, err := NewInstanceAdminClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.GetIamPolicy(context.Background(), request) + + if c := grpc.Code(err); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestInstanceAdminTestIamPermissions(t *testing.T) { + var expectedResponse *iampb.TestIamPermissionsResponse = &iampb.TestIamPermissionsResponse{} + + mockInstanceAdmin.err = nil + mockInstanceAdmin.reqs = nil + + mockInstanceAdmin.resps = append(mockInstanceAdmin.resps[:0], expectedResponse) + + var formattedResource string = InstanceAdminInstancePath("[PROJECT]", "[INSTANCE]") + var permissions []string = nil + var request = &iampb.TestIamPermissionsRequest{ + Resource: formattedResource, + Permissions: permissions, + } + + c, err := NewInstanceAdminClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.TestIamPermissions(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockInstanceAdmin.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestInstanceAdminTestIamPermissionsError(t *testing.T) { + errCode := codes.Internal + mockInstanceAdmin.err = grpc.Errorf(errCode, "test error") + + var formattedResource string = InstanceAdminInstancePath("[PROJECT]", "[INSTANCE]") + var permissions []string = nil + var request = &iampb.TestIamPermissionsRequest{ + Resource: formattedResource, + 
Permissions: permissions, + } + + c, err := NewInstanceAdminClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.TestIamPermissions(context.Background(), request) + + if c := grpc.Code(err); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} diff --git a/vendor/cloud.google.com/go/spanner/backoff.go b/vendor/cloud.google.com/go/spanner/backoff.go new file mode 100644 index 00000000..d3872384 --- /dev/null +++ b/vendor/cloud.google.com/go/spanner/backoff.go @@ -0,0 +1,58 @@ +/* +Copyright 2017 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package spanner + +import ( + "math/rand" + "time" +) + +const ( + // minBackoff is the minimum backoff used by default. + minBackoff = 1 * time.Second + // maxBackoff is the maximum backoff used by default. + maxBackoff = 32 * time.Second + // jitter is the jitter factor. + jitter = 0.4 + // rate is the rate of exponential increase in the backoff. + rate = 1.3 +) + +var defaultBackoff = exponentialBackoff{minBackoff, maxBackoff} + +type exponentialBackoff struct { + min, max time.Duration +} + +// delay calculates the delay that should happen at n-th +// exponential backoff in a series. 
+func (b exponentialBackoff) delay(retries int) time.Duration { + min, max := float64(b.min), float64(b.max) + delay := min + for delay < max && retries > 0 { + delay *= rate + retries-- + } + if delay > max { + delay = max + } + delay -= delay * jitter * rand.Float64() + if delay < min { + delay = min + } + return time.Duration(delay) +} diff --git a/vendor/cloud.google.com/go/spanner/backoff_test.go b/vendor/cloud.google.com/go/spanner/backoff_test.go new file mode 100644 index 00000000..7a0314e8 --- /dev/null +++ b/vendor/cloud.google.com/go/spanner/backoff_test.go @@ -0,0 +1,62 @@ +/* +Copyright 2017 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package spanner + +import ( + "math" + "time" + + "testing" +) + +// Test if exponential backoff helper can produce correct series of +// retry delays. 
+func TestBackoff(t *testing.T) { + b := exponentialBackoff{minBackoff, maxBackoff} + tests := []struct { + retries int + min time.Duration + max time.Duration + }{ + { + retries: 0, + min: minBackoff, + max: minBackoff, + }, + { + retries: 1, + min: minBackoff, + max: time.Duration(rate * float64(minBackoff)), + }, + { + retries: 3, + min: time.Duration(math.Pow(rate, 3) * (1 - jitter) * float64(minBackoff)), + max: time.Duration(math.Pow(rate, 3) * float64(minBackoff)), + }, + { + retries: 1000, + min: time.Duration((1 - jitter) * float64(maxBackoff)), + max: maxBackoff, + }, + } + for _, test := range tests { + got := b.delay(test.retries) + if float64(got) < float64(test.min) || float64(got) > float64(test.max) { + t.Errorf("delay(%v) = %v, want in range [%v, %v]", test.retries, got, test.min, test.max) + } + } +} diff --git a/vendor/cloud.google.com/go/spanner/client.go b/vendor/cloud.google.com/go/spanner/client.go new file mode 100644 index 00000000..3a5ab33e --- /dev/null +++ b/vendor/cloud.google.com/go/spanner/client.go @@ -0,0 +1,302 @@ +/* +Copyright 2017 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package spanner + +import ( + "fmt" + "regexp" + "runtime" + "sync/atomic" + "time" + + "cloud.google.com/go/internal/version" + + "golang.org/x/net/context" + "google.golang.org/api/option" + "google.golang.org/api/transport" + "google.golang.org/grpc" + "google.golang.org/grpc/metadata" + + sppb "google.golang.org/genproto/googleapis/spanner/v1" +) + +const ( + prodAddr = "spanner.googleapis.com:443" + + // resourcePrefixHeader is the name of the metadata header used to indicate + // the resource being operated on. + resourcePrefixHeader = "google-cloud-resource-prefix" + // apiClientHeader is the name of the metadata header used to indicate client + // information. + apiClientHeader = "x-goog-api-client" +) + +const ( + // Scope is the scope for Cloud Spanner Data API. + Scope = "https://www.googleapis.com/auth/spanner.data" + + // AdminScope is the scope for Cloud Spanner Admin APIs. + AdminScope = "https://www.googleapis.com/auth/spanner.admin" +) + +var ( + validDBPattern = regexp.MustCompile("^projects/[^/]+/instances/[^/]+/databases/[^/]+$") + clientUserAgent = fmt.Sprintf("cloudspanner go/%s", runtime.Version()) +) + +func validDatabaseName(db string) error { + if matched := validDBPattern.MatchString(db); !matched { + return fmt.Errorf("database name %q should conform to pattern %q", + db, validDBPattern.String()) + } + return nil +} + +// Client is a client for reading and writing data to a Cloud Spanner database. A +// client is safe to use concurrently, except for its Close method. +type Client struct { + // rr must be accessed through atomic operations. + rr uint32 + conns []*grpc.ClientConn + clients []sppb.SpannerClient + database string + // Metadata to be sent with each request. + md metadata.MD + idleSessions *sessionPool +} + +// ClientConfig has configurations for the client. +type ClientConfig struct { + // NumChannels is the number of GRPC channels. 
+ NumChannels int + co []option.ClientOption + // SessionPoolConfig is the configuration for session pool. + SessionPoolConfig +} + +// errDial returns error for dialing to Cloud Spanner. +func errDial(ci int, err error) error { + e := toSpannerError(err).(*Error) + e.decorate(fmt.Sprintf("dialing fails for channel[%v]", ci)) + return e +} + +func contextWithMetadata(ctx context.Context, md metadata.MD) context.Context { + existing, ok := metadata.FromContext(ctx) + if ok { + md = metadata.Join(existing, md) + } + return metadata.NewContext(ctx, md) +} + +// NewClient creates a client to a database. A valid database name has the +// form projects/PROJECT_ID/instances/INSTANCE_ID/databases/DATABASE_ID. It uses a default +// configuration. +func NewClient(ctx context.Context, database string, opts ...option.ClientOption) (*Client, error) { + return NewClientWithConfig(ctx, database, ClientConfig{}, opts...) +} + +// NewClientWithConfig creates a client to a database. A valid database name has the +// form projects/PROJECT_ID/instances/INSTANCE_ID/databases/DATABASE_ID. +func NewClientWithConfig(ctx context.Context, database string, config ClientConfig, opts ...option.ClientOption) (*Client, error) { + // Validate database path. + if err := validDatabaseName(database); err != nil { + return nil, err + } + c := &Client{ + database: database, + md: metadata.Pairs( + resourcePrefixHeader, database, + apiClientHeader, clientUserAgent, + "x-goog-api-client", fmt.Sprintf("gl-go/%s gccl/%s grpc/", version.Go(), version.Repo)), + } + allOpts := []option.ClientOption{option.WithEndpoint(prodAddr), option.WithScopes(Scope), option.WithUserAgent(clientUserAgent)} + allOpts = append(allOpts, opts...) + // Prepare gRPC channels. + if config.NumChannels == 0 { + config.NumChannels = 4 + } + for i := 0; i < config.NumChannels; i++ { + conn, err := transport.DialGRPC(ctx, allOpts...) 
+ if err != nil { + return nil, errDial(i, err) + } + c.conns = append(c.conns, conn) + c.clients = append(c.clients, sppb.NewSpannerClient(conn)) + } + // Prepare session pool. + config.SessionPoolConfig.getRPCClient = func() (sppb.SpannerClient, error) { + // TODO: support more loadbalancing options. + return c.rrNext(), nil + } + sp, err := newSessionPool(database, config.SessionPoolConfig, c.md) + if err != nil { + c.Close() + return nil, err + } + c.idleSessions = sp + return c, nil +} + +// rrNext returns the next available Cloud Spanner RPC client in a round-robin manner. +func (c *Client) rrNext() sppb.SpannerClient { + return c.clients[atomic.AddUint32(&c.rr, 1)%uint32(len(c.clients))] +} + +// Close closes the client. +func (c *Client) Close() { + if c.idleSessions != nil { + c.idleSessions.close() + } + for _, conn := range c.conns { + conn.Close() + } +} + +// Single provides a read-only snapshot transaction optimized for the case +// where only a single read or query is needed. This is more efficient than +// using ReadOnlyTransaction() for a single read or query. +// +// Single will use a strong TimestampBound by default. Use +// ReadOnlyTransaction.WithTimestampBound to specify a different +// TimestampBound. A non-strong bound can be used to reduce latency, or +// "time-travel" to prior versions of the database, see the documentation of +// TimestampBound for details. +func (c *Client) Single() *ReadOnlyTransaction { + t := &ReadOnlyTransaction{singleUse: true, sp: c.idleSessions} + t.txReadOnly.txReadEnv = t + return t +} + +// ReadOnlyTransaction returns a ReadOnlyTransaction that can be used for +// multiple reads from the database. You must call Close() when the +// ReadOnlyTransaction is no longer needed to release resources on the server. +// +// ReadOnlyTransaction will use a strong TimestampBound by default. Use +// ReadOnlyTransaction.WithTimestampBound to specify a different +// TimestampBound. 
A non-strong bound can be used to reduce latency, or +// "time-travel" to prior versions of the database, see the documentation of +// TimestampBound for details. +func (c *Client) ReadOnlyTransaction() *ReadOnlyTransaction { + t := &ReadOnlyTransaction{ + singleUse: false, + sp: c.idleSessions, + txReadyOrClosed: make(chan struct{}), + } + t.txReadOnly.txReadEnv = t + return t +} + +// ReadWriteTransaction executes a read-write transaction, with retries as +// necessary. +// +// The function f will be called one or more times. It must not maintain +// any state between calls. +// +// If the transaction cannot be committed or if f returns an IsAborted error, +// ReadWriteTransaction will call f again. It will continue to call f until the +// transaction can be committed or the Context times out or is cancelled. If f +// returns an error other than IsAborted, ReadWriteTransaction will abort the +// transaction and return the error. +// +// To limit the number of retries, set a deadline on the Context rather than +// using a fixed limit on the number of attempts. ReadWriteTransaction will +// retry as needed until that deadline is met. +func (c *Client) ReadWriteTransaction(ctx context.Context, f func(t *ReadWriteTransaction) error) (time.Time, error) { + var ( + ts time.Time + sh *sessionHandle + ) + err := runRetryable(ctx, func(ctx context.Context) error { + var ( + err error + t *ReadWriteTransaction + ) + if sh == nil || sh.getID() == "" || sh.getClient() == nil { + // Session handle hasn't been allocated or has been destroyed. + sh, err = c.idleSessions.takeWriteSession(ctx) + if err != nil { + // If session retrieval fails, just fail the transaction. + return err + } + t = &ReadWriteTransaction{ + sh: sh, + tx: sh.getTransactionID(), + } + } else { + t = &ReadWriteTransaction{ + sh: sh, + } + } + t.txReadOnly.txReadEnv = t + if err = t.begin(ctx); err != nil { + // Mask error from begin operation as retryable error. 
+ return errRetry(err) + } + ts, err = t.runInTransaction(ctx, f) + if err != nil { + return err + } + return nil + }) + if sh != nil { + sh.recycle() + } + return ts, err +} + +// applyOption controls the behavior of Client.Apply. +type applyOption struct { + // If atLeastOnce == true, Client.Apply will execute the mutations on Cloud Spanner at least once. + atLeastOnce bool +} + +// An ApplyOption is an optional argument to Apply. +type ApplyOption func(*applyOption) + +// ApplyAtLeastOnce returns an ApplyOption that removes replay protection. +// +// With this option, Apply may attempt to apply mutations more than once; if +// the mutations are not idempotent, this may lead to a failure being reported +// when the mutation was applied more than once. For example, an insert may +// fail with ALREADY_EXISTS even though the row did not exist before Apply was +// called. For this reason, most users of the library will prefer not to use +// this option. However, ApplyAtLeastOnce requires only a single RPC, whereas +// Apply's default replay protection may require an additional RPC. So this +// option may be appropriate for latency sensitive and/or high throughput blind +// writing. +func ApplyAtLeastOnce() ApplyOption { + return func(ao *applyOption) { + ao.atLeastOnce = true + } +} + +// Apply applies a list of mutations atomically to the database. +func (c *Client) Apply(ctx context.Context, ms []*Mutation, opts ...ApplyOption) (time.Time, error) { + ao := &applyOption{} + for _, opt := range opts { + opt(ao) + } + if !ao.atLeastOnce { + return c.ReadWriteTransaction(ctx, func(t *ReadWriteTransaction) error { + t.BufferWrite(ms) + return nil + }) + } + t := &writeOnlyTransaction{c.idleSessions} + return t.applyAtLeastOnce(ctx, ms...) 
+} diff --git a/vendor/cloud.google.com/go/spanner/client_test.go b/vendor/cloud.google.com/go/spanner/client_test.go new file mode 100644 index 00000000..eb439562 --- /dev/null +++ b/vendor/cloud.google.com/go/spanner/client_test.go @@ -0,0 +1,43 @@ +/* +Copyright 2017 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package spanner + +import ( + "strings" + "testing" +) + +// Test validDatabaseName() +func TestValidDatabaseName(t *testing.T) { + validDbUri := "projects/spanner-cloud-test/instances/foo/databases/foodb" + invalidDbUris := []string{ + // Completely wrong DB URI. + "foobarDB", + // Project ID contains "/". + "projects/spanner-cloud/test/instances/foo/databases/foodb", + // No instance ID. + "projects/spanner-cloud-test/instances//databases/foodb", + } + if err := validDatabaseName(validDbUri); err != nil { + t.Errorf("validateDatabaseName(%q) = %v, want nil", validDbUri, err) + } + for _, d := range invalidDbUris { + if err, wantErr := validDatabaseName(d), "should conform to pattern"; !strings.Contains(err.Error(), wantErr) { + t.Errorf("validateDatabaseName(%q) = %q, want error pattern %q", validDbUri, err, wantErr) + } + } +} diff --git a/vendor/cloud.google.com/go/spanner/doc.go b/vendor/cloud.google.com/go/spanner/doc.go new file mode 100644 index 00000000..3aee32a8 --- /dev/null +++ b/vendor/cloud.google.com/go/spanner/doc.go @@ -0,0 +1,319 @@ +/* +Copyright 2017 Google Inc. All Rights Reserved. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Package spanner provides a client for reading and writing to Cloud Spanner +databases. See the packages under admin for clients that operate on databases +and instances. + +Note: This package is in alpha. Backwards-incompatible changes may occur +without notice. + +See https://cloud.google.com/spanner/docs/getting-started/go/ for an introduction +to Cloud Spanner and additional help on using this API. + +Creating a Client + +To start working with this package, create a client that refers to the database +of interest: + + ctx := context.Background() + client, err := spanner.NewClient(ctx, "projects/P/instances/I/databases/D") + if err != nil { + // TODO: Handle error. + } + + +Simple Reads and Writes + +Two Client methods, Apply and Single, work well for simple reads and writes. As +a quick introduction, here we write a new row to the database and read it back: + + _, err := client.Apply(ctx, []*spanner.Mutation{ + spanner.Insert("Users", + []string{"name", "email"}, + []interface{}{"alice", "a@example.com"})}) + if err != nil { + // TODO: Handle error. + } + row, err := client.Single().ReadRow(ctx, "Users", + spanner.Key{"alice"}, []string{"email"}) + if err != nil { + // TODO: Handle error. + } + +All the methods used above are discussed in more detail below. + + +Keys + +Every Cloud Spanner row has a unique key, composed of one or more columns. 
+Construct keys with a literal of type Key: + + key1 := spanner.Key{"alice"} + + +KeyRanges + +The keys of a Cloud Spanner table are ordered. You can specify ranges of keys +using the KeyRange type: + + kr1 := spanner.KeyRange{Start: key1, End: key2} + +By default, a KeyRange includes its start key but not its end key. Use +the Kind field to specify other boundary conditions: + + // include both keys + kr2 := spanner.KeyRange{Start: key1, End: key2, Kind: spanner.ClosedClosed} + + +KeySets + +A KeySet represents a set of keys. AllKeys returns a KeySet that refers to all +the keys in a table: + + ks1 := spanner.AllKeys() + +To construct a set of specific keys, use the Keys function: + + ks2 := spanner.Keys(key1, key2, key3) + +You can also build KeySets from ranges of keys: + + ks3 := spanner.Range(kr1) + +Use UnionKeySets to build up more complex KeySets, or construct one directly +using a KeySet literal: + + ks4 := spanner.KeySet{ + Keys: []spanner.Keys{key1, key2}, + Ranges: []spanner.KeyRange{kr1, kr2}, + } + + +Transactions + +All Cloud Spanner reads and writes occur inside transactions. There are two +types of transactions, read-only and read-write. Read-only transactions cannot +change the database, do not acquire locks, and may access either the current +database state or states in the past. Read-write transactions can read the +database before writing to it, and always apply to the most recent database +state. + + +Single Reads + +The simplest and fastest transaction is a ReadOnlyTransaction that supports a +single read operation. Use Client.Single to create such a transaction. You can +chain the call to Single with a call to a Read method. + +When you only want one row whose key you know, use ReadRow. Provide the table +name, key, and the columns you want to read: + + row, err := client.Single().ReadRow(ctx, "Accounts", spanner.Key{"alice"}, []string{"balance"}) + +Read multiple rows with the Read method. 
It takes a table name, KeySet, and list +of columns: + + iter := client.Single().Read(ctx, "Accounts", keyset1, columns) + +Read returns a RowIterator. You can call the Do method on the iterator and pass +a callback: + + err := iter.Do(func(row *Row) error { + // TODO: use row + return nil + }) + +RowIterator also follows the standard pattern for the Google +Cloud Client Libraries: + + defer iter.Stop() + for { + row, err := iter.Next() + if err == iterator.Done { + break + } + if err != nil { + // TODO: Handle error. + } + // TODO: use row + } + +Always call Stop when you finish using an iterator this way, whether or not you +iterate to the end. (Failing to call Stop could lead you to exhaust the +database's session quota.) + +To read rows with an index, use ReadUsingIndex. + +Statements + +The most general form of reading uses SQL statements. Construct a Statement +with NewStatement, setting any parameters using the Statement's Params map: + + stmt := spanner.NewStatement("SELECT First, Last FROM SINGERS WHERE Last >= @start") + stmt.Params["start"] = "Dylan" + +You can also construct a Statement directly with a struct literal, providing +your own map of parameters. + +Use the Query method to run the statement and obtain an iterator: + + iter := client.Single().Query(ctx, stmt) + + +Rows + +Once you have a Row, via an iterator or a call to ReadRow, you can extract +column values in several ways. Pass in a pointer to a Go variable of the +appropriate type when you extract a value. 
+ +You can extract by column position or name: + + err := row.Column(0, &name) + err = row.ColumnByName("balance", &balance) + +You can extract all the columns at once: + + err = row.Columns(&name, &balance) + +Or you can define a Go struct that corresponds to your columns, and extract +into that: + + var s struct { Name string; Balance int64 } + err = row.ToStruct(&s) + + +For Cloud Spanner columns that may contain NULL, use one of the NullXXX types, +like NullString: + + var ns spanner.NullString + if err =: row.Column(0, &ns); err != nil { + // TODO: Handle error. + } + if ns.Valid { + fmt.Println(ns.StringVal) + } else { + fmt.Println("column is NULL") + } + + +Multiple Reads + +To perform more than one read in a transaction, use ReadOnlyTransaction: + + txn := client.ReadOnlyTransaction() + defer txn.Close() + iter := txn.Query(ctx, stmt1) + // ... + iter = txn.Query(ctx, stmt2) + // ... + +You must call Close when you are done with the transaction. + + +Timestamps and Timestamp Bounds + +Cloud Spanner read-only transactions conceptually perform all their reads at a +single moment in time, called the transaction's read timestamp. Once a read has +started, you can call ReadOnlyTransaction's Timestamp method to obtain the read +timestamp. + +By default, a transaction will pick the most recent time (a time where all +previously committed transactions are visible) for its reads. This provides the +freshest data, but may involve some delay. You can often get a quicker response +if you are willing to tolerate "stale" data. You can control the read timestamp +selected by a transaction by calling the WithTimestampBound method on the +transaction before using it. For example, to perform a query on data that is at +most one minute stale, use + + client.Single(). + WithTimestampBound(spanner.MaxStaleness(1*time.Minute)). + Query(ctx, stmt) + +See the documentation of TimestampBound for more details. 
+ + +Mutations + +To write values to a Cloud Spanner database, construct a Mutation. The spanner +package has functions for inserting, updating and deleting rows. Except for the +Delete methods, which take a Key or KeyRange, each mutation-building function +comes in three varieties. + +One takes lists of columns and values along with the table name: + + m1 := spanner.Insert("Users", + []string{"name", "email"}, + []interface{}{"alice", "a@example.com"}) + +One takes a map from column names to values: + + m2 := spanner.InsertMap("Users", map[string]interface{}{ + "name": "alice", + "email": "a@example.com", + }) + +And the third accepts a struct value, and determines the columns from the +struct field names: + + type User struct { Name, Email string } + u := User{Name: "alice", Email: "a@example.com"} + m3, err := spanner.InsertStruct("Users", u) + + +Writes + +To apply a list of mutations to the database, use Apply: + + _, err := client.Apply(ctx, []*spanner.Mutation{m1, m2, m3}) + +If you need to read before writing in a single transaction, use a +ReadWriteTransaction. ReadWriteTransactions may abort and need to be retried. +You pass in a function to ReadWriteTransaction, and the client will handle the +retries automatically. Use the transaction's BufferWrite method to buffer +mutations, which will all be executed at the end of the transaction: + + _, err := client.ReadWriteTransaction(ctx, func(txn *spanner.ReadWriteTransaction) error { + var balance int64 + row, err := txn.ReadRow(ctx, "Accounts", spanner.Key{"alice"}, []string{"balance"}) + if err != nil { + // This function will be called again if this is an IsAborted error. 
+ return err + } + if err := row.Column(0, &balance); err != nil { + return err + } + + if balance <= 10 { + return errors.New("insufficient funds in account") + } + balance -= 10 + m := spanner.Update("Accounts", []string{"user", "balance"}, []interface{}{"alice", balance}) + txn.BufferWrite([]*spanner.Mutation{m}) + + // The buffered mutation will be committed. If the commit + // fails with an IsAborted error, this function will be called + // again. + return nil + }) + +Authentication + +See examples of authorization and authentication at +https://godoc.org/cloud.google.com/go#pkg-examples. +*/ +package spanner // import "cloud.google.com/go/spanner" diff --git a/vendor/cloud.google.com/go/spanner/errors.go b/vendor/cloud.google.com/go/spanner/errors.go new file mode 100644 index 00000000..e1ec0b22 --- /dev/null +++ b/vendor/cloud.google.com/go/spanner/errors.go @@ -0,0 +1,105 @@ +/* +Copyright 2017 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package spanner + +import ( + "fmt" + + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" +) + +// Error is the structured error returned by Cloud Spanner client. +type Error struct { + // Code is the canonical error code for describing the nature of a + // particular error. + Code codes.Code + // Desc explains more details of the error. + Desc string + // trailers are the trailers returned in the response, if any. 
+ trailers metadata.MD +} + +// Error implements error.Error. +func (e *Error) Error() string { + if e == nil { + return fmt.Sprintf("spanner: OK") + } + return fmt.Sprintf("spanner: code = %q, desc = %q", e.Code, e.Desc) +} + +// decorate decorates an existing spanner.Error with more information. +func (e *Error) decorate(info string) { + e.Desc = fmt.Sprintf("%v, %v", info, e.Desc) +} + +// spannerErrorf generates a *spanner.Error with the given error code and +// description. +func spannerErrorf(ec codes.Code, format string, args ...interface{}) error { + return &Error{ + Code: ec, + Desc: fmt.Sprintf(format, args...), + } +} + +// toSpannerError converts general Go error to *spanner.Error. +func toSpannerError(err error) error { + return toSpannerErrorWithMetadata(err, nil) +} + +// toSpannerErrorWithMetadata converts general Go error and grpc trailers to *spanner.Error. +func toSpannerErrorWithMetadata(err error, trailers metadata.MD) error { + if err == nil { + return nil + } + if se, ok := err.(*Error); ok { + se.trailers = metadata.Join(se.trailers, trailers) + return se + } + if grpc.Code(err) == codes.Unknown { + return &Error{codes.Unknown, err.Error(), trailers} + } + return &Error{grpc.Code(err), grpc.ErrorDesc(err), trailers} +} + +// ErrCode extracts the canonical error code from a Go error. +func ErrCode(err error) codes.Code { + se, ok := toSpannerError(err).(*Error) + if !ok { + return codes.Unknown + } + return se.Code +} + +// ErrDesc extracts the Cloud Spanner error description from a Go error. +func ErrDesc(err error) string { + se, ok := toSpannerError(err).(*Error) + if !ok { + return err.Error() + } + return se.Desc +} + +// errTrailers extracts the grpc trailers if present from a Go error. 
+func errTrailers(err error) metadata.MD { + se, ok := err.(*Error) + if !ok { + return nil + } + return se.trailers +} diff --git a/vendor/cloud.google.com/go/spanner/examples_test.go b/vendor/cloud.google.com/go/spanner/examples_test.go new file mode 100644 index 00000000..ed99dcad --- /dev/null +++ b/vendor/cloud.google.com/go/spanner/examples_test.go @@ -0,0 +1,420 @@ +/* +Copyright 2017 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package spanner_test + +import ( + "errors" + "fmt" + "time" + + "cloud.google.com/go/spanner" + "golang.org/x/net/context" + "google.golang.org/api/iterator" +) + +func ExampleNewClient() { + ctx := context.Background() + const myDB = "projects/my-project/instances/my-instance/database/my-db" + client, err := spanner.NewClient(ctx, myDB) + if err != nil { + // TODO: Handle error. + } + _ = client // TODO: Use client. +} + +const myDB = "projects/my-project/instances/my-instance/database/my-db" + +func ExampleNewClientWithConfig() { + ctx := context.Background() + const myDB = "projects/my-project/instances/my-instance/database/my-db" + client, err := spanner.NewClientWithConfig(ctx, myDB, spanner.ClientConfig{ + NumChannels: 10, + }) + if err != nil { + // TODO: Handle error. + } + _ = client // TODO: Use client. + client.Close() // Close client when done. +} + +func ExampleClient_Single() { + ctx := context.Background() + client, err := spanner.NewClient(ctx, myDB) + if err != nil { + // TODO: Handle error. 
+ } + iter := client.Single().Query(ctx, spanner.NewStatement("SELECT FirstName FROM Singers")) + _ = iter // TODO: iterate using Next or Do. +} + +func ExampleClient_ReadOnlyTransaction() { + ctx := context.Background() + client, err := spanner.NewClient(ctx, myDB) + if err != nil { + // TODO: Handle error. + } + t := client.ReadOnlyTransaction() + defer t.Close() + // TODO: Read with t using Read, ReadRow, ReadUsingIndex, or Query. +} + +func ExampleClient_ReadWriteTransaction() { + ctx := context.Background() + client, err := spanner.NewClient(ctx, myDB) + if err != nil { + // TODO: Handle error. + } + _, err = client.ReadWriteTransaction(ctx, func(txn *spanner.ReadWriteTransaction) error { + var balance int64 + row, err := txn.ReadRow(ctx, "Accounts", spanner.Key{"alice"}, []string{"balance"}) + if err != nil { + // This function will be called again if this is an + // IsAborted error. + return err + } + if err := row.Column(0, &balance); err != nil { + return err + } + + if balance <= 10 { + return errors.New("insufficient funds in account") + } + balance -= 10 + m := spanner.Update("Accounts", []string{"user", "balance"}, []interface{}{"alice", balance}) + txn.BufferWrite([]*spanner.Mutation{m}) + + // The buffered mutation will be committed. If the commit + // fails with an IsAborted error, this function will be called + // again. + return nil + }) + if err != nil { + // TODO: Handle error. + } +} + +func ExampleClient_Apply() { + ctx := context.Background() + client, err := spanner.NewClient(ctx, myDB) + if err != nil { + // TODO: Handle error. + } + m := spanner.Update("Users", []string{"name", "email"}, []interface{}{"alice", "a@example.com"}) + _, err = client.Apply(ctx, []*spanner.Mutation{m}) + if err != nil { + // TODO: Handle error. + } +} + +func ExampleInsert() { + m := spanner.Insert("Users", []string{"name", "email"}, []interface{}{"alice", "a@example.com"}) + _ = m // TODO: use with Client.Apply or in a ReadWriteTransaction. 
+} + +func ExampleInsertMap() { + m := spanner.InsertMap("Users", map[string]interface{}{ + "name": "alice", + "email": "a@example.com", + }) + _ = m // TODO: use with Client.Apply or in a ReadWriteTransaction. +} + +func ExampleInsertStruct() { + type User struct { + Name, Email string + } + u := User{Name: "alice", Email: "a@example.com"} + m, err := spanner.InsertStruct("Users", u) + if err != nil { + // TODO: Handle error. + } + _ = m // TODO: use with Client.Apply or in a ReadWriteTransaction. +} + +func ExampleDelete() { + m := spanner.Delete("Users", spanner.Key{"alice"}) + _ = m // TODO: use with Client.Apply or in a ReadWriteTransaction. +} + +func ExampleDeleteKeyRange() { + m := spanner.DeleteKeyRange("Users", spanner.KeyRange{ + Start: spanner.Key{"alice"}, + End: spanner.Key{"bob"}, + Kind: spanner.ClosedClosed, + }) + _ = m // TODO: use with Client.Apply or in a ReadWriteTransaction. +} + +func ExampleRowIterator_Next() { + ctx := context.Background() + client, err := spanner.NewClient(ctx, myDB) + if err != nil { + // TODO: Handle error. + } + iter := client.Single().Query(ctx, spanner.NewStatement("SELECT FirstName FROM Singers")) + defer iter.Stop() + for { + row, err := iter.Next() + if err == iterator.Done { + break + } + if err != nil { + // TODO: Handle error. + } + var firstName string + if err := row.Column(0, &firstName); err != nil { + // TODO: Handle error. + } + fmt.Println(firstName) + } +} + +func ExampleRowIterator_Do() { + ctx := context.Background() + client, err := spanner.NewClient(ctx, myDB) + if err != nil { + // TODO: Handle error. + } + iter := client.Single().Query(ctx, spanner.NewStatement("SELECT FirstName FROM Singers")) + err = iter.Do(func(r *spanner.Row) error { + var firstName string + if err := r.Column(0, &firstName); err != nil { + return err + } + fmt.Println(firstName) + return nil + }) + if err != nil { + // TODO: Handle error. 
+ } +} + +func ExampleRow_Size() { + ctx := context.Background() + client, err := spanner.NewClient(ctx, myDB) + if err != nil { + // TODO: Handle error. + } + row, err := client.Single().ReadRow(ctx, "Accounts", spanner.Key{"alice"}, []string{"name", "balance"}) + if err != nil { + // TODO: Handle error. + } + fmt.Println(row.Size()) // size is 2 +} + +func ExampleRow_ColumnName() { + ctx := context.Background() + client, err := spanner.NewClient(ctx, myDB) + if err != nil { + // TODO: Handle error. + } + row, err := client.Single().ReadRow(ctx, "Accounts", spanner.Key{"alice"}, []string{"name", "balance"}) + if err != nil { + // TODO: Handle error. + } + fmt.Println(row.ColumnName(1)) // prints "balance" +} + +func ExampleRow_ColumnIndex() { + ctx := context.Background() + client, err := spanner.NewClient(ctx, myDB) + if err != nil { + // TODO: Handle error. + } + row, err := client.Single().ReadRow(ctx, "Accounts", spanner.Key{"alice"}, []string{"name", "balance"}) + if err != nil { + // TODO: Handle error. + } + index, err := row.ColumnIndex("balance") + if err != nil { + // TODO: Handle error. + } + fmt.Println(index) +} + +func ExampleRow_ColumnNames() { + ctx := context.Background() + client, err := spanner.NewClient(ctx, myDB) + if err != nil { + // TODO: Handle error. + } + row, err := client.Single().ReadRow(ctx, "Accounts", spanner.Key{"alice"}, []string{"name", "balance"}) + if err != nil { + // TODO: Handle error. + } + fmt.Println(row.ColumnNames()) +} + +func ExampleRow_ColumnByName() { + ctx := context.Background() + client, err := spanner.NewClient(ctx, myDB) + if err != nil { + // TODO: Handle error. + } + row, err := client.Single().ReadRow(ctx, "Accounts", spanner.Key{"alice"}, []string{"name", "balance"}) + if err != nil { + // TODO: Handle error. + } + var balance int64 + if err := row.ColumnByName("balance", &balance); err != nil { + // TODO: Handle error. 
+ } + fmt.Println(balance) +} + +func ExampleRow_Columns() { + ctx := context.Background() + client, err := spanner.NewClient(ctx, myDB) + if err != nil { + // TODO: Handle error. + } + row, err := client.Single().ReadRow(ctx, "Accounts", spanner.Key{"alice"}, []string{"name", "balance"}) + if err != nil { + // TODO: Handle error. + } + var name string + var balance int64 + if err := row.Columns(&name, &balance); err != nil { + // TODO: Handle error. + } + fmt.Println(name, balance) +} + +func ExampleRow_ToStruct() { + ctx := context.Background() + client, err := spanner.NewClient(ctx, myDB) + if err != nil { + // TODO: Handle error. + } + row, err := client.Single().ReadRow(ctx, "Accounts", spanner.Key{"alice"}, []string{"name", "balance"}) + if err != nil { + // TODO: Handle error. + } + + type Account struct { + Name string + Balance int64 + } + + var acct Account + if err := row.ToStruct(&acct); err != nil { + // TODO: Handle error. + } + fmt.Println(acct) +} + +func ExampleReadOnlyTransaction_Read() { + ctx := context.Background() + client, err := spanner.NewClient(ctx, myDB) + if err != nil { + // TODO: Handle error. + } + iter := client.Single().Read(ctx, "Users", + spanner.Keys(spanner.Key{"alice"}, spanner.Key{"bob"}), + []string{"name", "email"}) + _ = iter // TODO: iterate using Next or Do. +} + +func ExampleReadOnlyTransaction_ReadUsingIndex() { + ctx := context.Background() + client, err := spanner.NewClient(ctx, myDB) + if err != nil { + // TODO: Handle error. + } + iter := client.Single().ReadUsingIndex(ctx, "Users", + "UsersByEmail", + spanner.Keys(spanner.Key{"a@example.com"}, spanner.Key{"b@example.com"}), + []string{"name", "email"}) + _ = iter // TODO: iterate using Next or Do. +} + +func ExampleReadOnlyTransaction_ReadRow() { + ctx := context.Background() + client, err := spanner.NewClient(ctx, myDB) + if err != nil { + // TODO: Handle error. 
+ } + row, err := client.Single().ReadRow(ctx, "Users", spanner.Key{"alice"}, + []string{"name", "email"}) + if err != nil { + // TODO: Handle error. + } + _ = row // TODO: use row +} + +func ExampleReadOnlyTransaction_Query() { + ctx := context.Background() + client, err := spanner.NewClient(ctx, myDB) + if err != nil { + // TODO: Handle error. + } + iter := client.Single().Query(ctx, spanner.NewStatement("SELECT FirstName FROM Singers")) + _ = iter // TODO: iterate using Next or Do. +} + +func ExampleNewStatement() { + stmt := spanner.NewStatement("SELECT FirstName, LastName FROM SINGERS WHERE LastName >= @start") + stmt.Params["start"] = "Dylan" + // TODO: Use stmt in Query. +} + +func ExampleNewStatement_structLiteral() { + stmt := spanner.Statement{ + SQL: "SELECT FirstName, LastName FROM SINGERS WHERE LastName >= @start", + Params: map[string]interface{}{"start": "Dylan"}, + } + _ = stmt // TODO: Use stmt in Query. +} + +func ExampleReadOnlyTransaction_Timestamp() { + ctx := context.Background() + client, err := spanner.NewClient(ctx, myDB) + if err != nil { + // TODO: Handle error. + } + txn := client.Single() + row, err := txn.ReadRow(ctx, "Users", spanner.Key{"alice"}, + []string{"name", "email"}) + if err != nil { + // TODO: Handle error. + } + readTimestamp, err := txn.Timestamp() + if err != nil { + // TODO: Handle error. + } + fmt.Println("read happened at", readTimestamp) + _ = row // TODO: use row +} + +func ExampleReadOnlyTransaction_WithTimestampBound() { + ctx := context.Background() + client, err := spanner.NewClient(ctx, myDB) + if err != nil { + // TODO: Handle error. + } + txn := client.Single().WithTimestampBound(spanner.MaxStaleness(30 * time.Second)) + row, err := txn.ReadRow(ctx, "Users", spanner.Key{"alice"}, []string{"name", "email"}) + if err != nil { + // TODO: Handle error. + } + _ = row // TODO: use row + readTimestamp, err := txn.Timestamp() + if err != nil { + // TODO: Handle error. 
+ } + fmt.Println("read happened at", readTimestamp) +} diff --git a/vendor/cloud.google.com/go/spanner/internal/testutil/mockclient.go b/vendor/cloud.google.com/go/spanner/internal/testutil/mockclient.go new file mode 100644 index 00000000..f278c7cc --- /dev/null +++ b/vendor/cloud.google.com/go/spanner/internal/testutil/mockclient.go @@ -0,0 +1,355 @@ +/* +Copyright 2017 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package testutil + +import ( + "errors" + "fmt" + "reflect" + "sync" + "testing" + "time" + + "golang.org/x/net/context" + + "github.com/golang/protobuf/ptypes/empty" + proto3 "github.com/golang/protobuf/ptypes/struct" + pbt "github.com/golang/protobuf/ptypes/timestamp" + + sppb "google.golang.org/genproto/googleapis/spanner/v1" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" +) + +// Action is a mocked RPC activity that MockCloudSpannerClient will take. +type Action struct { + method string + err error +} + +// NewAction creates Action objects. +func NewAction(m string, e error) Action { + return Action{m, e} +} + +// MockCloudSpannerClient is a mock implementation of sppb.SpannerClient. +type MockCloudSpannerClient struct { + mu sync.Mutex + t *testing.T + // Live sessions on the client. + sessions map[string]bool + // Expected set of actions that will be executed by the client. 
+ actions []Action + // Session ping history + pings []string + // Injected error, will be returned by all APIs + injErr map[string]error + // nice client will not fail on any request + nice bool +} + +// NewMockCloudSpannerClient creates new MockCloudSpannerClient instance. +func NewMockCloudSpannerClient(t *testing.T, acts ...Action) *MockCloudSpannerClient { + mc := &MockCloudSpannerClient{t: t, sessions: map[string]bool{}, injErr: map[string]error{}} + mc.SetActions(acts...) + return mc +} + +// MakeNice makes this a nice mock which will not fail on any request. +func (m *MockCloudSpannerClient) MakeNice() { + m.mu.Lock() + defer m.mu.Unlock() + m.nice = true +} + +// MakeStrict makes this a strict mock which will fail on any unexpected request. +func (m *MockCloudSpannerClient) MakeStrict() { + m.mu.Lock() + defer m.mu.Unlock() + m.nice = false +} + +// InjectError injects a global error that will be returned by all APIs regardless of +// the actions array. +func (m *MockCloudSpannerClient) InjectError(method string, err error) { + m.mu.Lock() + defer m.mu.Unlock() + m.injErr[method] = err +} + +// SetActions sets the new set of expected actions to MockCloudSpannerClient. +func (m *MockCloudSpannerClient) SetActions(acts ...Action) { + m.mu.Lock() + defer m.mu.Unlock() + m.actions = []Action{} + for _, act := range acts { + m.actions = append(m.actions, act) + } +} + +// DumpPings dumps the ping history. +func (m *MockCloudSpannerClient) DumpPings() []string { + m.mu.Lock() + defer m.mu.Unlock() + return append([]string(nil), m.pings...) +} + +// DumpSessions dumps the internal session table. +func (m *MockCloudSpannerClient) DumpSessions() map[string]bool { + m.mu.Lock() + defer m.mu.Unlock() + st := map[string]bool{} + for s, v := range m.sessions { + st[s] = v + } + return st +} + +// CreateSession is a placeholder for SpannerClient.CreateSession. 
+func (m *MockCloudSpannerClient) CreateSession(c context.Context, r *sppb.CreateSessionRequest, opts ...grpc.CallOption) (*sppb.Session, error) { + m.mu.Lock() + defer m.mu.Unlock() + if err := m.injErr["CreateSession"]; err != nil { + return nil, err + } + s := &sppb.Session{} + if r.Database != "mockdb" { + // Reject other databases + return s, grpc.Errorf(codes.NotFound, fmt.Sprintf("database not found: %v", r.Database)) + } + // Generate & record session name. + s.Name = fmt.Sprintf("mockdb-%v", time.Now().UnixNano()) + m.sessions[s.Name] = true + return s, nil +} + +// GetSession is a placeholder for SpannerClient.GetSession. +func (m *MockCloudSpannerClient) GetSession(c context.Context, r *sppb.GetSessionRequest, opts ...grpc.CallOption) (*sppb.Session, error) { + m.mu.Lock() + defer m.mu.Unlock() + if err := m.injErr["GetSession"]; err != nil { + return nil, err + } + m.pings = append(m.pings, r.Name) + if _, ok := m.sessions[r.Name]; !ok { + return nil, grpc.Errorf(codes.NotFound, fmt.Sprintf("Session not found: %v", r.Name)) + } + return &sppb.Session{Name: r.Name}, nil +} + +// DeleteSession is a placeholder for SpannerClient.DeleteSession. +func (m *MockCloudSpannerClient) DeleteSession(c context.Context, r *sppb.DeleteSessionRequest, opts ...grpc.CallOption) (*empty.Empty, error) { + m.mu.Lock() + defer m.mu.Unlock() + if err := m.injErr["DeleteSession"]; err != nil { + return nil, err + } + if _, ok := m.sessions[r.Name]; !ok { + // Session not found. + return &empty.Empty{}, grpc.Errorf(codes.NotFound, fmt.Sprintf("Session not found: %v", r.Name)) + } + // Delete session from in-memory table. + delete(m.sessions, r.Name) + return &empty.Empty{}, nil +} + +// ExecuteSql is a placeholder for SpannerClient.ExecuteSql. 
+func (m *MockCloudSpannerClient) ExecuteSql(c context.Context, r *sppb.ExecuteSqlRequest, opts ...grpc.CallOption) (*sppb.ResultSet, error) { + return nil, errors.New("Unimplemented") +} + +// ExecuteStreamingSql is a mock implementation of SpannerClient.ExecuteStreamingSql. +func (m *MockCloudSpannerClient) ExecuteStreamingSql(c context.Context, r *sppb.ExecuteSqlRequest, opts ...grpc.CallOption) (sppb.Spanner_ExecuteStreamingSqlClient, error) { + m.mu.Lock() + defer m.mu.Unlock() + if err := m.injErr["ExecuteStreamingSql"]; err != nil { + return nil, err + } + if len(m.actions) == 0 { + m.t.Fatalf("unexpected ExecuteStreamingSql executed") + } + act := m.actions[0] + m.actions = m.actions[1:] + if act.method != "ExecuteStreamingSql" { + m.t.Fatalf("unexpected ExecuteStreamingSql call, want action: %v", act) + } + wantReq := &sppb.ExecuteSqlRequest{ + Session: "mocksession", + Transaction: &sppb.TransactionSelector{ + Selector: &sppb.TransactionSelector_SingleUse{ + SingleUse: &sppb.TransactionOptions{ + Mode: &sppb.TransactionOptions_ReadOnly_{ + ReadOnly: &sppb.TransactionOptions_ReadOnly{ + TimestampBound: &sppb.TransactionOptions_ReadOnly_Strong{ + Strong: true, + }, + ReturnReadTimestamp: false, + }, + }, + }, + }, + }, + Sql: "mockquery", + Params: &proto3.Struct{ + Fields: map[string]*proto3.Value{"var1": &proto3.Value{Kind: &proto3.Value_StringValue{StringValue: "abc"}}}, + }, + ParamTypes: map[string]*sppb.Type{"var1": &sppb.Type{Code: sppb.TypeCode_STRING}}, + } + if !reflect.DeepEqual(r, wantReq) { + return nil, fmt.Errorf("got query request: %v, want: %v", r, wantReq) + } + if act.err != nil { + return nil, act.err + } + return nil, errors.New("query never succeeds on mock client") +} + +// Read is a placeholder for SpannerClient.Read. 
+func (m *MockCloudSpannerClient) Read(c context.Context, r *sppb.ReadRequest, opts ...grpc.CallOption) (*sppb.ResultSet, error) { + m.t.Fatalf("Read is unimplemented") + return nil, errors.New("Unimplemented") +} + +// StreamingRead is a placeholder for SpannerClient.StreamingRead. +func (m *MockCloudSpannerClient) StreamingRead(c context.Context, r *sppb.ReadRequest, opts ...grpc.CallOption) (sppb.Spanner_StreamingReadClient, error) { + m.mu.Lock() + defer m.mu.Unlock() + if err := m.injErr["StreamingRead"]; err != nil { + return nil, err + } + if len(m.actions) == 0 { + m.t.Fatalf("unexpected StreamingRead executed") + } + act := m.actions[0] + m.actions = m.actions[1:] + if act.method != "StreamingRead" && act.method != "StreamingIndexRead" { + m.t.Fatalf("unexpected read call, want action: %v", act) + } + wantReq := &sppb.ReadRequest{ + Session: "mocksession", + Transaction: &sppb.TransactionSelector{ + Selector: &sppb.TransactionSelector_SingleUse{ + SingleUse: &sppb.TransactionOptions{ + Mode: &sppb.TransactionOptions_ReadOnly_{ + ReadOnly: &sppb.TransactionOptions_ReadOnly{ + TimestampBound: &sppb.TransactionOptions_ReadOnly_Strong{ + Strong: true, + }, + ReturnReadTimestamp: false, + }, + }, + }, + }, + }, + Table: "t_mock", + Columns: []string{"col1", "col2"}, + KeySet: &sppb.KeySet{ + []*proto3.ListValue{ + &proto3.ListValue{ + Values: []*proto3.Value{ + &proto3.Value{Kind: &proto3.Value_StringValue{StringValue: "foo"}}, + }, + }, + }, + []*sppb.KeyRange{}, + false, + }, + } + if act.method == "StreamingIndexRead" { + wantReq.Index = "idx1" + } + if !reflect.DeepEqual(r, wantReq) { + return nil, fmt.Errorf("got query request: %v, want: %v", r, wantReq) + } + if act.err != nil { + return nil, act.err + } + return nil, errors.New("read never succeeds on mock client") +} + +// BeginTransaction is a placeholder for SpannerClient.BeginTransaction. 
+func (m *MockCloudSpannerClient) BeginTransaction(c context.Context, r *sppb.BeginTransactionRequest, opts ...grpc.CallOption) (*sppb.Transaction, error) { + m.mu.Lock() + defer m.mu.Unlock() + if !m.nice { + if err := m.injErr["BeginTransaction"]; err != nil { + return nil, err + } + if len(m.actions) == 0 { + m.t.Fatalf("unexpected Begin executed") + } + act := m.actions[0] + m.actions = m.actions[1:] + if act.method != "Begin" { + m.t.Fatalf("unexpected Begin call, want action: %v", act) + } + if act.err != nil { + return nil, act.err + } + } + resp := &sppb.Transaction{Id: []byte("transaction-1")} + if _, ok := r.Options.Mode.(*sppb.TransactionOptions_ReadOnly_); ok { + resp.ReadTimestamp = &pbt.Timestamp{Seconds: 3, Nanos: 4} + } + return resp, nil +} + +// Commit is a placeholder for SpannerClient.Commit. +func (m *MockCloudSpannerClient) Commit(c context.Context, r *sppb.CommitRequest, opts ...grpc.CallOption) (*sppb.CommitResponse, error) { + m.mu.Lock() + defer m.mu.Unlock() + if !m.nice { + if err := m.injErr["Commit"]; err != nil { + return nil, err + } + if len(m.actions) == 0 { + m.t.Fatalf("unexpected Commit executed") + } + act := m.actions[0] + m.actions = m.actions[1:] + if act.method != "Commit" { + m.t.Fatalf("unexpected Commit call, want action: %v", act) + } + if act.err != nil { + return nil, act.err + } + } + return &sppb.CommitResponse{CommitTimestamp: &pbt.Timestamp{Seconds: 1, Nanos: 2}}, nil +} + +// Rollback is a placeholder for SpannerClient.Rollback. 
+func (m *MockCloudSpannerClient) Rollback(c context.Context, r *sppb.RollbackRequest, opts ...grpc.CallOption) (*empty.Empty, error) { + m.mu.Lock() + defer m.mu.Unlock() + if !m.nice { + if err := m.injErr["Rollback"]; err != nil { + return nil, err + } + if len(m.actions) == 0 { + m.t.Fatalf("unexpected Rollback executed") + } + act := m.actions[0] + m.actions = m.actions[1:] + if act.method != "Rollback" { + m.t.Fatalf("unexpected Rollback call, want action: %v", act) + } + if act.err != nil { + return nil, act.err + } + } + return nil, nil +} diff --git a/vendor/cloud.google.com/go/spanner/internal/testutil/mockserver.go b/vendor/cloud.google.com/go/spanner/internal/testutil/mockserver.go new file mode 100644 index 00000000..7a04e7f7 --- /dev/null +++ b/vendor/cloud.google.com/go/spanner/internal/testutil/mockserver.go @@ -0,0 +1,255 @@ +/* +Copyright 2017 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package testutil + +import ( + "encoding/binary" + "errors" + "fmt" + "io" + "net" + "testing" + "time" + + "golang.org/x/net/context" + + "github.com/golang/protobuf/ptypes/empty" + proto3 "github.com/golang/protobuf/ptypes/struct" + pbt "github.com/golang/protobuf/ptypes/timestamp" + + sppb "google.golang.org/genproto/googleapis/spanner/v1" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" +) + +var ( + // KvMeta is the Metadata for mocked KV table. 
+ KvMeta = sppb.ResultSetMetadata{ + RowType: &sppb.StructType{ + Fields: []*sppb.StructType_Field{ + { + Name: "Key", + Type: &sppb.Type{Code: sppb.TypeCode_STRING}, + }, + { + Name: "Value", + Type: &sppb.Type{Code: sppb.TypeCode_STRING}, + }, + }, + }, + } +) + +// MockCtlMsg encapsulates PartialResultSet/error that might be sent to +// client +type MockCtlMsg struct { + // If ResumeToken == true, mock server will generate a row with + // resume token. + ResumeToken bool + // If Err != nil, mock server will return error in RPC response. + Err error +} + +// MockCloudSpanner is a mock implementation of SpannerServer interface. +// TODO: make MockCloudSpanner a full-fleged Cloud Spanner implementation. +type MockCloudSpanner struct { + s *grpc.Server + t *testing.T + addr string + msgs chan MockCtlMsg + readTs time.Time + next int +} + +// Addr returns the listening address of mock server. +func (m *MockCloudSpanner) Addr() string { + return m.addr +} + +// AddMsg generates a new mocked row which can be received by client. +func (m *MockCloudSpanner) AddMsg(err error, resumeToken bool) { + msg := MockCtlMsg{ + ResumeToken: resumeToken, + Err: err, + } + if err == io.EOF { + close(m.msgs) + } else { + m.msgs <- msg + } +} + +// Done signals an end to a mocked stream. +func (m *MockCloudSpanner) Done() { + close(m.msgs) +} + +// CreateSession is a placeholder for SpannerServer.CreateSession. +func (m *MockCloudSpanner) CreateSession(c context.Context, r *sppb.CreateSessionRequest) (*sppb.Session, error) { + m.t.Fatalf("CreateSession is unimplemented") + return nil, errors.New("Unimplemented") +} + +// GetSession is a placeholder for SpannerServer.GetSession. +func (m *MockCloudSpanner) GetSession(c context.Context, r *sppb.GetSessionRequest) (*sppb.Session, error) { + m.t.Fatalf("GetSession is unimplemented") + return nil, errors.New("Unimplemented") +} + +// DeleteSession is a placeholder for SpannerServer.DeleteSession. 
+func (m *MockCloudSpanner) DeleteSession(c context.Context, r *sppb.DeleteSessionRequest) (*empty.Empty, error) { + m.t.Fatalf("DeleteSession is unimplemented") + return nil, errors.New("Unimplemented") +} + +// ExecuteSql is a placeholder for SpannerServer.ExecuteSql. +func (m *MockCloudSpanner) ExecuteSql(c context.Context, r *sppb.ExecuteSqlRequest) (*sppb.ResultSet, error) { + m.t.Fatalf("ExecuteSql is unimplemented") + return nil, errors.New("Unimplemented") +} + +// EncodeResumeToken return mock resume token encoding for an uint64 integer. +func EncodeResumeToken(t uint64) []byte { + rt := make([]byte, 16) + binary.PutUvarint(rt, t) + return rt +} + +// DecodeResumeToken decodes a mock resume token into an uint64 integer. +func DecodeResumeToken(t []byte) (uint64, error) { + s, n := binary.Uvarint(t) + if n <= 0 { + return 0, fmt.Errorf("invalid resume token: %v", t) + } + return s, nil +} + +// ExecuteStreamingSql is a mock implementation of SpannerServer.ExecuteStreamingSql. +func (m *MockCloudSpanner) ExecuteStreamingSql(r *sppb.ExecuteSqlRequest, s sppb.Spanner_ExecuteStreamingSqlServer) error { + switch r.Sql { + case "SELECT * from t_unavailable": + return grpc.Errorf(codes.Unavailable, "mock table unavailable") + case "SELECT t.key key, t.value value FROM t_mock t": + if r.ResumeToken != nil { + s, err := DecodeResumeToken(r.ResumeToken) + if err != nil { + return err + } + m.next = int(s) + 1 + } + for { + msg, more := <-m.msgs + if !more { + break + } + if msg.Err == nil { + var rt []byte + if msg.ResumeToken { + rt = EncodeResumeToken(uint64(m.next)) + } + meta := KvMeta + meta.Transaction = &sppb.Transaction{ + ReadTimestamp: &pbt.Timestamp{ + Seconds: m.readTs.Unix(), + Nanos: int32(m.readTs.Nanosecond()), + }, + } + err := s.Send(&sppb.PartialResultSet{ + Metadata: &meta, + Values: []*proto3.Value{ + {Kind: &proto3.Value_StringValue{StringValue: fmt.Sprintf("foo-%02d", m.next)}}, + {Kind: &proto3.Value_StringValue{StringValue: 
fmt.Sprintf("bar-%02d", m.next)}}, + }, + ResumeToken: rt, + }) + m.next = m.next + 1 + if err != nil { + return err + } + continue + } + return msg.Err + } + return nil + default: + return fmt.Errorf("unsupported SQL: %v", r.Sql) + } +} + +// Read is a placeholder for SpannerServer.Read. +func (m *MockCloudSpanner) Read(c context.Context, r *sppb.ReadRequest) (*sppb.ResultSet, error) { + m.t.Fatalf("Read is unimplemented") + return nil, errors.New("Unimplemented") +} + +// StreamingRead is a placeholder for SpannerServer.StreamingRead. +func (m *MockCloudSpanner) StreamingRead(r *sppb.ReadRequest, s sppb.Spanner_StreamingReadServer) error { + m.t.Fatalf("StreamingRead is unimplemented") + return errors.New("Unimplemented") +} + +// BeginTransaction is a placeholder for SpannerServer.BeginTransaction. +func (m *MockCloudSpanner) BeginTransaction(c context.Context, r *sppb.BeginTransactionRequest) (*sppb.Transaction, error) { + m.t.Fatalf("BeginTransaction is unimplemented") + return nil, errors.New("Unimplemented") +} + +// Commit is a placeholder for SpannerServer.Commit. +func (m *MockCloudSpanner) Commit(c context.Context, r *sppb.CommitRequest) (*sppb.CommitResponse, error) { + m.t.Fatalf("Commit is unimplemented") + return nil, errors.New("Unimplemented") +} + +// Rollback is a placeholder for SpannerServer.Rollback. +func (m *MockCloudSpanner) Rollback(c context.Context, r *sppb.RollbackRequest) (*empty.Empty, error) { + m.t.Fatalf("Rollback is unimplemented") + return nil, errors.New("Unimplemented") +} + +// Serve runs a MockCloudSpanner listening on a random localhost address. 
+func (m *MockCloudSpanner) Serve() { + m.s = grpc.NewServer() + if m.addr == "" { + m.addr = "localhost:0" + } + lis, err := net.Listen("tcp", m.addr) + if err != nil { + m.t.Fatalf("Failed to listen: %v", err) + } + go m.s.Serve(lis) + _, port, err := net.SplitHostPort(lis.Addr().String()) + if err != nil { + m.t.Fatalf("Failed to parse listener address: %v", err) + } + sppb.RegisterSpannerServer(m.s, m) + m.addr = "localhost:" + port +} + +// Stop terminates MockCloudSpanner and closes the serving port. +func (m *MockCloudSpanner) Stop() { + m.s.Stop() +} + +// NewMockCloudSpanner creates a new MockCloudSpanner instance. +func NewMockCloudSpanner(t *testing.T, ts time.Time) *MockCloudSpanner { + mcs := &MockCloudSpanner{ + t: t, + msgs: make(chan MockCtlMsg, 1000), + readTs: ts, + } + return mcs +} diff --git a/vendor/cloud.google.com/go/spanner/key.go b/vendor/cloud.google.com/go/spanner/key.go new file mode 100644 index 00000000..3b688637 --- /dev/null +++ b/vendor/cloud.google.com/go/spanner/key.go @@ -0,0 +1,321 @@ +/* +Copyright 2017 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package spanner + +import ( + "bytes" + "fmt" + "time" + + "google.golang.org/grpc/codes" + + "cloud.google.com/go/civil" + proto3 "github.com/golang/protobuf/ptypes/struct" + sppb "google.golang.org/genproto/googleapis/spanner/v1" +) + +// A Key can be either a Cloud Spanner row's primary key or a secondary index key. 
+// It is essentially an interface{} array, which represents a set of Cloud Spanner +// columns. A Key type has the following usages: +// +// - Used as primary key which uniquely identifies a Cloud Spanner row. +// - Used as secondary index key which maps to a set of Cloud Spanner rows +// indexed under it. +// - Used as endpoints of primary key/secondary index ranges, +// see also the KeyRange type. +// +// Rows that are identified by the Key type are outputs of read operation or targets of +// delete operation in a mutation. Note that for Insert/Update/InsertOrUpdate/Update +// mutation types, although they don't require a primary key explicitly, the column list +// provided must contain enough columns that can comprise a primary key. +// +// Keys are easy to construct. For example, suppose you have a table with a +// primary key of username and product ID. To make a key for this table: +// +// key := spanner.Key{"john", 16} +// +// See the description of Row and Mutation types for how Go types are +// mapped to Cloud Spanner types. For convenience, Key type supports a wide range +// of Go types: +// - int, int8, int16, int32, int64, and NullInt64 are mapped to Cloud Spanner's INT64 type. +// - uint8, uint16 and uint32 are also mapped to Cloud Spanner's INT64 type. +// - float32, float64, NullFloat64 are mapped to Cloud Spanner's FLOAT64 type. +// - bool and NullBool are mapped to Cloud Spanner's BOOL type. +// - []byte is mapped to Cloud Spanner's BYTES type. +// - string and NullString are mapped to Cloud Spanner's STRING type. +// - time.Time and NullTime are mapped to Cloud Spanner's TIMESTAMP type. +// - civil.Date and NullDate are mapped to Cloud Spanner's DATE type. +type Key []interface{} + +// errInvdKeyPartType returns error for unsupported key part type. 
+func errInvdKeyPartType(part interface{}) error { + return spannerErrorf(codes.InvalidArgument, "key part has unsupported type %T", part) +} + +// keyPartValue converts a part of the Key (which is a valid Cloud Spanner type) +// into a proto3.Value. Used for encoding Key type into protobuf. +func keyPartValue(part interface{}) (pb *proto3.Value, err error) { + switch v := part.(type) { + case int: + pb, _, err = encodeValue(int64(v)) + case int8: + pb, _, err = encodeValue(int64(v)) + case int16: + pb, _, err = encodeValue(int64(v)) + case int32: + pb, _, err = encodeValue(int64(v)) + case uint8: + pb, _, err = encodeValue(int64(v)) + case uint16: + pb, _, err = encodeValue(int64(v)) + case uint32: + pb, _, err = encodeValue(int64(v)) + case float32: + pb, _, err = encodeValue(float64(v)) + case int64, float64, NullInt64, NullFloat64, bool, NullBool, []byte, string, NullString, time.Time, civil.Date, NullTime, NullDate: + pb, _, err = encodeValue(v) + default: + return nil, errInvdKeyPartType(v) + } + return pb, err +} + +// proto converts a spanner.Key into a proto3.ListValue. +func (key Key) proto() (*proto3.ListValue, error) { + lv := &proto3.ListValue{} + lv.Values = make([]*proto3.Value, 0, len(key)) + for _, part := range key { + v, err := keyPartValue(part) + if err != nil { + return nil, err + } + lv.Values = append(lv.Values, v) + } + return lv, nil +} + +// String implements fmt.Stringer for Key. For string, []byte and NullString, it +// prints the uninterpreted bytes of their contents, leaving caller with the +// opportunity to escape the output. +func (key Key) String() string { + b := &bytes.Buffer{} + fmt.Fprint(b, "(") + for i, part := range []interface{}(key) { + if i != 0 { + fmt.Fprint(b, ",") + } + switch v := part.(type) { + case int, int8, int16, int32, int64, uint, uint8, uint16, uint32, float32, float64, bool: + // Use %v to print numeric types and bool. 
+ fmt.Fprintf(b, "%v", v) + case string: + fmt.Fprintf(b, "%q", v) + case []byte: + if v != nil { + fmt.Fprintf(b, "%q", v) + } else { + fmt.Fprint(b, "") + } + case NullInt64, NullFloat64, NullBool, NullString, NullTime, NullDate: + // The above types implement fmt.Stringer. + fmt.Fprintf(b, "%s", v) + case civil.Date: + fmt.Fprintf(b, "%q", v) + case time.Time: + fmt.Fprintf(b, "%q", v.Format(time.RFC3339Nano)) + default: + fmt.Fprintf(b, "%v", v) + } + } + fmt.Fprint(b, ")") + return b.String() +} + +// KeyRangeKind describes the kind of interval represented by a KeyRange: +// whether it is open or closed on the left and right. +type KeyRangeKind int + +const ( + // ClosedOpen is closed on the left and open on the right: the Start + // key is included, the End key is excluded. + ClosedOpen KeyRangeKind = iota + + // ClosedClosed is closed on the left and the right: both keys are included. + ClosedClosed + + // OpenClosed is open on the left and closed on the right: the Start + // key is excluded, the End key is included. + OpenClosed + + // OpenOpen is open on the left and the right: neither key is included. + OpenOpen +) + +// A KeyRange represents a range of rows in a table or index. +// +// A range has a Start key and an End key. IncludeStart and IncludeEnd +// indicate whether the Start and End keys are included in the range. +// +// For example, consider the following table definition: +// +// CREATE TABLE UserEvents ( +// UserName STRING(MAX), +// EventDate STRING(10), +// ) PRIMARY KEY(UserName, EventDate); +// +// The following keys name rows in this table: +// +// spanner.Key{"Bob", "2014-09-23"} +// spanner.Key{"Alfred", "2015-06-12"} +// +// Since the UserEvents table's PRIMARY KEY clause names two columns, each +// UserEvents key has two elements; the first is the UserName, and the second +// is the EventDate. 
+// +// Key ranges with multiple components are interpreted lexicographically by +// component using the table or index key's declared sort order. For example, +// the following range returns all events for user "Bob" that occurred in the +// year 2015: +// +// spanner.KeyRange{ +// Start: spanner.Key{"Bob", "2015-01-01"}, +// End: spanner.Key{"Bob", "2015-12-31"}, +// Kind: ClosedClosed, +// } +// +// Start and end keys can omit trailing key components. This affects the +// inclusion and exclusion of rows that exactly match the provided key +// components: if IncludeStart is true, then rows that exactly match the +// provided components of the Start key are included; if IncludeStart is false +// then rows that exactly match are not included. IncludeEnd and End key +// behave in the same fashion. +// +// For example, the following range includes all events for "Bob" that occurred +// during and after the year 2000: +// +// spanner.KeyRange{ +// Start: spanner.Key{"Bob", "2000-01-01"}, +// End: spanner.Key{"Bob"}, +// Kind: ClosedClosed, +// } +// +// The next example retrieves all events for "Bob": +// +// spanner.PrefixRange(spanner.Key{"Bob"}) +// +// To retrieve events before the year 2000: +// +// spanner.KeyRange{ +// Start: spanner.Key{"Bob"}, +// End: spanner.Key{"Bob", "2000-01-01"}, +// Kind: ClosedOpen, +// } +// +// Although we specified a Kind for this KeyRange, we didn't need to, because +// the default is ClosedOpen. In later examples we'll omit Kind if it is +// ClosedOpen. 
+// +// The following range includes all rows in a table or under a +// index: +// +// spanner.AllKeys() +// +// This range returns all users whose UserName begins with any +// character from A to C: +// +// spanner.KeyRange{ +// Start: spanner.Key{"A"}, +// End: spanner.Key{"D"}, +// } +// +// This range returns all users whose UserName begins with B: +// +// spanner.KeyRange{ +// Start: spanner.Key{"B"}, +// End: spanner.Key{"C"}, +// } +// +// Key ranges honor column sort order. For example, suppose a table is defined +// as follows: +// +// CREATE TABLE DescendingSortedTable { +// Key INT64, +// ... +// ) PRIMARY KEY(Key DESC); +// +// The following range retrieves all rows with key values between 1 and 100 +// inclusive: +// +// spanner.KeyRange{ +// Start: spanner.Key{100}, +// End: spanner.Key{1}, +// Kind: ClosedClosed, +// } +// +// Note that 100 is passed as the start, and 1 is passed as the end, because +// Key is a descending column in the schema. +type KeyRange struct { + // Start specifies the left boundary of the key range; End specifies + // the right boundary of the key range. + Start, End Key + + // Kind describes whether the boundaries of the key range include + // their keys. + Kind KeyRangeKind +} + +// String implements fmt.Stringer for KeyRange type. +func (r KeyRange) String() string { + var left, right string + switch r.Kind { + case ClosedClosed: + left, right = "[", "]" + case ClosedOpen: + left, right = "[", ")" + case OpenClosed: + left, right = "(", "]" + case OpenOpen: + left, right = "(", ")" + default: + left, right = "?", "?" + } + return fmt.Sprintf("%s%s,%s%s", left, r.Start, r.End, right) +} + +// proto converts KeyRange into sppb.KeyRange. 
+func (r KeyRange) proto() (*sppb.KeyRange, error) { + var err error + var start, end *proto3.ListValue + pb := &sppb.KeyRange{} + if start, err = r.Start.proto(); err != nil { + return nil, err + } + if end, err = r.End.proto(); err != nil { + return nil, err + } + if r.Kind == ClosedClosed || r.Kind == ClosedOpen { + pb.StartKeyType = &sppb.KeyRange_StartClosed{StartClosed: start} + } else { + pb.StartKeyType = &sppb.KeyRange_StartOpen{StartOpen: start} + } + if r.Kind == ClosedClosed || r.Kind == OpenClosed { + pb.EndKeyType = &sppb.KeyRange_EndClosed{EndClosed: end} + } else { + pb.EndKeyType = &sppb.KeyRange_EndOpen{EndOpen: end} + } + return pb, nil +} diff --git a/vendor/cloud.google.com/go/spanner/key_test.go b/vendor/cloud.google.com/go/spanner/key_test.go new file mode 100644 index 00000000..66b957d3 --- /dev/null +++ b/vendor/cloud.google.com/go/spanner/key_test.go @@ -0,0 +1,253 @@ +/* +Copyright 2017 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package spanner + +import ( + "reflect" + "testing" + "time" + + "cloud.google.com/go/civil" + proto3 "github.com/golang/protobuf/ptypes/struct" + sppb "google.golang.org/genproto/googleapis/spanner/v1" +) + +// Test Key.String() and Key.proto(). 
+func TestKey(t *testing.T) { + tm, _ := time.Parse(time.RFC3339Nano, "2016-11-15T15:04:05.999999999Z") + dt, _ := civil.ParseDate("2016-11-15") + for _, test := range []struct { + k Key + wantProto *proto3.ListValue + wantStr string + }{ + { + k: Key{int(1)}, + wantProto: listValueProto(stringProto("1")), + wantStr: "(1)", + }, + { + k: Key{int8(1)}, + wantProto: listValueProto(stringProto("1")), + wantStr: "(1)", + }, + { + k: Key{int16(1)}, + wantProto: listValueProto(stringProto("1")), + wantStr: "(1)", + }, + { + k: Key{int32(1)}, + wantProto: listValueProto(stringProto("1")), + wantStr: "(1)", + }, + { + k: Key{int64(1)}, + wantProto: listValueProto(stringProto("1")), + wantStr: "(1)", + }, + { + k: Key{uint8(1)}, + wantProto: listValueProto(stringProto("1")), + wantStr: "(1)", + }, + { + k: Key{uint16(1)}, + wantProto: listValueProto(stringProto("1")), + wantStr: "(1)", + }, + { + k: Key{uint32(1)}, + wantProto: listValueProto(stringProto("1")), + wantStr: "(1)", + }, + { + k: Key{true}, + wantProto: listValueProto(boolProto(true)), + wantStr: "(true)", + }, + { + k: Key{float32(1.5)}, + wantProto: listValueProto(floatProto(1.5)), + wantStr: "(1.5)", + }, + { + k: Key{float64(1.5)}, + wantProto: listValueProto(floatProto(1.5)), + wantStr: "(1.5)", + }, + { + k: Key{"value"}, + wantProto: listValueProto(stringProto("value")), + wantStr: `("value")`, + }, + { + k: Key{[]byte(nil)}, + wantProto: listValueProto(nullProto()), + wantStr: "()", + }, + { + k: Key{[]byte{}}, + wantProto: listValueProto(stringProto("")), + wantStr: `("")`, + }, + { + k: Key{tm}, + wantProto: listValueProto(stringProto("2016-11-15T15:04:05.999999999Z")), + wantStr: `("2016-11-15T15:04:05.999999999Z")`, + }, + {k: Key{dt}, + wantProto: listValueProto(stringProto("2016-11-15")), + wantStr: `("2016-11-15")`, + }, + { + k: Key{[]byte("value")}, + wantProto: listValueProto(bytesProto([]byte("value"))), + wantStr: `("value")`, + }, + { + k: Key{NullInt64{1, true}}, + wantProto: 
listValueProto(stringProto("1")), + wantStr: "(1)", + }, + { + k: Key{NullInt64{2, false}}, + wantProto: listValueProto(nullProto()), + wantStr: "()", + }, + { + k: Key{NullFloat64{1.5, true}}, + wantProto: listValueProto(floatProto(1.5)), + wantStr: "(1.5)", + }, + { + k: Key{NullFloat64{2.0, false}}, + wantProto: listValueProto(nullProto()), + wantStr: "()", + }, + { + k: Key{NullBool{true, true}}, + wantProto: listValueProto(boolProto(true)), + wantStr: "(true)", + }, + { + k: Key{NullBool{true, false}}, + wantProto: listValueProto(nullProto()), + wantStr: "()", + }, + { + k: Key{NullString{"value", true}}, + wantProto: listValueProto(stringProto("value")), + wantStr: `("value")`, + }, + { + k: Key{NullString{"value", false}}, + wantProto: listValueProto(nullProto()), + wantStr: "()", + }, + { + k: Key{NullTime{tm, true}}, + wantProto: listValueProto(timeProto(tm)), + wantStr: `("2016-11-15T15:04:05.999999999Z")`, + }, + + { + k: Key{NullTime{time.Now(), false}}, + wantProto: listValueProto(nullProto()), + wantStr: "()", + }, + { + k: Key{NullDate{dt, true}}, + wantProto: listValueProto(dateProto(dt)), + wantStr: `("2016-11-15")`, + }, + { + k: Key{NullDate{civil.Date{}, false}}, + wantProto: listValueProto(nullProto()), + wantStr: "()", + }, + { + k: Key{int(1), NullString{"value", false}, "value", 1.5, true}, + wantProto: listValueProto(stringProto("1"), nullProto(), stringProto("value"), floatProto(1.5), boolProto(true)), + wantStr: `(1,,"value",1.5,true)`, + }, + } { + if got := test.k.String(); got != test.wantStr { + t.Errorf("%v.String() = %v, want %v", test.k, got, test.wantStr) + } + gotProto, err := test.k.proto() + if err != nil { + t.Errorf("%v.proto() returns error %v; want nil error", test.k, err) + } + if !reflect.DeepEqual(gotProto, test.wantProto) { + t.Errorf("%v.proto() = \n%v\nwant:\n%v", test.k, gotProto, test.wantProto) + } + } +} + +// Test KeyRange.String() and KeyRange.proto(). 
+func TestKeyRange(t *testing.T) { + for _, test := range []struct { + kr KeyRange + wantProto *sppb.KeyRange + wantStr string + }{ + { + kr: KeyRange{Key{"A"}, Key{"D"}, OpenOpen}, + wantProto: &sppb.KeyRange{ + &sppb.KeyRange_StartOpen{listValueProto(stringProto("A"))}, + &sppb.KeyRange_EndOpen{listValueProto(stringProto("D"))}, + }, + wantStr: `(("A"),("D"))`, + }, + { + kr: KeyRange{Key{1}, Key{10}, OpenClosed}, + wantProto: &sppb.KeyRange{ + &sppb.KeyRange_StartOpen{listValueProto(stringProto("1"))}, + &sppb.KeyRange_EndClosed{listValueProto(stringProto("10"))}, + }, + wantStr: "((1),(10)]", + }, + { + kr: KeyRange{Key{1.5, 2.1, 0.2}, Key{1.9, 0.7}, ClosedOpen}, + wantProto: &sppb.KeyRange{ + &sppb.KeyRange_StartClosed{listValueProto(floatProto(1.5), floatProto(2.1), floatProto(0.2))}, + &sppb.KeyRange_EndOpen{listValueProto(floatProto(1.9), floatProto(0.7))}, + }, + wantStr: "[(1.5,2.1,0.2),(1.9,0.7))", + }, + { + kr: KeyRange{Key{NullInt64{1, true}}, Key{10}, ClosedClosed}, + wantProto: &sppb.KeyRange{ + &sppb.KeyRange_StartClosed{listValueProto(stringProto("1"))}, + &sppb.KeyRange_EndClosed{listValueProto(stringProto("10"))}, + }, + wantStr: "[(1),(10)]", + }, + } { + if got := test.kr.String(); got != test.wantStr { + t.Errorf("%v.String() = %v, want %v", test.kr, got, test.wantStr) + } + gotProto, err := test.kr.proto() + if err != nil { + t.Errorf("%v.proto() returns error %v; want nil error", test.kr, err) + } + if !reflect.DeepEqual(gotProto, test.wantProto) { + t.Errorf("%v.proto() = \n%v\nwant:\n%v", test.kr, gotProto.String(), test.wantProto.String()) + } + } +} diff --git a/vendor/cloud.google.com/go/spanner/keyset.go b/vendor/cloud.google.com/go/spanner/keyset.go new file mode 100644 index 00000000..28c7f054 --- /dev/null +++ b/vendor/cloud.google.com/go/spanner/keyset.go @@ -0,0 +1,108 @@ +/* +Copyright 2017 Google Inc. All Rights Reserved. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package spanner + +import ( + proto3 "github.com/golang/protobuf/ptypes/struct" + sppb "google.golang.org/genproto/googleapis/spanner/v1" +) + +// A KeySet defines a collection of Cloud Spanner keys and/or key +// ranges. All the keys are expected to be in the same table or index. The keys +// need not be sorted in any particular way. +// +// If the same key is specified multiple times in the set (for example if two +// ranges, two keys, or a key and a range overlap), the Cloud Spanner backend behaves +// as if the key were only specified once. +type KeySet struct { + // If All == true, then the KeySet names all rows of a table or + // under a index. + All bool + // Keys is a list of keys covered by KeySet, see also documentation of + // Key for details. + Keys []Key + // Ranges is a list of key ranges covered by KeySet, see also documentation of + // KeyRange for details. + Ranges []KeyRange +} + +// AllKeys returns a KeySet that represents all Keys of a table or a index. +func AllKeys() KeySet { + return KeySet{All: true} +} + +// Keys returns a KeySet for a set of keys. +func Keys(keys ...Key) KeySet { + ks := KeySet{Keys: make([]Key, len(keys))} + copy(ks.Keys, keys) + return ks +} + +// Range returns a KeySet for a range of keys. +func Range(r KeyRange) KeySet { + return KeySet{Ranges: []KeyRange{r}} +} + +// PrefixRange returns a KeySet for all keys with the given prefix, which is +// a key itself. 
+func PrefixRange(prefix Key) KeySet { + return KeySet{Ranges: []KeyRange{ + { + Start: prefix, + End: prefix, + Kind: ClosedClosed, + }, + }} +} + +// UnionKeySets unions multiple KeySets into a superset. +func UnionKeySets(keySets ...KeySet) KeySet { + s := KeySet{} + for _, ks := range keySets { + if ks.All { + return KeySet{All: true} + } + s.Keys = append(s.Keys, ks.Keys...) + s.Ranges = append(s.Ranges, ks.Ranges...) + } + return s +} + +// proto converts KeySet into sppb.KeySet, which is the protobuf +// representation of KeySet. +func (keys KeySet) proto() (*sppb.KeySet, error) { + pb := &sppb.KeySet{ + Keys: make([]*proto3.ListValue, 0, len(keys.Keys)), + Ranges: make([]*sppb.KeyRange, 0, len(keys.Ranges)), + All: keys.All, + } + for _, key := range keys.Keys { + keyProto, err := key.proto() + if err != nil { + return nil, err + } + pb.Keys = append(pb.Keys, keyProto) + } + for _, r := range keys.Ranges { + rProto, err := r.proto() + if err != nil { + return nil, err + } + pb.Ranges = append(pb.Ranges, rProto) + } + return pb, nil +} diff --git a/vendor/cloud.google.com/go/spanner/keyset_test.go b/vendor/cloud.google.com/go/spanner/keyset_test.go new file mode 100644 index 00000000..ba1e9dba --- /dev/null +++ b/vendor/cloud.google.com/go/spanner/keyset_test.go @@ -0,0 +1,119 @@ +/* +Copyright 2017 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package spanner + +import ( + "reflect" + "testing" + + proto3 "github.com/golang/protobuf/ptypes/struct" + sppb "google.golang.org/genproto/googleapis/spanner/v1" +) + +// Test KeySet.proto(). +func TestKeySetToProto(t *testing.T) { + for _, test := range []struct { + ks KeySet + wantProto *sppb.KeySet + }{ + { + KeySet{}, + &sppb.KeySet{ + Keys: []*proto3.ListValue{}, + Ranges: []*sppb.KeyRange{}, + }, + }, + { + KeySet{All: true}, + &sppb.KeySet{ + All: true, + Keys: []*proto3.ListValue{}, + Ranges: []*sppb.KeyRange{}, + }, + }, + { + KeySet{Keys: []Key{{1, 2}, {3, 4}}}, + &sppb.KeySet{ + Keys: []*proto3.ListValue{listValueProto(intProto(1), intProto(2)), listValueProto(intProto(3), intProto(4))}, + Ranges: []*sppb.KeyRange{}, + }, + }, + { + KeySet{Ranges: []KeyRange{{Key{1}, Key{2}, ClosedClosed}, {Key{3}, Key{10}, OpenClosed}}}, + &sppb.KeySet{ + Keys: []*proto3.ListValue{}, + Ranges: []*sppb.KeyRange{ + &sppb.KeyRange{ + &sppb.KeyRange_StartClosed{listValueProto(intProto(1))}, + &sppb.KeyRange_EndClosed{listValueProto(intProto(2))}, + }, + &sppb.KeyRange{ + &sppb.KeyRange_StartOpen{listValueProto(intProto(3))}, + &sppb.KeyRange_EndClosed{listValueProto(intProto(10))}, + }, + }, + }, + }, + } { + gotProto, err := test.ks.proto() + if err != nil { + t.Errorf("%v.proto() returns error %v; want nil error", test.ks, err) + } + if !reflect.DeepEqual(gotProto, test.wantProto) { + t.Errorf("%v.proto() = \n%v\nwant:\n%v", test.ks, gotProto.String(), test.wantProto.String()) + } + } +} + +// Test helpers that help to create KeySets. +func TestKeySetHelpers(t *testing.T) { + // Test Keys with one key. + k := Key{[]byte{1, 2, 3}} + if got, want := Keys(k), (KeySet{Keys: []Key{k}}); !reflect.DeepEqual(got, want) { + t.Errorf("Keys(%q) = %q, want %q", k, got, want) + } + // Test Keys with multiple keys. 
+ ks := []Key{Key{57}, Key{NullString{"value", false}}} + if got, want := Keys(ks...), (KeySet{Keys: ks}); !reflect.DeepEqual(got, want) { + t.Errorf("Keys(%v) = %v, want %v", ks, got, want) + } + // Test Range. + kr := KeyRange{Key{1}, Key{10}, ClosedClosed} + if got, want := Range(kr), (KeySet{Ranges: []KeyRange{kr}}); !reflect.DeepEqual(got, want) { + t.Errorf("Range(%v) = %v, want %v", kr, got, want) + } + // Test PrefixRange. + k = Key{2} + kr = KeyRange{k, k, ClosedClosed} + if got, want := PrefixRange(k), (KeySet{Ranges: []KeyRange{kr}}); !reflect.DeepEqual(got, want) { + t.Errorf("PrefixRange(%v) = %v, want %v", k, got, want) + } + // Test UnionKeySets. + sk1, sk2 := Keys(Key{2}), Keys(Key{3}) + r1, r2 := Range(KeyRange{Key{1}, Key{10}, ClosedClosed}), Range(KeyRange{Key{15}, Key{20}, OpenClosed}) + want := KeySet{ + Keys: []Key{Key{2}, Key{3}}, + Ranges: []KeyRange{KeyRange{Key{1}, Key{10}, ClosedClosed}, KeyRange{Key{15}, Key{20}, OpenClosed}}, + } + if got := UnionKeySets(sk1, sk2, r1, r2); !reflect.DeepEqual(got, want) { + t.Errorf("UnionKeySets(%v, %v, %v, %v) = %v, want %v", sk1, sk2, r1, r2, got, want) + } + all := AllKeys() + if got := UnionKeySets(sk1, sk2, r1, r2, all); !reflect.DeepEqual(got, all) { + t.Errorf("UnionKeySets(%v, %v, %v, %v, %v) = %v, want %v", sk1, sk2, r1, r2, all, got, all) + } +} diff --git a/vendor/cloud.google.com/go/spanner/mutation.go b/vendor/cloud.google.com/go/spanner/mutation.go new file mode 100644 index 00000000..eba7fe81 --- /dev/null +++ b/vendor/cloud.google.com/go/spanner/mutation.go @@ -0,0 +1,422 @@ +/* +Copyright 2017 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package spanner + +import ( + "reflect" + + proto3 "github.com/golang/protobuf/ptypes/struct" + + sppb "google.golang.org/genproto/googleapis/spanner/v1" + "google.golang.org/grpc/codes" +) + +// op is the mutation operation. +type op int + +const ( + // opDelete removes a row from a table. Succeeds whether or not the + // key was present. + opDelete op = iota + // opInsert inserts a row into a table. If the row already exists, the + // write or transaction fails. + opInsert + // opInsertOrUpdate inserts a row into a table. If the row already + // exists, it updates it instead. Any column values not explicitly + // written are preserved. + opInsertOrUpdate + // opReplace inserts a row into a table, deleting any existing row. + // Unlike InsertOrUpdate, this means any values not explicitly written + // become NULL. + opReplace + // opUpdate updates a row in a table. If the row does not already + // exist, the write or transaction fails. + opUpdate +) + +// A Mutation describes a modification to one or more Cloud Spanner rows. The +// mutation represents an insert, update, delete, etc on a table. +// +// Many mutations can be applied in a single atomic commit. For purposes of +// constraint checking (such as foreign key constraints), the operations can be +// viewed as applying in same order as the mutations are supplied in (so that +// e.g., a row and its logical "child" can be inserted in the same commit). +// +// - The Apply function applies series of mutations. 
+// - A ReadWriteTransaction applies a series of mutations as part of an +// atomic read-modify-write operation. +// Example: +// +// m := spanner.Insert("User", +// []string{"user_id", "profile"}, +// []interface{}{UserID, profile}) +// _, err := client.Apply(ctx, []*spanner.Mutation{m}) +// +// In this example, we insert a new row into the User table. The primary key +// for the new row is UserID (presuming that "user_id" has been declared as the +// primary key of the "User" table). +// +// Updating a row +// +// Changing the values of columns in an existing row is very similar to +// inserting a new row: +// +// m := spanner.Update("User", +// []string{"user_id", "profile"}, +// []interface{}{UserID, profile}) +// _, err := client.Apply(ctx, []*spanner.Mutation{m}) +// +// Deleting a row +// +// To delete a row, use spanner.Delete: +// +// m := spanner.Delete("User", spanner.Key{UserId}) +// _, err := client.Apply(ctx, []*spanner.Mutation{m}) +// +// Note that deleting a row in a table may also delete rows from other tables +// if cascading deletes are specified in those tables' schemas. Delete does +// nothing if the named row does not exist (does not yield an error). 
+// +// Deleting a field +// +// To delete/clear a field within a row, use spanner.Update with the value nil: +// +// m := spanner.Update("User", +// []string{"user_id", "profile"}, +// []interface{}{UserID, nil}) +// _, err := client.Apply(ctx, []*spanner.Mutation{m}) +// +// The valid Go types and their corresponding Cloud Spanner types that can be +// used in the Insert/Update/InsertOrUpdate functions are: +// +// string, NullString - STRING +// []string, []NullString - STRING ARRAY +// []byte - BYTES +// [][]byte - BYTES ARRAY +// int, int64, NullInt64 - INT64 +// []int, []int64, []NullInt64 - INT64 ARRAY +// bool, NullBool - BOOL +// []bool, []NullBool - BOOL ARRAY +// float64, NullFloat64 - FLOAT64 +// []float64, []NullFloat64 - FLOAT64 ARRAY +// time.Time, NullTime - TIMESTAMP +// []time.Time, []NullTime - TIMESTAMP ARRAY +// Date, NullDate - DATE +// []Date, []NullDate - DATE ARRAY +// +// To compare two Mutations for testing purposes, use reflect.DeepEqual. +type Mutation struct { + // op is the operation type of the mutation. + // See documentation for spanner.op for more details. + op op + // Table is the name of the taget table to be modified. + table string + // keySet is a set of primary keys that names the rows + // in a delete operation. + keySet KeySet + // columns names the set of columns that are going to be + // modified by Insert, InsertOrUpdate, Replace or Update + // operations. + columns []string + // values specifies the new values for the target columns + // named by Columns. + values []interface{} +} + +// mapToMutationParams converts Go map into mutation parameters. +func mapToMutationParams(in map[string]interface{}) ([]string, []interface{}) { + cols := []string{} + vals := []interface{}{} + for k, v := range in { + cols = append(cols, k) + vals = append(vals, v) + } + return cols, vals +} + +// errNotStruct returns error for not getting a go struct type. 
+func errNotStruct(in interface{}) error { + return spannerErrorf(codes.InvalidArgument, "%T is not a go struct type", in) +} + +// structToMutationParams converts Go struct into mutation parameters. +// If the input is not a valid Go struct type, structToMutationParams +// returns error. +func structToMutationParams(in interface{}) ([]string, []interface{}, error) { + if in == nil { + return nil, nil, errNotStruct(in) + } + v := reflect.ValueOf(in) + t := v.Type() + if t.Kind() == reflect.Ptr && t.Elem().Kind() == reflect.Struct { + // t is a pointer to a struct. + if v.IsNil() { + // Return empty results. + return nil, nil, nil + } + // Get the struct value that in points to. + v = v.Elem() + t = t.Elem() + } + if t.Kind() != reflect.Struct { + return nil, nil, errNotStruct(in) + } + fields, err := fieldCache.Fields(t) + if err != nil { + return nil, nil, toSpannerError(err) + } + var cols []string + var vals []interface{} + for _, f := range fields { + cols = append(cols, f.Name) + vals = append(vals, v.FieldByIndex(f.Index).Interface()) + } + return cols, vals, nil +} + +// Insert returns a Mutation to insert a row into a table. If the row already +// exists, the write or transaction fails. +func Insert(table string, cols []string, vals []interface{}) *Mutation { + return &Mutation{ + op: opInsert, + table: table, + columns: cols, + values: vals, + } +} + +// InsertMap returns a Mutation to insert a row into a table, specified by +// a map of column name to value. If the row already exists, the write or +// transaction fails. +func InsertMap(table string, in map[string]interface{}) *Mutation { + cols, vals := mapToMutationParams(in) + return Insert(table, cols, vals) +} + +// InsertStruct returns a Mutation to insert a row into a table, specified by +// a Go struct. If the row already exists, the write or transaction fails. +// +// The in argument must be a struct or a pointer to a struct. Its exported +// fields specify the column names and values. 
Use a field tag like "spanner:name" +// to provide an alternative column name, or use "spanner:-" to ignore the field. +func InsertStruct(table string, in interface{}) (*Mutation, error) { + cols, vals, err := structToMutationParams(in) + if err != nil { + return nil, err + } + return Insert(table, cols, vals), nil +} + +// Update returns a Mutation to update a row in a table. If the row does not +// already exist, the write or transaction fails. +func Update(table string, cols []string, vals []interface{}) *Mutation { + return &Mutation{ + op: opUpdate, + table: table, + columns: cols, + values: vals, + } +} + +// UpdateMap returns a Mutation to update a row in a table, specified by +// a map of column to value. If the row does not already exist, the write or +// transaction fails. +func UpdateMap(table string, in map[string]interface{}) *Mutation { + cols, vals := mapToMutationParams(in) + return Update(table, cols, vals) +} + +// UpdateStruct returns a Mutation to update a row in a table, specified by a Go +// struct. If the row does not already exist, the write or transaction fails. +func UpdateStruct(table string, in interface{}) (*Mutation, error) { + cols, vals, err := structToMutationParams(in) + if err != nil { + return nil, err + } + return Update(table, cols, vals), nil +} + +// InsertOrUpdate returns a Mutation to insert a row into a table. If the row +// already exists, it updates it instead. Any column values not explicitly +// written are preserved. +func InsertOrUpdate(table string, cols []string, vals []interface{}) *Mutation { + return &Mutation{ + op: opInsertOrUpdate, + table: table, + columns: cols, + values: vals, + } +} + +// InsertOrUpdateMap returns a Mutation to insert a row into a table, +// specified by a map of column to value. If the row already exists, it +// updates it instead. Any column values not explicitly written are preserved. 
+func InsertOrUpdateMap(table string, in map[string]interface{}) *Mutation { + cols, vals := mapToMutationParams(in) + return InsertOrUpdate(table, cols, vals) +} + +// InsertOrUpdateStruct returns a Mutation to insert a row into a table, +// specified by a Go struct. If the row already exists, it updates it instead. +// Any column values not explicitly written are preserved. +// +// The in argument must be a struct or a pointer to a struct. Its exported +// fields specify the column names and values. Use a field tag like "spanner:name" +// to provide an alternative column name, or use "spanner:-" to ignore the field. +func InsertOrUpdateStruct(table string, in interface{}) (*Mutation, error) { + cols, vals, err := structToMutationParams(in) + if err != nil { + return nil, err + } + return InsertOrUpdate(table, cols, vals), nil +} + +// Replace returns a Mutation to insert a row into a table, deleting any +// existing row. Unlike InsertOrUpdate, this means any values not explicitly +// written become NULL. +func Replace(table string, cols []string, vals []interface{}) *Mutation { + return &Mutation{ + op: opReplace, + table: table, + columns: cols, + values: vals, + } +} + +// ReplaceMap returns a Mutation to insert a row into a table, deleting any +// existing row. Unlike InsertOrUpdateMap, this means any values not explicitly +// written become NULL. The row is specified by a map of column to value. +func ReplaceMap(table string, in map[string]interface{}) *Mutation { + cols, vals := mapToMutationParams(in) + return Replace(table, cols, vals) +} + +// ReplaceStruct returns a Mutation to insert a row into a table, deleting any +// existing row. Unlike InsertOrUpdateMap, this means any values not explicitly +// written become NULL. The row is specified by a Go struct. +// +// The in argument must be a struct or a pointer to a struct. Its exported +// fields specify the column names and values. 
Use a field tag like "spanner:name" +// to provide an alternative column name, or use "spanner:-" to ignore the field. +func ReplaceStruct(table string, in interface{}) (*Mutation, error) { + cols, vals, err := structToMutationParams(in) + if err != nil { + return nil, err + } + return Replace(table, cols, vals), nil +} + +// Delete removes a key from a table. Succeeds whether or not the key was +// present. +func Delete(table string, key Key) *Mutation { + return &Mutation{ + op: opDelete, + table: table, + keySet: Keys(key), + } +} + +// DeleteKeyRange removes a range of keys from a table. Succeeds whether or not +// the keys were present. +func DeleteKeyRange(table string, r KeyRange) *Mutation { + return &Mutation{ + op: opDelete, + table: table, + keySet: Range(r), + } +} + +// prepareWrite generates sppb.Mutation_Write from table name, column names +// and new column values. +func prepareWrite(table string, columns []string, vals []interface{}) (*sppb.Mutation_Write, error) { + v, err := encodeValueArray(vals) + if err != nil { + return nil, err + } + return &sppb.Mutation_Write{ + Table: table, + Columns: columns, + Values: []*proto3.ListValue{v}, + }, nil +} + +// errInvdMutationOp returns error for unrecognized mutation operation. +func errInvdMutationOp(m Mutation) error { + return spannerErrorf(codes.InvalidArgument, "Unknown op type: %d", m.op) +} + +// proto converts spanner.Mutation to sppb.Mutation, in preparation to send +// RPCs. 
+func (m Mutation) proto() (*sppb.Mutation, error) { + var pb *sppb.Mutation + switch m.op { + case opDelete: + keySetProto, err := m.keySet.proto() + if err != nil { + return nil, err + } + pb = &sppb.Mutation{ + Operation: &sppb.Mutation_Delete_{ + Delete: &sppb.Mutation_Delete{ + Table: m.table, + KeySet: keySetProto, + }, + }, + } + case opInsert: + w, err := prepareWrite(m.table, m.columns, m.values) + if err != nil { + return nil, err + } + pb = &sppb.Mutation{Operation: &sppb.Mutation_Insert{Insert: w}} + case opInsertOrUpdate: + w, err := prepareWrite(m.table, m.columns, m.values) + if err != nil { + return nil, err + } + pb = &sppb.Mutation{Operation: &sppb.Mutation_InsertOrUpdate{InsertOrUpdate: w}} + case opReplace: + w, err := prepareWrite(m.table, m.columns, m.values) + if err != nil { + return nil, err + } + pb = &sppb.Mutation{Operation: &sppb.Mutation_Replace{Replace: w}} + case opUpdate: + w, err := prepareWrite(m.table, m.columns, m.values) + if err != nil { + return nil, err + } + pb = &sppb.Mutation{Operation: &sppb.Mutation_Update{Update: w}} + default: + return nil, errInvdMutationOp(m) + } + return pb, nil +} + +// mutationsProto turns a spanner.Mutation array into a sppb.Mutation array, +// it is convenient for sending batch mutations to Cloud Spanner. +func mutationsProto(ms []*Mutation) ([]*sppb.Mutation, error) { + l := make([]*sppb.Mutation, 0, len(ms)) + for _, m := range ms { + pb, err := m.proto() + if err != nil { + return nil, err + } + l = append(l, pb) + } + return l, nil +} diff --git a/vendor/cloud.google.com/go/spanner/mutation_test.go b/vendor/cloud.google.com/go/spanner/mutation_test.go new file mode 100644 index 00000000..6233b390 --- /dev/null +++ b/vendor/cloud.google.com/go/spanner/mutation_test.go @@ -0,0 +1,545 @@ +/* +Copyright 2017 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package spanner + +import ( + "reflect" + "sort" + "strings" + "testing" + + proto3 "github.com/golang/protobuf/ptypes/struct" + + sppb "google.golang.org/genproto/googleapis/spanner/v1" +) + +// keysetProto returns protobuf encoding of valid spanner.KeySet. +func keysetProto(t *testing.T, ks KeySet) *sppb.KeySet { + k, err := ks.proto() + if err != nil { + t.Fatalf("cannot convert keyset %v to protobuf: %v", ks, err) + } + return k +} + +// Test encoding from spanner.Mutation to protobuf. +func TestMutationToProto(t *testing.T) { + for i, test := range []struct { + m *Mutation + want *sppb.Mutation + }{ + // Delete Mutation + { + &Mutation{opDelete, "t_foo", Keys(Key{"foo"}), nil, nil}, + &sppb.Mutation{ + Operation: &sppb.Mutation_Delete_{ + Delete: &sppb.Mutation_Delete{ + Table: "t_foo", + KeySet: keysetProto(t, Keys(Key{"foo"})), + }, + }, + }, + }, + // Insert Mutation + { + &Mutation{opInsert, "t_foo", KeySet{}, []string{"col1", "col2"}, []interface{}{int64(1), int64(2)}}, + &sppb.Mutation{ + Operation: &sppb.Mutation_Insert{ + Insert: &sppb.Mutation_Write{ + Table: "t_foo", + Columns: []string{"col1", "col2"}, + Values: []*proto3.ListValue{ + &proto3.ListValue{ + Values: []*proto3.Value{intProto(1), intProto(2)}, + }, + }, + }, + }, + }, + }, + // InsertOrUpdate Mutation + { + &Mutation{opInsertOrUpdate, "t_foo", KeySet{}, []string{"col1", "col2"}, []interface{}{1.0, 2.0}}, + &sppb.Mutation{ + Operation: &sppb.Mutation_InsertOrUpdate{ + InsertOrUpdate: &sppb.Mutation_Write{ + Table: "t_foo", + Columns: []string{"col1", "col2"}, + Values: 
[]*proto3.ListValue{ + &proto3.ListValue{ + Values: []*proto3.Value{floatProto(1.0), floatProto(2.0)}, + }, + }, + }, + }, + }, + }, + // Replace Mutation + { + &Mutation{opReplace, "t_foo", KeySet{}, []string{"col1", "col2"}, []interface{}{"one", 2.0}}, + &sppb.Mutation{ + Operation: &sppb.Mutation_Replace{ + Replace: &sppb.Mutation_Write{ + Table: "t_foo", + Columns: []string{"col1", "col2"}, + Values: []*proto3.ListValue{ + &proto3.ListValue{ + Values: []*proto3.Value{stringProto("one"), floatProto(2.0)}, + }, + }, + }, + }, + }, + }, + // Update Mutation + { + &Mutation{opUpdate, "t_foo", KeySet{}, []string{"col1", "col2"}, []interface{}{"one", []byte(nil)}}, + &sppb.Mutation{ + Operation: &sppb.Mutation_Update{ + Update: &sppb.Mutation_Write{ + Table: "t_foo", + Columns: []string{"col1", "col2"}, + Values: []*proto3.ListValue{ + &proto3.ListValue{ + Values: []*proto3.Value{stringProto("one"), nullProto()}, + }, + }, + }, + }, + }, + }, + } { + if got, err := test.m.proto(); err != nil || !reflect.DeepEqual(got, test.want) { + t.Errorf("%d: (%#v).proto() = (%v, %v), want (%v, nil)", i, test.m, got, err, test.want) + } + } +} + +// mutationColumnSorter implements sort.Interface for sorting column-value pairs in a Mutation by column names. +type mutationColumnSorter struct { + Mutation +} + +// newMutationColumnSorter creates new instance of mutationColumnSorter by duplicating the input Mutation so that +// sorting won't change the input Mutation. +func newMutationColumnSorter(m *Mutation) *mutationColumnSorter { + return &mutationColumnSorter{ + Mutation{ + m.op, + m.table, + m.keySet, + append([]string(nil), m.columns...), + append([]interface{}(nil), m.values...), + }, + } +} + +// Len implements sort.Interface.Len. +func (ms *mutationColumnSorter) Len() int { + return len(ms.columns) +} + +// Swap implements sort.Interface.Swap. 
+func (ms *mutationColumnSorter) Swap(i, j int) { + ms.columns[i], ms.columns[j] = ms.columns[j], ms.columns[i] + ms.values[i], ms.values[j] = ms.values[j], ms.values[i] +} + +// Less implements sort.Interface.Less. +func (ms *mutationColumnSorter) Less(i, j int) bool { + return strings.Compare(ms.columns[i], ms.columns[j]) < 0 +} + +// mutationEqual returns true if two mutations in question are equal +// to each other. +func mutationEqual(t *testing.T, m1, m2 Mutation) bool { + // Two mutations are considered to be equal even if their column values have different + // orders. + ms1 := newMutationColumnSorter(&m1) + ms2 := newMutationColumnSorter(&m2) + sort.Sort(ms1) + sort.Sort(ms2) + return reflect.DeepEqual(ms1, ms2) +} + +// Test helper functions which help to generate spanner.Mutation. +func TestMutationHelpers(t *testing.T) { + for _, test := range []struct { + m string + got *Mutation + want *Mutation + }{ + { + "Insert", + Insert("t_foo", []string{"col1", "col2"}, []interface{}{int64(1), int64(2)}), + &Mutation{opInsert, "t_foo", KeySet{}, []string{"col1", "col2"}, []interface{}{int64(1), int64(2)}}, + }, + { + "InsertMap", + InsertMap("t_foo", map[string]interface{}{"col1": int64(1), "col2": int64(2)}), + &Mutation{opInsert, "t_foo", KeySet{}, []string{"col1", "col2"}, []interface{}{int64(1), int64(2)}}, + }, + { + "InsertStruct", + func() *Mutation { + m, err := InsertStruct( + "t_foo", + struct { + notCol bool + Col1 int64 `spanner:"col1"` + Col2 int64 `spanner:"col2"` + }{false, int64(1), int64(2)}, + ) + if err != nil { + t.Errorf("cannot convert struct into mutation: %v", err) + } + return m + }(), + &Mutation{opInsert, "t_foo", KeySet{}, []string{"col1", "col2"}, []interface{}{int64(1), int64(2)}}, + }, + { + "Update", + Update("t_foo", []string{"col1", "col2"}, []interface{}{"one", []byte(nil)}), + &Mutation{opUpdate, "t_foo", KeySet{}, []string{"col1", "col2"}, []interface{}{"one", []byte(nil)}}, + }, + { + "UpdateMap", + UpdateMap("t_foo", 
map[string]interface{}{"col1": "one", "col2": []byte(nil)}), + &Mutation{opUpdate, "t_foo", KeySet{}, []string{"col1", "col2"}, []interface{}{"one", []byte(nil)}}, + }, + { + "UpdateStruct", + func() *Mutation { + m, err := UpdateStruct( + "t_foo", + struct { + Col1 string `spanner:"col1"` + notCol int + Col2 []byte `spanner:"col2"` + }{"one", 1, nil}, + ) + if err != nil { + t.Errorf("cannot convert struct into mutation: %v", err) + } + return m + }(), + &Mutation{opUpdate, "t_foo", KeySet{}, []string{"col1", "col2"}, []interface{}{"one", []byte(nil)}}, + }, + { + "InsertOrUpdate", + InsertOrUpdate("t_foo", []string{"col1", "col2"}, []interface{}{1.0, 2.0}), + &Mutation{opInsertOrUpdate, "t_foo", KeySet{}, []string{"col1", "col2"}, []interface{}{1.0, 2.0}}, + }, + { + "InsertOrUpdateMap", + InsertOrUpdateMap("t_foo", map[string]interface{}{"col1": 1.0, "col2": 2.0}), + &Mutation{opInsertOrUpdate, "t_foo", KeySet{}, []string{"col1", "col2"}, []interface{}{1.0, 2.0}}, + }, + { + "InsertOrUpdateStruct", + func() *Mutation { + m, err := InsertOrUpdateStruct( + "t_foo", + struct { + Col1 float64 `spanner:"col1"` + Col2 float64 `spanner:"col2"` + notCol float64 + }{1.0, 2.0, 3.0}, + ) + if err != nil { + t.Errorf("cannot convert struct into mutation: %v", err) + } + return m + }(), + &Mutation{opInsertOrUpdate, "t_foo", KeySet{}, []string{"col1", "col2"}, []interface{}{1.0, 2.0}}, + }, + { + "Replace", + Replace("t_foo", []string{"col1", "col2"}, []interface{}{"one", 2.0}), + &Mutation{opReplace, "t_foo", KeySet{}, []string{"col1", "col2"}, []interface{}{"one", 2.0}}, + }, + { + "ReplaceMap", + ReplaceMap("t_foo", map[string]interface{}{"col1": "one", "col2": 2.0}), + &Mutation{opReplace, "t_foo", KeySet{}, []string{"col1", "col2"}, []interface{}{"one", 2.0}}, + }, + { + "ReplaceStruct", + func() *Mutation { + m, err := ReplaceStruct( + "t_foo", + struct { + Col1 string `spanner:"col1"` + Col2 float64 `spanner:"col2"` + notCol string + }{"one", 2.0, "foo"}, + ) + if err 
!= nil { + t.Errorf("cannot convert struct into mutation: %v", err) + } + return m + }(), + &Mutation{opReplace, "t_foo", KeySet{}, []string{"col1", "col2"}, []interface{}{"one", 2.0}}, + }, + { + "Delete", + Delete("t_foo", Key{"foo"}), + &Mutation{opDelete, "t_foo", Keys(Key{"foo"}), nil, nil}, + }, + { + "DeleteRange", + DeleteKeyRange("t_foo", KeyRange{Key{"bar"}, Key{"foo"}, ClosedClosed}), + &Mutation{opDelete, "t_foo", Range(KeyRange{Key{"bar"}, Key{"foo"}, ClosedClosed}), nil, nil}, + }, + } { + if !mutationEqual(t, *test.got, *test.want) { + t.Errorf("%v: got Mutation %v, want %v", test.m, test.got, test.want) + } + } +} + +// Test encoding non-struct types by using *Struct helpers. +func TestBadStructs(t *testing.T) { + val := "i_am_not_a_struct" + wantErr := errNotStruct(val) + if _, gotErr := InsertStruct("t_test", val); !reflect.DeepEqual(gotErr, wantErr) { + t.Errorf("InsertStruct(%q) returns error %v, want %v", val, gotErr, wantErr) + } + if _, gotErr := InsertOrUpdateStruct("t_test", val); !reflect.DeepEqual(gotErr, wantErr) { + t.Errorf("InsertOrUpdateStruct(%q) returns error %v, want %v", val, gotErr, wantErr) + } + if _, gotErr := UpdateStruct("t_test", val); !reflect.DeepEqual(gotErr, wantErr) { + t.Errorf("UpdateStruct(%q) returns error %v, want %v", val, gotErr, wantErr) + } + if _, gotErr := ReplaceStruct("t_test", val); !reflect.DeepEqual(gotErr, wantErr) { + t.Errorf("ReplaceStruct(%q) returns error %v, want %v", val, gotErr, wantErr) + } +} + +// Test encoding Mutation into proto. 
+func TestEncodeMutation(t *testing.T) { + for _, test := range []struct { + name string + mutation Mutation + wantProto *sppb.Mutation + wantErr error + }{ + { + "OpDelete", + Mutation{opDelete, "t_test", Keys(Key{1}), nil, nil}, + &sppb.Mutation{ + Operation: &sppb.Mutation_Delete_{ + Delete: &sppb.Mutation_Delete{ + Table: "t_test", + KeySet: &sppb.KeySet{ + Keys: []*proto3.ListValue{listValueProto(intProto(1))}, + Ranges: []*sppb.KeyRange{}, + }, + }, + }, + }, + nil, + }, + { + "OpDelete - Key error", + Mutation{opDelete, "t_test", Keys(Key{struct{}{}}), nil, nil}, + &sppb.Mutation{ + Operation: &sppb.Mutation_Delete_{ + Delete: &sppb.Mutation_Delete{ + Table: "t_test", + KeySet: &sppb.KeySet{}, + }, + }, + }, + errInvdKeyPartType(struct{}{}), + }, + { + "OpInsert", + Mutation{opInsert, "t_test", KeySet{}, []string{"key", "val"}, []interface{}{"foo", 1}}, + &sppb.Mutation{ + Operation: &sppb.Mutation_Insert{ + Insert: &sppb.Mutation_Write{ + Table: "t_test", + Columns: []string{"key", "val"}, + Values: []*proto3.ListValue{listValueProto(stringProto("foo"), intProto(1))}, + }, + }, + }, + nil, + }, + { + "OpInsert - Value Type Error", + Mutation{opInsert, "t_test", KeySet{}, []string{"key", "val"}, []interface{}{struct{}{}, 1}}, + &sppb.Mutation{ + Operation: &sppb.Mutation_Insert{ + Insert: &sppb.Mutation_Write{}, + }, + }, + errEncoderUnsupportedType(struct{}{}), + }, + { + "OpInsertOrUpdate", + Mutation{opInsertOrUpdate, "t_test", KeySet{}, []string{"key", "val"}, []interface{}{"foo", 1}}, + &sppb.Mutation{ + Operation: &sppb.Mutation_InsertOrUpdate{ + InsertOrUpdate: &sppb.Mutation_Write{ + Table: "t_test", + Columns: []string{"key", "val"}, + Values: []*proto3.ListValue{listValueProto(stringProto("foo"), intProto(1))}, + }, + }, + }, + nil, + }, + { + "OpInsertOrUpdate - Value Type Error", + Mutation{opInsertOrUpdate, "t_test", KeySet{}, []string{"key", "val"}, []interface{}{struct{}{}, 1}}, + &sppb.Mutation{ + Operation: &sppb.Mutation_InsertOrUpdate{ + 
InsertOrUpdate: &sppb.Mutation_Write{}, + }, + }, + errEncoderUnsupportedType(struct{}{}), + }, + { + "OpReplace", + Mutation{opReplace, "t_test", KeySet{}, []string{"key", "val"}, []interface{}{"foo", 1}}, + &sppb.Mutation{ + Operation: &sppb.Mutation_Replace{ + Replace: &sppb.Mutation_Write{ + Table: "t_test", + Columns: []string{"key", "val"}, + Values: []*proto3.ListValue{listValueProto(stringProto("foo"), intProto(1))}, + }, + }, + }, + nil, + }, + { + "OpReplace - Value Type Error", + Mutation{opReplace, "t_test", KeySet{}, []string{"key", "val"}, []interface{}{struct{}{}, 1}}, + &sppb.Mutation{ + Operation: &sppb.Mutation_Replace{ + Replace: &sppb.Mutation_Write{}, + }, + }, + errEncoderUnsupportedType(struct{}{}), + }, + { + "OpUpdate", + Mutation{opUpdate, "t_test", KeySet{}, []string{"key", "val"}, []interface{}{"foo", 1}}, + &sppb.Mutation{ + Operation: &sppb.Mutation_Update{ + Update: &sppb.Mutation_Write{ + Table: "t_test", + Columns: []string{"key", "val"}, + Values: []*proto3.ListValue{listValueProto(stringProto("foo"), intProto(1))}, + }, + }, + }, + nil, + }, + { + "OpUpdate - Value Type Error", + Mutation{opUpdate, "t_test", KeySet{}, []string{"key", "val"}, []interface{}{struct{}{}, 1}}, + &sppb.Mutation{ + Operation: &sppb.Mutation_Update{ + Update: &sppb.Mutation_Write{}, + }, + }, + errEncoderUnsupportedType(struct{}{}), + }, + { + "OpKnown - Unknown Mutation Operation Code", + Mutation{op(100), "t_test", KeySet{}, nil, nil}, + &sppb.Mutation{}, + errInvdMutationOp(Mutation{op(100), "t_test", KeySet{}, nil, nil}), + }, + } { + gotProto, gotErr := test.mutation.proto() + if gotErr != nil { + if !reflect.DeepEqual(gotErr, test.wantErr) { + t.Errorf("%v: %v.proto() returns error %v, want %v", test.name, test.mutation, gotErr, test.wantErr) + } + continue + } + if !reflect.DeepEqual(gotProto, test.wantProto) { + t.Errorf("%v: %v.proto() = (%v, nil), want (%v, nil)", test.name, test.mutation, gotProto, test.wantProto) + } + } +} + +// Test Encoding 
an array of mutations. +func TestEncodeMutationArray(t *testing.T) { + for _, test := range []struct { + name string + ms []*Mutation + want []*sppb.Mutation + wantErr error + }{ + { + "Multiple Mutations", + []*Mutation{ + &Mutation{opDelete, "t_test", Keys(Key{"bar"}), nil, nil}, + &Mutation{opInsertOrUpdate, "t_test", KeySet{}, []string{"key", "val"}, []interface{}{"foo", 1}}, + }, + []*sppb.Mutation{ + &sppb.Mutation{ + Operation: &sppb.Mutation_Delete_{ + Delete: &sppb.Mutation_Delete{ + Table: "t_test", + KeySet: &sppb.KeySet{ + Keys: []*proto3.ListValue{listValueProto(stringProto("bar"))}, + Ranges: []*sppb.KeyRange{}, + }, + }, + }, + }, + &sppb.Mutation{ + Operation: &sppb.Mutation_InsertOrUpdate{ + InsertOrUpdate: &sppb.Mutation_Write{ + Table: "t_test", + Columns: []string{"key", "val"}, + Values: []*proto3.ListValue{listValueProto(stringProto("foo"), intProto(1))}, + }, + }, + }, + }, + nil, + }, + { + "Multiple Mutations - Bad Mutation", + []*Mutation{ + &Mutation{opDelete, "t_test", Keys(Key{"bar"}), nil, nil}, + &Mutation{opInsertOrUpdate, "t_test", KeySet{}, []string{"key", "val"}, []interface{}{"foo", struct{}{}}}, + }, + []*sppb.Mutation{}, + errEncoderUnsupportedType(struct{}{}), + }, + } { + gotProto, gotErr := mutationsProto(test.ms) + if gotErr != nil { + if !reflect.DeepEqual(gotErr, test.wantErr) { + t.Errorf("%v: mutationsProto(%v) returns error %v, want %v", test.name, test.ms, gotErr, test.wantErr) + } + continue + } + if !reflect.DeepEqual(gotProto, test.want) { + t.Errorf("%v: mutationsProto(%v) = (%v, nil), want (%v, nil)", test.name, test.ms, gotProto, test.want) + } + } +} diff --git a/vendor/cloud.google.com/go/spanner/protoutils.go b/vendor/cloud.google.com/go/spanner/protoutils.go new file mode 100644 index 00000000..df12432d --- /dev/null +++ b/vendor/cloud.google.com/go/spanner/protoutils.go @@ -0,0 +1,113 @@ +/* +Copyright 2017 Google Inc. All Rights Reserved. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package spanner + +import ( + "encoding/base64" + "strconv" + "time" + + "cloud.google.com/go/civil" + proto3 "github.com/golang/protobuf/ptypes/struct" + sppb "google.golang.org/genproto/googleapis/spanner/v1" +) + +// Helpers to generate protobuf values and Cloud Spanner types. + +func stringProto(s string) *proto3.Value { + return &proto3.Value{Kind: stringKind(s)} +} + +func stringKind(s string) *proto3.Value_StringValue { + return &proto3.Value_StringValue{StringValue: s} +} + +func stringType() *sppb.Type { + return &sppb.Type{Code: sppb.TypeCode_STRING} +} + +func boolProto(b bool) *proto3.Value { + return &proto3.Value{Kind: &proto3.Value_BoolValue{BoolValue: b}} +} + +func boolType() *sppb.Type { + return &sppb.Type{Code: sppb.TypeCode_BOOL} +} + +func intProto(n int64) *proto3.Value { + return &proto3.Value{Kind: &proto3.Value_StringValue{StringValue: strconv.FormatInt(n, 10)}} +} + +func intType() *sppb.Type { + return &sppb.Type{Code: sppb.TypeCode_INT64} +} + +func floatProto(n float64) *proto3.Value { + return &proto3.Value{Kind: &proto3.Value_NumberValue{NumberValue: n}} +} + +func floatType() *sppb.Type { + return &sppb.Type{Code: sppb.TypeCode_FLOAT64} +} + +func bytesProto(b []byte) *proto3.Value { + return &proto3.Value{Kind: &proto3.Value_StringValue{StringValue: base64.StdEncoding.EncodeToString(b)}} +} + +func bytesType() *sppb.Type { + return &sppb.Type{Code: sppb.TypeCode_BYTES} +} + +func timeProto(t 
time.Time) *proto3.Value { + return stringProto(t.UTC().Format(time.RFC3339Nano)) +} + +func timeType() *sppb.Type { + return &sppb.Type{Code: sppb.TypeCode_TIMESTAMP} +} + +func dateProto(d civil.Date) *proto3.Value { + return stringProto(d.String()) +} + +func dateType() *sppb.Type { + return &sppb.Type{Code: sppb.TypeCode_DATE} +} + +func listProto(p ...*proto3.Value) *proto3.Value { + return &proto3.Value{Kind: &proto3.Value_ListValue{ListValue: &proto3.ListValue{Values: p}}} +} + +func listValueProto(p ...*proto3.Value) *proto3.ListValue { + return &proto3.ListValue{Values: p} +} + +func listType(t *sppb.Type) *sppb.Type { + return &sppb.Type{Code: sppb.TypeCode_ARRAY, ArrayElementType: t} +} + +func mkField(n string, t *sppb.Type) *sppb.StructType_Field { + return &sppb.StructType_Field{n, t} +} + +func structType(fields ...*sppb.StructType_Field) *sppb.Type { + return &sppb.Type{Code: sppb.TypeCode_STRUCT, StructType: &sppb.StructType{Fields: fields}} +} + +func nullProto() *proto3.Value { + return &proto3.Value{Kind: &proto3.Value_NullValue{NullValue: proto3.NullValue_NULL_VALUE}} +} diff --git a/vendor/cloud.google.com/go/spanner/read.go b/vendor/cloud.google.com/go/spanner/read.go new file mode 100644 index 00000000..d7a1ad0c --- /dev/null +++ b/vendor/cloud.google.com/go/spanner/read.go @@ -0,0 +1,679 @@ +/* +Copyright 2017 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package spanner + +import ( + "bytes" + "io" + "sync/atomic" + "time" + + log "github.com/golang/glog" + proto "github.com/golang/protobuf/proto" + proto3 "github.com/golang/protobuf/ptypes/struct" + "golang.org/x/net/context" + + "google.golang.org/api/iterator" + sppb "google.golang.org/genproto/googleapis/spanner/v1" + "google.golang.org/grpc/codes" +) + +// streamingReceiver is the interface for receiving data from a client side +// stream. +type streamingReceiver interface { + Recv() (*sppb.PartialResultSet, error) +} + +// errEarlyReadEnd returns error for read finishes when gRPC stream is still active. +func errEarlyReadEnd() error { + return spannerErrorf(codes.FailedPrecondition, "read completed with active stream") +} + +// stream is the internal fault tolerant method for streaming data from +// Cloud Spanner. +func stream(ctx context.Context, rpc func(ct context.Context, resumeToken []byte) (streamingReceiver, error), release func(time.Time, error)) *RowIterator { + ctx, cancel := context.WithCancel(ctx) + return &RowIterator{ + streamd: newResumableStreamDecoder(ctx, rpc), + rowd: &partialResultSetDecoder{}, + release: release, + cancel: cancel, + } +} + +// RowIterator is an iterator over Rows. +type RowIterator struct { + streamd *resumableStreamDecoder + rowd *partialResultSetDecoder + release func(time.Time, error) + cancel func() + err error + rows []*Row +} + +// Next returns the next result. Its second return value is iterator.Done if +// there are no more results. Once Next returns Done, all subsequent calls +// will return Done. 
+func (r *RowIterator) Next() (*Row, error) { + if r.err != nil { + return nil, r.err + } + for len(r.rows) == 0 && r.streamd.next() { + r.rows, r.err = r.rowd.add(r.streamd.get()) + if r.err != nil { + return nil, r.err + } + } + if len(r.rows) > 0 { + row := r.rows[0] + r.rows = r.rows[1:] + return row, nil + } + if err := r.streamd.lastErr(); err != nil { + r.err = toSpannerError(err) + } else if !r.rowd.done() { + r.err = errEarlyReadEnd() + } else { + r.err = iterator.Done + } + return nil, r.err +} + +// Do calls the provided function once in sequence for each row in the iteration. If the +// function returns a non-nil error, Do immediately returns that value. +// +// If there are no rows in the iterator, Do will return nil without calling the +// provided function. +// +// Do always calls Stop on the iterator. +func (r *RowIterator) Do(f func(r *Row) error) error { + defer r.Stop() + for { + row, err := r.Next() + switch err { + case iterator.Done: + return nil + case nil: + if err = f(row); err != nil { + return err + } + default: + return err + } + } +} + +// Stop terminates the iteration. It should be called after every iteration. +func (r *RowIterator) Stop() { + if r.cancel != nil { + r.cancel() + } + if r.release != nil { + r.release(r.rowd.ts, r.err) + if r.err == nil { + r.err = spannerErrorf(codes.FailedPrecondition, "Next called after Stop") + } + r.release = nil + + } +} + +// partialResultQueue implements a simple FIFO queue. The zero value is a +// valid queue. +type partialResultQueue struct { + q []*sppb.PartialResultSet + first int + last int + n int // number of elements in queue +} + +// empty returns if the partialResultQueue is empty. +func (q *partialResultQueue) empty() bool { + return q.n == 0 +} + +// errEmptyQueue returns error for dequeuing an empty queue. 
+func errEmptyQueue() error { + return spannerErrorf(codes.OutOfRange, "empty partialResultQueue") +} + +// peekLast returns the last item in partialResultQueue; if the queue +// is empty, it returns error. +func (q *partialResultQueue) peekLast() (*sppb.PartialResultSet, error) { + if q.empty() { + return nil, errEmptyQueue() + } + return q.q[(q.last+cap(q.q)-1)%cap(q.q)], nil +} + +// push adds an item to the tail of partialResultQueue. +func (q *partialResultQueue) push(r *sppb.PartialResultSet) { + if q.q == nil { + q.q = make([]*sppb.PartialResultSet, 8 /* arbitrary */) + } + if q.n == cap(q.q) { + buf := make([]*sppb.PartialResultSet, cap(q.q)*2) + for i := 0; i < q.n; i++ { + buf[i] = q.q[(q.first+i)%cap(q.q)] + } + q.q = buf + q.first = 0 + q.last = q.n + } + q.q[q.last] = r + q.last = (q.last + 1) % cap(q.q) + q.n++ +} + +// pop removes an item from the head of partialResultQueue and returns +// it. +func (q *partialResultQueue) pop() *sppb.PartialResultSet { + if q.n == 0 { + return nil + } + r := q.q[q.first] + q.q[q.first] = nil + q.first = (q.first + 1) % cap(q.q) + q.n-- + return r +} + +// clear empties partialResultQueue. +func (q *partialResultQueue) clear() { + *q = partialResultQueue{} +} + +// dump retrives all items from partialResultQueue and return them in a slice. +// It is used only in tests. +func (q *partialResultQueue) dump() []*sppb.PartialResultSet { + var dq []*sppb.PartialResultSet + for i := q.first; len(dq) < q.n; i = (i + 1) % cap(q.q) { + dq = append(dq, q.q[i]) + } + return dq +} + +// resumableStreamDecoderState encodes resumableStreamDecoder's status. +// See also the comments for resumableStreamDecoder.Next. 
+type resumableStreamDecoderState int + +const ( + unConnected resumableStreamDecoderState = iota // 0 + queueingRetryable // 1 + queueingUnretryable // 2 + aborted // 3 + finished // 4 +) + +// resumableStreamDecoder provides a resumable interface for receiving +// sppb.PartialResultSet(s) from a given query wrapped by +// resumableStreamDecoder.rpc(). +type resumableStreamDecoder struct { + // state is the current status of resumableStreamDecoder, see also + // the comments for resumableStreamDecoder.Next. + state resumableStreamDecoderState + // stateWitness when non-nil is called to observe state change, + // used for testing. + stateWitness func(resumableStreamDecoderState) + // ctx is the caller's context, used for cancel/timeout Next(). + ctx context.Context + // rpc is a factory of streamingReceiver, which might resume + // a pervious stream from the point encoded in restartToken. + // rpc is always a wrapper of a Cloud Spanner query which is + // resumable. + rpc func(ctx context.Context, restartToken []byte) (streamingReceiver, error) + // stream is the current RPC streaming receiver. + stream streamingReceiver + // q buffers received yet undecoded partial results. + q partialResultQueue + // bytesBetweenResumeTokens is the proxy of the byte size of PartialResultSets being queued + // between two resume tokens. Once bytesBetweenResumeTokens is greater than + // maxBytesBetweenResumeTokens, resumableStreamDecoder goes into queueingUnretryable state. + bytesBetweenResumeTokens int32 + // maxBytesBetweenResumeTokens is the max number of bytes that can be buffered + // between two resume tokens. It is always copied from the global maxBytesBetweenResumeTokens + // atomically. + maxBytesBetweenResumeTokens int32 + // np is the next sppb.PartialResultSet ready to be returned + // to caller of resumableStreamDecoder.Get(). + np *sppb.PartialResultSet + // resumeToken stores the resume token that resumableStreamDecoder has + // last revealed to caller. 
+ resumeToken []byte + // retryCount is the number of retries that have been carried out so far + retryCount int + // err is the last error resumableStreamDecoder has encountered so far. + err error + // backoff to compute delays between retries. + backoff exponentialBackoff +} + +// newResumableStreamDecoder creates a new resumeableStreamDecoder instance. +// Parameter rpc should be a function that creates a new stream +// beginning at the restartToken if non-nil. +func newResumableStreamDecoder(ctx context.Context, rpc func(ct context.Context, restartToken []byte) (streamingReceiver, error)) *resumableStreamDecoder { + return &resumableStreamDecoder{ + ctx: ctx, + rpc: rpc, + maxBytesBetweenResumeTokens: atomic.LoadInt32(&maxBytesBetweenResumeTokens), + backoff: defaultBackoff, + } +} + +// changeState fulfills state transition for resumableStateDecoder. +func (d *resumableStreamDecoder) changeState(target resumableStreamDecoderState) { + if d.state == queueingRetryable && d.state != target { + // Reset bytesBetweenResumeTokens because it is only meaningful/changed under + // queueingRetryable state. + d.bytesBetweenResumeTokens = 0 + } + d.state = target + if d.stateWitness != nil { + d.stateWitness(target) + } +} + +// isNewResumeToken returns if the observed resume token is different from +// the one returned from server last time. +func (d *resumableStreamDecoder) isNewResumeToken(rt []byte) bool { + if rt == nil { + return false + } + if bytes.Compare(rt, d.resumeToken) == 0 { + return false + } + return true +} + +// Next advances to the next available partial result set. If error or no +// more, returns false, call Err to determine if an error was encountered. +// The following diagram illustrates the state machine of resumableStreamDecoder +// that Next() implements. Note that state transition can be only triggered by +// RPC activities. 
+/* + rpc() fails retryable + +---------+ + | | rpc() fails unretryable/ctx timeouts or cancelled + | | +------------------------------------------------+ + | | | | + | v | v + | +---+---+---+ +--------+ +------+--+ + +-----+unConnected| |finished| | aborted |<----+ + | | ++-----+-+ +------+--+ | + +---+----+--+ ^ ^ ^ | + | ^ | | | | + | | | | recv() fails | + | | | | | | + | |recv() fails retryable | | | | + | |with valid ctx | | | | + | | | | | | + rpc() succeeds | +-----------------------+ | | | + | | | recv EOF recv EOF | | + | | | | | | + v | | Queue size exceeds | | | + +---+----+---+----+threshold +-------+-----------+ | | ++---------->+ +--------------->+ +-+ | +| |queueingRetryable| |queueingUnretryable| | +| | +<---------------+ | | +| +---+----------+--+ pop() returns +--+----+-----------+ | +| | | resume token | ^ | +| | | | | | +| | | | | | ++---------------+ | | | | + recv() succeeds | +----+ | + | recv() succeeds | + | | + | | + | | + | | + | | + +--------------------------------------------------+ + recv() fails unretryable + +*/ +var ( + // maxBytesBetweenResumeTokens is the maximum amount of bytes that resumableStreamDecoder + // in queueingRetryable state can use to queue PartialResultSets before getting + // into queueingUnretryable state. + maxBytesBetweenResumeTokens = int32(128 * 1024 * 1024) +) + +func (d *resumableStreamDecoder) next() bool { + for { + select { + case <-d.ctx.Done(): + // Do context check here so that even gRPC failed to do + // so, resumableStreamDecoder can still break the loop + // as expected. + d.err = errContextCanceled(d.err) + d.changeState(aborted) + default: + } + switch d.state { + case unConnected: + // If no gRPC stream is available, try to initiate one. + if d.stream, d.err = d.rpc(d.ctx, d.resumeToken); d.err != nil { + if isRetryable(d.err) { + d.doBackOff() + // Be explicit about state transition, although the + // state doesn't actually change. 
State transition + // will be triggered only by RPC activity, regardless of + // whether there is an actual state change or not. + d.changeState(unConnected) + continue + } + d.changeState(aborted) + continue + } + d.resetBackOff() + d.changeState(queueingRetryable) + continue + case queueingRetryable: + fallthrough + case queueingUnretryable: + // Receiving queue is not empty. + last, err := d.q.peekLast() + if err != nil { + // Only the case that receiving queue is empty could cause peekLast to + // return error and in such case, we should try to receive from stream. + d.tryRecv() + continue + } + if d.isNewResumeToken(last.ResumeToken) { + // Got new resume token, return buffered sppb.PartialResultSets to caller. + d.np = d.q.pop() + if d.q.empty() { + d.bytesBetweenResumeTokens = 0 + // The new resume token was just popped out from queue, record it. + d.resumeToken = d.np.ResumeToken + d.changeState(queueingRetryable) + } + return true + } + if d.bytesBetweenResumeTokens >= d.maxBytesBetweenResumeTokens && d.state == queueingRetryable { + d.changeState(queueingUnretryable) + continue + } + if d.state == queueingUnretryable { + // When there is no resume token observed, + // only yield sppb.PartialResultSets to caller under + // queueingUnretryable state. + d.np = d.q.pop() + return true + } + // Needs to receive more from gRPC stream till a new resume token + // is observed. + d.tryRecv() + continue + case aborted: + // Discard all pending items because none of them + // should be yield to caller. + d.q.clear() + return false + case finished: + // If query has finished, check if there are still buffered messages. + if d.q.empty() { + // No buffered PartialResultSet. + return false + } + // Although query has finished, there are still buffered PartialResultSets. 
+ d.np = d.q.pop() + return true + + default: + log.Errorf("Unexpected resumableStreamDecoder.state: %v", d.state) + return false + } + } +} + +// tryRecv attempts to receive a PartialResultSet from gRPC stream. +func (d *resumableStreamDecoder) tryRecv() { + var res *sppb.PartialResultSet + if res, d.err = d.stream.Recv(); d.err != nil { + if d.err == io.EOF { + d.err = nil + d.changeState(finished) + return + } + if isRetryable(d.err) && d.state == queueingRetryable { + d.err = nil + // Discard all queue items (none have resume tokens). + d.q.clear() + d.stream = nil + d.changeState(unConnected) + d.doBackOff() + return + } + d.changeState(aborted) + return + } + d.q.push(res) + if d.state == queueingRetryable && !d.isNewResumeToken(res.ResumeToken) { + // adjusting d.bytesBetweenResumeTokens + d.bytesBetweenResumeTokens += int32(proto.Size(res)) + } + d.resetBackOff() + d.changeState(d.state) +} + +// resetBackOff clears the internal retry counter of +// resumableStreamDecoder so that the next exponential +// backoff will start at a fresh state. +func (d *resumableStreamDecoder) resetBackOff() { + d.retryCount = 0 +} + +// doBackoff does an exponential backoff sleep. +func (d *resumableStreamDecoder) doBackOff() { + ticker := time.NewTicker(d.backoff.delay(d.retryCount)) + defer ticker.Stop() + d.retryCount++ + select { + case <-d.ctx.Done(): + case <-ticker.C: + } +} + +// get returns the most recent PartialResultSet generated by a call to next. +func (d *resumableStreamDecoder) get() *sppb.PartialResultSet { + return d.np +} + +// lastErr returns the last non-EOF error encountered. +func (d *resumableStreamDecoder) lastErr() error { + return d.err +} + +// partialResultSetDecoder assembles PartialResultSet(s) into Cloud Spanner +// Rows. +type partialResultSetDecoder struct { + row Row + tx *sppb.Transaction + chunked bool // if true, next value should be merged with last values entry. 
+ ts time.Time // read timestamp +} + +// yield checks we have a complete row, and if so returns it. A row is not +// complete if it doesn't have enough columns, or if this is a chunked response +// and there are no further values to process. +func (p *partialResultSetDecoder) yield(chunked, last bool) *Row { + if len(p.row.vals) == len(p.row.fields) && (!chunked || !last) { + // When partialResultSetDecoder gets enough number of + // Column values, There are two cases that a new Row + // should be yield: + // 1. The incoming PartialResultSet is not chunked; + // 2. The incoming PartialResultSet is chunked, but the + // proto3.Value being merged is not the last one in + // the PartialResultSet. + // + // Use a fresh Row to simplify clients that want to use yielded results + // after the next row is retrieved. Note that fields is never changed + // so it doesn't need to be copied. + fresh := Row{ + fields: p.row.fields, + vals: make([]*proto3.Value, len(p.row.vals)), + } + copy(fresh.vals, p.row.vals) + p.row.vals = p.row.vals[:0] // empty and reuse slice + return &fresh + } + return nil +} + +// yieldTx returns transaction information via caller supplied callback. +func errChunkedEmptyRow() error { + return spannerErrorf(codes.FailedPrecondition, "partialResultSetDecoder gets chunked empty row") +} + +// add tries to merge a new PartialResultSet into buffered Row. It returns +// any rows that have been completed as a result. +func (p *partialResultSetDecoder) add(r *sppb.PartialResultSet) ([]*Row, error) { + var rows []*Row + if r.Metadata != nil { + // Metadata should only be returned in the first result. 
+ if p.row.fields == nil { + p.row.fields = r.Metadata.RowType.Fields + } + if p.tx == nil && r.Metadata.Transaction != nil { + p.tx = r.Metadata.Transaction + if p.tx.ReadTimestamp != nil { + p.ts = time.Unix(p.tx.ReadTimestamp.Seconds, int64(p.tx.ReadTimestamp.Nanos)) + } + } + } + if len(r.Values) == 0 { + return nil, nil + } + if p.chunked { + p.chunked = false + // Try to merge first value in r.Values into + // uncompleted row. + last := len(p.row.vals) - 1 + if last < 0 { // sanity check + return nil, errChunkedEmptyRow() + } + var err error + // If p is chunked, then we should always try to merge p.last with r.first. + if p.row.vals[last], err = p.merge(p.row.vals[last], r.Values[0]); err != nil { + return nil, err + } + r.Values = r.Values[1:] + // Merge is done, try to yield a complete Row. + if row := p.yield(r.ChunkedValue, len(r.Values) == 0); row != nil { + rows = append(rows, row) + } + } + for i, v := range r.Values { + // The rest values in r can be appened into p directly. + p.row.vals = append(p.row.vals, v) + // Again, check to see if a complete Row can be yielded because of + // the newly added value. + if row := p.yield(r.ChunkedValue, i == len(r.Values)-1); row != nil { + rows = append(rows, row) + } + } + if r.ChunkedValue { + // After dealing with all values in r, if r is chunked then p must + // be also chunked. + p.chunked = true + } + return rows, nil +} + +// isMergeable returns if a protobuf Value can be potentially merged with +// other protobuf Values. +func (p *partialResultSetDecoder) isMergeable(a *proto3.Value) bool { + switch a.Kind.(type) { + case *proto3.Value_StringValue: + return true + case *proto3.Value_ListValue: + return true + default: + return false + } +} + +// errIncompatibleMergeTypes returns error for incompatible protobuf types +// that cannot be merged by partialResultSetDecoder. 
+func errIncompatibleMergeTypes(a, b *proto3.Value) error { + return spannerErrorf(codes.FailedPrecondition, "partialResultSetDecoder merge(%T,%T) - incompatible types", a.Kind, b.Kind) +} + +// errUnsupportedMergeType returns error for protobuf type that cannot be +// merged to other protobufs. +func errUnsupportedMergeType(a *proto3.Value) error { + return spannerErrorf(codes.FailedPrecondition, "unsupported type merge (%T)", a.Kind) +} + +// merge tries to combine two protobuf Values if possible. +func (p *partialResultSetDecoder) merge(a, b *proto3.Value) (*proto3.Value, error) { + var err error + typeErr := errIncompatibleMergeTypes(a, b) + switch t := a.Kind.(type) { + case *proto3.Value_StringValue: + s, ok := b.Kind.(*proto3.Value_StringValue) + if !ok { + return nil, typeErr + } + return &proto3.Value{ + Kind: &proto3.Value_StringValue{StringValue: t.StringValue + s.StringValue}, + }, nil + case *proto3.Value_ListValue: + l, ok := b.Kind.(*proto3.Value_ListValue) + if !ok { + return nil, typeErr + } + if l.ListValue == nil || len(l.ListValue.Values) <= 0 { + // b is an empty list, just return a. + return a, nil + } + if t.ListValue == nil || len(t.ListValue.Values) <= 0 { + // a is an empty list, just return b. + return b, nil + } + if la := len(t.ListValue.Values) - 1; p.isMergeable(t.ListValue.Values[la]) { + // When the last item in a is of type String, + // List or Struct(encoded into List by Cloud Spanner), + // try to Merge last item in a and first item in b. + t.ListValue.Values[la], err = p.merge(t.ListValue.Values[la], l.ListValue.Values[0]) + if err != nil { + return nil, err + } + l.ListValue.Values = l.ListValue.Values[1:] + } + return &proto3.Value{ + Kind: &proto3.Value_ListValue{ + ListValue: &proto3.ListValue{ + Values: append(t.ListValue.Values, l.ListValue.Values...), + }, + }, + }, nil + default: + return nil, errUnsupportedMergeType(a) + } + +} + +// Done returns if partialResultSetDecoder has already done with all buffered +// values. 
+func (p *partialResultSetDecoder) done() bool { + // There is no explicit end of stream marker, but ending part way + // through a row is obviously bad, or ending with the last column still + // awaiting completion. + return len(p.row.vals) == 0 && !p.chunked +} diff --git a/vendor/cloud.google.com/go/spanner/read_test.go b/vendor/cloud.google.com/go/spanner/read_test.go new file mode 100644 index 00000000..f6476571 --- /dev/null +++ b/vendor/cloud.google.com/go/spanner/read_test.go @@ -0,0 +1,1727 @@ +/* +Copyright 2017 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package spanner + +import ( + "errors" + "fmt" + "io" + "reflect" + "sync/atomic" + "testing" + "time" + + "golang.org/x/net/context" + + proto "github.com/golang/protobuf/proto" + proto3 "github.com/golang/protobuf/ptypes/struct" + + "cloud.google.com/go/spanner/internal/testutil" + "google.golang.org/api/iterator" + sppb "google.golang.org/genproto/googleapis/spanner/v1" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" +) + +var ( + // Mocked transaction timestamp. + trxTs = time.Unix(1, 2) + // Metadata for mocked KV table, its rows are returned by SingleUse transactions. + kvMeta = func() *sppb.ResultSetMetadata { + meta := testutil.KvMeta + meta.Transaction = &sppb.Transaction{ + ReadTimestamp: timestampProto(trxTs), + } + return &meta + }() + // Metadata for mocked ListKV table, which uses List for its key and value. 
+ // Its rows are returned by snapshot readonly transactions, as indicated in the transaction metadata. + kvListMeta = &sppb.ResultSetMetadata{ + RowType: &sppb.StructType{ + Fields: []*sppb.StructType_Field{ + { + Name: "Key", + Type: &sppb.Type{ + Code: sppb.TypeCode_ARRAY, + ArrayElementType: &sppb.Type{ + Code: sppb.TypeCode_STRING, + }, + }, + }, + { + Name: "Value", + Type: &sppb.Type{ + Code: sppb.TypeCode_ARRAY, + ArrayElementType: &sppb.Type{ + Code: sppb.TypeCode_STRING, + }, + }, + }, + }, + }, + Transaction: &sppb.Transaction{ + Id: transactionID{5, 6, 7, 8, 9}, + ReadTimestamp: timestampProto(trxTs), + }, + } + // Metadata for mocked schema of a query result set, which has two struct + // columns named "Col1" and "Col2", the struct's schema is like the + // following: + // + // STRUCT { + // INT + // LIST + // } + // + // Its rows are returned in readwrite transaction, as indicated in the transaction metadata. + kvObjectMeta = &sppb.ResultSetMetadata{ + RowType: &sppb.StructType{ + Fields: []*sppb.StructType_Field{ + { + Name: "Col1", + Type: &sppb.Type{ + Code: sppb.TypeCode_STRUCT, + StructType: &sppb.StructType{ + Fields: []*sppb.StructType_Field{ + { + Name: "foo-f1", + Type: &sppb.Type{ + Code: sppb.TypeCode_INT64, + }, + }, + { + Name: "foo-f2", + Type: &sppb.Type{ + Code: sppb.TypeCode_ARRAY, + ArrayElementType: &sppb.Type{ + Code: sppb.TypeCode_STRING, + }, + }, + }, + }, + }, + }, + }, + { + Name: "Col2", + Type: &sppb.Type{ + Code: sppb.TypeCode_STRUCT, + StructType: &sppb.StructType{ + Fields: []*sppb.StructType_Field{ + { + Name: "bar-f1", + Type: &sppb.Type{ + Code: sppb.TypeCode_INT64, + }, + }, + { + Name: "bar-f2", + Type: &sppb.Type{ + Code: sppb.TypeCode_ARRAY, + ArrayElementType: &sppb.Type{ + Code: sppb.TypeCode_STRING, + }, + }, + }, + }, + }, + }, + }, + }, + }, + Transaction: &sppb.Transaction{ + Id: transactionID{1, 2, 3, 4, 5}, + }, + } +) + +// String implements fmt.stringer. 
+func (r *Row) String() string { + return fmt.Sprintf("{fields: %s, val: %s}", r.fields, r.vals) +} + +func describeRows(l []*Row) string { + // generate a nice test failure description + var s = "[" + for i, r := range l { + if i != 0 { + s += ",\n " + } + s += fmt.Sprint(r) + } + s += "]" + return s +} + +// Helper for generating proto3 Value_ListValue instances, making +// test code shorter and readable. +func genProtoListValue(v ...string) *proto3.Value_ListValue { + r := &proto3.Value_ListValue{ + ListValue: &proto3.ListValue{ + Values: []*proto3.Value{}, + }, + } + for _, e := range v { + r.ListValue.Values = append( + r.ListValue.Values, + &proto3.Value{ + Kind: &proto3.Value_StringValue{StringValue: e}, + }, + ) + } + return r +} + +// Test Row generation logics of partialResultSetDecoder. +func TestPartialResultSetDecoder(t *testing.T) { + restore := setMaxBytesBetweenResumeTokens() + defer restore() + var tests = []struct { + input []*sppb.PartialResultSet + wantF []*Row + wantTxID transactionID + wantTs time.Time + wantD bool + }{ + { + // Empty input. + wantD: true, + }, + // String merging examples. + { + // Single KV result. + input: []*sppb.PartialResultSet{ + { + Metadata: kvMeta, + Values: []*proto3.Value{ + {Kind: &proto3.Value_StringValue{StringValue: "foo"}}, + {Kind: &proto3.Value_StringValue{StringValue: "bar"}}, + }, + }, + }, + wantF: []*Row{ + { + fields: kvMeta.RowType.Fields, + vals: []*proto3.Value{ + {Kind: &proto3.Value_StringValue{StringValue: "foo"}}, + {Kind: &proto3.Value_StringValue{StringValue: "bar"}}, + }, + }, + }, + wantTs: trxTs, + wantD: true, + }, + { + // Incomplete partial result. + input: []*sppb.PartialResultSet{ + { + Metadata: kvMeta, + Values: []*proto3.Value{ + {Kind: &proto3.Value_StringValue{StringValue: "foo"}}, + }, + }, + }, + wantTs: trxTs, + wantD: false, + }, + { + // Complete splitted result. 
+ input: []*sppb.PartialResultSet{ + { + Metadata: kvMeta, + Values: []*proto3.Value{ + {Kind: &proto3.Value_StringValue{StringValue: "foo"}}, + }, + }, + { + Values: []*proto3.Value{ + {Kind: &proto3.Value_StringValue{StringValue: "bar"}}, + }, + }, + }, + wantF: []*Row{ + { + fields: kvMeta.RowType.Fields, + vals: []*proto3.Value{ + {Kind: &proto3.Value_StringValue{StringValue: "foo"}}, + {Kind: &proto3.Value_StringValue{StringValue: "bar"}}, + }, + }, + }, + wantTs: trxTs, + wantD: true, + }, + { + // Multi-row example with splitted row in the middle. + input: []*sppb.PartialResultSet{ + { + Metadata: kvMeta, + Values: []*proto3.Value{ + {Kind: &proto3.Value_StringValue{StringValue: "foo"}}, + {Kind: &proto3.Value_StringValue{StringValue: "bar"}}, + {Kind: &proto3.Value_StringValue{StringValue: "A"}}, + }, + }, + { + Values: []*proto3.Value{ + {Kind: &proto3.Value_StringValue{StringValue: "1"}}, + {Kind: &proto3.Value_StringValue{StringValue: "B"}}, + {Kind: &proto3.Value_StringValue{StringValue: "2"}}, + }, + }, + }, + wantF: []*Row{ + { + fields: kvMeta.RowType.Fields, + vals: []*proto3.Value{ + {Kind: &proto3.Value_StringValue{StringValue: "foo"}}, + {Kind: &proto3.Value_StringValue{StringValue: "bar"}}, + }, + }, + { + fields: kvMeta.RowType.Fields, + vals: []*proto3.Value{ + {Kind: &proto3.Value_StringValue{StringValue: "A"}}, + {Kind: &proto3.Value_StringValue{StringValue: "1"}}, + }, + }, + { + fields: kvMeta.RowType.Fields, + vals: []*proto3.Value{ + {Kind: &proto3.Value_StringValue{StringValue: "B"}}, + {Kind: &proto3.Value_StringValue{StringValue: "2"}}, + }, + }, + }, + wantTs: trxTs, + wantD: true, + }, + { + // Merging example in result_set.proto. 
+ input: []*sppb.PartialResultSet{ + { + Metadata: kvMeta, + Values: []*proto3.Value{ + {Kind: &proto3.Value_StringValue{StringValue: "Hello"}}, + {Kind: &proto3.Value_StringValue{StringValue: "W"}}, + }, + ChunkedValue: true, + }, + { + Values: []*proto3.Value{ + {Kind: &proto3.Value_StringValue{StringValue: "orl"}}, + }, + ChunkedValue: true, + }, + { + Values: []*proto3.Value{ + {Kind: &proto3.Value_StringValue{StringValue: "d"}}, + }, + }, + }, + wantF: []*Row{ + { + fields: kvMeta.RowType.Fields, + vals: []*proto3.Value{ + {Kind: &proto3.Value_StringValue{StringValue: "Hello"}}, + {Kind: &proto3.Value_StringValue{StringValue: "World"}}, + }, + }, + }, + wantTs: trxTs, + wantD: true, + }, + { + // More complex example showing completing a merge and + // starting a new merge in the same partialResultSet. + input: []*sppb.PartialResultSet{ + { + Metadata: kvMeta, + Values: []*proto3.Value{ + {Kind: &proto3.Value_StringValue{StringValue: "Hello"}}, + {Kind: &proto3.Value_StringValue{StringValue: "W"}}, // start split in value + }, + ChunkedValue: true, + }, + { + Values: []*proto3.Value{ + {Kind: &proto3.Value_StringValue{StringValue: "orld"}}, // complete value + {Kind: &proto3.Value_StringValue{StringValue: "i"}}, // start split in key + }, + ChunkedValue: true, + }, + { + Values: []*proto3.Value{ + {Kind: &proto3.Value_StringValue{StringValue: "s"}}, // complete key + {Kind: &proto3.Value_StringValue{StringValue: "not"}}, + {Kind: &proto3.Value_StringValue{StringValue: "a"}}, + {Kind: &proto3.Value_StringValue{StringValue: "qu"}}, // split in value + }, + ChunkedValue: true, + }, + { + Values: []*proto3.Value{ + {Kind: &proto3.Value_StringValue{StringValue: "estion"}}, // complete value + }, + }, + }, + wantF: []*Row{ + { + fields: kvMeta.RowType.Fields, + vals: []*proto3.Value{ + {Kind: &proto3.Value_StringValue{StringValue: "Hello"}}, + {Kind: &proto3.Value_StringValue{StringValue: "World"}}, + }, + }, + { + fields: kvMeta.RowType.Fields, + vals: 
[]*proto3.Value{ + {Kind: &proto3.Value_StringValue{StringValue: "is"}}, + {Kind: &proto3.Value_StringValue{StringValue: "not"}}, + }, + }, + { + fields: kvMeta.RowType.Fields, + vals: []*proto3.Value{ + {Kind: &proto3.Value_StringValue{StringValue: "a"}}, + {Kind: &proto3.Value_StringValue{StringValue: "question"}}, + }, + }, + }, + wantTs: trxTs, + wantD: true, + }, + // List merging examples. + { + // Non-splitting Lists. + input: []*sppb.PartialResultSet{ + { + Metadata: kvListMeta, + Values: []*proto3.Value{ + { + Kind: genProtoListValue("foo-1", "foo-2"), + }, + }, + }, + { + Values: []*proto3.Value{ + { + Kind: genProtoListValue("bar-1", "bar-2"), + }, + }, + }, + }, + wantF: []*Row{ + { + fields: kvListMeta.RowType.Fields, + vals: []*proto3.Value{ + { + Kind: genProtoListValue("foo-1", "foo-2"), + }, + { + Kind: genProtoListValue("bar-1", "bar-2"), + }, + }, + }, + }, + wantTxID: transactionID{5, 6, 7, 8, 9}, + wantTs: trxTs, + wantD: true, + }, + { + // Simple List merge case: splitted string element. + input: []*sppb.PartialResultSet{ + { + Metadata: kvListMeta, + Values: []*proto3.Value{ + { + Kind: genProtoListValue("foo-1", "foo-"), + }, + }, + ChunkedValue: true, + }, + { + Values: []*proto3.Value{ + { + Kind: genProtoListValue("2"), + }, + }, + }, + { + Values: []*proto3.Value{ + { + Kind: genProtoListValue("bar-1", "bar-2"), + }, + }, + }, + }, + wantF: []*Row{ + { + fields: kvListMeta.RowType.Fields, + vals: []*proto3.Value{ + { + Kind: genProtoListValue("foo-1", "foo-2"), + }, + { + Kind: genProtoListValue("bar-1", "bar-2"), + }, + }, + }, + }, + wantTxID: transactionID{5, 6, 7, 8, 9}, + wantTs: trxTs, + wantD: true, + }, + { + // Struct merging is also implemented by List merging. Note that + // Cloud Spanner uses proto.ListValue to encode Structs as well. 
+ input: []*sppb.PartialResultSet{ + { + Metadata: kvObjectMeta, + Values: []*proto3.Value{ + { + Kind: &proto3.Value_ListValue{ + ListValue: &proto3.ListValue{ + Values: []*proto3.Value{ + {Kind: &proto3.Value_NumberValue{NumberValue: 23}}, + {Kind: genProtoListValue("foo-1", "fo")}, + }, + }, + }, + }, + }, + ChunkedValue: true, + }, + { + Values: []*proto3.Value{ + { + Kind: &proto3.Value_ListValue{ + ListValue: &proto3.ListValue{ + Values: []*proto3.Value{ + {Kind: genProtoListValue("o-2", "f")}, + }, + }, + }, + }, + }, + ChunkedValue: true, + }, + { + Values: []*proto3.Value{ + { + Kind: &proto3.Value_ListValue{ + ListValue: &proto3.ListValue{ + Values: []*proto3.Value{ + {Kind: genProtoListValue("oo-3")}, + }, + }, + }, + }, + { + Kind: &proto3.Value_ListValue{ + ListValue: &proto3.ListValue{ + Values: []*proto3.Value{ + {Kind: &proto3.Value_NumberValue{NumberValue: 45}}, + {Kind: genProtoListValue("bar-1")}, + }, + }, + }, + }, + }, + }, + }, + wantF: []*Row{ + { + fields: kvObjectMeta.RowType.Fields, + vals: []*proto3.Value{ + { + Kind: &proto3.Value_ListValue{ + ListValue: &proto3.ListValue{ + Values: []*proto3.Value{ + {Kind: &proto3.Value_NumberValue{NumberValue: 23}}, + {Kind: genProtoListValue("foo-1", "foo-2", "foo-3")}, + }, + }, + }, + }, + { + Kind: &proto3.Value_ListValue{ + ListValue: &proto3.ListValue{ + Values: []*proto3.Value{ + {Kind: &proto3.Value_NumberValue{NumberValue: 45}}, + {Kind: genProtoListValue("bar-1")}, + }, + }, + }, + }, + }, + }, + }, + wantTxID: transactionID{1, 2, 3, 4, 5}, + wantD: true, + }, + } + +nextTest: + for i, test := range tests { + var rows []*Row + p := &partialResultSetDecoder{} + for j, v := range test.input { + rs, err := p.add(v) + if err != nil { + t.Errorf("test %d.%d: partialResultSetDecoder.add(%v) = %v; want nil", i, j, v, err) + continue nextTest + } + rows = append(rows, rs...) 
+ } + if !reflect.DeepEqual(p.ts, test.wantTs) { + t.Errorf("got transaction(%v), want %v", p.ts, test.wantTs) + } + if !reflect.DeepEqual(rows, test.wantF) { + t.Errorf("test %d: rows=\n%v\n; want\n%v\n; p.row:\n%v\n", i, describeRows(rows), describeRows(test.wantF), p.row) + } + if got := p.done(); got != test.wantD { + t.Errorf("test %d: partialResultSetDecoder.done() = %v", i, got) + } + } +} + +const ( + maxBuffers = 16 // max number of PartialResultSets that will be buffered in tests. +) + +// setMaxBytesBetweenResumeTokens sets the global maxBytesBetweenResumeTokens to a smaller +// value more suitable for tests. It returns a function which should be called to restore +// the maxBytesBetweenResumeTokens to its old value +func setMaxBytesBetweenResumeTokens() func() { + o := atomic.LoadInt32(&maxBytesBetweenResumeTokens) + atomic.StoreInt32(&maxBytesBetweenResumeTokens, int32(maxBuffers*proto.Size(&sppb.PartialResultSet{ + Metadata: kvMeta, + Values: []*proto3.Value{ + {Kind: &proto3.Value_StringValue{StringValue: keyStr(0)}}, + {Kind: &proto3.Value_StringValue{StringValue: valStr(0)}}, + }, + }))) + return func() { + atomic.StoreInt32(&maxBytesBetweenResumeTokens, o) + } +} + +// keyStr generates key string for kvMeta schema. +func keyStr(i int) string { + return fmt.Sprintf("foo-%02d", i) +} + +// valStr generates value string for kvMeta schema. +func valStr(i int) string { + return fmt.Sprintf("bar-%02d", i) +} + +// Test state transitions of resumableStreamDecoder where state machine +// ends up to a non-blocking state(resumableStreamDecoder.Next returns +// on non-blocking state). 
+func TestRsdNonblockingStates(t *testing.T) { + restore := setMaxBytesBetweenResumeTokens() + defer restore() + tests := []struct { + name string + msgs []testutil.MockCtlMsg + rpc func(ct context.Context, resumeToken []byte) (streamingReceiver, error) + sql string + // Expected values + want []*sppb.PartialResultSet // PartialResultSets that should be returned to caller + queue []*sppb.PartialResultSet // PartialResultSets that should be buffered + resumeToken []byte // Resume token that is maintained by resumableStreamDecoder + stateHistory []resumableStreamDecoderState // State transition history of resumableStreamDecoder + wantErr error + }{ + { + // unConnected->queueingRetryable->finished + name: "unConnected->queueingRetryable->finished", + msgs: []testutil.MockCtlMsg{ + {}, + {}, + {Err: io.EOF, ResumeToken: false}, + }, + sql: "SELECT t.key key, t.value value FROM t_mock t", + want: []*sppb.PartialResultSet{ + { + Metadata: kvMeta, + Values: []*proto3.Value{ + {Kind: &proto3.Value_StringValue{StringValue: keyStr(0)}}, + {Kind: &proto3.Value_StringValue{StringValue: valStr(0)}}, + }, + }, + }, + queue: []*sppb.PartialResultSet{ + { + Metadata: kvMeta, + Values: []*proto3.Value{ + {Kind: &proto3.Value_StringValue{StringValue: keyStr(1)}}, + {Kind: &proto3.Value_StringValue{StringValue: valStr(1)}}, + }, + }, + }, + stateHistory: []resumableStreamDecoderState{ + queueingRetryable, // do RPC + queueingRetryable, // got foo-00 + queueingRetryable, // got foo-01 + finished, // got EOF + }, + }, + { + // unConnected->queueingRetryable->aborted + name: "unConnected->queueingRetryable->aborted", + msgs: []testutil.MockCtlMsg{ + {}, + {Err: nil, ResumeToken: true}, + {}, + {Err: errors.New("I quit"), ResumeToken: false}, + }, + sql: "SELECT t.key key, t.value value FROM t_mock t", + want: []*sppb.PartialResultSet{ + { + Metadata: kvMeta, + Values: []*proto3.Value{ + {Kind: &proto3.Value_StringValue{StringValue: keyStr(0)}}, + {Kind: 
&proto3.Value_StringValue{StringValue: valStr(0)}}, + }, + }, + { + Metadata: kvMeta, + Values: []*proto3.Value{ + {Kind: &proto3.Value_StringValue{StringValue: keyStr(1)}}, + {Kind: &proto3.Value_StringValue{StringValue: valStr(1)}}, + }, + ResumeToken: testutil.EncodeResumeToken(1), + }, + }, + stateHistory: []resumableStreamDecoderState{ + queueingRetryable, // do RPC + queueingRetryable, // got foo-00 + queueingRetryable, // got foo-01 + queueingRetryable, // foo-01, resume token + queueingRetryable, // got foo-02 + aborted, // got error + }, + wantErr: grpc.Errorf(codes.Unknown, "I quit"), + }, + { + // unConnected->queueingRetryable->queueingUnretryable->queueingUnretryable + name: "unConnected->queueingRetryable->queueingUnretryable->queueingUnretryable", + msgs: func() (m []testutil.MockCtlMsg) { + for i := 0; i < maxBuffers+1; i++ { + m = append(m, testutil.MockCtlMsg{}) + } + return m + }(), + sql: "SELECT t.key key, t.value value FROM t_mock t", + want: func() (s []*sppb.PartialResultSet) { + for i := 0; i < maxBuffers+1; i++ { + s = append(s, &sppb.PartialResultSet{ + Metadata: kvMeta, + Values: []*proto3.Value{ + {Kind: &proto3.Value_StringValue{StringValue: keyStr(i)}}, + {Kind: &proto3.Value_StringValue{StringValue: valStr(i)}}, + }, + }) + } + return s + }(), + stateHistory: func() (s []resumableStreamDecoderState) { + s = append(s, queueingRetryable) // RPC + for i := 0; i < maxBuffers; i++ { + s = append(s, queueingRetryable) // the internal queue of resumableStreamDecoder fills up + } + // the first item fills up the queue and triggers state transition; + // the second item is received under queueingUnretryable state. 
+ s = append(s, queueingUnretryable) + s = append(s, queueingUnretryable) + return s + }(), + }, + { + // unConnected->queueingRetryable->queueingUnretryable->aborted + name: "unConnected->queueingRetryable->queueingUnretryable->aborted", + msgs: func() (m []testutil.MockCtlMsg) { + for i := 0; i < maxBuffers; i++ { + m = append(m, testutil.MockCtlMsg{}) + } + m = append(m, testutil.MockCtlMsg{Err: errors.New("Just Abort It"), ResumeToken: false}) + return m + }(), + sql: "SELECT t.key key, t.value value FROM t_mock t", + want: func() (s []*sppb.PartialResultSet) { + for i := 0; i < maxBuffers; i++ { + s = append(s, &sppb.PartialResultSet{ + Metadata: kvMeta, + Values: []*proto3.Value{ + {Kind: &proto3.Value_StringValue{StringValue: keyStr(i)}}, + {Kind: &proto3.Value_StringValue{StringValue: valStr(i)}}, + }, + }) + } + return s + }(), + stateHistory: func() (s []resumableStreamDecoderState) { + s = append(s, queueingRetryable) // RPC + for i := 0; i < maxBuffers; i++ { + s = append(s, queueingRetryable) // internal queue of resumableStreamDecoder fills up + } + s = append(s, queueingUnretryable) // the last row triggers state change + s = append(s, aborted) // Error happens + return s + }(), + wantErr: grpc.Errorf(codes.Unknown, "Just Abort It"), + }, + } +nextTest: + for _, test := range tests { + ms := testutil.NewMockCloudSpanner(t, trxTs) + ms.Serve() + opts := []grpc.DialOption{ + grpc.WithInsecure(), + } + cc, err := grpc.Dial(ms.Addr(), opts...) 
+ if err != nil { + t.Fatalf("%v: Dial(%q) = %v", test.name, ms.Addr(), err) + } + mc := sppb.NewSpannerClient(cc) + if test.rpc == nil { + test.rpc = func(ct context.Context, resumeToken []byte) (streamingReceiver, error) { + return mc.ExecuteStreamingSql(ct, &sppb.ExecuteSqlRequest{ + Sql: test.sql, + ResumeToken: resumeToken, + }) + } + } + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + r := newResumableStreamDecoder( + ctx, + test.rpc, + ) + st := []resumableStreamDecoderState{} + var lastErr error + // Once the expected number of state transitions are observed, + // send a signal by setting stateDone = true. + stateDone := false + // Set stateWitness to listen to state changes. + hl := len(test.stateHistory) // To avoid data race on test. + r.stateWitness = func(rs resumableStreamDecoderState) { + if !stateDone { + // Record state transitions. + st = append(st, rs) + if len(st) == hl { + lastErr = r.lastErr() + stateDone = true + } + } + } + // Let mock server stream given messages to resumableStreamDecoder. + for _, m := range test.msgs { + ms.AddMsg(m.Err, m.ResumeToken) + } + var rs []*sppb.PartialResultSet + for { + select { + case <-ctx.Done(): + t.Errorf("context cancelled or timeout during test") + continue nextTest + default: + } + if stateDone { + // Check if resumableStreamDecoder carried out expected + // state transitions. + if !reflect.DeepEqual(st, test.stateHistory) { + t.Errorf("%v: observed state transitions: \n%v\n, want \n%v\n", + test.name, st, test.stateHistory) + } + // Check if resumableStreamDecoder returns expected array of + // PartialResultSets. + if !reflect.DeepEqual(rs, test.want) { + t.Errorf("%v: received PartialResultSets: \n%v\n, want \n%v\n", test.name, rs, test.want) + } + // Verify that resumableStreamDecoder's internal buffering is also correct. 
+ var q []*sppb.PartialResultSet + for { + item := r.q.pop() + if item == nil { + break + } + q = append(q, item) + } + if !reflect.DeepEqual(q, test.queue) { + t.Errorf("%v: PartialResultSets still queued: \n%v\n, want \n%v\n", test.name, q, test.queue) + } + // Verify resume token. + if test.resumeToken != nil && !reflect.DeepEqual(r.resumeToken, test.resumeToken) { + t.Errorf("%v: Resume token is %v, want %v\n", test.name, r.resumeToken, test.resumeToken) + } + // Verify error message. + if !reflect.DeepEqual(lastErr, test.wantErr) { + t.Errorf("%v: got error %v, want %v", test.name, lastErr, test.wantErr) + } + // Proceed to next test + continue nextTest + } + // Receive next decoded item. + if r.next() { + rs = append(rs, r.get()) + } + } + } +} + +// Test state transitions of resumableStreamDecoder where state machine +// ends up to a blocking state(resumableStreamDecoder.Next blocks +// on blocking state). +func TestRsdBlockingStates(t *testing.T) { + restore := setMaxBytesBetweenResumeTokens() + defer restore() + tests := []struct { + name string + msgs []testutil.MockCtlMsg + rpc func(ct context.Context, resumeToken []byte) (streamingReceiver, error) + sql string + // Expected values + want []*sppb.PartialResultSet // PartialResultSets that should be returned to caller + queue []*sppb.PartialResultSet // PartialResultSets that should be buffered + resumeToken []byte // Resume token that is maintained by resumableStreamDecoder + stateHistory []resumableStreamDecoderState // State transition history of resumableStreamDecoder + wantErr error + }{ + { + // unConnected -> unConnected + name: "unConnected -> unConnected", + rpc: func(ct context.Context, resumeToken []byte) (streamingReceiver, error) { + return nil, grpc.Errorf(codes.Unavailable, "trust me: server is unavailable") + }, + sql: "SELECT * from t_whatever", + stateHistory: []resumableStreamDecoderState{unConnected, unConnected, unConnected}, + wantErr: grpc.Errorf(codes.Unavailable, "trust me: server 
is unavailable"), + }, + { + // unConnected -> queueingRetryable + name: "unConnected -> queueingRetryable", + sql: "SELECT t.key key, t.value value FROM t_mock t", + stateHistory: []resumableStreamDecoderState{queueingRetryable}, + }, + { + // unConnected->queueingRetryable->queueingRetryable + name: "unConnected->queueingRetryable->queueingRetryable", + msgs: []testutil.MockCtlMsg{ + {}, + {Err: nil, ResumeToken: true}, + {Err: nil, ResumeToken: true}, + {}, + }, + sql: "SELECT t.key key, t.value value FROM t_mock t", + want: []*sppb.PartialResultSet{ + { + Metadata: kvMeta, + Values: []*proto3.Value{ + {Kind: &proto3.Value_StringValue{StringValue: keyStr(0)}}, + {Kind: &proto3.Value_StringValue{StringValue: valStr(0)}}, + }, + }, + { + Metadata: kvMeta, + Values: []*proto3.Value{ + {Kind: &proto3.Value_StringValue{StringValue: keyStr(1)}}, + {Kind: &proto3.Value_StringValue{StringValue: valStr(1)}}, + }, + ResumeToken: testutil.EncodeResumeToken(1), + }, + { + Metadata: kvMeta, + Values: []*proto3.Value{ + {Kind: &proto3.Value_StringValue{StringValue: keyStr(2)}}, + {Kind: &proto3.Value_StringValue{StringValue: valStr(2)}}, + }, + ResumeToken: testutil.EncodeResumeToken(2), + }, + }, + queue: []*sppb.PartialResultSet{ + { + Metadata: kvMeta, + Values: []*proto3.Value{ + {Kind: &proto3.Value_StringValue{StringValue: keyStr(3)}}, + {Kind: &proto3.Value_StringValue{StringValue: valStr(3)}}, + }, + }, + }, + resumeToken: testutil.EncodeResumeToken(2), + stateHistory: []resumableStreamDecoderState{ + queueingRetryable, // do RPC + queueingRetryable, // got foo-00 + queueingRetryable, // got foo-01 + queueingRetryable, // foo-01, resume token + queueingRetryable, // got foo-02 + queueingRetryable, // foo-02, resume token + queueingRetryable, // got foo-03 + }, + }, + { + // unConnected->queueingRetryable->queueingUnretryable->queueingRetryable->queueingRetryable + name: "unConnected->queueingRetryable->queueingUnretryable->queueingRetryable->queueingRetryable", + 
msgs: func() (m []testutil.MockCtlMsg) { + for i := 0; i < maxBuffers+1; i++ { + m = append(m, testutil.MockCtlMsg{}) + } + m = append(m, testutil.MockCtlMsg{Err: nil, ResumeToken: true}) + m = append(m, testutil.MockCtlMsg{}) + return m + }(), + sql: "SELECT t.key key, t.value value FROM t_mock t", + want: func() (s []*sppb.PartialResultSet) { + for i := 0; i < maxBuffers+2; i++ { + s = append(s, &sppb.PartialResultSet{ + Metadata: kvMeta, + Values: []*proto3.Value{ + {Kind: &proto3.Value_StringValue{StringValue: keyStr(i)}}, + {Kind: &proto3.Value_StringValue{StringValue: valStr(i)}}, + }, + }) + } + s[maxBuffers+1].ResumeToken = testutil.EncodeResumeToken(maxBuffers + 1) + return s + }(), + resumeToken: testutil.EncodeResumeToken(maxBuffers + 1), + queue: []*sppb.PartialResultSet{ + { + Metadata: kvMeta, + Values: []*proto3.Value{ + {Kind: &proto3.Value_StringValue{StringValue: keyStr(maxBuffers + 2)}}, + {Kind: &proto3.Value_StringValue{StringValue: valStr(maxBuffers + 2)}}, + }, + }, + }, + stateHistory: func() (s []resumableStreamDecoderState) { + s = append(s, queueingRetryable) // RPC + for i := 0; i < maxBuffers; i++ { + s = append(s, queueingRetryable) // internal queue of resumableStreamDecoder filles up + } + for i := maxBuffers - 1; i < maxBuffers+1; i++ { + // the first item fills up the queue and triggers state change; + // the second item is received under queueingUnretryable state. 
+ s = append(s, queueingUnretryable) + } + s = append(s, queueingUnretryable) // got (maxBuffers+1)th row under Unretryable state + s = append(s, queueingRetryable) // (maxBuffers+1)th row has resume token + s = append(s, queueingRetryable) // (maxBuffers+2)th row has no resume token + return s + }(), + }, + { + // unConnected->queueingRetryable->queueingUnretryable->finished + name: "unConnected->queueingRetryable->queueingUnretryable->finished", + msgs: func() (m []testutil.MockCtlMsg) { + for i := 0; i < maxBuffers; i++ { + m = append(m, testutil.MockCtlMsg{}) + } + m = append(m, testutil.MockCtlMsg{Err: io.EOF, ResumeToken: false}) + return m + }(), + sql: "SELECT t.key key, t.value value FROM t_mock t", + want: func() (s []*sppb.PartialResultSet) { + for i := 0; i < maxBuffers; i++ { + s = append(s, &sppb.PartialResultSet{ + Metadata: kvMeta, + Values: []*proto3.Value{ + {Kind: &proto3.Value_StringValue{StringValue: keyStr(i)}}, + {Kind: &proto3.Value_StringValue{StringValue: valStr(i)}}, + }, + }) + } + return s + }(), + stateHistory: func() (s []resumableStreamDecoderState) { + s = append(s, queueingRetryable) // RPC + for i := 0; i < maxBuffers; i++ { + s = append(s, queueingRetryable) // internal queue of resumableStreamDecoder fills up + } + s = append(s, queueingUnretryable) // last row triggers state change + s = append(s, finished) // query finishes + return s + }(), + }, + } + for _, test := range tests { + ms := testutil.NewMockCloudSpanner(t, trxTs) + ms.Serve() + opts := []grpc.DialOption{ + grpc.WithInsecure(), + } + cc, err := grpc.Dial(ms.Addr(), opts...) + if err != nil { + t.Fatalf("%v: Dial(%q) = %v", test.name, ms.Addr(), err) + } + mc := sppb.NewSpannerClient(cc) + if test.rpc == nil { + // Avoid using test.sql directly in closure because for loop changes test. 
+ sql := test.sql + test.rpc = func(ct context.Context, resumeToken []byte) (streamingReceiver, error) { + return mc.ExecuteStreamingSql(ct, &sppb.ExecuteSqlRequest{ + Sql: sql, + ResumeToken: resumeToken, + }) + } + } + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + r := newResumableStreamDecoder( + ctx, + test.rpc, + ) + // Override backoff to make the test run faster. + r.backoff = exponentialBackoff{1 * time.Nanosecond, 1 * time.Nanosecond} + // st is the set of observed state transitions. + st := []resumableStreamDecoderState{} + // q is the content of the decoder's partial result queue when expected number of state transitions are done. + q := []*sppb.PartialResultSet{} + var lastErr error + // Once the expected number of state transitions are observed, + // send a signal to channel stateDone. + stateDone := make(chan int) + // Set stateWitness to listen to state changes. + hl := len(test.stateHistory) // To avoid data race on test. + r.stateWitness = func(rs resumableStreamDecoderState) { + select { + case <-stateDone: + // Noop after expected number of state transitions + default: + // Record state transitions. + st = append(st, rs) + if len(st) == hl { + lastErr = r.lastErr() + q = r.q.dump() + close(stateDone) + } + } + } + // Let mock server stream given messages to resumableStreamDecoder. + for _, m := range test.msgs { + ms.AddMsg(m.Err, m.ResumeToken) + } + var rs []*sppb.PartialResultSet + go func() { + for { + if !r.next() { + // Note that r.Next also exits on context cancel/timeout. + return + } + rs = append(rs, r.get()) + } + }() + // Verify that resumableStreamDecoder reaches expected state. + select { + case <-stateDone: // Note that at this point, receiver is still blocking on r.next(). + // Check if resumableStreamDecoder carried out expected + // state transitions. 
+ if !reflect.DeepEqual(st, test.stateHistory) { + t.Errorf("%v: observed state transitions: \n%v\n, want \n%v\n", + test.name, st, test.stateHistory) + } + // Check if resumableStreamDecoder returns expected array of + // PartialResultSets. + if !reflect.DeepEqual(rs, test.want) { + t.Errorf("%v: received PartialResultSets: \n%v\n, want \n%v\n", test.name, rs, test.want) + } + // Verify that resumableStreamDecoder's internal buffering is also correct. + if !reflect.DeepEqual(q, test.queue) { + t.Errorf("%v: PartialResultSets still queued: \n%v\n, want \n%v\n", test.name, q, test.queue) + } + // Verify resume token. + if test.resumeToken != nil && !reflect.DeepEqual(r.resumeToken, test.resumeToken) { + t.Errorf("%v: Resume token is %v, want %v\n", test.name, r.resumeToken, test.resumeToken) + } + // Verify error message. + if !reflect.DeepEqual(lastErr, test.wantErr) { + t.Errorf("%v: got error %v, want %v", test.name, lastErr, test.wantErr) + } + case <-time.After(1 * time.Second): + t.Errorf("%v: Timeout in waiting for state change", test.name) + } + ms.Stop() + cc.Close() + } +} + +// sReceiver signals every receiving attempt through a channel, +// used by TestResumeToken to determine if the receiving of a certain +// PartialResultSet will be attempted next. +type sReceiver struct { + c chan int + rpcReceiver sppb.Spanner_ExecuteStreamingSqlClient +} + +// Recv() implements streamingReceiver.Recv for sReceiver. +func (sr *sReceiver) Recv() (*sppb.PartialResultSet, error) { + sr.c <- 1 + return sr.rpcReceiver.Recv() +} + +// waitn waits for nth receiving attempt from now on, until +// the signal for nth Recv() attempts is received or timeout. +// Note that because the way stream() works, the signal for the +// nth Recv() means that the previous n - 1 PartialResultSets +// has already been returned to caller or queued, if no error happened. 
+func (sr *sReceiver) waitn(n int) error { + for i := 0; i < n; i++ { + select { + case <-sr.c: + case <-time.After(10 * time.Second): + return fmt.Errorf("timeout in waiting for %v-th Recv()", i+1) + } + } + return nil +} + +// Test the handling of resumableStreamDecoder.bytesBetweenResumeTokens. +func TestQueueBytes(t *testing.T) { + restore := setMaxBytesBetweenResumeTokens() + defer restore() + ms := testutil.NewMockCloudSpanner(t, trxTs) + ms.Serve() + defer ms.Stop() + opts := []grpc.DialOption{ + grpc.WithInsecure(), + } + cc, err := grpc.Dial(ms.Addr(), opts...) + if err != nil { + t.Fatalf("Dial(%q) = %v", ms.Addr(), err) + } + defer cc.Close() + mc := sppb.NewSpannerClient(cc) + sr := &sReceiver{ + c: make(chan int, 1000), // will never block in this test + } + wantQueueBytes := 0 + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + r := newResumableStreamDecoder( + ctx, + func(ct context.Context, resumeToken []byte) (streamingReceiver, error) { + r, err := mc.ExecuteStreamingSql(ct, &sppb.ExecuteSqlRequest{ + Sql: "SELECT t.key key, t.value value FROM t_mock t", + ResumeToken: resumeToken, + }) + sr.rpcReceiver = r + return sr, err + }, + ) + go func() { + for r.next() { + } + }() + // Let server send maxBuffers / 2 rows. + for i := 0; i < maxBuffers/2; i++ { + wantQueueBytes += proto.Size(&sppb.PartialResultSet{ + Metadata: kvMeta, + Values: []*proto3.Value{ + {Kind: &proto3.Value_StringValue{StringValue: keyStr(i)}}, + {Kind: &proto3.Value_StringValue{StringValue: valStr(i)}}, + }, + }) + ms.AddMsg(nil, false) + } + if err := sr.waitn(maxBuffers/2 + 1); err != nil { + t.Fatalf("failed to wait for the first %v recv() calls: %v", maxBuffers, err) + } + if int32(wantQueueBytes) != r.bytesBetweenResumeTokens { + t.Errorf("r.bytesBetweenResumeTokens = %v, want %v", r.bytesBetweenResumeTokens, wantQueueBytes) + } + // Now send a resume token to drain the queue. 
+ ms.AddMsg(nil, true) + // Wait for all rows to be processes. + if err := sr.waitn(1); err != nil { + t.Fatalf("failed to wait for rows to be processed: %v", err) + } + if r.bytesBetweenResumeTokens != 0 { + t.Errorf("r.bytesBetweenResumeTokens = %v, want 0", r.bytesBetweenResumeTokens) + } + // Let server send maxBuffers - 1 rows. + wantQueueBytes = 0 + for i := 0; i < maxBuffers-1; i++ { + wantQueueBytes += proto.Size(&sppb.PartialResultSet{ + Metadata: kvMeta, + Values: []*proto3.Value{ + {Kind: &proto3.Value_StringValue{StringValue: keyStr(i)}}, + {Kind: &proto3.Value_StringValue{StringValue: valStr(i)}}, + }, + }) + ms.AddMsg(nil, false) + } + if err := sr.waitn(maxBuffers - 1); err != nil { + t.Fatalf("failed to wait for %v rows to be processed: %v", maxBuffers-1, err) + } + if int32(wantQueueBytes) != r.bytesBetweenResumeTokens { + t.Errorf("r.bytesBetweenResumeTokens = %v, want 0", r.bytesBetweenResumeTokens) + } + // Trigger a state transition: queueingRetryable -> queueingUnretryable. + ms.AddMsg(nil, false) + if err := sr.waitn(1); err != nil { + t.Fatalf("failed to wait for state transition: %v", err) + } + if r.bytesBetweenResumeTokens != 0 { + t.Errorf("r.bytesBetweenResumeTokens = %v, want 0", r.bytesBetweenResumeTokens) + } +} + +// Verify that client can deal with resume token correctly +func TestResumeToken(t *testing.T) { + restore := setMaxBytesBetweenResumeTokens() + defer restore() + ms := testutil.NewMockCloudSpanner(t, trxTs) + ms.Serve() + opts := []grpc.DialOption{ + grpc.WithInsecure(), + } + cc, err := grpc.Dial(ms.Addr(), opts...) + if err != nil { + t.Fatalf("Dial(%q) = %v", ms.Addr(), err) + } + defer func() { + ms.Stop() + cc.Close() + }() + mc := sppb.NewSpannerClient(cc) + sr := &sReceiver{ + c: make(chan int, 1000), // will never block in this test + } + rows := []*Row{} + done := make(chan int) + streaming := func() { + // Establish a stream to mock cloud spanner server. 
+ iter := stream(context.Background(), + func(ct context.Context, resumeToken []byte) (streamingReceiver, error) { + r, err := mc.ExecuteStreamingSql(ct, &sppb.ExecuteSqlRequest{ + Sql: "SELECT t.key key, t.value value FROM t_mock t", + ResumeToken: resumeToken, + }) + sr.rpcReceiver = r + return sr, err + }, + func(time.Time, error) {}) + defer iter.Stop() + for { + var row *Row + row, err = iter.Next() + if err == iterator.Done { + err = nil + break + } + if err != nil { + break + } + rows = append(rows, row) + } + done <- 1 + } + go streaming() + // Server streaming row 0 - 2, only row 1 has resume token. + // Client will receive row 0 - 2, so it will try receiving for + // 4 times (the last recv will block), and only row 0 - 1 will + // be yielded. + for i := 0; i < 3; i++ { + if i == 1 { + ms.AddMsg(nil, true) + } else { + ms.AddMsg(nil, false) + } + } + // Wait for 4 receive attempts, as explained above. + if err = sr.waitn(4); err != nil { + t.Fatalf("failed to wait for row 0 - 2: %v", err) + } + want := []*Row{ + { + fields: kvMeta.RowType.Fields, + vals: []*proto3.Value{ + {Kind: &proto3.Value_StringValue{StringValue: keyStr(0)}}, + {Kind: &proto3.Value_StringValue{StringValue: valStr(0)}}, + }, + }, + { + fields: kvMeta.RowType.Fields, + vals: []*proto3.Value{ + {Kind: &proto3.Value_StringValue{StringValue: keyStr(1)}}, + {Kind: &proto3.Value_StringValue{StringValue: valStr(1)}}, + }, + }, + } + if !reflect.DeepEqual(rows, want) { + t.Errorf("received rows: \n%v\n; but want\n%v\n", rows, want) + } + // Inject resumable failure. + ms.AddMsg( + grpc.Errorf(codes.Unavailable, "mock server unavailable"), + false, + ) + // Test if client detects the resumable failure and retries. + if err = sr.waitn(1); err != nil { + t.Fatalf("failed to wait for client to retry: %v", err) + } + // Client has resumed the query, now server resend row 2. 
+ ms.AddMsg(nil, true) + if err = sr.waitn(1); err != nil { + t.Fatalf("failed to wait for resending row 2: %v", err) + } + // Now client should have received row 0 - 2. + want = append(want, &Row{ + fields: kvMeta.RowType.Fields, + vals: []*proto3.Value{ + {Kind: &proto3.Value_StringValue{StringValue: keyStr(2)}}, + {Kind: &proto3.Value_StringValue{StringValue: valStr(2)}}, + }, + }) + if !reflect.DeepEqual(rows, want) { + t.Errorf("received rows: \n%v\n, want\n%v\n", rows, want) + } + // Sending 3rd - (maxBuffers+1)th rows without resume tokens, client should buffer them. + for i := 3; i < maxBuffers+2; i++ { + ms.AddMsg(nil, false) + } + if err = sr.waitn(maxBuffers - 1); err != nil { + t.Fatalf("failed to wait for row 3-%v: %v", maxBuffers+1, err) + } + // Received rows should be unchanged. + if !reflect.DeepEqual(rows, want) { + t.Errorf("receive rows: \n%v\n, want\n%v\n", rows, want) + } + // Send (maxBuffers+2)th row to trigger state change of resumableStreamDecoder: + // queueingRetryable -> queueingUnretryable + ms.AddMsg(nil, false) + if err = sr.waitn(1); err != nil { + t.Fatalf("failed to wait for row %v: %v", maxBuffers+2, err) + } + // Client should yield row 3rd - (maxBuffers+2)th to application. Therefore, application should + // see row 0 - (maxBuffers+2)th so far. + for i := 3; i < maxBuffers+3; i++ { + want = append(want, &Row{ + fields: kvMeta.RowType.Fields, + vals: []*proto3.Value{ + {Kind: &proto3.Value_StringValue{StringValue: keyStr(i)}}, + {Kind: &proto3.Value_StringValue{StringValue: valStr(i)}}, + }, + }) + } + if !reflect.DeepEqual(rows, want) { + t.Errorf("received rows: \n%v\n; want\n%v\n", rows, want) + } + // Inject resumable error, but since resumableStreamDecoder is already at queueingUnretryable + // state, query will just fail. 
+ ms.AddMsg( + grpc.Errorf(codes.Unavailable, "mock server wants some sleep"), + false, + ) + select { + case <-done: + case <-time.After(10 * time.Second): + t.Fatalf("timeout in waiting for failed query to return.") + } + if wantErr := toSpannerError(grpc.Errorf(codes.Unavailable, "mock server wants some sleep")); !reflect.DeepEqual(err, wantErr) { + t.Fatalf("stream() returns error: %v, but want error: %v", err, wantErr) + } + + // Reconnect to mock Cloud Spanner. + rows = []*Row{} + go streaming() + // Let server send two rows without resume token. + for i := maxBuffers + 3; i < maxBuffers+5; i++ { + ms.AddMsg(nil, false) + } + if err = sr.waitn(3); err != nil { + t.Fatalf("failed to wait for row %v - %v: %v", maxBuffers+3, maxBuffers+5, err) + } + if len(rows) > 0 { + t.Errorf("client received some rows unexpectedly: %v, want nothing", rows) + } + // Let server end the query. + ms.AddMsg(io.EOF, false) + select { + case <-done: + case <-time.After(10 * time.Second): + t.Fatalf("timeout in waiting for failed query to return") + } + if err != nil { + t.Fatalf("stream() returns unexpected error: %v, but want no error", err) + } + // Verify if a normal server side EOF flushes all queued rows. + want = []*Row{ + { + fields: kvMeta.RowType.Fields, + vals: []*proto3.Value{ + {Kind: &proto3.Value_StringValue{StringValue: keyStr(maxBuffers + 3)}}, + {Kind: &proto3.Value_StringValue{StringValue: valStr(maxBuffers + 3)}}, + }, + }, + { + fields: kvMeta.RowType.Fields, + vals: []*proto3.Value{ + {Kind: &proto3.Value_StringValue{StringValue: keyStr(maxBuffers + 4)}}, + {Kind: &proto3.Value_StringValue{StringValue: valStr(maxBuffers + 4)}}, + }, + }, + } + if !reflect.DeepEqual(rows, want) { + t.Errorf("received rows: \n%v\n; but want\n%v\n", rows, want) + } +} + +// Verify that streaming query get retried upon real gRPC server transport failures. 
+func TestGrpcReconnect(t *testing.T) { + restore := setMaxBytesBetweenResumeTokens() + defer restore() + ms := testutil.NewMockCloudSpanner(t, trxTs) + ms.Serve() + defer ms.Stop() + cc, err := grpc.Dial(ms.Addr(), grpc.WithInsecure()) + if err != nil { + t.Fatalf("Dial(%q) = %v", ms.Addr(), err) + } + defer cc.Close() + mc := sppb.NewSpannerClient(cc) + retry := make(chan int) + row := make(chan int) + go func() { + r := 0 + // Establish a stream to mock cloud spanner server. + iter := stream(context.Background(), + func(ct context.Context, resumeToken []byte) (streamingReceiver, error) { + if r > 0 { + // This RPC attempt is a retry, signal it. + retry <- r + } + r++ + return mc.ExecuteStreamingSql(ct, &sppb.ExecuteSqlRequest{ + Sql: "SELECT t.key key, t.value value FROM t_mock t", + ResumeToken: resumeToken, + }) + + }, + func(time.Time, error) {}) + defer iter.Stop() + for { + _, err = iter.Next() + if err == iterator.Done { + err = nil + break + } + if err != nil { + break + } + row <- 0 + } + }() + // Add a message and wait for the receipt. + ms.AddMsg(nil, true) + select { + case <-row: + case <-time.After(10 * time.Second): + t.Fatalf("expect stream to be established within 10 seconds, but it didn't") + } + // Error injection: force server to close all connections. + ms.Stop() + // Test to see if client respond to the real RPC failure correctly by + // retrying RPC. + select { + case r, ok := <-retry: + if ok && r == 1 { + break + } + t.Errorf("retry count = %v, want 1", r) + case <-time.After(10 * time.Second): + t.Errorf("client library failed to respond after 10 seconds, aborting") + return + } +} + +// Test cancel/timeout for client operations. +func TestCancelTimeout(t *testing.T) { + restore := setMaxBytesBetweenResumeTokens() + defer restore() + ms := testutil.NewMockCloudSpanner(t, trxTs) + ms.Serve() + defer ms.Stop() + opts := []grpc.DialOption{ + grpc.WithInsecure(), + } + cc, err := grpc.Dial(ms.Addr(), opts...) 
+ defer cc.Close() + if err != nil { + t.Fatalf("Dial(%q) = %v", ms.Addr(), err) + } + mc := sppb.NewSpannerClient(cc) + done := make(chan int) + go func() { + for { + ms.AddMsg(nil, true) + } + }() + // Test cancelling query. + ctx, cancel := context.WithCancel(context.Background()) + go func() { + // Establish a stream to mock cloud spanner server. + iter := stream(ctx, + func(ct context.Context, resumeToken []byte) (streamingReceiver, error) { + return mc.ExecuteStreamingSql(ct, &sppb.ExecuteSqlRequest{ + Sql: "SELECT t.key key, t.value value FROM t_mock t", + ResumeToken: resumeToken, + }) + }, + func(time.Time, error) {}) + defer iter.Stop() + for { + _, err = iter.Next() + if err == iterator.Done { + break + } + if err != nil { + done <- 0 + break + } + } + }() + cancel() + select { + case <-done: + if ErrCode(err) != codes.Canceled { + t.Errorf("streaming query is canceled and returns error %v, want error code %v", err, codes.Canceled) + } + case <-time.After(1 * time.Second): + t.Errorf("query doesn't exit timely after being cancelled") + } + // Test query timeout. + ctx, cancel = context.WithTimeout(context.Background(), 1*time.Second) + go func() { + // Establish a stream to mock cloud spanner server. 
+ iter := stream(ctx, + func(ct context.Context, resumeToken []byte) (streamingReceiver, error) { + return mc.ExecuteStreamingSql(ct, &sppb.ExecuteSqlRequest{ + Sql: "SELECT t.key key, t.value value FROM t_mock t", + ResumeToken: resumeToken, + }) + }, + func(time.Time, error) {}) + defer iter.Stop() + for { + _, err = iter.Next() + if err == iterator.Done { + err = nil + break + } + if err != nil { + break + } + } + done <- 0 + }() + select { + case <-done: + if ErrCode(err) != codes.Canceled { + t.Errorf("streaming query timeout returns error %v, want error code %v", err, codes.Canceled) + } + case <-time.After(2 * time.Second): + t.Errorf("query doesn't timeout as expected") + } +} + +func TestRowIteratorDo(t *testing.T) { + restore := setMaxBytesBetweenResumeTokens() + defer restore() + ms := testutil.NewMockCloudSpanner(t, trxTs) + ms.Serve() + defer ms.Stop() + cc, err := grpc.Dial(ms.Addr(), grpc.WithInsecure()) + if err != nil { + t.Fatalf("Dial(%q) = %v", ms.Addr(), err) + } + defer cc.Close() + mc := sppb.NewSpannerClient(cc) + + for i := 0; i < 3; i++ { + ms.AddMsg(nil, false) + } + ms.AddMsg(io.EOF, true) + nRows := 0 + iter := stream(context.Background(), + func(ct context.Context, resumeToken []byte) (streamingReceiver, error) { + return mc.ExecuteStreamingSql(ct, &sppb.ExecuteSqlRequest{ + Sql: "SELECT t.key key, t.value value FROM t_mock t", + ResumeToken: resumeToken, + }) + }, + func(time.Time, error) {}) + err = iter.Do(func(r *Row) error { nRows++; return nil }) + if err != nil { + t.Errorf("Using Do: %v", err) + } + if nRows != 3 { + t.Errorf("got %d rows, want 3", nRows) + } +} + +func TestIteratorStopEarly(t *testing.T) { + ctx := context.Background() + restore := setMaxBytesBetweenResumeTokens() + defer restore() + ms := testutil.NewMockCloudSpanner(t, trxTs) + ms.Serve() + defer ms.Stop() + cc, err := grpc.Dial(ms.Addr(), grpc.WithInsecure()) + if err != nil { + t.Fatalf("Dial(%q) = %v", ms.Addr(), err) + } + defer cc.Close() + mc := 
sppb.NewSpannerClient(cc) + + ms.AddMsg(nil, false) + ms.AddMsg(nil, false) + ms.AddMsg(io.EOF, true) + + iter := stream(ctx, + func(ct context.Context, resumeToken []byte) (streamingReceiver, error) { + return mc.ExecuteStreamingSql(ct, &sppb.ExecuteSqlRequest{ + Sql: "SELECT t.key key, t.value value FROM t_mock t", + ResumeToken: resumeToken, + }) + }, + func(time.Time, error) {}) + _, err = iter.Next() + if err != nil { + t.Fatalf("before Stop: %v", err) + } + iter.Stop() + // Stop sets r.err to the FailedPrecondition error "Next called after Stop". + // Override that here so this test can observe the Canceled error from the stream. + iter.err = nil + iter.Next() + if ErrCode(iter.streamd.lastErr()) != codes.Canceled { + t.Errorf("after Stop: got %v, wanted Canceled", err) + } +} + +func TestIteratorWithError(t *testing.T) { + injected := errors.New("Failed iterator") + iter := RowIterator{err: injected} + defer iter.Stop() + if _, err := iter.Next(); err != injected { + t.Fatalf("Expected error: %v, got %v", injected, err) + } +} diff --git a/vendor/cloud.google.com/go/spanner/retry.go b/vendor/cloud.google.com/go/spanner/retry.go new file mode 100644 index 00000000..83a3826b --- /dev/null +++ b/vendor/cloud.google.com/go/spanner/retry.go @@ -0,0 +1,189 @@ +/* +Copyright 2017 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package spanner + +import ( + "fmt" + "strings" + "time" + + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes" + "golang.org/x/net/context" + edpb "google.golang.org/genproto/googleapis/rpc/errdetails" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" +) + +const ( + retryInfoKey = "google.rpc.retryinfo-bin" +) + +// errRetry returns an unavailable error under error namespace EsOther. It is a +// generic retryable error that is used to mask and recover unretryable errors +// in a retry loop. +func errRetry(err error) error { + if se, ok := err.(*Error); ok { + return &Error{codes.Unavailable, fmt.Sprintf("generic Cloud Spanner retryable error: { %v }", se.Error()), se.trailers} + } + return spannerErrorf(codes.Unavailable, "generic Cloud Spanner retryable error: { %v }", err.Error()) +} + +// isErrorClosing reports whether the error is generated by gRPC layer talking to a closed server. +func isErrorClosing(err error) bool { + if err == nil { + return false + } + if ErrCode(err) == codes.Internal && strings.Contains(ErrDesc(err), "transport is closing") { + // Handle the case when connection is closed unexpectedly. + // TODO: once gRPC is able to categorize + // this as retryable error, we should stop parsing the + // error message here. + return true + } + return false +} + +// isErrorRST reports whether the error is generated by gRPC client receiving a RST frame from server. +func isErrorRST(err error) bool { + if err == nil { + return false + } + if ErrCode(err) == codes.Internal && strings.Contains(ErrDesc(err), "stream terminated by RST_STREAM") { + // TODO: once gRPC is able to categorize this error as "go away" or "retryable", + // we should stop parsing the error message. + return true + } + return false +} + +// isErrorUnexpectedEOF returns true if error is generated by gRPC layer +// receiving io.EOF unexpectedly. 
+func isErrorUnexpectedEOF(err error) bool { + if err == nil { + return false + } + if ErrCode(err) == codes.Unknown && strings.Contains(ErrDesc(err), "unexpected EOF") { + // Unexpected EOF is an transport layer issue that + // could be recovered by retries. The most likely + // scenario is a flaky RecvMsg() call due to network + // issues. + // TODO: once gRPC is able to categorize + // this as retryable error, we should stop parsing the + // error message here. + return true + } + return false +} + +// isErrorUnavailable returns true if the error is about server being unavailable. +func isErrorUnavailable(err error) bool { + if err == nil { + return false + } + if ErrCode(err) == codes.Unavailable { + return true + } + return false +} + +// isRetryable returns true if the Cloud Spanner error being checked is a retryable error. +func isRetryable(err error) bool { + if isErrorClosing(err) { + return true + } + if isErrorUnexpectedEOF(err) { + return true + } + if isErrorRST(err) { + return true + } + if isErrorUnavailable(err) { + return true + } + return false +} + +// errContextCanceled returns *spanner.Error for canceled context. +func errContextCanceled(lastErr error) error { + return spannerErrorf(codes.Canceled, "context is canceled, lastErr is <%v>", lastErr) +} + +// extractRetryDelay extracts retry backoff if present. 
+func extractRetryDelay(err error) (time.Duration, bool) { + trailers := errTrailers(err) + if trailers == nil { + return 0, false + } + elem, ok := trailers[retryInfoKey] + if !ok || len(elem) <= 0 { + return 0, false + } + _, b, err := metadata.DecodeKeyValue(retryInfoKey, elem[0]) + if err != nil { + return 0, false + } + var retryInfo edpb.RetryInfo + if proto.Unmarshal([]byte(b), &retryInfo) != nil { + return 0, false + } + delay, err := ptypes.Duration(retryInfo.RetryDelay) + if err != nil { + return 0, false + } + return delay, true +} + +// runRetryable keeps attempting to run f until one of the following happens: +// 1) f returns nil error or an unretryable error; +// 2) context is cancelled or timeout. +// TODO: consider using https://github.com/googleapis/gax-go once it +// becomes available internally. +func runRetryable(ctx context.Context, f func(context.Context) error) error { + var funcErr error + retryCount := 0 + for { + select { + case <-ctx.Done(): + // Do context check here so that even f() failed to do + // so (for example, gRPC implementation bug), the loop + // can still have a chance to exit as expected. + return errContextCanceled(funcErr) + default: + } + funcErr = f(ctx) + if funcErr == nil { + return nil + } + if isRetryable(funcErr) { + // Error is retryable, do exponential backoff and continue. + b, ok := extractRetryDelay(funcErr) + if !ok { + b = defaultBackoff.delay(retryCount) + } + select { + case <-ctx.Done(): + return errContextCanceled(funcErr) + case <-time.After(b): + } + retryCount++ + continue + } + // Error isn't retryable / no error, return immediately. + return toSpannerError(funcErr) + } +} diff --git a/vendor/cloud.google.com/go/spanner/retry_test.go b/vendor/cloud.google.com/go/spanner/retry_test.go new file mode 100644 index 00000000..e07bcf5f --- /dev/null +++ b/vendor/cloud.google.com/go/spanner/retry_test.go @@ -0,0 +1,106 @@ +/* +Copyright 2017 Google Inc. All Rights Reserved. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package spanner + +import ( + "errors" + "fmt" + "reflect" + "testing" + "time" + + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes" + "golang.org/x/net/context" + edpb "google.golang.org/genproto/googleapis/rpc/errdetails" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" +) + +// Test if runRetryable loop deals with various errors correctly. +func TestRetry(t *testing.T) { + if testing.Short() { + t.SkipNow() + } + responses := []error{ + grpc.Errorf(codes.Internal, "transport is closing"), + grpc.Errorf(codes.Unknown, "unexpected EOF"), + grpc.Errorf(codes.Internal, "stream terminated by RST_STREAM with error code: 2"), + grpc.Errorf(codes.Unavailable, "service is currently unavailable"), + errRetry(fmt.Errorf("just retry it")), + } + err := runRetryable(context.Background(), func(ct context.Context) error { + var r error + if len(responses) > 0 { + r = responses[0] + responses = responses[1:] + } + return r + }) + if err != nil { + t.Errorf("runRetryable should be able to survive all retryable errors, but it returns %v", err) + } + // Unretryable errors + injErr := errors.New("this is unretryable") + err = runRetryable(context.Background(), func(ct context.Context) error { + return injErr + }) + if wantErr := toSpannerError(injErr); !reflect.DeepEqual(err, wantErr) { + t.Errorf("runRetryable returns error %v, want %v", err, wantErr) + } + // Timeout + ctx, 
cancel := context.WithTimeout(context.Background(), time.Second) + defer cancel() + retryErr := errRetry(fmt.Errorf("still retrying")) + err = runRetryable(ctx, func(ct context.Context) error { + // Expect to trigger timeout in retryable runner after 10 executions. + <-time.After(100 * time.Millisecond) + // Let retryable runner to retry so that timeout will eventually happen. + return retryErr + }) + if wantErr := errContextCanceled(retryErr); !reflect.DeepEqual(err, wantErr) { + t.Errorf("runRetryable returns error: %v, want error: %v", err, wantErr) + } + // Cancellation + ctx, cancel = context.WithCancel(context.Background()) + retries := 3 + retryErr = errRetry(fmt.Errorf("retry before cancel")) + err = runRetryable(ctx, func(ct context.Context) error { + retries-- + if retries == 0 { + cancel() + } + return retryErr + }) + if wantErr := errContextCanceled(retryErr); !reflect.DeepEqual(err, wantErr) || retries != 0 { + t.Errorf("=<%v, %v>, want <%v, %v>", err, retries, wantErr, 0) + } +} + +func TestRetryInfo(t *testing.T) { + b, _ := proto.Marshal(&edpb.RetryInfo{ + RetryDelay: ptypes.DurationProto(time.Second), + }) + trailers := map[string]string{ + retryInfoKey: string(b), + } + gotDelay, ok := extractRetryDelay(errRetry(toSpannerErrorWithMetadata(grpc.Errorf(codes.Aborted, ""), metadata.New(trailers)))) + if !ok || !reflect.DeepEqual(time.Second, gotDelay) { + t.Errorf(" = <%t, %v>, want ", ok, gotDelay, time.Second) + } +} diff --git a/vendor/cloud.google.com/go/spanner/row.go b/vendor/cloud.google.com/go/spanner/row.go new file mode 100644 index 00000000..70c2861b --- /dev/null +++ b/vendor/cloud.google.com/go/spanner/row.go @@ -0,0 +1,307 @@ +/* +Copyright 2017 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package spanner + +import ( + "fmt" + "reflect" + + proto3 "github.com/golang/protobuf/ptypes/struct" + + sppb "google.golang.org/genproto/googleapis/spanner/v1" + "google.golang.org/grpc/codes" +) + +// A Row is a view of a row of data produced by a Cloud Spanner read. +// +// A row consists of a number of columns; the number depends on the columns +// used to construct the read. +// +// The column values can be accessed by index, where the indices are with +// respect to the columns. For instance, if the read specified +// []string{"photo_id", "caption", "metadata"}, then each row will +// contain three columns: the 0th column corresponds to "photo_id", the +// 1st column corresponds to "caption", etc. +// +// Column values are decoded by using one of the Column, ColumnByName, or +// Columns methods. The valid values passed to these methods depend on the +// column type. For example: +// +// var photoID int64 +// err := row.Column(0, &photoID) // Decode column 0 as an integer. +// +// var caption string +// err := row.Column(1, &caption) // Decode column 1 as a string. +// +// // The above two operations at once. 
+// err := row.Columns(&photoID, &caption) +// +// Supported types and their corresponding Cloud Spanner column type(s) are: +// +// *string(not NULL), *NullString - STRING +// *[]NullString - STRING ARRAY +// *[]byte - BYTES +// *[][]byte - BYTES ARRAY +// *int64(not NULL), *NullInt64 - INT64 +// *[]NullInt64 - INT64 ARRAY +// *bool(not NULL), *NullBool - BOOL +// *[]NullBool - BOOL ARRAY +// *float64(not NULL), *NullFloat64 - FLOAT64 +// *[]NullFloat64 - FLOAT64 ARRAY +// *time.Time(not NULL), *NullTime - TIMESTAMP +// *[]NullTime - TIMESTAMP ARRAY +// *Date(not NULL), *NullDate - DATE +// *[]NullDate - DATE ARRAY +// *[]*some_go_struct, *[]NullRow - STRUCT ARRAY +// *GenericColumnValue - any Cloud Spanner type +// +// For TIMESTAMP columns, returned time.Time object will be in UTC. +// +// To fetch an array of BYTES, pass a *[][]byte. To fetch an array of +// (sub)rows, pass a *[]spanner.NullRow or a *[]*some_go_struct where +// some_go_struct holds all information of the subrow, see spannr.Row.ToStruct +// for the mapping between Cloud Spanner row and Go struct. To fetch an array of +// other types, pass a *[]spanner.Null* type of the appropriate type. Use +// *GenericColumnValue when you don't know in advance what column type to +// expect. +// +// Row decodes the row contents lazily; as a result, each call to a getter has +// a chance of returning an error. +// +// A column value may be NULL if the corresponding value is not present in +// Cloud Spanner. The spanner.Null* types (spanner.NullInt64 et al.) allow fetching +// values that may be null. A NULL BYTES can be fetched into a *[]byte as nil. +// It is an error to fetch a NULL value into any other type. +type Row struct { + fields []*sppb.StructType_Field + vals []*proto3.Value // keep decoded for now +} + +// errNamesValuesMismatch returns error for when columnNames count is not equal +// to columnValues count. 
+func errNamesValuesMismatch(columnNames []string, columnValues []interface{}) error { + return spannerErrorf(codes.FailedPrecondition, + "different number of names(%v) and values(%v)", len(columnNames), len(columnValues)) +} + +// NewRow returns a Row containing the supplied data. This can be useful for +// mocking Cloud Spanner Read and Query responses for unit testing. +func NewRow(columnNames []string, columnValues []interface{}) (*Row, error) { + if len(columnValues) != len(columnNames) { + return nil, errNamesValuesMismatch(columnNames, columnValues) + } + r := Row{ + fields: make([]*sppb.StructType_Field, len(columnValues)), + vals: make([]*proto3.Value, len(columnValues)), + } + for i := range columnValues { + val, typ, err := encodeValue(columnValues[i]) + if err != nil { + return nil, err + } + r.fields[i] = &sppb.StructType_Field{ + Name: columnNames[i], + Type: typ, + } + r.vals[i] = val + } + return &r, nil +} + +// Size is the number of columns in the row. +func (r *Row) Size() int { + return len(r.fields) +} + +// ColumnName returns the name of column i, or empty string for invalid column. +func (r *Row) ColumnName(i int) string { + if i < 0 || i >= len(r.fields) { + return "" + } + return r.fields[i].Name +} + +// ColumnIndex returns the index of the column with the given name. The +// comparison is case-sensitive. +func (r *Row) ColumnIndex(name string) (int, error) { + found := false + var index int + if len(r.vals) != len(r.fields) { + return 0, errFieldsMismatchVals(r) + } + for i, f := range r.fields { + if f == nil { + return 0, errNilColType(i) + } + if name == f.Name { + if found { + return 0, errDupColName(name) + } + found = true + index = i + } + } + if !found { + return 0, errColNotFound(name) + } + return index, nil +} + +// ColumnNames returns all column names of the row. 
+func (r *Row) ColumnNames() []string { + var n []string + for _, c := range r.fields { + n = append(n, c.Name) + } + return n +} + +// errColIdxOutOfRange returns error for requested column index is out of the +// range of the target Row's columns. +func errColIdxOutOfRange(i int, r *Row) error { + return spannerErrorf(codes.OutOfRange, "column index %d out of range [0,%d)", i, len(r.vals)) +} + +// errDecodeColumn returns error for not being able to decode a indexed column. +func errDecodeColumn(i int, err error) error { + if err == nil { + return nil + } + se, ok := toSpannerError(err).(*Error) + if !ok { + return spannerErrorf(codes.InvalidArgument, "failed to decode column %v, error = <%v>", i, err) + } + se.decorate(fmt.Sprintf("failed to decode column %v", i)) + return se +} + +// errFieldsMismatchVals returns error for field count isn't equal to value count in a Row. +func errFieldsMismatchVals(r *Row) error { + return spannerErrorf(codes.FailedPrecondition, "row has different number of fields(%v) and values(%v)", + len(r.fields), len(r.vals)) +} + +// errNilColType returns error for column type for column i being nil in the row. +func errNilColType(i int) error { + return spannerErrorf(codes.FailedPrecondition, "column(%v)'s type is nil", i) +} + +// Column fetches the value from the ith column, decoding it into ptr. +// See the Row documentation for the list of acceptable argument types. +func (r *Row) Column(i int, ptr interface{}) error { + if len(r.vals) != len(r.fields) { + return errFieldsMismatchVals(r) + } + if i < 0 || i >= len(r.fields) { + return errColIdxOutOfRange(i, r) + } + if r.fields[i] == nil { + return errNilColType(i) + } + if err := decodeValue(r.vals[i], r.fields[i].Type, ptr); err != nil { + return errDecodeColumn(i, err) + } + return nil +} + +// errDupColName returns error for duplicated column name in the same row. 
+func errDupColName(n string) error { + return spannerErrorf(codes.FailedPrecondition, "ambiguous column name %q", n) +} + +// errColNotFound returns error for not being able to find a named column. +func errColNotFound(n string) error { + return spannerErrorf(codes.NotFound, "column %q not found", n) +} + +// ColumnByName fetches the value from the named column, decoding it into ptr. +// See the Row documentation for the list of acceptable argument types. +func (r *Row) ColumnByName(name string, ptr interface{}) error { + index, err := r.ColumnIndex(name) + if err != nil { + return err + } + return r.Column(index, ptr) +} + +// errNumOfColValue returns error for providing wrong number of values to Columns. +func errNumOfColValue(n int, r *Row) error { + return spannerErrorf(codes.InvalidArgument, + "Columns(): number of arguments (%d) does not match row size (%d)", n, len(r.vals)) +} + +// Columns fetches all the columns in the row at once. +// +// The value of the kth column will be decoded into the kth argument to +// Columns. See above for the list of acceptable argument types. The number of +// arguments must be equal to the number of columns. Pass nil to specify that a +// column should be ignored. +func (r *Row) Columns(ptrs ...interface{}) error { + if len(ptrs) != len(r.vals) { + return errNumOfColValue(len(ptrs), r) + } + if len(r.vals) != len(r.fields) { + return errFieldsMismatchVals(r) + } + for i, p := range ptrs { + if p == nil { + continue + } + if err := r.Column(i, p); err != nil { + return err + } + } + return nil +} + +// errToStructArgType returns error for p not having the correct data type(pointer to Go struct) to +// be the argument of Row.ToStruct. +func errToStructArgType(p interface{}) error { + return spannerErrorf(codes.InvalidArgument, "ToStruct(): type %T is not a valid pointer to Go struct", p) +} + +// ToStruct fetches the columns in a row into the fields of a struct. 
+// The rules for mapping a row's columns into a struct's exported fields +// are as the following: +// 1. If a field has a `spanner: "column_name"` tag, then decode column +// 'column_name' into the field. A special case is the `spanner: "-"` +// tag, which instructs ToStruct to ignore the field during decoding. +// 2. Otherwise, if the name of a field matches the name of a column (ignoring case), +// decode the column into the field. +// +// The fields of the destination struct can be of any type that is acceptable +// to (*spanner.Row).Column. +// +// Slice and pointer fields will be set to nil if the source column +// is NULL, and a non-nil value if the column is not NULL. To decode NULL +// values of other types, use one of the spanner.Null* as the type of the +// destination field. +func (r *Row) ToStruct(p interface{}) error { + // Check if p is a pointer to a struct + if t := reflect.TypeOf(p); t == nil || t.Kind() != reflect.Ptr || t.Elem().Kind() != reflect.Struct { + return errToStructArgType(p) + } + if len(r.vals) != len(r.fields) { + return errFieldsMismatchVals(r) + } + // Call decodeStruct directly to decode the row as a typed proto.ListValue. + return decodeStruct( + &sppb.StructType{Fields: r.fields}, + &proto3.ListValue{Values: r.vals}, + p, + ) +} diff --git a/vendor/cloud.google.com/go/spanner/row_test.go b/vendor/cloud.google.com/go/spanner/row_test.go new file mode 100644 index 00000000..2120421a --- /dev/null +++ b/vendor/cloud.google.com/go/spanner/row_test.go @@ -0,0 +1,1775 @@ +/* +Copyright 2017 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package spanner + +import ( + "encoding/base64" + "reflect" + "strconv" + "strings" + "testing" + "time" + + "cloud.google.com/go/civil" + proto "github.com/golang/protobuf/proto" + proto3 "github.com/golang/protobuf/ptypes/struct" + sppb "google.golang.org/genproto/googleapis/spanner/v1" +) + +var ( + tm = time.Date(2016, 11, 15, 0, 0, 0, 0, time.UTC) + dt, _ = civil.ParseDate("2016-11-15") + // row contains a column for each unique Cloud Spanner type. + row = Row{ + []*sppb.StructType_Field{ + // STRING / STRING ARRAY + {"STRING", stringType()}, + {"NULL_STRING", stringType()}, + {"STRING_ARRAY", listType(stringType())}, + {"NULL_STRING_ARRAY", listType(stringType())}, + // BYTES / BYTES ARRAY + {"BYTES", bytesType()}, + {"NULL_BYTES", bytesType()}, + {"BYTES_ARRAY", listType(bytesType())}, + {"NULL_BYTES_ARRAY", listType(bytesType())}, + // INT64 / INT64 ARRAY + {"INT64", intType()}, + {"NULL_INT64", intType()}, + {"INT64_ARRAY", listType(intType())}, + {"NULL_INT64_ARRAY", listType(intType())}, + // BOOL / BOOL ARRAY + {"BOOL", boolType()}, + {"NULL_BOOL", boolType()}, + {"BOOL_ARRAY", listType(boolType())}, + {"NULL_BOOL_ARRAY", listType(boolType())}, + // FLOAT64 / FLOAT64 ARRAY + {"FLOAT64", floatType()}, + {"NULL_FLOAT64", floatType()}, + {"FLOAT64_ARRAY", listType(floatType())}, + {"NULL_FLOAT64_ARRAY", listType(floatType())}, + // TIMESTAMP / TIMESTAMP ARRAY + {"TIMESTAMP", timeType()}, + {"NULL_TIMESTAMP", timeType()}, + {"TIMESTAMP_ARRAY", listType(timeType())}, + {"NULL_TIMESTAMP_ARRAY", listType(timeType())}, + // DATE / DATE ARRAY + {"DATE", 
dateType()}, + {"NULL_DATE", dateType()}, + {"DATE_ARRAY", listType(dateType())}, + {"NULL_DATE_ARRAY", listType(dateType())}, + + // STRUCT ARRAY + { + "STRUCT_ARRAY", + listType( + structType( + mkField("Col1", intType()), + mkField("Col2", floatType()), + mkField("Col3", stringType()), + ), + ), + }, + { + "NULL_STRUCT_ARRAY", + listType( + structType( + mkField("Col1", intType()), + mkField("Col2", floatType()), + mkField("Col3", stringType()), + ), + ), + }, + }, + []*proto3.Value{ + // STRING / STRING ARRAY + stringProto("value"), + nullProto(), + listProto(stringProto("value1"), nullProto(), stringProto("value3")), + nullProto(), + // BYTES / BYTES ARRAY + bytesProto([]byte("value")), + nullProto(), + listProto(bytesProto([]byte("value1")), nullProto(), bytesProto([]byte("value3"))), + nullProto(), + // INT64 / INT64 ARRAY + intProto(17), + nullProto(), + listProto(intProto(1), intProto(2), nullProto()), + nullProto(), + // BOOL / BOOL ARRAY + boolProto(true), + nullProto(), + listProto(nullProto(), boolProto(true), boolProto(false)), + nullProto(), + // FLOAT64 / FLOAT64 ARRAY + floatProto(1.7), + nullProto(), + listProto(nullProto(), nullProto(), floatProto(1.7)), + nullProto(), + // TIMESTAMP / TIMESTAMP ARRAY + timeProto(tm), + nullProto(), + listProto(nullProto(), timeProto(tm)), + nullProto(), + // DATE / DATE ARRAY + dateProto(dt), + nullProto(), + listProto(nullProto(), dateProto(dt)), + nullProto(), + // STRUCT ARRAY + listProto( + nullProto(), + listProto(intProto(3), floatProto(33.3), stringProto("three")), + nullProto(), + ), + nullProto(), + }, + } +) + +// Test helpers for getting column values. +func TestColumnValues(t *testing.T) { + vals := []interface{}{} + wantVals := []interface{}{} + // Test getting column values. 
+ for i, wants := range [][]interface{}{ + // STRING / STRING ARRAY + {"value", NullString{"value", true}}, + {NullString{}}, + {[]NullString{{"value1", true}, {}, {"value3", true}}}, + {[]NullString(nil)}, + // BYTES / BYTES ARRAY + {[]byte("value")}, + {[]byte(nil)}, + {[][]byte{[]byte("value1"), nil, []byte("value3")}}, + {[][]byte(nil)}, + // INT64 / INT64 ARRAY + {int64(17), NullInt64{17, true}}, + {NullInt64{}}, + {[]NullInt64{{1, true}, {2, true}, {}}}, + {[]NullInt64(nil)}, + // BOOL / BOOL ARRAY + {true, NullBool{true, true}}, + {NullBool{}}, + {[]NullBool{{}, {true, true}, {false, true}}}, + {[]NullBool(nil)}, + // FLOAT64 / FLOAT64 ARRAY + {1.7, NullFloat64{1.7, true}}, + {NullFloat64{}}, + {[]NullFloat64{{}, {}, {1.7, true}}}, + {[]NullFloat64(nil)}, + // TIMESTAMP / TIMESTAMP ARRAY + {tm, NullTime{tm, true}}, + {NullTime{}}, + {[]NullTime{{}, {tm, true}}}, + {[]NullTime(nil)}, + // DATE / DATE ARRAY + {dt, NullDate{dt, true}}, + {NullDate{}}, + {[]NullDate{{}, {dt, true}}}, + {[]NullDate(nil)}, + // STRUCT ARRAY + { + []*struct { + Col1 NullInt64 + Col2 NullFloat64 + Col3 string + }{ + nil, + &struct { + Col1 NullInt64 + Col2 NullFloat64 + Col3 string + }{ + NullInt64{3, true}, + NullFloat64{33.3, true}, + "three", + }, + nil, + }, + []NullRow{ + {}, + { + Row: Row{ + fields: []*sppb.StructType_Field{ + mkField("Col1", intType()), + mkField("Col2", floatType()), + mkField("Col3", stringType()), + }, + vals: []*proto3.Value{ + intProto(3), + floatProto(33.3), + stringProto("three"), + }, + }, + Valid: true, + }, + {}, + }, + }, + { + []*struct { + Col1 NullInt64 + Col2 NullFloat64 + Col3 string + }(nil), + []NullRow(nil), + }, + } { + for j, want := range wants { + // Prepare Value vector to test Row.Columns. 
+ if j == 0 { + vals = append(vals, reflect.New(reflect.TypeOf(want)).Interface()) + wantVals = append(wantVals, want) + } + // Column + gotp := reflect.New(reflect.TypeOf(want)) + err := row.Column(i, gotp.Interface()) + if err != nil { + t.Errorf("\t row.Column(%v, %T) returns error: %v, want nil", i, gotp.Interface(), err) + } + if got := reflect.Indirect(gotp).Interface(); !reflect.DeepEqual(got, want) { + t.Errorf("\t row.Column(%v, %T) retrives %v, want %v", i, gotp.Interface(), got, want) + } + // ColumnByName + gotp = reflect.New(reflect.TypeOf(want)) + err = row.ColumnByName(row.fields[i].Name, gotp.Interface()) + if err != nil { + t.Errorf("\t row.ColumnByName(%v, %T) returns error: %v, want nil", row.fields[i].Name, gotp.Interface(), err) + } + if got := reflect.Indirect(gotp).Interface(); !reflect.DeepEqual(got, want) { + t.Errorf("\t row.ColumnByName(%v, %T) retrives %v, want %v", row.fields[i].Name, gotp.Interface(), got, want) + } + } + } + // Test Row.Columns. + if err := row.Columns(vals...); err != nil { + t.Errorf("row.Columns() returns error: %v, want nil", err) + } + for i, want := range wantVals { + if got := reflect.Indirect(reflect.ValueOf(vals[i])).Interface(); !reflect.DeepEqual(got, want) { + t.Errorf("\t got %v(%T) for column[%v], want %v(%T)", got, got, row.fields[i].Name, want, want) + } + } +} + +// Test decoding into nil destination. 
+func TestNilDst(t *testing.T) { + for i, test := range []struct { + r *Row + dst interface{} + wantErr error + structDst interface{} + wantToStructErr error + }{ + { + &Row{ + []*sppb.StructType_Field{ + {"Col0", stringType()}, + }, + []*proto3.Value{stringProto("value")}, + }, + nil, + errDecodeColumn(0, errNilDst(nil)), + nil, + errToStructArgType(nil), + }, + { + &Row{ + []*sppb.StructType_Field{ + {"Col0", stringType()}, + }, + []*proto3.Value{stringProto("value")}, + }, + (*string)(nil), + errDecodeColumn(0, errNilDst((*string)(nil))), + (*struct{ STRING string })(nil), + errNilDst((*struct{ STRING string })(nil)), + }, + { + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + listType( + structType( + mkField("Col1", intType()), + mkField("Col2", floatType()), + ), + ), + }, + }, + []*proto3.Value{listProto( + listProto(intProto(3), floatProto(33.3)), + )}, + }, + (*[]*struct { + Col1 int + Col2 float64 + })(nil), + errDecodeColumn(0, errNilDst((*[]*struct { + Col1 int + Col2 float64 + })(nil))), + (*struct { + StructArray []*struct { + Col1 int + Col2 float64 + } `spanner:"STRUCT_ARRAY"` + })(nil), + errNilDst((*struct { + StructArray []*struct { + Col1 int + Col2 float64 + } `spanner:"STRUCT_ARRAY"` + })(nil)), + }, + } { + if gotErr := test.r.Column(0, test.dst); !reflect.DeepEqual(gotErr, test.wantErr) { + t.Errorf("%v: test.r.Column() returns error %v, want %v", i, gotErr, test.wantErr) + } + if gotErr := test.r.ColumnByName("Col0", test.dst); !reflect.DeepEqual(gotErr, test.wantErr) { + t.Errorf("%v: test.r.ColumnByName() returns error %v, want %v", i, gotErr, test.wantErr) + } + // Row.Columns(T) should return nil on T == nil, otherwise, it should return test.wantErr. 
+ wantColumnsErr := test.wantErr + if test.dst == nil { + wantColumnsErr = nil + } + if gotErr := test.r.Columns(test.dst); !reflect.DeepEqual(gotErr, wantColumnsErr) { + t.Errorf("%v: test.r.Columns() returns error %v, want %v", i, gotErr, wantColumnsErr) + } + if gotErr := test.r.ToStruct(test.structDst); !reflect.DeepEqual(gotErr, test.wantToStructErr) { + t.Errorf("%v: test.r.ToStruct() returns error %v, want %v", i, gotErr, test.wantToStructErr) + } + } +} + +// Test decoding NULL columns using Go types that don't support NULL. +func TestNullTypeErr(t *testing.T) { + var tm time.Time + ntoi := func(n string) int { + for i, f := range row.fields { + if f.Name == n { + return i + } + } + t.Errorf("cannot find column name %q in row", n) + return 0 + } + for _, test := range []struct { + colName string + dst interface{} + }{ + { + "NULL_STRING", + proto.String(""), + }, + { + "NULL_INT64", + proto.Int64(0), + }, + { + "NULL_BOOL", + proto.Bool(false), + }, + { + "NULL_FLOAT64", + proto.Float64(0.0), + }, + { + "NULL_TIMESTAMP", + &tm, + }, + { + "NULL_DATE", + &dt, + }, + } { + wantErr := errDecodeColumn(ntoi(test.colName), errDstNotForNull(test.dst)) + if gotErr := row.ColumnByName(test.colName, test.dst); !reflect.DeepEqual(gotErr, wantErr) { + t.Errorf("row.ColumnByName(%v) returns error %v, want %v", test.colName, gotErr, wantErr) + } + } +} + +// Test using wrong destination type in column decoders. +func TestColumnTypeErr(t *testing.T) { + // badDst cannot hold any of the column values. + badDst := &struct{}{} + for i, f := range row.fields { // For each of the columns, try to decode it into badDst. 
+ tc := f.Type.Code + isArray := strings.Contains(f.Name, "ARRAY") + if isArray { + tc = f.Type.ArrayElementType.Code + } + wantErr := errDecodeColumn(i, errTypeMismatch(tc, isArray, badDst)) + if gotErr := row.Column(i, badDst); !reflect.DeepEqual(gotErr, wantErr) { + t.Errorf("Column(%v): decoding into destination with wrong type %T returns error %v, want %v", + i, badDst, gotErr, wantErr) + } + if gotErr := row.ColumnByName(f.Name, badDst); !reflect.DeepEqual(gotErr, wantErr) { + t.Errorf("ColumnByName(%v): decoding into destination with wrong type %T returns error %v, want %v", + f.Name, badDst, gotErr, wantErr) + } + } + wantErr := errDecodeColumn(1, errTypeMismatch(sppb.TypeCode_STRING, false, badDst)) + // badDst is used to receive column 1. + vals := []interface{}{nil, badDst} // Row.Column() is expected to fail at column 1. + // Skip decoding the rest columns by providing nils as the destinations. + for i := 2; i < len(row.fields); i++ { + vals = append(vals, nil) + } + if gotErr := row.Columns(vals...); !reflect.DeepEqual(gotErr, wantErr) { + t.Errorf("Columns(): decoding column 1 with wrong type %T returns error %v, want %v", + badDst, gotErr, wantErr) + } +} + +// Test the handling of invalid column decoding requests which cannot be mapped to correct column(s). 
+func TestInvalidColumnRequest(t *testing.T) { + for _, test := range []struct { + desc string + f func() error + wantErr error + }{ + { + "Request column index is out of range", + func() error { + return row.Column(10000, &struct{}{}) + }, + errColIdxOutOfRange(10000, &row), + }, + { + "Cannot find the named column", + func() error { + return row.ColumnByName("string", &struct{}{}) + }, + errColNotFound("string"), + }, + { + "Not enough arguments to call row.Columns()", + func() error { + return row.Columns(nil, nil) + }, + errNumOfColValue(2, &row), + }, + { + "Call ColumnByName on row with duplicated column names", + func() error { + var s string + r := &Row{ + []*sppb.StructType_Field{ + {"Val", stringType()}, + {"Val", stringType()}, + }, + []*proto3.Value{stringProto("value1"), stringProto("value2")}, + } + return r.ColumnByName("Val", &s) + }, + errDupColName("Val"), + }, + { + "Call ToStruct on row with duplicated column names", + func() error { + s := &struct { + Val string + }{} + r := &Row{ + []*sppb.StructType_Field{ + {"Val", stringType()}, + {"Val", stringType()}, + }, + []*proto3.Value{stringProto("value1"), stringProto("value2")}, + } + return r.ToStruct(s) + }, + errDupSpannerField("Val", &sppb.StructType{ + Fields: []*sppb.StructType_Field{ + {"Val", stringType()}, + {"Val", stringType()}, + }, + }), + }, + { + "Call ToStruct on a row with unnamed field", + func() error { + s := &struct { + Val string + }{} + r := &Row{ + []*sppb.StructType_Field{ + {"", stringType()}, + }, + []*proto3.Value{stringProto("value1")}, + } + return r.ToStruct(s) + }, + errUnnamedField(&sppb.StructType{Fields: []*sppb.StructType_Field{{"", stringType()}}}, 0), + }, + } { + if gotErr := test.f(); !reflect.DeepEqual(gotErr, test.wantErr) { + t.Errorf("%v: test.f() returns error %v, want %v", test.desc, gotErr, test.wantErr) + } + } +} + +// Test decoding the row with row.ToStruct into an invalid destination. 
+func TestToStructInvalidDst(t *testing.T) { + for _, test := range []struct { + desc string + dst interface{} + wantErr error + }{ + { + "Decode row as STRUCT into int32", + proto.Int(1), + errToStructArgType(proto.Int(1)), + }, + { + "Decode row as STRUCT to nil Go struct", + (*struct{})(nil), + errNilDst((*struct{})(nil)), + }, + { + "Decode row as STRUCT to Go struct with duplicated fields for the PK column", + &struct { + PK1 string `spanner:"STRING"` + PK2 string `spanner:"STRING"` + }{}, + errNoOrDupGoField(&struct { + PK1 string `spanner:"STRING"` + PK2 string `spanner:"STRING"` + }{}, "STRING"), + }, + { + "Decode row as STRUCT to Go struct with no field for the PK column", + &struct { + PK1 string `spanner:"_STRING"` + }{}, + errNoOrDupGoField(&struct { + PK1 string `spanner:"_STRING"` + }{}, "STRING"), + }, + { + "Decode row as STRUCT to Go struct with wrong type for the PK column", + &struct { + PK1 int64 `spanner:"STRING"` + }{}, + errDecodeStructField(&sppb.StructType{Fields: row.fields}, "STRING", + errTypeMismatch(sppb.TypeCode_STRING, false, proto.Int64(0))), + }, + } { + if gotErr := row.ToStruct(test.dst); !reflect.DeepEqual(gotErr, test.wantErr) { + t.Errorf("%v: decoding:\ngot %v\nwant %v", test.desc, gotErr, test.wantErr) + } + } +} + +// Test decoding a broken row. +func TestBrokenRow(t *testing.T) { + for i, test := range []struct { + row *Row + dst interface{} + wantErr error + }{ + { + // A row with no field. + &Row{ + []*sppb.StructType_Field{}, + []*proto3.Value{stringProto("value")}, + }, + &NullString{"value", true}, + errFieldsMismatchVals(&Row{ + []*sppb.StructType_Field{}, + []*proto3.Value{stringProto("value")}, + }), + }, + { + // A row with nil field. + &Row{ + []*sppb.StructType_Field{nil}, + []*proto3.Value{stringProto("value")}, + }, + &NullString{"value", true}, + errNilColType(0), + }, + { + // Field is not nil, but its type is nil. 
+ &Row{ + []*sppb.StructType_Field{ + { + "Col0", + nil, + }, + }, + []*proto3.Value{listProto(stringProto("value1"), stringProto("value2"))}, + }, + &[]NullString{}, + errDecodeColumn(0, errNilSpannerType()), + }, + { + // Field is not nil, field type is not nil, but it is an array and its array element type is nil. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + &sppb.Type{ + Code: sppb.TypeCode_ARRAY, + }, + }, + }, + []*proto3.Value{listProto(stringProto("value1"), stringProto("value2"))}, + }, + &[]NullString{}, + errDecodeColumn(0, errNilArrElemType(&sppb.Type{Code: sppb.TypeCode_ARRAY})), + }, + { + // Field specifies valid type, value is nil. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + intType(), + }, + }, + []*proto3.Value{nil}, + }, + &NullInt64{1, true}, + errDecodeColumn(0, errNilSrc()), + }, + { + // Field specifies INT64 type, value is having a nil Kind. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + intType(), + }, + }, + []*proto3.Value{{Kind: (*proto3.Value_StringValue)(nil)}}, + }, + &NullInt64{1, true}, + errDecodeColumn(0, errSrcVal(&proto3.Value{Kind: (*proto3.Value_StringValue)(nil)}, "String")), + }, + { + // Field specifies INT64 type, but value is for Number type. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + intType(), + }, + }, + []*proto3.Value{floatProto(1.0)}, + }, + &NullInt64{1, true}, + errDecodeColumn(0, errSrcVal(floatProto(1.0), "String")), + }, + { + // Field specifies INT64 type, but value is wrongly encoded. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + intType(), + }, + }, + []*proto3.Value{stringProto("&1")}, + }, + proto.Int64(0), + errDecodeColumn(0, errBadEncoding(stringProto("&1"), func() error { + _, err := strconv.ParseInt("&1", 10, 64) + return err + }())), + }, + { + // Field specifies INT64 type, but value is wrongly encoded. 
+ &Row{ + []*sppb.StructType_Field{ + { + "Col0", + intType(), + }, + }, + []*proto3.Value{stringProto("&1")}, + }, + &NullInt64{}, + errDecodeColumn(0, errBadEncoding(stringProto("&1"), func() error { + _, err := strconv.ParseInt("&1", 10, 64) + return err + }())), + }, + { + // Field specifies STRING type, but value is having a nil Kind. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + stringType(), + }, + }, + []*proto3.Value{{Kind: (*proto3.Value_StringValue)(nil)}}, + }, + &NullString{"value", true}, + errDecodeColumn(0, errSrcVal(&proto3.Value{Kind: (*proto3.Value_StringValue)(nil)}, "String")), + }, + { + // Field specifies STRING type, but value is for ARRAY type. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + stringType(), + }, + }, + []*proto3.Value{listProto(stringProto("value"))}, + }, + &NullString{"value", true}, + errDecodeColumn(0, errSrcVal(listProto(stringProto("value")), "String")), + }, + { + // Field specifies FLOAT64 type, value is having a nil Kind. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + floatType(), + }, + }, + []*proto3.Value{{Kind: (*proto3.Value_NumberValue)(nil)}}, + }, + &NullFloat64{1.0, true}, + errDecodeColumn(0, errSrcVal(&proto3.Value{Kind: (*proto3.Value_NumberValue)(nil)}, "Number")), + }, + { + // Field specifies FLOAT64 type, but value is for BOOL type. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + floatType(), + }, + }, + []*proto3.Value{boolProto(true)}, + }, + &NullFloat64{1.0, true}, + errDecodeColumn(0, errSrcVal(boolProto(true), "Number")), + }, + { + // Field specifies FLOAT64 type, but value is wrongly encoded. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + floatType(), + }, + }, + []*proto3.Value{stringProto("nan")}, + }, + &NullFloat64{}, + errDecodeColumn(0, errUnexpectedNumStr("nan")), + }, + { + // Field specifies FLOAT64 type, but value is wrongly encoded. 
+ &Row{ + []*sppb.StructType_Field{ + { + "Col0", + floatType(), + }, + }, + []*proto3.Value{stringProto("nan")}, + }, + proto.Float64(0), + errDecodeColumn(0, errUnexpectedNumStr("nan")), + }, + { + // Field specifies BYTES type, value is having a nil Kind. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + bytesType(), + }, + }, + []*proto3.Value{{Kind: (*proto3.Value_StringValue)(nil)}}, + }, + &[]byte{}, + errDecodeColumn(0, errSrcVal(&proto3.Value{Kind: (*proto3.Value_StringValue)(nil)}, "String")), + }, + { + // Field specifies BYTES type, but value is for BOOL type. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + bytesType(), + }, + }, + []*proto3.Value{boolProto(false)}, + }, + &[]byte{}, + errDecodeColumn(0, errSrcVal(boolProto(false), "String")), + }, + { + // Field specifies BYTES type, but value is wrongly encoded. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + bytesType(), + }, + }, + []*proto3.Value{stringProto("&&")}, + }, + &[]byte{}, + errDecodeColumn(0, errBadEncoding(stringProto("&&"), func() error { + _, err := base64.StdEncoding.DecodeString("&&") + return err + }())), + }, + { + // Field specifies BOOL type, value is having a nil Kind. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + boolType(), + }, + }, + []*proto3.Value{{Kind: (*proto3.Value_BoolValue)(nil)}}, + }, + &NullBool{false, true}, + errDecodeColumn(0, errSrcVal(&proto3.Value{Kind: (*proto3.Value_BoolValue)(nil)}, "Bool")), + }, + { + // Field specifies BOOL type, but value is for STRING type. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + boolType(), + }, + }, + []*proto3.Value{stringProto("false")}, + }, + &NullBool{false, true}, + errDecodeColumn(0, errSrcVal(stringProto("false"), "Bool")), + }, + { + // Field specifies TIMESTAMP type, value is having a nil Kind. 
+ &Row{ + []*sppb.StructType_Field{ + { + "Col0", + timeType(), + }, + }, + []*proto3.Value{{Kind: (*proto3.Value_StringValue)(nil)}}, + }, + &NullTime{time.Now(), true}, + errDecodeColumn(0, errSrcVal(&proto3.Value{Kind: (*proto3.Value_StringValue)(nil)}, "String")), + }, + { + // Field specifies TIMESTAMP type, but value is for BOOL type. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + timeType(), + }, + }, + []*proto3.Value{boolProto(false)}, + }, + &NullTime{time.Now(), true}, + errDecodeColumn(0, errSrcVal(boolProto(false), "String")), + }, + { + // Field specifies TIMESTAMP type, but value is invalid timestamp. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + timeType(), + }, + }, + []*proto3.Value{stringProto("junk")}, + }, + &NullTime{time.Now(), true}, + errDecodeColumn(0, errBadEncoding(stringProto("junk"), func() error { + _, err := time.Parse(time.RFC3339Nano, "junk") + return err + }())), + }, + { + // Field specifies DATE type, value is having a nil Kind. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + dateType(), + }, + }, + []*proto3.Value{{Kind: (*proto3.Value_StringValue)(nil)}}, + }, + &NullDate{civil.Date{}, true}, + errDecodeColumn(0, errSrcVal(&proto3.Value{Kind: (*proto3.Value_StringValue)(nil)}, "String")), + }, + { + // Field specifies DATE type, but value is for BOOL type. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + dateType(), + }, + }, + []*proto3.Value{boolProto(false)}, + }, + &NullDate{civil.Date{}, true}, + errDecodeColumn(0, errSrcVal(boolProto(false), "String")), + }, + { + // Field specifies DATE type, but value is invalid timestamp. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + dateType(), + }, + }, + []*proto3.Value{stringProto("junk")}, + }, + &NullDate{civil.Date{}, true}, + errDecodeColumn(0, errBadEncoding(stringProto("junk"), func() error { + _, err := civil.ParseDate("junk") + return err + }())), + }, + + { + // Field specifies ARRAY type, value is having a nil Kind. 
+ &Row{ + []*sppb.StructType_Field{ + { + "Col0", + listType(intType()), + }, + }, + []*proto3.Value{{Kind: (*proto3.Value_ListValue)(nil)}}, + }, + &[]NullInt64{}, + errDecodeColumn(0, errSrcVal(&proto3.Value{Kind: (*proto3.Value_ListValue)(nil)}, "List")), + }, + { + // Field specifies ARRAY type, value is having a nil ListValue. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + listType(intType()), + }, + }, + []*proto3.Value{{Kind: &proto3.Value_ListValue{}}}, + }, + &[]NullInt64{}, + errDecodeColumn(0, errNilListValue("INT64")), + }, + { + // Field specifies ARRAY type, but value is for BYTES type. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + listType(intType()), + }, + }, + []*proto3.Value{bytesProto([]byte("value"))}, + }, + &[]NullInt64{}, + errDecodeColumn(0, errSrcVal(bytesProto([]byte("value")), "List")), + }, + { + // Field specifies ARRAY type, but value is for ARRAY type. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + listType(intType()), + }, + }, + []*proto3.Value{listProto(boolProto(true))}, + }, + &[]NullInt64{}, + errDecodeColumn(0, errDecodeArrayElement(0, boolProto(true), + "INT64", errSrcVal(boolProto(true), "String"))), + }, + { + // Field specifies ARRAY type, value is having a nil Kind. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + listType(stringType()), + }, + }, + []*proto3.Value{{Kind: (*proto3.Value_ListValue)(nil)}}, + }, + &[]NullString{}, + errDecodeColumn(0, errSrcVal(&proto3.Value{Kind: (*proto3.Value_ListValue)(nil)}, "List")), + }, + { + // Field specifies ARRAY type, value is having a nil ListValue. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + listType(stringType()), + }, + }, + []*proto3.Value{{Kind: &proto3.Value_ListValue{}}}, + }, + &[]NullString{}, + errDecodeColumn(0, errNilListValue("STRING")), + }, + { + // Field specifies ARRAY type, but value is for BOOL type. 
+ &Row{ + []*sppb.StructType_Field{ + { + "Col0", + listType(stringType()), + }, + }, + []*proto3.Value{boolProto(true)}, + }, + &[]NullString{}, + errDecodeColumn(0, errSrcVal(boolProto(true), "List")), + }, + { + // Field specifies ARRAY type, but value is for ARRAY type. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + listType(stringType()), + }, + }, + []*proto3.Value{listProto(boolProto(true))}, + }, + &[]NullString{}, + errDecodeColumn(0, errDecodeArrayElement(0, boolProto(true), + "STRING", errSrcVal(boolProto(true), "String"))), + }, + { + // Field specifies ARRAY type, value is having a nil Kind. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + listType(floatType()), + }, + }, + []*proto3.Value{{Kind: (*proto3.Value_ListValue)(nil)}}, + }, + &[]NullFloat64{}, + errDecodeColumn(0, errSrcVal(&proto3.Value{Kind: (*proto3.Value_ListValue)(nil)}, "List")), + }, + { + // Field specifies ARRAY type, value is having a nil ListValue. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + listType(floatType()), + }, + }, + []*proto3.Value{{Kind: &proto3.Value_ListValue{}}}, + }, + &[]NullFloat64{}, + errDecodeColumn(0, errNilListValue("FLOAT64")), + }, + { + // Field specifies ARRAY type, but value is for STRING type. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + listType(floatType()), + }, + }, + []*proto3.Value{stringProto("value")}, + }, + &[]NullFloat64{}, + errDecodeColumn(0, errSrcVal(stringProto("value"), "List")), + }, + { + // Field specifies ARRAY type, but value is for ARRAY type. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + listType(floatType()), + }, + }, + []*proto3.Value{listProto(boolProto(true))}, + }, + &[]NullFloat64{}, + errDecodeColumn(0, errDecodeArrayElement(0, boolProto(true), + "FLOAT64", errSrcVal(boolProto(true), "Number"))), + }, + { + // Field specifies ARRAY type, value is having a nil Kind. 
+ &Row{ + []*sppb.StructType_Field{ + { + "Col0", + listType(bytesType()), + }, + }, + []*proto3.Value{{Kind: (*proto3.Value_ListValue)(nil)}}, + }, + &[][]byte{}, + errDecodeColumn(0, errSrcVal(&proto3.Value{Kind: (*proto3.Value_ListValue)(nil)}, "List")), + }, + { + // Field specifies ARRAY type, value is having a nil ListValue. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + listType(bytesType()), + }, + }, + []*proto3.Value{{Kind: &proto3.Value_ListValue{}}}, + }, + &[][]byte{}, + errDecodeColumn(0, errNilListValue("BYTES")), + }, + { + // Field specifies ARRAY type, but value is for FLOAT64 type. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + listType(bytesType()), + }, + }, + []*proto3.Value{floatProto(1.0)}, + }, + &[][]byte{}, + errDecodeColumn(0, errSrcVal(floatProto(1.0), "List")), + }, + { + // Field specifies ARRAY type, but value is for ARRAY type. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + listType(bytesType()), + }, + }, + []*proto3.Value{listProto(floatProto(1.0))}, + }, + &[][]byte{}, + errDecodeColumn(0, errDecodeArrayElement(0, floatProto(1.0), + "BYTES", errSrcVal(floatProto(1.0), "String"))), + }, + { + // Field specifies ARRAY type, value is having a nil Kind. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + listType(boolType()), + }, + }, + []*proto3.Value{{Kind: (*proto3.Value_ListValue)(nil)}}, + }, + &[]NullBool{}, + errDecodeColumn(0, errSrcVal(&proto3.Value{Kind: (*proto3.Value_ListValue)(nil)}, "List")), + }, + { + // Field specifies ARRAY type, value is having a nil ListValue. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + listType(boolType()), + }, + }, + []*proto3.Value{{Kind: &proto3.Value_ListValue{}}}, + }, + &[]NullBool{}, + errDecodeColumn(0, errNilListValue("BOOL")), + }, + { + // Field specifies ARRAY type, but value is for FLOAT64 type. 
+ &Row{ + []*sppb.StructType_Field{ + { + "Col0", + listType(boolType()), + }, + }, + []*proto3.Value{floatProto(1.0)}, + }, + &[]NullBool{}, + errDecodeColumn(0, errSrcVal(floatProto(1.0), "List")), + }, + { + // Field specifies ARRAY type, but value is for ARRAY type. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + listType(boolType()), + }, + }, + []*proto3.Value{listProto(floatProto(1.0))}, + }, + &[]NullBool{}, + errDecodeColumn(0, errDecodeArrayElement(0, floatProto(1.0), + "BOOL", errSrcVal(floatProto(1.0), "Bool"))), + }, + { + // Field specifies ARRAY type, value is having a nil Kind. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + listType(timeType()), + }, + }, + []*proto3.Value{{Kind: (*proto3.Value_ListValue)(nil)}}, + }, + &[]NullTime{}, + errDecodeColumn(0, errSrcVal(&proto3.Value{Kind: (*proto3.Value_ListValue)(nil)}, "List")), + }, + { + // Field specifies ARRAY type, value is having a nil ListValue. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + listType(timeType()), + }, + }, + []*proto3.Value{{Kind: &proto3.Value_ListValue{}}}, + }, + &[]NullTime{}, + errDecodeColumn(0, errNilListValue("TIMESTAMP")), + }, + { + // Field specifies ARRAY type, but value is for FLOAT64 type. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + listType(timeType()), + }, + }, + []*proto3.Value{floatProto(1.0)}, + }, + &[]NullTime{}, + errDecodeColumn(0, errSrcVal(floatProto(1.0), "List")), + }, + { + // Field specifies ARRAY type, but value is for ARRAY type. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + listType(timeType()), + }, + }, + []*proto3.Value{listProto(floatProto(1.0))}, + }, + &[]NullTime{}, + errDecodeColumn(0, errDecodeArrayElement(0, floatProto(1.0), + "TIMESTAMP", errSrcVal(floatProto(1.0), "String"))), + }, + { + // Field specifies ARRAY type, value is having a nil Kind. 
+ &Row{ + []*sppb.StructType_Field{ + { + "Col0", + listType(dateType()), + }, + }, + []*proto3.Value{{Kind: (*proto3.Value_ListValue)(nil)}}, + }, + &[]NullDate{}, + errDecodeColumn(0, errSrcVal(&proto3.Value{Kind: (*proto3.Value_ListValue)(nil)}, "List")), + }, + { + // Field specifies ARRAY type, value is having a nil ListValue. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + listType(dateType()), + }, + }, + []*proto3.Value{{Kind: &proto3.Value_ListValue{}}}, + }, + &[]NullDate{}, + errDecodeColumn(0, errNilListValue("DATE")), + }, + { + // Field specifies ARRAY type, but value is for FLOAT64 type. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + listType(dateType()), + }, + }, + []*proto3.Value{floatProto(1.0)}, + }, + &[]NullDate{}, + errDecodeColumn(0, errSrcVal(floatProto(1.0), "List")), + }, + { + // Field specifies ARRAY type, but value is for ARRAY type. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + listType(dateType()), + }, + }, + []*proto3.Value{listProto(floatProto(1.0))}, + }, + &[]NullDate{}, + errDecodeColumn(0, errDecodeArrayElement(0, floatProto(1.0), + "DATE", errSrcVal(floatProto(1.0), "String"))), + }, + { + // Field specifies ARRAY type, value is having a nil Kind. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + listType( + structType( + mkField("Col1", intType()), + mkField("Col2", floatType()), + mkField("Col3", stringType()), + ), + ), + }, + }, + []*proto3.Value{{Kind: (*proto3.Value_ListValue)(nil)}}, + }, + &[]*struct { + Col1 int64 + Col2 float64 + Col3 string + }{}, + errDecodeColumn(0, errSrcVal(&proto3.Value{Kind: (*proto3.Value_ListValue)(nil)}, "List")), + }, + { + // Field specifies ARRAY type, value is having a nil ListValue. 
+ &Row{ + []*sppb.StructType_Field{ + { + "Col0", + listType( + structType( + mkField("Col1", intType()), + mkField("Col2", floatType()), + mkField("Col3", stringType()), + ), + ), + }, + }, + []*proto3.Value{{Kind: &proto3.Value_ListValue{}}}, + }, + &[]*struct { + Col1 int64 + Col2 float64 + Col3 string + }{}, + errDecodeColumn(0, errNilListValue("STRUCT")), + }, + { + // Field specifies ARRAY type, value is having a nil ListValue. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + listType( + structType( + mkField("Col1", intType()), + mkField("Col2", floatType()), + mkField("Col3", stringType()), + ), + ), + }, + }, + []*proto3.Value{{Kind: &proto3.Value_ListValue{}}}, + }, + &[]NullRow{}, + errDecodeColumn(0, errNilListValue("STRUCT")), + }, + { + // Field specifies ARRAY type, value is for BYTES type. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + listType( + structType( + mkField("Col1", intType()), + mkField("Col2", floatType()), + mkField("Col3", stringType()), + ), + ), + }, + }, + []*proto3.Value{bytesProto([]byte("value"))}, + }, + &[]*struct { + Col1 int64 + Col2 float64 + Col3 string + }{}, + errDecodeColumn(0, errSrcVal(bytesProto([]byte("value")), "List")), + }, + { + // Field specifies ARRAY type, value is for BYTES type. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + listType( + structType( + mkField("Col1", intType()), + mkField("Col2", floatType()), + mkField("Col3", stringType()), + ), + ), + }, + }, + []*proto3.Value{listProto(bytesProto([]byte("value")))}, + }, + &[]NullRow{}, + errDecodeColumn(0, errNotStructElement(0, bytesProto([]byte("value")))), + }, + { + // Field specifies ARRAY type, value is for ARRAY type. 
+ &Row{ + []*sppb.StructType_Field{ + { + "Col0", + listType( + structType( + mkField("Col1", intType()), + mkField("Col2", floatType()), + mkField("Col3", stringType()), + ), + ), + }, + }, + []*proto3.Value{listProto(bytesProto([]byte("value")))}, + }, + &[]*struct { + Col1 int64 + Col2 float64 + Col3 string + }{}, + errDecodeColumn(0, errDecodeArrayElement(0, bytesProto([]byte("value")), + "STRUCT", errSrcVal(bytesProto([]byte("value")), "List"))), + }, + { + // Field specifies ARRAY, but is having nil StructType. + &Row{ + []*sppb.StructType_Field{ + { + "Col0", + listType( + &sppb.Type{Code: sppb.TypeCode_STRUCT}, + ), + }, + }, + []*proto3.Value{listProto(listProto(intProto(1), floatProto(2.0), stringProto("3")))}, + }, + &[]*struct { + Col1 int64 + Col2 float64 + Col3 string + }{}, + errDecodeColumn(0, errDecodeArrayElement(0, listProto(intProto(1), floatProto(2.0), stringProto("3")), + "STRUCT", errNilSpannerStructType())), + }, + { + // Field specifies ARRAY, but the second struct value is for BOOL type instead of FLOAT64. 
+ &Row{ + []*sppb.StructType_Field{ + { + "Col0", + listType( + structType( + mkField("Col1", intType()), + mkField("Col2", floatType()), + mkField("Col3", stringType()), + ), + ), + }, + }, + []*proto3.Value{listProto(listProto(intProto(1), boolProto(true), stringProto("3")))}, + }, + &[]*struct { + Col1 int64 + Col2 float64 + Col3 string + }{}, + errDecodeColumn( + 0, + errDecodeArrayElement( + 0, listProto(intProto(1), boolProto(true), stringProto("3")), "STRUCT", + errDecodeStructField( + &sppb.StructType{ + Fields: []*sppb.StructType_Field{ + mkField("Col1", intType()), + mkField("Col2", floatType()), + mkField("Col3", stringType()), + }, + }, + "Col2", + errSrcVal(boolProto(true), "Number"), + ), + ), + ), + }, + } { + if gotErr := test.row.Column(0, test.dst); !reflect.DeepEqual(gotErr, test.wantErr) { + t.Errorf("%v: test.row.Column(0) got error %v, want %v", i, gotErr, test.wantErr) + } + if gotErr := test.row.ColumnByName("Col0", test.dst); !reflect.DeepEqual(gotErr, test.wantErr) { + t.Errorf("%v: test.row.ColumnByName(%q) got error %v, want %v", i, "Col0", gotErr, test.wantErr) + } + if gotErr := test.row.Columns(test.dst); !reflect.DeepEqual(gotErr, test.wantErr) { + t.Errorf("%v: test.row.Columns(%T) got error %v, want %v", i, test.dst, gotErr, test.wantErr) + } + } +} + +// Test Row.ToStruct(). 
+func TestToStruct(t *testing.T) { + s := []struct { + // STRING / STRING ARRAY + PrimaryKey string `spanner:"STRING"` + NullString NullString `spanner:"NULL_STRING"` + StringArray []NullString `spanner:"STRING_ARRAY"` + NullStringArray []NullString `spanner:"NULL_STRING_ARRAY"` + // BYTES / BYTES ARRAY + Bytes []byte `spanner:"BYTES"` + NullBytes []byte `spanner:"NULL_BYTES"` + BytesArray [][]byte `spanner:"BYTES_ARRAY"` + NullBytesArray [][]byte `spanner:"NULL_BYTES_ARRAY"` + // INT64 / INT64 ARRAY + Int64 int64 `spanner:"INT64"` + NullInt64 NullInt64 `spanner:"NULL_INT64"` + Int64Array []NullInt64 `spanner:"INT64_ARRAY"` + NullInt64Array []NullInt64 `spanner:"NULL_INT64_ARRAY"` + // BOOL / BOOL ARRAY + Bool bool `spanner:"BOOL"` + NullBool NullBool `spanner:"NULL_BOOL"` + BoolArray []NullBool `spanner:"BOOL_ARRAY"` + NullBoolArray []NullBool `spanner:"NULL_BOOL_ARRAY"` + // FLOAT64 / FLOAT64 ARRAY + Float64 float64 `spanner:"FLOAT64"` + NullFloat64 NullFloat64 `spanner:"NULL_FLOAT64"` + Float64Array []NullFloat64 `spanner:"FLOAT64_ARRAY"` + NullFloat64Array []NullFloat64 `spanner:"NULL_FLOAT64_ARRAY"` + // TIMESTAMP / TIMESTAMP ARRAY + Timestamp time.Time `spanner:"TIMESTAMP"` + NullTimestamp NullTime `spanner:"NULL_TIMESTAMP"` + TimestampArray []NullTime `spanner:"TIMESTAMP_ARRAY"` + NullTimestampArray []NullTime `spanner:"NULL_TIMESTAMP_ARRAY"` + // DATE / DATE ARRAY + Date civil.Date `spanner:"DATE"` + NullDate NullDate `spanner:"NULL_DATE"` + DateArray []NullDate `spanner:"DATE_ARRAY"` + NullDateArray []NullDate `spanner:"NULL_DATE_ARRAY"` + + // STRUCT ARRAY + StructArray []*struct { + Col1 int64 + Col2 float64 + Col3 string + } `spanner:"STRUCT_ARRAY"` + NullStructArray []*struct { + Col1 int64 + Col2 float64 + Col3 string + } `spanner:"NULL_STRUCT_ARRAY"` + }{ + {}, // got + { + // STRING / STRING ARRAY + "value", + NullString{}, + []NullString{{"value1", true}, {}, {"value3", true}}, + []NullString(nil), + // BYTES / BYTES ARRAY + []byte("value"), + 
[]byte(nil), + [][]byte{[]byte("value1"), nil, []byte("value3")}, + [][]byte(nil), + // INT64 / INT64 ARRAY + int64(17), + NullInt64{}, + []NullInt64{{int64(1), true}, {int64(2), true}, {}}, + []NullInt64(nil), + // BOOL / BOOL ARRAY + true, + NullBool{}, + []NullBool{{}, {true, true}, {false, true}}, + []NullBool(nil), + // FLOAT64 / FLOAT64 ARRAY + 1.7, + NullFloat64{}, + []NullFloat64{{}, {}, {1.7, true}}, + []NullFloat64(nil), + // TIMESTAMP / TIMESTAMP ARRAY + tm, + NullTime{}, + []NullTime{{}, {tm, true}}, + []NullTime(nil), + // DATE / DATE ARRAY + dt, + NullDate{}, + []NullDate{{}, {dt, true}}, + []NullDate(nil), + // STRUCT ARRAY + []*struct { + Col1 int64 + Col2 float64 + Col3 string + }{ + nil, + &struct { + Col1 int64 + Col2 float64 + Col3 string + }{3, 33.3, "three"}, + nil, + }, + []*struct { + Col1 int64 + Col2 float64 + Col3 string + }(nil), + }, // want + } + err := row.ToStruct(&s[0]) + if err != nil { + t.Errorf("row.ToStruct() returns error: %v, want nil", err) + } + if !reflect.DeepEqual(s[0], s[1]) { + t.Errorf("row.ToStruct() fetches struct %v, want %v", s[0], s[1]) + } +} + +// Test helpers for getting column names. +func TestColumnNameAndIndex(t *testing.T) { + // Test Row.Size(). + if rs := row.Size(); rs != len(row.fields) { + t.Errorf("row.Size() returns %v, want %v", rs, len(row.fields)) + } + // Test Row.Size() on empty Row. + if rs := (&Row{}).Size(); rs != 0 { + t.Errorf("empty_row.Size() returns %v, want %v", rs, 0) + } + // Test Row.ColumnName() + for i, col := range row.fields { + if cn := row.ColumnName(i); cn != col.Name { + t.Errorf("row.ColumnName(%v) returns %q, want %q", i, cn, col.Name) + } + goti, err := row.ColumnIndex(col.Name) + if err != nil { + t.Errorf("ColumnIndex(%q) error %v", col.Name, err) + continue + } + if goti != i { + t.Errorf("ColumnIndex(%q) = %d, want %d", col.Name, goti, i) + } + } + // Test Row.ColumnName on empty Row. 
+ if cn := (&Row{}).ColumnName(0); cn != "" { + t.Errorf("empty_row.ColumnName(%v) returns %q, want %q", 0, cn, "") + } + // Test Row.ColumnIndex on empty Row. + if _, err := (&Row{}).ColumnIndex(""); err == nil { + t.Error("empty_row.ColumnIndex returns nil, want error") + } +} + +func TestNewRow(t *testing.T) { + for _, test := range []struct { + names []string + values []interface{} + want *Row + wantErr error + }{ + { + want: &Row{fields: []*sppb.StructType_Field{}, vals: []*proto3.Value{}}, + }, + { + names: []string{}, + values: []interface{}{}, + want: &Row{fields: []*sppb.StructType_Field{}, vals: []*proto3.Value{}}, + }, + { + names: []string{"a", "b"}, + values: []interface{}{}, + want: nil, + wantErr: errNamesValuesMismatch([]string{"a", "b"}, []interface{}{}), + }, + { + names: []string{"a", "b", "c"}, + values: []interface{}{5, "abc", GenericColumnValue{listType(intType()), listProto(intProto(91), nullProto(), intProto(87))}}, + want: &Row{ + []*sppb.StructType_Field{ + {"a", intType()}, + {"b", stringType()}, + {"c", listType(intType())}, + }, + []*proto3.Value{ + intProto(5), + stringProto("abc"), + listProto(intProto(91), nullProto(), intProto(87)), + }, + }, + }, + } { + got, err := NewRow(test.names, test.values) + if !reflect.DeepEqual(err, test.wantErr) { + t.Errorf("NewRow(%v,%v).err = %s, want %s", test.names, test.values, err, test.wantErr) + continue + } + if !reflect.DeepEqual(got, test.want) { + t.Errorf("NewRow(%v,%v) = %s, want %s", test.names, test.values, got, test.want) + continue + } + } +} diff --git a/vendor/cloud.google.com/go/spanner/session.go b/vendor/cloud.google.com/go/spanner/session.go new file mode 100644 index 00000000..5ab1386c --- /dev/null +++ b/vendor/cloud.google.com/go/spanner/session.go @@ -0,0 +1,965 @@ +/* +Copyright 2017 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package spanner + +import ( + "container/heap" + "container/list" + "fmt" + "math/rand" + "strings" + "sync" + "time" + + log "github.com/golang/glog" + "golang.org/x/net/context" + + sppb "google.golang.org/genproto/googleapis/spanner/v1" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" +) + +// sessionHandle is an interface for transactions to access Cloud Spanner sessions safely. It is generated by sessionPool.take(). +type sessionHandle struct { + // mu guarantees that inner session object is returned / destroyed only once. + mu sync.Mutex + // session is a pointer to a session object. Transactions never need to access it directly. + session *session +} + +// recycle gives the inner session object back to its home session pool. It is safe to call recycle multiple times but only the first one would take effect. +func (sh *sessionHandle) recycle() { + sh.mu.Lock() + defer sh.mu.Unlock() + if sh.session == nil { + // sessionHandle has already been recycled. + return + } + sh.session.recycle() + sh.session = nil +} + +// getID gets the Cloud Spanner session ID from the internal session object. getID returns empty string if the sessionHandle is nil or the inner session +// object has been released by recycle / destroy. +func (sh *sessionHandle) getID() string { + sh.mu.Lock() + defer sh.mu.Unlock() + if sh.session == nil { + // sessionHandle has already been recycled/destroyed. + return "" + } + return sh.session.getID() +} + +// getClient gets the Cloud Spanner RPC client associated with the session ID in sessionHandle. 
+func (sh *sessionHandle) getClient() sppb.SpannerClient { + sh.mu.Lock() + defer sh.mu.Unlock() + if sh.session == nil { + return nil + } + return sh.session.client +} + +// getMetadata returns the metadata associated with the session in sessionHandle. +func (sh *sessionHandle) getMetadata() metadata.MD { + sh.mu.Lock() + defer sh.mu.Unlock() + if sh.session == nil { + return nil + } + return sh.session.md +} + +// getTransactionID returns the transaction id in the session if available. +func (sh *sessionHandle) getTransactionID() transactionID { + sh.mu.Lock() + defer sh.mu.Unlock() + if sh.session == nil { + return nil + } + return sh.session.tx +} + +// destroy destroys the inner session object. It is safe to call destroy multiple times and only the first call would attempt to +// destroy the inner session object. +func (sh *sessionHandle) destroy() { + sh.mu.Lock() + s := sh.session + sh.session = nil + sh.mu.Unlock() + if s == nil { + // sessionHandle has already been destroyed. + return + } + s.destroy(false) +} + +// session wraps a Cloud Spanner session ID through which transactions are created and executed. +type session struct { + // client is the RPC channel to Cloud Spanner. It is set only once during session's creation. + client sppb.SpannerClient + // id is the unique id of the session in Cloud Spanner. It is set only once during session's creation. + id string + // pool is the session's home session pool where it was created. It is set only once during session's creation. + pool *sessionPool + // createTime is the timestamp of the session's creation. It is set only once during session's creation. + createTime time.Time + + // mu protects the following fields from concurrent access: both healthcheck workers and transactions can modify them. + mu sync.Mutex + // valid marks the validity of a session. + valid bool + // hcIndex is the index of the session inside the global healthcheck queue. If hcIndex < 0, session has been unregistered from the queue. 
+ hcIndex int + // idleList is the linkedlist node which links the session to its home session pool's idle list. If idleList == nil, the + // session is not in idle list. + idleList *list.Element + // nextCheck is the timestamp of next scheduled healthcheck of the session. It is maintained by the global health checker. + nextCheck time.Time + // checkingHelath is true if currently this session is being processed by health checker. Must be modified under health checker lock. + checkingHealth bool + // md is the Metadata to be sent with each request. + md metadata.MD + // tx contains the transaction id if the session has been prepared for write. + tx transactionID +} + +// isValid returns true if the session is still valid for use. +func (s *session) isValid() bool { + s.mu.Lock() + defer s.mu.Unlock() + return s.valid +} + +// isWritePrepared returns true if the session is prepared for write. +func (s *session) isWritePrepared() bool { + s.mu.Lock() + defer s.mu.Unlock() + return s.tx != nil +} + +// String implements fmt.Stringer for session. +func (s *session) String() string { + s.mu.Lock() + defer s.mu.Unlock() + return fmt.Sprintf("", + s.id, s.hcIndex, s.idleList, s.valid, s.createTime, s.nextCheck) +} + +// ping verifies if the session is still alive in Cloud Spanner. +func (s *session) ping() error { + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + defer cancel() + return runRetryable(ctx, func(ctx context.Context) error { + _, err := s.client.GetSession(contextWithMetadata(ctx, s.pool.md), &sppb.GetSessionRequest{Name: s.getID()}) // s.getID is safe even when s is invalid. + return err + }) +} + +// refreshIdle refreshes the session's session ID if it is in its home session pool's idle list +// and returns true if successful. 
+func (s *session) refreshIdle() bool { + s.mu.Lock() + validAndIdle := s.valid && s.idleList != nil + s.mu.Unlock() + if !validAndIdle { + // Optimization: return early if s is not valid or if s is not in idle list. + return false + } + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + var sid string + err := runRetryable(ctx, func(ctx context.Context) error { + session, e := s.client.CreateSession(contextWithMetadata(ctx, s.pool.md), &sppb.CreateSessionRequest{Database: s.pool.db}) + if e != nil { + return e + } + sid = session.Name + return nil + }) + if err != nil { + return false + } + s.pool.mu.Lock() + s.mu.Lock() + var recycle bool + if s.valid && s.idleList != nil { + // session is in idle list, refresh its session id. + sid, s.id = s.id, sid + if s.tx != nil { + s.tx = nil + s.pool.idleWriteList.Remove(s.idleList) + // We need to put this session back into the pool. + recycle = true + } + } + s.mu.Unlock() + s.pool.mu.Unlock() + if recycle { + s.pool.recycle(s) + } + // If we fail to explicitly destroy the session, it will be eventually garbage collected by + // Cloud Spanner. + if err = runRetryable(ctx, func(ctx context.Context) error { + _, e := s.client.DeleteSession(contextWithMetadata(ctx, s.pool.md), &sppb.DeleteSessionRequest{Name: sid}) + return e + }); err != nil { + return false + } + return true +} + +// setHcIndex atomically sets the session's index in the healthcheck queue and returns the old index. +func (s *session) setHcIndex(i int) int { + s.mu.Lock() + defer s.mu.Unlock() + oi := s.hcIndex + s.hcIndex = i + return oi +} + +// setIdleList atomically sets the session's idle list link and returns the old link. +func (s *session) setIdleList(le *list.Element) *list.Element { + s.mu.Lock() + defer s.mu.Unlock() + old := s.idleList + s.idleList = le + return old +} + +// invalidate marks a session as invalid and returns the old validity. 
+func (s *session) invalidate() bool { + s.mu.Lock() + defer s.mu.Unlock() + ov := s.valid + s.valid = false + return ov +} + +// setNextCheck sets the timestamp for next healthcheck on the session. +func (s *session) setNextCheck(t time.Time) { + s.mu.Lock() + defer s.mu.Unlock() + s.nextCheck = t +} + +// setTransactionID sets the transaction id in the session +func (s *session) setTransactionID(tx transactionID) { + s.mu.Lock() + defer s.mu.Unlock() + s.tx = tx +} + +// getID returns the session ID which uniquely identifies the session in Cloud Spanner. +func (s *session) getID() string { + s.mu.Lock() + defer s.mu.Unlock() + return s.id +} + +// getHcIndex returns the session's index into the global healthcheck priority queue. +func (s *session) getHcIndex() int { + s.mu.Lock() + defer s.mu.Unlock() + return s.hcIndex +} + +// getIdleList returns the session's link in its home session pool's idle list. +func (s *session) getIdleList() *list.Element { + s.mu.Lock() + defer s.mu.Unlock() + return s.idleList +} + +// getNextCheck returns the timestamp for next healthcheck on the session. +func (s *session) getNextCheck() time.Time { + s.mu.Lock() + defer s.mu.Unlock() + return s.nextCheck +} + +// recycle turns the session back to its home session pool. +func (s *session) recycle() { + s.setTransactionID(nil) + if !s.pool.recycle(s) { + // s is rejected by its home session pool because it expired and the session pool is currently having enough number of open sessions. + s.destroy(false) + } +} + +// destroy removes the session from its home session pool, healthcheck queue and Cloud Spanner service. +func (s *session) destroy(isExpire bool) bool { + // Remove s from session pool. + if !s.pool.remove(s, isExpire) { + return false + } + // Unregister s from healthcheck queue. + s.pool.hc.unregister(s) + // Remove s from Cloud Spanner service. 
+ ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + // Ignore the error returned by runRetryable because even if we fail to explicitly destroy the session, + // it will be eventually garbage collected by Cloud Spanner. + runRetryable(ctx, func(ctx context.Context) error { + _, e := s.client.DeleteSession(ctx, &sppb.DeleteSessionRequest{Name: s.getID()}) + return e + }) + return true +} + +// prepareForWrite prepares the session for write if it is not already in that state. +func (s *session) prepareForWrite(ctx context.Context) error { + if s.isWritePrepared() { + return nil + } + tx, err := beginTransaction(ctx, s.getID(), s.client) + if err != nil { + return err + } + s.setTransactionID(tx) + return nil +} + +// SessionPoolConfig stores configurations of a session pool. +type SessionPoolConfig struct { + // getRPCClient is the caller supplied method for getting a gRPC client to Cloud Spanner, this makes session pool able to use client pooling. + getRPCClient func() (sppb.SpannerClient, error) + // MaxOpened is the maximum number of opened sessions that is allowed by the + // session pool, zero means unlimited. + MaxOpened uint64 + // MinOpened is the minimum number of opened sessions that the session pool + // tries to maintain. Session pool won't continue to expire sessions if number + // of opened connections drops below MinOpened. However, if session is found + // to be broken, it will still be evicted from session pool, therefore it is + // posssible that the number of opened sessions drops below MinOpened. + MinOpened uint64 + // MaxSessionAge is the maximum duration that a session can be reused, zero + // means session pool will never expire sessions. + MaxSessionAge time.Duration + // MaxBurst is the maximum number of concurrent session creation requests, + MaxBurst uint64 + // WriteSessions is the fraction of sessions we try to keep prepared for write. 
+ WriteSessions float64
+ // HealthCheckWorkers is number of workers used by health checker for this pool.
+ HealthCheckWorkers int
+ // HealthCheckInterval is how often the health checker pings a session.
+ HealthCheckInterval time.Duration
+}
+
+// errNoRPCGetter returns error for SessionPoolConfig missing getRPCClient method.
+func errNoRPCGetter() error {
+ return spannerErrorf(codes.InvalidArgument, "require SessionPoolConfig.getRPCClient != nil, got nil")
+}
+
+// errMinOpenedGTMaxOpened returns error for SessionPoolConfig.MaxOpened < SessionPoolConfig.MinOpened when SessionPoolConfig.MaxOpened is set.
+func errMinOpenedGTMaxOpened(spc *SessionPoolConfig) error {
+ return spannerErrorf(codes.InvalidArgument,
+ "require SessionPoolConfig.MaxOpened >= SessionPoolConfig.MinOpened, got %v and %v", spc.MaxOpened, spc.MinOpened)
+}
+
+// validate verifies that the SessionPoolConfig is good for use.
+func (spc *SessionPoolConfig) validate() error {
+ if spc.getRPCClient == nil {
+ return errNoRPCGetter()
+ }
+ if spc.MinOpened > spc.MaxOpened && spc.MaxOpened > 0 {
+ return errMinOpenedGTMaxOpened(spc)
+ }
+ return nil
+}
+
+// sessionPool creates and caches Cloud Spanner sessions.
+type sessionPool struct {
+ // mu protects sessionPool from concurrent access.
+ mu sync.Mutex
+ // valid marks the validity of the session pool.
+ valid bool
+ // db is the database name that all sessions in the pool are associated with.
+ db string
+ // idleList caches idle session IDs. Session IDs in this list can be allocated for use.
+ idleList list.List
+ // idleWriteList caches idle sessions which have been prepared for write.
+ idleWriteList list.List
+ // mayGetSession is for broadcasting that session retrieval/creation may proceed.
+ mayGetSession chan struct{}
+ // numOpened is the total number of open sessions from the session pool.
+ numOpened uint64
+ // createReqs is the number of ongoing session creation requests.
+ createReqs uint64
+ // prepareReqs is the number of ongoing session preparation requests.
+ prepareReqs uint64
+ // configuration of the session pool.
+ SessionPoolConfig
+ // Metadata to be sent with each request
+ md metadata.MD
+ // hc is the health checker
+ hc *healthChecker
+}
+
+// newSessionPool creates a new session pool.
+func newSessionPool(db string, config SessionPoolConfig, md metadata.MD) (*sessionPool, error) {
+ if err := config.validate(); err != nil {
+ return nil, err
+ }
+ pool := &sessionPool{
+ db: db,
+ valid: true,
+ mayGetSession: make(chan struct{}),
+ SessionPoolConfig: config,
+ md: md,
+ }
+ if config.HealthCheckWorkers == 0 {
+ // With 10 workers and assuming average latency of 5 ms for BeginTransaction, we will be able to
+ // prepare 2000 tx/sec in advance. If the rate of takeWriteSession is more than that, it will
+ // degrade to doing BeginTransaction inline.
+ // TODO: consider resizing the worker pool dynamically according to the load.
+ config.HealthCheckWorkers = 10
+ }
+ if config.HealthCheckInterval == 0 {
+ config.HealthCheckInterval = 5 * time.Minute
+ }
+ // On GCE VM, within the same region a healthcheck ping takes on average 10ms to finish, given a 5 minutes interval and
+ // 10 healthcheck workers, a healthChecker can effectively maintain 100 checks_per_worker/sec * 10 workers * 300 seconds = 300K sessions.
+ pool.hc = newHealthChecker(config.HealthCheckInterval, config.HealthCheckWorkers, pool)
+ return pool, nil
+}
+
+// isValid checks if the session pool is still valid.
+func (p *sessionPool) isValid() bool {
+ if p == nil {
+ return false
+ }
+ p.mu.Lock()
+ defer p.mu.Unlock()
+ return p.valid
+}
+
+// close marks the session pool as closed.
+func (p *sessionPool) close() { + if p == nil { + return + } + p.mu.Lock() + if !p.valid { + p.mu.Unlock() + return + } + p.valid = false + p.mu.Unlock() + p.hc.close() + // destroy all the sessions + p.hc.mu.Lock() + allSessions := make([]*session, len(p.hc.queue.sessions)) + copy(allSessions, p.hc.queue.sessions) + p.hc.mu.Unlock() + for _, s := range allSessions { + s.destroy(false) + } +} + +// errInvalidSessionPool returns error for using an invalid session pool. +func errInvalidSessionPool() error { + return spannerErrorf(codes.InvalidArgument, "invalid session pool") +} + +// errGetSessionTimeout returns error for context timeout during sessionPool.take(). +func errGetSessionTimeout() error { + return spannerErrorf(codes.Canceled, "timeout / context canceled during getting session") +} + +// shouldPrepareWrite returns true if we should prepare more sessions for write. +func (p *sessionPool) shouldPrepareWrite() bool { + return float64(p.numOpened)*p.WriteSessions > float64(p.idleWriteList.Len()+int(p.prepareReqs)) +} + +func (p *sessionPool) createSession(ctx context.Context) (*session, error) { + doneCreate := func(done bool) { + p.mu.Lock() + if !done { + // Session creation failed, give budget back. + p.numOpened-- + } + p.createReqs-- + // Notify other waiters blocking on session creation. + close(p.mayGetSession) + p.mayGetSession = make(chan struct{}) + p.mu.Unlock() + } + sc, err := p.getRPCClient() + if err != nil { + doneCreate(false) + return nil, err + } + var s *session + err = runRetryable(ctx, func(ctx context.Context) error { + sid, e := sc.CreateSession(ctx, &sppb.CreateSessionRequest{Database: p.db}) + if e != nil { + return e + } + // If no error, construct the new session. + s = &session{valid: true, client: sc, id: sid.Name, pool: p, createTime: time.Now(), md: p.md} + p.hc.register(s) + return nil + }) + if err != nil { + doneCreate(false) + // Should return error directly because of the previous retries on CreateSession RPC. 
+ return nil, err + } + doneCreate(true) + return s, nil +} + +func (p *sessionPool) isHealthy(s *session) bool { + if s.getNextCheck().Add(2 * p.hc.getInterval()).Before(time.Now()) { + // TODO: figure out if we need to schedule a new healthcheck worker here. + if err := s.ping(); shouldDropSession(err) { + // The session is already bad, continue to fetch/create a new one. + s.destroy(false) + return false + } + p.hc.scheduledHC(s) + } + return true +} + +// take returns a cached session if there are available ones; if there isn't any, it tries to allocate a new one. +// Session returned by take should be used for read operations. +func (p *sessionPool) take(ctx context.Context) (*sessionHandle, error) { + ctx = contextWithMetadata(ctx, p.md) + for { + var ( + s *session + err error + ) + + p.mu.Lock() + if !p.valid { + p.mu.Unlock() + return nil, errInvalidSessionPool() + } + if p.idleList.Len() > 0 { + // Idle sessions are available, get one from the top of the idle list. + s = p.idleList.Remove(p.idleList.Front()).(*session) + } else if p.idleWriteList.Len() > 0 { + s = p.idleWriteList.Remove(p.idleWriteList.Front()).(*session) + } + if s != nil { + s.setIdleList(nil) + p.mu.Unlock() + // From here, session is no longer in idle list, so healthcheck workers won't destroy it. + // If healthcheck workers failed to schedule healthcheck for the session timely, do the check here. + // Because session check is still much cheaper than session creation, they should be reused as much as possible. + if !p.isHealthy(s) { + continue + } + return &sessionHandle{session: s}, nil + } + // Idle list is empty, block if session pool has reached max session creation concurrency or max number of open sessions. 
+ if (p.MaxOpened > 0 && p.numOpened >= p.MaxOpened) || (p.MaxBurst > 0 && p.createReqs >= p.MaxBurst) { + mayGetSession := p.mayGetSession + p.mu.Unlock() + select { + case <-ctx.Done(): + return nil, errGetSessionTimeout() + case <-mayGetSession: + } + continue + } + // Take budget before the actual session creation. + p.numOpened++ + p.createReqs++ + p.mu.Unlock() + if s, err = p.createSession(ctx); err != nil { + return nil, toSpannerError(err) + } + return &sessionHandle{session: s}, nil + } +} + +// takeWriteSession returns a write prepared cached session if there are available ones; if there isn't any, it tries to allocate a new one. +// Session returned should be used for read write transactions. +func (p *sessionPool) takeWriteSession(ctx context.Context) (*sessionHandle, error) { + ctx = contextWithMetadata(ctx, p.md) + for { + var ( + s *session + err error + ) + + p.mu.Lock() + if !p.valid { + p.mu.Unlock() + return nil, errInvalidSessionPool() + } + if p.idleWriteList.Len() > 0 { + // Idle sessions are available, get one from the top of the idle list. + s = p.idleWriteList.Remove(p.idleWriteList.Front()).(*session) + } else if p.idleList.Len() > 0 { + s = p.idleList.Remove(p.idleList.Front()).(*session) + } + if s != nil { + s.setIdleList(nil) + p.mu.Unlock() + // From here, session is no longer in idle list, so healthcheck workers won't destroy it. + // If healthcheck workers failed to schedule healthcheck for the session timely, do the check here. + // Because session check is still much cheaper than session creation, they should be reused as much as possible. + if !p.isHealthy(s) { + continue + } + if !s.isWritePrepared() { + if err = s.prepareForWrite(ctx); err != nil { + return nil, toSpannerError(err) + } + } + return &sessionHandle{session: s}, nil + } + // Idle list is empty, block if session pool has reached max session creation concurrency or max number of open sessions. 
+ if (p.MaxOpened > 0 && p.numOpened >= p.MaxOpened) || (p.MaxBurst > 0 && p.createReqs >= p.MaxBurst) { + mayGetSession := p.mayGetSession + p.mu.Unlock() + select { + case <-ctx.Done(): + return nil, errGetSessionTimeout() + case <-mayGetSession: + } + continue + } + + // Take budget before the actual session creation. + p.numOpened++ + p.createReqs++ + p.mu.Unlock() + if s, err = p.createSession(ctx); err != nil { + return nil, toSpannerError(err) + } + if err = s.prepareForWrite(ctx); err != nil { + return nil, toSpannerError(err) + } + return &sessionHandle{session: s}, nil + } +} + +// recycle puts session s back to the session pool's idle list, it returns true if the session pool successfully recycles session s. +func (p *sessionPool) recycle(s *session) bool { + p.mu.Lock() + defer p.mu.Unlock() + if !s.isValid() || !p.valid { + // Reject the session if session is invalid or pool itself is invalid. + return false + } + if p.MaxSessionAge != 0 && s.createTime.Add(p.MaxSessionAge).Before(time.Now()) && p.numOpened > p.MinOpened { + // session expires and number of opened sessions exceeds MinOpened, let the session destroy itself. + return false + } + // Hot sessions will be converging at the front of the list, cold sessions will be evicted by healthcheck workers. + if s.isWritePrepared() { + s.setIdleList(p.idleWriteList.PushFront(s)) + } else { + s.setIdleList(p.idleList.PushFront(s)) + } + // Broadcast that a session has been returned to idle list. + close(p.mayGetSession) + p.mayGetSession = make(chan struct{}) + return true +} + +// remove atomically removes session s from the session pool and invalidates s. +// If isExpire == true, the removal is triggered by session expiration and in such cases, only idle sessions can be removed. 
+func (p *sessionPool) remove(s *session, isExpire bool) bool { + p.mu.Lock() + defer p.mu.Unlock() + if isExpire && (p.numOpened <= p.MinOpened || s.getIdleList() == nil) { + // Don't expire session if the session is not in idle list (in use), or if number of open sessions is going below p.MinOpened. + return false + } + ol := s.setIdleList(nil) + // If the session is in the idlelist, remove it. + if ol != nil { + // Remove from whichever list it is in. + p.idleList.Remove(ol) + p.idleWriteList.Remove(ol) + } + if s.invalidate() { + // Decrease the number of opened sessions. + p.numOpened-- + // Broadcast that a session has been destroyed. + close(p.mayGetSession) + p.mayGetSession = make(chan struct{}) + return true + } + return false +} + +// hcHeap implements heap.Interface. It is used to create the priority queue for session healthchecks. +type hcHeap struct { + sessions []*session +} + +// Len impelemnts heap.Interface.Len. +func (h hcHeap) Len() int { + return len(h.sessions) +} + +// Less implements heap.Interface.Less. +func (h hcHeap) Less(i, j int) bool { + return h.sessions[i].getNextCheck().Before(h.sessions[j].getNextCheck()) +} + +// Swap implements heap.Interface.Swap. +func (h hcHeap) Swap(i, j int) { + h.sessions[i], h.sessions[j] = h.sessions[j], h.sessions[i] + h.sessions[i].setHcIndex(i) + h.sessions[j].setHcIndex(j) +} + +// Push implements heap.Interface.Push. +func (h *hcHeap) Push(s interface{}) { + ns := s.(*session) + ns.setHcIndex(len(h.sessions)) + h.sessions = append(h.sessions, ns) +} + +// Pop implements heap.Interface.Pop. +func (h *hcHeap) Pop() interface{} { + old := h.sessions + n := len(old) + s := old[n-1] + h.sessions = old[:n-1] + s.setHcIndex(-1) + return s +} + +// healthChecker performs periodical healthchecks on registered sessions. +type healthChecker struct { + // mu protects concurrent access to hcQueue. + mu sync.Mutex + // queue is the priority queue for session healthchecks. 
Sessions with lower nextCheck rank higher in the queue. + queue hcHeap + // interval is the average interval between two healthchecks on a session. + interval time.Duration + // workers is the number of concurrent healthcheck workers. + workers int + // waitWorkers waits for all healthcheck workers to exit + waitWorkers sync.WaitGroup + // pool is the underlying session pool. + pool *sessionPool + // closed marks if a healthChecker has been closed. + closed bool +} + +// newHealthChecker initializes new instance of healthChecker. +func newHealthChecker(interval time.Duration, workers int, pool *sessionPool) *healthChecker { + if workers <= 0 { + workers = 1 + } + hc := &healthChecker{ + interval: interval, + workers: workers, + pool: pool, + } + for i := 0; i < hc.workers; i++ { + hc.waitWorkers.Add(1) + go hc.worker(i) + } + return hc +} + +// close closes the healthChecker and waits for all healthcheck workers to exit. +func (hc *healthChecker) close() { + hc.mu.Lock() + hc.closed = true + hc.mu.Unlock() + hc.waitWorkers.Wait() +} + +// isClosing checks if a healthChecker is already closing. +func (hc *healthChecker) isClosing() bool { + hc.mu.Lock() + defer hc.mu.Unlock() + return hc.closed +} + +// getInterval gets the healthcheck interval. +func (hc *healthChecker) getInterval() time.Duration { + hc.mu.Lock() + defer hc.mu.Unlock() + return hc.interval +} + +// scheduledHCLocked schedules next healthcheck on session s with the assumption that hc.mu is being held. +func (hc *healthChecker) scheduledHCLocked(s *session) { + // The next healthcheck will be scheduled after [interval*0.5, interval*1.5) nanoseconds. + nsFromNow := rand.Int63n(int64(hc.interval)) + int64(hc.interval)/2 + s.setNextCheck(time.Now().Add(time.Duration(nsFromNow))) + if hi := s.getHcIndex(); hi != -1 { + // Session is still being tracked by healthcheck workers. + heap.Fix(&hc.queue, hi) + } +} + +// scheduledHC schedules next healthcheck on session s. It is safe to be called concurrently. 
+func (hc *healthChecker) scheduledHC(s *session) { + hc.mu.Lock() + defer hc.mu.Unlock() + hc.scheduledHCLocked(s) +} + +// register registers a session with healthChecker for periodical healthcheck. +func (hc *healthChecker) register(s *session) { + hc.mu.Lock() + defer hc.mu.Unlock() + hc.scheduledHCLocked(s) + heap.Push(&hc.queue, s) +} + +// unregister unregisters a session from healthcheck queue. +func (hc *healthChecker) unregister(s *session) { + hc.mu.Lock() + defer hc.mu.Unlock() + oi := s.setHcIndex(-1) + if oi >= 0 { + heap.Remove(&hc.queue, oi) + } +} + +// markDone marks that health check for session has been performed. +func (hc *healthChecker) markDone(s *session) { + hc.mu.Lock() + defer hc.mu.Unlock() + s.checkingHealth = false +} + +// healthCheck checks the health of the session and pings it if needed. +func (hc *healthChecker) healthCheck(s *session) { + defer hc.markDone(s) + if !s.pool.isValid() { + // Session pool is closed, perform a garbage collection. + s.destroy(false) + return + } + if s.pool.MaxSessionAge != 0 && s.createTime.Add(s.pool.MaxSessionAge).Before(time.Now()) { + // Session reaches its maximum age, retire it. Failing that try to refresh it. + if s.destroy(true) || !s.refreshIdle() { + return + } + } + if err := s.ping(); shouldDropSession(err) { + // Ping failed, destroy the session. + s.destroy(false) + } +} + +// worker performs the healthcheck on sessions in healthChecker's priority queue. +func (hc *healthChecker) worker(i int) { + if log.V(2) { + log.Info("Starting health check worker %v", i) + } + // Returns a session which we should ping to keep it alive. + getNextForPing := func() *session { + hc.pool.mu.Lock() + defer hc.pool.mu.Unlock() + hc.mu.Lock() + defer hc.mu.Unlock() + if hc.queue.Len() <= 0 { + // Queue is empty. + return nil + } + s := hc.queue.sessions[0] + if s.getNextCheck().After(time.Now()) && hc.pool.valid { + // All sessions have been checked recently. 
+ return nil + } + hc.scheduledHCLocked(s) + if !s.checkingHealth { + s.checkingHealth = true + return s + } + return nil + } + + // Returns a session which we should prepare for write. + getNextForTx := func() *session { + hc.pool.mu.Lock() + defer hc.pool.mu.Unlock() + if hc.pool.shouldPrepareWrite() { + if hc.pool.idleList.Len() > 0 && hc.pool.valid { + hc.mu.Lock() + defer hc.mu.Unlock() + if hc.pool.idleList.Front().Value.(*session).checkingHealth { + return nil + } + session := hc.pool.idleList.Remove(hc.pool.idleList.Front()).(*session) + session.checkingHealth = true + hc.pool.prepareReqs++ + return session + } + } + return nil + } + + for { + if hc.isClosing() { + if log.V(2) { + log.Info("Closing health check worker %v", i) + } + // Exit when the pool has been closed and all sessions have been destroyed + // or when health checker has been closed. + hc.waitWorkers.Done() + return + } + ws := getNextForTx() + if ws != nil { + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + defer cancel() + ws.prepareForWrite(contextWithMetadata(ctx, hc.pool.md)) + hc.pool.recycle(ws) + hc.pool.mu.Lock() + hc.pool.prepareReqs-- + hc.pool.mu.Unlock() + hc.markDone(ws) + } + rs := getNextForPing() + if rs == nil { + if ws == nil { + // No work to be done so sleep to avoid burning cpu + pause := int64(100 * time.Millisecond) + if pause > int64(hc.interval) { + pause = int64(hc.interval) + } + <-time.After(time.Duration(rand.Int63n(pause) + pause/2)) + } + continue + } + hc.healthCheck(rs) + } +} + +// shouldDropSession returns true if a particular error leads to the removal of a session +func shouldDropSession(err error) bool { + if err == nil { + return false + } + // If a Cloud Spanner can no longer locate the session (for example, if session is garbage collected), then caller + // should not try to return the session back into the session pool. + // TODO: once gRPC can return auxilary error information, stop parsing the error message. 
+ if ErrCode(err) == codes.NotFound && strings.Contains(ErrDesc(err), "Session not found:") { + return true + } + return false +} diff --git a/vendor/cloud.google.com/go/spanner/session_test.go b/vendor/cloud.google.com/go/spanner/session_test.go new file mode 100644 index 00000000..7c3d4f88 --- /dev/null +++ b/vendor/cloud.google.com/go/spanner/session_test.go @@ -0,0 +1,792 @@ +/* +Copyright 2017 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package spanner + +import ( + "container/heap" + "math/rand" + "reflect" + "sync" + "testing" + "time" + + "golang.org/x/net/context" + + "cloud.google.com/go/spanner/internal/testutil" + sppb "google.golang.org/genproto/googleapis/spanner/v1" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" +) + +// setup prepares test environment for regular session pool tests. +func setup(t *testing.T, spc SessionPoolConfig) (sp *sessionPool, sc *testutil.MockCloudSpannerClient, cancel func()) { + sc = testutil.NewMockCloudSpannerClient(t) + spc.getRPCClient = func() (sppb.SpannerClient, error) { + return sc, nil + } + spc.HealthCheckInterval = 50 * time.Millisecond + sp, err := newSessionPool("mockdb", spc, nil) + if err != nil { + t.Fatalf("cannot create session pool: %v", err) + } + cancel = func() { + sp.close() + } + return +} + +// TestSessionCreation tests session creation during sessionPool.Take(). 
+func TestSessionCreation(t *testing.T) { + sp, sc, cancel := setup(t, SessionPoolConfig{}) + defer cancel() + // Take three sessions from session pool, this should trigger session pool to create three new sessions. + shs := make([]*sessionHandle, 3) + // gotDs holds the unique sessions taken from session pool. + gotDs := map[string]bool{} + for i := 0; i < len(shs); i++ { + var err error + shs[i], err = sp.take(context.Background()) + if err != nil { + t.Errorf("failed to get session(%v): %v", i, err) + } + gotDs[shs[i].getID()] = true + } + if len(gotDs) != len(shs) { + t.Errorf("session pool created %v sessions, want %v", len(gotDs), len(shs)) + } + if wantDs := sc.DumpSessions(); !reflect.DeepEqual(gotDs, wantDs) { + t.Errorf("session pool creates sessions %v, want %v", gotDs, wantDs) + } + // Verify that created sessions are recorded correctly in session pool. + sp.mu.Lock() + if int(sp.numOpened) != len(shs) { + t.Errorf("session pool reports %v open sessions, want %v", sp.numOpened, len(shs)) + } + if sp.createReqs != 0 { + t.Errorf("session pool reports %v session create requests, want 0", int(sp.createReqs)) + } + sp.mu.Unlock() + // Verify that created sessions are tracked correctly by healthcheck queue. + hc := sp.hc + hc.mu.Lock() + if hc.queue.Len() != len(shs) { + t.Errorf("healthcheck queue length = %v, want %v", hc.queue.Len(), len(shs)) + } + for _, s := range hc.queue.sessions { + if !gotDs[s.getID()] { + t.Errorf("session %v is in healthcheck queue, but it is not created by session pool", s.getID()) + } + } + hc.mu.Unlock() +} + +// TestTakeFromIdleList tests taking sessions from session pool's idle list. +func TestTakeFromIdleList(t *testing.T) { + sp, sc, cancel := setup(t, SessionPoolConfig{}) + defer cancel() + // Take ten sessions from session pool and recycle them. 
+ shs := make([]*sessionHandle, 10) + for i := 0; i < len(shs); i++ { + var err error + shs[i], err = sp.take(context.Background()) + if err != nil { + t.Errorf("failed to get session(%v): %v", i, err) + } + } + for i := 0; i < len(shs); i++ { + shs[i].recycle() + } + // Further session requests from session pool won't cause mockclient to create more sessions. + wantSessions := sc.DumpSessions() + // Take ten sessions from session pool again, this time all sessions should come from idle list. + gotSessions := map[string]bool{} + for i := 0; i < len(shs); i++ { + sh, err := sp.take(context.Background()) + if err != nil { + t.Errorf("cannot take session from session pool: %v", err) + } + gotSessions[sh.getID()] = true + } + if len(gotSessions) != 10 { + t.Errorf("got %v unique sessions, want 10", len(gotSessions)) + } + if !reflect.DeepEqual(gotSessions, wantSessions) { + t.Errorf("got sessions: %v, want %v", gotSessions, wantSessions) + } +} + +// TesttakeWriteSessionFromIdleList tests taking write sessions from session pool's idle list. +func TestTakeWriteSessionFromIdleList(t *testing.T) { + sp, sc, cancel := setup(t, SessionPoolConfig{}) + defer cancel() + act := testutil.NewAction("Begin", nil) + acts := make([]testutil.Action, 20) + for i := 0; i < len(acts); i++ { + acts[i] = act + } + sc.SetActions(acts...) + // Take ten sessions from session pool and recycle them. + shs := make([]*sessionHandle, 10) + for i := 0; i < len(shs); i++ { + var err error + shs[i], err = sp.takeWriteSession(context.Background()) + if err != nil { + t.Errorf("failed to get session(%v): %v", i, err) + } + } + for i := 0; i < len(shs); i++ { + shs[i].recycle() + } + // Further session requests from session pool won't cause mockclient to create more sessions. + wantSessions := sc.DumpSessions() + // Take ten sessions from session pool again, this time all sessions should come from idle list. 
+ gotSessions := map[string]bool{} + for i := 0; i < len(shs); i++ { + sh, err := sp.takeWriteSession(context.Background()) + if err != nil { + t.Errorf("cannot take session from session pool: %v", err) + } + gotSessions[sh.getID()] = true + } + if len(gotSessions) != 10 { + t.Errorf("got %v unique sessions, want 10", len(gotSessions)) + } + if !reflect.DeepEqual(gotSessions, wantSessions) { + t.Errorf("got sessions: %v, want %v", gotSessions, wantSessions) + } +} + +// TestTakeFromIdleListChecked tests taking sessions from session pool's idle list, but with a extra ping check. +func TestTakeFromIdleListChecked(t *testing.T) { + if testing.Short() { + t.SkipNow() + } + sp, sc, cancel := setup(t, SessionPoolConfig{}) + defer cancel() + // Stop healthcheck workers to simulate slow pings. + sp.hc.close() + // Create a session and recycle it. + sh, err := sp.take(context.Background()) + if err != nil { + t.Errorf("failed to get session: %v", err) + } + wantSid := sh.getID() + sh.recycle() + <-time.After(time.Second) + // Two back-to-back session requests, both of them should return the same session created before and + // none of them should trigger a session ping. + for i := 0; i < 2; i++ { + // Take the session from the idle list and recycle it. + sh, err = sp.take(context.Background()) + if err != nil { + t.Errorf("%v - failed to get session: %v", i, err) + } + if gotSid := sh.getID(); gotSid != wantSid { + t.Errorf("%v - got session id: %v, want %v", i, gotSid, wantSid) + } + // The two back-to-back session requests shouldn't trigger any session pings because sessionPool.Take + // reschedules the next healthcheck. + if got, want := sc.DumpPings(), ([]string{wantSid}); !reflect.DeepEqual(got, want) { + t.Errorf("%v - got ping session requests: %v, want %v", i, got, want) + } + sh.recycle() + } + // Inject session error to mockclient, and take the session from the session pool, the old session should be destroyed and + // the session pool will create a new session. 
+ sc.InjectError("GetSession", grpc.Errorf(codes.NotFound, "Session not found:")) + // Delay to trigger sessionPool.Take to ping the session. + <-time.After(time.Second) + sh, err = sp.take(context.Background()) + if err != nil { + t.Errorf("failed to get session: %v", err) + } + ds := sc.DumpSessions() + if len(ds) != 1 { + t.Errorf("dumped sessions from mockclient: %v, want %v", ds, sh.getID()) + } + if sh.getID() == wantSid { + t.Errorf("sessionPool.Take still returns the same session %v, want it to create a new one", wantSid) + } +} + +// TestTakeFromIdleWriteListChecked tests taking sessions from session pool's idle list, but with a extra ping check. +func TestTakeFromIdleWriteListChecked(t *testing.T) { + if testing.Short() { + t.SkipNow() + } + sp, sc, cancel := setup(t, SessionPoolConfig{}) + defer cancel() + sc.MakeNice() + // Stop healthcheck workers to simulate slow pings. + sp.hc.close() + // Create a session and recycle it. + sh, err := sp.takeWriteSession(context.Background()) + if err != nil { + t.Errorf("failed to get session: %v", err) + } + wantSid := sh.getID() + sh.recycle() + <-time.After(time.Second) + // Two back-to-back session requests, both of them should return the same session created before and + // none of them should trigger a session ping. + for i := 0; i < 2; i++ { + // Take the session from the idle list and recycle it. + sh, err = sp.takeWriteSession(context.Background()) + if err != nil { + t.Errorf("%v - failed to get session: %v", i, err) + } + if gotSid := sh.getID(); gotSid != wantSid { + t.Errorf("%v - got session id: %v, want %v", i, gotSid, wantSid) + } + // The two back-to-back session requests shouldn't trigger any session pings because sessionPool.Take + // reschedules the next healthcheck. 
+ if got, want := sc.DumpPings(), ([]string{wantSid}); !reflect.DeepEqual(got, want) { + t.Errorf("%v - got ping session requests: %v, want %v", i, got, want) + } + sh.recycle() + } + // Inject session error to mockclient, and take the session from the session pool, the old session should be destroyed and + // the session pool will create a new session. + sc.InjectError("GetSession", grpc.Errorf(codes.NotFound, "Session not found:")) + // Delay to trigger sessionPool.Take to ping the session. + <-time.After(time.Second) + sh, err = sp.takeWriteSession(context.Background()) + if err != nil { + t.Errorf("failed to get session: %v", err) + } + ds := sc.DumpSessions() + if len(ds) != 1 { + t.Errorf("dumped sessions from mockclient: %v, want %v", ds, sh.getID()) + } + if sh.getID() == wantSid { + t.Errorf("sessionPool.Take still returns the same session %v, want it to create a new one", wantSid) + } +} + +// TestMaxOpenedSessions tests max open sessions constraint. +func TestMaxOpenedSessions(t *testing.T) { + if testing.Short() { + t.SkipNow() + } + sp, _, cancel := setup(t, SessionPoolConfig{MaxOpened: 1}) + defer cancel() + sh1, err := sp.take(context.Background()) + if err != nil { + t.Errorf("cannot take session from session pool: %v", err) + } + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + defer cancel() + // Session request will timeout due to the max open sessions constraint. + sh2, gotErr := sp.take(ctx) + if wantErr := errGetSessionTimeout(); !reflect.DeepEqual(gotErr, wantErr) { + t.Errorf("the second session retrival returns error %v, want %v", gotErr, wantErr) + } + go func() { + <-time.After(time.Second) + // destroy the first session to allow the next session request to proceed. + sh1.destroy() + }() + // Now session request can be processed because the first session will be destroyed. 
+ sh2, err = sp.take(context.Background()) + if err != nil { + t.Errorf("after the first session is destroyed, session retrival still returns error %v, want nil", err) + } + if !sh2.session.isValid() || sh2.getID() == "" { + t.Errorf("got invalid session: %v", sh2.session) + } +} + +// TestMinOpenedSessions tests min open session constraint. +func TestMinOpenedSessions(t *testing.T) { + sp, _, cancel := setup(t, SessionPoolConfig{MinOpened: 1}) + defer cancel() + // Take ten sessions from session pool and recycle them. + var ss []*session + var shs []*sessionHandle + for i := 0; i < 10; i++ { + sh, err := sp.take(context.Background()) + if err != nil { + t.Errorf("failed to get session(%v): %v", i, err) + } + ss = append(ss, sh.session) + shs = append(shs, sh) + sh.recycle() + } + for _, sh := range shs { + sh.recycle() + } + // Simulate session expiration. + for _, s := range ss { + s.destroy(true) + } + sp.mu.Lock() + defer sp.mu.Unlock() + // There should be still one session left in idle list due to the min open sessions constraint. + if sp.idleList.Len() != 1 { + t.Errorf("got %v sessions in idle list, want 1", sp.idleList.Len()) + } +} + +// TestMaxBurst tests max burst constraint. +func TestMaxBurst(t *testing.T) { + if testing.Short() { + t.SkipNow() + } + sp, sc, cancel := setup(t, SessionPoolConfig{MaxBurst: 1}) + defer cancel() + // Will cause session creation RPC to be retried forever. + sc.InjectError("CreateSession", grpc.Errorf(codes.Unavailable, "try later")) + // This session request will never finish until the injected error is cleared. + go sp.take(context.Background()) + // Poll for the execution of the first session request. + for { + sp.mu.Lock() + cr := sp.createReqs + sp.mu.Unlock() + if cr == 0 { + <-time.After(time.Second) + continue + } + // The first session request is being executed. 
+ break + } + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + defer cancel() + sh, gotErr := sp.take(ctx) + // Since MaxBurst == 1, the second session request should block. + if wantErr := errGetSessionTimeout(); !reflect.DeepEqual(gotErr, wantErr) { + t.Errorf("session retrival returns error %v, want %v", gotErr, wantErr) + } + // Let the first session request succeed. + sc.InjectError("CreateSession", nil) + // Now new session request can proceed because the first session request will eventually succeed. + sh, err := sp.take(context.Background()) + if err != nil { + t.Errorf("session retrival returns error %v, want nil", err) + } + if !sh.session.isValid() || sh.getID() == "" { + t.Errorf("got invalid session: %v", sh.session) + } +} + +// TestSessionrecycle tests recycling sessions. +func TestSessionRecycle(t *testing.T) { + if testing.Short() { + t.SkipNow() + } + sp, _, cancel := setup(t, SessionPoolConfig{MaxSessionAge: 100 * time.Millisecond, MinOpened: 1}) + // Healthcheck is explicitly turned off in this test because it might aggressively expire sessions in idle list. + sp.hc.close() + defer cancel() + var ss []*session + shs := make([]*sessionHandle, 2) + for i := 0; i < len(shs); i++ { + var err error + shs[i], err = sp.take(context.Background()) + if err != nil { + t.Errorf("cannot get the session %v: %v", i, err) + } + ss = append(ss, shs[i].session) + } + // recycle the first session immediately. + shs[0].recycle() + // Let the second session expire. + <-time.After(time.Second) + // recycle the second session. + shs[1].recycle() + // Now the first session should be still valid, but the second session should have been destroyed. + if !ss[0].isValid() { + t.Errorf("the first session (%v) is invalid, want it to be valid", ss[0]) + } + if ss[1].isValid() { + t.Errorf("the second session (%v) is valid, want it to be invalid", ss[1]) + } +} + +// TestSessionDestroy tests destroying sessions. 
+func TestSessionDestroy(t *testing.T) { + sp, _, cancel := setup(t, SessionPoolConfig{MinOpened: 1}) + defer cancel() + sh, err := sp.take(context.Background()) + if err != nil { + t.Errorf("cannot get session from session pool: %v", err) + } + s := sh.session + sh.recycle() + if d := s.destroy(true); d || !s.isValid() { + // Session should be remaining because of min open sessions constraint. + t.Errorf("session %v was destroyed in expiration mode, want it to stay alive", s) + } + if d := s.destroy(false); !d || s.isValid() { + // Session should be destroyed. + t.Errorf("failed to destroy session %s", s) + } +} + +// TestHcHeap tests heap operation on top of hcHeap. +func TestHcHeap(t *testing.T) { + in := []*session{ + &session{nextCheck: time.Unix(10, 0)}, + &session{nextCheck: time.Unix(0, 5)}, + &session{nextCheck: time.Unix(1, 8)}, + &session{nextCheck: time.Unix(11, 7)}, + &session{nextCheck: time.Unix(6, 3)}, + } + want := []*session{ + &session{nextCheck: time.Unix(1, 8), hcIndex: 0}, + &session{nextCheck: time.Unix(6, 3), hcIndex: 1}, + &session{nextCheck: time.Unix(8, 2), hcIndex: 2}, + &session{nextCheck: time.Unix(10, 0), hcIndex: 3}, + &session{nextCheck: time.Unix(11, 7), hcIndex: 4}, + } + hh := hcHeap{} + for _, s := range in { + heap.Push(&hh, s) + } + // Change top of the heap and do a adjustment. + hh.sessions[0].nextCheck = time.Unix(8, 2) + heap.Fix(&hh, 0) + for idx := 0; hh.Len() > 0; idx++ { + got := heap.Pop(&hh).(*session) + want[idx].hcIndex = -1 + if !reflect.DeepEqual(got, want[idx]) { + t.Errorf("%v: heap.Pop returns %v, want %v", idx, got, want[idx]) + } + } +} + +// TestHealthCheckScheduler tests if healthcheck workers can schedule and perform healthchecks properly. +func TestHealthCheckScheduler(t *testing.T) { + if testing.Short() { + t.SkipNow() + } + sp, sc, cancel := setup(t, SessionPoolConfig{}) + defer cancel() + // Create 50 sessions. 
+ ss := []string{} + for i := 0; i < 50; i++ { + sh, err := sp.take(context.Background()) + if err != nil { + t.Errorf("cannot get session from session pool: %v", err) + } + ss = append(ss, sh.getID()) + } + // Sleep for 1s, allowing healthcheck workers to perform some session pings. + <-time.After(time.Second) + dp := sc.DumpPings() + gotPings := map[string]int64{} + for _, p := range dp { + gotPings[p]++ + } + for _, s := range ss { + // The average ping interval is 50ms. + want := int64(time.Second) / int64(50*time.Millisecond) + if got := gotPings[s]; got < want/2 || got > want+want/2 { + t.Errorf("got %v healthchecks on session %v, want it between (%v, %v)", got, s, want/2, want+want/2) + } + } +} + +// Tests that a fractions of sessions are prepared for write by health checker. +func TestWriteSessionsPrepared(t *testing.T) { + if testing.Short() { + t.SkipNow() + } + sp, sc, cancel := setup(t, SessionPoolConfig{WriteSessions: 0.5}) + sc.MakeNice() + defer cancel() + shs := make([]*sessionHandle, 10) + var err error + for i := 0; i < 10; i++ { + shs[i], err = sp.take(context.Background()) + if err != nil { + t.Errorf("cannot get session from session pool: %v", err) + } + } + // Now there are 10 sessions in the pool. Release them. + for _, sh := range shs { + sh.recycle() + } + // Sleep for 1s, allowing healthcheck workers to invoke begin transaction. + <-time.After(time.Second) + wshs := make([]*sessionHandle, 5) + for i := 0; i < 5; i++ { + wshs[i], err = sp.takeWriteSession(context.Background()) + if err != nil { + t.Errorf("cannot get session from session pool: %v", err) + } + if wshs[i].getTransactionID() == nil { + t.Errorf("got nil transaction id from session pool") + } + } + for _, sh := range wshs { + sh.recycle() + } + <-time.After(time.Second) + // Now force creation of 10 more sessions. 
+ shs = make([]*sessionHandle, 20) + for i := 0; i < 20; i++ { + shs[i], err = sp.take(context.Background()) + if err != nil { + t.Errorf("cannot get session from session pool: %v", err) + } + } + // Now there are 20 sessions in the pool. Release them. + for _, sh := range shs { + sh.recycle() + } + <-time.After(time.Second) + if sp.idleWriteList.Len() != 10 { + t.Errorf("Expect 10 write prepared session, got: %d", sp.idleWriteList.Len()) + } +} + +// TestTakeFromWriteQueue tests that sessionPool.take() returns write prepared sessions as well. +func TestTakeFromWriteQueue(t *testing.T) { + if testing.Short() { + t.SkipNow() + } + sp, sc, cancel := setup(t, SessionPoolConfig{MaxOpened: 1, WriteSessions: 1.0}) + sc.MakeNice() + defer cancel() + sh, err := sp.take(context.Background()) + if err != nil { + t.Errorf("cannot get session from session pool: %v", err) + } + sh.recycle() + <-time.After(time.Second) + // The session should now be in write queue but take should also return it. + if sp.idleWriteList.Len() == 0 { + t.Errorf("write queue unexpectedly empty") + } + if sp.idleList.Len() != 0 { + t.Errorf("read queue not empty") + } + sh, err = sp.take(context.Background()) + if err != nil { + t.Errorf("cannot get session from session pool: %v", err) + } + sh.recycle() +} + +// TestSessionHealthCheck tests healthchecking cases. +func TestSessionHealthCheck(t *testing.T) { + if testing.Short() { + t.SkipNow() + } + sp, sc, cancel := setup(t, SessionPoolConfig{MaxSessionAge: 2 * time.Second}) + defer cancel() + // Test pinging sessions. + sh, err := sp.take(context.Background()) + if err != nil { + t.Errorf("cannot get session from session pool: %v", err) + } + <-time.After(time.Second) + pings := sc.DumpPings() + if len(pings) == 0 || pings[0] != sh.getID() { + t.Errorf("healthchecker didn't send any ping to session %v", sh.getID()) + } + // Test expiring sessions. + s := sh.session + sh.recycle() + // Sleep enough long for session in idle list to expire. 
+ <-time.After(2 * time.Second) + if s.isValid() { + t.Errorf("session(%v) is still alive, want it to expire", s) + } + // Test broken session detection. + sh, err = sp.take(context.Background()) + if err != nil { + t.Errorf("cannot get session from session pool: %v", err) + } + sc.InjectError("GetSession", grpc.Errorf(codes.NotFound, "Session not found:")) + // Wait for healthcheck workers to find the broken session and tear it down. + <-time.After(1 * time.Second) + if sh.session.isValid() { + t.Errorf("session(%v) is still alive, want it to be dropped by healthcheck workers", s) + } + sc.InjectError("GetSession", nil) + // Test garbage collection. + sh, err = sp.take(context.Background()) + if err != nil { + t.Errorf("cannot get session from session pool: %v", err) + } + sp.close() + if sh.session.isValid() { + t.Errorf("session(%v) is still alive, want it to be garbage collected", s) + } + // Test session id refresh. + // Recreate the session pool with min open sessions constraint. + sp, err = newSessionPool("mockdb", SessionPoolConfig{ + MaxSessionAge: time.Second, + MinOpened: 1, + getRPCClient: func() (sppb.SpannerClient, error) { + return sc, nil + }, + HealthCheckInterval: 50 * time.Millisecond, + }, nil) + sh, err = sp.take(context.Background()) + if err != nil { + t.Errorf("cannot get session from session pool: %v", err) + } + oid := sh.getID() + s = sh.session + sh.recycle() + <-time.After(2 * time.Second) + nid := s.getID() + if nid == "" || nid == oid { + t.Errorf("healthcheck workers failed to refresh session: oid=%v, nid=%v", oid, nid) + } + if gotDs, wantDs := sc.DumpSessions(), (map[string]bool{nid: true}); !reflect.DeepEqual(gotDs, wantDs) { + t.Errorf("sessions in mockclient: %v, want %v", gotDs, wantDs) + } +} + +// TestStressSessionPool does stress test on session pool by the following concurrent operations: +// 1) Test worker gets a session from the pool. +// 2) Test worker turns a session back into the pool. 
+// 3) Test worker destroys a session got from the pool. +// 4) Healthcheck retires an old session from the pool's idlelist by refreshing its session id. +// 5) Healthcheck destroys a broken session (because a worker has already destroyed it). +// 6) Test worker closes the session pool. +// +// During the test, it is expected that all sessions that are taken from session pool remains valid and +// when all test workers and healthcheck workers exit, mockclient, session pool and healthchecker should be in consistent state. +func TestStressSessionPool(t *testing.T) { + // Use concurrent workers to test different session pool built from different configurations. + if testing.Short() { + t.SkipNow() + } + for ti, cfg := range []SessionPoolConfig{ + SessionPoolConfig{}, + SessionPoolConfig{MaxSessionAge: 20 * time.Millisecond}, + SessionPoolConfig{MinOpened: 10, MaxOpened: 100}, + SessionPoolConfig{MaxBurst: 50}, + SessionPoolConfig{MaxSessionAge: 20 * time.Millisecond, MinOpened: 10, MaxOpened: 200, MaxBurst: 5}, + SessionPoolConfig{MaxSessionAge: 20 * time.Millisecond, MinOpened: 10, MaxOpened: 200, MaxBurst: 5, WriteSessions: 0.2}, + } { + var wg sync.WaitGroup + // Create a more aggressive session healthchecker to increase test concurrency. + cfg.HealthCheckInterval = 50 * time.Millisecond + cfg.HealthCheckWorkers = 50 + sc := testutil.NewMockCloudSpannerClient(t) + sc.MakeNice() + cfg.getRPCClient = func() (sppb.SpannerClient, error) { + return sc, nil + } + sp, _ := newSessionPool("mockdb", cfg, nil) + for i := 0; i < 100; i++ { + wg.Add(1) + // Schedule a test worker. + go func(idx int, pool *sessionPool, client sppb.SpannerClient) { + defer wg.Done() + // Test worker iterates 1K times and tries different session / session pool operations. + for j := 0; j < 1000; j++ { + if idx%10 == 0 && j >= 900 { + // Close the pool in selected set of workers during the middle of the test. + pool.close() + } + // Take a write sessions ~ 20% of the times. 
+ takeWrite := rand.Intn(5) == 4 + var ( + sh *sessionHandle + gotErr error + ) + if takeWrite { + sh, gotErr = pool.takeWriteSession(context.Background()) + } else { + sh, gotErr = pool.take(context.Background()) + } + if gotErr != nil { + if pool.isValid() { + t.Errorf("%v.%v: pool.take returns error when pool is still valid: %v", ti, idx, gotErr) + } + if wantErr := errInvalidSessionPool(); !reflect.DeepEqual(gotErr, wantErr) { + t.Errorf("%v.%v: got error when pool is closed: %v, want %v", ti, idx, gotErr, wantErr) + } + continue + } + // Verify if session is valid when session pool is valid. Note that if session pool is invalid after sh is taken, + // then sh might be invalidated by healthcheck workers. + if (sh.getID() == "" || sh.session == nil || !sh.session.isValid()) && pool.isValid() { + t.Errorf("%v.%v.%v: pool.take returns invalid session %v", ti, idx, takeWrite, sh.session) + } + if takeWrite && sh.getTransactionID() == nil { + t.Errorf("%v.%v: pool.takeWriteSession returns session %v without transaction", ti, idx, sh.session) + } + if int64(cfg.MaxSessionAge) > 0 && rand.Intn(100) < idx { + // Random sleep before destroying/recycling the session, to give healthcheck worker a chance to step in. + <-time.After(time.Duration(rand.Int63n(int64(cfg.MaxSessionAge)))) + } + if rand.Intn(100) < idx { + // destroy the session. + sh.destroy() + continue + } + // recycle the session. + sh.recycle() + } + }(i, sp, sc) + } + wg.Wait() + sp.hc.close() + // Here the states of healthchecker, session pool and mockclient are stable. + idleSessions := map[string]bool{} + hcSessions := map[string]bool{} + mockSessions := sc.DumpSessions() + // Dump session pool's idle list. 
+ for sl := sp.idleList.Front(); sl != nil; sl = sl.Next() { + s := sl.Value.(*session) + if idleSessions[s.getID()] { + t.Errorf("%v: found duplicated session in idle list: %v", ti, s.getID()) + } + idleSessions[s.getID()] = true + } + for sl := sp.idleWriteList.Front(); sl != nil; sl = sl.Next() { + s := sl.Value.(*session) + if idleSessions[s.getID()] { + t.Errorf("%v: found duplicated session in idle write list: %v", ti, s.getID()) + } + idleSessions[s.getID()] = true + } + if int(sp.numOpened) != len(idleSessions) { + t.Errorf("%v: number of opened sessions (%v) != number of idle sessions (%v)", ti, sp.numOpened, len(idleSessions)) + } + if sp.createReqs != 0 { + t.Errorf("%v: number of pending session creations = %v, want 0", ti, sp.createReqs) + } + // Dump healthcheck queue. + for _, s := range sp.hc.queue.sessions { + if hcSessions[s.getID()] { + t.Errorf("%v: found duplicated session in healthcheck queue: %v", ti, s.getID()) + } + hcSessions[s.getID()] = true + } + // Verify that idleSessions == hcSessions == mockSessions. + if !reflect.DeepEqual(idleSessions, hcSessions) { + t.Errorf("%v: sessions in idle list (%v) != sessions in healthcheck queue (%v)", ti, idleSessions, hcSessions) + } + if !reflect.DeepEqual(hcSessions, mockSessions) { + t.Errorf("%v: sessions in healthcheck queue (%v) != sessions in mockclient (%v)", ti, hcSessions, mockSessions) + } + sp.close() + mockSessions = sc.DumpSessions() + if len(mockSessions) != 0 { + t.Errorf("Found live sessions: %v", mockSessions) + } + } +} diff --git a/vendor/cloud.google.com/go/spanner/spanner_test.go b/vendor/cloud.google.com/go/spanner/spanner_test.go new file mode 100644 index 00000000..e5070df4 --- /dev/null +++ b/vendor/cloud.google.com/go/spanner/spanner_test.go @@ -0,0 +1,955 @@ +/* +Copyright 2017 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package spanner + +import ( + "fmt" + "math" + "reflect" + "strings" + "sync" + "testing" + "time" + + "cloud.google.com/go/civil" + "cloud.google.com/go/internal/testutil" + database "cloud.google.com/go/spanner/admin/database/apiv1" + "golang.org/x/net/context" + "google.golang.org/api/iterator" + "google.golang.org/api/option" + + adminpb "google.golang.org/genproto/googleapis/spanner/admin/database/v1" +) + +var ( + // testProjectID specifies the project used for testing. + // It can be changed by setting environment variable GCLOUD_TESTS_GOLANG_PROJECT_ID. + testProjectID = testutil.ProjID() + // testInstanceID specifies the Cloud Spanner instance used for testing. + testInstanceID = "go-integration-test" + + // client is a spanner.Client. + client *Client + // admin is a spanner.DatabaseAdminClient. + admin *database.DatabaseAdminClient + // db is the path of the testing database. + db string + // dbName is the short name of the testing database. + dbName string +) + +// prepare initializes Cloud Spanner testing DB and clients. +func prepare(ctx context.Context, t *testing.T) error { + if testing.Short() { + t.Skip("Integration tests skipped in short mode") + } + if testProjectID == "" { + t.Skip("Integration tests skipped: GCLOUD_TESTS_GOLANG_PROJECT_ID is missing") + } + ts := testutil.TokenSource(ctx, AdminScope, Scope) + if ts == nil { + t.Skip("Integration test skipped: cannot get service account credential from environment variable %v", "GCLOUD_TESTS_GOLANG_KEY") + } + var err error + // Create Admin client and Data client. 
+ // TODO: Remove the EndPoint option once this is the default. + admin, err = database.NewDatabaseAdminClient(ctx, option.WithTokenSource(ts), option.WithEndpoint("spanner.googleapis.com:443")) + if err != nil { + t.Errorf("cannot create admin client: %v", err) + return err + } + // Construct test DB name. + dbName = fmt.Sprintf("gotest_%v", time.Now().UnixNano()) + db = fmt.Sprintf("projects/%v/instances/%v/databases/%v", testProjectID, testInstanceID, dbName) + // Create database and tables. + op, err := admin.CreateDatabase(ctx, &adminpb.CreateDatabaseRequest{ + Parent: fmt.Sprintf("projects/%v/instances/%v", testProjectID, testInstanceID), + CreateStatement: "CREATE DATABASE " + dbName, + ExtraStatements: []string{ + `CREATE TABLE Singers ( + SingerId INT64 NOT NULL, + FirstName STRING(1024), + LastName STRING(1024), + SingerInfo BYTES(MAX) + ) PRIMARY KEY (SingerId)`, + `CREATE INDEX SingerByName ON Singers(FirstName, LastName)`, + `CREATE TABLE Accounts ( + AccountId INT64 NOT NULL, + Nickname STRING(100), + Balance INT64 NOT NULL, + ) PRIMARY KEY (AccountId)`, + `CREATE INDEX AccountByNickname ON Accounts(Nickname) STORING (Balance)`, + `CREATE TABLE Types ( + RowID INT64 NOT NULL, + String STRING(MAX), + StringArray ARRAY, + Bytes BYTES(MAX), + BytesArray ARRAY, + Int64a INT64, + Int64Array ARRAY, + Bool BOOL, + BoolArray ARRAY, + Float64 FLOAT64, + Float64Array ARRAY, + Date DATE, + DateArray ARRAY, + Timestamp TIMESTAMP, + TimestampArray ARRAY, + ) PRIMARY KEY (RowID)`, + }, + }) + if err != nil { + t.Errorf("cannot create testing DB %v: %v", db, err) + return err + } + if _, err := op.Wait(ctx); err != nil { + t.Errorf("cannot create testing DB %v: %v", db, err) + return err + } + client, err = NewClientWithConfig(ctx, db, ClientConfig{ + SessionPoolConfig: SessionPoolConfig{ + WriteSessions: 0.2, + }, + }, option.WithTokenSource(ts)) + if err != nil { + t.Errorf("cannot create data client on DB %v: %v", db, err) + return err + } + return nil +} + +// 
tearDown tears down the testing environment created by prepare(). +func tearDown(ctx context.Context, t *testing.T) { + if admin != nil { + if err := admin.DropDatabase(ctx, &adminpb.DropDatabaseRequest{db}); err != nil { + t.Logf("failed to drop testing database: %v, might need a manual removal", db) + } + admin.Close() + } + if client != nil { + client.Close() + } + admin = nil + client = nil + db = "" +} + +// Test SingleUse transaction. +func TestSingleUse(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second) + defer cancel() + // Set up testing environment. + if err := prepare(ctx, t); err != nil { + // If prepare() fails, tear down whatever that's already up. + tearDown(ctx, t) + t.Fatalf("cannot set up testing environment: %v", err) + } + // After all tests, tear down testing environment. + defer tearDown(ctx, t) + + writes := []struct { + row []interface{} + ts time.Time + }{ + {row: []interface{}{1, "Marc", "Foo"}}, + {row: []interface{}{2, "Tars", "Bar"}}, + {row: []interface{}{3, "Alpha", "Beta"}}, + {row: []interface{}{4, "Last", "End"}}, + } + // Try to write four rows through the Apply API. + for i, w := range writes { + var err error + m := InsertOrUpdate("Singers", + []string{"SingerId", "FirstName", "LastName"}, + w.row) + if writes[i].ts, err = client.Apply(ctx, []*Mutation{m}, ApplyAtLeastOnce()); err != nil { + t.Fatal(err) + } + } + + // For testing timestamp bound staleness. + <-time.After(time.Second) + + // Test reading rows with different timestamp bounds. + for i, test := range []struct { + want [][]interface{} + tb TimestampBound + checkTs func(time.Time) error + }{ + { + // strong + [][]interface{}{{int64(1), "Marc", "Foo"}, {int64(3), "Alpha", "Beta"}, {int64(4), "Last", "End"}}, + StrongRead(), + func(ts time.Time) error { + // writes[3] is the last write, all subsequent strong read should have a timestamp larger than that. 
+ if ts.Before(writes[3].ts) { + return fmt.Errorf("read got timestamp %v, want it to be no later than %v", ts, writes[3].ts) + } + return nil + }, + }, + { + // min_read_timestamp + [][]interface{}{{int64(1), "Marc", "Foo"}, {int64(3), "Alpha", "Beta"}, {int64(4), "Last", "End"}}, + MinReadTimestamp(writes[3].ts), + func(ts time.Time) error { + if ts.Before(writes[3].ts) { + return fmt.Errorf("read got timestamp %v, want it to be no later than %v", ts, writes[3].ts) + } + return nil + }, + }, + { + // max_staleness + [][]interface{}{{int64(1), "Marc", "Foo"}, {int64(3), "Alpha", "Beta"}, {int64(4), "Last", "End"}}, + MaxStaleness(time.Second), + func(ts time.Time) error { + if ts.Before(writes[3].ts) { + return fmt.Errorf("read got timestamp %v, want it to be no later than %v", ts, writes[3].ts) + } + return nil + }, + }, + { + // read_timestamp + [][]interface{}{{int64(1), "Marc", "Foo"}, {int64(3), "Alpha", "Beta"}}, + ReadTimestamp(writes[2].ts), + func(ts time.Time) error { + if ts != writes[2].ts { + return fmt.Errorf("read got timestamp %v, expect %v", ts, writes[2].ts) + } + return nil + }, + }, + { + // exact_staleness + nil, + // Specify a staleness which should be already before this test because + // context timeout is set to be 10s. 
+ ExactStaleness(11 * time.Second), + func(ts time.Time) error { + if ts.After(writes[0].ts) { + return fmt.Errorf("read got timestamp %v, want it to be no earlier than %v", ts, writes[0].ts) + } + return nil + }, + }, + } { + // SingleUse.Query + su := client.Single().WithTimestampBound(test.tb) + got, err := readAll(su.Query( + ctx, + Statement{ + "SELECT SingerId, FirstName, LastName FROM Singers WHERE SingerId IN (@id1, @id3, @id4)", + map[string]interface{}{"id1": int64(1), "id3": int64(3), "id4": int64(4)}, + })) + if err != nil { + t.Errorf("%d: SingleUse.Query returns error %v, want nil", i, err) + } + if !reflect.DeepEqual(got, test.want) { + t.Errorf("%d: got unexpected result from SingleUse.Query: %v, want %v", i, got, test.want) + } + rts, err := su.Timestamp() + if err != nil { + t.Errorf("%d: SingleUse.Query doesn't return a timestamp, error: %v", i, err) + } + if err := test.checkTs(rts); err != nil { + t.Errorf("%d: SingleUse.Query doesn't return expected timestamp: %v", i, err) + } + // SingleUse.Read + su = client.Single().WithTimestampBound(test.tb) + got, err = readAll(su.Read(ctx, "Singers", Keys(Key{1}, Key{3}, Key{4}), []string{"SingerId", "FirstName", "LastName"})) + if err != nil { + t.Errorf("%d: SingleUse.Read returns error %v, want nil", i, err) + } + if !reflect.DeepEqual(got, test.want) { + t.Errorf("%d: got unexpected result from SingleUse.Read: %v, want %v", i, got, test.want) + } + rts, err = su.Timestamp() + if err != nil { + t.Errorf("%d: SingleUse.Read doesn't return a timestamp, error: %v", i, err) + } + if err := test.checkTs(rts); err != nil { + t.Errorf("%d: SingleUse.Read doesn't return expected timestamp: %v", i, err) + } + // SingleUse.ReadRow + got = nil + for _, k := range []Key{Key{1}, Key{3}, Key{4}} { + su = client.Single().WithTimestampBound(test.tb) + r, err := su.ReadRow(ctx, "Singers", k, []string{"SingerId", "FirstName", "LastName"}) + if err != nil { + continue + } + v, err := rowToValues(r) + if err != nil { + 
continue + } + got = append(got, v) + rts, err = su.Timestamp() + if err != nil { + t.Errorf("%d: SingleUse.ReadRow(%v) doesn't return a timestamp, error: %v", i, k, err) + } + if err := test.checkTs(rts); err != nil { + t.Errorf("%d: SingleUse.ReadRow(%v) doesn't return expected timestamp: %v", i, k, err) + } + } + if !reflect.DeepEqual(got, test.want) { + t.Errorf("%d: got unexpected results from SingleUse.ReadRow: %v, want %v", i, got, test.want) + } + // SingleUse.ReadUsingIndex + su = client.Single().WithTimestampBound(test.tb) + got, err = readAll(su.ReadUsingIndex(ctx, "Singers", "SingerByName", Keys(Key{"Marc", "Foo"}, Key{"Alpha", "Beta"}, Key{"Last", "End"}), []string{"SingerId", "FirstName", "LastName"})) + if err != nil { + t.Errorf("%d: SingleUse.ReadUsingIndex returns error %v, want nil", i, err) + } + // The results from ReadUsingIndex is sorted by the index rather than primary key. + if len(got) != len(test.want) { + t.Errorf("%d: got unexpected result from SingleUse.ReadUsingIndex: %v, want %v", i, got, test.want) + } + for j, g := range got { + if j > 0 { + prev := got[j-1][1].(string) + got[j-1][2].(string) + curr := got[j][1].(string) + got[j][2].(string) + if strings.Compare(prev, curr) > 0 { + t.Errorf("%d: SingleUse.ReadUsingIndex fails to order rows by index keys, %v should be after %v", i, got[j-1], got[j]) + } + } + found := false + for _, w := range test.want { + if reflect.DeepEqual(g, w) { + found = true + } + } + if !found { + t.Errorf("%d: got unexpected result from SingleUse.ReadUsingIndex: %v, want %v", i, got, test.want) + break + } + } + rts, err = su.Timestamp() + if err != nil { + t.Errorf("%d: SingleUse.ReadUsingIndex doesn't return a timestamp, error: %v", i, err) + } + if err := test.checkTs(rts); err != nil { + t.Errorf("%d: SingleUse.ReadUsingIndex doesn't return expected timestamp: %v", i, err) + } + } +} + +// Test ReadOnlyTransaction. 
The testsuite is mostly like SingleUse, except it +// also tests for a single timestamp across multiple reads. +func TestReadOnlyTransaction(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second) + defer cancel() + // Set up testing environment. + if err := prepare(ctx, t); err != nil { + // If prepare() fails, tear down whatever that's already up. + tearDown(ctx, t) + t.Fatalf("cannot set up testing environment: %v", err) + } + // After all tests, tear down testing environment. + defer tearDown(ctx, t) + + writes := []struct { + row []interface{} + ts time.Time + }{ + {row: []interface{}{1, "Marc", "Foo"}}, + {row: []interface{}{2, "Tars", "Bar"}}, + {row: []interface{}{3, "Alpha", "Beta"}}, + {row: []interface{}{4, "Last", "End"}}, + } + // Try to write four rows through the Apply API. + for i, w := range writes { + var err error + m := InsertOrUpdate("Singers", + []string{"SingerId", "FirstName", "LastName"}, + w.row) + if writes[i].ts, err = client.Apply(ctx, []*Mutation{m}, ApplyAtLeastOnce()); err != nil { + t.Fatal(err) + } + } + + // For testing timestamp bound staleness. + <-time.After(time.Second) + + // Test reading rows with different timestamp bounds. + for i, test := range []struct { + want [][]interface{} + tb TimestampBound + checkTs func(time.Time) error + }{ + // Note: min_read_timestamp and max_staleness are not supported by ReadOnlyTransaction. See + // API document for more details. 
+ { + // strong + [][]interface{}{{int64(1), "Marc", "Foo"}, {int64(3), "Alpha", "Beta"}, {int64(4), "Last", "End"}}, + StrongRead(), + func(ts time.Time) error { + if ts.Before(writes[3].ts) { + return fmt.Errorf("read got timestamp %v, want it to be no later than %v", ts, writes[3].ts) + } + return nil + }, + }, + { + // read_timestamp + [][]interface{}{{int64(1), "Marc", "Foo"}, {int64(3), "Alpha", "Beta"}}, + ReadTimestamp(writes[2].ts), + func(ts time.Time) error { + if ts != writes[2].ts { + return fmt.Errorf("read got timestamp %v, expect %v", ts, writes[2].ts) + } + return nil + }, + }, + { + // exact_staleness + nil, + // Specify a staleness which should be already before this test because + // context timeout is set to be 10s. + ExactStaleness(11 * time.Second), + func(ts time.Time) error { + if ts.After(writes[0].ts) { + return fmt.Errorf("read got timestamp %v, want it to be no earlier than %v", ts, writes[0].ts) + } + return nil + }, + }, + } { + // ReadOnlyTransaction.Query + ro := client.ReadOnlyTransaction().WithTimestampBound(test.tb) + got, err := readAll(ro.Query( + ctx, + Statement{ + "SELECT SingerId, FirstName, LastName FROM Singers WHERE SingerId IN (@id1, @id3, @id4)", + map[string]interface{}{"id1": int64(1), "id3": int64(3), "id4": int64(4)}, + })) + if err != nil { + t.Errorf("%d: ReadOnlyTransaction.Query returns error %v, want nil", i, err) + } + if !reflect.DeepEqual(got, test.want) { + t.Errorf("%d: got unexpected result from ReadOnlyTransaction.Query: %v, want %v", i, got, test.want) + } + rts, err := ro.Timestamp() + if err != nil { + t.Errorf("%d: ReadOnlyTransaction.Query doesn't return a timestamp, error: %v", i, err) + } + if err := test.checkTs(rts); err != nil { + t.Errorf("%d: ReadOnlyTransaction.Query doesn't return expected timestamp: %v", i, err) + } + roTs := rts + // ReadOnlyTransaction.Read + got, err = readAll(ro.Read(ctx, "Singers", Keys(Key{1}, Key{3}, Key{4}), []string{"SingerId", "FirstName", "LastName"})) + if err 
!= nil { + t.Errorf("%d: ReadOnlyTransaction.Read returns error %v, want nil", i, err) + } + if !reflect.DeepEqual(got, test.want) { + t.Errorf("%d: got unexpected result from ReadOnlyTransaction.Read: %v, want %v", i, got, test.want) + } + rts, err = ro.Timestamp() + if err != nil { + t.Errorf("%d: ReadOnlyTransaction.Read doesn't return a timestamp, error: %v", i, err) + } + if err := test.checkTs(rts); err != nil { + t.Errorf("%d: ReadOnlyTransaction.Read doesn't return expected timestamp: %v", i, err) + } + if roTs != rts { + t.Errorf("%d: got two read timestamps: %v, %v, want ReadOnlyTransaction to return always the same read timestamp", i, roTs, rts) + } + // ReadOnlyTransaction.ReadRow + got = nil + for _, k := range []Key{Key{1}, Key{3}, Key{4}} { + r, err := ro.ReadRow(ctx, "Singers", k, []string{"SingerId", "FirstName", "LastName"}) + if err != nil { + continue + } + v, err := rowToValues(r) + if err != nil { + continue + } + got = append(got, v) + rts, err = ro.Timestamp() + if err != nil { + t.Errorf("%d: ReadOnlyTransaction.ReadRow(%v) doesn't return a timestamp, error: %v", i, k, err) + } + if err := test.checkTs(rts); err != nil { + t.Errorf("%d: ReadOnlyTransaction.ReadRow(%v) doesn't return expected timestamp: %v", i, k, err) + } + if roTs != rts { + t.Errorf("%d: got two read timestamps: %v, %v, want ReadOnlyTransaction to return always the same read timestamp", i, roTs, rts) + } + } + if !reflect.DeepEqual(got, test.want) { + t.Errorf("%d: got unexpected results from ReadOnlyTransaction.ReadRow: %v, want %v", i, got, test.want) + } + // SingleUse.ReadUsingIndex + got, err = readAll(ro.ReadUsingIndex(ctx, "Singers", "SingerByName", Keys(Key{"Marc", "Foo"}, Key{"Alpha", "Beta"}, Key{"Last", "End"}), []string{"SingerId", "FirstName", "LastName"})) + if err != nil { + t.Errorf("%d: ReadOnlyTransaction.ReadUsingIndex returns error %v, want nil", i, err) + } + // The results from ReadUsingIndex is sorted by the index rather than primary key. 
+ if len(got) != len(test.want) { + t.Errorf("%d: got unexpected result from ReadOnlyTransaction.ReadUsingIndex: %v, want %v", i, got, test.want) + } + for j, g := range got { + if j > 0 { + prev := got[j-1][1].(string) + got[j-1][2].(string) + curr := got[j][1].(string) + got[j][2].(string) + if strings.Compare(prev, curr) > 0 { + t.Errorf("%d: ReadOnlyTransaction.ReadUsingIndex fails to order rows by index keys, %v should be after %v", i, got[j-1], got[j]) + } + } + found := false + for _, w := range test.want { + if reflect.DeepEqual(g, w) { + found = true + } + } + if !found { + t.Errorf("%d: got unexpected result from ReadOnlyTransaction.ReadUsingIndex: %v, want %v", i, got, test.want) + break + } + } + rts, err = ro.Timestamp() + if err != nil { + t.Errorf("%d: ReadOnlyTransaction.ReadUsingIndex doesn't return a timestamp, error: %v", i, err) + } + if err := test.checkTs(rts); err != nil { + t.Errorf("%d: ReadOnlyTransaction.ReadUsingIndex doesn't return expected timestamp: %v", i, err) + } + if roTs != rts { + t.Errorf("%d: got two read timestamps: %v, %v, want ReadOnlyTransaction to return always the same read timestamp", i, roTs, rts) + } + ro.Close() + } +} + +// Test ReadWriteTransaction. +func TestReadWriteTransaction(t *testing.T) { + // Give a longer deadline because of transaction backoffs. 
+ ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second) + defer cancel() + if err := prepare(ctx, t); err != nil { + tearDown(ctx, t) + t.Fatalf("cannot set up testing environment: %v", err) + } + defer tearDown(ctx, t) + + // Set up two accounts + accounts := []*Mutation{ + Insert("Accounts", []string{"AccountId", "Nickname", "Balance"}, []interface{}{int64(1), "Foo", int64(50)}), + Insert("Accounts", []string{"AccountId", "Nickname", "Balance"}, []interface{}{int64(2), "Bar", int64(1)}), + } + if _, err := client.Apply(ctx, accounts, ApplyAtLeastOnce()); err != nil { + t.Fatal(err) + } + wg := sync.WaitGroup{} + + readBalance := func(iter *RowIterator) (int64, error) { + defer iter.Stop() + var bal int64 + for { + row, err := iter.Next() + if err == iterator.Done { + return bal, nil + } + if err != nil { + return 0, err + } + if err := row.Column(0, &bal); err != nil { + return 0, err + } + } + } + + for i := 0; i < 20; i++ { + wg.Add(1) + go func(iter int) { + defer wg.Done() + _, err := client.ReadWriteTransaction(ctx, func(tx *ReadWriteTransaction) error { + // Query Foo's balance and Bar's balance. + bf, e := readBalance(tx.Query(ctx, + Statement{"SELECT Balance FROM Accounts WHERE AccountId = @id", map[string]interface{}{"id": int64(1)}})) + if e != nil { + return e + } + bb, e := readBalance(tx.Read(ctx, "Accounts", Keys(Key{int64(2)}), []string{"Balance"})) + if e != nil { + return e + } + if bf <= 0 { + return nil + } + bf-- + bb++ + tx.BufferWrite([]*Mutation{ + Update("Accounts", []string{"AccountId", "Balance"}, []interface{}{int64(1), bf}), + Update("Accounts", []string{"AccountId", "Balance"}, []interface{}{int64(2), bb}), + }) + return nil + }) + if err != nil { + t.Fatalf("%d: failed to execute transaction: %v", iter, err) + } + }(i) + } + // Because of context timeout, all goroutines will eventually return. 
+ wg.Wait() + _, err := client.ReadWriteTransaction(ctx, func(tx *ReadWriteTransaction) error { + var bf, bb int64 + r, e := tx.ReadRow(ctx, "Accounts", Key{int64(1)}, []string{"Balance"}) + if e != nil { + return e + } + if ce := r.Column(0, &bf); ce != nil { + return ce + } + bb, e = readBalance(tx.ReadUsingIndex(ctx, "Accounts", "AccountByNickname", Keys(Key{"Bar"}), []string{"Balance"})) + if e != nil { + return e + } + if bf != 30 || bb != 21 { + t.Errorf("Foo's balance is now %v and Bar's balance is now %v, want %v and %v", bf, bb, 30, 21) + } + return nil + }) + if err != nil { + t.Errorf("failed to check balances: %v", err) + } +} + +// Test client recovery on database recreation. +func TestDbRemovalRecovery(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second) + defer cancel() + if err := prepare(ctx, t); err != nil { + tearDown(ctx, t) + t.Fatalf("cannot set up testing environment: %v", err) + } + defer tearDown(ctx, t) + + // Drop the testing database. + if err := admin.DropDatabase(ctx, &adminpb.DropDatabaseRequest{db}); err != nil { + t.Fatalf("failed to drop testing database %v: %v", db, err) + } + + // Now, send the query. + iter := client.Single().Query(ctx, Statement{SQL: "SELECT SingerId FROM Singers"}) + defer iter.Stop() + _, err := iter.Next() + if err == nil { + t.Errorf("client sends query to removed database successfully, want it to fail") + } + + // Recreate database and table. + op, err := admin.CreateDatabase(ctx, &adminpb.CreateDatabaseRequest{ + Parent: fmt.Sprintf("projects/%v/instances/%v", testProjectID, testInstanceID), + CreateStatement: "CREATE DATABASE " + dbName, + ExtraStatements: []string{ + `CREATE TABLE Singers ( + SingerId INT64 NOT NULL, + FirstName STRING(1024), + LastName STRING(1024), + SingerInfo BYTES(MAX) + ) PRIMARY KEY (SingerId)`, + }, + }) + if _, err := op.Wait(ctx); err != nil { + t.Errorf("cannot recreate testing DB %v: %v", db, err) + } + + // Now, send the query again. 
+ iter = client.Single().Query(ctx, Statement{SQL: "SELECT SingerId FROM Singers"}) + defer iter.Stop() + _, err = iter.Next() + if err != nil && err != iterator.Done { + t.Fatalf("failed to send query to database %v: %v", db, err) + } +} + +// Test encoding/decoding non-struct Cloud Spanner types. +func TestBasicTypes(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + if err := prepare(ctx, t); err != nil { + tearDown(ctx, t) + t.Fatalf("cannot set up testing environment: %v", err) + } + defer tearDown(ctx, t) + t1, _ := time.Parse(time.RFC3339Nano, "2016-11-15T15:04:05.999999999Z") + // Boundaries + t2, _ := time.Parse(time.RFC3339Nano, "0001-01-01T00:00:00.000000000Z") + t3, _ := time.Parse(time.RFC3339Nano, "9999-12-31T23:59:59.999999999Z") + d1, _ := civil.ParseDate("2016-11-15") + // Boundaries + d2, _ := civil.ParseDate("0001-01-01") + d3, _ := civil.ParseDate("9999-12-31") + + tests := []struct { + col string + val interface{} + want interface{} + }{ + {col: "String", val: ""}, + {col: "String", val: "", want: NullString{"", true}}, + {col: "String", val: "foo"}, + {col: "String", val: "foo", want: NullString{"foo", true}}, + {col: "String", val: NullString{"bar", true}, want: "bar"}, + {col: "String", val: NullString{"bar", false}, want: NullString{"", false}}, + {col: "StringArray", val: []string(nil), want: []NullString(nil)}, + {col: "StringArray", val: []string{}, want: []NullString{}}, + {col: "StringArray", val: []string{"foo", "bar"}, want: []NullString{{"foo", true}, {"bar", true}}}, + {col: "StringArray", val: []NullString(nil)}, + {col: "StringArray", val: []NullString{}}, + {col: "StringArray", val: []NullString{{"foo", true}, {}}}, + {col: "Bytes", val: []byte{}}, + {col: "Bytes", val: []byte{1, 2, 3}}, + {col: "Bytes", val: []byte(nil)}, + {col: "BytesArray", val: [][]byte(nil)}, + {col: "BytesArray", val: [][]byte{}}, + {col: "BytesArray", val: [][]byte{[]byte{1}, []byte{2, 3}}}, + 
{col: "Int64a", val: 0, want: int64(0)}, + {col: "Int64a", val: -1, want: int64(-1)}, + {col: "Int64a", val: 2, want: int64(2)}, + {col: "Int64a", val: int64(3)}, + {col: "Int64a", val: 4, want: NullInt64{4, true}}, + {col: "Int64a", val: NullInt64{5, true}, want: int64(5)}, + {col: "Int64a", val: NullInt64{6, true}, want: int64(6)}, + {col: "Int64a", val: NullInt64{7, false}, want: NullInt64{0, false}}, + {col: "Int64Array", val: []int(nil), want: []NullInt64(nil)}, + {col: "Int64Array", val: []int{}, want: []NullInt64{}}, + {col: "Int64Array", val: []int{1, 2}, want: []NullInt64{{1, true}, {2, true}}}, + {col: "Int64Array", val: []int64(nil), want: []NullInt64(nil)}, + {col: "Int64Array", val: []int64{}, want: []NullInt64{}}, + {col: "Int64Array", val: []int64{1, 2}, want: []NullInt64{{1, true}, {2, true}}}, + {col: "Int64Array", val: []NullInt64(nil)}, + {col: "Int64Array", val: []NullInt64{}}, + {col: "Int64Array", val: []NullInt64{{1, true}, {}}}, + {col: "Bool", val: false}, + {col: "Bool", val: true}, + {col: "Bool", val: false, want: NullBool{false, true}}, + {col: "Bool", val: true, want: NullBool{true, true}}, + {col: "Bool", val: NullBool{true, true}}, + {col: "Bool", val: NullBool{false, false}}, + {col: "BoolArray", val: []bool(nil), want: []NullBool(nil)}, + {col: "BoolArray", val: []bool{}, want: []NullBool{}}, + {col: "BoolArray", val: []bool{true, false}, want: []NullBool{{true, true}, {false, true}}}, + {col: "BoolArray", val: []NullBool(nil)}, + {col: "BoolArray", val: []NullBool{}}, + {col: "BoolArray", val: []NullBool{{false, true}, {true, true}, {}}}, + {col: "Float64", val: 0.0}, + {col: "Float64", val: 3.14}, + {col: "Float64", val: math.NaN()}, + {col: "Float64", val: math.Inf(1)}, + {col: "Float64", val: math.Inf(-1)}, + {col: "Float64", val: 2.78, want: NullFloat64{2.78, true}}, + {col: "Float64", val: NullFloat64{2.71, true}, want: 2.71}, + {col: "Float64", val: NullFloat64{1.41, true}, want: NullFloat64{1.41, true}}, + {col: "Float64", 
val: NullFloat64{0, false}}, + {col: "Float64Array", val: []float64(nil), want: []NullFloat64(nil)}, + {col: "Float64Array", val: []float64{}, want: []NullFloat64{}}, + {col: "Float64Array", val: []float64{2.72, 3.14, math.Inf(1)}, want: []NullFloat64{{2.72, true}, {3.14, true}, {math.Inf(1), true}}}, + {col: "Float64Array", val: []NullFloat64(nil)}, + {col: "Float64Array", val: []NullFloat64{}}, + {col: "Float64Array", val: []NullFloat64{{2.72, true}, {math.Inf(1), true}, {}}}, + {col: "Date", val: d1}, + {col: "Date", val: d1, want: NullDate{d1, true}}, + {col: "Date", val: NullDate{d1, true}}, + {col: "Date", val: NullDate{d1, true}, want: d1}, + {col: "Date", val: NullDate{civil.Date{}, false}}, + {col: "DateArray", val: []civil.Date(nil), want: []NullDate(nil)}, + {col: "DateArray", val: []civil.Date{}, want: []NullDate{}}, + {col: "DateArray", val: []civil.Date{d1, d2, d3}, want: []NullDate{{d1, true}, {d2, true}, {d3, true}}}, + {col: "Timestamp", val: t1}, + {col: "Timestamp", val: t1, want: NullTime{t1, true}}, + {col: "Timestamp", val: NullTime{t1, true}}, + {col: "Timestamp", val: NullTime{t1, true}, want: t1}, + {col: "Timestamp", val: NullTime{}}, + {col: "TimestampArray", val: []time.Time(nil), want: []NullTime(nil)}, + {col: "TimestampArray", val: []time.Time{}, want: []NullTime{}}, + {col: "TimestampArray", val: []time.Time{t1, t2, t3}, want: []NullTime{{t1, true}, {t2, true}, {t3, true}}}, + } + + // Write rows into table first. + var muts []*Mutation + for i, test := range tests { + muts = append(muts, InsertOrUpdate("Types", []string{"RowID", test.col}, []interface{}{i, test.val})) + } + if _, err := client.Apply(ctx, muts, ApplyAtLeastOnce()); err != nil { + t.Fatal(err) + } + + for i, test := range tests { + row, err := client.Single().ReadRow(ctx, "Types", []interface{}{i}, []string{test.col}) + if err != nil { + t.Fatalf("Unable to fetch row %v: %v", i, err) + } + // Create new instance of type of test.want. 
+ want := test.want + if want == nil { + want = test.val + } + gotp := reflect.New(reflect.TypeOf(want)) + if err := row.Column(0, gotp.Interface()); err != nil { + t.Errorf("%d: col:%v val:%#v, %v", i, test.col, test.val, err) + continue + } + got := reflect.Indirect(gotp).Interface() + + // One of the test cases is checking NaN handling. Given + // NaN!=NaN, we can't use reflect to test for it. + isNaN := func(t interface{}) bool { + f, ok := t.(float64) + if !ok { + return false + } + return math.IsNaN(f) + } + if isNaN(got) && isNaN(want) { + continue + } + + // Check non-NaN cases. + if !reflect.DeepEqual(got, want) { + t.Errorf("%d: col:%v val:%#v, got %#v, want %#v", i, test.col, test.val, got, want) + continue + } + } +} + +// Test decoding Cloud Spanner STRUCT type. +func TestStructTypes(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second) + defer cancel() + if err := prepare(ctx, t); err != nil { + tearDown(ctx, t) + t.Fatalf("cannot set up testing environment: %v", err) + } + defer tearDown(ctx, t) + + tests := []struct { + q Statement + want func(r *Row) error + }{ + { + q: Statement{SQL: `SELECT ARRAY(SELECT STRUCT(1, 2))`}, + want: func(r *Row) error { + // Test STRUCT ARRAY decoding to []NullRow. + var rows []NullRow + if err := r.Column(0, &rows); err != nil { + return err + } + if len(rows) != 1 { + return fmt.Errorf("len(rows) = %d; want 1", len(rows)) + } + if !rows[0].Valid { + return fmt.Errorf("rows[0] is NULL") + } + var i, j int64 + if err := rows[0].Row.Columns(&i, &j); err != nil { + return err + } + if i != 1 || j != 2 { + return fmt.Errorf("got (%d,%d), want (1,2)", i, j) + } + return nil + }, + }, + { + q: Statement{SQL: `SELECT ARRAY(SELECT STRUCT(1 as foo, 2 as bar)) as col1`}, + want: func(r *Row) error { + // Test Row.ToStruct. 
+ s := struct { + Col1 []*struct { + Foo int64 `spanner:"foo"` + Bar int64 `spanner:"bar"` + } `spanner:"col1"` + }{} + if err := r.ToStruct(&s); err != nil { + return err + } + want := struct { + Col1 []*struct { + Foo int64 `spanner:"foo"` + Bar int64 `spanner:"bar"` + } `spanner:"col1"` + }{ + Col1: []*struct { + Foo int64 `spanner:"foo"` + Bar int64 `spanner:"bar"` + }{ + { + Foo: 1, + Bar: 2, + }, + }, + } + if !reflect.DeepEqual(want, s) { + return fmt.Errorf("unexpected decoding result: %v, want %v", s, want) + } + return nil + }, + }, + } + for i, test := range tests { + iter := client.Single().Query(ctx, test.q) + defer iter.Stop() + row, err := iter.Next() + if err != nil { + t.Errorf("%d: %v", i, err) + continue + } + if err := test.want(row); err != nil { + t.Errorf("%d: %v", i, err) + continue + } + } +} + +func rowToValues(r *Row) ([]interface{}, error) { + var x int64 + var y, z string + if err := r.Column(0, &x); err != nil { + return nil, err + } + if err := r.Column(1, &y); err != nil { + return nil, err + } + if err := r.Column(2, &z); err != nil { + return nil, err + } + return []interface{}{x, y, z}, nil +} + +func readAll(iter *RowIterator) ([][]interface{}, error) { + defer iter.Stop() + var vals [][]interface{} + for { + row, err := iter.Next() + if err == iterator.Done { + return vals, nil + } + if err != nil { + return nil, err + } + v, err := rowToValues(row) + if err != nil { + return nil, err + } + vals = append(vals, v) + } +} diff --git a/vendor/cloud.google.com/go/spanner/statement.go b/vendor/cloud.google.com/go/spanner/statement.go new file mode 100644 index 00000000..8e422b09 --- /dev/null +++ b/vendor/cloud.google.com/go/spanner/statement.go @@ -0,0 +1,78 @@ +/* +Copyright 2017 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package spanner + +import ( + "fmt" + + proto3 "github.com/golang/protobuf/ptypes/struct" + + sppb "google.golang.org/genproto/googleapis/spanner/v1" + "google.golang.org/grpc/codes" +) + +// A Statement is a SQL query with named parameters. +// +// A parameter placeholder consists of '@' followed by the parameter name. +// Parameter names consist of any combination of letters, numbers, and +// underscores. Names may be entirely numeric (e.g., "WHERE m.id = @5"). +// Parameters may appear anywhere that a literal value is expected. The same +// parameter name may be used more than once. It is an error to execute a +// statement with unbound parameters. On the other hand, it is allowable to +// bind parameter names that are not used. +// +// See the documentation of the Row type for how Go types are mapped to Cloud +// Spanner types. +type Statement struct { + SQL string + Params map[string]interface{} +} + +// NewStatement returns a Statement with the given SQL and an empty Params map. +func NewStatement(sql string) Statement { + return Statement{SQL: sql, Params: map[string]interface{}{}} +} + +// errBindParam returns error for not being able to bind parameter to query request. 
+func errBindParam(k string, v interface{}, err error) error { + if err == nil { + return nil + } + se, ok := toSpannerError(err).(*Error) + if !ok { + return spannerErrorf(codes.InvalidArgument, "failed to bind query parameter(name: %q, value: %q), error = <%v>", k, v, err) + } + se.decorate(fmt.Sprintf("failed to bind query parameter(name: %q, value: %q)", k, v)) + return se +} + +// bindParams binds parameters in a Statement to a sppb.ExecuteSqlRequest. +func (s *Statement) bindParams(r *sppb.ExecuteSqlRequest) error { + r.Params = &proto3.Struct{ + Fields: map[string]*proto3.Value{}, + } + r.ParamTypes = map[string]*sppb.Type{} + for k, v := range s.Params { + val, t, err := encodeValue(v) + if err != nil { + return errBindParam(k, v, err) + } + r.Params.Fields[k] = val + r.ParamTypes[k] = t + } + return nil +} diff --git a/vendor/cloud.google.com/go/spanner/statement_test.go b/vendor/cloud.google.com/go/spanner/statement_test.go new file mode 100644 index 00000000..a441e0e8 --- /dev/null +++ b/vendor/cloud.google.com/go/spanner/statement_test.go @@ -0,0 +1,64 @@ +/* +Copyright 2017 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package spanner + +import ( + "reflect" + "testing" + + proto3 "github.com/golang/protobuf/ptypes/struct" + + sppb "google.golang.org/genproto/googleapis/spanner/v1" +) + +// Test Statement.bindParams. +func TestBindParams(t *testing.T) { + // Verify Statement.bindParams generates correct values and types. 
+ want := sppb.ExecuteSqlRequest{ + Params: &proto3.Struct{ + Fields: map[string]*proto3.Value{ + "var1": stringProto("abc"), + "var2": intProto(1), + }, + }, + ParamTypes: map[string]*sppb.Type{ + "var1": stringType(), + "var2": intType(), + }, + } + st := Statement{ + SQL: "SELECT id from t_foo WHERE col1 = @var1 AND col2 = @var2", + Params: map[string]interface{}{"var1": "abc", "var2": int64(1)}, + } + got := sppb.ExecuteSqlRequest{} + if err := st.bindParams(&got); err != nil || !reflect.DeepEqual(got, want) { + t.Errorf("bind result: \n(%v, %v)\nwant\n(%v, %v)\n", got, err, want, nil) + } + // Verify type error reporting. + st.Params["var2"] = struct{}{} + wantErr := errBindParam("var2", struct{}{}, errEncoderUnsupportedType(struct{}{})) + if err := st.bindParams(&got); !reflect.DeepEqual(err, wantErr) { + t.Errorf("got unexpected error: %v, want: %v", err, wantErr) + } +} + +func TestNewStatement(t *testing.T) { + s := NewStatement("query") + if got, want := s.SQL, "query"; got != want { + t.Errorf("got %q, want %q", got, want) + } +} diff --git a/vendor/cloud.google.com/go/spanner/timestampbound.go b/vendor/cloud.google.com/go/spanner/timestampbound.go new file mode 100644 index 00000000..068d9660 --- /dev/null +++ b/vendor/cloud.google.com/go/spanner/timestampbound.go @@ -0,0 +1,245 @@ +/* +Copyright 2017 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package spanner + +import ( + "fmt" + "time" + + pbd "github.com/golang/protobuf/ptypes/duration" + pbt "github.com/golang/protobuf/ptypes/timestamp" + sppb "google.golang.org/genproto/googleapis/spanner/v1" +) + +// timestampBoundType specifies the timestamp bound mode. +type timestampBoundType int + +const ( + strong timestampBoundType = iota // strong reads + exactStaleness // read with exact staleness + maxStaleness // read with max staleness + minReadTimestamp // read with min freshness + readTimestamp // read data at exact timestamp +) + +// TimestampBound defines how Cloud Spanner will choose a timestamp for a single +// read/query or read-only transaction. +// +// The types of timestamp bound are: +// +// - Strong (the default). +// - Bounded staleness. +// - Exact staleness. +// +// If the Cloud Spanner database to be read is geographically distributed, stale +// read-only transactions can execute more quickly than strong or read-write +// transactions, because they are able to execute far from the leader replica. +// +// Each type of timestamp bound is discussed in detail below. A TimestampBound +// can be specified when creating transactions, see the documentation of +// spanner.Client for an example. +// +// Strong reads +// +// Strong reads are guaranteed to see the effects of all transactions that have +// committed before the start of the read. Furthermore, all rows yielded by a +// single read are consistent with each other - if any part of the read +// observes a transaction, all parts of the read see the transaction. +// +// Strong reads are not repeatable: two consecutive strong read-only +// transactions might return inconsistent results if there are concurrent +// writes. If consistency across reads is required, the reads should be +// executed within a transaction or at an exact read timestamp. +// +// Use StrongRead() to create a bound of this type. 
+// +// Exact staleness +// +// These timestamp bounds execute reads at a user-specified timestamp. Reads at +// a timestamp are guaranteed to see a consistent prefix of the global +// transaction history: they observe modifications done by all transactions +// with a commit timestamp less than or equal to the read timestamp, and +// observe none of the modifications done by transactions with a larger commit +// timestamp. They will block until all conflicting transactions that may be +// assigned commit timestamps less than or equal to the read timestamp have +// finished. +// +// The timestamp can either be expressed as an absolute Cloud Spanner commit +// timestamp or a staleness relative to the current time. +// +// These modes do not require a "negotiation phase" to pick a timestamp. As a +// result, they execute slightly faster than the equivalent boundedly stale +// concurrency modes. On the other hand, boundedly stale reads usually return +// fresher results. +// +// Use ReadTimestamp() and ExactStaleness() to create a bound of this type. +// +// Bounded staleness +// +// Bounded staleness modes allow Cloud Spanner to pick the read timestamp, subject to +// a user-provided staleness bound. Cloud Spanner chooses the newest timestamp within +// the staleness bound that allows execution of the reads at the closest +// available replica without blocking. +// +// All rows yielded are consistent with each other -- if any part of the read +// observes a transaction, all parts of the read see the transaction. Boundedly +// stale reads are not repeatable: two stale reads, even if they use the same +// staleness bound, can execute at different timestamps and thus return +// inconsistent results. +// +// Boundedly stale reads execute in two phases: the first phase negotiates a +// timestamp among all replicas needed to serve the read. In the second phase, +// reads are executed at the negotiated timestamp. 
+// +// As a result of the two phase execution, bounded staleness reads are usually +// a little slower than comparable exact staleness reads. However, they are +// typically able to return fresher results, and are more likely to execute at +// the closest replica. +// +// Because the timestamp negotiation requires up-front knowledge of which rows +// will be read, it can only be used with single-use reads and single-use +// read-only transactions. +// +// Use MinReadTimestamp() and MaxStaleness() to create a bound of this type. +// +// Old read timestamps and garbage collection +// +// Cloud Spanner continuously garbage collects deleted and overwritten data in the +// background to reclaim storage space. This process is known as "version +// GC". By default, version GC reclaims versions after they are four hours +// old. Because of this, Cloud Spanner cannot perform reads at read timestamps more +// than four hours in the past. This restriction also applies to in-progress +// reads and/or SQL queries whose timestamp become too old while +// executing. Reads and SQL queries with too-old read timestamps fail with the +// error ErrorCode.FAILED_PRECONDITION. +type TimestampBound struct { + mode timestampBoundType + d time.Duration + t time.Time +} + +// StrongRead returns a TimestampBound that will perform reads and queries at a +// timestamp where all previously committed transactions are visible. +func StrongRead() TimestampBound { + return TimestampBound{mode: strong} +} + +// ExactStaleness returns a TimestampBound that will perform reads and queries +// at an exact staleness. +func ExactStaleness(d time.Duration) TimestampBound { + return TimestampBound{ + mode: exactStaleness, + d: d, + } +} + +// MaxStaleness returns a TimestampBound that will perform reads and queries at +// a time chosen to be at most "d" stale. 
+func MaxStaleness(d time.Duration) TimestampBound { + return TimestampBound{ + mode: maxStaleness, + d: d, + } +} + +// MinReadTimestamp returns a TimestampBound that bound that will perform reads +// and queries at a time chosen to be at least "t". +func MinReadTimestamp(t time.Time) TimestampBound { + return TimestampBound{ + mode: minReadTimestamp, + t: t, + } +} + +// ReadTimestamp returns a TimestampBound that will peform reads and queries at +// the given time. +func ReadTimestamp(t time.Time) TimestampBound { + return TimestampBound{ + mode: readTimestamp, + t: t, + } +} + +// String implements fmt.Stringer. +func (tb TimestampBound) String() string { + switch tb.mode { + case strong: + return fmt.Sprintf("(strong)") + case exactStaleness: + return fmt.Sprintf("(exactStaleness: %s)", tb.d) + case maxStaleness: + return fmt.Sprintf("(maxStaleness: %s)", tb.d) + case minReadTimestamp: + return fmt.Sprintf("(minReadTimestamp: %s)", tb.t) + case readTimestamp: + return fmt.Sprintf("(readTimestamp: %s)", tb.t) + default: + return fmt.Sprintf("{mode=%v, d=%v, t=%v}", tb.mode, tb.d, tb.t) + } +} + +// durationProto takes a time.Duration and converts it into pdb.Duration for +// calling gRPC APIs. +func durationProto(d time.Duration) *pbd.Duration { + n := d.Nanoseconds() + return &pbd.Duration{ + Seconds: n / int64(time.Second), + Nanos: int32(n % int64(time.Second)), + } +} + +// timestampProto takes a time.Time and converts it into pbt.Timestamp for calling +// gRPC APIs. +func timestampProto(t time.Time) *pbt.Timestamp { + return &pbt.Timestamp{ + Seconds: t.Unix(), + Nanos: int32(t.Nanosecond()), + } +} + +// buildTransactionOptionsReadOnly converts a spanner.TimestampBound into a sppb.TransactionOptions_ReadOnly +// transaction option, which is then used in transactional reads. 
+func buildTransactionOptionsReadOnly(tb TimestampBound, returnReadTimestamp bool) *sppb.TransactionOptions_ReadOnly { + pb := &sppb.TransactionOptions_ReadOnly{ + ReturnReadTimestamp: returnReadTimestamp, + } + switch tb.mode { + case strong: + pb.TimestampBound = &sppb.TransactionOptions_ReadOnly_Strong{ + Strong: true, + } + case exactStaleness: + pb.TimestampBound = &sppb.TransactionOptions_ReadOnly_ExactStaleness{ + ExactStaleness: durationProto(tb.d), + } + case maxStaleness: + pb.TimestampBound = &sppb.TransactionOptions_ReadOnly_MaxStaleness{ + MaxStaleness: durationProto(tb.d), + } + case minReadTimestamp: + pb.TimestampBound = &sppb.TransactionOptions_ReadOnly_MinReadTimestamp{ + MinReadTimestamp: timestampProto(tb.t), + } + case readTimestamp: + pb.TimestampBound = &sppb.TransactionOptions_ReadOnly_ReadTimestamp{ + ReadTimestamp: timestampProto(tb.t), + } + default: + panic(fmt.Sprintf("buildTransactionOptionsReadOnly(%v,%v)", tb, returnReadTimestamp)) + } + return pb +} diff --git a/vendor/cloud.google.com/go/spanner/timestampbound_test.go b/vendor/cloud.google.com/go/spanner/timestampbound_test.go new file mode 100644 index 00000000..47fb481d --- /dev/null +++ b/vendor/cloud.google.com/go/spanner/timestampbound_test.go @@ -0,0 +1,208 @@ +/* +Copyright 2017 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package spanner + +import ( + "reflect" + "testing" + "time" + + pbd "github.com/golang/protobuf/ptypes/duration" + pbt "github.com/golang/protobuf/ptypes/timestamp" + + sppb "google.golang.org/genproto/googleapis/spanner/v1" +) + +// Test generating TimestampBound for strong reads. +func TestStrong(t *testing.T) { + got := StrongRead() + want := TimestampBound{mode: strong} + if !reflect.DeepEqual(got, want) { + t.Errorf("Strong() = %v; want %v", got, want) + } +} + +// Test generating TimestampBound for reads with exact staleness. +func TestExactStaleness(t *testing.T) { + got := ExactStaleness(10 * time.Second) + want := TimestampBound{mode: exactStaleness, d: 10 * time.Second} + if !reflect.DeepEqual(got, want) { + t.Errorf("ExactStaleness(10*time.Second) = %v; want %v", got, want) + } +} + +// Test generating TimestampBound for reads with max staleness. +func TestMaxStaleness(t *testing.T) { + got := MaxStaleness(10 * time.Second) + want := TimestampBound{mode: maxStaleness, d: 10 * time.Second} + if !reflect.DeepEqual(got, want) { + t.Errorf("MaxStaleness(10*time.Second) = %v; want %v", got, want) + } +} + +// Test generating TimestampBound for reads with minimum freshness requirement. +func TestMinReadTimestamp(t *testing.T) { + ts := time.Now() + got := MinReadTimestamp(ts) + want := TimestampBound{mode: minReadTimestamp, t: ts} + if !reflect.DeepEqual(got, want) { + t.Errorf("MinReadTimestamp(%v) = %v; want %v", ts, got, want) + } +} + +// Test generating TimestampBound for reads requesting data at a exact timestamp. +func TestReadTimestamp(t *testing.T) { + ts := time.Now() + got := ReadTimestamp(ts) + want := TimestampBound{mode: readTimestamp, t: ts} + if !reflect.DeepEqual(got, want) { + t.Errorf("ReadTimestamp(%v) = %v; want %v", ts, got, want) + } +} + +// Test TimestampBound.String. 
+func TestTimestampBoundString(t *testing.T) { + ts := time.Unix(1136239445, 0).UTC() + var tests = []struct { + tb TimestampBound + want string + }{ + { + tb: TimestampBound{mode: strong}, + want: "(strong)", + }, + { + tb: TimestampBound{mode: exactStaleness, d: 10 * time.Second}, + want: "(exactStaleness: 10s)", + }, + { + tb: TimestampBound{mode: maxStaleness, d: 10 * time.Second}, + want: "(maxStaleness: 10s)", + }, + { + tb: TimestampBound{mode: minReadTimestamp, t: ts}, + want: "(minReadTimestamp: 2006-01-02 22:04:05 +0000 UTC)", + }, + { + tb: TimestampBound{mode: readTimestamp, t: ts}, + want: "(readTimestamp: 2006-01-02 22:04:05 +0000 UTC)", + }, + } + for _, test := range tests { + got := test.tb.String() + if got != test.want { + t.Errorf("%#v.String():\ngot %q\nwant %q", test.tb, got, test.want) + } + } +} + +// Test time.Duration to pdb.Duration conversion. +func TestDurationProto(t *testing.T) { + var tests = []struct { + d time.Duration + want pbd.Duration + }{ + {time.Duration(0), pbd.Duration{Seconds: 0, Nanos: 0}}, + {time.Second, pbd.Duration{Seconds: 1, Nanos: 0}}, + {time.Millisecond, pbd.Duration{Seconds: 0, Nanos: 1e6}}, + {15 * time.Nanosecond, pbd.Duration{Seconds: 0, Nanos: 15}}, + {42 * time.Hour, pbd.Duration{Seconds: 151200}}, + {-(1*time.Hour + 4*time.Millisecond), pbd.Duration{Seconds: -3600, Nanos: -4e6}}, + } + for _, test := range tests { + got := durationProto(test.d) + if !reflect.DeepEqual(got, &test.want) { + t.Errorf("durationProto(%v) = %v; want %v", test.d, got, test.want) + } + } +} + +// Test time.Time to pbt.Timestamp conversion. 
+func TestTimeProto(t *testing.T) { + var tests = []struct { + t time.Time + want pbt.Timestamp + }{ + {time.Unix(0, 0), pbt.Timestamp{}}, + {time.Unix(1136239445, 12345), pbt.Timestamp{Seconds: 1136239445, Nanos: 12345}}, + {time.Unix(-1000, 12345), pbt.Timestamp{Seconds: -1000, Nanos: 12345}}, + } + for _, test := range tests { + got := timestampProto(test.t) + if !reflect.DeepEqual(got, &test.want) { + t.Errorf("timestampProto(%v) = %v; want %v", test.t, got, test.want) + } + } +} + +// Test readonly transaction option builder. +func TestBuildTransactionOptionsReadOnly(t *testing.T) { + ts := time.Unix(1136239445, 12345) + var tests = []struct { + tb TimestampBound + ts bool + want sppb.TransactionOptions_ReadOnly + }{ + { + StrongRead(), false, + sppb.TransactionOptions_ReadOnly{ + TimestampBound: &sppb.TransactionOptions_ReadOnly_Strong{ + Strong: true}, + ReturnReadTimestamp: false, + }, + }, + { + ExactStaleness(10 * time.Second), true, + sppb.TransactionOptions_ReadOnly{ + TimestampBound: &sppb.TransactionOptions_ReadOnly_ExactStaleness{ + ExactStaleness: &pbd.Duration{Seconds: 10}}, + ReturnReadTimestamp: true, + }, + }, + { + MaxStaleness(10 * time.Second), true, + sppb.TransactionOptions_ReadOnly{ + TimestampBound: &sppb.TransactionOptions_ReadOnly_MaxStaleness{ + MaxStaleness: &pbd.Duration{Seconds: 10}}, + ReturnReadTimestamp: true, + }, + }, + + { + MinReadTimestamp(ts), true, + sppb.TransactionOptions_ReadOnly{ + TimestampBound: &sppb.TransactionOptions_ReadOnly_MinReadTimestamp{ + MinReadTimestamp: &pbt.Timestamp{Seconds: 1136239445, Nanos: 12345}}, + ReturnReadTimestamp: true, + }, + }, + { + ReadTimestamp(ts), true, + sppb.TransactionOptions_ReadOnly{ + TimestampBound: &sppb.TransactionOptions_ReadOnly_ReadTimestamp{ + ReadTimestamp: &pbt.Timestamp{Seconds: 1136239445, Nanos: 12345}}, + ReturnReadTimestamp: true, + }, + }, + } + for _, test := range tests { + got := buildTransactionOptionsReadOnly(test.tb, test.ts) + if !reflect.DeepEqual(got, 
&test.want) { + t.Errorf("buildTransactionOptionsReadOnly(%v,%v) = %v; want %v", test.tb, test.ts, got, test.want) + } + } +} diff --git a/vendor/cloud.google.com/go/spanner/transaction.go b/vendor/cloud.google.com/go/spanner/transaction.go new file mode 100644 index 00000000..1d5a2797 --- /dev/null +++ b/vendor/cloud.google.com/go/spanner/transaction.go @@ -0,0 +1,821 @@ +/* +Copyright 2017 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package spanner + +import ( + "sync" + "time" + + "golang.org/x/net/context" + + "google.golang.org/api/iterator" + sppb "google.golang.org/genproto/googleapis/spanner/v1" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" +) + +// transactionID stores a transaction ID which uniquely identifies a transaction in Cloud Spanner. +type transactionID []byte + +// txReadEnv manages a read-transaction environment consisting of a session handle and a transaction selector. +type txReadEnv interface { + // acquire returns a read-transaction environment that can be used to perform a transactional read. + acquire(ctx context.Context) (*sessionHandle, *sppb.TransactionSelector, error) + // release should be called at the end of every transactional read to deal with session recycling and read timestamp recording. + release(time.Time, error) +} + +// txReadOnly contains methods for doing transactional reads. 
+type txReadOnly struct { + // read-transaction environment for performing transactional read operations. + txReadEnv +} + +// errSessionClosed returns error for using a recycled/destroyed session +func errSessionClosed(sh *sessionHandle) error { + return spannerErrorf(codes.FailedPrecondition, + "session is already recycled / destroyed: session_id = %q, rpc_client = %v", sh.getID(), sh.getClient()) +} + +// Read reads multiple rows from the database. +// +// The provided function is called once in serial for each row read. If the +// function returns a non-nil error, Read immediately returns that value. +// +// If no rows are read, Read will return nil without calling the provided +// function. +func (t *txReadOnly) Read(ctx context.Context, table string, keys KeySet, columns []string) *RowIterator { + // ReadUsingIndex will use primary index if an empty index name is provided. + return t.ReadUsingIndex(ctx, table, "", keys, columns) +} + +// ReadUsingIndex reads multiple rows from the database using an index. +// +// Currently, this function can only read columns that are part of the index +// key, part of the primary key, or stored in the index due to a STORING clause +// in the index definition. +// +// The provided function is called once in serial for each row read. If the +// function returns a non-nil error, ReadUsingIndex immediately returns that +// value. +// +// If no rows are read, ReadUsingIndex will return nil without calling the +// provided function. +func (t *txReadOnly) ReadUsingIndex(ctx context.Context, table, index string, keys KeySet, columns []string) *RowIterator { + var ( + sh *sessionHandle + ts *sppb.TransactionSelector + err error + ) + kset, err := keys.proto() + if err != nil { + return &RowIterator{err: err} + } + if sh, ts, err = t.acquire(ctx); err != nil { + return &RowIterator{err: err} + } + // Cloud Spanner will return "Session not found" on bad sessions. 
+ sid, client := sh.getID(), sh.getClient() + if sid == "" || client == nil { + // Might happen if transaction is closed in the middle of a API call. + return &RowIterator{err: errSessionClosed(sh)} + } + return stream( + contextWithMetadata(ctx, sh.getMetadata()), + func(ctx context.Context, resumeToken []byte) (streamingReceiver, error) { + return client.StreamingRead(ctx, + &sppb.ReadRequest{ + Session: sid, + Transaction: ts, + Table: table, + Index: index, + Columns: columns, + KeySet: kset, + ResumeToken: resumeToken, + }) + }, + t.release, + ) +} + +// errRowNotFound returns error for not being able to read the row identified by key. +func errRowNotFound(table string, key Key) error { + return spannerErrorf(codes.NotFound, "row not found(Table: %v, PrimaryKey: %v)", table, key) +} + +// ReadRow reads a single row from the database. +// +// If no row is present with the given key, then ReadRow returns an error where +// IsRowNotFound(err) is true. +func (t *txReadOnly) ReadRow(ctx context.Context, table string, key Key, columns []string) (*Row, error) { + iter := t.Read(ctx, table, Keys(key), columns) + defer iter.Stop() + row, err := iter.Next() + switch err { + case iterator.Done: + return nil, errRowNotFound(table, key) + case nil: + return row, nil + default: + return nil, err + } +} + +// Query executes a query against the database. +// +// The provided function is called once in serial for each row read. If the +// function returns a non-nil error, Query immediately returns that value. +// +// If no rows are read, Query will return nil without calling the provided +// function. +func (t *txReadOnly) Query(ctx context.Context, statement Statement) *RowIterator { + var ( + sh *sessionHandle + ts *sppb.TransactionSelector + err error + ) + if sh, ts, err = t.acquire(ctx); err != nil { + return &RowIterator{err: err} + } + // Cloud Spanner will return "Session not found" on bad sessions. 
+ sid, client := sh.getID(), sh.getClient() + if sid == "" || client == nil { + // Might happen if transaction is closed in the middle of a API call. + return &RowIterator{err: errSessionClosed(sh)} + } + req := &sppb.ExecuteSqlRequest{ + Session: sid, + Transaction: ts, + Sql: statement.SQL, + } + if err := statement.bindParams(req); err != nil { + return &RowIterator{err: err} + } + return stream( + contextWithMetadata(ctx, sh.getMetadata()), + func(ctx context.Context, resumeToken []byte) (streamingReceiver, error) { + req.ResumeToken = resumeToken + return client.ExecuteStreamingSql(ctx, req) + }, + t.release) +} + +// txState is the status of a transaction. +type txState int + +const ( + // transaction is new, waiting to be initialized. + txNew txState = iota + // transaction is being initialized. + txInit + // transaction is active and can perform read/write. + txActive + // transaction is closed, cannot be used anymore. + txClosed +) + +// errRtsUnavailable returns error for read transaction's read timestamp being unavailable. +func errRtsUnavailable() error { + return spannerErrorf(codes.Internal, "read timestamp is unavailable") +} + +// errTxNotInitialized returns error for using an uninitialized transaction. +func errTxNotInitialized() error { + return spannerErrorf(codes.InvalidArgument, "cannot use a uninitialized transaction") +} + +// errTxClosed returns error for using a closed transaction. +func errTxClosed() error { + return spannerErrorf(codes.InvalidArgument, "cannot use a closed transaction") +} + +// errUnexpectedTxState returns error for transaction enters an unexpected state. +func errUnexpectedTxState(ts txState) error { + return spannerErrorf(codes.FailedPrecondition, "unexpected transaction state: %v", ts) +} + +// ReadOnlyTransaction provides a snapshot transaction with guaranteed +// consistency across reads, but does not allow writes. Read-only +// transactions can be configured to read at timestamps in the past. 
+// +// Read-only transactions do not take locks. Instead, they work by choosing a +// Cloud Spanner timestamp, then executing all reads at that timestamp. Since they do +// not acquire locks, they do not block concurrent read-write transactions. +// +// Unlike locking read-write transactions, read-only transactions never +// abort. They can fail if the chosen read timestamp is garbage collected; +// however, the default garbage collection policy is generous enough that most +// applications do not need to worry about this in practice. See the +// documentation of TimestampBound for more details. +// +// A ReadOnlyTransaction consumes resources on the server until Close() is +// called. +type ReadOnlyTransaction struct { + // txReadOnly contains methods for performing transactional reads. + txReadOnly + + // singleUse indicates that the transaction can be used for only one read. + singleUse bool + + // sp is the session pool for allocating a session to execute the read-only transaction. It is set only once during initialization of the ReadOnlyTransaction. + sp *sessionPool + // mu protects concurrent access to the internal states of ReadOnlyTransaction. + mu sync.Mutex + // tx is the transaction ID in Cloud Spanner that uniquely identifies the ReadOnlyTransaction. + tx transactionID + // txReadyOrClosed is for broadcasting that transaction ID has been returned by Cloud Spanner or that transaction is closed. + txReadyOrClosed chan struct{} + // state is the current transaction status of the ReadOnly transaction. + state txState + // sh is the sessionHandle allocated from sp. + sh *sessionHandle + // rts is the read timestamp returned by transactional reads. + rts time.Time + // tb is the read staleness bound specification for transactional reads. + tb TimestampBound +} + +// errTxInitTimeout returns error for timeout in waiting for initialization of the transaction. 
+func errTxInitTimeout() error { + return spannerErrorf(codes.Canceled, "timeout/context canceled in waiting for transaction's initialization") +} + +// getTimestampBound returns the read staleness bound specified for the ReadOnlyTransaction. +func (t *ReadOnlyTransaction) getTimestampBound() TimestampBound { + t.mu.Lock() + defer t.mu.Unlock() + return t.tb +} + +// begin starts a snapshot read-only Transaction on Cloud Spanner. +func (t *ReadOnlyTransaction) begin(ctx context.Context) error { + var ( + locked bool + tx transactionID + rts time.Time + sh *sessionHandle + err error + ) + defer func() { + if !locked { + t.mu.Lock() + // Not necessary, just to make it clear that t.mu is being held when locked == true. + locked = true + } + if t.state != txClosed { + // Signal other initialization routines. + close(t.txReadyOrClosed) + t.txReadyOrClosed = make(chan struct{}) + } + t.mu.Unlock() + if err != nil && sh != nil { + // Got a valid session handle, but failed to initalize transaction on Cloud Spanner. + if shouldDropSession(err) { + sh.destroy() + } + // If sh.destroy was already executed, this becomes a noop. + sh.recycle() + } + }() + sh, err = t.sp.take(ctx) + if err != nil { + return err + } + err = runRetryable(contextWithMetadata(ctx, sh.getMetadata()), func(ctx context.Context) error { + res, e := sh.getClient().BeginTransaction(ctx, &sppb.BeginTransactionRequest{ + Session: sh.getID(), + Options: &sppb.TransactionOptions{ + Mode: &sppb.TransactionOptions_ReadOnly_{ + ReadOnly: buildTransactionOptionsReadOnly(t.getTimestampBound(), true), + }, + }, + }) + if e != nil { + return e + } + tx = res.Id + if res.ReadTimestamp != nil { + rts = time.Unix(res.ReadTimestamp.Seconds, int64(res.ReadTimestamp.Nanos)) + } + return nil + }) + t.mu.Lock() + locked = true // defer function will be executed with t.mu being held. + if t.state == txClosed { // During the execution of t.begin(), t.Close() was invoked. 
+ return errSessionClosed(sh) + } + // If begin() fails, this allows other queries to take over the initialization. + t.tx = nil + if err == nil { + t.tx = tx + t.rts = rts + t.sh = sh + // State transite to txActive. + t.state = txActive + } + return err +} + +// acquire implements txReadEnv.acquire. +func (t *ReadOnlyTransaction) acquire(ctx context.Context) (*sessionHandle, *sppb.TransactionSelector, error) { + if t.singleUse { + return t.acquireSingleUse(ctx) + } + return t.acquireMultiUse(ctx) +} + +func (t *ReadOnlyTransaction) acquireSingleUse(ctx context.Context) (*sessionHandle, *sppb.TransactionSelector, error) { + t.mu.Lock() + defer t.mu.Unlock() + switch t.state { + case txClosed: + // A closed single-use transaction can never be reused. + return nil, nil, errTxClosed() + case txNew: + t.state = txClosed + ts := &sppb.TransactionSelector{ + Selector: &sppb.TransactionSelector_SingleUse{ + SingleUse: &sppb.TransactionOptions{ + Mode: &sppb.TransactionOptions_ReadOnly_{ + ReadOnly: buildTransactionOptionsReadOnly(t.tb, true), + }, + }, + }, + } + sh, err := t.sp.take(ctx) + if err != nil { + return nil, nil, err + } + // Install session handle into t, which can be used for readonly operations later. + t.sh = sh + return sh, ts, nil + } + us := t.state + // SingleUse transaction should only be in either txNew state or txClosed state. + return nil, nil, errUnexpectedTxState(us) +} + +func (t *ReadOnlyTransaction) acquireMultiUse(ctx context.Context) (*sessionHandle, *sppb.TransactionSelector, error) { + for { + t.mu.Lock() + switch t.state { + case txClosed: + t.mu.Unlock() + return nil, nil, errTxClosed() + case txNew: + // State transit to txInit so that no further TimestampBound change is accepted. + t.state = txInit + t.mu.Unlock() + continue + case txInit: + if t.tx != nil { + // Wait for a transaction ID to become ready. 
+ txReadyOrClosed := t.txReadyOrClosed + t.mu.Unlock() + select { + case <-txReadyOrClosed: + // Need to check transaction state again. + continue + case <-ctx.Done(): + // The waiting for initialization is timeout, return error directly. + return nil, nil, errTxInitTimeout() + } + } + // Take the ownership of initializing the transaction. + t.tx = transactionID{} + t.mu.Unlock() + // Begin a read-only transaction. + // TODO: consider adding a transaction option which allow queries to initiate transactions by themselves. Note that this option might not be + // always good because the ID of the new transaction won't be ready till the query returns some data or completes. + if err := t.begin(ctx); err != nil { + return nil, nil, err + } + // If t.begin() succeeded, t.state should have been changed to txActive, so we can just continue here. + continue + case txActive: + sh := t.sh + ts := &sppb.TransactionSelector{ + Selector: &sppb.TransactionSelector_Id{ + Id: t.tx, + }, + } + t.mu.Unlock() + return sh, ts, nil + } + state := t.state + t.mu.Unlock() + return nil, nil, errUnexpectedTxState(state) + } +} + +// release implements txReadEnv.release. +func (t *ReadOnlyTransaction) release(rts time.Time, err error) { + t.mu.Lock() + if t.singleUse && !rts.IsZero() { + t.rts = rts + } + sh := t.sh + t.mu.Unlock() + if sh != nil { // sh could be nil if t.acquire() fails. + if shouldDropSession(err) { + sh.destroy() + } + if t.singleUse { + // If session handle is already destroyed, this becomes a noop. + sh.recycle() + } + } +} + +// Close closes a ReadOnlyTransaction, the transaction cannot perform any reads after being closed. +func (t *ReadOnlyTransaction) Close() { + if t.singleUse { + return + } + t.mu.Lock() + if t.state != txClosed { + t.state = txClosed + close(t.txReadyOrClosed) + } + sh := t.sh + t.mu.Unlock() + // If session handle is already destroyed, this becomes a noop. 
+ // If there are still active queries and if the recycled session is reused before they complete, Cloud Spanner will cancel them + // on behalf of the new transaction on the session. + sh.recycle() +} + +// Timestamp returns the timestamp chosen to perform reads and +// queries in this transaction. The value can only be read after some +// read or query has either returned some data or completed without +// returning any data. +func (t *ReadOnlyTransaction) Timestamp() (time.Time, error) { + t.mu.Lock() + defer t.mu.Unlock() + if t.rts.IsZero() { + return t.rts, errRtsUnavailable() + } + return t.rts, nil +} + +// WithTimestampBound specifies the TimestampBound to use for read or query. +// This can only be used before the first read or query is invoked. Note: +// bounded staleness is not available with general ReadOnlyTransactions; use a +// single-use ReadOnlyTransaction instead. +// +// The returned value is the ReadOnlyTransaction so calls can be chained. +func (t *ReadOnlyTransaction) WithTimestampBound(tb TimestampBound) *ReadOnlyTransaction { + t.mu.Lock() + defer t.mu.Unlock() + if t.state == txNew { + // Only allow to set TimestampBound before the first query. + t.tb = tb + } + return t +} + +// ReadWriteTransaction provides a locking read-write transaction. +// +// This type of transaction is the only way to write data into Cloud Spanner; +// (*Client).Apply and (*Client).ApplyAtLeastOnce use transactions +// internally. These transactions rely on pessimistic locking and, if +// necessary, two-phase commit. Locking read-write transactions may abort, +// requiring the application to retry. However, the interface exposed by +// (*Client).ReadWriteTransaction eliminates the need for applications to write +// retry loops explicitly. +// +// Locking transactions may be used to atomically read-modify-write data +// anywhere in a database. This type of transaction is externally consistent. 
+// +// Clients should attempt to minimize the amount of time a transaction is +// active. Faster transactions commit with higher probability and cause less +// contention. Cloud Spanner attempts to keep read locks active as long as the +// transaction continues to do reads. Long periods of inactivity at the client +// may cause Cloud Spanner to release a transaction's locks and abort it. +// +// Reads performed within a transaction acquire locks on the data being +// read. Writes can only be done at commit time, after all reads have been +// completed. Conceptually, a read-write transaction consists of zero or more +// reads or SQL queries followed by a commit. +// +// See (*Client).ReadWriteTransaction for an example. +// +// Semantics +// +// Cloud Spanner can commit the transaction if all read locks it acquired are still +// valid at commit time, and it is able to acquire write locks for all +// writes. Cloud Spanner can abort the transaction for any reason. If a commit +// attempt returns ABORTED, Cloud Spanner guarantees that the transaction has not +// modified any user data in Cloud Spanner. +// +// Unless the transaction commits, Cloud Spanner makes no guarantees about how long +// the transaction's locks were held for. It is an error to use Cloud Spanner locks +// for any sort of mutual exclusion other than between Cloud Spanner transactions +// themselves. +// +// Aborted transactions +// +// Application code does not need to retry explicitly; RunInTransaction will +// automatically retry a transaction if an attempt results in an abort. The +// lock priority of a transaction increases after each prior aborted +// transaction, meaning that the next attempt has a slightly better chance of +// success than before. +// +// Under some circumstances (e.g., many transactions attempting to modify the +// same row(s)), a transaction can abort many times in a short period before +// successfully committing. 
Thus, it is not a good idea to cap the number of +// retries a transaction can attempt; instead, it is better to limit the total +// amount of wall time spent retrying. +// +// Idle transactions +// +// A transaction is considered idle if it has no outstanding reads or SQL +// queries and has not started a read or SQL query within the last 10 +// seconds. Idle transactions can be aborted by Cloud Spanner so that they don't hold +// on to locks indefinitely. In that case, the commit will fail with error +// ABORTED. +// +// If this behavior is undesirable, periodically executing a simple SQL query +// in the transaction (e.g., SELECT 1) prevents the transaction from becoming +// idle. +type ReadWriteTransaction struct { + // txReadOnly contains methods for performing transactional reads. + txReadOnly + // sh is the sessionHandle allocated from sp. It is set only once during the initialization of ReadWriteTransaction. + sh *sessionHandle + // tx is the transaction ID in Cloud Spanner that uniquely identifies the ReadWriteTransaction. + // It is set only once in ReadWriteTransaction.begin() during the initialization of ReadWriteTransaction. + tx transactionID + // mu protects concurrent access to the internal states of ReadWriteTransaction. + mu sync.Mutex + // state is the current transaction status of the read-write transaction. + state txState + // wb is the set of buffered mutations waiting to be commited. + wb []*Mutation +} + +// BufferWrite adds a list of mutations to the set of updates that will be +// applied when the transaction is committed. It does not actually apply the +// write until the transaction is committed, so the operation does not +// block. The effects of the write won't be visible to any reads (including +// reads done in the same transaction) until the transaction commits. +// +// See the example for Client.ReadWriteTransaction. 
+func (t *ReadWriteTransaction) BufferWrite(ms []*Mutation) error { + t.mu.Lock() + defer t.mu.Unlock() + if t.state == txClosed { + return errTxClosed() + } + if t.state != txActive { + return errUnexpectedTxState(t.state) + } + t.wb = append(t.wb, ms...) + return nil +} + +// acquire implements txReadEnv.acquire. +func (t *ReadWriteTransaction) acquire(ctx context.Context) (*sessionHandle, *sppb.TransactionSelector, error) { + ts := &sppb.TransactionSelector{ + Selector: &sppb.TransactionSelector_Id{ + Id: t.tx, + }, + } + t.mu.Lock() + defer t.mu.Unlock() + switch t.state { + case txClosed: + return nil, nil, errTxClosed() + case txActive: + return t.sh, ts, nil + } + return nil, nil, errUnexpectedTxState(t.state) +} + +// release implements txReadEnv.release. +func (t *ReadWriteTransaction) release(_ time.Time, err error) { + t.mu.Lock() + sh := t.sh + t.mu.Unlock() + if sh != nil && shouldDropSession(err) { + sh.destroy() + } +} + +func beginTransaction(ctx context.Context, sid string, client sppb.SpannerClient) (transactionID, error) { + var tx transactionID + err := runRetryable(ctx, func(ctx context.Context) error { + res, e := client.BeginTransaction(ctx, &sppb.BeginTransactionRequest{ + Session: sid, + Options: &sppb.TransactionOptions{ + Mode: &sppb.TransactionOptions_ReadWrite_{ + ReadWrite: &sppb.TransactionOptions_ReadWrite{}, + }, + }, + }) + if e != nil { + return e + } + tx = res.Id + return nil + }) + if err != nil { + return nil, err + } + return tx, nil +} + +// begin starts a read-write transacton on Cloud Spanner, it is always called before any of the public APIs. 
+func (t *ReadWriteTransaction) begin(ctx context.Context) error {
+	if t.tx != nil {
+		t.state = txActive
+		return nil
+	}
+	tx, err := beginTransaction(contextWithMetadata(ctx, t.sh.getMetadata()), t.sh.getID(), t.sh.getClient())
+	if err == nil {
+		t.tx = tx
+		t.state = txActive
+		return nil
+	}
+	if shouldDropSession(err) {
+		t.sh.destroy()
+	}
+	return err
+}
+
+// commit tries to commit a read-write transaction to Cloud Spanner. It also returns the commit timestamp for the transaction.
+func (t *ReadWriteTransaction) commit(ctx context.Context) (time.Time, error) {
+	var ts time.Time
+	t.mu.Lock()
+	t.state = txClosed // No further operations after commit.
+	mPb, err := mutationsProto(t.wb)
+	t.mu.Unlock()
+	if err != nil {
+		return ts, err
+	}
+	// In case that sessionHandle was destroyed but transaction body fails to report it.
+	sid, client := t.sh.getID(), t.sh.getClient()
+	if sid == "" || client == nil {
+		return ts, errSessionClosed(t.sh)
+	}
+	err = runRetryable(contextWithMetadata(ctx, t.sh.getMetadata()), func(ctx context.Context) error {
+		var trailer metadata.MD
+		res, e := client.Commit(ctx, &sppb.CommitRequest{
+			Session: sid,
+			Transaction: &sppb.CommitRequest_TransactionId{
+				TransactionId: t.tx,
+			},
+			Mutations: mPb,
+		}, grpc.Trailer(&trailer))
+		if e != nil {
+			return toSpannerErrorWithMetadata(e, trailer)
+		}
+		if tstamp := res.GetCommitTimestamp(); tstamp != nil {
+			ts = time.Unix(tstamp.Seconds, int64(tstamp.Nanos))
+		}
+		return nil
+	})
+	if shouldDropSession(err) {
+		t.sh.destroy()
+	}
+	return ts, err
+}
+
+// rollback is called when a commit is aborted or the transaction body runs into error.
+func (t *ReadWriteTransaction) rollback(ctx context.Context) {
+	t.mu.Lock()
+	// Forbid further operations on a rolled-back transaction.
+	t.state = txClosed
+	t.mu.Unlock()
+	// In case that sessionHandle was destroyed but transaction body fails to report it.
+	sid, client := t.sh.getID(), t.sh.getClient()
+	if sid == "" || client == nil {
+		return
+	}
+	err := runRetryable(contextWithMetadata(ctx, t.sh.getMetadata()), func(ctx context.Context) error {
+		_, e := client.Rollback(ctx, &sppb.RollbackRequest{
+			Session:       sid,
+			TransactionId: t.tx,
+		})
+		return e
+	})
+	if shouldDropSession(err) {
+		t.sh.destroy()
+	}
+	return
+}
+
+// runInTransaction executes f under a read-write transaction context.
+func (t *ReadWriteTransaction) runInTransaction(ctx context.Context, f func(t *ReadWriteTransaction) error) (time.Time, error) {
+	var (
+		ts  time.Time
+		err error
+	)
+	if err = f(t); err == nil {
+		// Try to commit if transaction body returns no error.
+		ts, err = t.commit(ctx)
+	}
+	if err != nil {
+		if isAbortErr(err) {
+			// Retry the transaction using the same session on ABORT error.
+			// Cloud Spanner will create the new transaction with the previous one's wound-wait priority.
+			err = errRetry(err)
+			return ts, err
+		}
+		// Not going to commit, according to API spec, should rollback the transaction.
+		t.rollback(ctx)
+		return ts, err
+	}
+	// err == nil, return commit timestamp.
+	return ts, err
+}
+
+// writeOnlyTransaction provides the most efficient way of doing write-only transactions. It essentially does blind writes to Cloud Spanner.
+type writeOnlyTransaction struct {
+	// sp is the session pool which writeOnlyTransaction uses to get Cloud Spanner sessions for blind writes.
+	sp *sessionPool
+}
+
+// applyAtLeastOnce commits a list of mutations to Cloud Spanner at least once, unless one of the following happens:
+// 1) The context times out.
+// 2) An unretryable error (e.g. database not found) occurs.
+// 3) There is a malformed Mutation object.
+func (t *writeOnlyTransaction) applyAtLeastOnce(ctx context.Context, ms ...*Mutation) (time.Time, error) {
+	var (
+		ts time.Time
+		sh *sessionHandle
+	)
+	mPb, err := mutationsProto(ms)
+	if err != nil {
+		// Malformed mutation found, just return the error.
+		return ts, err
+	}
+	err = runRetryable(ctx, func(ct context.Context) error {
+		var e error
+		var trailers metadata.MD
+		if sh == nil || sh.getID() == "" || sh.getClient() == nil {
+			// No usable session for doing the commit, take one from pool.
+			sh, e = t.sp.take(ctx)
+			if e != nil {
+				// sessionPool.Take already retries for session creations/retrievals.
+				return e
+			}
+		}
+		res, e := sh.getClient().Commit(contextWithMetadata(ctx, sh.getMetadata()), &sppb.CommitRequest{
+			Session: sh.getID(),
+			Transaction: &sppb.CommitRequest_SingleUseTransaction{
+				SingleUseTransaction: &sppb.TransactionOptions{
+					Mode: &sppb.TransactionOptions_ReadWrite_{
+						ReadWrite: &sppb.TransactionOptions_ReadWrite{},
+					},
+				},
+			},
+			Mutations: mPb,
+		}, grpc.Trailer(&trailers))
+		if e != nil {
+			if isAbortErr(e) {
+				// Mask ABORT error as retryable, because aborted transactions are allowed to be retried.
+				return errRetry(toSpannerErrorWithMetadata(e, trailers))
+			}
+			if shouldDropSession(e) {
+				// Discard the bad session.
+				sh.destroy()
+			}
+			return e
+		}
+		if tstamp := res.GetCommitTimestamp(); tstamp != nil {
+			ts = time.Unix(tstamp.Seconds, int64(tstamp.Nanos))
+		}
+		return nil
+	})
+	if sh != nil {
+		sh.recycle()
+	}
+	return ts, err
+}
+
+// isAbortErr returns true if the error indicates that a gRPC call is aborted on the server side.
+func isAbortErr(err error) bool {
+	if err == nil {
+		return false
+	}
+	if ErrCode(err) == codes.Aborted {
+		return true
+	}
+	return false
+}
diff --git a/vendor/cloud.google.com/go/spanner/value.go b/vendor/cloud.google.com/go/spanner/value.go
new file mode 100644
index 00000000..ed13582c
--- /dev/null
+++ b/vendor/cloud.google.com/go/spanner/value.go
@@ -0,0 +1,1244 @@
+/*
+Copyright 2017 Google Inc. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package spanner + +import ( + "encoding/base64" + "fmt" + "math" + "reflect" + "strconv" + "strings" + "time" + + "cloud.google.com/go/civil" + "cloud.google.com/go/internal/fields" + proto "github.com/golang/protobuf/proto" + proto3 "github.com/golang/protobuf/ptypes/struct" + sppb "google.golang.org/genproto/googleapis/spanner/v1" + "google.golang.org/grpc/codes" +) + +// NullInt64 represents a Cloud Spanner INT64 that may be NULL. +type NullInt64 struct { + Int64 int64 + Valid bool // Valid is true if Int64 is not NULL. +} + +// String implements Stringer.String for NullInt64 +func (n NullInt64) String() string { + if !n.Valid { + return fmt.Sprintf("%v", "") + } + return fmt.Sprintf("%v", n.Int64) +} + +// NullString represents a Cloud Spanner STRING that may be NULL. +type NullString struct { + StringVal string + Valid bool // Valid is true if StringVal is not NULL. +} + +// String implements Stringer.String for NullString +func (n NullString) String() string { + if !n.Valid { + return fmt.Sprintf("%v", "") + } + return fmt.Sprintf("%q", n.StringVal) +} + +// NullFloat64 represents a Cloud Spanner FLOAT64 that may be NULL. +type NullFloat64 struct { + Float64 float64 + Valid bool // Valid is true if Float64 is not NULL. +} + +// String implements Stringer.String for NullFloat64 +func (n NullFloat64) String() string { + if !n.Valid { + return fmt.Sprintf("%v", "") + } + return fmt.Sprintf("%v", n.Float64) +} + +// NullBool represents a Cloud Spanner BOOL that may be NULL. +type NullBool struct { + Bool bool + Valid bool // Valid is true if Bool is not NULL. 
+} + +// String implements Stringer.String for NullBool +func (n NullBool) String() string { + if !n.Valid { + return fmt.Sprintf("%v", "") + } + return fmt.Sprintf("%v", n.Bool) +} + +// NullTime represents a Cloud Spanner TIMESTAMP that may be null. +type NullTime struct { + Time time.Time + Valid bool // Valid is true if Time is not NULL. +} + +// String implements Stringer.String for NullTime +func (n NullTime) String() string { + if !n.Valid { + return fmt.Sprintf("%s", "") + } + return fmt.Sprintf("%q", n.Time.Format(time.RFC3339Nano)) +} + +// NullDate represents a Cloud Spanner DATE that may be null. +type NullDate struct { + Date civil.Date + Valid bool // Valid is true if Date is not NULL. +} + +// String implements Stringer.String for NullDate +func (n NullDate) String() string { + if !n.Valid { + return fmt.Sprintf("%s", "") + } + return fmt.Sprintf("%q", n.Date) +} + +// NullRow represents a Cloud Spanner STRUCT that may be NULL. +// See also the document for Row. +// Note that NullRow is not a valid Cloud Spanner column Type. +type NullRow struct { + Row Row + Valid bool // Valid is true if Row is not NULL. +} + +// GenericColumnValue represents the generic encoded value and type of the +// column. See google.spanner.v1.ResultSet proto for details. This can be +// useful for proxying query results when the result types are not known in +// advance. +type GenericColumnValue struct { + Type *sppb.Type + Value *proto3.Value +} + +// Decode decodes a GenericColumnValue. The ptr argument should be a pointer +// to a Go value that can accept v. +func (v GenericColumnValue) Decode(ptr interface{}) error { + return decodeValue(v.Value, v.Type, ptr) +} + +// NewGenericColumnValue creates a GenericColumnValue from Go value that is +// valid for Cloud Spanner. 
+func NewGenericColumnValue(v interface{}) (*GenericColumnValue, error) { + value, typ, err := encodeValue(v) + if err != nil { + return nil, err + } + return &GenericColumnValue{Value: value, Type: typ}, nil +} + +// errTypeMismatch returns error for destination not having a compatible type +// with source Cloud Spanner type. +func errTypeMismatch(srcType sppb.TypeCode, isArray bool, dst interface{}) error { + usage := srcType.String() + if isArray { + usage = fmt.Sprintf("%v[%v]", sppb.TypeCode_ARRAY, srcType) + } + return spannerErrorf(codes.InvalidArgument, "type %T cannot be used for decoding %v", dst, usage) +} + +// errNilSpannerType returns error for nil Cloud Spanner type in decoding. +func errNilSpannerType() error { + return spannerErrorf(codes.FailedPrecondition, "unexpected nil Cloud Spanner data type in decoding") +} + +// errNilSrc returns error for decoding from nil proto value. +func errNilSrc() error { + return spannerErrorf(codes.FailedPrecondition, "unexpected nil Cloud Spanner value in decoding") +} + +// errNilDst returns error for decoding into nil interface{}. +func errNilDst(dst interface{}) error { + return spannerErrorf(codes.InvalidArgument, "cannot decode into nil type %T", dst) +} + +// errNilArrElemType returns error for input Cloud Spanner data type being a array but without a +// non-nil array element type. +func errNilArrElemType(t *sppb.Type) error { + return spannerErrorf(codes.FailedPrecondition, "array type %v is with nil array element type", t) +} + +// errDstNotForNull returns error for decoding a SQL NULL value into a destination which doesn't +// support NULL values. +func errDstNotForNull(dst interface{}) error { + return spannerErrorf(codes.InvalidArgument, "destination %T cannot support NULL SQL values", dst) +} + +// errBadEncoding returns error for decoding wrongly encoded BYTES/INT64. 
+func errBadEncoding(v *proto3.Value, err error) error { + return spannerErrorf(codes.FailedPrecondition, "%v wasn't correctly encoded: <%v>", v, err) +} + +func parseNullTime(v *proto3.Value, p *NullTime, code sppb.TypeCode, isNull bool) error { + if p == nil { + return errNilDst(p) + } + if code != sppb.TypeCode_TIMESTAMP { + return errTypeMismatch(code, false, p) + } + if isNull { + *p = NullTime{} + return nil + } + x, err := getStringValue(v) + if err != nil { + return err + } + y, err := time.Parse(time.RFC3339Nano, x) + if err != nil { + return errBadEncoding(v, err) + } + p.Valid = true + p.Time = y + return nil +} + +// decodeValue decodes a protobuf Value into a pointer to a Go value, as +// specified by sppb.Type. +func decodeValue(v *proto3.Value, t *sppb.Type, ptr interface{}) error { + if v == nil { + return errNilSrc() + } + if t == nil { + return errNilSpannerType() + } + code := t.Code + acode := sppb.TypeCode_TYPE_CODE_UNSPECIFIED + if code == sppb.TypeCode_ARRAY { + if t.ArrayElementType == nil { + return errNilArrElemType(t) + } + acode = t.ArrayElementType.Code + } + typeErr := errTypeMismatch(code, false, ptr) + if code == sppb.TypeCode_ARRAY { + typeErr = errTypeMismatch(acode, true, ptr) + } + nullErr := errDstNotForNull(ptr) + _, isNull := v.Kind.(*proto3.Value_NullValue) + + // Do the decoding based on the type of ptr. 
+ switch p := ptr.(type) { + case nil: + return errNilDst(nil) + case *string: + if p == nil { + return errNilDst(p) + } + if code != sppb.TypeCode_STRING { + return typeErr + } + if isNull { + return nullErr + } + x, err := getStringValue(v) + if err != nil { + return err + } + *p = x + case *NullString: + if p == nil { + return errNilDst(p) + } + if code != sppb.TypeCode_STRING { + return typeErr + } + if isNull { + *p = NullString{} + break + } + x, err := getStringValue(v) + if err != nil { + return err + } + p.Valid = true + p.StringVal = x + case *[]NullString: + if p == nil { + return errNilDst(p) + } + if acode != sppb.TypeCode_STRING { + return typeErr + } + if isNull { + *p = nil + break + } + x, err := getListValue(v) + if err != nil { + return err + } + y, err := decodeStringArray(x) + if err != nil { + return err + } + *p = y + case *[]byte: + if p == nil { + return errNilDst(p) + } + if code != sppb.TypeCode_BYTES { + return typeErr + } + if isNull { + *p = nil + break + } + x, err := getStringValue(v) + if err != nil { + return err + } + y, err := base64.StdEncoding.DecodeString(x) + if err != nil { + return errBadEncoding(v, err) + } + *p = y + case *[][]byte: + if p == nil { + return errNilDst(p) + } + if acode != sppb.TypeCode_BYTES { + return typeErr + } + if isNull { + *p = nil + break + } + x, err := getListValue(v) + if err != nil { + return err + } + y, err := decodeByteArray(x) + if err != nil { + return err + } + *p = y + case *int64: + if p == nil { + return errNilDst(p) + } + if code != sppb.TypeCode_INT64 { + return typeErr + } + if isNull { + return nullErr + } + x, err := getStringValue(v) + if err != nil { + return err + } + y, err := strconv.ParseInt(x, 10, 64) + if err != nil { + return errBadEncoding(v, err) + } + *p = y + case *NullInt64: + if p == nil { + return errNilDst(p) + } + if code != sppb.TypeCode_INT64 { + return typeErr + } + if isNull { + *p = NullInt64{} + break + } + x, err := getStringValue(v) + if err != nil { + 
return err + } + y, err := strconv.ParseInt(x, 10, 64) + if err != nil { + return errBadEncoding(v, err) + } + p.Valid = true + p.Int64 = y + case *[]NullInt64: + if p == nil { + return errNilDst(p) + } + if acode != sppb.TypeCode_INT64 { + return typeErr + } + if isNull { + *p = nil + break + } + x, err := getListValue(v) + if err != nil { + return err + } + y, err := decodeIntArray(x) + if err != nil { + return err + } + *p = y + case *bool: + if p == nil { + return errNilDst(p) + } + if code != sppb.TypeCode_BOOL { + return typeErr + } + if isNull { + return nullErr + } + x, err := getBoolValue(v) + if err != nil { + return err + } + *p = x + case *NullBool: + if p == nil { + return errNilDst(p) + } + if code != sppb.TypeCode_BOOL { + return typeErr + } + if isNull { + *p = NullBool{} + break + } + x, err := getBoolValue(v) + if err != nil { + return err + } + p.Valid = true + p.Bool = x + case *[]NullBool: + if p == nil { + return errNilDst(p) + } + if acode != sppb.TypeCode_BOOL { + return typeErr + } + if isNull { + *p = nil + break + } + x, err := getListValue(v) + if err != nil { + return err + } + y, err := decodeBoolArray(x) + if err != nil { + return err + } + *p = y + case *float64: + if p == nil { + return errNilDst(p) + } + if code != sppb.TypeCode_FLOAT64 { + return typeErr + } + if isNull { + return nullErr + } + x, err := getFloat64Value(v) + if err != nil { + return err + } + *p = x + case *NullFloat64: + if p == nil { + return errNilDst(p) + } + if code != sppb.TypeCode_FLOAT64 { + return typeErr + } + if isNull { + *p = NullFloat64{} + break + } + x, err := getFloat64Value(v) + if err != nil { + return err + } + p.Valid = true + p.Float64 = x + case *[]NullFloat64: + if p == nil { + return errNilDst(p) + } + if acode != sppb.TypeCode_FLOAT64 { + return typeErr + } + if isNull { + *p = nil + break + } + x, err := getListValue(v) + if err != nil { + return err + } + y, err := decodeFloat64Array(x) + if err != nil { + return err + } + *p = y + case 
*time.Time: + var nt NullTime + if isNull { + return nullErr + } + err := parseNullTime(v, &nt, code, isNull) + if err != nil { + return nil + } + *p = nt.Time + case *NullTime: + err := parseNullTime(v, p, code, isNull) + if err != nil { + return err + } + case *[]NullTime: + if p == nil { + return errNilDst(p) + } + if acode != sppb.TypeCode_TIMESTAMP { + return typeErr + } + if isNull { + *p = nil + break + } + x, err := getListValue(v) + if err != nil { + return err + } + y, err := decodeTimeArray(x) + if err != nil { + return err + } + *p = y + case *civil.Date: + if p == nil { + return errNilDst(p) + } + if code != sppb.TypeCode_DATE { + return typeErr + } + if isNull { + return nullErr + } + x, err := getStringValue(v) + if err != nil { + return err + } + y, err := civil.ParseDate(x) + if err != nil { + return errBadEncoding(v, err) + } + *p = y + case *NullDate: + if p == nil { + return errNilDst(p) + } + if code != sppb.TypeCode_DATE { + return typeErr + } + if isNull { + *p = NullDate{} + break + } + x, err := getStringValue(v) + if err != nil { + return err + } + y, err := civil.ParseDate(x) + if err != nil { + return errBadEncoding(v, err) + } + p.Valid = true + p.Date = y + case *[]NullDate: + if p == nil { + return errNilDst(p) + } + if acode != sppb.TypeCode_DATE { + return typeErr + } + if isNull { + *p = nil + break + } + x, err := getListValue(v) + if err != nil { + return err + } + y, err := decodeDateArray(x) + if err != nil { + return err + } + *p = y + case *[]NullRow: + if p == nil { + return errNilDst(p) + } + if acode != sppb.TypeCode_STRUCT { + return typeErr + } + if isNull { + *p = nil + break + } + x, err := getListValue(v) + if err != nil { + return err + } + y, err := decodeRowArray(t.ArrayElementType.StructType, x) + if err != nil { + return err + } + *p = y + case *GenericColumnValue: + *p = GenericColumnValue{ + // Deep clone to ensure subsequent changes to t or v + // don't affect our decoded value. 
+ Type: proto.Clone(t).(*sppb.Type), + Value: proto.Clone(v).(*proto3.Value), + } + default: + // Check if the proto encoding is for an array of structs. + if !(code == sppb.TypeCode_ARRAY && acode == sppb.TypeCode_STRUCT) { + return typeErr + } + vp := reflect.ValueOf(p) + if !vp.IsValid() { + return errNilDst(p) + } + if !isPtrStructPtrSlice(vp.Type()) { + // The container is not a pointer to a struct pointer slice. + return typeErr + } + // Only use reflection for nil detection on slow path. + // Also, IsNil panics on many types, so check it after the type check. + if vp.IsNil() { + return errNilDst(p) + } + if isNull { + // The proto Value is encoding NULL, set the pointer to struct + // slice to nil as well. + vp.Elem().Set(reflect.Zero(vp.Elem().Type())) + break + } + x, err := getListValue(v) + if err != nil { + return err + } + if err = decodeStructArray(t.ArrayElementType.StructType, x, p); err != nil { + return err + } + } + return nil +} + +// errSrvVal returns an error for getting a wrong source protobuf value in decoding. +func errSrcVal(v *proto3.Value, want string) error { + return spannerErrorf(codes.FailedPrecondition, "cannot use %v(Kind: %T) as Value_%sValue in decoding", + v, v.GetKind(), want) +} + +// getStringValue returns the string value encoded in proto3.Value v whose +// kind is proto3.Value_StringValue. +func getStringValue(v *proto3.Value) (string, error) { + if x, ok := v.GetKind().(*proto3.Value_StringValue); ok && x != nil { + return x.StringValue, nil + } + return "", errSrcVal(v, "String") +} + +// getBoolValue returns the bool value encoded in proto3.Value v whose +// kind is proto3.Value_BoolValue. +func getBoolValue(v *proto3.Value) (bool, error) { + if x, ok := v.GetKind().(*proto3.Value_BoolValue); ok && x != nil { + return x.BoolValue, nil + } + return false, errSrcVal(v, "Bool") +} + +// getListValue returns the proto3.ListValue contained in proto3.Value v whose +// kind is proto3.Value_ListValue. 
+func getListValue(v *proto3.Value) (*proto3.ListValue, error) { + if x, ok := v.GetKind().(*proto3.Value_ListValue); ok && x != nil { + return x.ListValue, nil + } + return nil, errSrcVal(v, "List") +} + +// errUnexpectedNumStr returns error for decoder getting a unexpected string for +// representing special float values. +func errUnexpectedNumStr(s string) error { + return spannerErrorf(codes.FailedPrecondition, "unexpected string value %q for number", s) +} + +// getFloat64Value returns the float64 value encoded in proto3.Value v whose +// kind is proto3.Value_NumberValue / proto3.Value_StringValue. +// Cloud Spanner uses string to encode NaN, Infinity and -Infinity. +func getFloat64Value(v *proto3.Value) (float64, error) { + switch x := v.GetKind().(type) { + case *proto3.Value_NumberValue: + if x == nil { + break + } + return x.NumberValue, nil + case *proto3.Value_StringValue: + if x == nil { + break + } + switch x.StringValue { + case "NaN": + return math.NaN(), nil + case "Infinity": + return math.Inf(1), nil + case "-Infinity": + return math.Inf(-1), nil + default: + return 0, errUnexpectedNumStr(x.StringValue) + } + } + return 0, errSrcVal(v, "Number") +} + +// errNilListValue returns error for unexpected nil ListValue in decoding Cloud Spanner ARRAYs. +func errNilListValue(sqlType string) error { + return spannerErrorf(codes.FailedPrecondition, "unexpected nil ListValue in decoding %v array", sqlType) +} + +// errDecodeArrayElement returns error for failure in decoding single array element. +func errDecodeArrayElement(i int, v proto.Message, sqlType string, err error) error { + se, ok := toSpannerError(err).(*Error) + if !ok { + return spannerErrorf(codes.Unknown, + "cannot decode %v(array element %v) as %v, error = <%v>", v, i, sqlType, err) + } + se.decorate(fmt.Sprintf("cannot decode %v(array element %v) as %v", v, i, sqlType)) + return se +} + +// decodeStringArray decodes proto3.ListValue pb into a NullString slice. 
+func decodeStringArray(pb *proto3.ListValue) ([]NullString, error) { + if pb == nil { + return nil, errNilListValue("STRING") + } + a := make([]NullString, len(pb.Values)) + for i, v := range pb.Values { + if err := decodeValue(v, stringType(), &a[i]); err != nil { + return nil, errDecodeArrayElement(i, v, "STRING", err) + } + } + return a, nil +} + +// decodeIntArray decodes proto3.ListValue pb into a NullInt64 slice. +func decodeIntArray(pb *proto3.ListValue) ([]NullInt64, error) { + if pb == nil { + return nil, errNilListValue("INT64") + } + a := make([]NullInt64, len(pb.Values)) + for i, v := range pb.Values { + if err := decodeValue(v, intType(), &a[i]); err != nil { + return nil, errDecodeArrayElement(i, v, "INT64", err) + } + } + return a, nil +} + +// decodeBoolArray decodes proto3.ListValue pb into a NullBool slice. +func decodeBoolArray(pb *proto3.ListValue) ([]NullBool, error) { + if pb == nil { + return nil, errNilListValue("BOOL") + } + a := make([]NullBool, len(pb.Values)) + for i, v := range pb.Values { + if err := decodeValue(v, boolType(), &a[i]); err != nil { + return nil, errDecodeArrayElement(i, v, "BOOL", err) + } + } + return a, nil +} + +// decodeFloat64Array decodes proto3.ListValue pb into a NullFloat64 slice. +func decodeFloat64Array(pb *proto3.ListValue) ([]NullFloat64, error) { + if pb == nil { + return nil, errNilListValue("FLOAT64") + } + a := make([]NullFloat64, len(pb.Values)) + for i, v := range pb.Values { + if err := decodeValue(v, floatType(), &a[i]); err != nil { + return nil, errDecodeArrayElement(i, v, "FLOAT64", err) + } + } + return a, nil +} + +// decodeByteArray decodes proto3.ListValue pb into a slice of byte slice. 
+func decodeByteArray(pb *proto3.ListValue) ([][]byte, error) { + if pb == nil { + return nil, errNilListValue("BYTES") + } + a := make([][]byte, len(pb.Values)) + for i, v := range pb.Values { + if err := decodeValue(v, bytesType(), &a[i]); err != nil { + return nil, errDecodeArrayElement(i, v, "BYTES", err) + } + } + return a, nil +} + +// decodeTimeArray decodes proto3.ListValue pb into a NullTime slice. +func decodeTimeArray(pb *proto3.ListValue) ([]NullTime, error) { + if pb == nil { + return nil, errNilListValue("TIMESTAMP") + } + a := make([]NullTime, len(pb.Values)) + for i, v := range pb.Values { + if err := decodeValue(v, timeType(), &a[i]); err != nil { + return nil, errDecodeArrayElement(i, v, "TIMESTAMP", err) + } + } + return a, nil +} + +// decodeDateArray decodes proto3.ListValue pb into a NullDate slice. +func decodeDateArray(pb *proto3.ListValue) ([]NullDate, error) { + if pb == nil { + return nil, errNilListValue("DATE") + } + a := make([]NullDate, len(pb.Values)) + for i, v := range pb.Values { + if err := decodeValue(v, dateType(), &a[i]); err != nil { + return nil, errDecodeArrayElement(i, v, "DATE", err) + } + } + return a, nil +} + +func errNotStructElement(i int, v *proto3.Value) error { + return errDecodeArrayElement(i, v, "STRUCT", + spannerErrorf(codes.FailedPrecondition, "%v(type: %T) doesn't encode Cloud Spanner STRUCT", v, v)) +} + +// decodeRowArray decodes proto3.ListValue pb into a NullRow slice according to +// the structual information given in sppb.StructType ty. 
func decodeRowArray(ty *sppb.StructType, pb *proto3.ListValue, ) ([]NullRow, error) {
	if pb == nil {
		return nil, errNilListValue("STRUCT")
	}
	a := make([]NullRow, len(pb.Values))
	for i := range pb.Values {
		switch v := pb.Values[i].GetKind().(type) {
		case *proto3.Value_ListValue:
			a[i] = NullRow{
				Row: Row{
					fields: ty.Fields,
					vals:   v.ListValue.Values,
				},
				Valid: true,
			}
		// Null elements not currently supported by the server, see
		// https://cloud.google.com/spanner/docs/query-syntax#using-structs-with-select
		case *proto3.Value_NullValue:
			// no-op, a[i] is NullRow{} already
		default:
			return nil, errNotStructElement(i, pb.Values[i])
		}
	}
	return a, nil
}

// structFieldColumn returns the name of i-th field of struct type typ if the field
// is untagged; otherwise, it returns the tagged name of the field.
func structFieldColumn(typ reflect.Type, i int) (col string, ok bool) {
	desc := typ.Field(i)
	if desc.PkgPath != "" || desc.Anonymous {
		// Skip unexported or anonymous fields.
		return "", false
	}
	col = desc.Name
	if tag := desc.Tag.Get("spanner"); tag != "" {
		if tag == "-" {
			// Skip fields tagged "-" to match encoding/json and others.
			return "", false
		}
		col = tag
		// A tag may carry trailing options after a comma; only the part
		// before the first comma is the column name.
		if idx := strings.Index(tag, ","); idx != -1 {
			col = tag[:idx]
		}
	}
	return col, true
}

// errNilSpannerStructType returns error for unexpected nil Cloud Spanner STRUCT schema type in decoding.
func errNilSpannerStructType() error {
	return spannerErrorf(codes.FailedPrecondition, "unexpected nil StructType in decoding Cloud Spanner STRUCT")
}

// errUnnamedField returns error for decoding a Cloud Spanner STRUCT with unnamed field into a Go struct.
func errUnnamedField(ty *sppb.StructType, i int) error {
	return spannerErrorf(codes.InvalidArgument, "unnamed field %v in Cloud Spanner STRUCT %+v", i, ty)
}

// errNoOrDupGoField returns error for decoding a Cloud Spanner
// STRUCT into a Go struct which is either missing a field, or has duplicate fields.
func errNoOrDupGoField(s interface{}, f string) error {
	return spannerErrorf(codes.InvalidArgument, "Go struct %+v(type %T) has no or duplicate fields for Cloud Spanner STRUCT field %v", s, s, f)
}

// errDupSpannerField returns error for duplicated Cloud Spanner STRUCT field names found in decoding a Cloud Spanner STRUCT into a Go struct.
func errDupSpannerField(f string, ty *sppb.StructType) error {
	return spannerErrorf(codes.InvalidArgument, "duplicated field name %q in Cloud Spanner STRUCT %+v", f, ty)
}

// errDecodeStructField returns error for failure in decoding a single field of a Cloud Spanner STRUCT.
func errDecodeStructField(ty *sppb.StructType, f string, err error) error {
	se, ok := toSpannerError(err).(*Error)
	if !ok {
		return spannerErrorf(codes.Unknown,
			"cannot decode field %v of Cloud Spanner STRUCT %+v, error = <%v>", f, ty, err)
	}
	se.decorate(fmt.Sprintf("cannot decode field %v of Cloud Spanner STRUCT %+v", f, ty))
	return se
}

// decodeStruct decodes proto3.ListValue pb into struct referenced by pointer ptr, according to
// the structural information given in sppb.StructType ty.
//
// NOTE(review): pb is indexed below (pb.Values[i]) without a nil/length
// guard; a nil pb or one with fewer values than ty.Fields would panic here.
// All current callers (decodeStructArray) pass a non-nil list — confirm
// before exposing this to new callers.
func decodeStruct(ty *sppb.StructType, pb *proto3.ListValue, ptr interface{}) error {
	if reflect.ValueOf(ptr).IsNil() {
		return errNilDst(ptr)
	}
	if ty == nil {
		return errNilSpannerStructType()
	}
	// t holds the structural information of ptr.
	t := reflect.TypeOf(ptr).Elem()
	// v is the actual value that ptr points to.
	v := reflect.ValueOf(ptr).Elem()

	fields, err := fieldCache.Fields(t)
	if err != nil {
		return toSpannerError(err)
	}
	seen := map[string]bool{}
	for i, f := range ty.Fields {
		if f.Name == "" {
			return errUnnamedField(ty, i)
		}
		sf := fields.Match(f.Name)
		if sf == nil {
			return errNoOrDupGoField(ptr, f.Name)
		}
		if seen[f.Name] {
			// We don't allow duplicated field name.
			return errDupSpannerField(f.Name, ty)
		}
		// Try to decode a single field.
		if err := decodeValue(pb.Values[i], f.Type, v.FieldByIndex(sf.Index).Addr().Interface()); err != nil {
			return errDecodeStructField(ty, f.Name, err)
		}
		// Mark field f.Name as processed.
		seen[f.Name] = true
	}
	return nil
}

// isPtrStructPtrSlice returns true if ptr is a pointer to a slice of struct pointers.
func isPtrStructPtrSlice(t reflect.Type) bool {
	if t.Kind() != reflect.Ptr || t.Elem().Kind() != reflect.Slice {
		// t is not a pointer to a slice.
		return false
	}
	if t = t.Elem(); t.Elem().Kind() != reflect.Ptr || t.Elem().Elem().Kind() != reflect.Struct {
		// the slice that t points to is not a slice of struct pointers.
		return false
	}
	return true
}

// decodeStructArray decodes proto3.ListValue pb into struct slice referenced by pointer ptr, according to the
// structural information given in a sppb.StructType.
func decodeStructArray(ty *sppb.StructType, pb *proto3.ListValue, ptr interface{}) error {
	if pb == nil {
		return errNilListValue("STRUCT")
	}
	// Type of the struct pointers stored in the slice that ptr points to.
	ts := reflect.TypeOf(ptr).Elem().Elem()
	// The slice that ptr points to, might be nil at this point.
	v := reflect.ValueOf(ptr).Elem()
	// Allocate empty slice.
	v.Set(reflect.MakeSlice(v.Type(), 0, len(pb.Values)))
	// Decode every struct in pb.Values.
	for i, pv := range pb.Values {
		// Check if pv is a NULL value.
		if _, isNull := pv.Kind.(*proto3.Value_NullValue); isNull {
			// Append a nil pointer to the slice.
			v.Set(reflect.Append(v, reflect.New(ts).Elem()))
			continue
		}
		// Allocate empty struct.
		s := reflect.New(ts.Elem())
		// Get proto3.ListValue l from proto3.Value pv.
		l, err := getListValue(pv)
		if err != nil {
			return errDecodeArrayElement(i, pv, "STRUCT", err)
		}
		// Decode proto3.ListValue l into struct referenced by s.Interface().
		if err = decodeStruct(ty, l, s.Interface()); err != nil {
			return errDecodeArrayElement(i, pv, "STRUCT", err)
		}
		// Append the decoded struct back into the slice.
		v.Set(reflect.Append(v, s))
	}
	return nil
}

// errEncoderUnsupportedType returns error for not being able to encode a value of
// certain type.
func errEncoderUnsupportedType(v interface{}) error {
	return spannerErrorf(codes.InvalidArgument, "encoder doesn't support type %T", v)
}

// encodeValue encodes a Go native type into a proto3.Value.
// A nil value of any supported slice/pointer-like type, and an invalid
// Null* wrapper, encode as proto NULL with a nil *sppb.Type.
func encodeValue(v interface{}) (*proto3.Value, *sppb.Type, error) {
	// Default to NULL; each case overwrites pb.Kind (and pt) for non-NULL input.
	pb := &proto3.Value{
		Kind: &proto3.Value_NullValue{NullValue: proto3.NullValue_NULL_VALUE},
	}
	var pt *sppb.Type
	var err error
	switch v := v.(type) {
	case nil:
	case string:
		pb.Kind = stringKind(v)
		pt = stringType()
	case NullString:
		if v.Valid {
			return encodeValue(v.StringVal)
		}
	case []string:
		if v != nil {
			pb, err = encodeArray(len(v), func(i int) interface{} { return v[i] })
			if err != nil {
				return nil, nil, err
			}
			pt = listType(stringType())
		}
	case []NullString:
		if v != nil {
			pb, err = encodeArray(len(v), func(i int) interface{} { return v[i] })
			if err != nil {
				return nil, nil, err
			}
			pt = listType(stringType())
		}
	case []byte:
		if v != nil {
			// BYTES are sent base64-encoded inside a string value.
			pb.Kind = stringKind(base64.StdEncoding.EncodeToString(v))
			pt = bytesType()
		}
	case [][]byte:
		if v != nil {
			pb, err = encodeArray(len(v), func(i int) interface{} { return v[i] })
			if err != nil {
				return nil, nil, err
			}
			pt = listType(bytesType())
		}
	case int:
		// INT64 is sent as a decimal string to survive JSON number limits.
		pb.Kind = stringKind(strconv.FormatInt(int64(v), 10))
		pt = intType()
	case []int:
		if v != nil {
			pb, err = encodeArray(len(v), func(i int) interface{} { return v[i] })
			if err != nil {
				return nil, nil, err
			}
			pt = listType(intType())
		}
	case int64:
		pb.Kind = stringKind(strconv.FormatInt(v, 10))
		pt = intType()
	case []int64:
		if v != nil {
			pb, err = encodeArray(len(v), func(i int) interface{} { return v[i] })
			if err != nil {
				return nil, nil, err
			}
			pt = listType(intType())
		}
	case NullInt64:
		if v.Valid {
			return encodeValue(v.Int64)
		}
	case []NullInt64:
		if v != nil {
			pb, err = encodeArray(len(v), func(i int) interface{} { return v[i] })
			if err != nil {
				return nil, nil, err
			}
			pt = listType(intType())
		}
	case bool:
		pb.Kind = &proto3.Value_BoolValue{BoolValue: v}
		pt = boolType()
	case []bool:
		if v != nil {
			pb, err = encodeArray(len(v), func(i int) interface{} { return v[i] })
			if err != nil {
				return nil, nil, err
			}
			pt = listType(boolType())
		}
	case NullBool:
		if v.Valid {
			return encodeValue(v.Bool)
		}
	case []NullBool:
		if v != nil {
			pb, err = encodeArray(len(v), func(i int) interface{} { return v[i] })
			if err != nil {
				return nil, nil, err
			}
			pt = listType(boolType())
		}
	case float64:
		pb.Kind = &proto3.Value_NumberValue{NumberValue: v}
		pt = floatType()
	case []float64:
		if v != nil {
			pb, err = encodeArray(len(v), func(i int) interface{} { return v[i] })
			if err != nil {
				return nil, nil, err
			}
			pt = listType(floatType())
		}
	case NullFloat64:
		if v.Valid {
			return encodeValue(v.Float64)
		}
	case []NullFloat64:
		if v != nil {
			pb, err = encodeArray(len(v), func(i int) interface{} { return v[i] })
			if err != nil {
				return nil, nil, err
			}
			pt = listType(floatType())
		}
	case time.Time:
		// Timestamps are normalized to UTC and sent as RFC 3339 strings.
		pb.Kind = stringKind(v.UTC().Format(time.RFC3339Nano))
		pt = timeType()
	case []time.Time:
		if v != nil {
			pb, err = encodeArray(len(v), func(i int) interface{} { return v[i] })
			if err != nil {
				return nil, nil, err
			}
			pt = listType(timeType())
		}
	case NullTime:
		if v.Valid {
			return encodeValue(v.Time)
		}
	case []NullTime:
		if v != nil {
			pb, err = encodeArray(len(v), func(i int) interface{} { return v[i] })
			if err != nil {
				return nil, nil, err
			}
			pt = listType(timeType())
		}
	case civil.Date:
		pb.Kind = stringKind(v.String())
		pt = dateType()
	case []civil.Date:
		if v != nil {
			pb, err = encodeArray(len(v), func(i int) interface{} { return v[i] })
			if err != nil {
				return nil, nil, err
			}
			pt = listType(dateType())
		}
	case NullDate:
		if v.Valid {
			return encodeValue(v.Date)
		}
	case []NullDate:
		if v != nil {
			pb, err = encodeArray(len(v), func(i int) interface{} { return v[i] })
			if err != nil {
				return nil, nil, err
			}
			pt = listType(dateType())
		}
	case GenericColumnValue:
		// Deep clone to ensure subsequent changes to v before
		// transmission don't affect our encoded value.
		pb = proto.Clone(v.Value).(*proto3.Value)
		pt = proto.Clone(v.Type).(*sppb.Type)
	default:
		return nil, nil, errEncoderUnsupportedType(v)
	}
	return pb, pt, nil
}

// encodeValueArray encodes a Value array into a proto3.ListValue.
func encodeValueArray(vs []interface{}) (*proto3.ListValue, error) {
	lv := &proto3.ListValue{}
	lv.Values = make([]*proto3.Value, 0, len(vs))
	for _, v := range vs {
		pb, _, err := encodeValue(v)
		if err != nil {
			return nil, err
		}
		lv.Values = append(lv.Values, pb)
	}
	return lv, nil
}

// encodeArray assumes that all values of the array element type encode without error.
+func encodeArray(len int, at func(int) interface{}) (*proto3.Value, error) { + vs := make([]*proto3.Value, len) + var err error + for i := 0; i < len; i++ { + vs[i], _, err = encodeValue(at(i)) + if err != nil { + return nil, err + } + } + return listProto(vs...), nil +} + +func spannerTagParser(t reflect.StructTag) (name string, keep bool, other interface{}, err error) { + if s := t.Get("spanner"); s != "" { + if s == "-" { + return "", false, nil, nil + } + return s, true, nil, nil + } + return "", true, nil, nil +} + +var fieldCache = fields.NewCache(spannerTagParser, nil, nil) diff --git a/vendor/cloud.google.com/go/spanner/value_test.go b/vendor/cloud.google.com/go/spanner/value_test.go new file mode 100644 index 00000000..1ea0529d --- /dev/null +++ b/vendor/cloud.google.com/go/spanner/value_test.go @@ -0,0 +1,611 @@ +/* +Copyright 2017 Google Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package spanner + +import ( + "math" + "reflect" + "testing" + "time" + + "cloud.google.com/go/civil" + "github.com/golang/protobuf/proto" + proto3 "github.com/golang/protobuf/ptypes/struct" + sppb "google.golang.org/genproto/googleapis/spanner/v1" +) + +var ( + t1 = mustParseTime("2016-11-15T15:04:05.999999999Z") + // Boundaries + t2 = mustParseTime("0000-01-01T00:00:00.000000000Z") + t3 = mustParseTime("9999-12-31T23:59:59.999999999Z") + // Local timezone + t4 = time.Now() + d1 = mustParseDate("2016-11-15") + d2 = mustParseDate("1678-01-01") +) + +func mustParseTime(s string) time.Time { + t, err := time.Parse(time.RFC3339Nano, s) + if err != nil { + panic(err) + } + return t +} + +func mustParseDate(s string) civil.Date { + d, err := civil.ParseDate(s) + if err != nil { + panic(err) + } + return d +} + +// Test encoding Values. +func TestEncodeValue(t *testing.T) { + var ( + tString = stringType() + tInt = intType() + tBool = boolType() + tFloat = floatType() + tBytes = bytesType() + tTime = timeType() + tDate = dateType() + ) + for i, test := range []struct { + in interface{} + want *proto3.Value + wantType *sppb.Type + }{ + // STRING / STRING ARRAY + {"abc", stringProto("abc"), tString}, + {NullString{"abc", true}, stringProto("abc"), tString}, + {NullString{"abc", false}, nullProto(), nil}, + {[]string{"abc", "bcd"}, listProto(stringProto("abc"), stringProto("bcd")), listType(tString)}, + {[]NullString{{"abcd", true}, {"xyz", false}}, listProto(stringProto("abcd"), nullProto()), listType(tString)}, + // BYTES / BYTES ARRAY + {[]byte("foo"), bytesProto([]byte("foo")), tBytes}, + {[]byte(nil), nullProto(), nil}, + {[][]byte{nil, []byte("ab")}, listProto(nullProto(), bytesProto([]byte("ab"))), listType(tBytes)}, + {[][]byte(nil), nullProto(), nil}, + // INT64 / INT64 ARRAY + {7, intProto(7), tInt}, + {[]int{31, 127}, listProto(intProto(31), intProto(127)), listType(tInt)}, + {int64(81), intProto(81), tInt}, + {[]int64{33, 129}, listProto(intProto(33), 
intProto(129)), listType(tInt)}, + {NullInt64{11, true}, intProto(11), tInt}, + {NullInt64{11, false}, nullProto(), nil}, + {[]NullInt64{{35, true}, {131, false}}, listProto(intProto(35), nullProto()), listType(tInt)}, + // BOOL / BOOL ARRAY + {true, boolProto(true), tBool}, + {NullBool{true, true}, boolProto(true), tBool}, + {NullBool{true, false}, nullProto(), nil}, + {[]bool{true, false}, listProto(boolProto(true), boolProto(false)), listType(tBool)}, + {[]NullBool{{true, true}, {true, false}}, listProto(boolProto(true), nullProto()), listType(tBool)}, + // FLOAT64 / FLOAT64 ARRAY + {3.14, floatProto(3.14), tFloat}, + {NullFloat64{3.1415, true}, floatProto(3.1415), tFloat}, + {NullFloat64{math.Inf(1), true}, floatProto(math.Inf(1)), tFloat}, + {NullFloat64{3.14159, false}, nullProto(), nil}, + {[]float64{3.141, 0.618, math.Inf(-1)}, listProto(floatProto(3.141), floatProto(0.618), floatProto(math.Inf(-1))), listType(tFloat)}, + {[]NullFloat64{{3.141, true}, {0.618, false}}, listProto(floatProto(3.141), nullProto()), listType(tFloat)}, + // TIMESTAMP / TIMESTAMP ARRAY + {t1, timeProto(t1), tTime}, + {NullTime{t1, true}, timeProto(t1), tTime}, + {NullTime{t1, false}, nullProto(), nil}, + {[]time.Time{t1, t2, t3, t4}, listProto(timeProto(t1), timeProto(t2), timeProto(t3), timeProto(t4)), listType(tTime)}, + {[]NullTime{{t1, true}, {t1, false}}, listProto(timeProto(t1), nullProto()), listType(tTime)}, + // DATE / DATE ARRAY + {d1, dateProto(d1), tDate}, + {NullDate{d1, true}, dateProto(d1), tDate}, + {NullDate{civil.Date{}, false}, nullProto(), nil}, + {[]civil.Date{d1, d2}, listProto(dateProto(d1), dateProto(d2)), listType(tDate)}, + {[]NullDate{{d1, true}, {civil.Date{}, false}}, listProto(dateProto(d1), nullProto()), listType(tDate)}, + // GenericColumnValue + {GenericColumnValue{tString, stringProto("abc")}, stringProto("abc"), tString}, + {GenericColumnValue{tString, nullProto()}, nullProto(), tString}, + // not actually valid (stringProto inside int list), but 
demonstrates pass-through. + { + GenericColumnValue{ + Type: listType(tInt), + Value: listProto(intProto(5), nullProto(), stringProto("bcd")), + }, + listProto(intProto(5), nullProto(), stringProto("bcd")), + listType(tInt), + }, + } { + got, gotType, err := encodeValue(test.in) + if err != nil { + t.Fatalf("#%d: got error during encoding: %v, want nil", i, err) + } + if !reflect.DeepEqual(got, test.want) { + t.Errorf("#%d: got encode result: %v, want %v", i, got, test.want) + } + if !reflect.DeepEqual(gotType, test.wantType) { + t.Errorf("#%d: got encode type: %v, want %v", i, gotType, test.wantType) + } + } +} + +// Test decoding Values. +func TestDecodeValue(t *testing.T) { + for i, test := range []struct { + in *proto3.Value + t *sppb.Type + want interface{} + fail bool + }{ + // STRING + {stringProto("abc"), stringType(), "abc", false}, + {nullProto(), stringType(), "abc", true}, + {stringProto("abc"), stringType(), NullString{"abc", true}, false}, + {nullProto(), stringType(), NullString{}, false}, + // STRING ARRAY + { + listProto(stringProto("abc"), nullProto(), stringProto("bcd")), + listType(stringType()), + []NullString{{"abc", true}, {}, {"bcd", true}}, + false, + }, + {nullProto(), listType(stringType()), []NullString(nil), false}, + // BYTES + {bytesProto([]byte("ab")), bytesType(), []byte("ab"), false}, + {nullProto(), bytesType(), []byte(nil), false}, + // BYTES ARRAY + {listProto(bytesProto([]byte("ab")), nullProto()), listType(bytesType()), [][]byte{[]byte("ab"), nil}, false}, + {nullProto(), listType(bytesType()), [][]byte(nil), false}, + //INT64 + {intProto(15), intType(), int64(15), false}, + {nullProto(), intType(), int64(0), true}, + {intProto(15), intType(), NullInt64{15, true}, false}, + {nullProto(), intType(), NullInt64{}, false}, + // INT64 ARRAY + {listProto(intProto(91), nullProto(), intProto(87)), listType(intType()), []NullInt64{{91, true}, {}, {87, true}}, false}, + {nullProto(), listType(intType()), []NullInt64(nil), false}, + // 
BOOL + {boolProto(true), boolType(), true, false}, + {nullProto(), boolType(), true, true}, + {boolProto(true), boolType(), NullBool{true, true}, false}, + {nullProto(), boolType(), NullBool{}, false}, + // BOOL ARRAY + {listProto(boolProto(true), boolProto(false), nullProto()), listType(boolType()), []NullBool{{true, true}, {false, true}, {}}, false}, + {nullProto(), listType(boolType()), []NullBool(nil), false}, + // FLOAT64 + {floatProto(3.14), floatType(), 3.14, false}, + {nullProto(), floatType(), 0.00, true}, + {floatProto(3.14), floatType(), NullFloat64{3.14, true}, false}, + {nullProto(), floatType(), NullFloat64{}, false}, + // FLOAT64 ARRAY + { + listProto(floatProto(math.Inf(1)), floatProto(math.Inf(-1)), nullProto(), floatProto(3.1)), + listType(floatType()), + []NullFloat64{{math.Inf(1), true}, {math.Inf(-1), true}, {}, {3.1, true}}, + false, + }, + {nullProto(), listType(floatType()), []NullFloat64(nil), false}, + // TIMESTAMP + {timeProto(t1), timeType(), t1, false}, + {timeProto(t1), timeType(), NullTime{t1, true}, false}, + {nullProto(), timeType(), NullTime{}, false}, + // TIMESTAMP ARRAY + {listProto(timeProto(t1), timeProto(t2), timeProto(t3), nullProto()), listType(timeType()), []NullTime{{t1, true}, {t2, true}, {t3, true}, {}}, false}, + {nullProto(), listType(timeType()), []NullTime(nil), false}, + // DATE + {dateProto(d1), dateType(), d1, false}, + {dateProto(d1), dateType(), NullDate{d1, true}, false}, + {nullProto(), dateType(), NullDate{}, false}, + // DATE ARRAY + {listProto(dateProto(d1), dateProto(d2), nullProto()), listType(dateType()), []NullDate{{d1, true}, {d2, true}, {}}, false}, + {nullProto(), listType(dateType()), []NullDate(nil), false}, + // STRUCT ARRAY + // STRUCT schema is equal to the following Go struct: + // type s struct { + // Col1 NullInt64 + // Col2 []struct { + // SubCol1 float64 + // SubCol2 string + // } + // } + { + in: listProto( + listProto( + intProto(3), + listProto( + listProto(floatProto(3.14), 
stringProto("this")), + listProto(floatProto(0.57), stringProto("siht")), + ), + ), + listProto( + nullProto(), + nullProto(), + ), + nullProto(), + ), + t: listType( + structType( + mkField("Col1", intType()), + mkField( + "Col2", + listType( + structType( + mkField("SubCol1", floatType()), + mkField("SubCol2", stringType()), + ), + ), + ), + ), + ), + want: []NullRow{ + { + Row: Row{ + fields: []*sppb.StructType_Field{ + mkField("Col1", intType()), + mkField( + "Col2", + listType( + structType( + mkField("SubCol1", floatType()), + mkField("SubCol2", stringType()), + ), + ), + ), + }, + vals: []*proto3.Value{ + intProto(3), + listProto( + listProto(floatProto(3.14), stringProto("this")), + listProto(floatProto(0.57), stringProto("siht")), + ), + }, + }, + Valid: true, + }, + { + Row: Row{ + fields: []*sppb.StructType_Field{ + mkField("Col1", intType()), + mkField( + "Col2", + listType( + structType( + mkField("SubCol1", floatType()), + mkField("SubCol2", stringType()), + ), + ), + ), + }, + vals: []*proto3.Value{ + nullProto(), + nullProto(), + }, + }, + Valid: true, + }, + {}, + }, + fail: false, + }, + { + in: listProto( + listProto( + intProto(3), + listProto( + listProto(floatProto(3.14), stringProto("this")), + listProto(floatProto(0.57), stringProto("siht")), + ), + ), + listProto( + nullProto(), + nullProto(), + ), + nullProto(), + ), + t: listType( + structType( + mkField("Col1", intType()), + mkField( + "Col2", + listType( + structType( + mkField("SubCol1", floatType()), + mkField("SubCol2", stringType()), + ), + ), + ), + ), + ), + want: []*struct { + Col1 NullInt64 + StructCol []*struct { + SubCol1 NullFloat64 + SubCol2 string + } `spanner:"Col2"` + }{ + { + Col1: NullInt64{3, true}, + StructCol: []*struct { + SubCol1 NullFloat64 + SubCol2 string + }{ + { + SubCol1: NullFloat64{3.14, true}, + SubCol2: "this", + }, + { + SubCol1: NullFloat64{0.57, true}, + SubCol2: "siht", + }, + }, + }, + { + Col1: NullInt64{}, + StructCol: []*struct { + SubCol1 
NullFloat64 + SubCol2 string + }(nil), + }, + nil, + }, + fail: false, + }, + // GenericColumnValue + {stringProto("abc"), stringType(), GenericColumnValue{stringType(), stringProto("abc")}, false}, + {nullProto(), stringType(), GenericColumnValue{stringType(), nullProto()}, false}, + // not actually valid (stringProto inside int list), but demonstrates pass-through. + { + in: listProto(intProto(5), nullProto(), stringProto("bcd")), + t: listType(intType()), + want: GenericColumnValue{ + Type: listType(intType()), + Value: listProto(intProto(5), nullProto(), stringProto("bcd")), + }, + fail: false, + }, + } { + gotp := reflect.New(reflect.TypeOf(test.want)) + if err := decodeValue(test.in, test.t, gotp.Interface()); err != nil { + if !test.fail { + t.Errorf("%d: cannot decode %v(%v): %v", i, test.in, test.t, err) + } + continue + } + if test.fail { + t.Errorf("%d: decoding %v(%v) succeeds unexpectedly, want error", i, test.in, test.t) + continue + } + got := reflect.Indirect(gotp).Interface() + if !reflect.DeepEqual(got, test.want) { + t.Errorf("%d: unexpected decoding result - got %v, want %v", i, got, test.want) + continue + } + } +} + +// Test error cases for decodeValue. +func TestDecodeValueErrors(t *testing.T) { + for i, test := range []struct { + in *proto3.Value + t *sppb.Type + v interface{} + }{ + {nullProto(), stringType(), nil}, + {nullProto(), stringType(), 1}, + } { + err := decodeValue(test.in, test.t, test.v) + if err == nil { + t.Errorf("#%d: want error, got nil", i) + } + } +} + +// Test NaN encoding/decoding. +func TestNaN(t *testing.T) { + // Decode NaN value. 
+ f := 0.0 + nf := NullFloat64{} + // To float64 + if err := decodeValue(floatProto(math.NaN()), floatType(), &f); err != nil { + t.Errorf("decodeValue returns %q for %v, want nil", err, floatProto(math.NaN())) + } + if !math.IsNaN(f) { + t.Errorf("f = %v, want %v", f, math.NaN()) + } + // To NullFloat64 + if err := decodeValue(floatProto(math.NaN()), floatType(), &nf); err != nil { + t.Errorf("decodeValue returns %q for %v, want nil", err, floatProto(math.NaN())) + } + if !math.IsNaN(nf.Float64) || !nf.Valid { + t.Errorf("f = %v, want %v", f, NullFloat64{math.NaN(), true}) + } + // Encode NaN value + // From float64 + v, _, err := encodeValue(math.NaN()) + if err != nil { + t.Errorf("encodeValue returns %q for NaN, want nil", err) + } + x, ok := v.GetKind().(*proto3.Value_NumberValue) + if !ok { + t.Errorf("incorrect type for v.GetKind(): %T, want *proto3.Value_NumberValue", v.GetKind()) + } + if !math.IsNaN(x.NumberValue) { + t.Errorf("x.NumberValue = %v, want %v", x.NumberValue, math.NaN()) + } + // From NullFloat64 + v, _, err = encodeValue(NullFloat64{math.NaN(), true}) + if err != nil { + t.Errorf("encodeValue returns %q for NaN, want nil", err) + } + x, ok = v.GetKind().(*proto3.Value_NumberValue) + if !ok { + t.Errorf("incorrect type for v.GetKind(): %T, want *proto3.Value_NumberValue", v.GetKind()) + } + if !math.IsNaN(x.NumberValue) { + t.Errorf("x.NumberValue = %v, want %v", x.NumberValue, math.NaN()) + } +} + +func TestGenericColumnValue(t *testing.T) { + for _, test := range []struct { + in GenericColumnValue + want interface{} + fail bool + }{ + {GenericColumnValue{stringType(), stringProto("abc")}, "abc", false}, + {GenericColumnValue{stringType(), stringProto("abc")}, 5, true}, + {GenericColumnValue{listType(intType()), listProto(intProto(91), nullProto(), intProto(87))}, []NullInt64{{91, true}, {}, {87, true}}, false}, + {GenericColumnValue{intType(), intProto(42)}, GenericColumnValue{intType(), intProto(42)}, false}, // trippy! 
:-) + } { + // We take a copy and mutate because we're paranoid about immutability. + inCopy := GenericColumnValue{ + Type: proto.Clone(test.in.Type).(*sppb.Type), + Value: proto.Clone(test.in.Value).(*proto3.Value), + } + gotp := reflect.New(reflect.TypeOf(test.want)) + if err := inCopy.Decode(gotp.Interface()); err != nil { + if !test.fail { + t.Errorf("cannot decode %v to %v: %v", test.in, test.want, err) + } + continue + } + if test.fail { + t.Errorf("decoding %v to %v succeeds unexpectedly", test.in, test.want) + } + // mutations to inCopy should be invisible to gotp. + inCopy.Type.Code = sppb.TypeCode_TIMESTAMP + inCopy.Value.Kind = &proto3.Value_NumberValue{NumberValue: 999} + got := reflect.Indirect(gotp).Interface() + if !reflect.DeepEqual(got, test.want) { + t.Errorf("unexpected decode result - got %v, want %v", got, test.want) + } + + // Test we can go backwards as well. + v, err := NewGenericColumnValue(test.want) + if err != nil { + t.Errorf("NewGenericColumnValue failed: %v", err) + continue + } + if !reflect.DeepEqual(*v, test.in) { + t.Errorf("unexpected encode result - got %v, want %v", v, test.in) + } + // If want is a GenericColumnValue, mutate its underlying value to validate + // we have taken a deep copy. 
+ if gcv, ok := test.want.(GenericColumnValue); ok { + gcv.Type.Code = sppb.TypeCode_TIMESTAMP + gcv.Value.Kind = &proto3.Value_NumberValue{NumberValue: 999} + if !reflect.DeepEqual(*v, test.in) { + t.Errorf("expected deep copy - got %v, want %v", v, test.in) + } + } + } +} + +func runBench(b *testing.B, size int, f func(a []int) (*proto3.Value, *sppb.Type, error)) { + a := make([]int, size) + for i := 0; i < b.N; i++ { + f(a) + } +} + +func BenchmarkEncodeIntArrayOrig1(b *testing.B) { + runBench(b, 1, encodeIntArrayOrig) +} + +func BenchmarkEncodeIntArrayOrig10(b *testing.B) { + runBench(b, 10, encodeIntArrayOrig) +} + +func BenchmarkEncodeIntArrayOrig100(b *testing.B) { + runBench(b, 100, encodeIntArrayOrig) +} + +func BenchmarkEncodeIntArrayOrig1000(b *testing.B) { + runBench(b, 1000, encodeIntArrayOrig) +} + +func BenchmarkEncodeIntArrayFunc1(b *testing.B) { + runBench(b, 1, encodeIntArrayFunc) +} + +func BenchmarkEncodeIntArrayFunc10(b *testing.B) { + runBench(b, 10, encodeIntArrayFunc) +} + +func BenchmarkEncodeIntArrayFunc100(b *testing.B) { + runBench(b, 100, encodeIntArrayFunc) +} + +func BenchmarkEncodeIntArrayFunc1000(b *testing.B) { + runBench(b, 1000, encodeIntArrayFunc) +} + +func BenchmarkEncodeIntArrayReflect1(b *testing.B) { + runBench(b, 1, encodeIntArrayReflect) +} + +func BenchmarkEncodeIntArrayReflect10(b *testing.B) { + runBench(b, 10, encodeIntArrayReflect) +} + +func BenchmarkEncodeIntArrayReflect100(b *testing.B) { + runBench(b, 100, encodeIntArrayReflect) +} + +func BenchmarkEncodeIntArrayReflect1000(b *testing.B) { + runBench(b, 1000, encodeIntArrayReflect) +} + +func encodeIntArrayOrig(a []int) (*proto3.Value, *sppb.Type, error) { + vs := make([]*proto3.Value, len(a)) + var err error + for i := range a { + vs[i], _, err = encodeValue(a[i]) + if err != nil { + return nil, nil, err + } + } + return listProto(vs...), listType(intType()), nil +} + +func encodeIntArrayFunc(a []int) (*proto3.Value, *sppb.Type, error) { + v, err := 
encodeArray(len(a), func(i int) interface{} { return a[i] }) + if err != nil { + return nil, nil, err + } + return v, listType(intType()), nil +} + +func encodeIntArrayReflect(a []int) (*proto3.Value, *sppb.Type, error) { + v, err := encodeArrayReflect(a) + if err != nil { + return nil, nil, err + } + return v, listType(intType()), nil +} + +func encodeArrayReflect(a interface{}) (*proto3.Value, error) { + va := reflect.ValueOf(a) + len := va.Len() + vs := make([]*proto3.Value, len) + var err error + for i := 0; i < len; i++ { + vs[i], _, err = encodeValue(va.Index(i).Interface()) + if err != nil { + return nil, err + } + } + return listProto(vs...), nil +} diff --git a/vendor/cloud.google.com/go/speech/apiv1beta1/doc.go b/vendor/cloud.google.com/go/speech/apiv1beta1/doc.go new file mode 100644 index 00000000..1bfa6c84 --- /dev/null +++ b/vendor/cloud.google.com/go/speech/apiv1beta1/doc.go @@ -0,0 +1,33 @@ +// Copyright 2017, Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +// Package speech is an experimental, auto-generated package for the +// speech API. +// +// Google Cloud Speech API. 
+package speech // import "cloud.google.com/go/speech/apiv1beta1" + +import ( + "golang.org/x/net/context" + "google.golang.org/grpc/metadata" +) + +func insertXGoog(ctx context.Context, val string) context.Context { + md, _ := metadata.FromContext(ctx) + md = md.Copy() + md["x-goog-api-client"] = []string{val} + return metadata.NewContext(ctx, md) +} diff --git a/vendor/cloud.google.com/go/speech/apiv1beta1/mock_test.go b/vendor/cloud.google.com/go/speech/apiv1beta1/mock_test.go new file mode 100644 index 00000000..7fe8d492 --- /dev/null +++ b/vendor/cloud.google.com/go/speech/apiv1beta1/mock_test.go @@ -0,0 +1,378 @@ +// Copyright 2017, Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package speech + +import ( + speechpb "google.golang.org/genproto/googleapis/cloud/speech/v1beta1" + longrunningpb "google.golang.org/genproto/googleapis/longrunning" +) + +import ( + "flag" + "io" + "log" + "net" + "os" + "testing" + + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes" + "golang.org/x/net/context" + "google.golang.org/api/option" + status "google.golang.org/genproto/googleapis/rpc/status" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" +) + +var _ = io.EOF +var _ = ptypes.MarshalAny +var _ status.Status + +type mockSpeechServer struct { + // Embed for forward compatibility. + // Tests will keep working if more methods are added + // in the future. 
+ speechpb.SpeechServer + + reqs []proto.Message + + // If set, all calls return this error. + err error + + // responses to return if err == nil + resps []proto.Message +} + +func (s *mockSpeechServer) SyncRecognize(_ context.Context, req *speechpb.SyncRecognizeRequest) (*speechpb.SyncRecognizeResponse, error) { + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*speechpb.SyncRecognizeResponse), nil +} + +func (s *mockSpeechServer) AsyncRecognize(_ context.Context, req *speechpb.AsyncRecognizeRequest) (*longrunningpb.Operation, error) { + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*longrunningpb.Operation), nil +} + +func (s *mockSpeechServer) StreamingRecognize(stream speechpb.Speech_StreamingRecognizeServer) error { + for { + if req, err := stream.Recv(); err == io.EOF { + break + } else if err != nil { + return err + } else { + s.reqs = append(s.reqs, req) + } + } + if s.err != nil { + return s.err + } + for _, v := range s.resps { + if err := stream.Send(v.(*speechpb.StreamingRecognizeResponse)); err != nil { + return err + } + } + return nil +} + +// clientOpt is the option tests should use to connect to the test server. +// It is initialized by TestMain. 
+var clientOpt option.ClientOption + +var ( + mockSpeech mockSpeechServer +) + +func TestMain(m *testing.M) { + flag.Parse() + + serv := grpc.NewServer() + speechpb.RegisterSpeechServer(serv, &mockSpeech) + + lis, err := net.Listen("tcp", "localhost:0") + if err != nil { + log.Fatal(err) + } + go serv.Serve(lis) + + conn, err := grpc.Dial(lis.Addr().String(), grpc.WithInsecure()) + if err != nil { + log.Fatal(err) + } + clientOpt = option.WithGRPCConn(conn) + + os.Exit(m.Run()) +} + +func TestSpeechSyncRecognize(t *testing.T) { + var expectedResponse *speechpb.SyncRecognizeResponse = &speechpb.SyncRecognizeResponse{} + + mockSpeech.err = nil + mockSpeech.reqs = nil + + mockSpeech.resps = append(mockSpeech.resps[:0], expectedResponse) + + var encoding speechpb.RecognitionConfig_AudioEncoding = speechpb.RecognitionConfig_FLAC + var sampleRate int32 = 44100 + var config = &speechpb.RecognitionConfig{ + Encoding: encoding, + SampleRate: sampleRate, + } + var uri string = "gs://bucket_name/file_name.flac" + var audio = &speechpb.RecognitionAudio{ + AudioSource: &speechpb.RecognitionAudio_Uri{ + Uri: uri, + }, + } + var request = &speechpb.SyncRecognizeRequest{ + Config: config, + Audio: audio, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.SyncRecognize(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockSpeech.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestSpeechSyncRecognizeError(t *testing.T) { + errCode := codes.Internal + mockSpeech.err = grpc.Errorf(errCode, "test error") + + var encoding speechpb.RecognitionConfig_AudioEncoding = speechpb.RecognitionConfig_FLAC + var sampleRate int32 = 44100 + var config = &speechpb.RecognitionConfig{ + Encoding: encoding, + 
SampleRate: sampleRate, + } + var uri string = "gs://bucket_name/file_name.flac" + var audio = &speechpb.RecognitionAudio{ + AudioSource: &speechpb.RecognitionAudio_Uri{ + Uri: uri, + }, + } + var request = &speechpb.SyncRecognizeRequest{ + Config: config, + Audio: audio, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.SyncRecognize(context.Background(), request) + + if c := grpc.Code(err); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestSpeechAsyncRecognize(t *testing.T) { + var expectedResponse *speechpb.AsyncRecognizeResponse = &speechpb.AsyncRecognizeResponse{} + + mockSpeech.err = nil + mockSpeech.reqs = nil + + any, err := ptypes.MarshalAny(expectedResponse) + if err != nil { + t.Fatal(err) + } + mockSpeech.resps = append(mockSpeech.resps[:0], &longrunningpb.Operation{ + Name: "longrunning-test", + Done: true, + Result: &longrunningpb.Operation_Response{Response: any}, + }) + + var encoding speechpb.RecognitionConfig_AudioEncoding = speechpb.RecognitionConfig_FLAC + var sampleRate int32 = 44100 + var config = &speechpb.RecognitionConfig{ + Encoding: encoding, + SampleRate: sampleRate, + } + var uri string = "gs://bucket_name/file_name.flac" + var audio = &speechpb.RecognitionAudio{ + AudioSource: &speechpb.RecognitionAudio_Uri{ + Uri: uri, + }, + } + var request = &speechpb.AsyncRecognizeRequest{ + Config: config, + Audio: audio, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + respLRO, err := c.AsyncRecognize(context.Background(), request) + if err != nil { + t.Fatal(err) + } + resp, err := respLRO.Wait(context.Background()) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockSpeech.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, 
want %q)", got, want) + } +} + +func TestSpeechAsyncRecognizeError(t *testing.T) { + errCode := codes.Internal + mockSpeech.err = nil + mockSpeech.resps = append(mockSpeech.resps[:0], &longrunningpb.Operation{ + Name: "longrunning-test", + Done: true, + Result: &longrunningpb.Operation_Error{ + Error: &status.Status{ + Code: int32(errCode), + Message: "test error", + }, + }, + }) + + var encoding speechpb.RecognitionConfig_AudioEncoding = speechpb.RecognitionConfig_FLAC + var sampleRate int32 = 44100 + var config = &speechpb.RecognitionConfig{ + Encoding: encoding, + SampleRate: sampleRate, + } + var uri string = "gs://bucket_name/file_name.flac" + var audio = &speechpb.RecognitionAudio{ + AudioSource: &speechpb.RecognitionAudio_Uri{ + Uri: uri, + }, + } + var request = &speechpb.AsyncRecognizeRequest{ + Config: config, + Audio: audio, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + respLRO, err := c.AsyncRecognize(context.Background(), request) + if err != nil { + t.Fatal(err) + } + resp, err := respLRO.Wait(context.Background()) + + if c := grpc.Code(err); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestSpeechStreamingRecognize(t *testing.T) { + var resultIndex int32 = 520358448 + var expectedResponse = &speechpb.StreamingRecognizeResponse{ + ResultIndex: resultIndex, + } + + mockSpeech.err = nil + mockSpeech.reqs = nil + + mockSpeech.resps = append(mockSpeech.resps[:0], expectedResponse) + + var request *speechpb.StreamingRecognizeRequest = &speechpb.StreamingRecognizeRequest{} + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + stream, err := c.StreamingRecognize(context.Background()) + if err != nil { + t.Fatal(err) + } + if err := stream.Send(request); err != nil { + t.Fatal(err) + } + if err := stream.CloseSend(); err != nil { + t.Fatal(err) + } + resp, err := stream.Recv() + + if err != nil { + t.Fatal(err) + } 
+ + if want, got := request, mockSpeech.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestSpeechStreamingRecognizeError(t *testing.T) { + errCode := codes.Internal + mockSpeech.err = grpc.Errorf(errCode, "test error") + + var request *speechpb.StreamingRecognizeRequest = &speechpb.StreamingRecognizeRequest{} + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + stream, err := c.StreamingRecognize(context.Background()) + if err != nil { + t.Fatal(err) + } + if err := stream.Send(request); err != nil { + t.Fatal(err) + } + if err := stream.CloseSend(); err != nil { + t.Fatal(err) + } + resp, err := stream.Recv() + + if c := grpc.Code(err); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} diff --git a/vendor/cloud.google.com/go/speech/apiv1beta1/speech_client.go b/vendor/cloud.google.com/go/speech/apiv1beta1/speech_client.go new file mode 100644 index 00000000..9d9d66a5 --- /dev/null +++ b/vendor/cloud.google.com/go/speech/apiv1beta1/speech_client.go @@ -0,0 +1,255 @@ +// Copyright 2017, Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. 
+ +package speech + +import ( + "time" + + "cloud.google.com/go/internal/version" + "cloud.google.com/go/longrunning" + gax "github.com/googleapis/gax-go" + "golang.org/x/net/context" + "google.golang.org/api/option" + "google.golang.org/api/transport" + speechpb "google.golang.org/genproto/googleapis/cloud/speech/v1beta1" + longrunningpb "google.golang.org/genproto/googleapis/longrunning" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" +) + +// CallOptions contains the retry settings for each method of Client. +type CallOptions struct { + SyncRecognize []gax.CallOption + AsyncRecognize []gax.CallOption + StreamingRecognize []gax.CallOption +} + +func defaultClientOptions() []option.ClientOption { + return []option.ClientOption{ + option.WithEndpoint("speech.googleapis.com:443"), + option.WithScopes( + "https://www.googleapis.com/auth/cloud-platform", + ), + } +} + +func defaultCallOptions() *CallOptions { + retry := map[[2]string][]gax.CallOption{ + {"default", "idempotent"}: { + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.3, + }) + }), + }, + {"default", "non_idempotent"}: { + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.3, + }) + }), + }, + } + return &CallOptions{ + SyncRecognize: retry[[2]string{"default", "idempotent"}], + AsyncRecognize: retry[[2]string{"default", "idempotent"}], + StreamingRecognize: retry[[2]string{"default", "non_idempotent"}], + } +} + +// Client is a client for interacting with Google Cloud Speech API. +type Client struct { + // The connection to the service. + conn *grpc.ClientConn + + // The gRPC API client. + client speechpb.SpeechClient + + // The call options for this service. 
+ CallOptions *CallOptions + + // The metadata to be sent with each request. + xGoogHeader string +} + +// NewClient creates a new speech client. +// +// Service that implements Google Cloud Speech API. +func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error) { + conn, err := transport.DialGRPC(ctx, append(defaultClientOptions(), opts...)...) + if err != nil { + return nil, err + } + c := &Client{ + conn: conn, + CallOptions: defaultCallOptions(), + + client: speechpb.NewSpeechClient(conn), + } + c.SetGoogleClientInfo() + return c, nil +} + +// Connection returns the client's connection to the API service. +func (c *Client) Connection() *grpc.ClientConn { + return c.conn +} + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *Client) Close() error { + return c.conn.Close() +} + +// SetGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *Client) SetGoogleClientInfo(keyval ...string) { + kv := append([]string{"gl-go", version.Go()}, keyval...) + kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", "") + c.xGoogHeader = gax.XGoogHeader(kv...) +} + +// SyncRecognize perform synchronous speech-recognition: receive results after all audio +// has been sent and processed. +func (c *Client) SyncRecognize(ctx context.Context, req *speechpb.SyncRecognizeRequest) (*speechpb.SyncRecognizeResponse, error) { + ctx = insertXGoog(ctx, c.xGoogHeader) + var resp *speechpb.SyncRecognizeResponse + err := gax.Invoke(ctx, func(ctx context.Context) error { + var err error + resp, err = c.client.SyncRecognize(ctx, req) + return err + }, c.CallOptions.SyncRecognize...) 
+ if err != nil { + return nil, err + } + return resp, nil +} + +// AsyncRecognize perform asynchronous speech-recognition: receive results via the +// google.longrunning.Operations interface. Returns either an +// `Operation.error` or an `Operation.response` which contains +// an `AsyncRecognizeResponse` message. +func (c *Client) AsyncRecognize(ctx context.Context, req *speechpb.AsyncRecognizeRequest) (*AsyncRecognizeResponseOperation, error) { + ctx = insertXGoog(ctx, c.xGoogHeader) + var resp *longrunningpb.Operation + err := gax.Invoke(ctx, func(ctx context.Context) error { + var err error + resp, err = c.client.AsyncRecognize(ctx, req) + return err + }, c.CallOptions.AsyncRecognize...) + if err != nil { + return nil, err + } + return &AsyncRecognizeResponseOperation{ + lro: longrunning.InternalNewOperation(c.Connection(), resp), + }, nil +} + +// StreamingRecognize perform bidirectional streaming speech-recognition: receive results while +// sending audio. This method is only available via the gRPC API (not REST). +func (c *Client) StreamingRecognize(ctx context.Context) (speechpb.Speech_StreamingRecognizeClient, error) { + ctx = insertXGoog(ctx, c.xGoogHeader) + var resp speechpb.Speech_StreamingRecognizeClient + err := gax.Invoke(ctx, func(ctx context.Context) error { + var err error + resp, err = c.client.StreamingRecognize(ctx) + return err + }, c.CallOptions.StreamingRecognize...) + if err != nil { + return nil, err + } + return resp, nil +} + +// AsyncRecognizeResponseOperation manages a long-running operation yielding speechpb.AsyncRecognizeResponse. +type AsyncRecognizeResponseOperation struct { + lro *longrunning.Operation +} + +// AsyncRecognizeResponseOperation returns a new AsyncRecognizeResponseOperation from a given name. +// The name must be that of a previously created AsyncRecognizeResponseOperation, possibly from a different process. 
+func (c *Client) AsyncRecognizeResponseOperation(name string) *AsyncRecognizeResponseOperation { + return &AsyncRecognizeResponseOperation{ + lro: longrunning.InternalNewOperation(c.Connection(), &longrunningpb.Operation{Name: name}), + } +} + +// Wait blocks until the long-running operation is completed, returning the response and any errors encountered. +// +// See documentation of Poll for error-handling information. +func (op *AsyncRecognizeResponseOperation) Wait(ctx context.Context) (*speechpb.AsyncRecognizeResponse, error) { + var resp speechpb.AsyncRecognizeResponse + if err := op.lro.Wait(ctx, &resp); err != nil { + return nil, err + } + return &resp, nil +} + +// Poll fetches the latest state of the long-running operation. +// +// Poll also fetches the latest metadata, which can be retrieved by Metadata. +// +// If Poll fails, the error is returned and op is unmodified. If Poll succeeds and +// the operation has completed with failure, the error is returned and op.Done will return true. +// If Poll succeeds and the operation has completed successfully, +// op.Done will return true, and the response of the operation is returned. +// If Poll succeeds and the operation has not completed, the returned response and error are both nil. +func (op *AsyncRecognizeResponseOperation) Poll(ctx context.Context) (*speechpb.AsyncRecognizeResponse, error) { + var resp speechpb.AsyncRecognizeResponse + if err := op.lro.Poll(ctx, &resp); err != nil { + return nil, err + } + if !op.Done() { + return nil, nil + } + return &resp, nil +} + +// Metadata returns metadata associated with the long-running operation. +// Metadata itself does not contact the server, but Poll does. +// To get the latest metadata, call this method after a successful call to Poll. +// If the metadata is not available, the returned metadata and error are both nil. 
+func (op *AsyncRecognizeResponseOperation) Metadata() (*speechpb.AsyncRecognizeMetadata, error) { + var meta speechpb.AsyncRecognizeMetadata + if err := op.lro.Metadata(&meta); err == longrunning.ErrNoMetadata { + return nil, nil + } else if err != nil { + return nil, err + } + return &meta, nil +} + +// Done reports whether the long-running operation has completed. +func (op *AsyncRecognizeResponseOperation) Done() bool { + return op.lro.Done() +} + +// Name returns the name of the long-running operation. +// The name is assigned by the server and is unique within the service from which the operation is created. +func (op *AsyncRecognizeResponseOperation) Name() string { + return op.lro.Name() +} diff --git a/vendor/cloud.google.com/go/speech/apiv1beta1/speech_client_example_test.go b/vendor/cloud.google.com/go/speech/apiv1beta1/speech_client_example_test.go new file mode 100644 index 00000000..6b9db0a0 --- /dev/null +++ b/vendor/cloud.google.com/go/speech/apiv1beta1/speech_client_example_test.go @@ -0,0 +1,110 @@ +// Copyright 2017, Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. 
+ +package speech_test + +import ( + "io" + + "cloud.google.com/go/speech/apiv1beta1" + "golang.org/x/net/context" + speechpb "google.golang.org/genproto/googleapis/cloud/speech/v1beta1" +) + +func ExampleNewClient() { + ctx := context.Background() + c, err := speech.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + // TODO: Use client. + _ = c +} + +func ExampleClient_SyncRecognize() { + ctx := context.Background() + c, err := speech.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &speechpb.SyncRecognizeRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.SyncRecognize(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleClient_AsyncRecognize() { + ctx := context.Background() + c, err := speech.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &speechpb.AsyncRecognizeRequest{ + // TODO: Fill request struct fields. + } + op, err := c.AsyncRecognize(ctx, req) + if err != nil { + // TODO: Handle error. + } + + resp, err := op.Wait(ctx) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleClient_StreamingRecognize() { + ctx := context.Background() + c, err := speech.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + stream, err := c.StreamingRecognize(ctx) + if err != nil { + // TODO: Handle error. + } + go func() { + reqs := []*speechpb.StreamingRecognizeRequest{ + // TODO: Create requests. + } + for _, req := range reqs { + if err := stream.Send(req); err != nil { + // TODO: Handle error. + } + } + stream.CloseSend() + }() + for { + resp, err := stream.Recv() + if err == io.EOF { + break + } + if err != nil { + // TODO: handle error. + } + // TODO: Use resp. 
+ _ = resp + } +} diff --git a/vendor/cloud.google.com/go/storage/acl.go b/vendor/cloud.google.com/go/storage/acl.go new file mode 100644 index 00000000..6c243942 --- /dev/null +++ b/vendor/cloud.google.com/go/storage/acl.go @@ -0,0 +1,223 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package storage + +import ( + "fmt" + + "golang.org/x/net/context" + raw "google.golang.org/api/storage/v1" +) + +// ACLRole is the level of access to grant. +type ACLRole string + +const ( + RoleOwner ACLRole = "OWNER" + RoleReader ACLRole = "READER" + RoleWriter ACLRole = "WRITER" +) + +// ACLEntity refers to a user or group. +// They are sometimes referred to as grantees. +// +// It could be in the form of: +// "user-", "user-", "group-", "group-", +// "domain-" and "project-team-". +// +// Or one of the predefined constants: AllUsers, AllAuthenticatedUsers. +type ACLEntity string + +const ( + AllUsers ACLEntity = "allUsers" + AllAuthenticatedUsers ACLEntity = "allAuthenticatedUsers" +) + +// ACLRule represents a grant for a role to an entity (user, group or team) for a Google Cloud Storage object or bucket. +type ACLRule struct { + Entity ACLEntity + Role ACLRole +} + +// ACLHandle provides operations on an access control list for a Google Cloud Storage bucket or object. 
+type ACLHandle struct { + c *Client + bucket string + object string + isDefault bool +} + +// Delete permanently deletes the ACL entry for the given entity. +func (a *ACLHandle) Delete(ctx context.Context, entity ACLEntity) error { + if a.object != "" { + return a.objectDelete(ctx, entity) + } + if a.isDefault { + return a.bucketDefaultDelete(ctx, entity) + } + return a.bucketDelete(ctx, entity) +} + +// Set sets the permission level for the given entity. +func (a *ACLHandle) Set(ctx context.Context, entity ACLEntity, role ACLRole) error { + if a.object != "" { + return a.objectSet(ctx, entity, role) + } + if a.isDefault { + return a.bucketDefaultSet(ctx, entity, role) + } + return a.bucketSet(ctx, entity, role) +} + +// List retrieves ACL entries. +func (a *ACLHandle) List(ctx context.Context) ([]ACLRule, error) { + if a.object != "" { + return a.objectList(ctx) + } + if a.isDefault { + return a.bucketDefaultList(ctx) + } + return a.bucketList(ctx) +} + +func (a *ACLHandle) bucketDefaultList(ctx context.Context) ([]ACLRule, error) { + var acls *raw.ObjectAccessControls + var err error + err = runWithRetry(ctx, func() error { + acls, err = a.c.raw.DefaultObjectAccessControls.List(a.bucket).Context(ctx).Do() + return err + }) + if err != nil { + return nil, fmt.Errorf("storage: error listing default object ACL for bucket %q: %v", a.bucket, err) + } + return toACLRules(acls.Items), nil +} + +func (a *ACLHandle) bucketDefaultSet(ctx context.Context, entity ACLEntity, role ACLRole) error { + acl := &raw.ObjectAccessControl{ + Bucket: a.bucket, + Entity: string(entity), + Role: string(role), + } + err := runWithRetry(ctx, func() error { + _, err := a.c.raw.DefaultObjectAccessControls.Update(a.bucket, string(entity), acl).Context(ctx).Do() + return err + }) + if err != nil { + return fmt.Errorf("storage: error updating default ACL entry for bucket %q, entity %q: %v", a.bucket, entity, err) + } + return nil +} + +func (a *ACLHandle) bucketDefaultDelete(ctx 
context.Context, entity ACLEntity) error { + err := runWithRetry(ctx, func() error { + return a.c.raw.DefaultObjectAccessControls.Delete(a.bucket, string(entity)).Context(ctx).Do() + }) + if err != nil { + return fmt.Errorf("storage: error deleting default ACL entry for bucket %q, entity %q: %v", a.bucket, entity, err) + } + return nil +} + +func (a *ACLHandle) bucketList(ctx context.Context) ([]ACLRule, error) { + var acls *raw.BucketAccessControls + var err error + err = runWithRetry(ctx, func() error { + acls, err = a.c.raw.BucketAccessControls.List(a.bucket).Context(ctx).Do() + return err + }) + if err != nil { + return nil, fmt.Errorf("storage: error listing bucket ACL for bucket %q: %v", a.bucket, err) + } + r := make([]ACLRule, len(acls.Items)) + for i, v := range acls.Items { + r[i].Entity = ACLEntity(v.Entity) + r[i].Role = ACLRole(v.Role) + } + return r, nil +} + +func (a *ACLHandle) bucketSet(ctx context.Context, entity ACLEntity, role ACLRole) error { + acl := &raw.BucketAccessControl{ + Bucket: a.bucket, + Entity: string(entity), + Role: string(role), + } + err := runWithRetry(ctx, func() error { + _, err := a.c.raw.BucketAccessControls.Update(a.bucket, string(entity), acl).Context(ctx).Do() + return err + }) + if err != nil { + return fmt.Errorf("storage: error updating bucket ACL entry for bucket %q, entity %q: %v", a.bucket, entity, err) + } + return nil +} + +func (a *ACLHandle) bucketDelete(ctx context.Context, entity ACLEntity) error { + err := runWithRetry(ctx, func() error { + return a.c.raw.BucketAccessControls.Delete(a.bucket, string(entity)).Context(ctx).Do() + }) + if err != nil { + return fmt.Errorf("storage: error deleting bucket ACL entry for bucket %q, entity %q: %v", a.bucket, entity, err) + } + return nil +} + +func (a *ACLHandle) objectList(ctx context.Context) ([]ACLRule, error) { + var acls *raw.ObjectAccessControls + var err error + err = runWithRetry(ctx, func() error { + acls, err = a.c.raw.ObjectAccessControls.List(a.bucket, 
a.object).Context(ctx).Do() + return err + }) + if err != nil { + return nil, fmt.Errorf("storage: error listing object ACL for bucket %q, file %q: %v", a.bucket, a.object, err) + } + return toACLRules(acls.Items), nil +} + +func (a *ACLHandle) objectSet(ctx context.Context, entity ACLEntity, role ACLRole) error { + acl := &raw.ObjectAccessControl{ + Bucket: a.bucket, + Entity: string(entity), + Role: string(role), + } + err := runWithRetry(ctx, func() error { + _, err := a.c.raw.ObjectAccessControls.Update(a.bucket, a.object, string(entity), acl).Context(ctx).Do() + return err + }) + if err != nil { + return fmt.Errorf("storage: error updating object ACL entry for bucket %q, file %q, entity %q: %v", a.bucket, a.object, entity, err) + } + return nil +} + +func (a *ACLHandle) objectDelete(ctx context.Context, entity ACLEntity) error { + err := runWithRetry(ctx, func() error { + return a.c.raw.ObjectAccessControls.Delete(a.bucket, a.object, string(entity)).Context(ctx).Do() + }) + if err != nil { + return fmt.Errorf("storage: error deleting object ACL entry for bucket %q, file %q, entity %q: %v", a.bucket, a.object, entity, err) + } + return nil +} + +func toACLRules(items []*raw.ObjectAccessControl) []ACLRule { + r := make([]ACLRule, 0, len(items)) + for _, item := range items { + r = append(r, ACLRule{Entity: ACLEntity(item.Entity), Role: ACLRole(item.Role)}) + } + return r +} diff --git a/vendor/cloud.google.com/go/storage/bucket.go b/vendor/cloud.google.com/go/storage/bucket.go new file mode 100644 index 00000000..bca1f71e --- /dev/null +++ b/vendor/cloud.google.com/go/storage/bucket.go @@ -0,0 +1,331 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package storage + +import ( + "net/http" + "time" + + "golang.org/x/net/context" + "google.golang.org/api/googleapi" + "google.golang.org/api/iterator" + raw "google.golang.org/api/storage/v1" +) + +// Create creates the Bucket in the project. +// If attrs is nil the API defaults will be used. +func (b *BucketHandle) Create(ctx context.Context, projectID string, attrs *BucketAttrs) error { + var bkt *raw.Bucket + if attrs != nil { + bkt = attrs.toRawBucket() + } else { + bkt = &raw.Bucket{} + } + bkt.Name = b.name + req := b.c.raw.Buckets.Insert(projectID, bkt) + return runWithRetry(ctx, func() error { _, err := req.Context(ctx).Do(); return err }) +} + +// Delete deletes the Bucket. +func (b *BucketHandle) Delete(ctx context.Context) error { + req := b.c.raw.Buckets.Delete(b.name) + return runWithRetry(ctx, func() error { return req.Context(ctx).Do() }) +} + +// ACL returns an ACLHandle, which provides access to the bucket's access control list. +// This controls who can list, create or overwrite the objects in a bucket. +// This call does not perform any network operations. +func (b *BucketHandle) ACL() *ACLHandle { + return &b.acl +} + +// DefaultObjectACL returns an ACLHandle, which provides access to the bucket's default object ACLs. +// These ACLs are applied to newly created objects in this bucket that do not have a defined ACL. +// This call does not perform any network operations. 
+func (b *BucketHandle) DefaultObjectACL() *ACLHandle { + return &b.defaultObjectACL +} + +// Object returns an ObjectHandle, which provides operations on the named object. +// This call does not perform any network operations. +// +// name must consist entirely of valid UTF-8-encoded runes. The full specification +// for valid object names can be found at: +// https://cloud.google.com/storage/docs/bucket-naming +func (b *BucketHandle) Object(name string) *ObjectHandle { + return &ObjectHandle{ + c: b.c, + bucket: b.name, + object: name, + acl: ACLHandle{ + c: b.c, + bucket: b.name, + object: name, + }, + gen: -1, + } +} + +// Attrs returns the metadata for the bucket. +func (b *BucketHandle) Attrs(ctx context.Context) (*BucketAttrs, error) { + var resp *raw.Bucket + var err error + err = runWithRetry(ctx, func() error { + resp, err = b.c.raw.Buckets.Get(b.name).Projection("full").Context(ctx).Do() + return err + }) + if e, ok := err.(*googleapi.Error); ok && e.Code == http.StatusNotFound { + return nil, ErrBucketNotExist + } + if err != nil { + return nil, err + } + return newBucket(resp), nil +} + +// BucketAttrs represents the metadata for a Google Cloud Storage bucket. +type BucketAttrs struct { + // Name is the name of the bucket. + Name string + + // ACL is the list of access control rules on the bucket. + ACL []ACLRule + + // DefaultObjectACL is the list of access controls to + // apply to new objects when no object ACL is provided. + DefaultObjectACL []ACLRule + + // Location is the location of the bucket. It defaults to "US". + Location string + + // MetaGeneration is the metadata generation of the bucket. + MetaGeneration int64 + + // StorageClass is the default storage class of the bucket. This defines + // how objects in the bucket are stored and determines the SLA + // and the cost of storage. Typical values are "MULTI_REGIONAL", + // "REGIONAL", "NEARLINE", "COLDLINE", "STANDARD" and + // "DURABLE_REDUCED_AVAILABILITY". 
Defaults to "STANDARD", which + // is equivalent to "MULTI_REGIONAL" or "REGIONAL" depending on + // the bucket's location settings. + StorageClass string + + // Created is the creation time of the bucket. + Created time.Time + + // VersioningEnabled reports whether this bucket has versioning enabled. + // This field is read-only. + VersioningEnabled bool +} + +func newBucket(b *raw.Bucket) *BucketAttrs { + if b == nil { + return nil + } + bucket := &BucketAttrs{ + Name: b.Name, + Location: b.Location, + MetaGeneration: b.Metageneration, + StorageClass: b.StorageClass, + Created: convertTime(b.TimeCreated), + VersioningEnabled: b.Versioning != nil && b.Versioning.Enabled, + } + acl := make([]ACLRule, len(b.Acl)) + for i, rule := range b.Acl { + acl[i] = ACLRule{ + Entity: ACLEntity(rule.Entity), + Role: ACLRole(rule.Role), + } + } + bucket.ACL = acl + objACL := make([]ACLRule, len(b.DefaultObjectAcl)) + for i, rule := range b.DefaultObjectAcl { + objACL[i] = ACLRule{ + Entity: ACLEntity(rule.Entity), + Role: ACLRole(rule.Role), + } + } + bucket.DefaultObjectACL = objACL + return bucket +} + +// toRawBucket copies the editable attribute from b to the raw library's Bucket type. +func (b *BucketAttrs) toRawBucket() *raw.Bucket { + var acl []*raw.BucketAccessControl + if len(b.ACL) > 0 { + acl = make([]*raw.BucketAccessControl, len(b.ACL)) + for i, rule := range b.ACL { + acl[i] = &raw.BucketAccessControl{ + Entity: string(rule.Entity), + Role: string(rule.Role), + } + } + } + dACL := toRawObjectACL(b.DefaultObjectACL) + return &raw.Bucket{ + Name: b.Name, + DefaultObjectAcl: dACL, + Location: b.Location, + StorageClass: b.StorageClass, + Acl: acl, + } +} + +// Objects returns an iterator over the objects in the bucket that match the Query q. +// If q is nil, no filtering is done. 
+func (b *BucketHandle) Objects(ctx context.Context, q *Query) *ObjectIterator { + it := &ObjectIterator{ + ctx: ctx, + bucket: b, + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo( + it.fetch, + func() int { return len(it.items) }, + func() interface{} { b := it.items; it.items = nil; return b }) + if q != nil { + it.query = *q + } + return it +} + +// An ObjectIterator is an iterator over ObjectAttrs. +type ObjectIterator struct { + ctx context.Context + bucket *BucketHandle + query Query + pageInfo *iterator.PageInfo + nextFunc func() error + items []*ObjectAttrs +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *ObjectIterator) PageInfo() *iterator.PageInfo { return it.pageInfo } + +// Next returns the next result. Its second return value is iterator.Done if +// there are no more results. Once Next returns iterator.Done, all subsequent +// calls will return iterator.Done. +// +// If Query.Delimiter is non-empty, some of the ObjectAttrs returned by Next will +// have a non-empty Prefix field, and a zero value for all other fields. These +// represent prefixes. 
+func (it *ObjectIterator) Next() (*ObjectAttrs, error) { + if err := it.nextFunc(); err != nil { + return nil, err + } + item := it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *ObjectIterator) fetch(pageSize int, pageToken string) (string, error) { + req := it.bucket.c.raw.Objects.List(it.bucket.name) + req.Projection("full") + req.Delimiter(it.query.Delimiter) + req.Prefix(it.query.Prefix) + req.Versions(it.query.Versions) + req.PageToken(pageToken) + if pageSize > 0 { + req.MaxResults(int64(pageSize)) + } + var resp *raw.Objects + var err error + err = runWithRetry(it.ctx, func() error { + resp, err = req.Context(it.ctx).Do() + return err + }) + if err != nil { + if e, ok := err.(*googleapi.Error); ok && e.Code == http.StatusNotFound { + err = ErrBucketNotExist + } + return "", err + } + for _, item := range resp.Items { + it.items = append(it.items, newObject(item)) + } + for _, prefix := range resp.Prefixes { + it.items = append(it.items, &ObjectAttrs{Prefix: prefix}) + } + return resp.NextPageToken, nil +} + +// TODO(jbd): Add storage.buckets.update. + +// Buckets returns an iterator over the buckets in the project. You may +// optionally set the iterator's Prefix field to restrict the list to buckets +// whose names begin with the prefix. By default, all buckets in the project +// are returned. +func (c *Client) Buckets(ctx context.Context, projectID string) *BucketIterator { + it := &BucketIterator{ + ctx: ctx, + client: c, + projectID: projectID, + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo( + it.fetch, + func() int { return len(it.buckets) }, + func() interface{} { b := it.buckets; it.buckets = nil; return b }) + return it +} + +// A BucketIterator is an iterator over BucketAttrs. +type BucketIterator struct { + // Prefix restricts the iterator to buckets whose names begin with it. 
+ Prefix string + + ctx context.Context + client *Client + projectID string + buckets []*BucketAttrs + pageInfo *iterator.PageInfo + nextFunc func() error +} + +// Next returns the next result. Its second return value is iterator.Done if +// there are no more results. Once Next returns iterator.Done, all subsequent +// calls will return iterator.Done. +func (it *BucketIterator) Next() (*BucketAttrs, error) { + if err := it.nextFunc(); err != nil { + return nil, err + } + b := it.buckets[0] + it.buckets = it.buckets[1:] + return b, nil +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *BucketIterator) PageInfo() *iterator.PageInfo { return it.pageInfo } + +func (it *BucketIterator) fetch(pageSize int, pageToken string) (string, error) { + req := it.client.raw.Buckets.List(it.projectID) + req.Projection("full") + req.Prefix(it.Prefix) + req.PageToken(pageToken) + if pageSize > 0 { + req.MaxResults(int64(pageSize)) + } + var resp *raw.Buckets + var err error + err = runWithRetry(it.ctx, func() error { + resp, err = req.Context(it.ctx).Do() + return err + }) + if err != nil { + return "", err + } + for _, item := range resp.Items { + it.buckets = append(it.buckets, newBucket(item)) + } + return resp.NextPageToken, nil +} diff --git a/vendor/cloud.google.com/go/storage/copy.go b/vendor/cloud.google.com/go/storage/copy.go new file mode 100644 index 00000000..b2db656e --- /dev/null +++ b/vendor/cloud.google.com/go/storage/copy.go @@ -0,0 +1,190 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package storage contains a Google Cloud Storage client. +// +// This package is experimental and may make backwards-incompatible changes. +package storage + +import ( + "errors" + "fmt" + + "golang.org/x/net/context" + raw "google.golang.org/api/storage/v1" +) + +// CopierFrom creates a Copier that can copy src to dst. +// You can immediately call Run on the returned Copier, or +// you can configure it first. +func (dst *ObjectHandle) CopierFrom(src *ObjectHandle) *Copier { + return &Copier{dst: dst, src: src} +} + +// A Copier copies a source object to a destination. +type Copier struct { + // ObjectAttrs are optional attributes to set on the destination object. + // Any attributes must be initialized before any calls on the Copier. Nil + // or zero-valued attributes are ignored. + ObjectAttrs + + // RewriteToken can be set before calling Run to resume a copy + // operation. After Run returns a non-nil error, RewriteToken will + // have been updated to contain the value needed to resume the copy. + RewriteToken string + + // ProgressFunc can be used to monitor the progress of a multi-RPC copy + // operation. If ProgressFunc is not nil and CopyFrom requires multiple + // calls to the underlying service (see + // https://cloud.google.com/storage/docs/json_api/v1/objects/rewrite), then + // ProgressFunc will be invoked after each call with the number of bytes of + // content copied so far and the total size in bytes of the source object. + // + // ProgressFunc is intended to make upload progress available to the + // application. 
For example, the implementation of ProgressFunc may update + // a progress bar in the application's UI, or log the result of + // float64(copiedBytes)/float64(totalBytes). + // + // ProgressFunc should return quickly without blocking. + ProgressFunc func(copiedBytes, totalBytes uint64) + + dst, src *ObjectHandle +} + +// Run performs the copy. +func (c *Copier) Run(ctx context.Context) (*ObjectAttrs, error) { + if err := c.src.validate(); err != nil { + return nil, err + } + if err := c.dst.validate(); err != nil { + return nil, err + } + // Convert destination attributes to raw form, omitting the bucket. + // If the bucket is included but name or content-type aren't, the service + // returns a 400 with "Required" as the only message. Omitting the bucket + // does not cause any problems. + rawObject := c.ObjectAttrs.toRawObject("") + for { + res, err := c.callRewrite(ctx, c.src, rawObject) + if err != nil { + return nil, err + } + if c.ProgressFunc != nil { + c.ProgressFunc(res.TotalBytesRewritten, res.ObjectSize) + } + if res.Done { // Finished successfully. 
+ return newObject(res.Resource), nil + } + } + return nil, nil +} + +func (c *Copier) callRewrite(ctx context.Context, src *ObjectHandle, rawObj *raw.Object) (*raw.RewriteResponse, error) { + call := c.dst.c.raw.Objects.Rewrite(src.bucket, src.object, c.dst.bucket, c.dst.object, rawObj) + + call.Context(ctx).Projection("full") + if c.RewriteToken != "" { + call.RewriteToken(c.RewriteToken) + } + if err := applyConds("Copy destination", c.dst.gen, c.dst.conds, call); err != nil { + return nil, err + } + if err := applySourceConds(c.src.gen, c.src.conds, call); err != nil { + return nil, err + } + if err := setEncryptionHeaders(call.Header(), c.dst.encryptionKey, false); err != nil { + return nil, err + } + if err := setEncryptionHeaders(call.Header(), c.src.encryptionKey, true); err != nil { + return nil, err + } + var res *raw.RewriteResponse + var err error + err = runWithRetry(ctx, func() error { res, err = call.Do(); return err }) + if err != nil { + return nil, err + } + c.RewriteToken = res.RewriteToken + return res, nil +} + +// ComposerFrom creates a Composer that can compose srcs into dst. +// You can immediately call Run on the returned Composer, or you can +// configure it first. +// +// The encryption key for the destination object will be used to decrypt all +// source objects and encrypt the destination object. It is an error +// to specify an encryption key for any of the source objects. +func (dst *ObjectHandle) ComposerFrom(srcs ...*ObjectHandle) *Composer { + return &Composer{dst: dst, srcs: srcs} +} + +// A Composer composes source objects into a destination object. +type Composer struct { + // ObjectAttrs are optional attributes to set on the destination object. + // Any attributes must be initialized before any calls on the Composer. Nil + // or zero-valued attributes are ignored. + ObjectAttrs + + dst *ObjectHandle + srcs []*ObjectHandle +} + +// Run performs the compose operation. 
+func (c *Composer) Run(ctx context.Context) (*ObjectAttrs, error) { + if err := c.dst.validate(); err != nil { + return nil, err + } + if len(c.srcs) == 0 { + return nil, errors.New("storage: at least one source object must be specified") + } + + req := &raw.ComposeRequest{} + // Compose requires a non-empty Destination, so we always set it, + // even if the caller-provided ObjectAttrs is the zero value. + req.Destination = c.ObjectAttrs.toRawObject(c.dst.bucket) + for _, src := range c.srcs { + if err := src.validate(); err != nil { + return nil, err + } + if src.bucket != c.dst.bucket { + return nil, fmt.Errorf("storage: all source objects must be in bucket %q, found %q", c.dst.bucket, src.bucket) + } + if src.encryptionKey != nil { + return nil, fmt.Errorf("storage: compose source %s.%s must not have encryption key", src.bucket, src.object) + } + srcObj := &raw.ComposeRequestSourceObjects{ + Name: src.object, + } + if err := applyConds("ComposeFrom source", src.gen, src.conds, composeSourceObj{srcObj}); err != nil { + return nil, err + } + req.SourceObjects = append(req.SourceObjects, srcObj) + } + + call := c.dst.c.raw.Objects.Compose(c.dst.bucket, c.dst.object, req).Context(ctx) + if err := applyConds("ComposeFrom destination", c.dst.gen, c.dst.conds, call); err != nil { + return nil, err + } + if err := setEncryptionHeaders(call.Header(), c.dst.encryptionKey, false); err != nil { + return nil, err + } + var obj *raw.Object + var err error + err = runWithRetry(ctx, func() error { obj, err = call.Do(); return err }) + if err != nil { + return nil, err + } + return newObject(obj), nil +} diff --git a/vendor/cloud.google.com/go/storage/doc.go b/vendor/cloud.google.com/go/storage/doc.go new file mode 100644 index 00000000..951391f5 --- /dev/null +++ b/vendor/cloud.google.com/go/storage/doc.go @@ -0,0 +1,161 @@ +// Copyright 2016 Google Inc. All Rights Reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/* +Package storage provides an easy way to work with Google Cloud Storage. +Google Cloud Storage stores data in named objects, which are grouped into buckets. + +More information about Google Cloud Storage is available at +https://cloud.google.com/storage/docs. + +All of the methods of this package use exponential backoff to retry calls +that fail with certain errors, as described in +https://cloud.google.com/storage/docs/exponential-backoff. + +Note: This package is in beta. Some backwards-incompatible changes may occur. + + +Creating a Client + +To start working with this package, create a client: + + ctx := context.Background() + client, err := storage.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + +Buckets + +A Google Cloud Storage bucket is a collection of objects. To work with a +bucket, make a bucket handle: + + bkt := client.Bucket(bucketName) + +A handle is a reference to a bucket. You can have a handle even if the +bucket doesn't exist yet. To create a bucket in Google Cloud Storage, +call Create on the handle: + + if err := bkt.Create(ctx, projectID, nil); err != nil { + // TODO: Handle error. + } + +Note that although buckets are associated with projects, bucket names are +global across all projects. + +Each bucket has associated metadata, represented in this package by +BucketAttrs. 
The third argument to BucketHandle.Create allows you to set +the intial BucketAttrs of a bucket. To retrieve a bucket's attributes, use +Attrs: + + attrs, err := bkt.Attrs(ctx) + if err != nil { + // TODO: Handle error. + } + fmt.Printf("bucket %s, created at %s, is located in %s with storage class %s\n", + attrs.Name, attrs.Created, attrs.Location, attrs.StorageClass) + +Objects + +An object holds arbitrary data as a sequence of bytes, like a file. You +refer to objects using a handle, just as with buckets. You can use the +standard Go io.Reader and io.Writer interfaces to read and write +object data: + + obj := bkt.Object("data") + // Write something to obj. + // w implements io.Writer. + w := obj.NewWriter(ctx) + // Write some text to obj. This will overwrite whatever is there. + if _, err := fmt.Fprintf(w, "This object contains text.\n"); err != nil { + // TODO: Handle error. + } + // Close, just like writing a file. + if err := w.Close(); err != nil { + // TODO: Handle error. + } + + // Read it back. + r, err := obj.NewReader(ctx) + if err != nil { + // TODO: Handle error. + } + defer r.Close() + if _, err := io.Copy(os.Stdout, r); err != nil { + // TODO: Handle error. + } + // Prints "This object contains text." + +Objects also have attributes, which you can fetch with Attrs: + + objAttrs, err := obj.Attrs(ctx) + if err != nil { + // TODO: Handle error. + } + fmt.Printf("object %s has size %d and can be read using %s\n", + objAttrs.Name, objAttrs.Size, objAttrs.MediaLink) + +ACLs + +Both objects and buckets have ACLs (Access Control Lists). An ACL is a list of +ACLRules, each of which specifies the role of a user, group or project. ACLs +are suitable for fine-grained control, but you may prefer using IAM to control +access at the project level (see +https://cloud.google.com/storage/docs/access-control/iam). 
+ +To list the ACLs of a bucket or object, obtain an ACLHandle and call its List method: + + acls, err := obj.ACL().List(ctx) + if err != nil { + // TODO: Handle error. + } + for _, rule := range acls { + fmt.Printf("%s has role %s\n", rule.Entity, rule.Role) + } + +You can also set and delete ACLs. + +Conditions + +Every object has a generation and a metageneration. The generation changes +whenever the content changes, and the metageneration changes whenever the +metadata changes. Conditions let you check these values before an operation; +the operation only executes if the conditions match. You can use conditions to +prevent race conditions in read-modify-write operations. + +For example, say you've read an object's metadata into objAttrs. Now +you want to write to that object, but only if its contents haven't changed +since you read it. Here is how to express that: + + w = obj.If(storage.Conditions{GenerationMatch: objAttrs.Generation}).NewWriter(ctx) + // Proceed with writing as above. + +Signed URLs + +You can obtain a URL that lets anyone read or write an object for a limited time. +You don't need to create a client to do this. See the documentation of +SignedURL for details. + + url, err := storage.SignedURL(bucketName, "shared-object", opts) + if err != nil { + // TODO: Handle error. + } + fmt.Println(url) + +Authentication + +See examples of authorization and authentication at +https://godoc.org/cloud.google.com/go#pkg-examples. +*/ +package storage // import "cloud.google.com/go/storage" diff --git a/vendor/cloud.google.com/go/storage/example_test.go b/vendor/cloud.google.com/go/storage/example_test.go new file mode 100644 index 00000000..602bc9d2 --- /dev/null +++ b/vendor/cloud.google.com/go/storage/example_test.go @@ -0,0 +1,501 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package storage_test + +import ( + "fmt" + "io" + "io/ioutil" + "log" + "os" + "time" + + "cloud.google.com/go/storage" + "golang.org/x/net/context" + "google.golang.org/api/iterator" +) + +func ExampleNewClient() { + ctx := context.Background() + client, err := storage.NewClient(ctx) + if err != nil { + // TODO: handle error. + } + // Use the client. + + // Close the client when finished. + if err := client.Close(); err != nil { + // TODO: handle error. + } +} + +func ExampleNewClient_auth() { + ctx := context.Background() + // Use Google Application Default Credentials to authorize and authenticate the client. + // More information about Application Default Credentials and how to enable is at + // https://developers.google.com/identity/protocols/application-default-credentials. + client, err := storage.NewClient(ctx) + if err != nil { + log.Fatal(err) + } + + // Use the client. + + // Close the client when finished. + if err := client.Close(); err != nil { + log.Fatal(err) + } +} + +func ExampleBucketHandle_Create() { + ctx := context.Background() + client, err := storage.NewClient(ctx) + if err != nil { + // TODO: handle error. + } + if err := client.Bucket("my-bucket").Create(ctx, "my-project", nil); err != nil { + // TODO: handle error. + } +} + +func ExampleBucketHandle_Delete() { + ctx := context.Background() + client, err := storage.NewClient(ctx) + if err != nil { + // TODO: handle error. + } + if err := client.Bucket("my-bucket").Delete(ctx); err != nil { + // TODO: handle error. 
+	}
+}
+
+func ExampleBucketHandle_Attrs() {
+	ctx := context.Background()
+	client, err := storage.NewClient(ctx)
+	if err != nil {
+		// TODO: handle error.
+	}
+	attrs, err := client.Bucket("my-bucket").Attrs(ctx)
+	if err != nil {
+		// TODO: handle error.
+	}
+	fmt.Println(attrs)
+}
+
+func ExampleClient_Buckets() {
+	ctx := context.Background()
+	client, err := storage.NewClient(ctx)
+	if err != nil {
+		// TODO: handle error.
+	}
+	// Buckets (not Bucket) returns a *BucketIterator over the project's
+	// buckets; Bucket would return a single *BucketHandle, which cannot
+	// be iterated as the TODO below intends.
+	it := client.Buckets(ctx, "my-project")
+	_ = it // TODO: iterate using Next or iterator.Pager.
+}
+
+func ExampleBucketIterator_Next() {
+	ctx := context.Background()
+	client, err := storage.NewClient(ctx)
+	if err != nil {
+		// TODO: handle error.
+	}
+	it := client.Buckets(ctx, "my-project")
+	for {
+		bucketAttrs, err := it.Next()
+		if err == iterator.Done {
+			break
+		}
+		if err != nil {
+			// TODO: Handle error.
+		}
+		fmt.Println(bucketAttrs)
+	}
+}
+
+func ExampleBucketHandle_Objects() {
+	ctx := context.Background()
+	client, err := storage.NewClient(ctx)
+	if err != nil {
+		// TODO: handle error.
+	}
+	it := client.Bucket("my-bucket").Objects(ctx, nil)
+	_ = it // TODO: iterate using Next or iterator.Pager.
+}
+
+func ExampleObjectIterator_Next() {
+	ctx := context.Background()
+	client, err := storage.NewClient(ctx)
+	if err != nil {
+		// TODO: handle error.
+	}
+	it := client.Bucket("my-bucket").Objects(ctx, nil)
+	for {
+		objAttrs, err := it.Next()
+		if err == iterator.Done {
+			break
+		}
+		if err != nil {
+			// TODO: Handle error.
+		}
+		fmt.Println(objAttrs)
+	}
+}
+
+func ExampleSignedURL() {
+	pkey, err := ioutil.ReadFile("my-private-key.pem")
+	if err != nil {
+		// TODO: handle error.
+	}
+	url, err := storage.SignedURL("my-bucket", "my-object", &storage.SignedURLOptions{
+		GoogleAccessID: "xxx@developer.gserviceaccount.com",
+		PrivateKey:     pkey,
+		Method:         "GET",
+		Expires:        time.Now().Add(48 * time.Hour),
+	})
+	if err != nil {
+		// TODO: handle error.
+ } + fmt.Println(url) +} + +func ExampleObjectHandle_Attrs() { + ctx := context.Background() + client, err := storage.NewClient(ctx) + if err != nil { + // TODO: handle error. + } + objAttrs, err := client.Bucket("my-bucket").Object("my-object").Attrs(ctx) + if err != nil { + // TODO: handle error. + } + fmt.Println(objAttrs) +} + +func ExampleObjectHandle_Attrs_withConditions() { + ctx := context.Background() + client, err := storage.NewClient(ctx) + if err != nil { + // TODO: handle error. + } + obj := client.Bucket("my-bucket").Object("my-object") + // Read the object. + objAttrs1, err := obj.Attrs(ctx) + if err != nil { + // TODO: handle error. + } + // Do something else for a while. + time.Sleep(5 * time.Minute) + // Now read the same contents, even if the object has been written since the last read. + objAttrs2, err := obj.Generation(objAttrs1.Generation).Attrs(ctx) + if err != nil { + // TODO: handle error. + } + fmt.Println(objAttrs1, objAttrs2) +} + +func ExampleObjectHandle_Update() { + ctx := context.Background() + client, err := storage.NewClient(ctx) + if err != nil { + // TODO: handle error. + } + // Change only the content type of the object. + objAttrs, err := client.Bucket("my-bucket").Object("my-object").Update(ctx, storage.ObjectAttrsToUpdate{ + ContentType: "text/html", + ContentDisposition: "", // delete ContentDisposition + }) + if err != nil { + // TODO: handle error. + } + fmt.Println(objAttrs) +} + +func ExampleObjectHandle_NewReader() { + ctx := context.Background() + client, err := storage.NewClient(ctx) + if err != nil { + // TODO: handle error. + } + rc, err := client.Bucket("my-bucket").Object("my-object").NewReader(ctx) + if err != nil { + // TODO: handle error. + } + slurp, err := ioutil.ReadAll(rc) + rc.Close() + if err != nil { + // TODO: handle error. 
+ } + fmt.Println("file contents:", slurp) +} + +func ExampleObjectHandle_NewRangeReader() { + ctx := context.Background() + client, err := storage.NewClient(ctx) + if err != nil { + // TODO: handle error. + } + // Read only the first 64K. + rc, err := client.Bucket("bucketname").Object("filename1").NewRangeReader(ctx, 0, 64*1024) + if err != nil { + // TODO: handle error. + } + slurp, err := ioutil.ReadAll(rc) + rc.Close() + if err != nil { + // TODO: handle error. + } + fmt.Println("first 64K of file contents:", slurp) +} + +func ExampleObjectHandle_NewWriter() { + ctx := context.Background() + client, err := storage.NewClient(ctx) + if err != nil { + // TODO: handle error. + } + wc := client.Bucket("bucketname").Object("filename1").NewWriter(ctx) + _ = wc // TODO: Use the Writer. +} + +func ExampleWriter_Write() { + ctx := context.Background() + client, err := storage.NewClient(ctx) + if err != nil { + // TODO: handle error. + } + wc := client.Bucket("bucketname").Object("filename1").NewWriter(ctx) + wc.ContentType = "text/plain" + wc.ACL = []storage.ACLRule{{storage.AllUsers, storage.RoleReader}} + if _, err := wc.Write([]byte("hello world")); err != nil { + // TODO: handle error. + } + if err := wc.Close(); err != nil { + // TODO: handle error. + } + fmt.Println("updated object:", wc.Attrs()) +} + +func ExampleObjectHandle_Delete() { + ctx := context.Background() + client, err := storage.NewClient(ctx) + if err != nil { + // TODO: handle error. + } + // To delete multiple objects in a bucket, list them with an + // ObjectIterator, then Delete them. + + // If you are using this package on the App Engine Flex runtime, + // you can init a bucket client with your app's default bucket name. + // See http://godoc.org/google.golang.org/appengine/file#DefaultBucketName. + bucket := client.Bucket("my-bucket") + it := bucket.Objects(ctx, nil) + for { + objAttrs, err := it.Next() + if err != nil && err != iterator.Done { + // TODO: Handle error. 
+ } + if err == iterator.Done { + break + } + if err := bucket.Object(objAttrs.Name).Delete(ctx); err != nil { + // TODO: Handle error. + } + } + fmt.Println("deleted all object items in the bucket specified.") +} + +func ExampleACLHandle_Delete() { + ctx := context.Background() + client, err := storage.NewClient(ctx) + if err != nil { + // TODO: handle error. + } + // No longer grant access to the bucket to everyone on the Internet. + if err := client.Bucket("my-bucket").ACL().Delete(ctx, storage.AllUsers); err != nil { + // TODO: handle error. + } +} + +func ExampleACLHandle_Set() { + ctx := context.Background() + client, err := storage.NewClient(ctx) + if err != nil { + // TODO: handle error. + } + // Let any authenticated user read my-bucket/my-object. + obj := client.Bucket("my-bucket").Object("my-object") + if err := obj.ACL().Set(ctx, storage.AllAuthenticatedUsers, storage.RoleReader); err != nil { + // TODO: handle error. + } +} + +func ExampleACLHandle_List() { + ctx := context.Background() + client, err := storage.NewClient(ctx) + if err != nil { + // TODO: handle error. + } + // List the default object ACLs for my-bucket. + aclRules, err := client.Bucket("my-bucket").DefaultObjectACL().List(ctx) + if err != nil { + // TODO: handle error. + } + fmt.Println(aclRules) +} + +func ExampleCopier_Run() { + ctx := context.Background() + client, err := storage.NewClient(ctx) + if err != nil { + // TODO: handle error. + } + src := client.Bucket("bucketname").Object("file1") + dst := client.Bucket("another-bucketname").Object("file2") + + // Copy content and modify metadata. + copier := dst.CopierFrom(src) + copier.ContentType = "text/plain" + attrs, err := copier.Run(ctx) + if err != nil { + // TODO: Handle error, possibly resuming with copier.RewriteToken. + } + fmt.Println(attrs) + + // Just copy content. + attrs, err = dst.CopierFrom(src).Run(ctx) + if err != nil { + // TODO: Handle error. No way to resume. 
+ } + fmt.Println(attrs) +} + +func ExampleCopier_Run_progress() { + // Display progress across multiple rewrite RPCs. + ctx := context.Background() + client, err := storage.NewClient(ctx) + if err != nil { + // TODO: handle error. + } + src := client.Bucket("bucketname").Object("file1") + dst := client.Bucket("another-bucketname").Object("file2") + + copier := dst.CopierFrom(src) + copier.ProgressFunc = func(copiedBytes, totalBytes uint64) { + log.Printf("copy %.1f%% done", float64(copiedBytes)/float64(totalBytes)*100) + } + if _, err := copier.Run(ctx); err != nil { + // TODO: handle error. + } +} + +var key1, key2 []byte + +func ExampleObjectHandle_CopierFrom_rotateEncryptionKeys() { + // To rotate the encryption key on an object, copy it onto itself. + ctx := context.Background() + client, err := storage.NewClient(ctx) + if err != nil { + // TODO: handle error. + } + obj := client.Bucket("bucketname").Object("obj") + // Assume obj is encrypted with key1, and we want to change to key2. + _, err = obj.Key(key2).CopierFrom(obj.Key(key1)).Run(ctx) + if err != nil { + // TODO: handle error. + } +} + +func ExampleComposer_Run() { + ctx := context.Background() + client, err := storage.NewClient(ctx) + if err != nil { + // TODO: handle error. + } + bkt := client.Bucket("bucketname") + src1 := bkt.Object("o1") + src2 := bkt.Object("o2") + dst := bkt.Object("o3") + // Compose and modify metadata. + c := dst.ComposerFrom(src1, src2) + c.ContentType = "text/plain" + attrs, err := c.Run(ctx) + if err != nil { + // TODO: Handle error. + } + fmt.Println(attrs) + // Just compose. + attrs, err = dst.ComposerFrom(src1, src2).Run(ctx) + if err != nil { + // TODO: Handle error. + } + fmt.Println(attrs) +} + +var gen int64 + +func ExampleObjectHandle_Generation() { + // Read an object's contents from generation gen, regardless of the + // current generation of the object. 
+ ctx := context.Background() + client, err := storage.NewClient(ctx) + if err != nil { + // TODO: handle error. + } + obj := client.Bucket("my-bucket").Object("my-object") + rc, err := obj.Generation(gen).NewReader(ctx) + if err != nil { + // TODO: handle error. + } + defer rc.Close() + if _, err := io.Copy(os.Stdout, rc); err != nil { + // TODO: handle error. + } +} + +func ExampleObjectHandle_If() { + // Read from an object only if the current generation is gen. + ctx := context.Background() + client, err := storage.NewClient(ctx) + if err != nil { + // TODO: handle error. + } + obj := client.Bucket("my-bucket").Object("my-object") + rc, err := obj.If(storage.Conditions{GenerationMatch: gen}).NewReader(ctx) + if err != nil { + // TODO: handle error. + } + defer rc.Close() + if _, err := io.Copy(os.Stdout, rc); err != nil { + // TODO: handle error. + } +} + +var secretKey []byte + +func ExampleObjectHandle_Key() { + ctx := context.Background() + client, err := storage.NewClient(ctx) + if err != nil { + // TODO: handle error. + } + obj := client.Bucket("my-bucket").Object("my-object") + // Encrypt the object's contents. + w := obj.Key(secretKey).NewWriter(ctx) + if _, err := w.Write([]byte("top secret")); err != nil { + // TODO: handle error. + } + if err := w.Close(); err != nil { + // TODO: handle error. + } +} diff --git a/vendor/cloud.google.com/go/storage/integration_test.go b/vendor/cloud.google.com/go/storage/integration_test.go new file mode 100644 index 00000000..b0f3851e --- /dev/null +++ b/vendor/cloud.google.com/go/storage/integration_test.go @@ -0,0 +1,1147 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package storage + +import ( + "bytes" + "compress/gzip" + "crypto/md5" + "crypto/sha256" + "encoding/base64" + "flag" + "fmt" + "io" + "io/ioutil" + "log" + "math/rand" + "net/http" + "os" + "reflect" + "sort" + "strings" + "testing" + "time" + + gax "github.com/googleapis/gax-go" + + "golang.org/x/net/context" + + "cloud.google.com/go/internal" + "cloud.google.com/go/internal/testutil" + "google.golang.org/api/googleapi" + "google.golang.org/api/iterator" + itesting "google.golang.org/api/iterator/testing" + "google.golang.org/api/option" +) + +const testPrefix = "-go-cloud-storage-test" + +// suffix is a timestamp-based suffix which is added to all buckets created by +// tests. This reduces flakiness when the tests are run in parallel and allows +// automatic cleaning up of artifacts left when tests fail. +var suffix = fmt.Sprintf("%s-%d", testPrefix, time.Now().UnixNano()) + +func TestMain(m *testing.M) { + integrationTest := initIntegrationTest() + exit := m.Run() + if integrationTest { + if err := cleanup(); err != nil { + // No need to be loud if cleanup() fails; we'll get + // any undeleted buckets next time. + log.Printf("Post-test cleanup failed: %v\n", err) + } + } + os.Exit(exit) +} + +// If integration tests will be run, create a unique bucket for them. 
+func initIntegrationTest() bool { + flag.Parse() // needed for testing.Short() + ctx := context.Background() + if testing.Short() { + return false + } + client, bucket := config(ctx) + if client == nil { + return false + } + defer client.Close() + if err := client.Bucket(bucket).Create(ctx, testutil.ProjID(), nil); err != nil { + log.Fatalf("creating bucket %q: %v", bucket, err) + } + return true +} + +// testConfig returns the Client used to access GCS and the default bucket +// name to use. testConfig skips the current test if credentials are not +// available or when being run in Short mode. +func testConfig(ctx context.Context, t *testing.T) (*Client, string) { + if testing.Short() { + t.Skip("Integration tests skipped in short mode") + } + client, bucket := config(ctx) + if client == nil { + t.Skip("Integration tests skipped. See CONTRIBUTING.md for details") + } + return client, bucket +} + +// config is like testConfig, but it doesn't need a *testing.T. +func config(ctx context.Context) (*Client, string) { + ts := testutil.TokenSource(ctx, ScopeFullControl) + if ts == nil { + return nil, "" + } + p := testutil.ProjID() + if p == "" { + log.Fatal("The project ID must be set. See CONTRIBUTING.md for details") + } + client, err := NewClient(ctx, option.WithTokenSource(ts)) + if err != nil { + log.Fatalf("NewClient: %v", err) + } + return client, p + suffix +} + +func TestBucketMethods(t *testing.T) { + ctx := context.Background() + client, bucket := testConfig(ctx, t) + defer client.Close() + + projectID := testutil.ProjID() + newBucket := bucket + "-new" + // Test Create and Delete. + if err := client.Bucket(newBucket).Create(ctx, projectID, nil); err != nil { + t.Errorf("Bucket(%v).Create(%v, %v) failed: %v", newBucket, projectID, nil, err) + } + if err := client.Bucket(newBucket).Delete(ctx); err != nil { + t.Errorf("Bucket(%v).Delete failed: %v", newBucket, err) + } + + // Test Create and Delete with attributes. 
+ attrs := BucketAttrs{ + DefaultObjectACL: []ACLRule{{Entity: "domain-google.com", Role: RoleReader}}, + } + if err := client.Bucket(newBucket).Create(ctx, projectID, &attrs); err != nil { + t.Errorf("Bucket(%v).Create(%v, %v) failed: %v", newBucket, projectID, attrs, err) + } + if err := client.Bucket(newBucket).Delete(ctx); err != nil { + t.Errorf("Bucket(%v).Delete failed: %v", newBucket, err) + } +} + +func TestIntegration_ConditionalDelete(t *testing.T) { + ctx := context.Background() + client, bucket := testConfig(ctx, t) + defer client.Close() + + o := client.Bucket(bucket).Object("conddel") + + wc := o.NewWriter(ctx) + wc.ContentType = "text/plain" + if _, err := wc.Write([]byte("foo")); err != nil { + t.Fatal(err) + } + if err := wc.Close(); err != nil { + t.Fatal(err) + } + + gen := wc.Attrs().Generation + metaGen := wc.Attrs().MetaGeneration + + if err := o.Generation(gen - 1).Delete(ctx); err == nil { + t.Fatalf("Unexpected successful delete with Generation") + } + if err := o.If(Conditions{MetagenerationMatch: metaGen + 1}).Delete(ctx); err == nil { + t.Fatalf("Unexpected successful delete with IfMetaGenerationMatch") + } + if err := o.If(Conditions{MetagenerationNotMatch: metaGen}).Delete(ctx); err == nil { + t.Fatalf("Unexpected successful delete with IfMetaGenerationNotMatch") + } + if err := o.Generation(gen).Delete(ctx); err != nil { + t.Fatalf("final delete failed: %v", err) + } +} + +func TestObjects(t *testing.T) { + // TODO(djd): there are a lot of closely-related tests here which share + // a common setup. Once we can depend on Go 1.7 features, we should refactor + // this test to use the sub-test feature. This will increase the readability + // of this test, and should also reduce the time it takes to execute. 
+ // https://golang.org/pkg/testing/#hdr-Subtests_and_Sub_benchmarks + ctx := context.Background() + client, bucket := testConfig(ctx, t) + defer client.Close() + + bkt := client.Bucket(bucket) + + const defaultType = "text/plain" + + // Populate object names and make a map for their contents. + objects := []string{ + "obj1", + "obj2", + "obj/with/slashes", + } + contents := make(map[string][]byte) + + // Test Writer. + for _, obj := range objects { + c := randomContents() + if err := writeObject(ctx, bkt.Object(obj), defaultType, c); err != nil { + t.Errorf("Write for %v failed with %v", obj, err) + } + contents[obj] = c + } + + testObjectIterator(t, bkt, objects) + + // Test Reader. + for _, obj := range objects { + rc, err := bkt.Object(obj).NewReader(ctx) + if err != nil { + t.Errorf("Can't create a reader for %v, errored with %v", obj, err) + continue + } + slurp, err := ioutil.ReadAll(rc) + if err != nil { + t.Errorf("Can't ReadAll object %v, errored with %v", obj, err) + } + if got, want := slurp, contents[obj]; !bytes.Equal(got, want) { + t.Errorf("Contents (%q) = %q; want %q", obj, got, want) + } + if got, want := rc.Size(), len(contents[obj]); got != int64(want) { + t.Errorf("Size (%q) = %d; want %d", obj, got, want) + } + if got, want := rc.ContentType(), "text/plain"; got != want { + t.Errorf("ContentType (%q) = %q; want %q", obj, got, want) + } + rc.Close() + + // Test SignedURL + opts := &SignedURLOptions{ + GoogleAccessID: "xxx@clientid", + PrivateKey: dummyKey("rsa"), + Method: "GET", + MD5: []byte("202cb962ac59075b964b07152d234b70"), + Expires: time.Date(2020, time.October, 2, 10, 0, 0, 0, time.UTC), + ContentType: "application/json", + Headers: []string{"x-header1", "x-header2"}, + } + u, err := SignedURL(bucket, obj, opts) + if err != nil { + t.Fatalf("SignedURL(%q, %q) errored with %v", bucket, obj, err) + } + res, err := client.hc.Get(u) + if err != nil { + t.Fatalf("Can't get URL %q: %v", u, err) + } + slurp, err = ioutil.ReadAll(res.Body) + 
if err != nil { + t.Fatalf("Can't ReadAll signed object %v, errored with %v", obj, err) + } + if got, want := slurp, contents[obj]; !bytes.Equal(got, want) { + t.Errorf("Contents (%v) = %q; want %q", obj, got, want) + } + res.Body.Close() + } + + obj := objects[0] + objlen := int64(len(contents[obj])) + // Test Range Reader. + for i, r := range []struct { + offset, length, want int64 + }{ + {0, objlen, objlen}, + {0, objlen / 2, objlen / 2}, + {objlen / 2, objlen, objlen / 2}, + {0, 0, 0}, + {objlen / 2, 0, 0}, + {objlen / 2, -1, objlen / 2}, + {0, objlen * 2, objlen}, + } { + rc, err := bkt.Object(obj).NewRangeReader(ctx, r.offset, r.length) + if err != nil { + t.Errorf("%d: Can't create a range reader for %v, errored with %v", i, obj, err) + continue + } + if rc.Size() != objlen { + t.Errorf("%d: Reader has a content-size of %d, want %d", i, rc.Size(), objlen) + } + if rc.Remain() != r.want { + t.Errorf("%d: Reader's available bytes reported as %d, want %d", i, rc.Remain(), r.want) + } + slurp, err := ioutil.ReadAll(rc) + if err != nil { + t.Errorf("%d:Can't ReadAll object %v, errored with %v", i, obj, err) + continue + } + if len(slurp) != int(r.want) { + t.Errorf("%d:RangeReader (%d, %d): Read %d bytes, wanted %d bytes", i, r.offset, r.length, len(slurp), r.want) + continue + } + if got, want := slurp, contents[obj][r.offset:r.offset+r.want]; !bytes.Equal(got, want) { + t.Errorf("RangeReader (%d, %d) = %q; want %q", r.offset, r.length, got, want) + } + rc.Close() + } + + // Test content encoding + const zeroCount = 20 << 20 + w := bkt.Object("gzip-test").NewWriter(ctx) + w.ContentEncoding = "gzip" + gw := gzip.NewWriter(w) + if _, err := io.Copy(gw, io.LimitReader(zeros{}, zeroCount)); err != nil { + t.Fatalf("io.Copy, upload: %v", err) + } + if err := gw.Close(); err != nil { + t.Errorf("gzip.Close(): %v", err) + } + if err := w.Close(); err != nil { + t.Errorf("w.Close(): %v", err) + } + r, err := bkt.Object("gzip-test").NewReader(ctx) + if err != nil { + 
t.Fatalf("NewReader(gzip-test): %v", err) + } + n, err := io.Copy(ioutil.Discard, r) + if err != nil { + t.Errorf("io.Copy, download: %v", err) + } + if n != zeroCount { + t.Errorf("downloaded bad data: got %d bytes, want %d", n, zeroCount) + } + + // Test NotFound. + _, err = bkt.Object("obj-not-exists").NewReader(ctx) + if err != ErrObjectNotExist { + t.Errorf("Object should not exist, err found to be %v", err) + } + + objName := objects[0] + + // Test NewReader googleapi.Error. + // Since a 429 or 5xx is hard to cause, we trigger a 416. + realLen := len(contents[objName]) + _, err = bkt.Object(objName).NewRangeReader(ctx, int64(realLen*2), 10) + if err, ok := err.(*googleapi.Error); !ok { + t.Error("NewRangeReader did not return a googleapi.Error") + } else { + if err.Code != 416 { + t.Errorf("Code = %d; want %d", err.Code, 416) + } + if len(err.Header) == 0 { + t.Error("Missing googleapi.Error.Header") + } + if len(err.Body) == 0 { + t.Error("Missing googleapi.Error.Body") + } + } + + // Test StatObject. + o, err := bkt.Object(objName).Attrs(ctx) + if err != nil { + t.Error(err) + } + if got, want := o.Name, objName; got != want { + t.Errorf("Name (%v) = %q; want %q", objName, got, want) + } + if got, want := o.ContentType, defaultType; got != want { + t.Errorf("ContentType (%v) = %q; want %q", objName, got, want) + } + created := o.Created + // Check that the object is newer than its containing bucket. + bAttrs, err := bkt.Attrs(ctx) + if err != nil { + t.Error(err) + } + if o.Created.Before(bAttrs.Created) { + t.Errorf("Object %v is older than its containing bucket, %v", o, bAttrs) + } + + // Test object copy. 
+ copyName := "copy-" + objName + copyObj, err := bkt.Object(copyName).CopierFrom(bkt.Object(objName)).Run(ctx) + if err != nil { + t.Errorf("Copier.Run failed with %v", err) + } else if !namesEqual(copyObj, bucket, copyName) { + t.Errorf("Copy object bucket, name: got %q.%q, want %q.%q", + copyObj.Bucket, copyObj.Name, bucket, copyName) + } + + // Copying with attributes. + const contentEncoding = "identity" + copier := bkt.Object(copyName).CopierFrom(bkt.Object(objName)) + copier.ContentEncoding = contentEncoding + copyObj, err = copier.Run(ctx) + if err != nil { + t.Errorf("Copier.Run failed with %v", err) + } else { + if !namesEqual(copyObj, bucket, copyName) { + t.Errorf("Copy object bucket, name: got %q.%q, want %q.%q", + copyObj.Bucket, copyObj.Name, bucket, copyName) + } + if copyObj.ContentEncoding != contentEncoding { + t.Errorf("Copy ContentEncoding: got %q, want %q", copyObj.ContentEncoding, contentEncoding) + } + } + + // Test UpdateAttrs. + metadata := map[string]string{"key": "value"} + updated, err := bkt.Object(objName).Update(ctx, ObjectAttrsToUpdate{ + ContentType: "text/html", + ContentLanguage: "en", + Metadata: metadata, + ACL: []ACLRule{{Entity: "domain-google.com", Role: RoleReader}}, + }) + if err != nil { + t.Errorf("UpdateAttrs failed with %v", err) + } else { + if got, want := updated.ContentType, "text/html"; got != want { + t.Errorf("updated.ContentType == %q; want %q", got, want) + } + if got, want := updated.ContentLanguage, "en"; got != want { + t.Errorf("updated.ContentLanguage == %q; want %q", updated.ContentLanguage, want) + } + if got, want := updated.Metadata, metadata; !reflect.DeepEqual(got, want) { + t.Errorf("updated.Metadata == %+v; want %+v", updated.Metadata, want) + } + if got, want := updated.Created, created; got != want { + t.Errorf("updated.Created == %q; want %q", got, want) + } + if !updated.Created.Before(updated.Updated) { + t.Errorf("updated.Updated should be newer than update.Created") + } + } + // Delete 
ContentType and ContentLanguage. + updated, err = bkt.Object(objName).Update(ctx, ObjectAttrsToUpdate{ + ContentType: "", + ContentLanguage: "", + Metadata: map[string]string{}, + }) + if err != nil { + t.Errorf("UpdateAttrs failed with %v", err) + } else { + if got, want := updated.ContentType, ""; got != want { + t.Errorf("updated.ContentType == %q; want %q", got, want) + } + if got, want := updated.ContentLanguage, ""; got != want { + t.Errorf("updated.ContentLanguage == %q; want %q", updated.ContentLanguage, want) + } + if updated.Metadata != nil { + t.Errorf("updated.Metadata == %+v; want nil", updated.Metadata) + } + if got, want := updated.Created, created; got != want { + t.Errorf("updated.Created == %q; want %q", got, want) + } + if !updated.Created.Before(updated.Updated) { + t.Errorf("updated.Updated should be newer than update.Created") + } + } + + // Test checksums. + checksumCases := []struct { + name string + contents [][]byte + size int64 + md5 string + crc32c uint32 + }{ + { + name: "checksum-object", + contents: [][]byte{[]byte("hello"), []byte("world")}, + size: 10, + md5: "fc5e038d38a57032085441e7fe7010b0", + crc32c: 1456190592, + }, + { + name: "zero-object", + contents: [][]byte{}, + size: 0, + md5: "d41d8cd98f00b204e9800998ecf8427e", + crc32c: 0, + }, + } + for _, c := range checksumCases { + wc := bkt.Object(c.name).NewWriter(ctx) + for _, data := range c.contents { + if _, err := wc.Write(data); err != nil { + t.Errorf("Write(%q) failed with %q", data, err) + } + } + if err = wc.Close(); err != nil { + t.Errorf("%q: close failed with %q", c.name, err) + } + obj := wc.Attrs() + if got, want := obj.Size, c.size; got != want { + t.Errorf("Object (%q) Size = %v; want %v", c.name, got, want) + } + if got, want := fmt.Sprintf("%x", obj.MD5), c.md5; got != want { + t.Errorf("Object (%q) MD5 = %q; want %q", c.name, got, want) + } + if got, want := obj.CRC32C, c.crc32c; got != want { + t.Errorf("Object (%q) CRC32C = %v; want %v", c.name, got, want) 
+ } + } + + // Test public ACL. + publicObj := objects[0] + if err = bkt.Object(publicObj).ACL().Set(ctx, AllUsers, RoleReader); err != nil { + t.Errorf("PutACLEntry failed with %v", err) + } + publicClient, err := NewClient(ctx, option.WithHTTPClient(http.DefaultClient)) + if err != nil { + t.Fatal(err) + } + + slurp, err := readObject(ctx, publicClient.Bucket(bucket).Object(publicObj)) + if err != nil { + t.Errorf("readObject failed with %v", err) + } else if !bytes.Equal(slurp, contents[publicObj]) { + t.Errorf("Public object's content: got %q, want %q", slurp, contents[publicObj]) + } + + // Test writer error handling. + wc := publicClient.Bucket(bucket).Object(publicObj).NewWriter(ctx) + if _, err := wc.Write([]byte("hello")); err != nil { + t.Errorf("Write unexpectedly failed with %v", err) + } + if err = wc.Close(); err == nil { + t.Error("Close expected an error, found none") + } + + // Test deleting the copy object. + if err := bkt.Object(copyName).Delete(ctx); err != nil { + t.Errorf("Deletion of %v failed with %v", copyName, err) + } + // Deleting it a second time should return ErrObjectNotExist. + if err := bkt.Object(copyName).Delete(ctx); err != ErrObjectNotExist { + t.Errorf("second deletion of %v = %v; want ErrObjectNotExist", copyName, err) + } + _, err = bkt.Object(copyName).Attrs(ctx) + if err != ErrObjectNotExist { + t.Errorf("Copy is expected to be deleted, stat errored with %v", err) + } + + // Test object composition. + var compSrcs []*ObjectHandle + var wantContents []byte + for _, obj := range objects { + compSrcs = append(compSrcs, bkt.Object(obj)) + wantContents = append(wantContents, contents[obj]...) 
+ } + checkCompose := func(obj *ObjectHandle, wantContentType string) { + rc, err := obj.NewReader(ctx) + if err != nil { + t.Fatalf("NewReader: %v", err) + } + slurp, err = ioutil.ReadAll(rc) + if err != nil { + t.Fatalf("ioutil.ReadAll: %v", err) + } + defer rc.Close() + if !bytes.Equal(slurp, wantContents) { + t.Errorf("Composed object contents\ngot: %q\nwant: %q", slurp, wantContents) + } + if got := rc.ContentType(); got != wantContentType { + t.Errorf("Composed object content-type = %q, want %q", got, wantContentType) + } + } + + // Compose should work even if the user sets no destination attributes. + compDst := bkt.Object("composed1") + c := compDst.ComposerFrom(compSrcs...) + if _, err := c.Run(ctx); err != nil { + t.Fatalf("ComposeFrom error: %v", err) + } + checkCompose(compDst, "application/octet-stream") + + // It should also work if we do. + compDst = bkt.Object("composed2") + c = compDst.ComposerFrom(compSrcs...) + c.ContentType = "text/json" + if _, err := c.Run(ctx); err != nil { + t.Fatalf("ComposeFrom error: %v", err) + } + checkCompose(compDst, "text/json") +} + +func namesEqual(obj *ObjectAttrs, bucketName, objectName string) bool { + return obj.Bucket == bucketName && obj.Name == objectName +} + +func testObjectIterator(t *testing.T, bkt *BucketHandle, objects []string) { + ctx := context.Background() + // Collect the list of items we expect: ObjectAttrs in lexical order by name. + names := make([]string, len(objects)) + copy(names, objects) + sort.Strings(names) + var attrs []*ObjectAttrs + for _, name := range names { + attr, err := bkt.Object(name).Attrs(ctx) + if err != nil { + t.Errorf("Object(%q).Attrs: %v", name, err) + return + } + attrs = append(attrs, attr) + } + // The following iterator test fails occasionally, probably because the + // underlying Objects.List operation is eventually consistent. So we retry + // it. 
+ tctx, cancel := context.WithTimeout(ctx, 30*time.Second) + defer cancel() + var msg string + var ok bool + err := internal.Retry(tctx, gax.Backoff{}, func() (stop bool, err error) { + msg, ok = itesting.TestIterator(attrs, + func() interface{} { return bkt.Objects(ctx, &Query{Prefix: "obj"}) }, + func(it interface{}) (interface{}, error) { return it.(*ObjectIterator).Next() }) + if ok { + return true, nil + } else { + t.Logf("TestIterator failed, trying again: %s", msg) + return false, nil + } + }) + if !ok { + t.Errorf("ObjectIterator.Next: %s (err=%v)", msg, err) + } + // TODO(jba): test query.Delimiter != "" +} + +func TestACL(t *testing.T) { + ctx := context.Background() + client, bucket := testConfig(ctx, t) + defer client.Close() + + bkt := client.Bucket(bucket) + + entity := ACLEntity("domain-google.com") + rule := ACLRule{Entity: entity, Role: RoleReader} + if err := bkt.DefaultObjectACL().Set(ctx, entity, RoleReader); err != nil { + t.Errorf("Can't put default ACL rule for the bucket, errored with %v", err) + } + acl, err := bkt.DefaultObjectACL().List(ctx) + if err != nil { + t.Errorf("DefaultObjectACL.List for bucket %q: %v", bucket, err) + } else if !hasRule(acl, rule) { + t.Errorf("default ACL missing %#v", rule) + } + aclObjects := []string{"acl1", "acl2"} + for _, obj := range aclObjects { + c := randomContents() + if err := writeObject(ctx, bkt.Object(obj), "", c); err != nil { + t.Errorf("Write for %v failed with %v", obj, err) + } + } + name := aclObjects[0] + o := bkt.Object(name) + acl, err = o.ACL().List(ctx) + if err != nil { + t.Errorf("Can't retrieve ACL of %v", name) + } else if !hasRule(acl, rule) { + t.Errorf("object ACL missing %+v", rule) + } + if err := o.ACL().Delete(ctx, entity); err != nil { + t.Errorf("object ACL: could not delete entity %s", entity) + } + // Delete the default ACL rule. We can't move this code earlier in the + // test, because the test depends on the fact that the object ACL inherits + // it. 
+ if err := bkt.DefaultObjectACL().Delete(ctx, entity); err != nil { + t.Errorf("default ACL: could not delete entity %s", entity) + } + + entity2 := ACLEntity("user-jbd@google.com") + rule2 := ACLRule{Entity: entity2, Role: RoleReader} + if err := bkt.ACL().Set(ctx, entity2, RoleReader); err != nil { + t.Errorf("Error while putting bucket ACL rule: %v", err) + } + bACL, err := bkt.ACL().List(ctx) + if err != nil { + t.Errorf("Error while getting the ACL of the bucket: %v", err) + } else if !hasRule(bACL, rule2) { + t.Errorf("bucket ACL missing %+v", rule2) + } + if err := bkt.ACL().Delete(ctx, entity2); err != nil { + t.Errorf("Error while deleting bucket ACL rule: %v", err) + } + +} + +func hasRule(acl []ACLRule, rule ACLRule) bool { + for _, r := range acl { + if r == rule { + return true + } + } + return false +} + +func TestValidObjectNames(t *testing.T) { + ctx := context.Background() + client, bucket := testConfig(ctx, t) + defer client.Close() + + bkt := client.Bucket(bucket) + + validNames := []string{ + "gopher", + "Гоферови", + "a", + strings.Repeat("a", 1024), + } + for _, name := range validNames { + if err := writeObject(ctx, bkt.Object(name), "", []byte("data")); err != nil { + t.Errorf("Object %q write failed: %v. Want success", name, err) + continue + } + defer bkt.Object(name).Delete(ctx) + } + + invalidNames := []string{ + "", // Too short. + strings.Repeat("a", 1025), // Too long. + "new\nlines", + "bad\xffunicode", + } + for _, name := range invalidNames { + // Invalid object names will either cause failure during Write or Close. + if err := writeObject(ctx, bkt.Object(name), "", []byte("data")); err != nil { + continue + } + defer bkt.Object(name).Delete(ctx) + t.Errorf("%q should have failed. 
Didn't", name) + } +} + +func TestWriterContentType(t *testing.T) { + ctx := context.Background() + client, bucket := testConfig(ctx, t) + defer client.Close() + + obj := client.Bucket(bucket).Object("content") + testCases := []struct { + content string + setType, wantType string + }{ + { + content: "It was the best of times, it was the worst of times.", + wantType: "text/plain; charset=utf-8", + }, + { + content: "My first page", + wantType: "text/html; charset=utf-8", + }, + { + content: "My first page", + setType: "text/html", + wantType: "text/html", + }, + { + content: "My first page", + setType: "image/jpeg", + wantType: "image/jpeg", + }, + } + for i, tt := range testCases { + if err := writeObject(ctx, obj, tt.setType, []byte(tt.content)); err != nil { + t.Errorf("writing #%d: %v", i, err) + } + attrs, err := obj.Attrs(ctx) + if err != nil { + t.Errorf("obj.Attrs: %v", err) + continue + } + if got := attrs.ContentType; got != tt.wantType { + t.Errorf("Content-Type = %q; want %q\nContent: %q\nSet Content-Type: %q", got, tt.wantType, tt.content, tt.setType) + } + } +} + +func TestZeroSizedObject(t *testing.T) { + ctx := context.Background() + client, bucket := testConfig(ctx, t) + defer client.Close() + + obj := client.Bucket(bucket).Object("zero") + + // Check writing it works as expected. + w := obj.NewWriter(ctx) + if err := w.Close(); err != nil { + t.Fatalf("Writer.Close: %v", err) + } + defer obj.Delete(ctx) + + // Check we can read it too. + body, err := readObject(ctx, obj) + if err != nil { + t.Fatalf("readObject: %v", err) + } + if len(body) != 0 { + t.Errorf("Body is %v, want empty []byte{}", body) + } +} + +func TestIntegration_Encryption(t *testing.T) { + // This function tests customer-supplied encryption keys for all operations + // involving objects. Bucket and ACL operations aren't tested because they + // aren't affected customer encryption. 
+ ctx := context.Background() + client, bucket := testConfig(ctx, t) + defer client.Close() + + obj := client.Bucket(bucket).Object("customer-encryption") + key := []byte("my-secret-AES-256-encryption-key") + keyHash := sha256.Sum256(key) + keyHashB64 := base64.StdEncoding.EncodeToString(keyHash[:]) + key2 := []byte("My-Secret-AES-256-Encryption-Key") + contents := "top secret." + + checkMetadataCall := func(msg string, f func(o *ObjectHandle) (*ObjectAttrs, error)) { + // Performing a metadata operation without the key should succeed. + attrs, err := f(obj) + if err != nil { + t.Fatalf("%s: %v", msg, err) + } + // The key hash should match... + if got, want := attrs.CustomerKeySHA256, keyHashB64; got != want { + t.Errorf("%s: key hash: got %q, want %q", msg, got, want) + } + // ...but CRC and MD5 should not be present. + if attrs.CRC32C != 0 { + t.Errorf("%s: CRC: got %v, want 0", msg, attrs.CRC32C) + } + if len(attrs.MD5) > 0 { + t.Errorf("%s: MD5: got %v, want len == 0", msg, attrs.MD5) + } + + // Performing a metadata operation with the key should succeed. + attrs, err = f(obj.Key(key)) + if err != nil { + t.Fatalf("%s: %v", msg, err) + } + // Check the key and content hashes. + if got, want := attrs.CustomerKeySHA256, keyHashB64; got != want { + t.Errorf("%s: key hash: got %q, want %q", msg, got, want) + } + if attrs.CRC32C == 0 { + t.Errorf("%s: CRC: got 0, want non-zero", msg) + } + if len(attrs.MD5) == 0 { + t.Errorf("%s: MD5: got len == 0, want len > 0", msg) + } + } + + checkRead := func(msg string, o *ObjectHandle, k []byte, wantContents string) { + // Reading the object without the key should fail. + if _, err := readObject(ctx, o); err == nil { + t.Errorf("%s: reading without key: want error, got nil", msg) + } + // Reading the object with the key should succeed. + got, err := readObject(ctx, o.Key(k)) + if err != nil { + t.Fatalf("%s: %v", msg, err) + } + gotContents := string(got) + // And the contents should match what we wrote. 
+		if gotContents != wantContents {
+			t.Errorf("%s: contents: got %q, want %q", msg, gotContents, wantContents)
+		}
+	}
+
+	checkReadUnencrypted := func(msg string, obj *ObjectHandle, wantContents string) {
+		got, err := readObject(ctx, obj)
+		if err != nil {
+			t.Fatalf("%s: %v", msg, err)
+		}
+		gotContents := string(got)
+		if gotContents != wantContents {
+			// FIX: the format string has three verbs but the original passed
+			// only two arguments, dropping msg; go vet flags this as a
+			// printf arg-count mismatch.
+			t.Errorf("%s: got %q, want %q", msg, gotContents, wantContents)
+		}
+	}
+
+	// Write to obj using our own encryption key, which is a valid 32-byte
+	// AES-256 key.
+	w := obj.Key(key).NewWriter(ctx)
+	w.Write([]byte(contents))
+	if err := w.Close(); err != nil {
+		t.Fatal(err)
+	}
+
+	checkMetadataCall("Attrs", func(o *ObjectHandle) (*ObjectAttrs, error) {
+		return o.Attrs(ctx)
+	})
+
+	checkMetadataCall("Update", func(o *ObjectHandle) (*ObjectAttrs, error) {
+		return o.Update(ctx, ObjectAttrsToUpdate{ContentLanguage: "en"})
+	})
+
+	checkRead("first object", obj, key, contents)
+
+	obj2 := client.Bucket(bucket).Object("customer-encryption-2")
+	// Copying an object without the key should fail.
+	if _, err := obj2.CopierFrom(obj).Run(ctx); err == nil {
+		t.Fatal("want error, got nil")
+	}
+	// Copying an object with the key should succeed.
+	if _, err := obj2.CopierFrom(obj.Key(key)).Run(ctx); err != nil {
+		t.Fatal(err)
+	}
+	// The destination object is not encrypted; we can read it without a key.
+	checkReadUnencrypted("copy dest", obj2, contents)
+
+	// Providing a key on the destination but not the source should fail,
+	// since the source is encrypted.
+	if _, err := obj2.Key(key2).CopierFrom(obj).Run(ctx); err == nil {
+		t.Fatal("want error, got nil")
+	}
+
+	// But copying with keys for both source and destination should succeed.
+	if _, err := obj2.Key(key2).CopierFrom(obj.Key(key)).Run(ctx); err != nil {
+		t.Fatal(err)
+	}
+	// And the destination should be encrypted, meaning we can only read it
+	// with a key.
+ checkRead("copy destination", obj2, key2, contents) + + // Change obj2's key to prepare for compose, where all objects must have + // the same key. Also illustrates key rotation: copy an object to itself + // with a different key. + if _, err := obj2.Key(key).CopierFrom(obj2.Key(key2)).Run(ctx); err != nil { + t.Fatal(err) + } + obj3 := client.Bucket(bucket).Object("customer-encryption-3") + // Composing without keys should fail. + if _, err := obj3.ComposerFrom(obj, obj2).Run(ctx); err == nil { + t.Fatal("want error, got nil") + } + // Keys on the source objects result in an error. + if _, err := obj3.ComposerFrom(obj.Key(key), obj2).Run(ctx); err == nil { + t.Fatal("want error, got nil") + } + // A key on the destination object both decrypts the source objects + // and encrypts the destination. + if _, err := obj3.Key(key).ComposerFrom(obj, obj2).Run(ctx); err != nil { + t.Fatalf("got %v, want nil", err) + } + // Check that the destination in encrypted. + checkRead("compose destination", obj3, key, contents+contents) + + // You can't compose one or more unencrypted source objects into an + // encrypted destination object. 
+ _, err := obj2.CopierFrom(obj2.Key(key)).Run(ctx) // unencrypt obj2 + if err != nil { + t.Fatal(err) + } + if _, err := obj3.Key(key).ComposerFrom(obj2).Run(ctx); err == nil { + t.Fatal("got nil, want error") + } +} + +func TestIntegration_NonexistentBucket(t *testing.T) { + ctx := context.Background() + client, bucket := testConfig(ctx, t) + defer client.Close() + + bkt := client.Bucket(bucket + "-nonexistent") + if _, err := bkt.Attrs(ctx); err != ErrBucketNotExist { + t.Errorf("Attrs: got %v, want ErrBucketNotExist", err) + } + it := bkt.Objects(ctx, nil) + if _, err := it.Next(); err != ErrBucketNotExist { + t.Errorf("Objects: got %v, want ErrBucketNotExist", err) + } +} + +func TestIntegration_PerObjectStorageClass(t *testing.T) { + const ( + defaultStorageClass = "STANDARD" + newStorageClass = "MULTI_REGIONAL" + ) + ctx := context.Background() + client, bucket := testConfig(ctx, t) + defer client.Close() + + bkt := client.Bucket(bucket) + + // The bucket should have the default storage class. + battrs, err := bkt.Attrs(ctx) + if err != nil { + t.Fatal(err) + } + if battrs.StorageClass != defaultStorageClass { + t.Fatalf("bucket storage class: got %q, want %q", + battrs.StorageClass, defaultStorageClass) + } + // Write an object; it should start with the bucket's storage class. + obj := bkt.Object("posc") + if err := writeObject(ctx, obj, "", []byte("foo")); err != nil { + t.Fatal(err) + } + oattrs, err := obj.Attrs(ctx) + if err != nil { + t.Fatal(err) + } + if oattrs.StorageClass != defaultStorageClass { + t.Fatalf("object storage class: got %q, want %q", + oattrs.StorageClass, defaultStorageClass) + } + // Now use Copy to change the storage class. 
+	copier := obj.CopierFrom(obj)
+	copier.StorageClass = newStorageClass
+	oattrs2, err := copier.Run(ctx)
+	if err != nil {
+		// FIX: was log.Fatal(err), which exits the whole test binary and
+		// skips cleanup; use t.Fatal like the rest of this test.
+		t.Fatal(err)
+	}
+	if oattrs2.StorageClass != newStorageClass {
+		t.Fatalf("new object storage class: got %q, want %q",
+			oattrs2.StorageClass, newStorageClass)
+	}
+
+	// We can also write a new object using a non-default storage class.
+	obj2 := bkt.Object("posc2")
+	w := obj2.NewWriter(ctx)
+	w.StorageClass = newStorageClass
+	if _, err := w.Write([]byte("xxx")); err != nil {
+		t.Fatal(err)
+	}
+	if err := w.Close(); err != nil {
+		t.Fatal(err)
+	}
+	if w.Attrs().StorageClass != newStorageClass {
+		t.Fatalf("new object storage class: got %q, want %q",
+			w.Attrs().StorageClass, newStorageClass)
+	}
+}
+
+func TestIntegration_BucketInCopyAttrs(t *testing.T) {
+	// Confirm that if bucket is included in the object attributes of a rewrite
+	// call, but object name and content-type aren't, then we get an error. See
+	// the comment in Copier.Run.
+	ctx := context.Background()
+	client, bucket := testConfig(ctx, t)
+	defer client.Close()
+
+	bkt := client.Bucket(bucket)
+	obj := bkt.Object("bucketInCopyAttrs")
+	if err := writeObject(ctx, obj, "", []byte("foo")); err != nil {
+		t.Fatal(err)
+	}
+	copier := obj.CopierFrom(obj)
+	rawObject := copier.ObjectAttrs.toRawObject(bucket)
+	_, err := copier.callRewrite(ctx, obj, rawObject)
+	if err == nil {
+		t.Errorf("got nil, want error")
+	}
+}
+
+func writeObject(ctx context.Context, obj *ObjectHandle, contentType string, contents []byte) error {
+	w := obj.NewWriter(ctx)
+	w.ContentType = contentType
+	if contents != nil {
+		if _, err := w.Write(contents); err != nil {
+			_ = w.Close()
+			return err
+		}
+	}
+	return w.Close()
+}
+
+func readObject(ctx context.Context, obj *ObjectHandle) ([]byte, error) {
+	r, err := obj.NewReader(ctx)
+	if err != nil {
+		return nil, err
+	}
+	defer r.Close()
+	return ioutil.ReadAll(r)
+}
+
+// cleanup deletes the bucket used for testing, as well as old
+// testing
buckets that weren't cleaned previously. +func cleanup() error { + if testing.Short() { + return nil // Don't clean up in short mode. + } + ctx := context.Background() + client, bucket := config(ctx) + if client == nil { + return nil // Don't cleanup if we're not configured correctly. + } + defer client.Close() + if err := killBucket(ctx, client, bucket); err != nil { + return err + } + + // Delete buckets whose name begins with our test prefix, and which were + // created a while ago. (Unfortunately GCS doesn't provide last-modified + // time, which would be a better way to check for staleness.) + const expireAge = 24 * time.Hour + projectID := testutil.ProjID() + it := client.Buckets(ctx, projectID) + it.Prefix = projectID + testPrefix + for { + bktAttrs, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + return err + } + if time.Since(bktAttrs.Created) > expireAge { + log.Printf("deleting bucket %q, which is more than %s old", bktAttrs.Name, expireAge) + if err := killBucket(ctx, client, bktAttrs.Name); err != nil { + return err + } + } + } + return nil +} + +// killBucket deletes a bucket and all its objects. +func killBucket(ctx context.Context, client *Client, bucketName string) error { + bkt := client.Bucket(bucketName) + // Bucket must be empty to delete. + it := bkt.Objects(ctx, nil) + for { + objAttrs, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + return err + } + if err := bkt.Object(objAttrs.Name).Delete(ctx); err != nil { + return fmt.Errorf("deleting %q: %v", bucketName+"/"+objAttrs.Name, err) + } + } + // GCS is eventually consistent, so this delete may fail because the + // replica still sees an object in the bucket. We log the error and expect + // a later test run to delete the bucket. 
+ if err := bkt.Delete(ctx); err != nil { + log.Printf("deleting %q: %v", bucketName, err) + } + return nil +} + +func randomContents() []byte { + h := md5.New() + io.WriteString(h, fmt.Sprintf("hello world%d", rand.Intn(100000))) + return h.Sum(nil) +} + +type zeros struct{} + +func (zeros) Read(p []byte) (int, error) { return len(p), nil } diff --git a/vendor/cloud.google.com/go/storage/invoke.go b/vendor/cloud.google.com/go/storage/invoke.go new file mode 100644 index 00000000..e8fc924e --- /dev/null +++ b/vendor/cloud.google.com/go/storage/invoke.go @@ -0,0 +1,43 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package storage + +import ( + "cloud.google.com/go/internal" + gax "github.com/googleapis/gax-go" + "golang.org/x/net/context" + "google.golang.org/api/googleapi" +) + +// runWithRetry calls the function until it returns nil or a non-retryable error, or +// the context is done. +func runWithRetry(ctx context.Context, call func() error) error { + return internal.Retry(ctx, gax.Backoff{}, func() (stop bool, err error) { + err = call() + if err == nil { + return true, nil + } + e, ok := err.(*googleapi.Error) + if !ok { + return true, err + } + // Retry on 429 and 5xx, according to + // https://cloud.google.com/storage/docs/exponential-backoff. 
+ if e.Code == 429 || (e.Code >= 500 && e.Code < 600) { + return false, nil + } + return true, err + }) +} diff --git a/vendor/cloud.google.com/go/storage/invoke_test.go b/vendor/cloud.google.com/go/storage/invoke_test.go new file mode 100644 index 00000000..2818a15f --- /dev/null +++ b/vendor/cloud.google.com/go/storage/invoke_test.go @@ -0,0 +1,55 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package storage + +import ( + "errors" + "testing" + + "golang.org/x/net/context" + "google.golang.org/api/googleapi" +) + +func TestInvoke(t *testing.T) { + ctx := context.Background() + // Time-based tests are flaky. We just make sure that invoke eventually + // returns with the right error. 
+ + for _, test := range []struct { + count int // number of times to return retryable error + retryCode int // error code for retryable error + err error // error to return after count returns of retryCode + }{ + {0, 0, nil}, + {0, 0, errors.New("foo")}, + {1, 429, nil}, + {1, 429, errors.New("bar")}, + {2, 518, nil}, + {2, 599, &googleapi.Error{Code: 428}}, + } { + counter := 0 + call := func() error { + counter++ + if counter <= test.count { + return &googleapi.Error{Code: test.retryCode} + } + return test.err + } + got := runWithRetry(ctx, call) + if got != test.err { + t.Errorf("%v: got %v, want %v", test, got, test.err) + } + } +} diff --git a/vendor/cloud.google.com/go/storage/reader.go b/vendor/cloud.google.com/go/storage/reader.go new file mode 100644 index 00000000..329a5f39 --- /dev/null +++ b/vendor/cloud.google.com/go/storage/reader.go @@ -0,0 +1,57 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package storage + +import ( + "io" +) + +// Reader reads a Cloud Storage object. +// It implements io.Reader. +type Reader struct { + body io.ReadCloser + remain, size int64 + contentType string +} + +// Close closes the Reader. It must be called when done reading. 
+func (r *Reader) Close() error { + return r.body.Close() +} + +func (r *Reader) Read(p []byte) (int, error) { + n, err := r.body.Read(p) + if r.remain != -1 { + r.remain -= int64(n) + } + return n, err +} + +// Size returns the size of the object in bytes. +// The returned value is always the same and is not affected by +// calls to Read or Close. +func (r *Reader) Size() int64 { + return r.size +} + +// Remain returns the number of bytes left to read, or -1 if unknown. +func (r *Reader) Remain() int64 { + return r.remain +} + +// ContentType returns the content type of the object. +func (r *Reader) ContentType() string { + return r.contentType +} diff --git a/vendor/cloud.google.com/go/storage/storage.go b/vendor/cloud.google.com/go/storage/storage.go new file mode 100644 index 00000000..9d6db945 --- /dev/null +++ b/vendor/cloud.google.com/go/storage/storage.go @@ -0,0 +1,1083 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package storage + +import ( + "bytes" + "crypto" + "crypto/rand" + "crypto/rsa" + "crypto/sha256" + "crypto/x509" + "encoding/base64" + "encoding/pem" + "errors" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "reflect" + "strconv" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/api/option" + "google.golang.org/api/transport" + + "cloud.google.com/go/internal/optional" + "golang.org/x/net/context" + "google.golang.org/api/googleapi" + raw "google.golang.org/api/storage/v1" +) + +var ( + ErrBucketNotExist = errors.New("storage: bucket doesn't exist") + ErrObjectNotExist = errors.New("storage: object doesn't exist") +) + +const userAgent = "gcloud-golang-storage/20151204" + +const ( + // ScopeFullControl grants permissions to manage your + // data and permissions in Google Cloud Storage. + ScopeFullControl = raw.DevstorageFullControlScope + + // ScopeReadOnly grants permissions to + // view your data in Google Cloud Storage. + ScopeReadOnly = raw.DevstorageReadOnlyScope + + // ScopeReadWrite grants permissions to manage your + // data in Google Cloud Storage. + ScopeReadWrite = raw.DevstorageReadWriteScope +) + +// Client is a client for interacting with Google Cloud Storage. +// +// Clients should be reused instead of created as needed. +// The methods of Client are safe for concurrent use by multiple goroutines. +type Client struct { + hc *http.Client + raw *raw.Service +} + +// NewClient creates a new Google Cloud Storage client. +// The default scope is ScopeFullControl. To use a different scope, like ScopeReadOnly, use option.WithScopes. +func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error) { + o := []option.ClientOption{ + option.WithScopes(ScopeFullControl), + option.WithUserAgent(userAgent), + } + opts = append(o, opts...) + hc, ep, err := transport.NewHTTPClient(ctx, opts...) 
+ if err != nil { + return nil, fmt.Errorf("dialing: %v", err) + } + rawService, err := raw.New(hc) + if err != nil { + return nil, fmt.Errorf("storage client: %v", err) + } + if ep != "" { + rawService.BasePath = ep + } + return &Client{ + hc: hc, + raw: rawService, + }, nil +} + +// Close closes the Client. +// +// Close need not be called at program exit. +func (c *Client) Close() error { + c.hc = nil + return nil +} + +// BucketHandle provides operations on a Google Cloud Storage bucket. +// Use Client.Bucket to get a handle. +type BucketHandle struct { + acl ACLHandle + defaultObjectACL ACLHandle + + c *Client + name string +} + +// Bucket returns a BucketHandle, which provides operations on the named bucket. +// This call does not perform any network operations. +// +// The supplied name must contain only lowercase letters, numbers, dashes, +// underscores, and dots. The full specification for valid bucket names can be +// found at: +// https://cloud.google.com/storage/docs/bucket-naming +func (c *Client) Bucket(name string) *BucketHandle { + return &BucketHandle{ + c: c, + name: name, + acl: ACLHandle{ + c: c, + bucket: name, + }, + defaultObjectACL: ACLHandle{ + c: c, + bucket: name, + isDefault: true, + }, + } +} + +// SignedURLOptions allows you to restrict the access to the signed URL. +type SignedURLOptions struct { + // GoogleAccessID represents the authorizer of the signed URL generation. + // It is typically the Google service account client email address from + // the Google Developers Console in the form of "xxx@developer.gserviceaccount.com". + // Required. + GoogleAccessID string + + // PrivateKey is the Google service account private key. It is obtainable + // from the Google Developers Console. + // At https://console.developers.google.com/project//apiui/credential, + // create a service account client ID or reuse one of your existing service account + // credentials. 
Click on the "Generate new P12 key" to generate and download + // a new private key. Once you download the P12 file, use the following command + // to convert it into a PEM file. + // + // $ openssl pkcs12 -in key.p12 -passin pass:notasecret -out key.pem -nodes + // + // Provide the contents of the PEM file as a byte slice. + // Exactly one of PrivateKey or SignBytes must be non-nil. + PrivateKey []byte + + // SignBytes is a function for implementing custom signing. + // If your application is running on Google App Engine, you can use appengine's internal signing function: + // ctx := appengine.NewContext(request) + // acc, _ := appengine.ServiceAccount(ctx) + // url, err := SignedURL("bucket", "object", &SignedURLOptions{ + // GoogleAccessID: acc, + // SignBytes: func(b []byte) ([]byte, error) { + // _, signedBytes, err := appengine.SignBytes(ctx, b) + // return signedBytes, err + // }, + // // etc. + // }) + // + // Exactly one of PrivateKey or SignBytes must be non-nil. + SignBytes func([]byte) ([]byte, error) + + // Method is the HTTP method to be used with the signed URL. + // Signed URLs can be used with GET, HEAD, PUT, and DELETE requests. + // Required. + Method string + + // Expires is the expiration time on the signed URL. It must be + // a datetime in the future. + // Required. + Expires time.Time + + // ContentType is the content type header the client must provide + // to use the generated signed URL. + // Optional. + ContentType string + + // Headers is a list of extention headers the client must provide + // in order to use the generated signed URL. + // Optional. + Headers []string + + // MD5 is the base64 encoded MD5 checksum of the file. + // If provided, the client should provide the exact value on the request + // header in order to use the signed URL. + // Optional. + MD5 []byte +} + +// SignedURL returns a URL for the specified object. 
Signed URLs allow +// the users access to a restricted resource for a limited time without having a +// Google account or signing in. For more information about the signed +// URLs, see https://cloud.google.com/storage/docs/accesscontrol#Signed-URLs. +func SignedURL(bucket, name string, opts *SignedURLOptions) (string, error) { + if opts == nil { + return "", errors.New("storage: missing required SignedURLOptions") + } + if opts.GoogleAccessID == "" { + return "", errors.New("storage: missing required GoogleAccessID") + } + if (opts.PrivateKey == nil) == (opts.SignBytes == nil) { + return "", errors.New("storage: exactly one of PrivateKey or SignedBytes must be set") + } + if opts.Method == "" { + return "", errors.New("storage: missing required method option") + } + if opts.Expires.IsZero() { + return "", errors.New("storage: missing required expires option") + } + + signBytes := opts.SignBytes + if opts.PrivateKey != nil { + key, err := parseKey(opts.PrivateKey) + if err != nil { + return "", err + } + signBytes = func(b []byte) ([]byte, error) { + sum := sha256.Sum256(b) + return rsa.SignPKCS1v15( + rand.Reader, + key, + crypto.SHA256, + sum[:], + ) + } + } else { + signBytes = opts.SignBytes + } + + u := &url.URL{ + Path: fmt.Sprintf("/%s/%s", bucket, name), + } + + buf := &bytes.Buffer{} + fmt.Fprintf(buf, "%s\n", opts.Method) + fmt.Fprintf(buf, "%s\n", opts.MD5) + fmt.Fprintf(buf, "%s\n", opts.ContentType) + fmt.Fprintf(buf, "%d\n", opts.Expires.Unix()) + fmt.Fprintf(buf, "%s", strings.Join(opts.Headers, "\n")) + fmt.Fprintf(buf, "%s", u.String()) + + b, err := signBytes(buf.Bytes()) + if err != nil { + return "", err + } + encoded := base64.StdEncoding.EncodeToString(b) + u.Scheme = "https" + u.Host = "storage.googleapis.com" + q := u.Query() + q.Set("GoogleAccessId", opts.GoogleAccessID) + q.Set("Expires", fmt.Sprintf("%d", opts.Expires.Unix())) + q.Set("Signature", string(encoded)) + u.RawQuery = q.Encode() + return u.String(), nil +} + +// ObjectHandle 
provides operations on an object in a Google Cloud Storage bucket. +// Use BucketHandle.Object to get a handle. +type ObjectHandle struct { + c *Client + bucket string + object string + acl ACLHandle + gen int64 // a negative value indicates latest + conds *Conditions + encryptionKey []byte // AES-256 key +} + +// ACL provides access to the object's access control list. +// This controls who can read and write this object. +// This call does not perform any network operations. +func (o *ObjectHandle) ACL() *ACLHandle { + return &o.acl +} + +// Generation returns a new ObjectHandle that operates on a specific generation +// of the object. +// By default, the handle operates on the latest generation. Not +// all operations work when given a specific generation; check the API +// endpoints at https://cloud.google.com/storage/docs/json_api/ for details. +func (o *ObjectHandle) Generation(gen int64) *ObjectHandle { + o2 := *o + o2.gen = gen + return &o2 +} + +// If returns a new ObjectHandle that applies a set of preconditions. +// Preconditions already set on the ObjectHandle are ignored. +// Operations on the new handle will only occur if the preconditions are +// satisfied. See https://cloud.google.com/storage/docs/generations-preconditions +// for more details. +func (o *ObjectHandle) If(conds Conditions) *ObjectHandle { + o2 := *o + o2.conds = &conds + return &o2 +} + +// Key returns a new ObjectHandle that uses the supplied encryption +// key to encrypt and decrypt the object's contents. +// +// Encryption key must be a 32-byte AES-256 key. +// See https://cloud.google.com/storage/docs/encryption for details. +func (o *ObjectHandle) Key(encryptionKey []byte) *ObjectHandle { + o2 := *o + o2.encryptionKey = encryptionKey + return &o2 +} + +// Attrs returns meta information about the object. +// ErrObjectNotExist will be returned if the object is not found. 
+func (o *ObjectHandle) Attrs(ctx context.Context) (*ObjectAttrs, error) { + if err := o.validate(); err != nil { + return nil, err + } + call := o.c.raw.Objects.Get(o.bucket, o.object).Projection("full").Context(ctx) + if err := applyConds("Attrs", o.gen, o.conds, call); err != nil { + return nil, err + } + if err := setEncryptionHeaders(call.Header(), o.encryptionKey, false); err != nil { + return nil, err + } + var obj *raw.Object + var err error + err = runWithRetry(ctx, func() error { obj, err = call.Do(); return err }) + if e, ok := err.(*googleapi.Error); ok && e.Code == http.StatusNotFound { + return nil, ErrObjectNotExist + } + if err != nil { + return nil, err + } + return newObject(obj), nil +} + +// Update updates an object with the provided attributes. +// All zero-value attributes are ignored. +// ErrObjectNotExist will be returned if the object is not found. +func (o *ObjectHandle) Update(ctx context.Context, uattrs ObjectAttrsToUpdate) (*ObjectAttrs, error) { + if err := o.validate(); err != nil { + return nil, err + } + var attrs ObjectAttrs + // Lists of fields to send, and set to null, in the JSON. + var forceSendFields, nullFields []string + if uattrs.ContentType != nil { + attrs.ContentType = optional.ToString(uattrs.ContentType) + forceSendFields = append(forceSendFields, "ContentType") + } + if uattrs.ContentLanguage != nil { + attrs.ContentLanguage = optional.ToString(uattrs.ContentLanguage) + // For ContentLanguage It's an error to send the empty string. + // Instead we send a null. 
+ if attrs.ContentLanguage == "" { + nullFields = append(nullFields, "ContentLanguage") + } else { + forceSendFields = append(forceSendFields, "ContentLanguage") + } + } + if uattrs.ContentEncoding != nil { + attrs.ContentEncoding = optional.ToString(uattrs.ContentEncoding) + forceSendFields = append(forceSendFields, "ContentType") + } + if uattrs.ContentDisposition != nil { + attrs.ContentDisposition = optional.ToString(uattrs.ContentDisposition) + forceSendFields = append(forceSendFields, "ContentDisposition") + } + if uattrs.CacheControl != nil { + attrs.CacheControl = optional.ToString(uattrs.CacheControl) + forceSendFields = append(forceSendFields, "CacheControl") + } + if uattrs.Metadata != nil { + attrs.Metadata = uattrs.Metadata + if len(attrs.Metadata) == 0 { + // Sending the empty map is a no-op. We send null instead. + nullFields = append(nullFields, "Metadata") + } else { + forceSendFields = append(forceSendFields, "Metadata") + } + } + if uattrs.ACL != nil { + attrs.ACL = uattrs.ACL + // It's an error to attempt to delete the ACL, so + // we don't append to nullFields here. + forceSendFields = append(forceSendFields, "Acl") + } + rawObj := attrs.toRawObject(o.bucket) + rawObj.ForceSendFields = forceSendFields + rawObj.NullFields = nullFields + call := o.c.raw.Objects.Patch(o.bucket, o.object, rawObj).Projection("full").Context(ctx) + if err := applyConds("Update", o.gen, o.conds, call); err != nil { + return nil, err + } + if err := setEncryptionHeaders(call.Header(), o.encryptionKey, false); err != nil { + return nil, err + } + var obj *raw.Object + var err error + err = runWithRetry(ctx, func() error { obj, err = call.Do(); return err }) + if e, ok := err.(*googleapi.Error); ok && e.Code == http.StatusNotFound { + return nil, ErrObjectNotExist + } + if err != nil { + return nil, err + } + return newObject(obj), nil +} + +// ObjectAttrsToUpdate is used to update the attributes of an object. +// Only fields set to non-nil values will be updated. 
// Set a field to its zero value to delete it.
//
// For example, to change ContentType and delete ContentEncoding and
// Metadata, use
//	ObjectAttrsToUpdate{
//		ContentType: "text/html",
//		ContentEncoding: "",
//		Metadata: map[string]string{},
//	}
type ObjectAttrsToUpdate struct {
	ContentType        optional.String
	ContentLanguage    optional.String
	ContentEncoding    optional.String
	ContentDisposition optional.String
	CacheControl       optional.String
	Metadata           map[string]string // set to map[string]string{} to delete
	ACL                []ACLRule
}

// Delete deletes the single specified object.
// ErrObjectNotExist is returned if the object is not found.
func (o *ObjectHandle) Delete(ctx context.Context) error {
	if err := o.validate(); err != nil {
		return err
	}
	call := o.c.raw.Objects.Delete(o.bucket, o.object).Context(ctx)
	if err := applyConds("Delete", o.gen, o.conds, call); err != nil {
		return err
	}
	err := runWithRetry(ctx, func() error { return call.Do() })
	// Map the API's 404 onto the package-level sentinel error.
	switch e := err.(type) {
	case nil:
		return nil
	case *googleapi.Error:
		if e.Code == http.StatusNotFound {
			return ErrObjectNotExist
		}
	}
	return err
}

// NewReader creates a new Reader to read the contents of the
// object.
// ErrObjectNotExist will be returned if the object is not found.
//
// The caller must call Close on the returned Reader when done reading.
func (o *ObjectHandle) NewReader(ctx context.Context) (*Reader, error) {
	// Equivalent to a full-object range read (offset 0, unbounded length).
	return o.NewRangeReader(ctx, 0, -1)
}

// NewRangeReader reads part of an object, reading at most length bytes
// starting at the given offset. If length is negative, the object is read
// until the end.
func (o *ObjectHandle) NewRangeReader(ctx context.Context, offset, length int64) (*Reader, error) {
	if err := o.validate(); err != nil {
		return nil, err
	}
	if offset < 0 {
		return nil, fmt.Errorf("storage: invalid offset %d < 0", offset)
	}
	if o.conds != nil {
		if err := o.conds.validate("NewRangeReader"); err != nil {
			return nil, err
		}
	}
	// Read via the media endpoint; generation and preconditions travel as
	// query parameters rather than JSON-API call options.
	u := &url.URL{
		Scheme:   "https",
		Host:     "storage.googleapis.com",
		Path:     fmt.Sprintf("/%s/%s", o.bucket, o.object),
		RawQuery: conditionsQuery(o.gen, o.conds),
	}
	verb := "GET"
	if length == 0 {
		// A zero-length read only needs the response headers.
		verb = "HEAD"
	}
	req, err := http.NewRequest(verb, u.String(), nil)
	if err != nil {
		return nil, err
	}
	if length < 0 && offset > 0 {
		req.Header.Set("Range", fmt.Sprintf("bytes=%d-", offset))
	} else if length > 0 {
		// HTTP Range is inclusive at both ends.
		req.Header.Set("Range", fmt.Sprintf("bytes=%d-%d", offset, offset+length-1))
	}
	if err := setEncryptionHeaders(req.Header, o.encryptionKey, false); err != nil {
		return nil, err
	}
	var res *http.Response
	err = runWithRetry(ctx, func() error {
		res, err = o.c.hc.Do(req)
		if err != nil {
			return err
		}
		if res.StatusCode == http.StatusNotFound {
			res.Body.Close()
			return ErrObjectNotExist
		}
		if res.StatusCode < 200 || res.StatusCode > 299 {
			body, _ := ioutil.ReadAll(res.Body)
			res.Body.Close()
			return &googleapi.Error{
				Code:   res.StatusCode,
				Header: res.Header,
				Body:   string(body),
			}
		}
		if offset > 0 && length != 0 && res.StatusCode != http.StatusPartialContent {
			// The server ignored our Range header; fail rather than silently
			// returning the whole object from the wrong position.
			res.Body.Close()
			return errors.New("storage: partial request not satisfied")
		}
		return nil
	})
	if err != nil {
		return nil, err
	}

	var size int64 // total size of object, even if a range was requested.
	if res.StatusCode == http.StatusPartialContent {
		// Extract the total size from "Content-Range: bytes x-y/total".
		cr := strings.TrimSpace(res.Header.Get("Content-Range"))
		if !strings.HasPrefix(cr, "bytes ") || !strings.Contains(cr, "/") {
			return nil, fmt.Errorf("storage: invalid Content-Range %q", cr)
		}
		size, err = strconv.ParseInt(cr[strings.LastIndex(cr, "/")+1:], 10, 64)
		if err != nil {
			return nil, fmt.Errorf("storage: invalid Content-Range %q", cr)
		}
	} else {
		size = res.ContentLength
	}

	remain := res.ContentLength
	body := res.Body
	if length == 0 {
		// HEAD request: there is no body to read.
		remain = 0
		body.Close()
		body = emptyBody
	}

	return &Reader{
		body:        body,
		size:        size,
		remain:      remain,
		contentType: res.Header.Get("Content-Type"),
	}, nil
}

// emptyBody is a reusable no-op ReadCloser used for zero-length reads.
var emptyBody = ioutil.NopCloser(strings.NewReader(""))

// NewWriter returns a storage Writer that writes to the GCS object
// associated with this ObjectHandle.
//
// A new object will be created unless an object with this name already exists.
// Otherwise any previous object with the same name will be replaced.
// The object will not be available (and any previous object will remain)
// until Close has been called.
//
// Attributes can be set on the object by modifying the returned Writer's
// ObjectAttrs field before the first call to Write. If no ContentType
// attribute is specified, the content type will be automatically sniffed
// using net/http.DetectContentType.
//
// It is the caller's responsibility to call Close when writing is done.
func (o *ObjectHandle) NewWriter(ctx context.Context) *Writer {
	return &Writer{
		ctx:         ctx,
		o:           o,
		donec:       make(chan struct{}),
		ObjectAttrs: ObjectAttrs{Name: o.object},
		ChunkSize:   googleapi.DefaultUploadChunkSize,
	}
}

// validate reports whether the handle is usable: bucket and object names
// must be non-empty and the object name must be valid UTF-8.
func (o *ObjectHandle) validate() error {
	if o.bucket == "" {
		return errors.New("storage: bucket name is empty")
	}
	if o.object == "" {
		return errors.New("storage: object name is empty")
	}
	if !utf8.ValidString(o.object) {
		return fmt.Errorf("storage: object name %q is not valid UTF-8", o.object)
	}
	return nil
}

// parseKey converts the binary contents of a private key file
// to an *rsa.PrivateKey. It detects whether the private key is in a
// PEM container or not. If so, it extracts the private key
// from the PEM container before conversion. It only supports PEM
// containers with no passphrase.
func parseKey(key []byte) (*rsa.PrivateKey, error) {
	if block, _ := pem.Decode(key); block != nil {
		key = block.Bytes
	}
	// Try PKCS#8 first, then fall back to PKCS#1.
	parsedKey, err := x509.ParsePKCS8PrivateKey(key)
	if err != nil {
		parsedKey, err = x509.ParsePKCS1PrivateKey(key)
		if err != nil {
			return nil, err
		}
	}
	parsed, ok := parsedKey.(*rsa.PrivateKey)
	if !ok {
		// NOTE(review): the "oauth2:" prefix looks copied from the oauth2
		// package; every other error in this file uses "storage:". Left
		// unchanged because it is a runtime string — confirm before fixing.
		return nil, errors.New("oauth2: private key is invalid")
	}
	return parsed, nil
}

// toRawObjectACL converts ACL rules to their raw-API representation.
// It returns nil for an empty input so the Acl field is omitted from JSON.
func toRawObjectACL(oldACL []ACLRule) []*raw.ObjectAccessControl {
	var acl []*raw.ObjectAccessControl
	if len(oldACL) > 0 {
		acl = make([]*raw.ObjectAccessControl, len(oldACL))
		for i, rule := range oldACL {
			acl[i] = &raw.ObjectAccessControl{
				Entity: string(rule.Entity),
				Role:   string(rule.Role),
			}
		}
	}
	return acl
}

// toRawObject copies the editable attributes from o to the raw library's Object type.
func (o *ObjectAttrs) toRawObject(bucket string) *raw.Object {
	acl := toRawObjectACL(o.ACL)
	return &raw.Object{
		Bucket:             bucket,
		Name:               o.Name,
		ContentType:        o.ContentType,
		ContentEncoding:    o.ContentEncoding,
		ContentLanguage:    o.ContentLanguage,
		CacheControl:       o.CacheControl,
		ContentDisposition: o.ContentDisposition,
		StorageClass:       o.StorageClass,
		Acl:                acl,
		Metadata:           o.Metadata,
	}
}

// ObjectAttrs represents the metadata for a Google Cloud Storage (GCS) object.
type ObjectAttrs struct {
	// Bucket is the name of the bucket containing this GCS object.
	// This field is read-only.
	Bucket string

	// Name is the name of the object within the bucket.
	// This field is read-only.
	Name string

	// ContentType is the MIME type of the object's content.
	ContentType string

	// ContentLanguage is the content language of the object's content.
	ContentLanguage string

	// CacheControl is the Cache-Control header to be sent in the response
	// headers when serving the object data.
	CacheControl string

	// ACL is the list of access control rules for the object.
	ACL []ACLRule

	// Owner is the owner of the object. This field is read-only.
	//
	// If non-zero, it is in the form of "user-".
	Owner string

	// Size is the length of the object's content. This field is read-only.
	Size int64

	// ContentEncoding is the encoding of the object's content.
	ContentEncoding string

	// ContentDisposition is the optional Content-Disposition header of the object
	// sent in the response headers.
	ContentDisposition string

	// MD5 is the MD5 hash of the object's content. This field is read-only.
	MD5 []byte

	// CRC32C is the CRC32 checksum of the object's content using
	// the Castagnoli93 polynomial. This field is read-only.
	CRC32C uint32

	// MediaLink is an URL to the object's content. This field is read-only.
	MediaLink string

	// Metadata represents user-provided metadata, in key/value pairs.
	// It can be nil if no metadata is provided.
	Metadata map[string]string

	// Generation is the generation number of the object's content.
	// This field is read-only.
	Generation int64

	// MetaGeneration is the version of the metadata for this
	// object at this generation. This field is used for preconditions
	// and for detecting changes in metadata. A metageneration number
	// is only meaningful in the context of a particular generation
	// of a particular object. This field is read-only.
	MetaGeneration int64

	// StorageClass is the storage class of the object.
	// This value defines how objects in the bucket are stored and
	// determines the SLA and the cost of storage. Typical values are
	// "MULTI_REGIONAL", "REGIONAL", "NEARLINE", "COLDLINE", "STANDARD"
	// and "DURABLE_REDUCED_AVAILABILITY".
	// It defaults to "STANDARD", which is equivalent to "MULTI_REGIONAL"
	// or "REGIONAL" depending on the bucket's location settings.
	StorageClass string

	// Created is the time the object was created. This field is read-only.
	Created time.Time

	// Deleted is the time the object was deleted.
	// If not deleted, it is the zero value. This field is read-only.
	Deleted time.Time

	// Updated is the creation or modification time of the object.
	// For buckets with versioning enabled, changing an object's
	// metadata does not change this property. This field is read-only.
	Updated time.Time

	// CustomerKeySHA256 is the base64-encoded SHA-256 hash of the
	// customer-supplied encryption key for the object. It is empty if there is
	// no customer-supplied encryption key.
	// See https://cloud.google.com/storage/docs/encryption for more about
	// encryption in Google Cloud Storage.
	CustomerKeySHA256 string

	// Prefix is set only for ObjectAttrs which represent synthetic "directory
	// entries" when iterating over buckets using Query.Delimiter. See
	// ObjectIterator.Next. When set, no other fields in ObjectAttrs will be
	// populated.
	Prefix string
}

// convertTime converts a time in RFC3339 format to time.Time.
// If any error occurs in parsing, the zero-value time.Time is silently returned.
func convertTime(t string) time.Time {
	var r time.Time
	if t != "" {
		r, _ = time.Parse(time.RFC3339, t)
	}
	return r
}

// newObject converts a raw-API object to an ObjectAttrs.
// It returns nil if o is nil.
func newObject(o *raw.Object) *ObjectAttrs {
	if o == nil {
		return nil
	}
	acl := make([]ACLRule, len(o.Acl))
	for i, rule := range o.Acl {
		acl[i] = ACLRule{
			Entity: ACLEntity(rule.Entity),
			Role:   ACLRole(rule.Role),
		}
	}
	owner := ""
	if o.Owner != nil {
		owner = o.Owner.Entity
	}
	// MD5 and CRC32C arrive base64-encoded; decode failures are deliberately
	// ignored and leave the zero value.
	md5, _ := base64.StdEncoding.DecodeString(o.Md5Hash)
	var crc32c uint32
	d, err := base64.StdEncoding.DecodeString(o.Crc32c)
	if err == nil && len(d) == 4 {
		// Big-endian 4-byte checksum.
		crc32c = uint32(d[0])<<24 + uint32(d[1])<<16 + uint32(d[2])<<8 + uint32(d[3])
	}
	var sha256 string
	if o.CustomerEncryption != nil {
		sha256 = o.CustomerEncryption.KeySha256
	}
	return &ObjectAttrs{
		Bucket:            o.Bucket,
		Name:              o.Name,
		ContentType:       o.ContentType,
		ContentLanguage:   o.ContentLanguage,
		CacheControl:      o.CacheControl,
		ACL:               acl,
		Owner:             owner,
		ContentEncoding:   o.ContentEncoding,
		Size:              int64(o.Size),
		MD5:               md5,
		CRC32C:            crc32c,
		MediaLink:         o.MediaLink,
		Metadata:          o.Metadata,
		Generation:        o.Generation,
		MetaGeneration:    o.Metageneration,
		StorageClass:      o.StorageClass,
		CustomerKeySHA256: sha256,
		Created:           convertTime(o.TimeCreated),
		Deleted:           convertTime(o.TimeDeleted),
		Updated:           convertTime(o.Updated),
	}
}

// Query represents a query to filter objects from a bucket.
type Query struct {
	// Delimiter returns results in a directory-like fashion.
	// Results will contain only objects whose names, aside from the
	// prefix, do not contain delimiter. Objects whose names,
	// aside from the prefix, contain delimiter will have their name,
	// truncated after the delimiter, returned in prefixes.
	// Duplicate prefixes are omitted.
	// Optional.
	Delimiter string

	// Prefix is the prefix filter to query objects
	// whose names begin with this prefix.
	// Optional.
	Prefix string

	// Versions indicates whether multiple versions of the same
	// object will be included in the results.
	Versions bool
}

// contentTyper implements ContentTyper to enable an
// io.ReadCloser to specify its MIME type.
type contentTyper struct {
	io.Reader
	t string // MIME type reported by ContentType
}

// ContentType returns the wrapped reader's MIME type.
func (c *contentTyper) ContentType() string {
	return c.t
}

// Conditions constrain methods to act on specific generations of
// resources.
//
// The zero value is an empty set of constraints. Not all conditions or
// combinations of conditions are applicable to all methods.
// See https://cloud.google.com/storage/docs/generations-preconditions
// for details on how these operate.
type Conditions struct {
	// Generation constraints.
	// At most one of the following can be set to a non-zero value.

	// GenerationMatch specifies that the object must have the given generation
	// for the operation to occur.
	// If GenerationMatch is zero, it has no effect.
	// Use DoesNotExist to specify that the object does not exist in the bucket.
	GenerationMatch int64

	// GenerationNotMatch specifies that the object must not have the given
	// generation for the operation to occur.
	// If GenerationNotMatch is zero, it has no effect.
	GenerationNotMatch int64

	// DoesNotExist specifies that the object must not exist in the bucket for
	// the operation to occur.
	// If DoesNotExist is false, it has no effect.
	DoesNotExist bool

	// Metadata generation constraints.
	// At most one of the following can be set to a non-zero value.

	// MetagenerationMatch specifies that the object must have the given
	// metageneration for the operation to occur.
	// If MetagenerationMatch is zero, it has no effect.
	MetagenerationMatch int64

	// MetagenerationNotMatch specifies that the object must not have the given
	// metageneration for the operation to occur.
	// If MetagenerationNotMatch is zero, it has no effect.
	MetagenerationNotMatch int64
}

// validate returns an error if the conditions are empty or internally
// inconsistent (more than one generation or metageneration constraint set).
func (c *Conditions) validate(method string) error {
	if *c == (Conditions{}) {
		return fmt.Errorf("storage: %s: empty conditions", method)
	}
	if !c.isGenerationValid() {
		return fmt.Errorf("storage: %s: multiple conditions specified for generation", method)
	}
	if !c.isMetagenerationValid() {
		return fmt.Errorf("storage: %s: multiple conditions specified for metageneration", method)
	}
	return nil
}

// isGenerationValid reports whether at most one of GenerationMatch,
// GenerationNotMatch and DoesNotExist is set.
func (c *Conditions) isGenerationValid() bool {
	n := 0
	if c.GenerationMatch != 0 {
		n++
	}
	if c.GenerationNotMatch != 0 {
		n++
	}
	if c.DoesNotExist {
		n++
	}
	return n <= 1
}

// isMetagenerationValid reports whether at most one of MetagenerationMatch
// and MetagenerationNotMatch is set.
func (c *Conditions) isMetagenerationValid() bool {
	return c.MetagenerationMatch == 0 || c.MetagenerationNotMatch == 0
}

// applyConds modifies the provided call using the conditions in conds.
// call is something that quacks like a *raw.WhateverCall.
func applyConds(method string, gen int64, conds *Conditions, call interface{}) error {
	cval := reflect.ValueOf(call)
	if gen >= 0 {
		// A specific generation was requested on the handle.
		if !setConditionField(cval, "Generation", gen) {
			return fmt.Errorf("storage: %s: generation not supported", method)
		}
	}
	if conds == nil {
		return nil
	}
	if err := conds.validate(method); err != nil {
		return err
	}
	switch {
	case conds.GenerationMatch != 0:
		if !setConditionField(cval, "IfGenerationMatch", conds.GenerationMatch) {
			return fmt.Errorf("storage: %s: ifGenerationMatch not supported", method)
		}
	case conds.GenerationNotMatch != 0:
		if !setConditionField(cval, "IfGenerationNotMatch", conds.GenerationNotMatch) {
			return fmt.Errorf("storage: %s: ifGenerationNotMatch not supported", method)
		}
	case conds.DoesNotExist:
		// "Does not exist" is expressed to the API as ifGenerationMatch=0.
		if !setConditionField(cval, "IfGenerationMatch", int64(0)) {
			return fmt.Errorf("storage: %s: DoesNotExist not supported", method)
		}
	}
	switch {
	case conds.MetagenerationMatch != 0:
		if !setConditionField(cval, "IfMetagenerationMatch", conds.MetagenerationMatch) {
			return fmt.Errorf("storage: %s: ifMetagenerationMatch not supported", method)
		}
	case conds.MetagenerationNotMatch != 0:
		if !setConditionField(cval, "IfMetagenerationNotMatch", conds.MetagenerationNotMatch) {
			return fmt.Errorf("storage: %s: ifMetagenerationNotMatch not supported", method)
		}
	}
	return nil
}

// applySourceConds applies a generation and preconditions to the *source*
// object of a rewrite (copy) call.
func applySourceConds(gen int64, conds *Conditions, call *raw.ObjectsRewriteCall) error {
	if gen >= 0 {
		call.SourceGeneration(gen)
	}
	if conds == nil {
		return nil
	}
	if err := conds.validate("CopyTo source"); err != nil {
		return err
	}
	switch {
	case conds.GenerationMatch != 0:
		call.IfSourceGenerationMatch(conds.GenerationMatch)
	case conds.GenerationNotMatch != 0:
		call.IfSourceGenerationNotMatch(conds.GenerationNotMatch)
	case conds.DoesNotExist:
		call.IfSourceGenerationMatch(0)
	}
	switch {
	case conds.MetagenerationMatch != 0:
		call.IfSourceMetagenerationMatch(conds.MetagenerationMatch)
	case conds.MetagenerationNotMatch != 0:
		call.IfSourceMetagenerationNotMatch(conds.MetagenerationNotMatch)
	}
	return nil
}

// setConditionField sets a field on a *raw.WhateverCall.
// We can't use anonymous interfaces because the return type is
// different, since the field setters are builders.
func setConditionField(call reflect.Value, name string, value interface{}) bool {
	m := call.MethodByName(name)
	if !m.IsValid() {
		return false
	}
	m.Call([]reflect.Value{reflect.ValueOf(value)})
	return true
}

// conditionsQuery returns the generation and conditions as a URL query
// string suitable for URL.RawQuery. It assumes that the conditions
// have been validated.
func conditionsQuery(gen int64, conds *Conditions) string {
	// URL escapes are elided because integer strings are URL-safe.
	var buf []byte

	// appendParam appends "key=<n>" with an '&' separator when needed.
	appendParam := func(s string, n int64) {
		if len(buf) > 0 {
			buf = append(buf, '&')
		}
		buf = append(buf, s...)
		buf = strconv.AppendInt(buf, n, 10)
	}

	if gen >= 0 {
		appendParam("generation=", gen)
	}
	if conds == nil {
		return string(buf)
	}
	switch {
	case conds.GenerationMatch != 0:
		appendParam("ifGenerationMatch=", conds.GenerationMatch)
	case conds.GenerationNotMatch != 0:
		appendParam("ifGenerationNotMatch=", conds.GenerationNotMatch)
	case conds.DoesNotExist:
		appendParam("ifGenerationMatch=", 0)
	}
	switch {
	case conds.MetagenerationMatch != 0:
		appendParam("ifMetagenerationMatch=", conds.MetagenerationMatch)
	case conds.MetagenerationNotMatch != 0:
		appendParam("ifMetagenerationNotMatch=", conds.MetagenerationNotMatch)
	}
	return string(buf)
}

// composeSourceObj wraps a *raw.ComposeRequestSourceObjects, but adds the methods
// that modifyCall searches for by name.
type composeSourceObj struct {
	src *raw.ComposeRequestSourceObjects
}

// Generation sets the source object's generation.
func (c composeSourceObj) Generation(gen int64) {
	c.src.Generation = gen
}

// IfGenerationMatch sets a generation-match precondition on the source object.
func (c composeSourceObj) IfGenerationMatch(gen int64) {
	// It's safe to overwrite ObjectPreconditions, since its only field is
	// IfGenerationMatch.
	c.src.ObjectPreconditions = &raw.ComposeRequestSourceObjectsObjectPreconditions{
		IfGenerationMatch: gen,
	}
}

// setEncryptionHeaders adds the customer-supplied encryption headers
// (algorithm, base64-encoded key, and base64-encoded SHA-256 of the key)
// to headers. If copySource is true, the "copy-source-" header variants
// are used. A nil key is a no-op.
func setEncryptionHeaders(headers http.Header, key []byte, copySource bool) error {
	if key == nil {
		return nil
	}
	// TODO(jbd): Ask the API team to return a more user-friendly error
	// and avoid doing this check at the client level.
	if len(key) != 32 {
		return errors.New("storage: not a 32-byte AES-256 key")
	}
	var cs string
	if copySource {
		cs = "copy-source-"
	}
	headers.Set("x-goog-"+cs+"encryption-algorithm", "AES256")
	headers.Set("x-goog-"+cs+"encryption-key", base64.StdEncoding.EncodeToString(key))
	keyHash := sha256.Sum256(key)
	headers.Set("x-goog-"+cs+"encryption-key-sha256", base64.StdEncoding.EncodeToString(keyHash[:]))
	return nil
}

// TODO(jbd): Add storage.objects.watch.
diff --git a/vendor/cloud.google.com/go/storage/storage_test.go b/vendor/cloud.google.com/go/storage/storage_test.go
new file mode 100644
index 00000000..4ccf8773
--- /dev/null
+++ b/vendor/cloud.google.com/go/storage/storage_test.go
@@ -0,0 +1,683 @@
+// Copyright 2014 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and +// limitations under the License. + +package storage + +import ( + "crypto/tls" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "log" + "net" + "net/http" + "net/http/httptest" + "reflect" + "strings" + "testing" + "time" + + "golang.org/x/net/context" + "google.golang.org/api/iterator" + "google.golang.org/api/option" + raw "google.golang.org/api/storage/v1" +) + +func TestSignedURL(t *testing.T) { + expires, _ := time.Parse(time.RFC3339, "2002-10-02T10:00:00-05:00") + url, err := SignedURL("bucket-name", "object-name", &SignedURLOptions{ + GoogleAccessID: "xxx@clientid", + PrivateKey: dummyKey("rsa"), + Method: "GET", + MD5: []byte("202cb962ac59075b964b07152d234b70"), + Expires: expires, + ContentType: "application/json", + Headers: []string{"x-header1", "x-header2"}, + }) + if err != nil { + t.Error(err) + } + want := "https://storage.googleapis.com/bucket-name/object-name?" + + "Expires=1033570800&GoogleAccessId=xxx%40clientid&Signature=" + + "ITqNWQHr7ayIj%2B0Ds5%2FzUT2cWMQQouuFmu6L11Zd3kfNKvm3sjyGIzO" + + "gZsSUoter1SxP7BcrCzgqIZ9fQmgQnuIpqqLL4kcGmTbKsQS6hTknpJM%2F" + + "2lS4NY6UH1VXBgm2Tce28kz8rnmqG6svcGvtWuOgJsETeSIl1R9nAEIDCEq" + + "ZJzoOiru%2BODkHHkpoFjHWAwHugFHX%2B9EX4SxaytiN3oEy48HpYGWV0I" + + "h8NvU1hmeWzcLr41GnTADeCn7Eg%2Fb5H2GCNO70Cz%2Bw2fn%2BofLCUeR" + + "YQd%2FhES8oocv5kpHZkstc8s8uz3aKMsMauzZ9MOmGy%2F6VULBgIVvi6a" + + "AwEBIYOw%3D%3D" + if url != want { + t.Fatalf("Unexpected signed URL; found %v", url) + } +} + +func TestSignedURL_PEMPrivateKey(t *testing.T) { + expires, _ := time.Parse(time.RFC3339, "2002-10-02T10:00:00-05:00") + url, err := SignedURL("bucket-name", "object-name", &SignedURLOptions{ + GoogleAccessID: "xxx@clientid", + PrivateKey: dummyKey("pem"), + Method: "GET", + MD5: []byte("202cb962ac59075b964b07152d234b70"), + Expires: expires, + ContentType: "application/json", + Headers: []string{"x-header1", "x-header2"}, + }) + if err != nil { + t.Error(err) + } + 
want := "https://storage.googleapis.com/bucket-name/object-name?" + + "Expires=1033570800&GoogleAccessId=xxx%40clientid&Signature=" + + "B7XkS4dfmVDoe%2FoDeXZkWlYmg8u2kI0SizTrzL5%2B9RmKnb5j7Kf34DZ" + + "JL8Hcjr1MdPFLNg2QV4lEH86Gqgqt%2Fv3jFOTRl4wlzcRU%2FvV5c5HU8M" + + "qW0FZ0IDbqod2RdsMONLEO6yQWV2HWFrMLKl2yMFlWCJ47et%2BFaHe6v4Z" + + "EBc0%3D" + if url != want { + t.Fatalf("Unexpected signed URL; found %v", url) + } +} + +func TestSignedURL_SignBytes(t *testing.T) { + expires, _ := time.Parse(time.RFC3339, "2002-10-02T10:00:00-05:00") + url, err := SignedURL("bucket-name", "object-name", &SignedURLOptions{ + GoogleAccessID: "xxx@clientid", + SignBytes: func(b []byte) ([]byte, error) { + return []byte("signed"), nil + }, + Method: "GET", + MD5: []byte("202cb962ac59075b964b07152d234b70"), + Expires: expires, + ContentType: "application/json", + Headers: []string{"x-header1", "x-header2"}, + }) + if err != nil { + t.Error(err) + } + want := "https://storage.googleapis.com/bucket-name/object-name?" 
+ + "Expires=1033570800&GoogleAccessId=xxx%40clientid&Signature=" + + "c2lnbmVk" // base64('signed') == 'c2lnbmVk' + if url != want { + t.Fatalf("Unexpected signed URL\ngot: %q\nwant: %q", url, want) + } +} + +func TestSignedURL_URLUnsafeObjectName(t *testing.T) { + expires, _ := time.Parse(time.RFC3339, "2002-10-02T10:00:00-05:00") + url, err := SignedURL("bucket-name", "object name界", &SignedURLOptions{ + GoogleAccessID: "xxx@clientid", + PrivateKey: dummyKey("pem"), + Method: "GET", + MD5: []byte("202cb962ac59075b964b07152d234b70"), + Expires: expires, + ContentType: "application/json", + Headers: []string{"x-header1", "x-header2"}, + }) + if err != nil { + t.Error(err) + } + want := "https://storage.googleapis.com/bucket-name/object%20nam" + + "e%E7%95%8C?Expires=1033570800&GoogleAccessId=xxx%40clientid" + + "&Signature=bxORkrAm73INEMHktrE7VoUZQzVPvL5NFZ7noAI5zK%2BGSm" + + "%2BWFvsK%2FVnRGtYK9BK89jz%2BX4ZQd87nkMEJw1OsqmGNiepyzB%2B3o" + + "sUYrHyV7UnKs9bkQpBkqPFlfgK1o7oX4NJjA1oKjuHP%2Fj5%2FC15OPa3c" + + "vHV619BEb7vf30nAwQM%3D" + if url != want { + t.Fatalf("Unexpected signed URL; found %v", url) + } +} + +func TestSignedURL_MissingOptions(t *testing.T) { + pk := dummyKey("rsa") + var tests = []struct { + opts *SignedURLOptions + errMsg string + }{ + { + &SignedURLOptions{}, + "missing required GoogleAccessID", + }, + { + &SignedURLOptions{GoogleAccessID: "access_id"}, + "exactly one of PrivateKey or SignedBytes must be set", + }, + { + &SignedURLOptions{ + GoogleAccessID: "access_id", + SignBytes: func(b []byte) ([]byte, error) { return b, nil }, + PrivateKey: pk, + }, + "exactly one of PrivateKey or SignedBytes must be set", + }, + { + &SignedURLOptions{ + GoogleAccessID: "access_id", + PrivateKey: pk, + }, + "missing required method", + }, + { + &SignedURLOptions{ + GoogleAccessID: "access_id", + SignBytes: func(b []byte) ([]byte, error) { return b, nil }, + }, + "missing required method", + }, + { + &SignedURLOptions{ + GoogleAccessID: "access_id", + 
PrivateKey: pk, + Method: "PUT", + }, + "missing required expires", + }, + } + for _, test := range tests { + _, err := SignedURL("bucket", "name", test.opts) + if !strings.Contains(err.Error(), test.errMsg) { + t.Errorf("expected err: %v, found: %v", test.errMsg, err) + } + } +} + +func dummyKey(kind string) []byte { + slurp, err := ioutil.ReadFile(fmt.Sprintf("./testdata/dummy_%s", kind)) + if err != nil { + log.Fatal(err) + } + return slurp +} + +func TestCopyToMissingFields(t *testing.T) { + var tests = []struct { + srcBucket, srcName, destBucket, destName string + errMsg string + }{ + { + "mybucket", "", "mybucket", "destname", + "name is empty", + }, + { + "mybucket", "srcname", "mybucket", "", + "name is empty", + }, + { + "", "srcfile", "mybucket", "destname", + "name is empty", + }, + { + "mybucket", "srcfile", "", "destname", + "name is empty", + }, + } + ctx := context.Background() + client, err := NewClient(ctx, option.WithHTTPClient(&http.Client{Transport: &fakeTransport{}})) + if err != nil { + panic(err) + } + for i, test := range tests { + src := client.Bucket(test.srcBucket).Object(test.srcName) + dst := client.Bucket(test.destBucket).Object(test.destName) + _, err := dst.CopierFrom(src).Run(ctx) + if !strings.Contains(err.Error(), test.errMsg) { + t.Errorf("CopyTo test #%v:\ngot err %q\nwant err %q", i, err, test.errMsg) + } + } +} + +func TestObjectNames(t *testing.T) { + // Naming requirements: https://cloud.google.com/storage/docs/bucket-naming + const maxLegalLength = 1024 + + type testT struct { + name, want string + } + tests := []testT{ + // Embedded characters important in URLs. + {"foo % bar", "foo%20%25%20bar"}, + {"foo ? 
bar", "foo%20%3F%20bar"}, + {"foo / bar", "foo%20/%20bar"}, + {"foo %?/ bar", "foo%20%25%3F/%20bar"}, + + // Non-Roman scripts + {"타코", "%ED%83%80%EC%BD%94"}, + {"世界", "%E4%B8%96%E7%95%8C"}, + + // Longest legal name + {strings.Repeat("a", maxLegalLength), strings.Repeat("a", maxLegalLength)}, + + // Line terminators besides CR and LF: https://en.wikipedia.org/wiki/Newline#Unicode + {"foo \u000b bar", "foo%20%0B%20bar"}, + {"foo \u000c bar", "foo%20%0C%20bar"}, + {"foo \u0085 bar", "foo%20%C2%85%20bar"}, + {"foo \u2028 bar", "foo%20%E2%80%A8%20bar"}, + {"foo \u2029 bar", "foo%20%E2%80%A9%20bar"}, + + // Null byte. + {"foo \u0000 bar", "foo%20%00%20bar"}, + + // Non-control characters that are discouraged, but not forbidden, according to the documentation. + {"foo # bar", "foo%20%23%20bar"}, + {"foo []*? bar", "foo%20%5B%5D%2A%3F%20bar"}, + + // Angstrom symbol singleton and normalized forms: http://unicode.org/reports/tr15/ + {"foo \u212b bar", "foo%20%E2%84%AB%20bar"}, + {"foo \u0041\u030a bar", "foo%20A%CC%8A%20bar"}, + {"foo \u00c5 bar", "foo%20%C3%85%20bar"}, + + // Hangul separating jamo: http://www.unicode.org/versions/Unicode7.0.0/ch18.pdf (Table 18-10) + {"foo \u3131\u314f bar", "foo%20%E3%84%B1%E3%85%8F%20bar"}, + {"foo \u1100\u1161 bar", "foo%20%E1%84%80%E1%85%A1%20bar"}, + {"foo \uac00 bar", "foo%20%EA%B0%80%20bar"}, + } + + // C0 control characters not forbidden by the docs. + var runes []rune + for r := rune(0x01); r <= rune(0x1f); r++ { + if r != '\u000a' && r != '\u000d' { + runes = append(runes, r) + } + } + tests = append(tests, testT{fmt.Sprintf("foo %s bar", string(runes)), "foo%20%01%02%03%04%05%06%07%08%09%0B%0C%0E%0F%10%11%12%13%14%15%16%17%18%19%1A%1B%1C%1D%1E%1F%20bar"}) + + // C1 control characters, plus DEL. 
+ runes = nil + for r := rune(0x7f); r <= rune(0x9f); r++ { + runes = append(runes, r) + } + tests = append(tests, testT{fmt.Sprintf("foo %s bar", string(runes)), "foo%20%7F%C2%80%C2%81%C2%82%C2%83%C2%84%C2%85%C2%86%C2%87%C2%88%C2%89%C2%8A%C2%8B%C2%8C%C2%8D%C2%8E%C2%8F%C2%90%C2%91%C2%92%C2%93%C2%94%C2%95%C2%96%C2%97%C2%98%C2%99%C2%9A%C2%9B%C2%9C%C2%9D%C2%9E%C2%9F%20bar"}) + + opts := &SignedURLOptions{ + GoogleAccessID: "xxx@clientid", + PrivateKey: dummyKey("rsa"), + Method: "GET", + MD5: []byte("202cb962ac59075b964b07152d234b70"), + Expires: time.Date(2002, time.October, 2, 10, 0, 0, 0, time.UTC), + ContentType: "application/json", + Headers: []string{"x-header1", "x-header2"}, + } + + for _, test := range tests { + g, err := SignedURL("bucket-name", test.name, opts) + if err != nil { + t.Errorf("SignedURL(%q) err=%v, want nil", test.name, err) + } + if w := "/bucket-name/" + test.want; !strings.Contains(g, w) { + t.Errorf("SignedURL(%q)=%q, want substring %q", test.name, g, w) + } + } +} + +func TestCondition(t *testing.T) { + gotReq := make(chan *http.Request, 1) + hc, close := newTestServer(func(w http.ResponseWriter, r *http.Request) { + io.Copy(ioutil.Discard, r.Body) + gotReq <- r + w.WriteHeader(200) + }) + defer close() + ctx := context.Background() + c, err := NewClient(ctx, option.WithHTTPClient(hc)) + if err != nil { + t.Fatal(err) + } + + obj := c.Bucket("buck").Object("obj") + dst := c.Bucket("dstbuck").Object("dst") + tests := []struct { + fn func() + want string + }{ + { + func() { obj.Generation(1234).NewReader(ctx) }, + "GET /buck/obj?generation=1234", + }, + { + func() { obj.If(Conditions{GenerationMatch: 1234}).NewReader(ctx) }, + "GET /buck/obj?ifGenerationMatch=1234", + }, + { + func() { obj.If(Conditions{GenerationNotMatch: 1234}).NewReader(ctx) }, + "GET /buck/obj?ifGenerationNotMatch=1234", + }, + { + func() { obj.If(Conditions{MetagenerationMatch: 1234}).NewReader(ctx) }, + "GET /buck/obj?ifMetagenerationMatch=1234", + }, + { + func() { 
obj.If(Conditions{MetagenerationNotMatch: 1234}).NewReader(ctx) }, + "GET /buck/obj?ifMetagenerationNotMatch=1234", + }, + { + func() { obj.If(Conditions{MetagenerationNotMatch: 1234}).Attrs(ctx) }, + "GET /storage/v1/b/buck/o/obj?alt=json&ifMetagenerationNotMatch=1234&projection=full", + }, + + { + func() { obj.If(Conditions{MetagenerationMatch: 1234}).Update(ctx, ObjectAttrsToUpdate{}) }, + "PATCH /storage/v1/b/buck/o/obj?alt=json&ifMetagenerationMatch=1234&projection=full", + }, + { + func() { obj.Generation(1234).Delete(ctx) }, + "DELETE /storage/v1/b/buck/o/obj?alt=json&generation=1234", + }, + { + func() { + w := obj.If(Conditions{GenerationMatch: 1234}).NewWriter(ctx) + w.ContentType = "text/plain" + w.Close() + }, + "POST /upload/storage/v1/b/buck/o?alt=json&ifGenerationMatch=1234&projection=full&uploadType=multipart", + }, + { + func() { + w := obj.If(Conditions{DoesNotExist: true}).NewWriter(ctx) + w.ContentType = "text/plain" + w.Close() + }, + "POST /upload/storage/v1/b/buck/o?alt=json&ifGenerationMatch=0&projection=full&uploadType=multipart", + }, + { + func() { + dst.If(Conditions{MetagenerationMatch: 5678}).CopierFrom(obj.If(Conditions{GenerationMatch: 1234})).Run(ctx) + }, + "POST /storage/v1/b/buck/o/obj/rewriteTo/b/dstbuck/o/dst?alt=json&ifMetagenerationMatch=5678&ifSourceGenerationMatch=1234&projection=full", + }, + } + + for i, tt := range tests { + tt.fn() + select { + case r := <-gotReq: + got := r.Method + " " + r.RequestURI + if got != tt.want { + t.Errorf("%d. RequestURI = %q; want %q", i, got, tt.want) + } + case <-time.After(5 * time.Second): + t.Fatalf("%d. 
timeout", i) + } + if err != nil { + t.Fatal(err) + } + } + + // Test an error, too: + err = obj.Generation(1234).NewWriter(ctx).Close() + if err == nil || !strings.Contains(err.Error(), "NewWriter: generation not supported") { + t.Errorf("want error about unsupported generation; got %v", err) + } +} + +func TestConditionErrors(t *testing.T) { + for _, conds := range []Conditions{ + {GenerationMatch: 0}, + {DoesNotExist: false}, // same as above, actually + {GenerationMatch: 1, GenerationNotMatch: 2}, + {GenerationNotMatch: 2, DoesNotExist: true}, + {MetagenerationMatch: 1, MetagenerationNotMatch: 2}, + } { + if err := conds.validate(""); err == nil { + t.Errorf("%+v: got nil, want error", conds) + } + } +} + +// Test object compose. +func TestObjectCompose(t *testing.T) { + gotURL := make(chan string, 1) + gotBody := make(chan []byte, 1) + hc, close := newTestServer(func(w http.ResponseWriter, r *http.Request) { + body, _ := ioutil.ReadAll(r.Body) + gotURL <- r.URL.String() + gotBody <- body + w.Write([]byte("{}")) + }) + defer close() + ctx := context.Background() + c, err := NewClient(ctx, option.WithHTTPClient(hc)) + if err != nil { + t.Fatal(err) + } + + testCases := []struct { + desc string + dst *ObjectHandle + srcs []*ObjectHandle + attrs *ObjectAttrs + wantReq raw.ComposeRequest + wantURL string + wantErr bool + }{ + { + desc: "basic case", + dst: c.Bucket("foo").Object("bar"), + srcs: []*ObjectHandle{ + c.Bucket("foo").Object("baz"), + c.Bucket("foo").Object("quux"), + }, + wantURL: "/storage/v1/b/foo/o/bar/compose?alt=json", + wantReq: raw.ComposeRequest{ + Destination: &raw.Object{Bucket: "foo"}, + SourceObjects: []*raw.ComposeRequestSourceObjects{ + {Name: "baz"}, + {Name: "quux"}, + }, + }, + }, + { + desc: "with object attrs", + dst: c.Bucket("foo").Object("bar"), + srcs: []*ObjectHandle{ + c.Bucket("foo").Object("baz"), + c.Bucket("foo").Object("quux"), + }, + attrs: &ObjectAttrs{ + Name: "not-bar", + ContentType: "application/json", + }, + wantURL: 
"/storage/v1/b/foo/o/bar/compose?alt=json", + wantReq: raw.ComposeRequest{ + Destination: &raw.Object{ + Bucket: "foo", + Name: "not-bar", + ContentType: "application/json", + }, + SourceObjects: []*raw.ComposeRequestSourceObjects{ + {Name: "baz"}, + {Name: "quux"}, + }, + }, + }, + { + desc: "with conditions", + dst: c.Bucket("foo").Object("bar").If(Conditions{ + GenerationMatch: 12, + MetagenerationMatch: 34, + }), + srcs: []*ObjectHandle{ + c.Bucket("foo").Object("baz").Generation(56), + c.Bucket("foo").Object("quux").If(Conditions{GenerationMatch: 78}), + }, + wantURL: "/storage/v1/b/foo/o/bar/compose?alt=json&ifGenerationMatch=12&ifMetagenerationMatch=34", + wantReq: raw.ComposeRequest{ + Destination: &raw.Object{Bucket: "foo"}, + SourceObjects: []*raw.ComposeRequestSourceObjects{ + { + Name: "baz", + Generation: 56, + }, + { + Name: "quux", + ObjectPreconditions: &raw.ComposeRequestSourceObjectsObjectPreconditions{ + IfGenerationMatch: 78, + }, + }, + }, + }, + }, + { + desc: "no sources", + dst: c.Bucket("foo").Object("bar"), + wantErr: true, + }, + { + desc: "destination, no bucket", + dst: c.Bucket("").Object("bar"), + srcs: []*ObjectHandle{ + c.Bucket("foo").Object("baz"), + }, + wantErr: true, + }, + { + desc: "destination, no object", + dst: c.Bucket("foo").Object(""), + srcs: []*ObjectHandle{ + c.Bucket("foo").Object("baz"), + }, + wantErr: true, + }, + { + desc: "source, different bucket", + dst: c.Bucket("foo").Object("bar"), + srcs: []*ObjectHandle{ + c.Bucket("otherbucket").Object("baz"), + }, + wantErr: true, + }, + { + desc: "source, no object", + dst: c.Bucket("foo").Object("bar"), + srcs: []*ObjectHandle{ + c.Bucket("foo").Object(""), + }, + wantErr: true, + }, + { + desc: "destination, bad condition", + dst: c.Bucket("foo").Object("bar").Generation(12), + srcs: []*ObjectHandle{ + c.Bucket("foo").Object("baz"), + }, + wantErr: true, + }, + { + desc: "source, bad condition", + dst: c.Bucket("foo").Object("bar"), + srcs: []*ObjectHandle{ + 
c.Bucket("foo").Object("baz").If(Conditions{MetagenerationMatch: 12}), + }, + wantErr: true, + }, + } + + for _, tt := range testCases { + composer := tt.dst.ComposerFrom(tt.srcs...) + if tt.attrs != nil { + composer.ObjectAttrs = *tt.attrs + } + _, err := composer.Run(ctx) + if gotErr := err != nil; gotErr != tt.wantErr { + t.Errorf("%s: got error %v; want err %t", tt.desc, err, tt.wantErr) + continue + } + if tt.wantErr { + continue + } + url, body := <-gotURL, <-gotBody + if url != tt.wantURL { + t.Errorf("%s: request URL\ngot %q\nwant %q", tt.desc, url, tt.wantURL) + } + var req raw.ComposeRequest + if err := json.Unmarshal(body, &req); err != nil { + t.Errorf("%s: json.Unmarshal %v (body %s)", tt.desc, err, body) + } + if !reflect.DeepEqual(req, tt.wantReq) { + // Print to JSON. + wantReq, _ := json.Marshal(tt.wantReq) + t.Errorf("%s: request body\ngot %s\nwant %s", tt.desc, body, wantReq) + } + } +} + +// Test that ObjectIterator's Next and NextPage methods correctly terminate +// if there is nothing to iterate over. +func TestEmptyObjectIterator(t *testing.T) { + hClient, close := newTestServer(func(w http.ResponseWriter, r *http.Request) { + io.Copy(ioutil.Discard, r.Body) + fmt.Fprintf(w, "{}") + }) + defer close() + ctx := context.Background() + client, err := NewClient(ctx, option.WithHTTPClient(hClient)) + if err != nil { + t.Fatal(err) + } + it := client.Bucket("b").Objects(ctx, nil) + c := make(chan error, 1) + go func() { + _, err := it.Next() + c <- err + }() + select { + case err := <-c: + if err != iterator.Done { + t.Errorf("got %v, want Done", err) + } + case <-time.After(50 * time.Millisecond): + t.Error("timed out") + } +} + +// Test that BucketIterator's Next method correctly terminates if there is +// nothing to iterate over. 
+func TestEmptyBucketIterator(t *testing.T) { + hClient, close := newTestServer(func(w http.ResponseWriter, r *http.Request) { + io.Copy(ioutil.Discard, r.Body) + fmt.Fprintf(w, "{}") + }) + defer close() + ctx := context.Background() + client, err := NewClient(ctx, option.WithHTTPClient(hClient)) + if err != nil { + t.Fatal(err) + } + it := client.Buckets(ctx, "project") + c := make(chan error, 1) + go func() { + _, err := it.Next() + c <- err + }() + select { + case err := <-c: + if err != iterator.Done { + t.Errorf("got %v, want Done", err) + } + case <-time.After(50 * time.Millisecond): + t.Error("timed out") + } +} + +func newTestServer(handler func(w http.ResponseWriter, r *http.Request)) (*http.Client, func()) { + ts := httptest.NewTLSServer(http.HandlerFunc(handler)) + tlsConf := &tls.Config{InsecureSkipVerify: true} + tr := &http.Transport{ + TLSClientConfig: tlsConf, + DialTLS: func(netw, addr string) (net.Conn, error) { + return tls.Dial("tcp", ts.Listener.Addr().String(), tlsConf) + }, + } + return &http.Client{Transport: tr}, func() { + tr.CloseIdleConnections() + ts.Close() + } +} diff --git a/vendor/cloud.google.com/go/storage/testdata/dummy_pem b/vendor/cloud.google.com/go/storage/testdata/dummy_pem new file mode 100644 index 00000000..3428d449 --- /dev/null +++ b/vendor/cloud.google.com/go/storage/testdata/dummy_pem @@ -0,0 +1,39 @@ +Bag Attributes + friendlyName: privatekey + localKeyID: 54 69 6D 65 20 31 34 31 36 38 35 32 30 30 34 37 37 32 +Key Attributes: +-----BEGIN RSA PRIVATE KEY----- +MIICXQIBAAKBgQCtCWMoJ2Bok2QoGFyU7A6IlGprO9QfUTT0jNrLkIbM5OWNIuDx +64+PEaTS5g5m+2Hz/lmd5jJKanAH4dY9LZzsaYAPq1K17Gcmg1hEisYeKsgOcjYY +kwRkV+natCTsC+tfWmS0voRh0jA1rI1J4MikceoHtgWdEuoHrrptRVpWKwIDAQAB +AoGAKp3uQvx3vSnX+BwP6Um+RpsvHpwMoW3xue1bEdnVqW8SrlERz+NxZw40ZxDs +KSbuuBZD4iTI7BUM5JQVnNm4FQY1YrPlWZLyI73Bj8RKTXrPdJheM/0r7xjiIXbQ +7w4cUSM9rVugnI/rxF2kPIQTGYI+EG/6+P+k6VvgPmC0T/ECQQDUPskiS18WaY+i +Koalbrb3GakaBoHrC1b4ln4CAv7fq7H4WvFvqi/2rxLhHYq31iwxYy8s7J7Sba1+ 
+5vwJ2TxZAkEA0LVfs3Q2VWZ+cM3bv0aYTalMXg6wT+LoNvk9HnOb0zQYajF3qm4G +ZFdfEqvOkje0zQ4fcihARKyda/VY84UGIwJBAIZa0FvjNmgrnn7bSKzEbxHwrnkJ +EYjGfuGR8mY3mzvfpiM+/oLfSslvfhX+62cALq18yco4ZzlxsFgaxAU//NECQDcS +NN94YcHlGqYPW9W7/gI4EwOaoqFhwV6II71+SfbP/0U+KlJZV+xwNZEKrqZcdqPI +/zkzL8ovNha/laokRrsCQQCyoPHGcBWj+VFbNoyQnX4tghc6rOY7n4pmpgQvU825 +TAM9vnYtSkKK/V56kEDNBO5LwiRsir95IUNclqqMKR1C +-----END RSA PRIVATE KEY----- +Bag Attributes + friendlyName: privatekey + localKeyID: 54 69 6D 65 20 31 34 31 36 38 35 32 30 30 34 37 37 32 +subject=/CN=1079432350659-nvog0vmn9s6pqr3kr4v2avbc7nkhoa11.apps.googleusercontent.com +issuer=/CN=1079432350659-nvog0vmn9s6pqr3kr4v2avbc7nkhoa11.apps.googleusercontent.com +-----BEGIN CERTIFICATE----- +MIICXTCCAcagAwIBAgIIHxTMQUVJRZ0wDQYJKoZIhvcNAQEFBQAwVDFSMFAGA1UE +AxNJMTA3OTQzMjM1MDY1OS1udm9nMHZtbjlzNnBxcjNrcjR2MmF2YmM3bmtob2Ex +MS5hcHBzLmdvb2dsZXVzZXJjb250ZW50LmNvbTAeFw0xNDExMjQxODAwMDRaFw0y +NDExMjExODAwMDRaMFQxUjBQBgNVBAMTSTEwNzk0MzIzNTA2NTktbnZvZzB2bW45 +czZwcXIza3I0djJhdmJjN25raG9hMTEuYXBwcy5nb29nbGV1c2VyY29udGVudC5j +b20wgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBAK0JYygnYGiTZCgYXJTsDoiU +ams71B9RNPSM2suQhszk5Y0i4PHrj48RpNLmDmb7YfP+WZ3mMkpqcAfh1j0tnOxp +gA+rUrXsZyaDWESKxh4qyA5yNhiTBGRX6dq0JOwL619aZLS+hGHSMDWsjUngyKRx +6ge2BZ0S6geuum1FWlYrAgMBAAGjODA2MAwGA1UdEwEB/wQCMAAwDgYDVR0PAQH/ +BAQDAgeAMBYGA1UdJQEB/wQMMAoGCCsGAQUFBwMCMA0GCSqGSIb3DQEBBQUAA4GB +ACVvKkZkomHq3uffOQwdZ4VJYuxrvDGnZu/ExW9WngO2teEsjxABL41TNnRYHN5T +lMC19poFA2tR/DySDLJ2XNs/hSvyQUL6HHCncVdR4Srpie88j48peY1MZSMP51Jv +qagbbP5K5DSEu02/zZaV0kaCvLEN0KAtj/noDuOOnQU2 +-----END CERTIFICATE----- diff --git a/vendor/cloud.google.com/go/storage/testdata/dummy_rsa b/vendor/cloud.google.com/go/storage/testdata/dummy_rsa new file mode 100644 index 00000000..4ce6678d --- /dev/null +++ b/vendor/cloud.google.com/go/storage/testdata/dummy_rsa @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEpAIBAAKCAQEAx4fm7dngEmOULNmAs1IGZ9Apfzh+BkaQ1dzkmbUgpcoghucE 
+DZRnAGd2aPyB6skGMXUytWQvNYav0WTR00wFtX1ohWTfv68HGXJ8QXCpyoSKSSFY +fuP9X36wBSkSX9J5DVgiuzD5VBdzUISSmapjKm+DcbRALjz6OUIPEWi1Tjl6p5RK +1w41qdbmt7E5/kGhKLDuT7+M83g4VWhgIvaAXtnhklDAggilPPa8ZJ1IFe31lNlr +k4DRk38nc6sEutdf3RL7QoH7FBusI7uXV03DC6dwN1kP4GE7bjJhcRb/7jYt7CQ9 +/E9Exz3c0yAp0yrTg0Fwh+qxfH9dKwN52S7SBwIDAQABAoIBAQCaCs26K07WY5Jt +3a2Cw3y2gPrIgTCqX6hJs7O5ByEhXZ8nBwsWANBUe4vrGaajQHdLj5OKfsIDrOvn +2NI1MqflqeAbu/kR32q3tq8/Rl+PPiwUsW3E6Pcf1orGMSNCXxeducF2iySySzh3 +nSIhCG5uwJDWI7a4+9KiieFgK1pt/Iv30q1SQS8IEntTfXYwANQrfKUVMmVF9aIK +6/WZE2yd5+q3wVVIJ6jsmTzoDCX6QQkkJICIYwCkglmVy5AeTckOVwcXL0jqw5Kf +5/soZJQwLEyBoQq7Kbpa26QHq+CJONetPP8Ssy8MJJXBT+u/bSseMb3Zsr5cr43e +DJOhwsThAoGBAPY6rPKl2NT/K7XfRCGm1sbWjUQyDShscwuWJ5+kD0yudnT/ZEJ1 +M3+KS/iOOAoHDdEDi9crRvMl0UfNa8MAcDKHflzxg2jg/QI+fTBjPP5GOX0lkZ9g +z6VePoVoQw2gpPFVNPPTxKfk27tEzbaffvOLGBEih0Kb7HTINkW8rIlzAoGBAM9y +1yr+jvfS1cGFtNU+Gotoihw2eMKtIqR03Yn3n0PK1nVCDKqwdUqCypz4+ml6cxRK +J8+Pfdh7D+ZJd4LEG6Y4QRDLuv5OA700tUoSHxMSNn3q9As4+T3MUyYxWKvTeu3U +f2NWP9ePU0lV8ttk7YlpVRaPQmc1qwooBA/z/8AdAoGAW9x0HWqmRICWTBnpjyxx +QGlW9rQ9mHEtUotIaRSJ6K/F3cxSGUEkX1a3FRnp6kPLcckC6NlqdNgNBd6rb2rA +cPl/uSkZP42Als+9YMoFPU/xrrDPbUhu72EDrj3Bllnyb168jKLa4VBOccUvggxr +Dm08I1hgYgdN5huzs7y6GeUCgYEAj+AZJSOJ6o1aXS6rfV3mMRve9bQ9yt8jcKXw +5HhOCEmMtaSKfnOF1Ziih34Sxsb7O2428DiX0mV/YHtBnPsAJidL0SdLWIapBzeg +KHArByIRkwE6IvJvwpGMdaex1PIGhx5i/3VZL9qiq/ElT05PhIb+UXgoWMabCp84 +OgxDK20CgYAeaFo8BdQ7FmVX2+EEejF+8xSge6WVLtkaon8bqcn6P0O8lLypoOhd +mJAYH8WU+UAy9pecUnDZj14LAGNVmYcse8HFX71MoshnvCTFEPVo4rZxIAGwMpeJ +5jgQ3slYLpqrGlcbLgUXBUgzEO684Wk/UV9DFPlHALVqCfXQ9dpJPg== +-----END RSA PRIVATE KEY----- diff --git a/vendor/cloud.google.com/go/storage/writer.go b/vendor/cloud.google.com/go/storage/writer.go new file mode 100644 index 00000000..61bd0be6 --- /dev/null +++ b/vendor/cloud.google.com/go/storage/writer.go @@ -0,0 +1,150 @@ +// Copyright 2014 Google Inc. All Rights Reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package storage + +import ( + "errors" + "fmt" + "io" + "unicode/utf8" + + "golang.org/x/net/context" + "google.golang.org/api/googleapi" + raw "google.golang.org/api/storage/v1" +) + +// A Writer writes a Cloud Storage object. +type Writer struct { + // ObjectAttrs are optional attributes to set on the object. Any attributes + // must be initialized before the first Write call. Nil or zero-valued + // attributes are ignored. + ObjectAttrs + + // ChunkSize controls the maximum number of bytes of the object that the + // Writer will attempt to send to the server in a single request. Objects + // smaller than the size will be sent in a single request, while larger + // objects will be split over multiple requests. The size will be rounded up + // to the nearest multiple of 256K. If zero, chunking will be disabled and + // the object will be uploaded in a single request. + // + // ChunkSize will default to a reasonable value. Any custom configuration + // must be done before the first Write call. + ChunkSize int + + ctx context.Context + o *ObjectHandle + + opened bool + pw *io.PipeWriter + + donec chan struct{} // closed after err and obj are set. + err error + obj *ObjectAttrs +} + +func (w *Writer) open() error { + attrs := w.ObjectAttrs + // Check the developer didn't change the object Name (this is unfortunate, but + // we don't want to store an object under the wrong name). 
+ if attrs.Name != w.o.object { + return fmt.Errorf("storage: Writer.Name %q does not match object name %q", attrs.Name, w.o.object) + } + if !utf8.ValidString(attrs.Name) { + return fmt.Errorf("storage: object name %q is not valid UTF-8", attrs.Name) + } + pr, pw := io.Pipe() + w.pw = pw + w.opened = true + + if w.ChunkSize < 0 { + return errors.New("storage: Writer.ChunkSize must non-negative") + } + mediaOpts := []googleapi.MediaOption{ + googleapi.ChunkSize(w.ChunkSize), + } + if c := attrs.ContentType; c != "" { + mediaOpts = append(mediaOpts, googleapi.ContentType(c)) + } + + go func() { + defer close(w.donec) + + call := w.o.c.raw.Objects.Insert(w.o.bucket, attrs.toRawObject(w.o.bucket)). + Media(pr, mediaOpts...). + Projection("full"). + Context(w.ctx) + if err := setEncryptionHeaders(call.Header(), w.o.encryptionKey, false); err != nil { + w.err = err + pr.CloseWithError(w.err) + return + } + var resp *raw.Object + err := applyConds("NewWriter", w.o.gen, w.o.conds, call) + if err == nil { + resp, err = call.Do() + } + if err != nil { + w.err = err + pr.CloseWithError(w.err) + return + } + w.obj = newObject(resp) + }() + return nil +} + +// Write appends to w. It implements the io.Writer interface. +func (w *Writer) Write(p []byte) (n int, err error) { + if w.err != nil { + return 0, w.err + } + if !w.opened { + if err := w.open(); err != nil { + return 0, err + } + } + return w.pw.Write(p) +} + +// Close completes the write operation and flushes any buffered data. +// If Close doesn't return an error, metadata about the written object +// can be retrieved by calling Attrs. +func (w *Writer) Close() error { + if !w.opened { + if err := w.open(); err != nil { + return err + } + } + if err := w.pw.Close(); err != nil { + return err + } + <-w.donec + return w.err +} + +// CloseWithError aborts the write operation with the provided error. +// CloseWithError always returns nil. 
+func (w *Writer) CloseWithError(err error) error { + if !w.opened { + return nil + } + return w.pw.CloseWithError(err) +} + +// Attrs returns metadata about a successfully-written object. +// It's only valid to call it after Close returns nil. +func (w *Writer) Attrs() *ObjectAttrs { + return w.obj +} diff --git a/vendor/cloud.google.com/go/storage/writer_test.go b/vendor/cloud.google.com/go/storage/writer_test.go new file mode 100644 index 00000000..75b209b2 --- /dev/null +++ b/vendor/cloud.google.com/go/storage/writer_test.go @@ -0,0 +1,92 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package storage + +import ( + "crypto/sha256" + "encoding/base64" + "fmt" + "net/http" + "reflect" + "testing" + + "golang.org/x/net/context" + + "google.golang.org/api/option" +) + +type fakeTransport struct { + gotReq *http.Request +} + +func (t *fakeTransport) RoundTrip(req *http.Request) (*http.Response, error) { + t.gotReq = req + return nil, fmt.Errorf("error handling request") +} + +func TestErrorOnObjectsInsertCall(t *testing.T) { + ctx := context.Background() + hc := &http.Client{Transport: &fakeTransport{}} + client, err := NewClient(ctx, option.WithHTTPClient(hc)) + if err != nil { + t.Fatalf("error when creating client: %v", err) + } + wc := client.Bucket("bucketname").Object("filename1").NewWriter(ctx) + wc.ContentType = "text/plain" + + // We can't check that the Write fails, since it depends on the write to the + // underling fakeTransport failing which is racy. + wc.Write([]byte("hello world")) + + // Close must always return an error though since it waits for the transport to + // have closed. + if err := wc.Close(); err == nil { + t.Errorf("expected error on close, got nil") + } +} + +func TestEncryption(t *testing.T) { + ctx := context.Background() + ft := &fakeTransport{} + hc := &http.Client{Transport: ft} + client, err := NewClient(ctx, option.WithHTTPClient(hc)) + if err != nil { + t.Fatalf("error when creating client: %v", err) + } + obj := client.Bucket("bucketname").Object("filename1") + key := []byte("secret-key-that-is-32-bytes-long") + wc := obj.Key(key).NewWriter(ctx) + // TODO(jba): use something other than fakeTransport, which always returns error. 
+ wc.Write([]byte("hello world")) + wc.Close() + if got, want := ft.gotReq.Header.Get("x-goog-encryption-algorithm"), "AES256"; got != want { + t.Errorf("algorithm: got %q, want %q", got, want) + } + gotKey, err := base64.StdEncoding.DecodeString(ft.gotReq.Header.Get("x-goog-encryption-key")) + if err != nil { + t.Fatalf("decoding key: %v", err) + } + if !reflect.DeepEqual(gotKey, key) { + t.Errorf("key: got %v, want %v", gotKey, key) + } + wantHash := sha256.Sum256(key) + gotHash, err := base64.StdEncoding.DecodeString(ft.gotReq.Header.Get("x-goog-encryption-key-sha256")) + if err != nil { + t.Fatalf("decoding hash: %v", err) + } + if !reflect.DeepEqual(gotHash, wantHash[:]) { // wantHash is an array + t.Errorf("hash: got\n%v, want\n%v", gotHash, wantHash) + } +} diff --git a/vendor/cloud.google.com/go/trace/apiv1/doc.go b/vendor/cloud.google.com/go/trace/apiv1/doc.go new file mode 100644 index 00000000..6d956bdd --- /dev/null +++ b/vendor/cloud.google.com/go/trace/apiv1/doc.go @@ -0,0 +1,38 @@ +// Copyright 2017, Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +// Package trace is an experimental, auto-generated package for the +// trace API. +// +// Send and retrieve trace data from Stackdriver Trace. Data is generated and +// available by default for all App Engine applications. 
Data from other +// applications can be written to Stackdriver Trace for display, reporting, +// and analysis. +// +// Use the client at cloud.google.com/go/trace in preference to this. +package trace // import "cloud.google.com/go/trace/apiv1" + +import ( + "golang.org/x/net/context" + "google.golang.org/grpc/metadata" +) + +func insertXGoog(ctx context.Context, val string) context.Context { + md, _ := metadata.FromContext(ctx) + md = md.Copy() + md["x-goog-api-client"] = []string{val} + return metadata.NewContext(ctx, md) +} diff --git a/vendor/cloud.google.com/go/trace/apiv1/mock_test.go b/vendor/cloud.google.com/go/trace/apiv1/mock_test.go new file mode 100644 index 00000000..e8a1ffec --- /dev/null +++ b/vendor/cloud.google.com/go/trace/apiv1/mock_test.go @@ -0,0 +1,299 @@ +// Copyright 2017, Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. 
+ +package trace + +import ( + google_protobuf "github.com/golang/protobuf/ptypes/empty" + cloudtracepb "google.golang.org/genproto/googleapis/devtools/cloudtrace/v1" +) + +import ( + "flag" + "io" + "log" + "net" + "os" + "testing" + + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes" + "golang.org/x/net/context" + "google.golang.org/api/option" + status "google.golang.org/genproto/googleapis/rpc/status" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" +) + +var _ = io.EOF +var _ = ptypes.MarshalAny +var _ status.Status + +type mockTraceServer struct { + // Embed for forward compatibility. + // Tests will keep working if more methods are added + // in the future. + cloudtracepb.TraceServiceServer + + reqs []proto.Message + + // If set, all calls return this error. + err error + + // responses to return if err == nil + resps []proto.Message +} + +func (s *mockTraceServer) ListTraces(_ context.Context, req *cloudtracepb.ListTracesRequest) (*cloudtracepb.ListTracesResponse, error) { + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*cloudtracepb.ListTracesResponse), nil +} + +func (s *mockTraceServer) GetTrace(_ context.Context, req *cloudtracepb.GetTraceRequest) (*cloudtracepb.Trace, error) { + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*cloudtracepb.Trace), nil +} + +func (s *mockTraceServer) PatchTraces(_ context.Context, req *cloudtracepb.PatchTracesRequest) (*google_protobuf.Empty, error) { + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*google_protobuf.Empty), nil +} + +// clientOpt is the option tests should use to connect to the test server. +// It is initialized by TestMain. 
+var clientOpt option.ClientOption + +var ( + mockTrace mockTraceServer +) + +func TestMain(m *testing.M) { + flag.Parse() + + serv := grpc.NewServer() + cloudtracepb.RegisterTraceServiceServer(serv, &mockTrace) + + lis, err := net.Listen("tcp", "localhost:0") + if err != nil { + log.Fatal(err) + } + go serv.Serve(lis) + + conn, err := grpc.Dial(lis.Addr().String(), grpc.WithInsecure()) + if err != nil { + log.Fatal(err) + } + clientOpt = option.WithGRPCConn(conn) + + os.Exit(m.Run()) +} + +func TestTraceServicePatchTraces(t *testing.T) { + var expectedResponse *google_protobuf.Empty = &google_protobuf.Empty{} + + mockTrace.err = nil + mockTrace.reqs = nil + + mockTrace.resps = append(mockTrace.resps[:0], expectedResponse) + + var projectId string = "projectId-1969970175" + var traces *cloudtracepb.Traces = &cloudtracepb.Traces{} + var request = &cloudtracepb.PatchTracesRequest{ + ProjectId: projectId, + Traces: traces, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + err = c.PatchTraces(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockTrace.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + +} + +func TestTraceServicePatchTracesError(t *testing.T) { + errCode := codes.Internal + mockTrace.err = grpc.Errorf(errCode, "test error") + + var projectId string = "projectId-1969970175" + var traces *cloudtracepb.Traces = &cloudtracepb.Traces{} + var request = &cloudtracepb.PatchTracesRequest{ + ProjectId: projectId, + Traces: traces, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + err = c.PatchTraces(context.Background(), request) + + if c := grpc.Code(err); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } +} +func TestTraceServiceGetTrace(t *testing.T) { + var projectId2 string = "projectId2939242356" + var traceId2 string = "traceId2987826376" + var 
expectedResponse = &cloudtracepb.Trace{ + ProjectId: projectId2, + TraceId: traceId2, + } + + mockTrace.err = nil + mockTrace.reqs = nil + + mockTrace.resps = append(mockTrace.resps[:0], expectedResponse) + + var projectId string = "projectId-1969970175" + var traceId string = "traceId1270300245" + var request = &cloudtracepb.GetTraceRequest{ + ProjectId: projectId, + TraceId: traceId, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.GetTrace(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockTrace.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestTraceServiceGetTraceError(t *testing.T) { + errCode := codes.Internal + mockTrace.err = grpc.Errorf(errCode, "test error") + + var projectId string = "projectId-1969970175" + var traceId string = "traceId1270300245" + var request = &cloudtracepb.GetTraceRequest{ + ProjectId: projectId, + TraceId: traceId, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.GetTrace(context.Background(), request) + + if c := grpc.Code(err); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} +func TestTraceServiceListTraces(t *testing.T) { + var nextPageToken string = "" + var tracesElement *cloudtracepb.Trace = &cloudtracepb.Trace{} + var traces = []*cloudtracepb.Trace{tracesElement} + var expectedResponse = &cloudtracepb.ListTracesResponse{ + NextPageToken: nextPageToken, + Traces: traces, + } + + mockTrace.err = nil + mockTrace.reqs = nil + + mockTrace.resps = append(mockTrace.resps[:0], expectedResponse) + + var projectId string = "projectId-1969970175" + var request = &cloudtracepb.ListTracesRequest{ + ProjectId: projectId, + } + + c, err := 
NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListTraces(context.Background(), request).Next() + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockTrace.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + want := (interface{})(expectedResponse.Traces[0]) + got := (interface{})(resp) + var ok bool + + switch want := (want).(type) { + case proto.Message: + ok = proto.Equal(want, got.(proto.Message)) + default: + ok = want == got + } + if !ok { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func TestTraceServiceListTracesError(t *testing.T) { + errCode := codes.Internal + mockTrace.err = grpc.Errorf(errCode, "test error") + + var projectId string = "projectId-1969970175" + var request = &cloudtracepb.ListTracesRequest{ + ProjectId: projectId, + } + + c, err := NewClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.ListTraces(context.Background(), request).Next() + + if c := grpc.Code(err); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} diff --git a/vendor/cloud.google.com/go/trace/apiv1/trace_client.go b/vendor/cloud.google.com/go/trace/apiv1/trace_client.go new file mode 100644 index 00000000..ea4060ca --- /dev/null +++ b/vendor/cloud.google.com/go/trace/apiv1/trace_client.go @@ -0,0 +1,235 @@ +// Copyright 2017, Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package trace + +import ( + "math" + "time" + + "cloud.google.com/go/internal/version" + gax "github.com/googleapis/gax-go" + "golang.org/x/net/context" + "google.golang.org/api/iterator" + "google.golang.org/api/option" + "google.golang.org/api/transport" + cloudtracepb "google.golang.org/genproto/googleapis/devtools/cloudtrace/v1" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" +) + +// CallOptions contains the retry settings for each method of Client. +type CallOptions struct { + PatchTraces []gax.CallOption + GetTrace []gax.CallOption + ListTraces []gax.CallOption +} + +func defaultClientOptions() []option.ClientOption { + return []option.ClientOption{ + option.WithEndpoint("cloudtrace.googleapis.com:443"), + option.WithScopes( + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/trace.append", + "https://www.googleapis.com/auth/trace.readonly", + ), + } +} + +func defaultCallOptions() *CallOptions { + retry := map[[2]string][]gax.CallOption{ + {"default", "idempotent"}: { + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 1000 * time.Millisecond, + Multiplier: 1.2, + }) + }), + }, + } + return &CallOptions{ + PatchTraces: retry[[2]string{"default", "idempotent"}], + GetTrace: retry[[2]string{"default", "idempotent"}], + ListTraces: retry[[2]string{"default", "idempotent"}], + } +} + +// Client is a client for interacting with Stackdriver Trace API. +type Client struct { + // The connection to the service. + conn *grpc.ClientConn + + // The gRPC API client. + client cloudtracepb.TraceServiceClient + + // The call options for this service. + CallOptions *CallOptions + + // The metadata to be sent with each request. 
+ xGoogHeader string +} + +// NewClient creates a new trace service client. +// +// This file describes an API for collecting and viewing traces and spans +// within a trace. A Trace is a collection of spans corresponding to a single +// operation or set of operations for an application. A span is an individual +// timed event which forms a node of the trace tree. Spans for a single trace +// may span multiple services. +func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error) { + conn, err := transport.DialGRPC(ctx, append(defaultClientOptions(), opts...)...) + if err != nil { + return nil, err + } + c := &Client{ + conn: conn, + CallOptions: defaultCallOptions(), + + client: cloudtracepb.NewTraceServiceClient(conn), + } + c.SetGoogleClientInfo() + return c, nil +} + +// Connection returns the client's connection to the API service. +func (c *Client) Connection() *grpc.ClientConn { + return c.conn +} + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *Client) Close() error { + return c.conn.Close() +} + +// SetGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *Client) SetGoogleClientInfo(keyval ...string) { + kv := append([]string{"gl-go", version.Go()}, keyval...) + kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", "") + c.xGoogHeader = gax.XGoogHeader(kv...) +} + +// PatchTraces sends new traces to Stackdriver Trace or updates existing traces. If the ID +// of a trace that you send matches that of an existing trace, any fields +// in the existing trace and its spans are overwritten by the provided values, +// and any new fields provided are merged with the existing trace data. If the +// ID does not match, a new trace is created. 
+func (c *Client) PatchTraces(ctx context.Context, req *cloudtracepb.PatchTracesRequest) error { + ctx = insertXGoog(ctx, c.xGoogHeader) + err := gax.Invoke(ctx, func(ctx context.Context) error { + var err error + _, err = c.client.PatchTraces(ctx, req) + return err + }, c.CallOptions.PatchTraces...) + return err +} + +// GetTrace gets a single trace by its ID. +func (c *Client) GetTrace(ctx context.Context, req *cloudtracepb.GetTraceRequest) (*cloudtracepb.Trace, error) { + ctx = insertXGoog(ctx, c.xGoogHeader) + var resp *cloudtracepb.Trace + err := gax.Invoke(ctx, func(ctx context.Context) error { + var err error + resp, err = c.client.GetTrace(ctx, req) + return err + }, c.CallOptions.GetTrace...) + if err != nil { + return nil, err + } + return resp, nil +} + +// ListTraces returns of a list of traces that match the specified filter conditions. +func (c *Client) ListTraces(ctx context.Context, req *cloudtracepb.ListTracesRequest) *TraceIterator { + ctx = insertXGoog(ctx, c.xGoogHeader) + it := &TraceIterator{} + it.InternalFetch = func(pageSize int, pageToken string) ([]*cloudtracepb.Trace, string, error) { + var resp *cloudtracepb.ListTracesResponse + req.PageToken = pageToken + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context) error { + var err error + resp, err = c.client.ListTraces(ctx, req) + return err + }, c.CallOptions.ListTraces...) + if err != nil { + return nil, "", err + } + return resp.Traces, resp.NextPageToken, nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + return it +} + +// TraceIterator manages a stream of *cloudtracepb.Trace. 
+type TraceIterator struct { + items []*cloudtracepb.Trace + pageInfo *iterator.PageInfo + nextFunc func() error + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*cloudtracepb.Trace, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *TraceIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. +func (it *TraceIterator) Next() (*cloudtracepb.Trace, error) { + var item *cloudtracepb.Trace + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *TraceIterator) bufLen() int { + return len(it.items) +} + +func (it *TraceIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} diff --git a/vendor/cloud.google.com/go/trace/apiv1/trace_client_example_test.go b/vendor/cloud.google.com/go/trace/apiv1/trace_client_example_test.go new file mode 100644 index 00000000..733c5918 --- /dev/null +++ b/vendor/cloud.google.com/go/trace/apiv1/trace_client_example_test.go @@ -0,0 +1,89 @@ +// Copyright 2017, Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package trace_test + +import ( + "cloud.google.com/go/trace/apiv1" + "golang.org/x/net/context" + cloudtracepb "google.golang.org/genproto/googleapis/devtools/cloudtrace/v1" +) + +func ExampleNewClient() { + ctx := context.Background() + c, err := trace.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + // TODO: Use client. + _ = c +} + +func ExampleClient_PatchTraces() { + ctx := context.Background() + c, err := trace.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &cloudtracepb.PatchTracesRequest{ + // TODO: Fill request struct fields. + } + err = c.PatchTraces(ctx, req) + if err != nil { + // TODO: Handle error. + } +} + +func ExampleClient_GetTrace() { + ctx := context.Background() + c, err := trace.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &cloudtracepb.GetTraceRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.GetTrace(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. + _ = resp +} + +func ExampleClient_ListTraces() { + ctx := context.Background() + c, err := trace.NewClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &cloudtracepb.ListTracesRequest{ + // TODO: Fill request struct fields. + } + it := c.ListTraces(ctx, req) + for { + resp, err := it.Next() + if err != nil { + // TODO: Handle error. + break + } + // TODO: Use resp. 
+ _ = resp + } +} diff --git a/vendor/cloud.google.com/go/trace/sampling.go b/vendor/cloud.google.com/go/trace/sampling.go new file mode 100644 index 00000000..d609290b --- /dev/null +++ b/vendor/cloud.google.com/go/trace/sampling.go @@ -0,0 +1,117 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package trace + +import ( + crand "crypto/rand" + "encoding/binary" + "fmt" + "math/rand" + "sync" + "time" + + "golang.org/x/time/rate" +) + +type SamplingPolicy interface { + // Sample returns a Decision. + // If Trace is false in the returned Decision, then the Decision should be + // the zero value. + Sample(p Parameters) Decision +} + +// Parameters contains the values passed to a SamplingPolicy's Sample method. +type Parameters struct { + HasTraceHeader bool // whether the incoming request has a valid X-Cloud-Trace-Context header. +} + +// Decision is the value returned by a call to a SamplingPolicy's Sample method. +type Decision struct { + Trace bool // Whether to trace the request. + Sample bool // Whether the trace is included in the random sample. + Policy string // Name of the sampling policy. + Weight float64 // Sample weight to be used in statistical calculations. 
+} + +type sampler struct { + fraction float64 + skipped float64 + *rate.Limiter + *rand.Rand + sync.Mutex +} + +func (s *sampler) Sample(p Parameters) Decision { + s.Lock() + x := s.Float64() + d := s.sample(p, time.Now(), x) + s.Unlock() + return d +} + +// sample contains the a deterministic, time-independent logic of Sample. +func (s *sampler) sample(p Parameters, now time.Time, x float64) (d Decision) { + d.Sample = x < s.fraction + d.Trace = p.HasTraceHeader || d.Sample + if !d.Trace { + // We have no reason to trace this request. + return Decision{} + } + // We test separately that the rate limit is not tiny before calling AllowN, + // because of overflow problems in x/time/rate. + if s.Limit() < 1e-9 || !s.AllowN(now, 1) { + // Rejected by the rate limit. + if d.Sample { + s.skipped++ + } + return Decision{} + } + if d.Sample { + d.Policy, d.Weight = "default", (1.0+s.skipped)/s.fraction + s.skipped = 0.0 + } + return +} + +// NewLimitedSampler returns a sampling policy that randomly samples a given +// fraction of requests. It also enforces a limit on the number of traces per +// second. It tries to trace every request with a trace header, but will not +// exceed the qps limit to do it. +func NewLimitedSampler(fraction, maxqps float64) (SamplingPolicy, error) { + if !(fraction >= 0) { + return nil, fmt.Errorf("invalid fraction %f", fraction) + } + if !(maxqps >= 0) { + return nil, fmt.Errorf("invalid maxqps %f", maxqps) + } + // Set a limit on the number of accumulated "tokens", to limit bursts of + // traced requests. Use one more than a second's worth of tokens, or 100, + // whichever is smaller. + // See https://godoc.org/golang.org/x/time/rate#NewLimiter. 
+ maxTokens := 100 + if maxqps < 99.0 { + maxTokens = 1 + int(maxqps) + } + var seed int64 + if err := binary.Read(crand.Reader, binary.LittleEndian, &seed); err != nil { + seed = time.Now().UnixNano() + } + s := sampler{ + fraction: fraction, + Limiter: rate.NewLimiter(rate.Limit(maxqps), maxTokens), + Rand: rand.New(rand.NewSource(seed)), + } + return &s, nil +} diff --git a/vendor/cloud.google.com/go/trace/trace.go b/vendor/cloud.google.com/go/trace/trace.go new file mode 100644 index 00000000..8e50dfac --- /dev/null +++ b/vendor/cloud.google.com/go/trace/trace.go @@ -0,0 +1,811 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package trace is a Google Stackdriver Trace library. +// +// This package is still experimental and subject to change. +// +// See https://cloud.google.com/trace/api/#data_model for a discussion of traces +// and spans. +// +// To initialize a client that connects to the Stackdriver Trace server, use the +// NewClient function. Generally you will want to do this on program +// initialization. +// +// import "cloud.google.com/go/trace" +// ... +// traceClient, err = trace.NewClient(ctx, projectID) +// +// Calling SpanFromRequest will create a new trace span for an incoming HTTP +// request. If the request contains a trace context header, it is used to +// determine the trace ID. Otherwise, a new trace ID is created. 
+// +// func handler(w http.ResponseWriter, r *http.Request) { +// span := traceClient.SpanFromRequest(r) +// defer span.Finish() +// ... +// } +// +// SpanFromRequest and NewSpan returns nil if the *Client is nil, so you can disable +// tracing by not initializing your *Client variable. All of the exported +// functions on *Span do nothing when the *Span is nil. +// +// If you need to start traces that don't correspond to an incoming HTTP request, +// you can use NewSpan to create a root-level span. +// +// span := traceClient.NewSpan("span name") +// defer span.Finish() +// +// Although a trace span object is created for every request, only a subset of +// traces are uploaded to the server, for efficiency. By default, the requests +// that are traced are those with the tracing bit set in the options field of +// the trace context header. Ideally, you should override this behaviour by +// calling SetSamplingPolicy. NewLimitedSampler returns an implementation of +// SamplingPolicy which traces requests that have the tracing bit set, and also +// randomly traces a specified fraction of requests. Additionally, it sets a +// limit on the number of requests traced per second. The following example +// traces one in every thousand requests, up to a limit of 5 per second. +// +// p, err := trace.NewLimitedSampler(0.001, 5) +// traceClient.SetSamplingPolicy(p) +// +// You can create a new span as a child of an existing span with NewChild. +// +// childSpan := span.NewChild(name) +// ... +// childSpan.Finish() +// +// When sending an HTTP request to another server, NewRemoteChild will create +// a span to represent the time the current program waits for the request to +// complete, and attach a header to the outgoing request so that the trace will +// be propagated to the destination server. +// +// childSpan := span.NewRemoteChild(&httpRequest) +// ... 
+// childSpan.Finish() +// +// Alternatively, if you have access to the X-Cloud-Trace-Context header value +// but not the underlying HTTP request (this can happen if you are using a +// different transport or messaging protocol, such as gRPC), you can use +// SpanFromHeader instead of SpanFromRequest. In that case, you will need to +// specify the span name explicility, since it cannot be constructed from the +// HTTP request's URL and method. +// +// func handler(r *somepkg.Request) { +// span := traceClient.SpanFromHeader("span name", r.TraceContext()) +// defer span.Finish() +// ... +// } +// +// Spans can contain a map from keys to values that have useful information +// about the span. The elements of this map are called labels. Some labels, +// whose keys all begin with the string "trace.cloud.google.com/", are set +// automatically in the following ways: +// +// - SpanFromRequest sets some labels to data about the incoming request. +// +// - NewRemoteChild sets some labels to data about the outgoing request. +// +// - Finish sets a label to a stack trace, if the stack trace option is enabled +// in the incoming trace header. +// +// - The WithResponse option sets some labels to data about a response. +// You can also set labels using SetLabel. If a label is given a value +// automatically and by SetLabel, the automatically-set value is used. +// +// span.SetLabel(key, value) +// +// The WithResponse option can be used when Finish is called. +// +// childSpan := span.NewRemoteChild(outgoingReq) +// resp, err := http.DefaultClient.Do(outgoingReq) +// ... +// childSpan.Finish(trace.WithResponse(resp)) +// +// When a span created by SpanFromRequest or SpamFromHeader is finished, the +// finished spans in the corresponding trace -- the span itself and its +// descendants -- are uploaded to the Stackdriver Trace server using the +// *Client that created the span. Finish returns immediately, and uploading +// occurs asynchronously. 
You can use the FinishWait function instead to wait +// until uploading has finished. +// +// err := span.FinishWait() +// +// Using contexts to pass *trace.Span objects through your program will often +// be a better approach than passing them around explicitly. This allows trace +// spans, and other request-scoped or part-of-request-scoped values, to be +// easily passed through API boundaries. Various Google Cloud libraries will +// retrieve trace spans from contexts and automatically create child spans for +// API requests. +// See https://blog.golang.org/context for more discussion of contexts. +// A derived context containing a trace span can be created using NewContext. +// +// span := traceClient.SpanFromRequest(r) +// ctx = trace.NewContext(ctx, span) +// +// The span can be retrieved from a context elsewhere in the program using +// FromContext. +// +// func foo(ctx context.Context) { +// span := trace.FromContext(ctx).NewChild("in foo") +// defer span.Finish() +// ... +// } +// +package trace // import "cloud.google.com/go/trace" + +import ( + "crypto/rand" + "encoding/binary" + "encoding/json" + "fmt" + "log" + "net/http" + "runtime" + "strconv" + "strings" + "sync" + "sync/atomic" + "time" + + "golang.org/x/net/context" + api "google.golang.org/api/cloudtrace/v1" + "google.golang.org/api/gensupport" + "google.golang.org/api/option" + "google.golang.org/api/support/bundler" + "google.golang.org/api/transport" + "google.golang.org/grpc" +) + +const ( + httpHeader = `X-Cloud-Trace-Context` + userAgent = `gcloud-golang-trace/20160501` + cloudPlatformScope = `https://www.googleapis.com/auth/cloud-platform` + spanKindClient = `RPC_CLIENT` + spanKindServer = `RPC_SERVER` + spanKindUnspecified = `SPAN_KIND_UNSPECIFIED` + maxStackFrames = 20 + labelHost = `trace.cloud.google.com/http/host` + labelMethod = `trace.cloud.google.com/http/method` + labelStackTrace = `trace.cloud.google.com/stacktrace` + labelStatusCode = `trace.cloud.google.com/http/status_code` + 
labelURL = `trace.cloud.google.com/http/url` + labelSamplingPolicy = `trace.cloud.google.com/sampling_policy` + labelSamplingWeight = `trace.cloud.google.com/sampling_weight` +) + +const ( + // ScopeTraceAppend grants permissions to write trace data for a project. + ScopeTraceAppend = "https://www.googleapis.com/auth/trace.append" + + // ScopeCloudPlatform grants permissions to view and manage your data + // across Google Cloud Platform services. + ScopeCloudPlatform = "https://www.googleapis.com/auth/cloud-platform" +) + +type contextKey struct{} + +type stackLabelValue struct { + Frames []stackFrame `json:"stack_frame"` +} + +type stackFrame struct { + Class string `json:"class_name,omitempty"` + Method string `json:"method_name"` + Filename string `json:"file_name"` + Line int64 `json:"line_number"` +} + +var ( + spanIDCounter uint64 + spanIDIncrement uint64 +) + +func init() { + // Set spanIDCounter and spanIDIncrement to random values. nextSpanID will + // return an arithmetic progression using these values, skipping zero. We set + // the LSB of spanIDIncrement to 1, so that the cycle length is 2^64. + binary.Read(rand.Reader, binary.LittleEndian, &spanIDCounter) + binary.Read(rand.Reader, binary.LittleEndian, &spanIDIncrement) + spanIDIncrement |= 1 + // Attach hook for autogenerated Google API calls. This will automatically + // create trace spans for API calls if there is a trace in the context. + gensupport.RegisterHook(requestHook) +} + +func requestHook(ctx context.Context, req *http.Request) func(resp *http.Response) { + span := FromContext(ctx) + if span == nil || req == nil { + return nil + } + span = span.NewRemoteChild(req) + return func(resp *http.Response) { + if resp != nil { + span.Finish(WithResponse(resp)) + } else { + span.Finish() + } + } +} + +// EnableGRPCTracingDialOption traces all outgoing requests from a gRPC client. +// The calling context should already have a *trace.Span; a child span will be +// created for the outgoing gRPC call. 
If the calling context doesn't have a span, +// the call will not be traced. +// +// The functionality in gRPC that this relies on is currently experimental. +var EnableGRPCTracingDialOption grpc.DialOption = grpc.WithUnaryInterceptor(grpc.UnaryClientInterceptor(grpcUnaryInterceptor)) + +// EnableGRPCTracing automatically traces all gRPC calls from cloud.google.com/go clients. +// +// The functionality in gRPC that this relies on is currently experimental. +var EnableGRPCTracing option.ClientOption = option.WithGRPCDialOption(EnableGRPCTracingDialOption) + +func grpcUnaryInterceptor(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error { + // TODO: also intercept streams. + span := FromContext(ctx).NewChild(method) + err := invoker(ctx, method, req, reply, cc, opts...) + if err != nil { + // TODO: standardize gRPC label names? + span.SetLabel("error", err.Error()) + } + span.Finish() + return err +} + +// nextSpanID returns a new span ID. It will never return zero. +func nextSpanID() uint64 { + var id uint64 + for id == 0 { + id = atomic.AddUint64(&spanIDCounter, spanIDIncrement) + } + return id +} + +// nextTraceID returns a new trace ID. +func nextTraceID() string { + id1 := nextSpanID() + id2 := nextSpanID() + return fmt.Sprintf("%016x%016x", id1, id2) +} + +// Client is a client for uploading traces to the Google Stackdriver Trace server. +type Client struct { + service *api.Service + projectID string + policy SamplingPolicy + bundler *bundler.Bundler +} + +// NewClient creates a new Google Stackdriver Trace client. +func NewClient(ctx context.Context, projectID string, opts ...option.ClientOption) (*Client, error) { + o := []option.ClientOption{ + option.WithScopes(cloudPlatformScope), + option.WithUserAgent(userAgent), + } + o = append(o, opts...) + hc, basePath, err := transport.NewHTTPClient(ctx, o...) 
+ if err != nil { + return nil, fmt.Errorf("creating HTTP client for Google Stackdriver Trace API: %v", err) + } + apiService, err := api.New(hc) + if err != nil { + return nil, fmt.Errorf("creating Google Stackdriver Trace API client: %v", err) + } + if basePath != "" { + // An option set a basepath, so override api.New's default. + apiService.BasePath = basePath + } + c := &Client{ + service: apiService, + projectID: projectID, + } + bundler := bundler.NewBundler((*api.Trace)(nil), func(bundle interface{}) { + traces := bundle.([]*api.Trace) + err := c.upload(traces) + if err != nil { + log.Printf("failed to upload %d traces to the Cloud Trace server.", len(traces)) + } + }) + bundler.DelayThreshold = 2 * time.Second + bundler.BundleCountThreshold = 100 + // We're not measuring bytes here, we're counting traces and spans as one "byte" each. + bundler.BundleByteThreshold = 1000 + bundler.BundleByteLimit = 1000 + bundler.BufferedByteLimit = 10000 + c.bundler = bundler + return c, nil +} + +// SetSamplingPolicy sets the SamplingPolicy that determines how often traces +// are initiated by this client. +func (c *Client) SetSamplingPolicy(p SamplingPolicy) { + if c != nil { + c.policy = p + } +} + +// SpanFromHeader returns a new trace span, based on a provided request header +// value. See https://cloud.google.com/trace/docs/faq. +// +// It returns nil iff the client is nil. +// +// The trace information and identifiers will be read from the header value. +// Otherwise, a new trace ID is made and the parent span ID is zero. +// +// The name of the new span is provided as an argument. +// +// If a non-nil sampling policy has been set in the client, it can override +// the options set in the header and choose whether to trace the request. +// +// If the header doesn't have existing tracing information, then a *Span is +// returned anyway, but it will not be uploaded to the server, just as when +// calling SpanFromRequest on an untraced request. 
+// +// Most users using HTTP should use SpanFromRequest, rather than +// SpanFromHeader, since it provides additional functionality for HTTP +// requests. In particular, it will set various pieces of request information +// as labels on the *Span, which is not available from the header alone. +func (c *Client) SpanFromHeader(name string, header string) *Span { + if c == nil { + return nil + } + traceID, parentSpanID, options, ok := traceInfoFromHeader(header) + if !ok { + traceID = nextTraceID() + } + t := &trace{ + traceID: traceID, + client: c, + globalOptions: options, + localOptions: options, + } + span := startNewChild(name, t, parentSpanID) + span.span.Kind = spanKindServer + span.rootSpan = true + configureSpanFromPolicy(span, c.policy, ok) + return span +} + +// SpanFromRequest returns a new trace span for an HTTP request. +// +// It returns nil iff the client is nil. +// +// If the incoming HTTP request contains a trace context header, the trace ID, +// parent span ID, and tracing options will be read from that header. +// Otherwise, a new trace ID is made and the parent span ID is zero. +// +// If a non-nil sampling policy has been set in the client, it can override the +// options set in the header and choose whether to trace the request. +// +// If the request is not being traced, then a *Span is returned anyway, but it +// will not be uploaded to the server -- it is only useful for propagating +// trace context to child requests and for getting the TraceID. All its +// methods can still be called -- the Finish, FinishWait, and SetLabel methods +// do nothing. NewChild does nothing, and returns the same *Span. TraceID +// works as usual. 
+func (c *Client) SpanFromRequest(r *http.Request) *Span { + if c == nil { + return nil + } + traceID, parentSpanID, options, ok := traceInfoFromHeader(r.Header.Get(httpHeader)) + if !ok { + traceID = nextTraceID() + } + t := &trace{ + traceID: traceID, + client: c, + globalOptions: options, + localOptions: options, + } + span := startNewChildWithRequest(r, t, parentSpanID) + span.span.Kind = spanKindServer + span.rootSpan = true + configureSpanFromPolicy(span, c.policy, ok) + return span +} + +// NewSpan returns a new trace span with the given name. +// +// A new trace and span ID is generated to trace the span. +// Returned span need to be finished by calling Finish or FinishWait. +func (c *Client) NewSpan(name string) *Span { + if c == nil { + return nil + } + t := &trace{ + traceID: nextTraceID(), + client: c, + localOptions: optionTrace, + globalOptions: optionTrace, + } + span := startNewChild(name, t, 0) + span.span.Kind = spanKindServer + span.rootSpan = true + configureSpanFromPolicy(span, c.policy, false) + return span +} + +func configureSpanFromPolicy(s *Span, p SamplingPolicy, ok bool) { + if p == nil { + return + } + d := p.Sample(Parameters{HasTraceHeader: ok}) + if d.Trace { + // Turn on tracing locally, and in child requests. + s.trace.localOptions |= optionTrace + s.trace.globalOptions |= optionTrace + } else { + // Turn off tracing locally. + s.trace.localOptions = 0 + return + } + if d.Sample { + // This trace is in the random sample, so set the labels. + s.SetLabel(labelSamplingPolicy, d.Policy) + s.SetLabel(labelSamplingWeight, fmt.Sprint(d.Weight)) + } +} + +// NewContext returns a derived context containing the span. +func NewContext(ctx context.Context, s *Span) context.Context { + if s == nil { + return ctx + } + return context.WithValue(ctx, contextKey{}, s) +} + +// FromContext returns the span contained in the context, or nil. 
+func FromContext(ctx context.Context) *Span { + s, _ := ctx.Value(contextKey{}).(*Span) + return s +} + +func traceInfoFromHeader(h string) (string, uint64, optionFlags, bool) { + // See https://cloud.google.com/trace/docs/faq for the header format. + // Return if the header is empty or missing, or if the header is unreasonably + // large, to avoid making unnecessary copies of a large string. + if h == "" || len(h) > 200 { + return "", 0, 0, false + } + + // Parse the trace id field. + slash := strings.Index(h, `/`) + if slash == -1 { + return "", 0, 0, false + } + traceID, h := h[:slash], h[slash+1:] + + // Parse the span id field. + semicolon := strings.Index(h, `;`) + if semicolon == -1 { + return "", 0, 0, false + } + spanstr, h := h[:semicolon], h[semicolon+1:] + spanID, err := strconv.ParseUint(spanstr, 10, 64) + if err != nil { + return "", 0, 0, false + } + + // Parse the options field. + if !strings.HasPrefix(h, "o=") { + return "", 0, 0, false + } + o, err := strconv.ParseUint(h[2:], 10, 64) + if err != nil { + return "", 0, 0, false + } + options := optionFlags(o) + return traceID, spanID, options, true +} + +type optionFlags uint32 + +const ( + optionTrace optionFlags = 1 << iota + optionStack +) + +type trace struct { + mu sync.Mutex + client *Client + traceID string + globalOptions optionFlags // options that will be passed to any child requests + localOptions optionFlags // options applied in this server + spans []*Span // finished spans for this trace. +} + +// finish appends s to t.spans. If s is the root span, uploads the trace to the +// server. 
+func (t *trace) finish(s *Span, wait bool, opts ...FinishOption) error { + for _, o := range opts { + o.modifySpan(s) + } + s.end = time.Now() + t.mu.Lock() + t.spans = append(t.spans, s) + spans := t.spans + t.mu.Unlock() + if s.rootSpan { + if wait { + return t.client.upload([]*api.Trace{t.constructTrace(spans)}) + } + go func() { + tr := t.constructTrace(spans) + err := t.client.bundler.Add(tr, 1+len(spans)) + if err == bundler.ErrOversizedItem { + err = t.client.upload([]*api.Trace{tr}) + } + if err != nil { + log.Println("error uploading trace:", err) + } + }() + } + return nil +} + +func (t *trace) constructTrace(spans []*Span) *api.Trace { + apiSpans := make([]*api.TraceSpan, len(spans)) + for i, sp := range spans { + sp.span.StartTime = sp.start.In(time.UTC).Format(time.RFC3339Nano) + sp.span.EndTime = sp.end.In(time.UTC).Format(time.RFC3339Nano) + if t.localOptions&optionStack != 0 { + sp.setStackLabel() + } + sp.SetLabel(labelHost, sp.host) + sp.SetLabel(labelURL, sp.url) + sp.SetLabel(labelMethod, sp.method) + if sp.statusCode != 0 { + sp.SetLabel(labelStatusCode, strconv.Itoa(sp.statusCode)) + } + apiSpans[i] = &sp.span + } + + return &api.Trace{ + ProjectId: t.client.projectID, + TraceId: t.traceID, + Spans: apiSpans, + } +} + +func (c *Client) upload(traces []*api.Trace) error { + _, err := c.service.Projects.PatchTraces(c.projectID, &api.Traces{Traces: traces}).Do() + return err +} + +// Span contains information about one span of a trace. +type Span struct { + trace *trace + span api.TraceSpan + start time.Time + end time.Time + rootSpan bool + stack [maxStackFrames]uintptr + host string + method string + url string + statusCode int +} + +func (s *Span) tracing() bool { + return s.trace.localOptions&optionTrace != 0 +} + +// NewChild creates a new span with the given name as a child of s. +// If s is nil, does nothing and returns nil. 
+func (s *Span) NewChild(name string) *Span { + if s == nil { + return nil + } + if !s.tracing() { + return s + } + return startNewChild(name, s.trace, s.span.SpanId) +} + +// NewRemoteChild creates a new span as a child of s. +// +// Some labels in the span are set from the outgoing *http.Request r. +// +// A header is set in r so that the trace context is propagated to the +// destination. The parent span ID in that header is set as follows: +// - If the request is being traced, then the ID of s is used. +// - If the request is not being traced, but there was a trace context header +// in the incoming request for this trace (the request passed to +// SpanFromRequest), the parent span ID in that header is used. +// - Otherwise, the parent span ID is zero. +// The tracing bit in the options is set if tracing is enabled, or if it was +// set in the incoming request. +// +// If s is nil, does nothing and returns nil. +func (s *Span) NewRemoteChild(r *http.Request) *Span { + if s == nil { + return nil + } + if !s.tracing() { + r.Header[httpHeader] = []string{spanHeader(s.trace.traceID, s.span.ParentSpanId, s.trace.globalOptions)} + return s + } + newSpan := startNewChildWithRequest(r, s.trace, s.span.SpanId) + r.Header[httpHeader] = []string{spanHeader(s.trace.traceID, newSpan.span.SpanId, s.trace.globalOptions)} + return newSpan +} + +func startNewChildWithRequest(r *http.Request, trace *trace, parentSpanID uint64) *Span { + name := r.URL.Host + r.URL.Path // drop scheme and query params + newSpan := startNewChild(name, trace, parentSpanID) + if r.Host == "" { + newSpan.host = r.URL.Host + } else { + newSpan.host = r.Host + } + newSpan.method = r.Method + newSpan.url = r.URL.String() + return newSpan +} + +func startNewChild(name string, trace *trace, parentSpanID uint64) *Span { + spanID := nextSpanID() + for spanID == parentSpanID { + spanID = nextSpanID() + } + newSpan := &Span{ + trace: trace, + span: api.TraceSpan{ + Kind: spanKindClient, + Name: name, + 
ParentSpanId: parentSpanID, + SpanId: spanID, + }, + start: time.Now(), + } + if trace.localOptions&optionStack != 0 { + _ = runtime.Callers(1, newSpan.stack[:]) + } + return newSpan +} + +// TraceID returns the ID of the trace to which s belongs. +func (s *Span) TraceID() string { + if s == nil { + return "" + } + return s.trace.traceID +} + +// SetLabel sets the label for the given key to the given value. +// If the value is empty, the label for that key is deleted. +// If a label is given a value automatically and by SetLabel, the +// automatically-set value is used. +// If s is nil, does nothing. +func (s *Span) SetLabel(key, value string) { + if s == nil { + return + } + if !s.tracing() { + return + } + if value == "" { + if s.span.Labels != nil { + delete(s.span.Labels, key) + } + return + } + if s.span.Labels == nil { + s.span.Labels = make(map[string]string) + } + s.span.Labels[key] = value +} + +type FinishOption interface { + modifySpan(s *Span) +} + +type withResponse struct { + *http.Response +} + +// WithResponse returns an option that can be passed to Finish that indicates +// that some labels for the span should be set using the given *http.Response. +func WithResponse(resp *http.Response) FinishOption { + return withResponse{resp} +} +func (u withResponse) modifySpan(s *Span) { + if u.Response != nil { + s.statusCode = u.StatusCode + } +} + +// Finish declares that the span has finished. +// +// If s is nil, Finish does nothing and returns nil. +// +// If the option trace.WithResponse(resp) is passed, then some labels are set +// for s using information in the given *http.Response. This is useful when the +// span is for an outgoing http request; s will typically have been created by +// NewRemoteChild in this case. +// +// If s is a root span (one created by SpanFromRequest) then s, and all its +// descendant spans that have finished, are uploaded to the Google Stackdriver +// Trace server asynchronously. 
+func (s *Span) Finish(opts ...FinishOption) { + if s == nil { + return + } + if !s.tracing() { + return + } + s.trace.finish(s, false, opts...) +} + +// FinishWait is like Finish, but if s is a root span, it waits until uploading +// is finished, then returns an error if one occurred. +func (s *Span) FinishWait(opts ...FinishOption) error { + if s == nil { + return nil + } + if !s.tracing() { + return nil + } + return s.trace.finish(s, true, opts...) +} + +func spanHeader(traceID string, spanID uint64, options optionFlags) string { + // See https://cloud.google.com/trace/docs/faq for the header format. + return fmt.Sprintf("%s/%d;o=%d", traceID, spanID, options) +} + +func (s *Span) setStackLabel() { + var stack stackLabelValue + lastSigPanic, inTraceLibrary := false, true + for _, pc := range s.stack { + if pc == 0 { + break + } + if !lastSigPanic { + pc-- + } + fn := runtime.FuncForPC(pc) + file, line := fn.FileLine(pc) + // Name has one of the following forms: + // path/to/package.Foo + // path/to/package.(Type).Foo + // For the first form, we store the whole name in the Method field of the + // stack frame. For the second form, we set the Method field to "Foo" and + // the Class field to "path/to/package.(Type)". 
+ name := fn.Name() + if inTraceLibrary && !strings.HasPrefix(name, "cloud.google.com/go/trace.") { + inTraceLibrary = false + } + var class string + if i := strings.Index(name, ")."); i != -1 { + class, name = name[:i+1], name[i+2:] + } + frame := stackFrame{ + Class: class, + Method: name, + Filename: file, + Line: int64(line), + } + if inTraceLibrary && len(stack.Frames) == 1 { + stack.Frames[0] = frame + } else { + stack.Frames = append(stack.Frames, frame) + } + lastSigPanic = fn.Name() == "runtime.sigpanic" + } + if label, err := json.Marshal(stack); err == nil { + s.SetLabel(labelStackTrace, string(label)) + } +} diff --git a/vendor/cloud.google.com/go/trace/trace_test.go b/vendor/cloud.google.com/go/trace/trace_test.go new file mode 100644 index 00000000..556947d0 --- /dev/null +++ b/vendor/cloud.google.com/go/trace/trace_test.go @@ -0,0 +1,849 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package trace + +import ( + "encoding/json" + "errors" + "fmt" + "io/ioutil" + "math/rand" + "net/http" + "reflect" + "strings" + "sync" + "testing" + "time" + + "cloud.google.com/go/datastore" + "cloud.google.com/go/internal/testutil" + "cloud.google.com/go/storage" + "golang.org/x/net/context" + api "google.golang.org/api/cloudtrace/v1" + compute "google.golang.org/api/compute/v1" + "google.golang.org/api/iterator" + "google.golang.org/api/option" + dspb "google.golang.org/genproto/googleapis/datastore/v1" + "google.golang.org/grpc" +) + +const testProjectID = "testproject" + +type fakeRoundTripper struct { + reqc chan *http.Request +} + +func newFakeRoundTripper() *fakeRoundTripper { + return &fakeRoundTripper{reqc: make(chan *http.Request)} +} + +func (rt *fakeRoundTripper) RoundTrip(r *http.Request) (*http.Response, error) { + rt.reqc <- r + resp := &http.Response{ + Status: "200 OK", + StatusCode: 200, + Body: ioutil.NopCloser(strings.NewReader("{}")), + } + return resp, nil +} + +func newTestClient(rt http.RoundTripper) *Client { + t, err := NewClient(context.Background(), testProjectID, option.WithHTTPClient(&http.Client{Transport: rt})) + if err != nil { + panic(err) + } + return t +} + +type fakeDatastoreServer struct { + dspb.DatastoreServer + fail bool +} + +func (f *fakeDatastoreServer) Lookup(ctx context.Context, req *dspb.LookupRequest) (*dspb.LookupResponse, error) { + if f.fail { + return nil, errors.New("lookup failed") + } + return &dspb.LookupResponse{}, nil +} + +// makeRequests makes some requests. +// span is the root span. rt is the trace client's http client's transport. +// This is used to retrieve the trace uploaded by the client, if any. If +// expectTrace is true, we expect a trace will be uploaded. If synchronous is +// true, the call to Finish is expected not to return before the client has +// uploaded any traces. 
+func makeRequests(t *testing.T, span *Span, rt *fakeRoundTripper, synchronous bool, expectTrace bool) *http.Request { + ctx := NewContext(context.Background(), span) + + // An HTTP request. + { + req2, err := http.NewRequest("GET", "http://example.com/bar", nil) + if err != nil { + t.Fatal(err) + } + resp := &http.Response{StatusCode: 200} + s := span.NewRemoteChild(req2) + s.Finish(WithResponse(resp)) + } + + // An autogenerated API call. + { + rt := &fakeRoundTripper{reqc: make(chan *http.Request, 1)} + hc := &http.Client{Transport: rt} + computeClient, err := compute.New(hc) + if err != nil { + t.Fatal(err) + } + _, err = computeClient.Zones.List(testProjectID).Context(ctx).Do() + if err != nil { + t.Fatal(err) + } + } + + // A cloud library call that uses the autogenerated API. + { + rt := &fakeRoundTripper{reqc: make(chan *http.Request, 1)} + hc := &http.Client{Transport: rt} + storageClient, err := storage.NewClient(context.Background(), option.WithHTTPClient(hc)) + if err != nil { + t.Fatal(err) + } + var objAttrsList []*storage.ObjectAttrs + it := storageClient.Bucket("testbucket").Objects(ctx, nil) + for { + objAttrs, err := it.Next() + if err != nil && err != iterator.Done { + t.Fatal(err) + } + if err == iterator.Done { + break + } + objAttrsList = append(objAttrsList, objAttrs) + } + } + + // A cloud library call that uses grpc internally. 
+ for _, fail := range []bool{false, true} { + srv, err := testutil.NewServer() + if err != nil { + t.Fatalf("creating test datastore server: %v", err) + } + dspb.RegisterDatastoreServer(srv.Gsrv, &fakeDatastoreServer{fail: fail}) + srv.Start() + conn, err := grpc.Dial(srv.Addr, grpc.WithInsecure(), EnableGRPCTracingDialOption) + if err != nil { + t.Fatalf("connecting to test datastore server: %v", err) + } + datastoreClient, err := datastore.NewClient(ctx, testProjectID, option.WithGRPCConn(conn)) + if err != nil { + t.Fatalf("creating datastore client: %v", err) + } + k := datastore.NameKey("Entity", "stringID", nil) + e := new(datastore.Entity) + datastoreClient.Get(ctx, k, e) + } + + done := make(chan struct{}) + go func() { + if synchronous { + err := span.FinishWait() + if err != nil { + t.Errorf("Unexpected error from span.FinishWait: %v", err) + } + } else { + span.Finish() + } + done <- struct{}{} + }() + if !expectTrace { + <-done + select { + case <-rt.reqc: + t.Errorf("Got a trace, expected none.") + case <-time.After(5 * time.Millisecond): + } + return nil + } else if !synchronous { + <-done + return <-rt.reqc + } else { + select { + case <-done: + t.Errorf("Synchronous Finish didn't wait for trace upload.") + return <-rt.reqc + case <-time.After(5 * time.Millisecond): + r := <-rt.reqc + <-done + return r + } + } +} + +func TestTrace(t *testing.T) { + t.Parallel() + testTrace(t, false, true) +} + +func TestTraceWithWait(t *testing.T) { + testTrace(t, true, true) +} + +func TestTraceFromHeader(t *testing.T) { + t.Parallel() + testTrace(t, false, false) +} + +func TestTraceFromHeaderWithWait(t *testing.T) { + testTrace(t, false, true) +} + +func TestNewSpan(t *testing.T) { + const traceID = "0123456789ABCDEF0123456789ABCDEF" + + rt := newFakeRoundTripper() + traceClient := newTestClient(rt) + span := traceClient.NewSpan("/foo") + span.trace.traceID = traceID + + uploaded := makeRequests(t, span, rt, true, true) + + if uploaded == nil { + t.Fatalf("No 
trace uploaded, expected one.") + } + + expected := api.Traces{ + Traces: []*api.Trace{ + { + ProjectId: testProjectID, + Spans: []*api.TraceSpan{ + { + Kind: "RPC_CLIENT", + Labels: map[string]string{ + "trace.cloud.google.com/http/host": "example.com", + "trace.cloud.google.com/http/method": "GET", + "trace.cloud.google.com/http/status_code": "200", + "trace.cloud.google.com/http/url": "http://example.com/bar", + }, + Name: "example.com/bar", + }, + { + Kind: "RPC_CLIENT", + Labels: map[string]string{ + "trace.cloud.google.com/http/host": "www.googleapis.com", + "trace.cloud.google.com/http/method": "GET", + "trace.cloud.google.com/http/status_code": "200", + "trace.cloud.google.com/http/url": "https://www.googleapis.com/compute/v1/projects/testproject/zones", + }, + Name: "www.googleapis.com/compute/v1/projects/testproject/zones", + }, + { + Kind: "RPC_CLIENT", + Labels: map[string]string{ + "trace.cloud.google.com/http/host": "www.googleapis.com", + "trace.cloud.google.com/http/method": "GET", + "trace.cloud.google.com/http/status_code": "200", + "trace.cloud.google.com/http/url": "https://www.googleapis.com/storage/v1/b/testbucket/o", + }, + Name: "www.googleapis.com/storage/v1/b/testbucket/o", + }, + &api.TraceSpan{ + Kind: "RPC_CLIENT", + Labels: nil, + Name: "/google.datastore.v1.Datastore/Lookup", + }, + &api.TraceSpan{ + Kind: "RPC_CLIENT", + Labels: map[string]string{"error": "rpc error: code = Unknown desc = lookup failed"}, + Name: "/google.datastore.v1.Datastore/Lookup", + }, + { + Kind: "RPC_SERVER", + Labels: map[string]string{}, + Name: "/foo", + }, + }, + TraceId: traceID, + }, + }, + } + + body, err := ioutil.ReadAll(uploaded.Body) + if err != nil { + t.Fatal(err) + } + var patch api.Traces + err = json.Unmarshal(body, &patch) + if err != nil { + t.Fatal(err) + } + + if len(patch.Traces) != len(expected.Traces) || len(patch.Traces[0].Spans) != len(expected.Traces[0].Spans) { + got, _ := json.Marshal(patch) + want, _ := json.Marshal(expected) + 
t.Fatalf("PatchTraces request: got %s want %s", got, want) + } + + n := len(patch.Traces[0].Spans) + rootSpan := patch.Traces[0].Spans[n-1] + for i, s := range patch.Traces[0].Spans { + if a, b := s.StartTime, s.EndTime; a > b { + t.Errorf("span %d start time is later than its end time (%q, %q)", i, a, b) + } + if a, b := rootSpan.StartTime, s.StartTime; a > b { + t.Errorf("trace start time is later than span %d start time (%q, %q)", i, a, b) + } + if a, b := s.EndTime, rootSpan.EndTime; a > b { + t.Errorf("span %d end time is later than trace end time (%q, %q)", i, a, b) + } + if i > 1 && i < n-1 { + if a, b := patch.Traces[0].Spans[i-1].EndTime, s.StartTime; a > b { + t.Errorf("span %d end time is later than span %d start time (%q, %q)", i-1, i, a, b) + } + } + } + + if x := rootSpan.ParentSpanId; x != 0 { + t.Errorf("Incorrect ParentSpanId: got %d want %d", x, 0) + } + for i, s := range patch.Traces[0].Spans { + if x, y := rootSpan.SpanId, s.ParentSpanId; i < n-1 && x != y { + t.Errorf("Incorrect ParentSpanId in span %d: got %d want %d", i, y, x) + } + } + for i, s := range patch.Traces[0].Spans { + s.EndTime = "" + labels := &expected.Traces[0].Spans[i].Labels + for key, value := range *labels { + if v, ok := s.Labels[key]; !ok { + t.Errorf("Span %d is missing Label %q:%q", i, key, value) + } else if key == "trace.cloud.google.com/http/url" { + if !strings.HasPrefix(v, value) { + t.Errorf("Span %d Label %q: got value %q want prefix %q", i, key, v, value) + } + } else if v != value { + t.Errorf("Span %d Label %q: got value %q want %q", i, key, v, value) + } + } + for key := range s.Labels { + if _, ok := (*labels)[key]; key != "trace.cloud.google.com/stacktrace" && !ok { + t.Errorf("Span %d: unexpected label %q", i, key) + } + } + *labels = nil + s.Labels = nil + s.ParentSpanId = 0 + if s.SpanId == 0 { + t.Errorf("Incorrect SpanId: got 0 want nonzero") + } + s.SpanId = 0 + s.StartTime = "" + } + if !reflect.DeepEqual(patch, expected) { + got, _ := 
json.Marshal(patch) + want, _ := json.Marshal(expected) + t.Errorf("PatchTraces request: got %s want %s", got, want) + } +} + +func testTrace(t *testing.T, synchronous bool, fromRequest bool) { + const header = `0123456789ABCDEF0123456789ABCDEF/42;o=3` + rt := newFakeRoundTripper() + traceClient := newTestClient(rt) + + span := traceClient.SpanFromHeader("/foo", header) + headerOrReqLabels := map[string]string{} + headerOrReqName := "/foo" + + if fromRequest { + req, err := http.NewRequest("GET", "http://example.com/foo", nil) + if err != nil { + t.Fatal(err) + } + req.Header.Set("X-Cloud-Trace-Context", header) + span = traceClient.SpanFromRequest(req) + headerOrReqLabels = map[string]string{ + "trace.cloud.google.com/http/host": "example.com", + "trace.cloud.google.com/http/method": "GET", + "trace.cloud.google.com/http/url": "http://example.com/foo", + } + headerOrReqName = "example.com/foo" + } + + uploaded := makeRequests(t, span, rt, synchronous, true) + if uploaded == nil { + t.Fatalf("No trace uploaded, expected one.") + } + + expected := api.Traces{ + Traces: []*api.Trace{ + { + ProjectId: testProjectID, + Spans: []*api.TraceSpan{ + { + Kind: "RPC_CLIENT", + Labels: map[string]string{ + "trace.cloud.google.com/http/host": "example.com", + "trace.cloud.google.com/http/method": "GET", + "trace.cloud.google.com/http/status_code": "200", + "trace.cloud.google.com/http/url": "http://example.com/bar", + }, + Name: "example.com/bar", + }, + { + Kind: "RPC_CLIENT", + Labels: map[string]string{ + "trace.cloud.google.com/http/host": "www.googleapis.com", + "trace.cloud.google.com/http/method": "GET", + "trace.cloud.google.com/http/status_code": "200", + "trace.cloud.google.com/http/url": "https://www.googleapis.com/compute/v1/projects/testproject/zones", + }, + Name: "www.googleapis.com/compute/v1/projects/testproject/zones", + }, + { + Kind: "RPC_CLIENT", + Labels: map[string]string{ + "trace.cloud.google.com/http/host": "www.googleapis.com", + 
"trace.cloud.google.com/http/method": "GET", + "trace.cloud.google.com/http/status_code": "200", + "trace.cloud.google.com/http/url": "https://www.googleapis.com/storage/v1/b/testbucket/o", + }, + Name: "www.googleapis.com/storage/v1/b/testbucket/o", + }, + &api.TraceSpan{ + Kind: "RPC_CLIENT", + Labels: nil, + Name: "/google.datastore.v1.Datastore/Lookup", + }, + &api.TraceSpan{ + Kind: "RPC_CLIENT", + Labels: map[string]string{"error": "rpc error: code = Unknown desc = lookup failed"}, + Name: "/google.datastore.v1.Datastore/Lookup", + }, + { + Kind: "RPC_SERVER", + Labels: headerOrReqLabels, + Name: headerOrReqName, + }, + }, + TraceId: "0123456789ABCDEF0123456789ABCDEF", + }, + }, + } + + body, err := ioutil.ReadAll(uploaded.Body) + if err != nil { + t.Fatal(err) + } + var patch api.Traces + err = json.Unmarshal(body, &patch) + if err != nil { + t.Fatal(err) + } + + if len(patch.Traces) != len(expected.Traces) || len(patch.Traces[0].Spans) != len(expected.Traces[0].Spans) { + got, _ := json.Marshal(patch) + want, _ := json.Marshal(expected) + t.Fatalf("PatchTraces request: got %s want %s", got, want) + } + + n := len(patch.Traces[0].Spans) + rootSpan := patch.Traces[0].Spans[n-1] + for i, s := range patch.Traces[0].Spans { + if a, b := s.StartTime, s.EndTime; a > b { + t.Errorf("span %d start time is later than its end time (%q, %q)", i, a, b) + } + if a, b := rootSpan.StartTime, s.StartTime; a > b { + t.Errorf("trace start time is later than span %d start time (%q, %q)", i, a, b) + } + if a, b := s.EndTime, rootSpan.EndTime; a > b { + t.Errorf("span %d end time is later than trace end time (%q, %q)", i, a, b) + } + if i > 1 && i < n-1 { + if a, b := patch.Traces[0].Spans[i-1].EndTime, s.StartTime; a > b { + t.Errorf("span %d end time is later than span %d start time (%q, %q)", i-1, i, a, b) + } + } + } + + if x := rootSpan.ParentSpanId; x != 42 { + t.Errorf("Incorrect ParentSpanId: got %d want %d", x, 42) + } + for i, s := range patch.Traces[0].Spans { + if x, 
y := rootSpan.SpanId, s.ParentSpanId; i < n-1 && x != y { + t.Errorf("Incorrect ParentSpanId in span %d: got %d want %d", i, y, x) + } + } + for i, s := range patch.Traces[0].Spans { + s.EndTime = "" + labels := &expected.Traces[0].Spans[i].Labels + for key, value := range *labels { + if v, ok := s.Labels[key]; !ok { + t.Errorf("Span %d is missing Label %q:%q", i, key, value) + } else if key == "trace.cloud.google.com/http/url" { + if !strings.HasPrefix(v, value) { + t.Errorf("Span %d Label %q: got value %q want prefix %q", i, key, v, value) + } + } else if v != value { + t.Errorf("Span %d Label %q: got value %q want %q", i, key, v, value) + } + } + for key := range s.Labels { + if _, ok := (*labels)[key]; key != "trace.cloud.google.com/stacktrace" && !ok { + t.Errorf("Span %d: unexpected label %q", i, key) + } + } + *labels = nil + s.Labels = nil + s.ParentSpanId = 0 + if s.SpanId == 0 { + t.Errorf("Incorrect SpanId: got 0 want nonzero") + } + s.SpanId = 0 + s.StartTime = "" + } + if !reflect.DeepEqual(patch, expected) { + got, _ := json.Marshal(patch) + want, _ := json.Marshal(expected) + t.Errorf("PatchTraces request: got %s \n\n want %s", got, want) + } +} + +func TestNoTrace(t *testing.T) { + testNoTrace(t, false, true) +} + +func TestNoTraceWithWait(t *testing.T) { + testNoTrace(t, true, true) +} + +func TestNoTraceFromHeader(t *testing.T) { + testNoTrace(t, false, false) +} + +func TestNoTraceFromHeaderWithWait(t *testing.T) { + testNoTrace(t, true, false) +} + +func testNoTrace(t *testing.T, synchronous bool, fromRequest bool) { + for _, header := range []string{ + `0123456789ABCDEF0123456789ABCDEF/42;o=2`, + `0123456789ABCDEF0123456789ABCDEF/42;o=0`, + `0123456789ABCDEF0123456789ABCDEF/42`, + `0123456789ABCDEF0123456789ABCDEF`, + ``, + } { + rt := newFakeRoundTripper() + traceClient := newTestClient(rt) + var span *Span + if fromRequest { + req, err := http.NewRequest("GET", "http://example.com/foo", nil) + if header != "" { + 
req.Header.Set("X-Cloud-Trace-Context", header) + } + if err != nil { + t.Fatal(err) + } + span = traceClient.SpanFromRequest(req) + } else { + span = traceClient.SpanFromHeader("/foo", header) + } + uploaded := makeRequests(t, span, rt, synchronous, false) + if uploaded != nil { + t.Errorf("Got a trace, expected none.") + } + } +} + +func TestSample(t *testing.T) { + // A deterministic test of the sampler logic. + type testCase struct { + rate float64 + maxqps float64 + want int + } + const delta = 25 * time.Millisecond + for _, test := range []testCase{ + // qps won't matter, so we will sample half of the 79 calls + {0.50, 100, 40}, + // with 1 qps and a burst of 2, we will sample twice in second #1, once in the partial second #2 + {0.50, 1, 3}, + } { + sp, err := NewLimitedSampler(test.rate, test.maxqps) + if err != nil { + t.Fatal(err) + } + s := sp.(*sampler) + sampled := 0 + tm := time.Now() + for i := 0; i < 80; i++ { + if s.sample(Parameters{}, tm, float64(i%2)).Sample { + sampled++ + } + tm = tm.Add(delta) + } + if sampled != test.want { + t.Errorf("rate=%f, maxqps=%f: got %d samples, want %d", test.rate, test.maxqps, sampled, test.want) + } + } +} + +func TestSampling(t *testing.T) { + t.Parallel() + // This scope tests sampling in a larger context, with real time and randomness. + wg := sync.WaitGroup{} + type testCase struct { + rate float64 + maxqps float64 + expectedRange [2]int + } + for _, test := range []testCase{ + {0, 5, [2]int{0, 0}}, + {5, 0, [2]int{0, 0}}, + {0.50, 100, [2]int{20, 60}}, + {0.50, 1, [2]int{3, 4}}, // Windows, with its less precise clock, sometimes gives 4. 
+ } { + wg.Add(1) + go func(test testCase) { + rt := newFakeRoundTripper() + traceClient := newTestClient(rt) + traceClient.bundler.BundleByteLimit = 1 + p, err := NewLimitedSampler(test.rate, test.maxqps) + if err != nil { + t.Fatalf("NewLimitedSampler: %v", err) + } + traceClient.SetSamplingPolicy(p) + ticker := time.NewTicker(25 * time.Millisecond) + sampled := 0 + for i := 0; i < 79; i++ { + req, err := http.NewRequest("GET", "http://example.com/foo", nil) + if err != nil { + t.Fatal(err) + } + span := traceClient.SpanFromRequest(req) + span.Finish() + select { + case <-rt.reqc: + <-ticker.C + sampled++ + case <-ticker.C: + } + } + ticker.Stop() + if test.expectedRange[0] > sampled || sampled > test.expectedRange[1] { + t.Errorf("rate=%f, maxqps=%f: got %d samples want ∈ %v", test.rate, test.maxqps, sampled, test.expectedRange) + } + wg.Done() + }(test) + } + wg.Wait() +} + +func TestBundling(t *testing.T) { + t.Parallel() + rt := newFakeRoundTripper() + traceClient := newTestClient(rt) + traceClient.bundler.DelayThreshold = time.Second / 2 + traceClient.bundler.BundleCountThreshold = 10 + p, err := NewLimitedSampler(1, 99) // sample every request. + if err != nil { + t.Fatalf("NewLimitedSampler: %v", err) + } + traceClient.SetSamplingPolicy(p) + + for i := 0; i < 35; i++ { + go func() { + req, err := http.NewRequest("GET", "http://example.com/foo", nil) + if err != nil { + t.Fatal(err) + } + span := traceClient.SpanFromRequest(req) + span.Finish() + }() + } + + // Read the first three bundles. + <-rt.reqc + <-rt.reqc + <-rt.reqc + + // Test that the fourth bundle isn't sent early. + select { + case <-rt.reqc: + t.Errorf("bundle sent too early") + case <-time.After(time.Second / 4): + <-rt.reqc + } + + // Test that there aren't extra bundles. 
+ select { + case <-rt.reqc: + t.Errorf("too many bundles sent") + case <-time.After(time.Second): + } +} + +func TestWeights(t *testing.T) { + const ( + expectedNumTraced = 10100 + numTracedEpsilon = 100 + expectedTotalWeight = 50000 + totalWeightEpsilon = 5000 + ) + rng := rand.New(rand.NewSource(1)) + const delta = 2 * time.Millisecond + for _, headerRate := range []float64{0.0, 0.5, 1.0} { + // Simulate 10 seconds of requests arriving at 500qps. + // + // The sampling policy tries to sample 25% of them, but has a qps limit of + // 100, so it will not be able to. The returned weight should be higher + // for some sampled requests to compensate. + // + // headerRate is the fraction of incoming requests that have a trace header + // set. The qps limit should not be exceeded, even if headerRate is high. + sp, err := NewLimitedSampler(0.25, 100) + if err != nil { + t.Fatal(err) + } + s := sp.(*sampler) + tm := time.Now() + totalWeight := 0.0 + numTraced := 0 + seenLargeWeight := false + for i := 0; i < 50000; i++ { + d := s.sample(Parameters{HasTraceHeader: rng.Float64() < headerRate}, tm, rng.Float64()) + if d.Trace { + numTraced++ + } + if d.Sample { + totalWeight += d.Weight + if x := int(d.Weight) / 4; x <= 0 || x >= 100 || d.Weight != float64(x)*4.0 { + t.Errorf("weight: got %f, want a small positive multiple of 4", d.Weight) + } + if d.Weight > 4 { + seenLargeWeight = true + } + } + tm = tm.Add(delta) + } + if !seenLargeWeight { + t.Errorf("headerRate %f: never saw sample weight higher than 4.", headerRate) + } + if numTraced < expectedNumTraced-numTracedEpsilon || expectedNumTraced+numTracedEpsilon < numTraced { + t.Errorf("headerRate %f: got %d traced requests, want ∈ [%d, %d]", headerRate, numTraced, expectedNumTraced-numTracedEpsilon, expectedNumTraced+numTracedEpsilon) + } + if totalWeight < expectedTotalWeight-totalWeightEpsilon || expectedTotalWeight+totalWeightEpsilon < totalWeight { + t.Errorf("headerRate %f: got total weight %f want ∈ [%d, %d]", 
headerRate, totalWeight, expectedTotalWeight-totalWeightEpsilon, expectedTotalWeight+totalWeightEpsilon) + } + } +} + +type alwaysTrace struct{} + +func (a alwaysTrace) Sample(p Parameters) Decision { + return Decision{Trace: true} +} + +type neverTrace struct{} + +func (a neverTrace) Sample(p Parameters) Decision { + return Decision{Trace: false} +} + +func TestPropagation(t *testing.T) { + rt := newFakeRoundTripper() + traceClient := newTestClient(rt) + for _, header := range []string{ + `0123456789ABCDEF0123456789ABCDEF/42;o=0`, + `0123456789ABCDEF0123456789ABCDEF/42;o=1`, + `0123456789ABCDEF0123456789ABCDEF/42;o=2`, + `0123456789ABCDEF0123456789ABCDEF/42;o=3`, + `0123456789ABCDEF0123456789ABCDEF/0;o=0`, + `0123456789ABCDEF0123456789ABCDEF/0;o=1`, + `0123456789ABCDEF0123456789ABCDEF/0;o=2`, + `0123456789ABCDEF0123456789ABCDEF/0;o=3`, + ``, + } { + for _, policy := range []SamplingPolicy{ + nil, + alwaysTrace{}, + neverTrace{}, + } { + traceClient.SetSamplingPolicy(policy) + req, err := http.NewRequest("GET", "http://example.com/foo", nil) + if err != nil { + t.Fatal(err) + } + if header != "" { + req.Header.Set("X-Cloud-Trace-Context", header) + } + + span := traceClient.SpanFromRequest(req) + + req2, err := http.NewRequest("GET", "http://example.com/bar", nil) + if err != nil { + t.Fatal(err) + } + req3, err := http.NewRequest("GET", "http://example.com/baz", nil) + if err != nil { + t.Fatal(err) + } + span.NewRemoteChild(req2) + span.NewRemoteChild(req3) + + var ( + t1, t2, t3 string + s1, s2, s3 uint64 + o1, o2, o3 uint64 + ) + fmt.Sscanf(header, "%32s/%d;o=%d", &t1, &s1, &o1) + fmt.Sscanf(req2.Header.Get("X-Cloud-Trace-Context"), "%32s/%d;o=%d", &t2, &s2, &o2) + fmt.Sscanf(req3.Header.Get("X-Cloud-Trace-Context"), "%32s/%d;o=%d", &t3, &s3, &o3) + + if header == "" { + if t2 != t3 { + t.Errorf("expected the same trace ID in child requests, got %q %q", t2, t3) + } + } else { + if t2 != t1 || t3 != t1 { + t.Errorf("trace IDs should be passed to child requests") 
+ } + } + trace := policy == alwaysTrace{} || policy == nil && (o1&1) != 0 + if header == "" { + if trace && (s2 == 0 || s3 == 0) { + t.Errorf("got span IDs %d %d in child requests, want nonzero", s2, s3) + } + if trace && s2 == s3 { + t.Errorf("got span IDs %d %d in child requests, should be different", s2, s3) + } + if !trace && (s2 != 0 || s3 != 0) { + t.Errorf("got span IDs %d %d in child requests, want zero", s2, s3) + } + } else { + if trace && (s2 == s1 || s3 == s1 || s2 == s3) { + t.Errorf("parent span IDs in input and outputs should be all different, got %d %d %d", s1, s2, s3) + } + if !trace && (s2 != s1 || s3 != s1) { + t.Errorf("parent span ID in input, %d, should have been equal to parent span IDs in output: %d %d", s1, s2, s3) + } + } + expectTraceOption := policy == alwaysTrace{} || (o1&1) != 0 + if expectTraceOption != ((o2&1) != 0) || expectTraceOption != ((o3&1) != 0) { + t.Errorf("tracing flag in child requests should be %t, got options %d %d", expectTraceOption, o2, o3) + } + } + } +} diff --git a/vendor/cloud.google.com/go/translate/internal/translate/v2/README b/vendor/cloud.google.com/go/translate/internal/translate/v2/README new file mode 100644 index 00000000..a4f22c6f --- /dev/null +++ b/vendor/cloud.google.com/go/translate/internal/translate/v2/README @@ -0,0 +1,12 @@ +translate-nov2016-api.json is a hand-modified version of translate-api.json. +It correctly reflects the API as of 2016-11-15. 
+ +Differences: + +- Change to base URL +- Addition of OAuth scopes + +To generate: + + + diff --git a/vendor/cloud.google.com/go/translate/internal/translate/v2/regen.sh b/vendor/cloud.google.com/go/translate/internal/translate/v2/regen.sh new file mode 100755 index 00000000..3aec1d82 --- /dev/null +++ b/vendor/cloud.google.com/go/translate/internal/translate/v2/regen.sh @@ -0,0 +1,29 @@ +#!/bin/bash -e + + +(cd $GOPATH/src/google.golang.org/api; make generator) + +$GOPATH/bin/google-api-go-generator \ + -api_json_file translate-nov2016-api.json \ + -api_pkg_base cloud.google.com/go/translate/internal \ + -output translate-nov2016-gen.nolicense + +cat - translate-nov2016-gen.nolicense > translate-nov2016-gen.go <" + s + "" + } + tr = translate(htmlify(test.input), test.target, nil) + if got, want := tr.Text, htmlify(test.output); got != want { + t.Errorf("html: got %q, want %q", got, want) + } + // Using the HTML format behaves the same. + tr = translate(htmlify(test.input), test.target, &Options{Format: HTML}) + if got, want := tr.Text, htmlify(test.output); got != want { + t.Errorf("html: got %q, want %q", got, want) + } + } +} + +// This tests the beta "nmt" model. 
+func TestTranslateModel(t *testing.T) { + ctx := context.Background() + c := initTest(ctx, t) + defer c.Close() + + trs, err := c.Translate(ctx, []string{"Hello"}, language.French, &Options{Model: "nmt"}) + if err != nil { + t.Fatal(err) + } + if len(trs) != 1 { + t.Fatalf("wanted one Translation, got %d", len(trs)) + } + tr := trs[0] + if got, want := tr.Text, "Bonjour"; got != want { + t.Errorf("text: got %q, want %q", got, want) + } + if got, want := tr.Model, "nmt"; got != want { + t.Errorf("model: got %q, want %q", got, want) + } +} + +func TestTranslateMultipleInputs(t *testing.T) { + ctx := context.Background() + c := initTest(ctx, t) + defer c.Close() + + inputs := []string{ + "When you're a Jet, you're a Jet all the way", + "From your first cigarette to your last dying day", + "When you're a Jet if the spit hits the fan", + "You got brothers around, you're a family man", + } + ts, err := c.Translate(ctx, inputs, language.French, nil) + if err != nil { + t.Fatal(err) + } + if got, want := len(ts), len(inputs); got != want { + t.Fatalf("got %d Translations, wanted %d", got, want) + } +} + +func TestTranslateErrors(t *testing.T) { + ctx := context.Background() + c := initTest(ctx, t) + defer c.Close() + + for _, test := range []struct { + ctx context.Context + target language.Tag + inputs []string + opts *Options + }{ + {ctx, language.English, nil, nil}, + {ctx, language.Und, []string{"input"}, nil}, + {ctx, language.English, []string{}, nil}, + {ctx, language.English, []string{"input"}, &Options{Format: "random"}}, + } { + _, err := c.Translate(test.ctx, test.inputs, test.target, test.opts) + if err == nil { + t.Errorf("%+v: got nil, want error", test) + } + } +} + +func TestDetectLanguage(t *testing.T) { + ctx := context.Background() + c := initTest(ctx, t) + defer c.Close() + ds, err := c.DetectLanguage(ctx, []string{ + "Today is Monday", + "Aujourd'hui est lundi", + }) + if err != nil { + t.Fatal(err) + } + if len(ds) != 2 { + t.Fatalf("got %d detection 
lists, want 2", len(ds)) + } + checkDetections(t, ds[0], language.English) + checkDetections(t, ds[1], language.French) +} + +func checkDetections(t *testing.T, ds []Detection, want language.Tag) { + for _, d := range ds { + if d.Language == want { + return + } + } + t.Errorf("%v: missing %s", ds, want) +} + +// A small subset of the supported languages. +var supportedLangs = []Language{ + {Name: "Danish", Tag: language.Danish}, + {Name: "English", Tag: language.English}, + {Name: "French", Tag: language.French}, + {Name: "German", Tag: language.German}, + {Name: "Greek", Tag: language.Greek}, + {Name: "Hindi", Tag: language.Hindi}, + {Name: "Hungarian", Tag: language.Hungarian}, + {Name: "Italian", Tag: language.Italian}, + {Name: "Russian", Tag: language.Russian}, + {Name: "Turkish", Tag: language.Turkish}, +} + +func TestSupportedLanguages(t *testing.T) { + ctx := context.Background() + c := initTest(ctx, t) + defer c.Close() + got, err := c.SupportedLanguages(ctx, language.English) + if err != nil { + t.Fatal(err) + } + want := map[language.Tag]Language{} + for _, sl := range supportedLangs { + want[sl.Tag] = sl + } + for _, g := range got { + w, ok := want[g.Tag] + if !ok { + continue + } + if g != w { + t.Errorf("got %+v, want %+v", g, w) + } + delete(want, g.Tag) + } + if len(want) > 0 { + t.Errorf("missing: %+v", want) + } +} diff --git a/vendor/cloud.google.com/go/vision/annotations.go b/vendor/cloud.google.com/go/vision/annotations.go new file mode 100644 index 00000000..f38c0f83 --- /dev/null +++ b/vendor/cloud.google.com/go/vision/annotations.go @@ -0,0 +1,689 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package vision + +import ( + "image" + + "golang.org/x/text/language" + pb "google.golang.org/genproto/googleapis/cloud/vision/v1" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" +) + +// Annotations contains all the annotations performed by the API on a single image. +// A nil field indicates either that the corresponding feature was not requested, +// or that annotation failed for that feature. +type Annotations struct { + // Faces holds the results of face detection. + Faces []*FaceAnnotation + // Landmarks holds the results of landmark detection. + Landmarks []*EntityAnnotation + // Logos holds the results of logo detection. + Logos []*EntityAnnotation + // Labels holds the results of label detection. + Labels []*EntityAnnotation + // Texts holds the results of text detection. + Texts []*EntityAnnotation + // FullText holds the results of full text (OCR) detection. + FullText *TextAnnotation + // SafeSearch holds the results of safe-search detection. + SafeSearch *SafeSearchAnnotation + // ImageProps contains properties of the annotated image. + ImageProps *ImageProps + // Web contains web annotations for the image. + Web *WebDetection + // CropHints contains crop hints for the image. + CropHints []*CropHint + + // If non-nil, then one or more of the attempted annotations failed. + // Non-nil annotations are guaranteed to be correct, even if Error is + // non-nil. 
+ Error error +} + +func annotationsFromProto(res *pb.AnnotateImageResponse) *Annotations { + as := &Annotations{} + for _, a := range res.FaceAnnotations { + as.Faces = append(as.Faces, faceAnnotationFromProto(a)) + } + for _, a := range res.LandmarkAnnotations { + as.Landmarks = append(as.Landmarks, entityAnnotationFromProto(a)) + } + for _, a := range res.LogoAnnotations { + as.Logos = append(as.Logos, entityAnnotationFromProto(a)) + } + for _, a := range res.LabelAnnotations { + as.Labels = append(as.Labels, entityAnnotationFromProto(a)) + } + for _, a := range res.TextAnnotations { + as.Texts = append(as.Texts, entityAnnotationFromProto(a)) + } + as.FullText = textAnnotationFromProto(res.FullTextAnnotation) + as.SafeSearch = safeSearchAnnotationFromProto(res.SafeSearchAnnotation) + as.ImageProps = imagePropertiesFromProto(res.ImagePropertiesAnnotation) + as.Web = webDetectionFromProto(res.WebDetection) + as.CropHints = cropHintsFromProto(res.CropHintsAnnotation) + if res.Error != nil { + // res.Error is a google.rpc.Status. Convert to a Go error. Use a gRPC + // error because it preserves the code as a separate field. + // TODO(jba): preserve the details field. + as.Error = grpc.Errorf(codes.Code(res.Error.Code), "%s", res.Error.Message) + } + return as +} + +// A FaceAnnotation describes the results of face detection on an image. +type FaceAnnotation struct { + // BoundingPoly is the bounding polygon around the face. The coordinates of + // the bounding box are in the original image's scale, as returned in + // ImageParams. The bounding box is computed to "frame" the face in + // accordance with human expectations. It is based on the landmarker + // results. Note that one or more x and/or y coordinates may not be + // generated in the BoundingPoly (the polygon will be unbounded) if only a + // partial face appears in the image to be annotated. 
+ BoundingPoly []image.Point + + // FDBoundingPoly is tighter than BoundingPoly, and + // encloses only the skin part of the face. Typically, it is used to + // eliminate the face from any image analysis that detects the "amount of + // skin" visible in an image. It is not based on the landmarker results, only + // on the initial face detection, hence the fd (face detection) prefix. + FDBoundingPoly []image.Point + + // Landmarks are detected face landmarks. + Face FaceLandmarks + + // RollAngle indicates the amount of clockwise/anti-clockwise rotation of + // the face relative to the image vertical, about the axis perpendicular to + // the face. Range [-180,180]. + RollAngle float32 + + // PanAngle is the yaw angle: the leftward/rightward angle that the face is + // pointing, relative to the vertical plane perpendicular to the image. Range + // [-180,180]. + PanAngle float32 + + // TiltAngle is the pitch angle: the upwards/downwards angle that the face is + // pointing relative to the image's horizontal plane. Range [-180,180]. + TiltAngle float32 + + // DetectionConfidence is the detection confidence. The range is [0, 1]. + DetectionConfidence float32 + + // LandmarkingConfidence is the face landmarking confidence. The range is [0, 1]. + LandmarkingConfidence float32 + + // Likelihoods expresses the likelihood of various aspects of the face. 
+ Likelihoods *FaceLikelihoods +} + +func faceAnnotationFromProto(pfa *pb.FaceAnnotation) *FaceAnnotation { + fa := &FaceAnnotation{ + BoundingPoly: boundingPolyFromProto(pfa.BoundingPoly), + FDBoundingPoly: boundingPolyFromProto(pfa.FdBoundingPoly), + RollAngle: pfa.RollAngle, + PanAngle: pfa.PanAngle, + TiltAngle: pfa.TiltAngle, + DetectionConfidence: pfa.DetectionConfidence, + LandmarkingConfidence: pfa.LandmarkingConfidence, + Likelihoods: &FaceLikelihoods{ + Joy: Likelihood(pfa.JoyLikelihood), + Sorrow: Likelihood(pfa.SorrowLikelihood), + Anger: Likelihood(pfa.AngerLikelihood), + Surprise: Likelihood(pfa.SurpriseLikelihood), + UnderExposed: Likelihood(pfa.UnderExposedLikelihood), + Blurred: Likelihood(pfa.BlurredLikelihood), + Headwear: Likelihood(pfa.HeadwearLikelihood), + }, + } + populateFaceLandmarks(pfa.Landmarks, &fa.Face) + return fa +} + +// An EntityAnnotation describes the results of a landmark, label, logo or text +// detection on an image. +type EntityAnnotation struct { + // ID is an opaque entity ID. Some IDs might be available in Knowledge Graph(KG). + // For more details on KG please see: + // https://developers.google.com/knowledge-graph/ + ID string + + // Locale is the language code for the locale in which the entity textual + // description (next field) is expressed. + Locale string + + // Description is the entity textual description, expressed in the language of Locale. + Description string + + // Score is the overall score of the result. Range [0, 1]. + Score float32 + + // Confidence is the accuracy of the entity detection in an image. + // For example, for an image containing the Eiffel Tower, this field represents + // the confidence that there is a tower in the query image. Range [0, 1]. + Confidence float32 + + // Topicality is the relevancy of the ICA (Image Content Annotation) label to the + // image. 
For example, the relevancy of 'tower' to an image containing + // 'Eiffel Tower' is likely higher than an image containing a distant towering + // building, though the confidence that there is a tower may be the same. + // Range [0, 1]. + Topicality float32 + + // BoundingPoly is the image region to which this entity belongs. Not filled currently + // for label detection. For text detection, BoundingPolys + // are produced for the entire text detected in an image region, followed by + // BoundingPolys for each word within the detected text. + BoundingPoly []image.Point + + // Locations contains the location information for the detected entity. + // Multiple LatLng structs can be present since one location may indicate the + // location of the scene in the query image, and another the location of the + // place where the query image was taken. Location information is usually + // present for landmarks. + Locations []LatLng + + // Properties are additional optional Property fields. + // For example a different kind of score or string that qualifies the entity. + Properties []Property +} + +func entityAnnotationFromProto(e *pb.EntityAnnotation) *EntityAnnotation { + var locs []LatLng + for _, li := range e.Locations { + locs = append(locs, latLngFromProto(li.LatLng)) + } + var props []Property + for _, p := range e.Properties { + props = append(props, propertyFromProto(p)) + } + return &EntityAnnotation{ + ID: e.Mid, + Locale: e.Locale, + Description: e.Description, + Score: e.Score, + Confidence: e.Confidence, + Topicality: e.Topicality, + BoundingPoly: boundingPolyFromProto(e.BoundingPoly), + Locations: locs, + Properties: props, + } +} + +// TextAnnotation contains a structured representation of OCR extracted text. +// The hierarchy of an OCR extracted text structure looks like: +// TextAnnotation -> Page -> Block -> Paragraph -> Word -> Symbol +// Each structural component, starting from Page, may further have its own +// properties. 
Properties describe detected languages, breaks etc. +type TextAnnotation struct { + // List of pages detected by OCR. + Pages []*Page + // UTF-8 text detected on the pages. + Text string +} + +func textAnnotationFromProto(pta *pb.TextAnnotation) *TextAnnotation { + if pta == nil { + return nil + } + var pages []*Page + for _, p := range pta.Pages { + pages = append(pages, pageFromProto(p)) + } + return &TextAnnotation{ + Pages: pages, + Text: pta.Text, + } +} + +// A Page is a page of text detected from OCR. +type Page struct { + // Additional information detected on the page. + Properties *TextProperties + // Page width in pixels. + Width int32 + // Page height in pixels. + Height int32 + // List of blocks of text, images etc on this page. + Blocks []*Block +} + +func pageFromProto(p *pb.Page) *Page { + if p == nil { + return nil + } + var blocks []*Block + for _, b := range p.Blocks { + blocks = append(blocks, blockFromProto(b)) + } + return &Page{ + Properties: textPropertiesFromProto(p.Property), + Width: p.Width, + Height: p.Height, + Blocks: blocks, + } +} + +// A Block is a logical element on the page. +type Block struct { + // Additional information detected for the block. + Properties *TextProperties + // The bounding box for the block. + // The vertices are in the order of top-left, top-right, bottom-right, + // bottom-left. When a rotation of the bounding box is detected the rotation + // is represented as around the top-left corner as defined when the text is + // read in the 'natural' orientation. + // For example: + // * when the text is horizontal it might look like: + // 0----1 + // | | + // 3----2 + // * when it's rotated 180 degrees around the top-left corner it becomes: + // 2----3 + // | | + // 1----0 + // and the vertice order will still be (0, 1, 2, 3). + BoundingBox []image.Point + // List of paragraphs in this block (if this blocks is of type text). + Paragraphs []*Paragraph + // Detected block type (text, image etc) for this block. 
+ BlockType BlockType +} + +// A BlockType represents the kind of Block (text, image, etc.) +type BlockType int + +const ( + // Unknown block type. + UnknownBlock BlockType = BlockType(pb.Block_UNKNOWN) + // Regular text block. + TextBlock BlockType = BlockType(pb.Block_TEXT) + // Table block. + TableBlock BlockType = BlockType(pb.Block_TABLE) + // Image block. + PictureBlock BlockType = BlockType(pb.Block_PICTURE) + // Horizontal/vertical line box. + RulerBlock BlockType = BlockType(pb.Block_RULER) + // Barcode block. + BarcodeBlock BlockType = BlockType(pb.Block_BARCODE) +) + +func blockFromProto(p *pb.Block) *Block { + if p == nil { + return nil + } + var paras []*Paragraph + for _, pa := range p.Paragraphs { + paras = append(paras, paragraphFromProto(pa)) + } + return &Block{ + Properties: textPropertiesFromProto(p.Property), + BoundingBox: boundingPolyFromProto(p.BoundingBox), + Paragraphs: paras, + BlockType: BlockType(p.BlockType), + } +} + +// A Paragraph is a structural unit of text representing a number of words in +// certain order. +type Paragraph struct { + // Additional information detected for the paragraph. + Properties *TextProperties + // The bounding box for the paragraph. + // The vertices are in the order of top-left, top-right, bottom-right, + // bottom-left. When a rotation of the bounding box is detected the rotation + // is represented as around the top-left corner as defined when the text is + // read in the 'natural' orientation. + // For example: + // * when the text is horizontal it might look like: + // 0----1 + // | | + // 3----2 + // * when it's rotated 180 degrees around the top-left corner it becomes: + // 2----3 + // | | + // 1----0 + // and the vertice order will still be (0, 1, 2, 3). + BoundingBox []image.Point + // List of words in this paragraph. 
+ Words []*Word +} + +func paragraphFromProto(p *pb.Paragraph) *Paragraph { + if p == nil { + return nil + } + var words []*Word + for _, w := range p.Words { + words = append(words, wordFromProto(w)) + } + return &Paragraph{ + Properties: textPropertiesFromProto(p.Property), + BoundingBox: boundingPolyFromProto(p.BoundingBox), + Words: words, + } +} + +// A Word is a word in a text document. +type Word struct { + // Additional information detected for the word. + Properties *TextProperties + // The bounding box for the word. + // The vertices are in the order of top-left, top-right, bottom-right, + // bottom-left. When a rotation of the bounding box is detected the rotation + // is represented as around the top-left corner as defined when the text is + // read in the 'natural' orientation. + // For example: + // * when the text is horizontal it might look like: + // 0----1 + // | | + // 3----2 + // * when it's rotated 180 degrees around the top-left corner it becomes: + // 2----3 + // | | + // 1----0 + // and the vertice order will still be (0, 1, 2, 3). + BoundingBox []image.Point + // List of symbols in the word. + // The order of the symbols follows the natural reading order. + Symbols []*Symbol +} + +func wordFromProto(p *pb.Word) *Word { + if p == nil { + return nil + } + var syms []*Symbol + for _, s := range p.Symbols { + syms = append(syms, symbolFromProto(s)) + } + return &Word{ + Properties: textPropertiesFromProto(p.Property), + BoundingBox: boundingPolyFromProto(p.BoundingBox), + Symbols: syms, + } +} + +// A Symbol is a symbol in a text document. +type Symbol struct { + // Additional information detected for the symbol. + Properties *TextProperties + // The bounding box for the symbol. + // The vertices are in the order of top-left, top-right, bottom-right, + // bottom-left. When a rotation of the bounding box is detected the rotation + // is represented as around the top-left corner as defined when the text is + // read in the 'natural' orientation. 
+ // For example: + // * when the text is horizontal it might look like: + // 0----1 + // | | + // 3----2 + // * when it's rotated 180 degrees around the top-left corner it becomes: + // 2----3 + // | | + // 1----0 + // and the vertice order will still be (0, 1, 2, 3). + BoundingBox []image.Point + // The actual UTF-8 representation of the symbol. + Text string +} + +func symbolFromProto(p *pb.Symbol) *Symbol { + if p == nil { + return nil + } + return &Symbol{ + Properties: textPropertiesFromProto(p.Property), + BoundingBox: boundingPolyFromProto(p.BoundingBox), + Text: p.Text, + } +} + +// TextProperties contains additional information about an OCR structural component. +type TextProperties struct { + // A list of detected languages together with confidence. + DetectedLanguages []*DetectedLanguage + // Detected start or end of a text segment. + DetectedBreak *DetectedBreak +} + +// Detected language for a structural component. +type DetectedLanguage struct { + // The BCP-47 language code, such as "en-US" or "sr-Latn". + Code language.Tag + // The confidence of the detected language, in the range [0, 1]. + Confidence float32 +} + +// DetectedBreak is the detected start or end of a structural component. +type DetectedBreak struct { + // The type of break. + Type DetectedBreakType + // True if break prepends the element. + IsPrefix bool +} + +type DetectedBreakType int + +const ( + // Unknown break label type. + UnknownBreak = DetectedBreakType(pb.TextAnnotation_DetectedBreak_UNKNOWN) + // Regular space. + SpaceBreak = DetectedBreakType(pb.TextAnnotation_DetectedBreak_SPACE) + // Sure space (very wide). + SureSpaceBreak = DetectedBreakType(pb.TextAnnotation_DetectedBreak_SURE_SPACE) + // Line-wrapping break. + EOLSureSpaceBreak = DetectedBreakType(pb.TextAnnotation_DetectedBreak_EOL_SURE_SPACE) + // End-line hyphen that is not present in text; does not co-occur with SPACE, LEADER_SPACE, or LINE_BREAK. 
+ HyphenBreak = DetectedBreakType(pb.TextAnnotation_DetectedBreak_HYPHEN) + // Line break that ends a paragraph. + LineBreak = DetectedBreakType(pb.TextAnnotation_DetectedBreak_LINE_BREAK) +) + +func textPropertiesFromProto(p *pb.TextAnnotation_TextProperty) *TextProperties { + var dls []*DetectedLanguage + for _, dl := range p.DetectedLanguages { + tag, _ := language.Parse(dl.LanguageCode) + // Ignore error. If err != nil the returned tag will not be garbage, + // but a best-effort attempt at a parse. At worst it will be + // language.Und, the documented "undefined" Tag. + dls = append(dls, &DetectedLanguage{Code: tag, Confidence: dl.Confidence}) + } + var db *DetectedBreak + if p.DetectedBreak != nil { + db = &DetectedBreak{ + Type: DetectedBreakType(p.DetectedBreak.Type), + IsPrefix: p.DetectedBreak.IsPrefix, + } + } + return &TextProperties{ + DetectedLanguages: dls, + DetectedBreak: db, + } +} + +// SafeSearchAnnotation describes the results of a SafeSearch detection on an image. +type SafeSearchAnnotation struct { + // Adult is the likelihood that the image contains adult content. + Adult Likelihood + + // Spoof is the likelihood that an obvious modification was made to the + // image's canonical version to make it appear funny or offensive. + Spoof Likelihood + + // Medical is the likelihood that this is a medical image. + Medical Likelihood + + // Violence is the likelihood that this image represents violence. + Violence Likelihood +} + +func safeSearchAnnotationFromProto(s *pb.SafeSearchAnnotation) *SafeSearchAnnotation { + if s == nil { + return nil + } + return &SafeSearchAnnotation{ + Adult: Likelihood(s.Adult), + Spoof: Likelihood(s.Spoof), + Medical: Likelihood(s.Medical), + Violence: Likelihood(s.Violence), + } +} + +// ImageProps describes properties of the image itself, like the dominant colors. +type ImageProps struct { + // DominantColors describes the dominant colors of the image. 
+ DominantColors []*ColorInfo +} + +func imagePropertiesFromProto(ip *pb.ImageProperties) *ImageProps { + if ip == nil || ip.DominantColors == nil { + return nil + } + var cinfos []*ColorInfo + for _, ci := range ip.DominantColors.Colors { + cinfos = append(cinfos, colorInfoFromProto(ci)) + } + return &ImageProps{DominantColors: cinfos} +} + +// WebDetection contains relevant information for the image from the Internet. +type WebDetection struct { + // Deduced entities from similar images on the Internet. + WebEntities []*WebEntity + // Fully matching images from the Internet. + // They're definite neardups and most often a copy of the query image with + // merely a size change. + FullMatchingImages []*WebImage + // Partial matching images from the Internet. + // Those images are similar enough to share some key-point features. For + // example an original image will likely have partial matching for its crops. + PartialMatchingImages []*WebImage + // Web pages containing the matching images from the Internet. + PagesWithMatchingImages []*WebPage +} + +func webDetectionFromProto(p *pb.WebDetection) *WebDetection { + if p == nil { + return nil + } + var ( + wes []*WebEntity + fmis, pmis []*WebImage + wps []*WebPage + ) + for _, e := range p.WebEntities { + wes = append(wes, webEntityFromProto(e)) + } + for _, m := range p.FullMatchingImages { + fmis = append(fmis, webImageFromProto(m)) + } + for _, m := range p.PartialMatchingImages { + pmis = append(fmis, webImageFromProto(m)) + } + for _, g := range p.PagesWithMatchingImages { + wps = append(wps, webPageFromProto(g)) + } + return &WebDetection{ + WebEntities: wes, + FullMatchingImages: fmis, + PartialMatchingImages: pmis, + PagesWithMatchingImages: wps, + } +} + +// A WebEntity is an entity deduced from similar images on the Internet. +type WebEntity struct { + // Opaque entity ID. + ID string + // Overall relevancy score for the entity. + // Not normalized and not comparable across different image queries. 
+ Score float32 + // Canonical description of the entity, in English. + Description string +} + +func webEntityFromProto(p *pb.WebDetection_WebEntity) *WebEntity { + return &WebEntity{ + ID: p.EntityId, + Score: p.Score, + Description: p.Description, + } +} + +// WebImage contains metadata for online images. +type WebImage struct { + // The result image URL. + URL string + // Overall relevancy score for the image. + // Not normalized and not comparable across different image queries. + Score float32 +} + +func webImageFromProto(p *pb.WebDetection_WebImage) *WebImage { + return &WebImage{ + URL: p.Url, + Score: p.Score, + } +} + +// A WebPage contains metadata for web pages. +type WebPage struct { + // The result web page URL. + URL string + // Overall relevancy score for the web page. + // Not normalized and not comparable across different image queries. + Score float32 +} + +func webPageFromProto(p *pb.WebDetection_WebPage) *WebPage { + return &WebPage{ + URL: p.Url, + Score: p.Score, + } +} + +// CropHint is a single crop hint that is used to generate a new crop when +// serving an image. +type CropHint struct { + // The bounding polygon for the crop region. The coordinates of the bounding + // box are in the original image's scale, as returned in `ImageParams`. + BoundingPoly []image.Point + // Confidence of this being a salient region. Range [0, 1]. + Confidence float32 + // Fraction of importance of this salient region with respect to the original + // image. 
+ ImportanceFraction float32 +} + +func cropHintsFromProto(p *pb.CropHintsAnnotation) []*CropHint { + if p == nil { + return nil + } + var chs []*CropHint + for _, pch := range p.CropHints { + chs = append(chs, cropHintFromProto(pch)) + } + return chs +} + +func cropHintFromProto(pch *pb.CropHint) *CropHint { + return &CropHint{ + BoundingPoly: boundingPolyFromProto(pch.BoundingPoly), + Confidence: pch.Confidence, + ImportanceFraction: pch.ImportanceFraction, + } +} diff --git a/vendor/cloud.google.com/go/vision/apiv1/README.md b/vendor/cloud.google.com/go/vision/apiv1/README.md new file mode 100644 index 00000000..bcfa08dd --- /dev/null +++ b/vendor/cloud.google.com/go/vision/apiv1/README.md @@ -0,0 +1,9 @@ +Auto-generated vision v1 clients +================================= + +This package includes auto-generated clients for the vision v1 API. + +Use the handwritten client (in the parent directory, +cloud.google.com/go/vision) in preference to this. + +This code is EXPERIMENTAL and subject to CHANGE AT ANY TIME. diff --git a/vendor/cloud.google.com/go/vision/apiv1/doc.go b/vendor/cloud.google.com/go/vision/apiv1/doc.go new file mode 100644 index 00000000..a1dfbd75 --- /dev/null +++ b/vendor/cloud.google.com/go/vision/apiv1/doc.go @@ -0,0 +1,37 @@ +// Copyright 2017, Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. 
+ +// Package vision is an experimental, auto-generated package for the +// vision API. +// +// Integrates Google Vision features, including image labeling, face, logo, +// and landmark detection, optical character recognition (OCR), and detection +// of explicit content, into applications. +// +// Use the client at cloud.google.com/go/vision in preference to this. +package vision // import "cloud.google.com/go/vision/apiv1" + +import ( + "golang.org/x/net/context" + "google.golang.org/grpc/metadata" +) + +func insertXGoog(ctx context.Context, val string) context.Context { + md, _ := metadata.FromContext(ctx) + md = md.Copy() + md["x-goog-api-client"] = []string{val} + return metadata.NewContext(ctx, md) +} diff --git a/vendor/cloud.google.com/go/vision/apiv1/image_annotator_client.go b/vendor/cloud.google.com/go/vision/apiv1/image_annotator_client.go new file mode 100644 index 00000000..1b014f79 --- /dev/null +++ b/vendor/cloud.google.com/go/vision/apiv1/image_annotator_client.go @@ -0,0 +1,134 @@ +// Copyright 2017, Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. 
+ +package vision + +import ( + "time" + + "cloud.google.com/go/internal/version" + gax "github.com/googleapis/gax-go" + "golang.org/x/net/context" + "google.golang.org/api/option" + "google.golang.org/api/transport" + visionpb "google.golang.org/genproto/googleapis/cloud/vision/v1" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" +) + +// ImageAnnotatorCallOptions contains the retry settings for each method of ImageAnnotatorClient. +type ImageAnnotatorCallOptions struct { + BatchAnnotateImages []gax.CallOption +} + +func defaultImageAnnotatorClientOptions() []option.ClientOption { + return []option.ClientOption{ + option.WithEndpoint("vision.googleapis.com:443"), + option.WithScopes( + "https://www.googleapis.com/auth/cloud-platform", + ), + } +} + +func defaultImageAnnotatorCallOptions() *ImageAnnotatorCallOptions { + retry := map[[2]string][]gax.CallOption{ + {"default", "idempotent"}: { + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.3, + }) + }), + }, + } + return &ImageAnnotatorCallOptions{ + BatchAnnotateImages: retry[[2]string{"default", "idempotent"}], + } +} + +// ImageAnnotatorClient is a client for interacting with Google Cloud Vision API. +type ImageAnnotatorClient struct { + // The connection to the service. + conn *grpc.ClientConn + + // The gRPC API client. + imageAnnotatorClient visionpb.ImageAnnotatorClient + + // The call options for this service. + CallOptions *ImageAnnotatorCallOptions + + // The metadata to be sent with each request. + xGoogHeader string +} + +// NewImageAnnotatorClient creates a new image annotator client. +// +// Service that performs Google Cloud Vision API detection tasks over client +// images, such as face, landmark, logo, label, and text detection. The +// ImageAnnotator service returns detected entities from the images. 
+func NewImageAnnotatorClient(ctx context.Context, opts ...option.ClientOption) (*ImageAnnotatorClient, error) { + conn, err := transport.DialGRPC(ctx, append(defaultImageAnnotatorClientOptions(), opts...)...) + if err != nil { + return nil, err + } + c := &ImageAnnotatorClient{ + conn: conn, + CallOptions: defaultImageAnnotatorCallOptions(), + + imageAnnotatorClient: visionpb.NewImageAnnotatorClient(conn), + } + c.SetGoogleClientInfo() + return c, nil +} + +// Connection returns the client's connection to the API service. +func (c *ImageAnnotatorClient) Connection() *grpc.ClientConn { + return c.conn +} + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *ImageAnnotatorClient) Close() error { + return c.conn.Close() +} + +// SetGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *ImageAnnotatorClient) SetGoogleClientInfo(keyval ...string) { + kv := append([]string{"gl-go", version.Go()}, keyval...) + kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", "") + c.xGoogHeader = gax.XGoogHeader(kv...) +} + +// BatchAnnotateImages run image detection and annotation for a batch of images. +func (c *ImageAnnotatorClient) BatchAnnotateImages(ctx context.Context, req *visionpb.BatchAnnotateImagesRequest) (*visionpb.BatchAnnotateImagesResponse, error) { + ctx = insertXGoog(ctx, c.xGoogHeader) + var resp *visionpb.BatchAnnotateImagesResponse + err := gax.Invoke(ctx, func(ctx context.Context) error { + var err error + resp, err = c.imageAnnotatorClient.BatchAnnotateImages(ctx, req) + return err + }, c.CallOptions.BatchAnnotateImages...) 
+ if err != nil { + return nil, err + } + return resp, nil +} diff --git a/vendor/cloud.google.com/go/vision/apiv1/image_annotator_client_example_test.go b/vendor/cloud.google.com/go/vision/apiv1/image_annotator_client_example_test.go new file mode 100644 index 00000000..3702eb31 --- /dev/null +++ b/vendor/cloud.google.com/go/vision/apiv1/image_annotator_client_example_test.go @@ -0,0 +1,51 @@ +// Copyright 2017, Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package vision_test + +import ( + "cloud.google.com/go/vision/apiv1" + "golang.org/x/net/context" + visionpb "google.golang.org/genproto/googleapis/cloud/vision/v1" +) + +func ExampleNewImageAnnotatorClient() { + ctx := context.Background() + c, err := vision.NewImageAnnotatorClient(ctx) + if err != nil { + // TODO: Handle error. + } + // TODO: Use client. + _ = c +} + +func ExampleImageAnnotatorClient_BatchAnnotateImages() { + ctx := context.Background() + c, err := vision.NewImageAnnotatorClient(ctx) + if err != nil { + // TODO: Handle error. + } + + req := &visionpb.BatchAnnotateImagesRequest{ + // TODO: Fill request struct fields. + } + resp, err := c.BatchAnnotateImages(ctx, req) + if err != nil { + // TODO: Handle error. + } + // TODO: Use resp. 
+ _ = resp +} diff --git a/vendor/cloud.google.com/go/vision/apiv1/mock_test.go b/vendor/cloud.google.com/go/vision/apiv1/mock_test.go new file mode 100644 index 00000000..0cc6a61e --- /dev/null +++ b/vendor/cloud.google.com/go/vision/apiv1/mock_test.go @@ -0,0 +1,149 @@ +// Copyright 2017, Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// AUTO-GENERATED CODE. DO NOT EDIT. + +package vision + +import ( + visionpb "google.golang.org/genproto/googleapis/cloud/vision/v1" +) + +import ( + "flag" + "io" + "log" + "net" + "os" + "testing" + + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes" + "golang.org/x/net/context" + "google.golang.org/api/option" + status "google.golang.org/genproto/googleapis/rpc/status" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" +) + +var _ = io.EOF +var _ = ptypes.MarshalAny +var _ status.Status + +type mockImageAnnotatorServer struct { + // Embed for forward compatibility. + // Tests will keep working if more methods are added + // in the future. + visionpb.ImageAnnotatorServer + + reqs []proto.Message + + // If set, all calls return this error. 
+ err error + + // responses to return if err == nil + resps []proto.Message +} + +func (s *mockImageAnnotatorServer) BatchAnnotateImages(_ context.Context, req *visionpb.BatchAnnotateImagesRequest) (*visionpb.BatchAnnotateImagesResponse, error) { + s.reqs = append(s.reqs, req) + if s.err != nil { + return nil, s.err + } + return s.resps[0].(*visionpb.BatchAnnotateImagesResponse), nil +} + +// clientOpt is the option tests should use to connect to the test server. +// It is initialized by TestMain. +var clientOpt option.ClientOption + +var ( + mockImageAnnotator mockImageAnnotatorServer +) + +func TestMain(m *testing.M) { + flag.Parse() + + serv := grpc.NewServer() + visionpb.RegisterImageAnnotatorServer(serv, &mockImageAnnotator) + + lis, err := net.Listen("tcp", "localhost:0") + if err != nil { + log.Fatal(err) + } + go serv.Serve(lis) + + conn, err := grpc.Dial(lis.Addr().String(), grpc.WithInsecure()) + if err != nil { + log.Fatal(err) + } + clientOpt = option.WithGRPCConn(conn) + + os.Exit(m.Run()) +} + +func TestImageAnnotatorBatchAnnotateImages(t *testing.T) { + var expectedResponse *visionpb.BatchAnnotateImagesResponse = &visionpb.BatchAnnotateImagesResponse{} + + mockImageAnnotator.err = nil + mockImageAnnotator.reqs = nil + + mockImageAnnotator.resps = append(mockImageAnnotator.resps[:0], expectedResponse) + + var requests []*visionpb.AnnotateImageRequest = nil + var request = &visionpb.BatchAnnotateImagesRequest{ + Requests: requests, + } + + c, err := NewImageAnnotatorClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.BatchAnnotateImages(context.Background(), request) + + if err != nil { + t.Fatal(err) + } + + if want, got := request, mockImageAnnotator.reqs[0]; !proto.Equal(want, got) { + t.Errorf("wrong request %q, want %q", got, want) + } + + if want, got := expectedResponse, resp; !proto.Equal(want, got) { + t.Errorf("wrong response %q, want %q)", got, want) + } +} + +func 
TestImageAnnotatorBatchAnnotateImagesError(t *testing.T) { + errCode := codes.Internal + mockImageAnnotator.err = grpc.Errorf(errCode, "test error") + + var requests []*visionpb.AnnotateImageRequest = nil + var request = &visionpb.BatchAnnotateImagesRequest{ + Requests: requests, + } + + c, err := NewImageAnnotatorClient(context.Background(), clientOpt) + if err != nil { + t.Fatal(err) + } + + resp, err := c.BatchAnnotateImages(context.Background(), request) + + if c := grpc.Code(err); c != errCode { + t.Errorf("got error code %q, want %q", c, errCode) + } + _ = resp +} diff --git a/vendor/cloud.google.com/go/vision/doc.go b/vendor/cloud.google.com/go/vision/doc.go new file mode 100644 index 00000000..2f69580c --- /dev/null +++ b/vendor/cloud.google.com/go/vision/doc.go @@ -0,0 +1,102 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/* +Package vision provides a client for the Google Cloud Vision API. + +Google Cloud Vision allows easy integration of vision detection features +into developer applications, including image labeling, face and landmark +detection, optical character recognition (OCR), and tagging of explicit +content. For more information about Cloud Vision, read the Google Cloud Vision API +Documentation at https://cloud.google.com/vision/docs. + +Note: This package is in beta. Some backwards-incompatible changes may occur. 
+ +Creating Images + +The Cloud Vision API supports a variety of image file formats, including JPEG, +PNG8, PNG24, Animated GIF (first frame only), and RAW. See +https://cloud.google.com/vision/docs/image-best-practices#image_types for the +complete list of formats. Be aware that Cloud Vision sets upper limits on file +size as well as on the total combined size of all images in a request. Reducing +your file size can significantly improve throughput; however, be careful not to +reduce image quality in the process. See +https://cloud.google.com/vision/docs/image-best-practices#image_sizing for +current file size limits. + +Creating an Image instance does not perform an API request. + +Use NewImageFromReader to obtain an image from any io.Reader, such as an open file: + + f, err := os.Open("path/to/image.jpg") + if err != nil { ... } + defer f.Close() + img, err := vision.NewImageFromReader(f) + if err != nil { ... } + +Use NewImageFromURI to refer to an image in Google Cloud Storage or a public URL: + + img := vision.NewImageFromURI("gs://my-bucket/my-image.png") + +Annotating Images + +Client.Annotate is the most general method in the package. It can run multiple +detections on multiple images with a single API call. + +To describe the detections you want to perform on an image, create an +AnnotateRequest and specify the maximum number of results to return for each +detection of interest. The exceptions are safe search and image properties, +where a boolean is used instead. + + resultSlice, err := client.Annotate(ctx, &vision.AnnotateRequest{ + Image: img, + MaxLogos: 5, + MaxTexts: 100, + SafeSearch: true, + }) + if err != nil { ... } + +You can pass as many AnnotateRequests as desired to client.Annotate. The return +value is a slice of an Annotations. Each Annotations value may contain an Error +along with one or more successful results. The failed detections will have a nil annotation. + + result := resultSlice[0] + if result.Error != nil { ... 
} // some detections failed + for _, logo := range result.Logos { ... } + for _, text := range result.Texts { ... } + if result.SafeSearch != nil { ... } + +Other methods on Client run a single detection on a single image. For instance, +Client.DetectFaces will run face detection on the provided Image. These methods +return a single annotation of the appropriate type (for example, DetectFaces +returns a FaceAnnotation). The error return value incorporates both API call +errors and the detection errors stored in Annotations.Error, simplifying your +logic. + + faces, err := client.DetectFaces(ctx, 10) // maximum of 10 faces + if err != nil { ... } + +Here faces is a slice of FaceAnnotations. The Face field of each FaceAnnotation +provides easy access to the positions of facial features: + + fmt.Println(faces[0].Face.Nose.Tip) + fmt.Println(faces[0].Face.Eyes.Left.Pupil) + + +Authentication + +See examples of authorization and authentication at +https://godoc.org/cloud.google.com/go#pkg-examples. +*/ +package vision // import "cloud.google.com/go/vision" diff --git a/vendor/cloud.google.com/go/vision/examples_test.go b/vendor/cloud.google.com/go/vision/examples_test.go new file mode 100644 index 00000000..a63d6368 --- /dev/null +++ b/vendor/cloud.google.com/go/vision/examples_test.go @@ -0,0 +1,99 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package vision_test + +import ( + "fmt" + "os" + + "cloud.google.com/go/vision" + "golang.org/x/net/context" +) + +func ExampleNewClient() { + ctx := context.Background() + client, err := vision.NewClient(ctx) + if err != nil { + // TODO: handle error. + } + // Use the client. + + // Close the client when finished. + if err := client.Close(); err != nil { + // TODO: handle error. + } +} + +func Example_NewImageFromReader() { + f, err := os.Open("path/to/image.jpg") + if err != nil { + // TODO: handle error. + } + img, err := vision.NewImageFromReader(f) + if err != nil { + // TODO: handle error. + } + fmt.Println(img) +} + +func Example_NewImageFromURI() { + img := vision.NewImageFromURI("gs://my-bucket/my-image.png") + fmt.Println(img) +} + +func ExampleClient_Annotate_oneImage() { + ctx := context.Background() + client, err := vision.NewClient(ctx) + if err != nil { + // TODO: handle error. + } + annsSlice, err := client.Annotate(ctx, &vision.AnnotateRequest{ + Image: vision.NewImageFromURI("gs://my-bucket/my-image.png"), + MaxLogos: 100, + MaxTexts: 100, + SafeSearch: true, + }) + if err != nil { + // TODO: handle error. + } + anns := annsSlice[0] + if anns.Logos != nil { + fmt.Println(anns.Logos) + } + if anns.Texts != nil { + fmt.Println(anns.Texts) + } + if anns.SafeSearch != nil { + fmt.Println(anns.SafeSearch) + } + if anns.Error != nil { + fmt.Printf("at least one of the features failed: %v", anns.Error) + } +} + +func ExampleClient_DetectFaces() { + ctx := context.Background() + client, err := vision.NewClient(ctx) + if err != nil { + // TODO: handle error. + } + img := vision.NewImageFromURI("gs://my-bucket/my-image.png") + faces, err := client.DetectFaces(ctx, img, 10) + if err != nil { + // TODO: handle error. 
+ } + fmt.Println(faces[0].Face.Nose.Tip) + fmt.Println(faces[0].Face.Eyes.Left.Pupil) +} diff --git a/vendor/cloud.google.com/go/vision/face.go b/vendor/cloud.google.com/go/vision/face.go new file mode 100644 index 00000000..a7de3e5c --- /dev/null +++ b/vendor/cloud.google.com/go/vision/face.go @@ -0,0 +1,172 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package vision + +import ( + "log" + + "github.com/golang/geo/r3" + pb "google.golang.org/genproto/googleapis/cloud/vision/v1" +) + +// FaceLandmarks contains the positions of facial features detected by the service. +// TODO(jba): write doc for all +type FaceLandmarks struct { + Eyebrows Eyebrows + Eyes Eyes + Ears Ears + Nose Nose + Mouth Mouth + Chin Chin + Forehead *r3.Vector +} + +type Eyebrows struct { + Left, Right Eyebrow +} + +type Eyebrow struct { + Top, Left, Right *r3.Vector +} + +type Eyes struct { + Left, Right Eye +} + +type Eye struct { + Left, Right, Top, Bottom, Center, Pupil *r3.Vector +} + +type Ears struct { + Left, Right *r3.Vector +} + +type Nose struct { + Left, Right, Top, Bottom, Tip *r3.Vector +} + +type Mouth struct { + Left, Center, Right, UpperLip, LowerLip *r3.Vector +} + +type Chin struct { + Left, Center, Right *r3.Vector +} + +// FaceLikelihoods expresses the likelihood of various aspects of a face. +type FaceLikelihoods struct { + // Joy is the likelihood that the face expresses joy. 
+ Joy Likelihood + + // Sorrow is the likelihood that the face expresses sorrow. + Sorrow Likelihood + + // Anger is the likelihood that the face expresses anger. + Anger Likelihood + + // Surprise is the likelihood that the face expresses surprise. + Surprise Likelihood + + // UnderExposed is the likelihood that the face is under-exposed. + UnderExposed Likelihood + + // Blurred is the likelihood that the face is blurred. + Blurred Likelihood + + // Headwear is the likelihood that the face has headwear. + Headwear Likelihood +} + +func populateFaceLandmarks(landmarks []*pb.FaceAnnotation_Landmark, face *FaceLandmarks) { + for _, lm := range landmarks { + pos := &r3.Vector{ + X: float64(lm.Position.X), + Y: float64(lm.Position.Y), + Z: float64(lm.Position.Z), + } + switch lm.Type { + case pb.FaceAnnotation_Landmark_LEFT_OF_LEFT_EYEBROW: + face.Eyebrows.Left.Left = pos + case pb.FaceAnnotation_Landmark_RIGHT_OF_LEFT_EYEBROW: + face.Eyebrows.Left.Right = pos + case pb.FaceAnnotation_Landmark_LEFT_OF_RIGHT_EYEBROW: + face.Eyebrows.Right.Left = pos + case pb.FaceAnnotation_Landmark_RIGHT_OF_RIGHT_EYEBROW: + face.Eyebrows.Right.Right = pos + case pb.FaceAnnotation_Landmark_LEFT_EYEBROW_UPPER_MIDPOINT: + face.Eyebrows.Left.Top = pos + case pb.FaceAnnotation_Landmark_RIGHT_EYEBROW_UPPER_MIDPOINT: + face.Eyebrows.Right.Top = pos + case pb.FaceAnnotation_Landmark_MIDPOINT_BETWEEN_EYES: + face.Nose.Top = pos + case pb.FaceAnnotation_Landmark_NOSE_TIP: + face.Nose.Tip = pos + case pb.FaceAnnotation_Landmark_UPPER_LIP: + face.Mouth.UpperLip = pos + case pb.FaceAnnotation_Landmark_LOWER_LIP: + face.Mouth.LowerLip = pos + case pb.FaceAnnotation_Landmark_MOUTH_LEFT: + face.Mouth.Left = pos + case pb.FaceAnnotation_Landmark_MOUTH_RIGHT: + face.Mouth.Right = pos + case pb.FaceAnnotation_Landmark_MOUTH_CENTER: + face.Mouth.Center = pos + case pb.FaceAnnotation_Landmark_NOSE_BOTTOM_RIGHT: + face.Nose.Right = pos + case pb.FaceAnnotation_Landmark_NOSE_BOTTOM_LEFT: + face.Nose.Left = 
pos + case pb.FaceAnnotation_Landmark_NOSE_BOTTOM_CENTER: + face.Nose.Bottom = pos + case pb.FaceAnnotation_Landmark_LEFT_EYE: + face.Eyes.Left.Center = pos + case pb.FaceAnnotation_Landmark_RIGHT_EYE: + face.Eyes.Right.Center = pos + case pb.FaceAnnotation_Landmark_LEFT_EYE_TOP_BOUNDARY: + face.Eyes.Left.Top = pos + case pb.FaceAnnotation_Landmark_LEFT_EYE_RIGHT_CORNER: + face.Eyes.Left.Right = pos + case pb.FaceAnnotation_Landmark_LEFT_EYE_BOTTOM_BOUNDARY: + face.Eyes.Left.Bottom = pos + case pb.FaceAnnotation_Landmark_LEFT_EYE_LEFT_CORNER: + face.Eyes.Left.Left = pos + case pb.FaceAnnotation_Landmark_RIGHT_EYE_TOP_BOUNDARY: + face.Eyes.Right.Top = pos + case pb.FaceAnnotation_Landmark_RIGHT_EYE_RIGHT_CORNER: + face.Eyes.Right.Right = pos + case pb.FaceAnnotation_Landmark_RIGHT_EYE_BOTTOM_BOUNDARY: + face.Eyes.Right.Bottom = pos + case pb.FaceAnnotation_Landmark_RIGHT_EYE_LEFT_CORNER: + face.Eyes.Right.Left = pos + case pb.FaceAnnotation_Landmark_LEFT_EYE_PUPIL: + face.Eyes.Left.Pupil = pos + case pb.FaceAnnotation_Landmark_RIGHT_EYE_PUPIL: + face.Eyes.Right.Pupil = pos + case pb.FaceAnnotation_Landmark_LEFT_EAR_TRAGION: + face.Ears.Left = pos + case pb.FaceAnnotation_Landmark_RIGHT_EAR_TRAGION: + face.Ears.Right = pos + case pb.FaceAnnotation_Landmark_FOREHEAD_GLABELLA: + face.Forehead = pos + case pb.FaceAnnotation_Landmark_CHIN_GNATHION: + face.Chin.Center = pos + case pb.FaceAnnotation_Landmark_CHIN_LEFT_GONION: + face.Chin.Left = pos + case pb.FaceAnnotation_Landmark_CHIN_RIGHT_GONION: + face.Chin.Right = pos + default: + log.Printf("vision: ignoring unknown face annotation landmark %s", lm.Type) + } + } +} diff --git a/vendor/cloud.google.com/go/vision/geometry.go b/vendor/cloud.google.com/go/vision/geometry.go new file mode 100644 index 00000000..35f90b8e --- /dev/null +++ b/vendor/cloud.google.com/go/vision/geometry.go @@ -0,0 +1,36 @@ +// Copyright 2016 Google Inc. All Rights Reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package vision + +import ( + "image" + + pb "google.golang.org/genproto/googleapis/cloud/vision/v1" +) + +func pointFromProto(v *pb.Vertex) image.Point { + return image.Point{X: int(v.X), Y: int(v.Y)} +} + +func boundingPolyFromProto(b *pb.BoundingPoly) []image.Point { + if b == nil { + return nil + } + var ps []image.Point + for _, v := range b.Vertices { + ps = append(ps, pointFromProto(v)) + } + return ps +} diff --git a/vendor/cloud.google.com/go/vision/image.go b/vendor/cloud.google.com/go/vision/image.go new file mode 100644 index 00000000..03bf579f --- /dev/null +++ b/vendor/cloud.google.com/go/vision/image.go @@ -0,0 +1,91 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package vision + +import ( + "io" + "io/ioutil" + + pb "google.golang.org/genproto/googleapis/cloud/vision/v1" +) + +// An Image represents the contents of an image to run detection algorithms on, +// along with metadata. Images may be described by their raw bytes, or by a +// reference to a a Google Cloude Storage (GCS) object. +type Image struct { + // Exactly one of content and gcsURI will be non-zero. + content []byte // raw image bytes + uri string // URI of the form "gs://BUCKET/OBJECT", or public URL + + // Rect is a rectangle on the Earth's surface represented by the + // image. It is optional. + Rect *LatLngRect + + // LanguageHints is a list of languages to use for text detection. In most + // cases, leaving this field nil yields the best results since it enables + // automatic language detection. For languages based on the Latin alphabet, + // setting LanguageHints is not needed. In rare cases, when the language of + // the text in the image is known, setting a hint will help get better + // results (although it will be a significant hindrance if the hint is + // wrong). Text detection returns an error if one or more of the specified + // languages is not one of the supported languages (See + // https://cloud.google.com/translate/v2/translate-reference#supported_languages). + LanguageHints []string +} + +// NewImageFromReader reads the bytes of an image from rc, then closes rc. +// +// You may optionally set Rect and LanguageHints on the returned Image before +// using it. +func NewImageFromReader(r io.ReadCloser) (*Image, error) { + bytes, err := ioutil.ReadAll(r) + if err != nil { + return nil, err + } + if err := r.Close(); err != nil { + return nil, err + } + return &Image{content: bytes}, nil +} + +// NewImageFromURI returns an image that refers to an object in Google Cloud Storage +// (when the uri is of the form "gs://BUCKET/OBJECT") or at a public URL. 
+// +// You may optionally set Rect and LanguageHints on the returned Image before +// using it. +func NewImageFromURI(uri string) *Image { + return &Image{uri: uri} +} + +// toProtos converts the Image to the two underlying API protos it represents, +// pb.Image and pb.ImageContext. +func (img *Image) toProtos() (*pb.Image, *pb.ImageContext) { + var pimg *pb.Image + switch { + case img.content != nil: + pimg = &pb.Image{Content: img.content} + case img.uri != "": + pimg = &pb.Image{Source: &pb.ImageSource{ImageUri: img.uri}} + } + + var pctx *pb.ImageContext + if img.Rect != nil || len(img.LanguageHints) > 0 { + pctx = &pb.ImageContext{ + LatLongRect: img.Rect.toProto(), + LanguageHints: img.LanguageHints, + } + } + return pimg, pctx +} diff --git a/vendor/cloud.google.com/go/vision/image_test.go b/vendor/cloud.google.com/go/vision/image_test.go new file mode 100644 index 00000000..0aa554d1 --- /dev/null +++ b/vendor/cloud.google.com/go/vision/image_test.go @@ -0,0 +1,41 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package vision + +import ( + "reflect" + "testing" + + pb "google.golang.org/genproto/googleapis/cloud/vision/v1" +) + +func TestImageToProtos(t *testing.T) { + const url = "https://www.example.com/test.jpg" + langHints := []string{"en", "fr"} + img := NewImageFromURI("https://www.example.com/test.jpg") + img.LanguageHints = langHints + + goti, gotc := img.toProtos() + wanti := &pb.Image{Source: &pb.ImageSource{ImageUri: url}} + if !reflect.DeepEqual(goti, wanti) { + t.Errorf("got %+v, want %+v", goti, wanti) + } + wantc := &pb.ImageContext{ + LanguageHints: langHints, + } + if !reflect.DeepEqual(gotc, wantc) { + t.Errorf("got %+v, want %+v", gotc, wantc) + } +} diff --git a/vendor/cloud.google.com/go/vision/latlng.go b/vendor/cloud.google.com/go/vision/latlng.go new file mode 100644 index 00000000..d553a369 --- /dev/null +++ b/vendor/cloud.google.com/go/vision/latlng.go @@ -0,0 +1,58 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package vision + +import ( + pb "google.golang.org/genproto/googleapis/cloud/vision/v1" + llpb "google.golang.org/genproto/googleapis/type/latlng" +) + +// A LatLng is a point on the Earth's surface, represented with a latitude and longitude. +type LatLng struct { + // Lat is the latitude in degrees. It must be in the range [-90.0, +90.0]. + Lat float64 + // Lng is the longitude in degrees. It must be in the range [-180.0, +180.0]. 
+ Lng float64 +} + +func (l LatLng) toProto() *llpb.LatLng { + return &llpb.LatLng{ + Latitude: l.Lat, + Longitude: l.Lng, + } +} + +func latLngFromProto(ll *llpb.LatLng) LatLng { + return LatLng{ + Lat: ll.Latitude, + Lng: ll.Longitude, + } +} + +// A LatLngRect is a rectangular area on the Earth's surface, represented by a +// minimum and maximum latitude and longitude. +type LatLngRect struct { + Min, Max LatLng +} + +func (r *LatLngRect) toProto() *pb.LatLongRect { + if r == nil { + return nil + } + return &pb.LatLongRect{ + MinLatLng: r.Min.toProto(), + MaxLatLng: r.Max.toProto(), + } +} diff --git a/vendor/cloud.google.com/go/vision/testdata/README.md b/vendor/cloud.google.com/go/vision/testdata/README.md new file mode 100644 index 00000000..fd3ea341 --- /dev/null +++ b/vendor/cloud.google.com/go/vision/testdata/README.md @@ -0,0 +1,16 @@ +The following files were copied from https://github.com/GoogleCloudPlatform/cloud-vision/tree/master/data: +cat.jpg +face.jpg +faulkner.jpg +mountain.jpg +no-text.jpg + +eiffel-tower.jpg is from +https://commons.wikimedia.org/wiki/File:Tour_Eiffel_Wikimedia_Commons_(cropped).jpg. + +google.png is from the Google home page: +https://www.google.com/images/branding/googlelogo/1x/googlelogo_color_272x92dp.png. 
+ + + + diff --git a/vendor/cloud.google.com/go/vision/testdata/cat.jpg b/vendor/cloud.google.com/go/vision/testdata/cat.jpg new file mode 100644 index 0000000000000000000000000000000000000000..76af906f0a3433637f9f608e33b6d4accb043a3a GIT binary patch literal 122667 zcmaI6bx>Px@Gc(Qic6uzoghUE0fM_j5-8eIpjdG$P@GWQy|^VLxJzk)(jvtn@WEPK zf|Q~K3jFxq`@8?$xjS>_J!kekyEA9b?7PqN?EQcDOMqwq9xg8N|J#WEw-G-qqz@M{ z2?+@i5g9otG5NziF%BL+J|O`iF(D!G|Lh?Df5-a2c<*}w)Wigz@b&R{#Szk6aQhw!NtQT zAbjwNQva{acn?8w9&$Vc2jEcS;?Z!3;VT={+7fVjMbL?t{^626UV3V@@PGJ6=CT1^ZgZb+bPvtob!uZs8Vit0A43SPUl#&yhE2Qk~l_P2+> zJeIic0V}XnY=v$xvNdsngM9xxz2@Y6DEBH+t>$_l(%vs6 zN2Mch`xJ9^^w_zdrzplrZH5I!I;T7Q;!F9u7TN5V}IgNFH z+Gkj!7U|udW7$${Pj+jG7af0rV$HMbJ{%GH2&Q}+SpRnqxCw)7-!PyXH#^d1|7qL< zhJ1ZOl#+2L{!-09N8BwrGRubs8OmhLGkf(fe6|~Us^#2AaejCZRP%kQgDrLx7F+9a zOAj`mq+zdrId~6{%DE{nW_=?c{GP)g%4QpyO_X;^KK*VGbc8hXU1bjEe3ue-W6=d~ ztJ=aQI{>78SGxCXIB>x?rm)a$8Ud{|LmMgr{cYFY(H%TMzLyo9kjat$E!85 z0C~3;`+Ee5c5C}}sOm*y_sQ^-gs?l=>%?@7Yr8LQrwpkl4BCc9Rt7qgHoA+ANGsr9 zm;i??4Rp?3x|!OSP?jIJ&vv%+w=+h}`4}y{Jkyu+1YqH`zoQ-Uit~4h*G9VhL|m1a z_QdW?oaeA=*SxYRxs>W6PAqiyfQ(VX5$MlR#knx;yu8Sx zxabnf6{%KHQ@@}X>7$6R{FNGaABv|)m;FgChxL4VG6@N!3mx2)aoO{tg~hua+q?C`cuF$T>D(F*351yBRI6;<19URnlRv6mbK*5Y zfSeSOlztW_x3s_z=WTQNF}X3bDI5qNTO)r`_HN z0~XJBiVl*4(6aHpmZv<=hUz`gZk3=|t_!o6mKXo!@o@+9I4=fr?1)PIJ2N+<+bkO4 zDK|sEiyLH5JIL}vfsaNiMwC6B6?wehKM6(+RLRzff{ZmHqT(zVEL7V*P(S>$&%PG& zFD^X}{kb2YGii0g$7ELfMewnxwdv66t;T1Ohbx_ zuIo0tQJ2@OsRGht6Y9xZzZP>b$8#yii?}zMYlkw5qMr>bXx2YBuIBWS{kZc87PA`M zefG%naa}vv_rIn!Ezca^nvGj53iW6G$js-^nAJ`7dTLazqT-^GTB?)p6*_+-{PA_e zVL_XlRoTa&f!alaz$NM0eMe+OwHBHHRsl;jT`oh5(qV}Rt^>555ANQ|MB3?V>pXE&RF+ZU zSZ1EN-KC>%PDG4t&FsRHpz11Bgt|HU8i*aOGg-TQ#biF0-K1%6NxlGpXzT^B{IfQrEuCT zzY8fpAm>>u5X)`wh-CPn=S}^40wS8PJ|22obt$pmqj(B`XtE3tZ0s#tVkrDNd>GlD zx-h*@69eb??%(hT2Jw6w*!UuGwgp7zYp6Zyz1ZJBF>`*@O;i%KV)ps1W{KDy5LFr2 zJ&0@Qpi#?i=yYl6r=_R>z6W%g*OyPcAg0fB?s)yasTte6ljYxP^7~o+tiRUJqez_J zQ_f>L9~YvlWd4w@m{DHnv4o}bwrt0FaP7@Q>WdgN;LTpvY+x#5*9EW 
zl>~*N6G@J$KB|nL(ct_}9`+-8xzing@*YKN87hCbp|k*X+llMXo?a@F)wBwp^hiIOs@ z>NUFyE2b~r1}eEG^d$Q^h4CQ?xi_%MmQhlRY$QjrA840Z18EAMIoJFX#Lrr4KqSfK z3c7%B;1!aH5KH<)THeq}mZ0v2B9lIRIc66!V&$4IbCdV8XeI-vvKo7+umGKNnGqNq zZ=fTYB6`z}K%{yEYrtMtv&KVpZSd_|YPJiVM%kPg9`+6$ZgOG!G&fhDlS5sjlvgyx z!yYigD_2Ghf`Bq>D15@zwu!Si;#o0Somv1}T}zCXJzSU(UeQHWS$oC5z@&jKur-b; zxlPT3bk`FXCmX#&?)Epj89*7d@-!B6qn~gZHUYU5o$nm{aMjN50RnUX`La@;lFI$# zYbbSYTloHd29HH9l@DCe+#H@{X>Qi7 zgl^5*W9V!iDYVOZA^Eiec|e3k#*8cFu_@x;JUl_?z!tGKnS+bwJ%e|9{*($w@G_g z9_yZH_d7kiL5HN~e?kPQm67k5OrJOiY}5NlF3gWzqY_0`z&ct4X3ac<@*>sa$_eux z8yJIf*A76{`aH>R#Ae=zV#OVOnN`f}0wG!8u7b{XX_OcXQTvs7HvOl6j#tIUt>+<= zGO-&epN+3O%>go98VVtD8gMG34ymYp?@Z@b)+Jf;|j6=V7273;RFcr&pIO1FYYqB@#zcgqX$ES3|2e zYuyZzOyyjQ^&13z-BfS+sAucLV0FR@=Jw%sr!y1V&g=Mp76J=Z^*^~XPpW_Gko)?* z_kvMEf{?QGN@QG-c|zJ^Wo4BlHOzfqw$i+pml9+V6RjHu!gT~4fI~;ny-WZ`7E`R< z@(ZHgjAbvt^|jDPlzpJ~M(+8!#dSm!=R<)JZ9++&7uzkBy{mOFtr}+9P#oWN$tx01 z?p7N~AN8G}LMfx9dKZ!%;l>+)08^J&0Rk_N6wsk26>|ea*JXgTlpU1p_{vfqcyjtJ zZ9(YXqNivG6vnp2q7hsgSft?MSMr3aD^;N7>F9+{O#!mBsMz<>&Vs0PWafM*+T7&L z_|M_7P|85v#Qq)8KfSkUE)@XgUM-)-1@pI2Y#t}@uhpcF<44D7EjXUn5Qk@85}#{G zl)YuG_V9!grd(Nl*V?9=7Wo)d<{v^WTSRCisjc;-7pxQYqK-V7s)O~%3!G1tA5_FO zB~FaI(aLasS!1GVT$&c%ERbH!e;W(bqWE+lFn!E2 zgbpp3p)waH+x!MNQ(?Z8=MapaO09_NJU!i_q53#6K}@-nCaH7QqcR74fquJ00xLhO zI~t5_9T9I8~S7Wc9)_H}4x5r7~Tq_UuVcvQc8h>MVpdrnUp3x*`{w7?fQ>!6o&tW?O z6XuWAR(@w~kPPB?cwT6bS}itKc~n)&&3$tl)-MakGh$=rm*UIhKw53FDEUb+soWXS zeV^tN@WCs{QwkBfd3D5fxciK|_Z$6qUKrA5UF)uAT>&M>}XlWJJO^UB5h z#5!_LrsF0t8gSIY3(k%?@fK~bIW*VPEnk6l{1MXfP)Ojc-ACz-5`2I*b|PJyx2k0A zAMmB6!Azm?PiMVJNtF}gf9M5hh^&HXTv*vQN?Ny(JYpf(jq?Y5`%G757d-!Y#;xRi z4qNf3B$@n9odWz34QBAvp^kh#S6%B0*RBn9*U-y1lOo&%$Ir;El*tZ$GIpn8`hM+; zl>f5KvIqJbn&FN`5?i;@z;!Z#q~dgL`XwA{yi}!xR4s1I&9P>}1&ETLt=0;XoF~g^3!#SIZr*zZw<(8(o1ow`Ofc!n2c#ebcdAs0)#wy5`0|n5eQLMb zls(T3JG+GjkB%JlgxmwN5AFdX_W(<3^`^oXfp#c;F`_B4bC8a;>knWLzrfgTo^=z% zQtvhN{PAl6cH~cpV7rTY0Am&+C9~P}xg!}k%Rjw!p}*WBbzHn%*2i6&^vBCYR9jZG 
znj<`P^s8O9v3scaUV=^4yut3s*`?cxewBU9#Fw$arF?IPWjv~|f@g+rc(aM{GS9_7 zwPNr(sKX7}R$P}P(?)*OkReEKv-%WXoH1P^_p7GiG4fM+gY~z<4Toa{y-`bd*>Xt# ztJ}X8&s*1`Pw|rP5=cUe*z{OQiWi*BVP4KoTst#Uc92>S4POP?LdpZ=j1}7jo4ril zf5xZCzoHhK6O($tic|E&k*FjYJ^nXE)A`+BGu%0J1chrMolCg==vUPg_1$whpxv`m ztwjsNtlkT@Qx-}NTUXOFpz%H6mp%sB^u%TqgJ3(^k5~yfK6M>kZUzsior%r88>zVf z5Z~El`siIbc}S`eg8JEUm{`JDGi6tgye)=`IB^hTR6)rSmTQ4IOhCkL8irkG<*5~wckDC;iD~qRvxvbh zYUSgYkLyjVw1zjft8+5uqnIpp)L|EwItbnR1sl4-(aJBZrKH~il`fNItDhAqgzhTk zDb2R!>qfJ_GucV?nkPdU7o4kD(^3yY=>;51aGtKKdBe{gpr!2RyK((K5{EaX?TkQt zLqu7t&I_gq<}#_PgOmVpy48_FTwRE|UUk_|<;#c4t!5l|#X;AzaOau%XMq;wKVsMP zC^!TPO%-6YCOKnfkA&B3ljf4XKU3zBO4*IA^`*Ji(G`mhqitG)E0(l9SB^$>jEIVR zcJ3>=fo}L~nIG{yW>hoaF|$|8@3-O;&6h_eGfUmv+oIm9_>6?-N z7NtK`b`|t3K--k8N+JtGiLHo`F?mw@K2{z$pl!%k?=U8zP`&p8k~YnQ*N~LL?paXn z8IGW<6A$+@npV0lxd%`^$?Yu>4@>=Mz?J#5x+o!cJcQ~|fv>6Rs4_2z<-;(LNE4sw zJ5O)|SS7B!p?5j3=y66%+kkg7ikABxz~k_?;rWWjM{Su%?Lh%Qzvn3q-xCw#Jip9KjaHYRBaw~8%Q30Dch%^|MNl6-#1 z+&{H0y@0tBs;JK7Y>}1nBdX`|Ye(KnpbJyhw3Z4Ev^_X{#gOZjO&C zii}r_#~C~Iy4IZEwM$k6OT7$9z+UM-xm=z z)oO_0ai85{ZVi$}ktTIdX}Pb3{PfAXxTr{CC#p3awUpgoB?t3aV{pPVCJen59$^17 zU4&c-6@}tR*2FiyDPi9?vi`RW-^3RvE^hB!;D-W{hRh=MhC{@BwBg{e7g;mkM82G~ zoRivrEl*yK#Zn>~p><9jYZVaGZ>*D(!<&ewHq4Kx74!KLYr)`~yo>>5EV}ztSz@Iz zU9RPU_w*raF8VBwzDdDnfmM#7HP=8@EG;a_c^@b;?g8^`upHzRA0J{`vFkht-edmG z?PY>`!>$*>>^Kg-5A(^V!$Vw0E)QElL{U=oIu17Ere?qEX_Flb!{Ik%)NZrIg6qK6 zFJp|$q6W`7HB%8cbILMq$eiidNl&n%1H#97TP_iY#y;)r3YC+^scM%TLa$QOz4)_O z{e{Hjt?_)u1Dq0!VB5L57Ur=xS|fjKsK`CoS)Tl5NbB!TgBhi6wFlFk1A)xii;nRi z)Dmfvkfh@j<-j2HCDC6nb#yqJ4sj704PhY#j_*Z1;C`4u#0-L&gLNnmLTc@4XqX?Q z4$zSei3g18#6a)xU9~-Ps1<(s&Xb&7RVri~hWlu$@m_ankAiPZHKkor~2WWj+RGpm+yY+xYr~y?=HI67lJIG4K zR%xCG%R~K|a7LicI|9=5kaRJtwaxg&bE)9b0+o6y z+c?Y)Hcq<53mkpV>0&A)s7ck;4EV_gc+;f(o(~IF({cBGH%7$zXsPL7lYhg<5&e?W zw9b%095RbM{h}`u_D*_S(*s1|0NDv1f&h`5L7W!XhX;OSd&0-Mq*}Ltjzg#3wP0)B5hAh)r~Fq`J%yE0+gG28%|u(xTf$zw@QEdpG-)KqwHoi$lnOe7s{oEm5EWJ}OOn|EKk_kS%x 
z{k=b%{7rA0a|)WhS6aL|-?~f-Orwnj_7VC0C=6+@kLD*cah8ioxGKY?N^xxJ49%vEH0-v{CNa6#W_=O3>sv-K zZaxL|uOdsrf+BNBO4=2tdFRC78gvdo=?JEaa({`TAgDb&H}?hN?rsPgw0^e#!+3?N zt-#^ssshuVPYUK7^NNE*K9rb4rA9J;YG}zs;AJo#wYuYJZD5<~YKGZknb_3Lxxqn# z`IfmeVk;ucj8TJXrpdgyEHe6jEG3$dtdz%x`sjS8KAMa`7YY&y`9$i&Ik0T`bS~dR z^MjuE-H>x^+$U@QK;*Ok{cl_06VII^*S~+0@Z$>tt%z(uWaF}V?56|DFO@Lbe?7?U z*o1RTLRAO#dkV=0j63?Cl7rSNJY{`~56KYuF;X{ib|s#gFJD~4)4F)Y4POlH_Rue> z1qB;~!Llrt>=Kjz3}`BP_TM2cIeiz+{*+djefg{7 zC%+MOVdjz+u|%{tqVJDGbkMRHU*k=wK8tO?nwWZ;Cm;t%V@AshuD|itbRR;=hNQ+M z|F_#^7akEV$wQzy4xWOpdzU6|%!bUc&w#?K%C-1`_y%b9ATHwXkTjMp|EUy%;W^{f zb(~E@2h)~FJ7?-xk))=0pMSe`CbI#?#=dn+ z7>=?k%?Z?VB^nh7(U_0HBeonr#TaR`Oq_Ih`M+`xsFP|@(zvlU@!J~$3j+P{w59ra zev*3tNs|8~MyokQ2~Q#POIfyb79|&A7^E7N+Tm;6DGH-b6V&G?Uls|3kGTL7`x72f zBDrPx~coj1#=J$YBiU7XN*n*R2 zamTI>*qCvv-kFk)X>1ytvlvDWhA5fm@-TWUMuQc7DJn^Jwcc)tXUI+rv&-#ao+k|Y z_F=4w-8?`vCQQ0wn4n?`XQH*vTP69`6NMg?T-G=1rEE6zL6~hmZb512kAgL(f-t%A z(u!4g(k)WOu0}^h4y$Nu?LodQ+Hmd#5w7MzQ>*0CGjc-Qoik>-?xet4s%19ygqOx4 z2HMF#H1#I+wSdBSGj1ptXvk(pXXYyaFS82bz#A@pw31zvdIi(WT`;z;(aGq>$QTQ#vpj8{#$SNlP&1_nPii(U89EXJDBb4tES!Qaf zDfQgZ$b75!_p*apT}`3uh#sf&Zu_krH>TlB_rKr+w*Qp_NVs%ZEfzSDcVN1Jq#>i6HHgN zUB6SgH53bg(wTixOpF9FrfHu2L^D-%FdF5HE5wr`Re6<4jf&%6pSbE9z~s%_A781c zd;C4!Ul5;0q-vgGGg)S9>~9BV4qUeR6q^=;xrLs}2KFzs@|+MS)&_QLpYYGNzh&}T z)KuN}K!?2${E9VAq2OBor=0zPaRsp9$xJ0`XfC+CZ1TY^jwyCGD2V#h#?Gw0YO%A* z9Cqj{gV(y&C^jAH+Wzrx-?oVJ;dzmac~@bB=c3-f)s&{Q$8@8th_vv=I_Ic#J3z7O z{(|4vM3)*skndF}n&xVZooUw_O!+eZ%DE?aEn%aU&bu|TJtw|VNSbyYiO-DrV^A|#>Dsp$umCim&RtIroj4stE@lZmH*{olTVzvFtgVJ8AfqCV#R!K<#XKyN&|RF(iDW4t0eG^n>qa{fq zw)r+%-K)7v8~-d$(Sesvswv#oX!aQ*c8i7K9i)ROg=*g)K6 zq-=1#Ai_L9S~4VTMF0Hpb0zSr2SG8W_NWD~LaNDHoXF6sW>7RETGis_X!C>i_7uxV z_Mj)P;+|*i=KKRA(j9JnvJT(w;`H}V?>UmqSeo?j{I2?Rik74wF@*j{pD#Fm0)ECO z)1_#C&BGBlCYVrFOG;qj1a$(xtZJ@O*Qjgq1wAi^`M;{Jd}(x?HBW}UCU{Hm$B=Z{ zA;jbBJR@2DcNWblVe9_pR>wVzj0NyTgfj7vN;spDF_J@uESj4ONUu|<3cJw+0>{cT zA@e2w!y5+m{cql|b+Ye}}u_NC{@KyyH$ 
zdwBB=L%shDyfCP5V&Sc6D1&f-4i6)0-m$BlnS05ErWWIFrcq#CW``QaBzE>xx)9nr zVBLNMZ5cIo?c3qX0WtZSFqhWehy(hd7!A)XWh1SN>-TdZcp?!K3r^60)Yt;w!`qW) zCvlnctjD)GicjapPI475P%|j*iQyM59vQUZC=+)(meWh)JF|qgEO8>XiKgkYznna8 zrYhdNewT4LApF#k`uD)-AV50szk{LJ{5Q6UFYLGTRi7#- zw1(MuvVvX6w0z!Z;-#@zm<2rE^pdxJ<2HH^@POs{ynZt87D?nFfT|4A$G zIcOeZ*4&~Vu%D>Eu$DjjY~!*%O_I#3v>T#;N?MYZPB!Ed+dF8fuPu*T1keOg^Ofd8 zMV_d+iOFBr#V%?P*glO?(~8>#R~q|H;1a#eS$JzRn}@%#btAO;J#PDv{lCGZ>MWx= zG2RgTdw_ivW)l!rxMkH@ewZvvpo1PFT2#=+`hJWLpHRtMm^fSz%f!LMXnswuz7aym zho$%mh1rkP?4K!(+F`5^n)=Iz9HyQA_S>ODSSUZ+;4(T9sa&l7YBtNNh4F9-G{yx; zEEz`OK1-2dJgQiCAmmliC*vjc`Hu$Mac(uJLv1yv`Q=q)`wNXnz85MxRq3jRO?zC? zzY;U8>hN1G6%C3|E^S;#Mv%NKURh*ftpkqt<4&>hohlh^H!3WU}qIo4K5Or_!)!Q{B(arg~>A_A{Y?vVud{VwL`N#Uq|i z{H~c(F@$xbsycvi?P~$ifpYRbW&Nps&dpVUG5rcpBOP|rNu=>1V{!gt0?M#5J9QIY z6^8Dvd>0o2nb$5!BhK&%#usNfa$PxDA}p!9pdwW`sXl_p!$OmQFKs0hE6 zBM`?dX4}sM8!|81?^Bf0tBaf{BmY?}Icdh=XB;H2S_~KBXO^u-3_-eb-eNmT#rph@ z3U!cLf!i`h%$#PRFI&p5>tUds75&HF)YF`yza)j2437a|+IZnIXb@bIJ(BGksNrrkhI4i2!Jbi~4 z`%dS{*S%O~e|g}!Uk>vS>wo4-UmU6;hOY!_YsAu@Og2)K{a&E2%?s56M>-~V%r0+? 
z^prb0Y|_f37d2xBe*7Ubsj4<8Xf1%V7$?jgGUMtp(`qV&`i^W&1mZ*2hAE zkB{4sbnC!VD%b-NIN^fbf}0>`D_@tJXj#x+wFy*0O)X|WNDJD4eq`)V?iIdp07sFE z2snsGdA2#mQknM1wh6Sjz358INUZ&P4SUmkiNlg+*-Qx2 z4VsilKhsV>lG^+I^yNE+wxrt9Iwm&HN}?e z_nk>g*Q8Q`Vf!t5J_%=?$S`fqVLJ6u7~DNcY{EBwRYMlB)Q;D%v*2)3bM3Sk$8?b9 z<-tC@!JZOkFnqR@!K}t(hDd%iXyDoPUfL>TaNN&=o@eRe`?`fClZSRP>1YQUFO6M^ z4{E)BSD9SIj(e__3yfGpqD3 z$=?kF1S)pV_1rr`yNOlncW3I{mY_nyka!-6tg&MrILYlT=XMIYjru z>f%C;A)f~Kv50r%maMyj%17gADu@eT?iQDDguaBh5lvDVy|*@y_1m}GS+O5RTlie0 zbFn7gRF8Ryw}c@d)Sg~tu1?XI@MV2a77ohpZs!;1(_kXdHa2C0_38x)AkQaT zBRN_Il=um$X%}z;%BD$04`NlB&aUg6RAz{xyJ(W?;8J|izD0bBzQWbR*FwTJ9|C-# z3)r#>PQ-4ezV3+S6YYbJ>MFygrA+dZ?DZO=FkIcIq^&Qu+0-Wjj0y(fHnrk%0fRFF zp?R1>F=8HSBtAd#cT;#0Vgb)WCG~*UgiAaLV5hwX8Xr*O{2NhDocr@iz2x>21+PNr z4~WhTSs^)o#J`O%WkZ$2HW~e0lL(1C>LNLv1XXG47M!i}yF{JwH+jkE0FzF;pF82) zr=;jsqp7D}%gD(jMU7mF-tACVi=IQ^JNMvca$cy;K}PB+zn=0UyK{Y$b<9!2HEc$P4)aK1h$3uQWVIXKw2gPRldHvPC^1Fz{>FrRf}Uq0R-Fdy zf@xH6Mu1UjVyX8l!5k3*$Xr(W|BSd0ptCXPY^e0xf_CBMPg~`4=jBth7Mby{)oE&4 zeNT2!trn>&oiJ2)erxod>&&?ha>z{KiM~nuN0E(bqfVFnSN#qS@E#M?c@5Tt z!TK9x)~A2Z9W))B#p`fKkfXJ7MEt)GVmcP@tZF{Yh_~nWs*Km_@;F53l+*IJ(e?C^ zxd=Nd64o2bm@h_&agnLSfXo;3Qf*&K)4J65bb4_Y2sAi3E9_Z#-KBfevglBj?Y5HR z9&G&M=_*~;tTK}Q%=4oCX*tHukgVKWP4k3d%YUd?`QJ?qU)Q6cTD1I($0xRWjL#)Z zfxBVgq}b&^Q=mV0F-+YwE^1F9LB#oGEs%oi>eST6;Y)9Z8fDJu-l=KAF!LjkV)~J< z!>pDAb9Fuxi%D+{Q;+Sv<>)uDmT9x@SbW2VGn-4}CW=%`iGO`ZpTqoA*#TM9=9>g2kG<3})*h!cnMJ+VJ5@rrDJ#VB5JBD0*iym9k^s=g{Djg<0fdY2IT}TRS~~*Ybvh zsNmv32}9C)(jI`;9u5zb!uFw*H*WZg%D&oZh;-q|h1(fLUSU4<$S zD&^|#oo7)*MzVOS6_L7B;7-}Ku)A@5J z+0M$Bh7|b8!&PG&Xuu*+C10ixtx*yl}W5(6c9b!KIKVgzPjVMHKM&lUKZ!oT&EBO!q5WW;8L zTq4qQ@SybYrdF~cj1s9ztQ?N;mzbZt`^5X&$5u(TdrE9}iB@0);9Ta*;Yu}kP!;7& zfn28fZ`!N>%}{?TWcW%@l%3*pPbvxAc8aa5kZ{q`P}{Wr&{~g#Iy?kcPj1?EMBo{otGh8sxY;j%d&DhNX7C#?~a3M3&XrKkkK;HP254(@R< ziy9LF3pXGS2v)Y`Q0eI8xfL|=Xx2unGl-mUP2F+kK!L5k|C%AgjBLw>rE@KWOT8XW zJv=ibVyw5y<;_Gu!XJ-IBdn~Vk~N%(T!cS~q{HnxLfyUG&X-)GPt!DCGuZ8$%G7mY5c8S??q2it<}EL|$n^ 
zIqaZLqV$FSEvhif5nUVAiHxk!A)wU9ESFv;gP2;ngpv1E5P#^rpy)tnKj|CT#}RL& z@q7A;(sDx#L9lfnJzrH*msa4ZQtQS_kRV#ryn{7C!lderN^6;!2tlPA@O5RZ*`e&U zXESzs!3d6UfQ)9z*83@~W|fy&Q~tmJ&p#~F$%2);V>x^-$@iR-HQRycyo}y8vp1D1 zE3mEu3R?LZ&nuSa$?pu?(0mnsmj`IENZeKxBD}k~Pj72JrFO^rJTy_@oWyx#3v}BK zv~4K(OOeUuT_4(eD*xZRLn-BDZq4MxsKDV?zB6(J`qK?)G8LlJBzYi3C)7n=} z)tj(C;|#(xop`aE$6Z&C$hKbrxM?#Y&NBoVumiC}`)ph8)ZpM@mM^eEq*0L;b-3JZ zP_2aqEZ`8p92w-`uslnIWTBMz-5Z(fY)^gS#k9@$53Ye$iU1_To-xf!UL{r>iCx|U z6xXb44pzN+X_${H)GlvAAUQNOR?0Ml3&TVAfQpatQL%mUqs7N{q@Th$14+xB;%FD# zWa%EgV$whQ4Hj%DA@UI7E>I*FgU07)|bI-(WWZnZ($idh2k_Ffpd>m}%oHj&% zCdH|!lB|$*iz-dO;J8C3(Uxodj8+i^bk4W(j<$aov*K1JPIztCgxhMKc^a1-mZ!2R zcB!$N_ssNc8!a_3M%E#ufi`Woma~nHP&JN7NF@ z4e{^9*z}HR#;dvzHRp(l4HhWq3)HgXP!L5ulZo_<-4~1sJ0xbj%?#EtA`1u&vzHKz zRax67T?^BcnvCf6v(_3O0Y*~e3)hU(GE>d)CmbVt1J8^ubP{=?VGgmxZ$ct>==4Qp zAjbW&k9=V`&`tv+&40sOz?Up@9k@H5aV45Hu~zG%m9uwuXhB>Y_+Sm2AKlXx)ngO9 zzxK&g#cN9Qv=cP>r^|v_>`RKHob+PEFYW;J&?vK1DQ( z2iLsJHPuQzyVZ`Flm@PZB!f+yQAVY~O>%XCEPtl(vR;YpGK*4wz<pxq~qupAdOE zZKh3|So!GY6@+UxS%3dx{0k%6!xvv;TN+iamM*Tn75FhkY(;|(x>&OVTHxccY?@jQ zqOaC&`EMaW;>rZ=Y7qM>?n)rKN2R||j63e|cKRN0qE=YTXg+r_5tt#`_ST6o7@=}r zo6LRE^CrqMwc#MI1OD6Bk{HJ;F!<|sh^x7!k_Gu|YX(I9v$~>`l+;6|v(0vRQ5VdL z>QLXuNJW@}_$ySZIsz@I@}y9&vfqxzWQ(R&66G7;)L#|>a^9masadD3aZ%PT3^DuK z{xCqY3!5MO1oG;?4*O6?_cGbNSh+QnKE3&DXUJ?hqkxBmIEOo2ghQJw70{`h$=d-B z9md8tI-N^yOg-Srg>FII;e8j(4>6ngsUJEKFD?u%TA1r38eQlEq|9B2#5|bhPi3UY zB7OQ_knYcXlSL{Uq6TVFy?dPmKa3kut`cPK&Y~iF2y?pb^{&1j9=jWwE+1yprgOy` z8cfwOxXqyqW@Q~!jUEC_YYMKCjd5qum5bMC-28xZ0vk&n=Z5*9S|y%0b8S(;RW2JU ziz5U2B?ct||B^}PiUos7FfG)@Y6}I2cdl$*p$Bx;s#~Z$o6kW^m)BeoUVDtUbDuMJ z2mwdMer5l?!==S8DLEZ9qQ3=Ei2b_ZowCh1G4{~qd7ky+fe`n(Hf3|+c2r!mcQw_2 z=ksbbP%Bl_N)uM8eFg9|Geu;8gX&ekh0457I~9y~6b5XtI_>RU6J+_gkm;ooZ!!|? 
zEuqeWwzB67(zTZ<@hJv7nTB4R*AC<`4DSYx`dLHj<5Hx3pG@!K{g_X?3z-84q)yvY zX{YdC`G~R1-EMWz5Qo2}R3a)dhiR;_RcHfNZ?n_)wR(G8y)c7$qd^yE(7KKS4aHqy zH>L26NKX9Ko1IcYF376_3Z=w*fW&TF5>qen;EKjk$es2%GI2*HU77Y8)rcEb+$359 zbG;af5=lY4Y)aD!nZe34^cD6_ATEoWP&>C@iX{ybYhmQKVNf$+mJ*yxWvC-#@g}m` zsMx6fzb{7D@fJrt8M`Lb+oMQ!DX<}`)EnreZ_G>U+@=kqq}PHJQQA<0f35M?n()bS zs%5H+Bt2+-Ef-BB4_Ix^=Tr^I)@0}64IfXYF*ZOi2v6{9IMC}ri1o$lS+UGYj5LEDf2Y*nsoat50lN_$J1D9Uh25VE7e@t9az*nq zqd}kRG72ljbYy~SQ|M4{I)^^+)tbJhidL!6C_jq!$-fcZVK0k`FH@B@s?a)@oaZni z6XuZFjruEN{o50zHR@MfDGpg$&gXi<{fw*p3Y8$dwiFcSGQZQ{iE-mGJ&a-MTm|bL z&tC4jDs)|fGKegiw-qVbXtFD0=E2c_`~Dt~tKOZbf{ngImeUqD(HdQB;0V_qp6YKs zTNCD?DXkf;>cB4>_}&pFSLqUoo89@mO>ga4sAmQF(U8V6JH-eMtT$&`ELKId(>D%E z9cwZdsi>|<%(`1J1Sp|XvD4X^xk+Sax)O@~YI$zubi*@~cVWLO23`u@KcaIe(QrEB}ibiQ8^n3jMwS0Qqm zHrz9Eqn%O?Qd?qDYNP0A-B<%EQaTr6LjZ(dPQWSG7$)d4Qnu6@bUG(u>7Q&433^IY zn-CyG##f7W`#;=c>wMQa>Zo#!w{Q=fAgw1&e=9~onl z4=~~Ph%iWuWwSuQC^##@f6KC7llVdBN_th!Y8?SxgKy;lBh@P%z{MQ%Sfd7v+=YOJa1ElkInV?62r zb~(Gez>=*$a2$m0l8Bk6R9{-FqkOji<8OYf=pOAr@W`oD-Y>G<{95YPpT_ckiqo&g zmh>XMB;fN*8Vcnu%w(1m<{-S8uCLWDgh7Mg!Lpwi$5hb3-LEY>tCnLemh!m$kzX-Q zjeeBRIRV$je#zGw_C=IooAv5mdg1mO3XZu~Aq={CKf3hnk~3IxR5c*$TAXTOJSM@N z+uRX1!oZ_VO^Kq7GUPR<%%1$3MEiQe(B>s+&zq}mv`7^pul>&c6oLO4bHxt3Nm{A~ zhe#`b^0FYErgrfH6g9*B{v?hYKhpPM{P=0+vTz`Njjbr>%_8fQtdjDlt^2&pf;d2twx zU>*+&Dt-O_Rp^g`KsH|~M5&4n4U^^{I);!bs7ogS^4Xo;jSdZTgt`co9tF9&Gj<~? 
zpcj6-=5u4KC4_Go)`WfKa|Z8wm`Hs;Tar9%z*qLl`j3Ln*^jD&+fbK2a0rd|h%}5$ z?raKLz@NCKt5z25g+-}^yD7rtpe-D5hTQ5rUNZ(7g&&1FVqyZB)xCLINOc9U!Y^3g zr*hOCIH;1;R>ny9ZLk9iWQv5T-^WUiHG*!Jf;Vi$^dDD8*fEkKH4U^pu^{}7dW()n zH9?2=Qq7wGhXx0KX=f@?DAMAuqF?WArb5dqMf|xW_vt_4LJ0g98PSmDL8DGK^R?oAQZT!I^U0EjpbZ z?%Ltm5pICcm7KcL^%DNpw1}Uwr|_M#9;%-2JiXqUWYVT3sy~#(fDYFTM}E&E?Y;oJ z-GBZ8{C^ysg`d0}in-v~9CaJtwT)yr@a-oD$+669}{)pnS2QC&eTD4~6O z;WK2Pdb`1I8{UStzJ0B_%Lngd3t_u10_qEu?IBttjp!GtDz`nR5=EHD(ZwUHs;lMndn z55L@ROBnPgM-cr93{F9V8|AM$TBF-(jLkqGE;3fe3=UARNG?%U6Obe0P-aeGu8p2J zi1>IgXIk!KL^7z?qx(GVd>25dG%7%b5GP6Xy>EL1(FblwvF3DaqP+sSaXv5OA7VWlgvPM ziNroAw@tNutcqmUWU0_Qz@4{H!@d_9@2_QY?vH5~RA$9o>7?hx??N}bf&7D;T?tgQ z2V;zHbA6NM=q4kpExb=Xxd2})pEB#HI<$_rR~BTYX2 zRVOw+FuWK|HBw4Yxss`hXX6XF=+x!kWsG}%b{J>M-Xamq6aDNc`|W}6zZs|XFyS%k z4XeeYve6d|>uxn?9SC+Wt%0jh;XXhiWHt(HUzh)X`C20ACGsHt=0{8lN?%n4<+mf##y4Uq}iLunZ zaiM?t@Z2yhVxH*V$ByO#SfN67Bt<1@I^OlXi#Lz3nD|K$JR#lei)G!9Tr8PH->+sy zv#^o)RT5Pg9X)tXUx`VOr944^ZexQ|h)$g}TPxl1GC2UFqC>-T%i8dKY=gX#Dkxeo z+>Bqkuxr~ma+FE+HidcB^}RlTQg?hZ^&Q{%`bc*(6Fu#Jgak?@%oZ>}s6);wT>qkZ z*vZ87E^THoDY<4I=KYLZL+_HnSV!P65@;GJpUgVS^pMkPB@Av1kA1jH8k(xmUzoF` zfAf0P!VCPTj;%AH75y5$i9nhn)p}Mt{s2ew%^l0^lLV@o^McF%64h8?2J@k=f@@dD;_9DI>>m^r^t1pb}O5T{*Z zeZASk#TGh%$U1hqCrvEBHw-KMi#^u1+VLOv!jl(thb~;5t7O08I2Qy(VkM0N=_dYJ zKH_~7a#nN%{aMoD7*rhb;oGC-zb7kU%YI2~gH0X4ZXdxd{#Jo7AyOaud}gN)J|9gZ z;e>B}zJ3rv-ag|oVju= za&6Yx@)t8|m2x-BjfE$}h1n&2Szo>RUjDN#MQWh;&g> z6WdFI!UyrjPo)jibwls?JUW}yD5N$ls%%Xy4O)bdajzV>N#TE7h>OjSXoXfAGBz)H zc*CLCJtGU4=?Gih91?a><>B|?3Xh#@$4Fpx$1u9cQbcQ<X z&q*&J2McoaCV}`>l8Fm=Tb(8aDUP*< zxFVa%@=^6Z!ugxldVDDqom}*bc6Y$=)BbYoQm>Nw#W8N#%VFES&Hm`rX3hmSq51mm zahdmJ>_BY{gLaqjqF9n;uK^tXpr|5vF)m%IDawi9^{?OX&wrr>XO@PA)7637{K0tM z1xhDZ8Uy`jyK=E=0;T+U(*k}qt|qrkFOc`%eEt^?2%*pzHzieWrfvh8%UCen{cH4X z3jHLr(atmGs=nc~1d=O(dY}&hk9%3AAeVKVU2Pz;Id*znLU-4~O2dOo^$(5Nb%~CF zVvYstV0~TE-M>!3ypw1$GQTg1czx+$D}t~n-Cy*JV#psFqt8C#VbS&XpY)t3Td2^f zfhaOhn5PybG)9DrMh=Jq@4Lbu;%Fr 
z=x`_0k>?x^R#Nv+uFPL8H?GpL_DP}^_Aszj)Tm>bj$fsEJ&^lDRgcsL6?WI9YJZ&S zSSwgUr!|bIDRwXUTp^mbMUExRF?r$@5UQp-o-zF1@_UUf`1>;xpDt`?DKJ89VfR8M zvL%6x|Fpc}3p4~J_f^XQ>iducpI(~P!-V8sP6LblNQhxI_Q%|0?%c$rABLRHzR(}w zt#qnlIB7fftFAHN`Gx@TsUBg))@)nAdqDKkAELq1_3N6kFn z#P|-Zj$GH4Y{IwVxCPnD9{euIPZ;!iY{op;n{uehQ8oWr*e&+nf6Mkr{Puy6i`Yu- z{Lv=mLD%f7r~9&ck2%tR9Jr*)zw^%iR(4N*xYHSz7^oO){&?l_S#j4g?~-!!J8~5Y zSBcd$N+Ups?P_yBCIE`Ue#n3`X$bg=FBXbVQn;mKrcH>I>=U~W(iPaXR3tIjKtwS%W4@bM96#Ff+ll}9$ecE;$qclOV?Wrob4&5w2f>jDT;0F$_Y zjm{2D2AkCxfL7m%l{3Lknh%{+9dOuuc-O@9yz|NV?jcqhXBSkf+z+lBn4A)8(O-C6b|>{fSOe=5C1HwtL9^m+XA?M&k3~ zFaN?7I;E19-I*D&2%A1Qn>}W!p(_bDOEhJ%rOX?*vUE|E&38*`mB85#fMuaeSC*7m zgauoc<_0sc4OB`g3#}1w4h=z~AAsP(?mpcS$UV+Wx{pCSdoB1v(SDL`NCq`)SXs^4 z=tJP(HCwxh%z2;PM}y2Od;XTn@>IE3K9FJ~gFUHr8pHzLBJTTmWb5>}3W;mM8sQCc zoIh`+Ibtt*S3|Zgvf47V#O?V>r)ue_X5>G&u+ybxhhq9%*I+K~fyseh5)z%N+(`T> za)99a&n4dbK%;rG=yl>Vr|g>;>87eTgFEOc$F)8rQ{`w8dzdVPi}G`GoE^WBvTnE%F(=9BPU>bf@b8q4_I!x!_o*ZhW&#B=7nB*${V zBuG8oI8eeP3MmV~*JTuxUjLhV9WP?wX<19Vm_MqcO8ylol=;0b&saz6ji4Y4$(`-9 zV?IBm2PTE1bf)}oDv{CMyfO!i{y7OB6L zQmrz~Zk_jHkt~zE;v^*xB1LJZdw!dnF?1KAeOI&aZN>LAT@k!B6HDPT6VETi$NLwp zB*)j{*nbu8ht6~sXnLg;NFHQ{s5)&s_BnB8l(CbFf0^E*_dk$KL?Tgl&@sSG(GIUA zQXAKJ{h@`GU0%j3jErV{g{TRCxDAP@T5i$yYOvz6cD4SmVTR!94a1PQ$p?8_=yD4h zOJ8OxvBQintqO=ARxfS~u{&!^^ z%^!!kn$UmVaALnXXQ6+wL(W=WwmqvE5-ma%uZ3RydJSz%4+L&J$b53Ec7ud#n(TJO z^N2L2cISF27#S>1sN&s;*-5p4D->0fCAmg){3aN-1GNw}`jpm4`*WTRqvfuFJ~~MA9foGa zTdhjeyV8yu?Qk$S;YEeG-aZEJ8f4G4=v(!Q+;YQ6^8B5a9iE2+Y4* zqXldZ*;jS~=-&y4pkTn);J%tN-^K-4nsHwPT4D~kBZysiDp?4YJNNSSKmqOL5>Y@& zwQl=xlYRw2u__P+fO2whhJ1R}BH;Guq^ry0(fq0cm}9v3ZdY~RD0lPpwtzs6w?4hZp110j zP>1Cq#K7ZM*RiuFQOq%oD`@@DpV!Zm2!-*7@tSOLBdE zFs9;R3@DfcmgW*8i z!r=We2UF^1rc(&$FREjIPjf+wPjwvMcUZKr#+OElu{XXS55;5Vt-4Du+1BD;l%DJ8 zdRo2VxykFPw)p%n2IE>0%a&v=*qC`-4y)nf)%hPuBO5ScEbf0KGqx0cUKeIw4%#!5 z-1DJpmsb+>KU9oyxOW}oP_C<5d97-wAZ0hTzBq+$atW^x7SIeB`71jr;(9pz zx;AwEEB{y`67#gfe*nK|jtuQqhj-&X=cJid=IrjFA^qMrbx;uSt7Wd~w9wz*DWy8| 
zk7n31l=Ncfe41CS3y{apUiNu=NH+IxumBUAn{_y^{SKP<<1x*2326Uzhgm;a`0B`i5MNm*!bjqva43LOSI< z01lxLW-L1;t$s49iL3@GLRSENSrq8l1LY|14OweP*NqI`d0jwG5(rX#XJt< zTP*IjRNkXV?lTvlTAF7gblVcTfXdcrwxKTK#kZAy#*;JR)YRVLR4RvL^ zYJu`BBFSoZlv}J}MJ)4Pi{=eB1p20VK$nvJjfh5F)1qNnxuLHQDno(!6B{m^J+*S_ zX&uCS2GNdL8+U*$H?ZzN{Gq-r-OdfdKs*in10U<)9S+f9t^>`pj*?|e8-lf>Ty3E% zq_K5%3J^;zw*CNg@a-lfN@%-#L<4V;+ukjY=RM)T{8nxq!<)$}$5`DBA)ckr;i1-C z+*ztD%v)H{<0s<4d$f<~YOfSmJI>QBY2-?Jv0I_t4FtdKHmh z`q@G0l3H=qU4m>nXR+Yoek&80UxQ^0_3jN;$>+}e6R%pp_cgM(7p=_FpLE6gR1Wf0 zOzhi2jqz@T$l;`$P@!}4zGr9I@gCtHvkoeBzCZQ0|*CjHFV(A!yCirq`v zt|2n&4!NK651GcV8z5*)UgWoF|ywmE6wJ(~!TY z4h*wU`L3XqM|;iuno7=4?b#*&=k?)mNgu^iXNOANgEKodZJYg$yYZh5{)_KWOAbv& zJ>)#b&@vors`I12Ug^&%o|!5DBAJhqcNr5p#Hz7=k65V1U4nn!TtG;F?QYk8ygF%M z;Ykj668d9X)2Fkt!%0e4dYq7Z{vQ32Yg`V|!qdQfm3$?D#&0?gTKuf*nk|lKJhuJ`$_D``cj<^pO6s)}EXBLum?A9+RQ53o}=)Kl)}tWTc6w*5Jm%bQS&EPSs0d zKiF-7tYBeP~?%k6D>KAJVfREiO zyCs(tiQjT}M!fnEAqew2Di1*>{;EtfY(4z5=S*?W^Ua=j=@&TLk-ympQ2Y}r~h~t z++yg!lGCRb$G=0~>0%DSk3-2$B|892@*%nHO^g3OynVWy?O4?6(9~*Y-`ngc;`OEd zk?`p=ddMc4Q0l~UJV_Ty{&p6nV_z1BzCb{5^JYikd)SB8V&(<>Cy9#%b&5QIs$jQG-$#Epr zE9jYLPN#*Bl7=v+PqL7JwmnF7Hv!CPH4yiv9h{rBd&?ay6u{S|Xz9SF;8nYSMYs1m z0PBs@g|H@eA<0}PJF0!JfX0oPxo45`2qLNMjkM@<3?bDGhbV#FS6dMPOxNtv;|G*! 
z_eR;kS$1r`9}UH^9`gUun(a9HF{YS%dMK3Jw)*ft#-h8;L{-retS53cOYdRcZ1y+^ zWH#7Uqwc{+G8+Q2+GV?rX7+Cla0v>A0)j#!s;Uj(e$Z&}r!AEXluH@F-=Z~lMbv1) zuH*@jKBU{=pgvVQ)WvZWW{=#G@y1ieTDo+u55M(5`@D8m&<{I)9Thq%azw^{=di0_ zBy-G0PHcNnv>s-J{(jT!ho!)CIcqGeREL^XaQ?ex4?ph}*5@-qVOL+vPjFrtwN1cq z-P>L1RWuseV~H2ej2gG_b~4(rz#ptmeN1@rKevNWT#x=xcGcWZo<9&j$I5lr7bNF| z(hqPLN;4{_SmKV)K}l}QTxe(J_uW$yobDIfw5l}c8-81#e53L{D8eG@zJ}yOPqq7N z=a&Lcneyj;9%N6}b7RaGZf@tzS>`lzY1599Uws#LZ|1A|LU@%Ae5`m|f88kGuVEQj z<~P2l7JGHw2q+m>sN%UbO=MK1jg6Y@i2N72Mr&SUR`}}-OOal1^jSHTIewU&@7+dd zwVVpgi_dr6&X|jw(~N|!|3{MC_Z`~@2MAj7#?<+#(kA{_rOqmSjLx0()TG|HYa-JhGrC))b*7`RxjZ4BZed<29q!SXNsv#ko+YC$UKoex(zm4(=Ak z$yc8Gwe*zSH1rmYOh55;{If4YTst6>qTMu_9Z;qK+Qk2nKrDVP62&JPE+cL6e;|S9 z532O5i%$ov<3Z)WzSCMXbtUmvM0?2R43khpB31V1Z4b6Jq{pxiH9%SQ@812YK@;y+ zX-<`~aMC)C2VF9*)C#t=uFm#&o>tA>Iir;gsvo*M0R3d`5Wbm)5JeU zI~uq7C8CjIUuWGYHBJ#~+7??4p;c&Kqto-N`dE$YXa9nV_7wLQP*WS+a{TC~!`*X3 z#VZ*%7Ea@3xXw4BHHBuIV6xzfAne+Lm=M=Dwq@&l)!RZiyU#;v&2}zH5aJy*p#WS^ zQ<5{#>(>)3*2nqc8cnsftlpl$HcFx^PvvR%Adxzg&DF6gjWnFR3%PUPb1a-pIi!O zMecMqtqwVcapP945Kk$;|5;v%XIw_fiO7j{yZHroNeGyN^4Vc8jwaW;LWb%>dZi|l zm+Muat)CIT#k9}phu`{WEFCH5L1cAf%pW^-2a3a-`hz4AG=%%a`bDNqZ;Z& zfxa!};8#7*hUU^kK>@*y$szMJMxu#cK$D-9!O%+8O8NspOaQ_N?(`;7c38xD5K8U) z(!SEZGQ+1ANcKVQJ}dN1aTaq3&Db*+!4aQ6x409*Bl-Q zXi;eHMNl_F9(yR%U|fy8ahg`>Ru}-OFCI4R2QM(j-LXx1zlQL$`t!@QAWzLtmz_;(WUk-BApb$a}l9V;iue3rNd+WJ&KcO4{K=Y zxCfag3CX9$N~tB!wyUOqokM&e^7>xB5B^^lN)J2l=^LR4c9y`*c=@T!KNSJlgl^wD z*ZbDDMOkp&PZ}Vc!>5};p|3*67%=n^bIb)4p}0`jbB|My%}(hugT<8fJynp=eQfir zdW<_@N*qt@T6m1dXd9iI3o4?~e(j^$rGG5YI$YvD4*}i61687N&USLASLb@p`K2bm z`xt5=sOnA47>0is)&nnvisgd6!VA9ieKob~)0=Z=g;>TeR`pJeXF4PJ*2pv+2BXv7 z+vf|M_-oKC1rh1;3rnHGIRi3QwG3vARTEht->XVc-I$ZxG2NfwZLeC`p!Bsmga-iB=ym8b3zj9-lq{f=&Axfp?G^UNa2e*Z9JXtIfcY)mBGHYYN zzKm}A)V*k3iz-^9&V$dApbz3hhc!LM|0BuF%zlBT%Dj-vsHD-VWKCWlDG~n7@|j;L z?6Nxku+)6v+@z13QFE3-I=lT3B^&#TOt2o8m|ccXpl@_3%0x^ zbt{Q+`5%czS3+mlOHLkHYk!bHq*kEVN+3^DrHUb0#E7trx^swNSqyVoxw(P$dvHRd 
zC$+J3xsjA-i>aLire%`Bz`4)3C#2&vc}rflvEd2vy>=0mzLr)DK=A<^#H@d{nhv@vVPm=IFo;j*jK zw-syopB;~E<{sN$qW=9650GT2CtSrk0Dehw7`7lf0A^CU;wCoR6ty%Wv`#y;dx;^; zOocLL!?mrG->g{wgRj;tG(Msu%Q6Tq7JuJ1Z5F_qTWOBvW4;YHP+&bWleXb5QW{FZ zKDz^F#Mw3VY$`n1acN?ErU?8<+2PwAgza|e^Cj-f*kz>WIZaxZ9w(Ug`Q|ya2H2Jk zbEkK~TLrPpfId5%JVXeXj9P&0X9B~PR-MFYf}BPyt!5kiu*#6u4^c-@KV_AW0t3uW zIF%ST_%~b+&})wEd)%)hsuQbJw-h@K1uE+Zx8DOLq_;r1&f1@{#dyutmoxE4euF18h+->48pv`SN64|9Iz#+`DO$gD|BnIm6W^Ym2}lj0mX;YliM*bZy)@ISLqXom3qg?`Z`$5c(! zK<#zKnC=f1{rHahdX0j*9lW>F`OdeaNz?18^=03Z_gyvQM_%G+T%RtiDtK%)c7Z{L z@h(I?^tfrKy3jo>Z)Nd&p-h}m2b#`Yt(o>x%-&TO!k<1LXU7pQ<}@{ zYcs>hY+-aN{&Jx4l~53|`Hr%+zD%Nu@32qyM==ILtQ5!Yf8Q>e2jwped(Kdk59CU} zpvL@<Q%B3(8&aQgt`66VFfAg_|=5+HK+{tTQx(qUA7Tr9&1a!Vj>d5ZERQOsK5%9%| z9sVF)a_m5@zQVoXeOf)HhR8=| zTTzb~_7Nj)v5#~XYDnpFK5mbH&4$Bc>puG{vr2D@k-Uwch#2G(CHLh}cA&TVAyo!s z%)()n^{gL-y zRF4@A_J-lw?Hot=lY1$CF@Guj@T`$_H@S|*DadK(qZ6pv%32wa3k#F71%4C*7EZ=0 zwC_pti?Dz>#Lcr}xm)cPG?asfFSRORfUEnd0ezO4*g zkyzewcDUFDUb82gCWW(7Vw!#7UP-vc$FF7+KiUX3eGpJS(joIOGL!T|I3y~m27Yv3 zBrb{A98ccxF0q{v@sVrBlDqIML6K-4J1vc)9U6PPo>R%$^+kn_aBj*l9=9SrGol>L zcZuwFsS zm@;U`ne$|HAxW3aof@eu9z&6YMiELz?_cX*kq5Tv_q^I76%{ev7=w;uR{x>M{}>B=zpum}uJipo_MEZ*GnWll z_TD^NMlTZ+%r|bLPfI>WT7{X$rR^miBO91LKGQol^V3F%Wc^$~1eC}2N!S8&x9j-! 
zKAY~GfxHv5V_~j8x-7$CTl2ONV^3?{pyb#B#)gM&9jT{#Wt?L7Cv0^yse74YmsTMr zS#RPp2Gv6aJ+*e{d>su$y5J`pf_hVJD|6%>Ij3?+2c1ve6Ta~APt(8J27g^ zFAmMnm;U}B8aKaso)rEQGO0Fx{BqbonmfRp(%ou_>>c=!4Y}PN#JVa2Er&iRpa0q)s^WF+oRf zty2)TZtU}oeE8TzEMT$$aS~7O9Q3xd5thp?<5CZUFgIBh%GZhm6cTEZwq^9U8FX}Y z!1-4BI5Jv2$#g4l-+h{A|d^yv~9bIA(I$+z%eYKC((zLKB2Fz?HR~u}> zMm`DKkqNPuGz0(VloMh=gd?sS0FNtP9KMbJMrW@fTPyqU+dZN%5i$BUIOIcd5J&yn zUYEG5vV+|^*GCZ!fwdHqYb^zmGQa!bGf4Xtdg%MTF5g~c#VRvwc6@^(r>u-&*u&nK zepjLhxE_)Zh}EOjt+X)-Y3L4@7rFB|iG02|5UAIQjJTn3)rBUiPLI_ADlm)8^O>0; zh>w%I!GIl3YSvfyju?BQ$UvWT3JiJ zrz0FcU-?7-?CN>$;0BZht1eI5h1IRH$(hrt)UIAbin^B*KfXbS=Z*Kc`|Ogrj4G6K zx?#SKAgFepvmsN&`qo739qM#|>}&4%u(w*JF1hxTr$M_H*NtWIT64F^@__Z3_<`)C zr9lOEmBXH}D=JHq>1PfXM?7Or5f?5mjT^~NW?-Sec-?*0w!6hU6ZH8A?maf zjCr{QqCUn$w<+JnKJrFxbxXF4-~bjl5rr&OQxB0wuYf96+-htu-I{leoUDk>@M7;9 z$@1}q;1Rzn40h@J{eT5v|xWA(eMqu(e6wsx`R_(sU+I^2$cQN zOF0fWV|@y55k`rkVrzkkpT^O_(GKEFND2X~UVe!N5jJHD$_II1{o!0kHwz!*Cfl+E zLD=X^M*(-XP%|6@P4mHl);(f?)fCjiFPj+|u}hRNQKg;Z`b#%?&7OE{3TfL_(RMNjUu#Fjdwm@YX z+XNsFYi=zk>oGU3-MM)QqU^)Z1Zm~&60q%~X)tGQ4TE4LGgXYcK|9#jWW6Cv4iz=a zJWJr$?B$LE(7uMEvX^A9^?@~#kPP@8fl!bj2T7KcLme_`Bn%9E&p+BM@ZiOwv$9}8 zZV?77z{`S|p;5J2lr@}9se#^s?pbRHz#x-8trBL2I}bWXl(6uYH;AZ2gAFO*w_8i= zQ)!oEwjXU6+1NIR(^HoR|MJgN-!bt83{oP)4FcEOpKRD7=O&i#z<`0n3w^Z!gzJC+ z8d6^<-O!>U?jB?->H+tWZE_;WFk7E#AZC@Tim}iQZLNE z|B(!(*;L8jA5K=P`l|dGO|G5@S*i|rojR>Q9#}sahf~d zH^@`PYRqGDZI}}hbIV9T)Tb9_?pLGg&vU8f_O-E_^NLkk@6PH9>WPg~Id# z@c6>swc`R;$GduBTSclfLb9?3I&dQ&+NwX^&5 zjrv}1vl8iN-&b7`mLA_@IjCb{^Nb%k84^vq=01DV)g&%)C=Xol%5Q^iju?j#*ktR~q~jyoH9 z=|w#2e;!&V(2pNOmj7=6oV_6!{_C4f(W0TV0fmyU8vex|R zz9`xR8B)(?MA2so7F^v@QpD*9?tO)GB%!(xru#uG@U?Suo$!et-|6WAypZ?2;O#?}y6*dBV>dB1;i zDIUBn%E1_UVP$k1p(DF>ehf+E+e_Rs(GgdqT~xU=s^i6akSr%DxZQC&`xmCa`sv>u zc?N0VQ_fx5ET12Cj<%EGZZuT$Pdv5pr%S#*7ugz#07jEo7G<-6!RkhT;*rkW(;CXetu+RYR`>rvJ0}tQ--}wfw`v97 zzNRDlV_VhdpnMG8>U3r)uI>A+0c`p0P-TZ~KEWLWW7&lEmgB^$a4uh6s&G530mks6 zfqpvmZ?jh~W#1FZf3GBVEDyq#dYaXn12aC1)&Kp6Hiwp5{AD%VG5hf!uBn4M0cx4q 
z0D+6xT+B*ImdOq3Wj0Su88mpgQzG30eR`iu`IT9f`oI9Vnc6dFA)Bq}DfJ1uoh$!u z=VdN+uihx-SO)E6MJz=jmrRk!zk}|}CP>dsU?YiXZn80n)wVHTN-l$`02ipKg+km0 zjG$8^f}5bQ!V6Wf#79FiszM);&bsCCM<&0H;y5PH#UY!u|=1kY%9ijGj2p&b^|O zdpltn3%w50k7s*mR&?>cC@A(4Dk)qyWVKl`+8S!*+f8P;ImBdf>u_1qVv> z;dN*TxC084B*834&kR2PfUzVF-3TPCY@Vq|z(?7t$M>BXPz_e}jKkZ-g#IpP^y+Ezxo;f!uk zuUF{^aT34!pXPGl1CKUac6e5b+5$Qy+oSK5nerW%t->+~5@B~>UzmB8u(bic=DwUT zQVVxEgq-iZ0F>U5gY~5In1a6@(hgR&0?`42hIN~or<3!OXZ>}^e4PX&6wjb2q-Udn z!=f!^ickz#WeBufjh(kjS~npSceDJStWMUiim%4DNYaF1Nba`Avb>dwb`$ffoEX_% z+gVan=zgr59S^m4RLQYwUP^p_DHAED`ftz6Bvu=%OBLsH?4t?DN!_sKaQOTkD_VNr zLne?f%_TCY_@@M!o{j)Z<=JtR%^&)(f3b@Tfnf;}oy2rCgSxoC;#V3hg8(VH#<_DY zq`!J7@mQSt{*}(kNfwHS_Gk`Rd~`xX7Wyi&QK6oSEaU0!r8wn{?%GE9(_z`f{Sx96 z8z^(Ve~~>-&8K)qt3NhpAeJ}mJ~DDM%Nwwzb$R8%1`n~5ub02MWigikBb{oNzp2N* zZnLa&e8M+>m+PfpB%ZW26A_;QRB>It7vi6W7~DpEx@N2_Cx*x92R-m zskFczcHl+W%O>|+*bWPE9CB@RZMd~z{r6|&axboh;V$I*!VsS@~u z)rxSJ=ym~#USa825f;?z-o;y3WB1ABMo&GVr@7l8)kWwcCSu9LuSf)WhG+}8bQ%mz z#Ys64Leh!1qr@_w{LiP$x=p$+9o?#Qd>3>6c;i>);Hg=kKYmjSjIheaCx?aI$4>*1 zlSq*$b<^&_z+lO8bb}Kx2XF+1Zg~!p&?Hzf@w0`DE>PW5a} z%JS*_O-d*P%-)tvkEde3Zm83HPjW3X?j7Cm0x`g&{MX?VzVGT!s!X5D>T(a+WpQe2 zpUWT1{*y)?XXX1)fpbQ!I@7AIQNfsbr=1TI1{0g16-D3rXG}tubTPpory@dHoZ#NJ z-|lM%iE?tqg4J=oLVVA~N6=RA*9u&_JnMbREZt zw3eu;mC|Pxlmsfn+m->1I^8?UN^Aoj(1sRywLEbF9&u?aHs5YL0Y@y`;}TOqh>pbj zaZNKBQk@X~?&PntN84>){&nNu>rPyFB0yJdsYi)99xGEhsF~*`TMj;B2EVj9_C&H( z9(9EeGO`D~rT&|@mG@(K!S-WZ(Cc1A&a;5Q>=t(hZROoov~*N!+R6Do5J|S$U(+cg zA(Pf@O{oMi3AHMgwDCn$hMRi_Kymzv5dzV#f6em^WoigkDzORJ7rw9no4u+lmT22C z9N}ahC|pgXluPa#&Nx0Qg!)7`vRgWdHQ27nF})+-kT^?|$WcwI0Q_Vwe{0AA(8#Ur z+xBotTzJAYb{}76S`j%N1|#UICL$a@rc>A16p^;>R(ptGed09->QI?g+8*#oqqb8T zV*Z@yfJdHdVer^Hw7dbe7Xfo?#auWl?iqIE+GF+ zfDy$vo8Sj5$hxJ0vTARy{eb)dGCsU=>Jafr+ODi+>8~Q)llYo z#*ObUh$YD{gUF4quZ4OW^v{wX4Q;&YnR3P(-^#tssri&kDio;RFjAqzcM?1o^$4(b zyy~`|GVO{dS9tpZx8Ogh@aM8x3+}pYd^5wc;}>^OHtThKWYz;q!Ukb zPl@BW!c0h2D>8vLmcFQ_15^io$a15`FS>Im)T~A@+EyNzagFhJ^EV<4t9BCQEF<==URys#6tbs6iWFzOxJ@D+G;Q 
z#3r#2YLt=B7Ig=mP&qQt3}EZ_6{XnYO)!=P1W5OmPBa~Upd7l2m9RcC5wMR02f;_a zCXvFLm)tPQpe{umU$zzcD!C%NvM$>sp9OSC&bpNS zD4Xo7>t3FJa}*7JHw^Dpg4)?aS?hk~UPy*xut>6bsfg|g}J3oE#8lFobLLg7DE@=pz=l;I%9Aqu|E z>feVcX%(eAu%br1SjF>q`Srq_f)wru2Hw7^h5IvmX*pKWSGZjJR^w3?heb;RHfN#C zO@qZ_Obh$6x!(3cl=wrgwjM9y)lfyZAqFcQzo^TCQRg@wq<-CeM|5#Xh9&oe9$&&R>N-~UjW8f4(72WRjfW&Dl=p2^?%VRMM|XP)q9@X* zYoKSma!Q{62M0m;zBaUCd7iQCy=Iw2xu>`stS?868w&=IW<3>KjW`|^mr~dO8I^k8 z&IF#SJ&|L})}soQ6XyVMy#}F330O=5WoZfL{8}K&@fwDw61`vI4c^!*9GfU4ELN2$ux7XU6ds{!}vX8k`^>v`8nZy>CC zcapA9s;dUN=F)mBTe~IdsneB4_RG-))6F&TwCpulDvkA-3paTMJI=&=KAWcMd@oaB ze=Fr0PNZ$YFxR26T+-LRH%|dbHY=VLZWI_MkX=K zo)vv8X558dmn!Yc6-s=h7MiMDg?gA(Zd$2$&OEEM0;c{|t6mjRQCX^mb){4)7Xd;? zYP0}Z_NzszC=0>{gx7?y6o6I>KDb&nip{xt)+Uum0cr76^s+^zPqVhZUuNfdd_8Z;Y#_PeqmCtNo{J@a7xB` zNw+;RuBoirfoZJxNmp~j6N_)ay)FhX=DLwSi zifJO!9@M5&S2n8dLP1fPKv|Sz<#U^vLCEidn94JSAf|0kSL#uvI7zPP!OV760m?(p zW;&n%gsRa4{$1475K(+7X};Hz=$h>&!iZ9(SAe@LweJJFnwb;T- zhI1$MTEx=W`FE#DR$c!9e#mS;tL;4dsSPNjQM~5}T-pBsrD@-n6YmR0!`>RHb5X3M z(oBN)aszwD0Po7jRK56X;YVFuZI2@WFL7xhqn~0-K>^jDz$_-;Fjg*+^+F%NV zn2>RjOhhLe$`%w`MuuENej~^Alh-m}9s=_b1=I zSU)UhXq+-V`ma-Cq=0vMvD+j24*vi}8h?hvTFGl%W6k9twbS<7aoeb- zY7{_l1ER9}_yf@*?Vf*{wCh|-M8ee$7n!S9%>jBnY-8DZXY(tghIQmW5}n7g`I~4@ z)qO+9jolv0#8b1%?d4J;&ed9}Q)oq1;01@M-{o9~Q+KE=s<(+1Zls}OB~vXpRw!Rp zS)*i1>iT*0LF$;jTAFRttcTi=wDgL32@hwu(d^}IRq*tV6<-cCVSPJA)7tQ%H>byS zPY-%88L8<|P8aFTQ^FvQ7tMT0;TR_g>UHt^cSaYg(XO~v@3bP)iqhD&7JcQ7dFk)8 zo2)NIZ&E;BBT2YkuFkg$&F(t+t(q=RsL2YICoA+!g^c*pvF~WCx~yJ~?ux^@V(I4V zD-^@lwS~gPc~)$zWk+1D0x#}HJEzV7vOOQCOB?&5t1feByE0ckY(ymcd zsjHM$_Bz)Z65bOUp}|71G}@)@MAa@;3lyrcR;aGjYO7VMq#_to2}=PO3E@?1l|*v1 zYlTJ4%9C59k#f}PB`Y`UrTr7Gd6)`-;8DykO?Mx#{=mZ3Bj{T7o7%%RHCq@u)_ zQi^d2PYM+YtX56amaR~-Zi3nsA&y!$#b(nBPS`3XkflT}QmB^{ho+L0^t8)e3Smlm zWsr`ULiG$NCWQqxX(6^&9^4@|F)p18J=gG7mGc*xym6J+K)U05Yi&F}tdC?V$&aPIC zRYtoL#p;WKl*cteYG)OmmP&Klkl=unBrcQ=av9K6DKAD!7a)+@NurV}ZNeg|G~lMw zObt&C04OY!R*Lk*Agl@J>%w@a3eKHoYSe8p65<9-52}`Sv8z3%Lxd25ol*xJ@$&xw 
zwe9uq!z#>v(g!0mxDniKoOE7VuZQY_Wsm_QG7d30h)YZLj6HswB6IqQ2abguYj*;1 zz~}3ae?+RZNQvs3Bs`CrT_=pl=^xwZocI_~NM;D+Vm&*jDdkr!>CrLtgbbpjz}<4>>&=*^ekQ24tS2*e^5SPYbr2Ma^Id@|sLB?Hp zw0xJ&JQ-pOSOGFS`t|<+3+j|?-WV~C>TxyZwJSlq8Hg$Ol!<8pMbk)s0uNuBu~MiF zkqKgT<8@ywgUa(2>PhUq2Z%2+O7gWX2{>I`&xm4pvYf9Oa$$O(6kKI^O-Ai?aJ?f? zzg~Lg?qx;w_bQ#WGUB#nYRw9Jh+daT(94tjJK^F;!up?wd?&^hccj;r)BGon6{_A5 z#|!G#UK;=mD|IwM94!6IUjx7JoOD?uO4wK3wGR#j<~0ut4%t;ZCzjE!A{Wr~4SFLh z!Ch$%X~0o$bmsR(558JzZFr1uvo*aCI1&QzS|5l@NX{3zverY#D=SLAcdvMFlMBl` zOFcsSP34Gqz~yXw-te z9+z^wn5?w)+r^dUqp!!)bX%?$sH0X_&$K(th3#x=;Y^NBnp>0-me(mITMtLSd5CZi=STtmgdDRW{ru)JIIG2-<~Ft`*lRwL+xTYDH?>wPvxaPTk8o zM{P=bQ;<|yoK~pSn|DcTkf~iUX)3H_ZCVzgRC^W6Wm<)>)C4CqD05Vryd^nV+Lcj? zR8U;G5JHN22Bth90a{nB)xfCRm3^Ek)2!RtE6Zv$46kEw)sJ~?D@oEUc&gG0um0qP zw|!*1?bR|DR~|<dg8rj|fr*=8!N!c5@$bo>ozn zyZCcLZs?Q1bpGGEi&;L`sJL$8BtRVi^q#BE-cpvvK;A(FG?LIvi69=w3x8C*aC6#a z%@gyC$yoQ710GTV8-^z&%y*3bt33>R{{Re9d%V=d5)Myc+9VhdvTpnfLC!M6A_p)4 zC!}(H7qF#Kr!WB{f!six=6FGJ^N%yklBpmp0nB%8#aTv*u)I=9RQLu?bT^IJbYsF z!j(igw27GO!p#2wY-S+(diVEVdDC?IZXncaj299n0S0wyft-`njPcNX zxks@-J=@aGd;b6mRZ2PLeSKHxOP|84R|x>L;Cd6-aSP;rJkVKfTvK5#FiVag6PN=$ z4ztv)B~9lZX{?Sh_e=PMpKp?TNIxac0|X9!f4Tx`x;)qrGL6jZ$o~KeY0>1Kyh4{5 z9QtwlC+w)IX%XE%#k)+II3hlhlTrO8Sr{A>{O*frGO-;008DY~>)Cog0(ej37uBjF z+M7%2Kb!{1+y4N|00HW;^e+$8xqDc^eApdx+x(G*^)DRh%(P_-8cmm$16myoaUx;D zPT~pV;shBAeU&&+t*f%NM(k(9q{eM!J<@iJHekl%)D!wG%|6ZJVKN(>azy&@7FUCG zFzgn9Lt(^EF~IE$)zi{t&1-zT?uN$iQ_!DP&EJxLO+8-!0K;$cgZ}`>9eDN1jZ%i= zB#ioYSG1cgwEqAvxAUy>sq2s$K;{nv)4xtv4_9iWe6w7=+gYptJA1sN91djj1GjRv z^tU(%47eP}QO{HM0cBC5;y|hJ%oz4%Wzg$(}udr^B&N?Az^k~RgMv~x(v^>_Uyi)X{;BtFz{dnr&X1J zuWU%)&Q3clTl=_ zeY3*e2n+z0sOz?Mli`mBZ2*zkd;LF13|bb}fubQewR7Hq4r~QlsaEQ$<*IbGTj9?t zwbqmmMWogyHK@~gy+^}<&KHuu@Wf0nu+*Rm$G*_#fUDd-L~UHsy*`0;Ft+ZzDLoSG zye{SuwN30kf#HoGd@rI}ej0Bdm+ZXP;mbo@!2V<})Ls^#$233-ldi+_j<5Jl0Qr8) z<{l;RQKn88?>$RIW7=@OJLA6sgSrMiz1KbIr#(L&Tv_eHOK`r0;_nLVCkLwVm90oT zEqS`@=BAU?Yu56b+$G$*3t3LQ_2X7^`C6C7WmR(1bv1gzqVzV^7n8KAx9sbw>Kto_ 
zBMwy?Q6I!y_BNEql$RCGsFvp`#;$YH%|taGkm}0)My(2|o7Q-&jkQ$XLi=8%)>Q>W zr`AN|*SlKYjc;WeiFrDDea04vT^x>0L;pPHNFO?_WOzTtfrN3S7#XFToqh|E=1y%v_c#eYQb4?NT`A#sdb8C2ZZsYQg~BHsi+q* zUKz5I^x}~4k_%{7=($u1%@v&(xC$4kU_{`vry(k}!&RpwS57LkDm6BO8i5@&+11vr za)uQopo6t8s7`7M!3Pxz=BaXkoKhT=novrhvq4{`RVpgwC7n@qg3hefrCCs_5e74E zs7PL4RkB{i;brO;(iKjz7njZNC=g1f+JrArscGjc0^-Dl+vD;u>w_(?x)p4}3kIV| zM{j){SARWgp7ODKr%Ll$hKCXQf_GiwUsZKY8q+mv2G|3zLThFi_Gd8W_VVo zl#~uDLh8h zWW<>qXPCz-mEEg$;m!oayP=Np{nm|LQ2t%AK@Dp_iTd*bPstbirVq{oK|OlLBz*8N zvAP+BuZd)d9L&e)oAJg$Xgxbx`>p=~4tRF0%i79qCooPRHll0fo~*w3Qm@4JnaG<_tnWuu`aU}LWX^h0NL6^7M-a^W_b=6CwkQ?kAJ(esS_wDOUtUjy}G;@Oza}*3f02)CA!3$Q_~z zQ0rpdrw#uAj~=~azeVb-Z_dW_J+u3PlDthT_Ych6gCLHh_!q0wuL*-6;b?~4YuS$H zr0`WOQTT&`2Lts>)d4MQ+F*k}-|CK`mdWIgm_%dK$7{5p0)5w?)-HC?dxtQ+UcE=l zmEpW|9|5gy9OK8L>*bZNkiCYZNS@Na=jpppFgCjKAz6usM=FuwDncy+YeAbpm- zbeUP68}O59fJk1hoj4dwW&)U4T0n)x9@PXWrB&(#lAlp4Y^l)Zgix76)Rn1eqmrlC z*|e?6T6UUAD|W*{^j@1x&_GdoH<_{UkatC+Pw@2e8W+3MbfJapbj=_U3oFokg4e?v zTn^A2GQNx94R2$EPtk8`{uCQzbJ2JmK)Rde4hrY$huf_$1Bbeys_7!2A_DWi57zg9 zy~_s$Ef0ZyA^sf5Bo6EIw~9PA>STytzV$sRwHiSKg1!y-nfP`HbAaxvn~nJC`7-8- z3YB*+XX0NExsDI>Co)JMH#jYD1PljU0$lVIUz9S3a&*?May5Vv8 z9NoUFYWxKCo2Ax}7LZ)k%HYJt`Fnu|*_=+9&z-QARIQMBs*!W0rK4+>ux(zxnn`sDRF_qMMQ2Hz^6itL$<$ zUXfN4sdS;JQ6;>oI6*`jxHT|@;+5PILVJ=Ns4h|}-4dwNR_Ls%%&HX%ty(3joo1vI zToqdFS1Ky?nvGOq-CN4Xz0)V6^mQ7P>T<81u{Jyn_LNoy)wvxP)Gsv2=)BIUq4K-@ zen&rCyl?x$6`DGjL#XM*EGm*0e;rEZyyMY6usv37)eu&DZ^%ta* z@#}zyxV*P){f`~aPI?bT@VYvcMtPdz2jz*!An;G8Wxb{TB;3+&*A|xK3=z-C(;O+h z9=@k_L+{D;{k;|TT#N>dvd{^cBy%L5Nts_cu>4tf{{Y0M99lo5pERDv52Ro%yC1~6 zR};8wBPLowlh7DFqm@3t2glJg;v>vS>3u$KHO*G`%I)6~!uG67v8a{ZFg|G3ZqM0)0f}52XD%o)_M~ z(@){v;ad6P9M){63_ELQC8wEiAQ|j9O5A~SOYlIa@ToQ1Xj9s1k|5MAg9D)xz$)IS zobWLowa5MYhcBp3NKbyo3>s@ab&(su%JBOF8-%I4~>b6-@yyOFv(N3r`aAFb+Oam$GL z4!^*tSonneAjdczqr109bKX0u8iu9`aG8heQ>Jt@ljUxsBSiw z6OE&<&yn&?pl|_}#0Zm!!H>38CS8`lInFqkJ>wrmZsNA%AVG+clM~PLzoO1V>7hc= z#5Zr7KR$Z&`!8W*X|=f!Tt4f_Y4uAS(+=8jeg6PpesaFAX=>V(L#49NJ4l@Tg3}n< 
z@%7*2f*{C|{OFxlmy}Pqj>+1s2DAePK#4tbIqlkg7G>V1J{SUh$4q>nCnGAz)UM^g z9ti`yO2)gA)o+%ReIbPk<R)TJ@Q%=|ZDOfMr<~(F)3nSLZ*)AH#pl=8o(0XNa^y!em1Jg=>04YJ-LO$MH+> z>ukBL#(FE~taNeH@=mR8doML|+?Dj-6zGMd;e7K_wuRl}>s=bmm7!?$n(+l&wR-A? z-=gd$f~woQ3cxEannT*;Vd(&=mCsiwt2t1t6~`4)x*?FAP01*j<)B}t~J~g~b&%HFS#E3+VnFx#4`XPpgo5OUh{}H5Edt^$J9$)JT=AL!5!hh+LT}kcGiD$!?oDW%Eyz{v@b>DwM_%jeOBU{%DU4* z=(@S`V=KselLr0}U%w@PKrqs0ZDnoM0ad8P!w>zEMN37p`e{$5q^k+dAp`3z zhtLY8YcN)l2&Bt|?~j}%)Us7e2r8^K%8$xzO2CH1;|nq5u5;9ShKs7=I-aMZCb~cq z^Dwi;*L3<m)#ruioWRo3u`z1Of zGZDz*e{_~q5PD~)(Fv?%jznY8P7(C#2#&o0Y)#(yyp+t;8g=bC@u#ak=dP9>t2(Y~2xcLF16pDS@ZW_m__Z{pSKLr!+# zjRtVpKe>VPSapA+*T+0tbk_1Wmp(!5yC)vzMtiHC5VbWbzNYcyAcKI}10VqgNt2W} zcC2s&jo5=CbM4Q~RJWw|65vPzC7=KcL7tFsI;<_p!7=I9Gs>q& zU12S2cQOpYJncRI0B=>dulor1U-;8al6=9j{{YyD$vki%{M5GSH1B8SBXp6NJs^Gl zODj+DxOGM~?E#Ju;!Z&6?zOHeH)+`H5J3=nPtYHJqq?78_DySP^-I9()DUEL&q))+ zub}BRvdG}Xj_w1bNnbP2?_OO=xDDOrJ@NklSzklAdxMzZNZZtPG5Qr)!>4LjovZcFEG?+lWXF}$`mZ&uUXtV3Kpuyu-A(w5NbpTJ zd{+Vn-b9?vdUS6Y{TJ0)O96b}!Y|97=79zW{EvC``!A?9lF&zGj-ge}Qwfi1hP5~V z(2NBotDBWuiH>7QUYkVGC!$t`qi%y8m(lcX8Y2rXs)40wfCcDi*us6itSe1o=Cmt{ z3LMpC$uA0ZD*ILY8wp)$XC-f0+QN#XtS#YR-GrK9OUePX+`e7-%Uf+H626s6v~s=| z{-OLv8%4r@khIT%`P6ufn}&(T@%pdXzlHZ(s596vz#bZ^YTUP+xPGhp3;1KFZJ^2j z0Ek&%(DpT4V5$`EjIBdWE~S`E9ZI6Z@g{&OwBm5K*u~J1$K-FtZ^MT(bDG>qUz|Ko zqLw)0s{WDqyTc8t!-e@9@oVtp({qS!46XcqV}5J%9mOOxUzM+{Y2~Ejs>m&ZydOt;(=0(|RKHRjx<5Fo(Gz z?nP0_4|27*lmw{WguMk#p;sY$EduL>;e+xQq0uhZK1jo;`%i}NI9}@7;|t{89KPXw z3r@Vk^XrqQ!>c4dx~l7xhukdc7TsMf)zv3gLTf3Z>f8_zowY6DZ5?u*M5hv~(kZf< z+)4yURFxW4P?&|sVY_Bn@jiSFzW?D^ltorLty3MW=fn2T)T_~Ejf(j8a zauqaFQr;67q2WRmT!=1u;DdzKY6i7A;VC%`Y%0|kDy<3hQn76YB~@xV3#zqeNt;+C zIa#a~auE+yt2I<|y7%jP37@>{{RQN4iCwB>iR3&(67v66$G-4L zJr}VxRg`-PQ&OCAIYyfXy#v`uw_6_r zs;v4s@9YoXq5^@W?us{&%+K=+TH4S~cq4#1rB$XfI?w$|11sYH06Qx*?go2*IrUcU z?1py;ft(EcAa0aS8=Mz*I7-+FEh7R76Mh!eyL(;m z<)zIsN#GDk!1D;lMfOqCHSa!E-W(M5g z2w8f+iK&K`w8RMupRWCb&G>^*_>SJ;1+F50`1|$$04w1ijGi&2xYBcHdjp#h<=X5M zJZ>TIRd!f{UZ6AC;gIa(}4f5|jbbd_;aD 
z8I6wf4up=r-J{sAVQAgXb8Xmiz)-z6&J`_OQG3NZym;mt54-lyMS4*ApJi+^a_`wnm zM>7Mjd@oFEv&`ZMhX%$7EhEZKLxy_~)hAA=uz(;Ml_9*CBM>HHeSK6k_SR*kBIi~v zY(Wfbq8#7^WR5`K6T+ehacw69w;Q-5OiYpsqvo}5HBE(~tpwDlTaUb+gu(0;E2}27 znw!f(83)WZfg zSEunMc@+6A1I;68*y0OJm@a}MCjg#!UTU?u1YiM@;V?&05Drf(qD4x(X?if%0&p@~ z)|2!c4*aiEQszS)1|LX~1YnQpIg`3KFC)2?HnpX}(U&?7J4Zw7*X9<+n&*5V)00jF zfdgbf1cGJQl5y-h&55s9q3f&^!7U?zWN>Hg?!ESrt5SOpoB|vS5^^J_RrB8s?E`DT z+T*)9$OArrcIv&ArjWlfzS6|-@|&y zb3k7?@xH0>DAOUO(9PQhBzyXE?JHZu*MtU{$bfx)hd))0Qhgrg->6mEty@(tXwG3F z&E+@E7GYz067?=spHU&F)W}`~@dLrks>gXj!Cvjo!q3#|jih=igI|FE02F)|@LudM zk6qG(!u@;r*QJLOh52{H78_h}y>Aa+BcqO)woY2~&K4yMEsJ-n>C-8|sU-}qSXQtu zN_kCsuHu|g7Ye+q6_rv|U>vCqN+lCqkVQpV#aXDTRSKX1asu=(a#?b$1ARrMrCmW- zu6lK)#1%bqo}=ns9>3vz8%V#x_{M>K!umFceZup+oa2|M_NL(})s;b83U{n5V0o&C z;-t#$>O<(HB~=N|NT|2RLMasBkwsVvTERjBj39DG;^Y-LM1|;xs?`-Yg>Is#@~R;U z0YkYIGTLI&o&utJKP& zY_HP_OA%RTR$6sJ&92no#YZAxOQ?y3YYAZiQWQBVbjoE*!is5*Q-rASxuF$B4MrFq z5>IA971s zCT^ppg1lC<;Z{0uzLRj-3kv>p5x!{Fy1Ro%~?Iq!+{5pZEPoz@6m#{2Ny?GiGv zs(4M}e5Zw^^2A>ijHcg5SWRtEV2(#WKB*OS#{=hYaC1oJ(FL4StBvj4yv?ztlLx|@jq-PRI`sZA=3o(h@a=Gp+c!8y$9Y;b?vmR z3D*?q&<({(ft&j{4*bK0AxB+dFtnWxrDnD5YltD_;Ka5>0}xIL02yCZ{1*HYt$O!w zrsmUv32-2gP9i`M2lYu`qJIg01nuo@sr+V=W?k+<2iqlmEtiz2Gi4TrjxithS2tc? z@D=&9MRh+U2dAE&&m?;U9X2RISt65wJnoR4r9l()Vv5a(@? 
zJA3*L!_`&?rhcf(t(l2wkmJD_0(kUZ2U>pWf|zXlN=5B($PcFM%o77|^$5&&L!s&) z4eqAUOLuRHk>(^ElfaMBS+TZm??CF1PCSm?xrFy|jih}LtZL}?8>$V!pZ!35fP+4T zT-5d8V98G+bFW4EC^ERA}b!$8_VYm203E_UvnB%Gf` zEu&li05bqUE)OShkPChvlt`J_N&MIwfE`sUd*W(CU6Ky)a$4aed3rdA5*97p&}(UJ zB)Ya>Ohl6AyZT_cqGB?%?LcK+<)TEtKk|bRIXNQ)^hsNXYJ0Y|!P`qb@JvYq1IwO$ zGn9AQyG)IwHe?bb1Ox1QNLW{O%#ukY<-;TAAmRe*&z6k9#7+SRtosF|nqJLH^u92r z%)^}K2N98i;4{<9KQY4dm-g;!+W?sXy+1U@NrAL=Jpt;qDc21rG=>=)1`SFJ$rGsKkRmqPJqlIh7bj*a6^AE)p>Z+ClH= zyGn4STYWHm#F)!T4lr=4!gvOh6)C%dDZaIWao{u@CiF}U^v@1I3u@dkr_ zC99lFy_g=vAC#Ce)o$8YH)wZ%U*;54<7-FO0f^!;>+9Tn^1W5Im}*3I?7n+#XiK5J zGwcHT{{Z1dtEt4v&cU#FngbF7t6m0zY8mBw=AZ{ABG&=@Ao+$W&Kk8Cj1?vVQ$7Tpcb?R zzneMd)_>fd`@#A{C{k-02LLVSGFP7)n0*zBTEBJXT5+r?LY>No{#+&0)MteP)w_!7 zttUxHX)Y?|DsX`W_bgpjz6#a2d@nhxRpnFS?Zliik)3ZDmC>bwC7Q4QPr0VmFKEzWhm-NUJKdjopP(yxL60& zi6sM(xhcg#xD^fxKwRLQMT(UIr&KG|0;gCjiC2{^Q3%$Au}Cg>T|}vQO0Fu_3X{24 zs;X~BsLE={Rq3lt3TZ3VYO3I`RtlTKl^JeUofVmGw5XJ*!PE;rxLOqopal+OL7)v#*RBJ8)xAax#cah`&LJayUOdNOOPe@;^{{RSo1Zdh(X|Bnr*9VV%pmofM{NH7<_*bGZ zdEU@K9e>AF?6sX!FfcoMe`H@Ju6q-y>frfDsAKhG3d0!G-v^4H) zip8|aIm?dG1EB7{eP&)N)Wb^})`p(01Ch^8tI})t4s|Ax<}|7YNXcWH=Fd#W^)ESd zb^)hdhMwAm#qERTba(u%&vVa3w3P8fR-*O*=D~GW)O5zu8be5@GnTc}&lA9m?08p3 z-AZZ`@d0swNdS>B5A}B=zB(!zja|e7?a;vS0vi$LEq5Jbte>jX@Ri)M8!-ffoW>74 zVDNgP#mBhQEt&9IHZ(^tIU|DZQ3xIySR z$>Xjeb;hP?KH-i(5yp^g#tVsqCxIUPEb*=}E}6QaE^h(Gk>oTJkpKx3iO;Y}yl4Kh zNis-0Vh1Ck8SV92=D1@TVo76wAQ%UDmVw;FobU`RZF<<$A(8}*#mz1Ous3;u6EGmO z4E@oJt2WxmI%*Lx($LofBywHJ5z$f9Yi*`7A`4hZ24DjO8Hkbuj(yhK+|y1Q`9!yH z#`w&0IqD27Cuz@o)PD^vZowW<497g2Oi#&c$tv1kxQsCp2#WP?dS7usy=o zt6q~r)5Es!JBgE=k_pC7ZkSqD_Y~Ulz(A`qa%AQvPd%fYDw;L9Yuh3PHjgkMwZdi_ z1d?;cAyzw0QthBW;yXP_F*7DTPpo>Zy5~z?<4L(~8f1gkU>-9w8OK>rd`3f0m`s!A zjEwSggV-+zt}?FYJO2RTvuW~}Cpia>kRTk%^jUPZCl?T8iIx+?WA*Kvslt5~nwY8y zacIbmyhL>W0Hij$PWY0)m6HC^f;#%PxMk76O9L^_In3$SdxJS6h(EGdfc$9MtZNC6{>LLQus<1f zhh6Qa)@6=>WOY1)IP1#u-UYEV+y-RE;(z7T6CX$dR$YE6`Ugz3AJXg}-}YZs{5aK{ zYDD3#B24_hb@I;-)CGX`04=L}`-Vx!-Es7upRcr?Nv~)=i%wVP&%?jOml~f9{#Lc3 
zCz3&qW*6M_twZ7fM-Ue^LqWjZ`hXB}gW7&H{{X4!bM5UM zT=5&K2Mv_QVoxNP!$6)F#lQabUDNab0J(d!k-PSfviW7*z>g>nJeM37W_jvQIWnC= ztk}^6&PY7RCqB#81 zE#5<14stUja0(PFT{{T;4ffX<6GPOpLNJh|LNFUXX{Sn_@oala!{{Tat0&Oim zol1-Xp@L*)a=%N-d_r5C#JE;}%$NOI+ML^WGsRRXn5XH}w0a$;MQX^pBLxE0PuibX)FRSNA@QlMHy zRZ^ux)U8r=BBr51OsS}qR3@QSAIh@gt2re@X{&U?s>N!VN~slsrBEx?6*`JQX%(YE z%Q1ysw{il*%BNf_inV5-nbiUorqu~@swB|Vw>3G13z~v0iQy&_f>>p|B^39CUFpCk zl(&T}a)%}Yz@EUA(4N4RJT{teNosRebwpGOtAe{!Ds=)w2&%K|Eee5AtkfPB)WU(G z93utDJ*V~&bXHBWNLTvh1zPFL=uXuM#E+PcU zz>lxdHl?3_O8Vb{{28&VsNvFbjdS%{x@9p+i$@m^k(QVQb0>yww9-_L6w*&tISY zubX(H{Oof>_LsPpbOSRJ+a+u24kmYUIOi+N>l$$+6Epd7F}IP&5#4PS81%Jzl-Ti5 zZFO8aCJS=}k_Ki52L&fgEi8LWgPP)99ixB%#E(i%k)GV7&<~6PW zZfOz>#0-*pl6b*gGx2Q-jBPN|-Y0AxK#l-C3C#Ceei5%CqZ%9tcSFF>%acDOHRw6S zRHl5S2x*vUjs|+?m1@THI$O7C6Szc42bmm#_n-R) z3z*^wc>JN!{4>H0I6!NQq=N8A^twzmICYLA5Q6G~EZhVg*fJZpF|r2)M@1{q8-VSO z{*9zI43CHj8TD`{9RO7ICK6s+PnctpXM!8ojzGfIdjMl8gVS+t0G#GSWSsZRs?O7* zO!M0q>N|jVG0c>HGOX)520Y*Z*dd}bIi7Rfb?5$;G?!+hqz+-Tl5>nl@T*r&1Q6Di zfZ>M0$EG*{cUYAnuP%btjscvG4&gDl_ZZ5lNE(Iydcm0>W<4{^kbX-40Ee{l>U9na zXO{_vPaOzx9hP0YL(7;rGZB&o=AH&*V?OIe*6*eq+zMwE93z;)IhYv}$R%l~jiUB| z=sY)=iNjp1a9dw&V4=C&$X-F>9a74iq%K6@%ShbDBvS7TB-K0d|erVuUa7=`M865+oL)76W4kVK9?k~%9o zeyCq9beaA~_*1NDWKQfE6B+*iKV^m~7PuCYKcA}gbUg;8WCKPH;ns7Ght*_T)Xvk_ zlh_WY*?L%lyu5J+MfIZXzY4?Iiry?(I6jF`=V? 
z1TQc79!>cI;{+>AsF2ntHk$H5!RWVgcexx4{+y>D9$2R#>0CiySouxKtJesk6}XmK8^?Ee6X z8rQ@wBZctYQ%`f9FuS-dEtLz<)X|9w{+D4K%JnwZK!tl6^It2!(g<13uq*01zMMT5 zk-N2tT1x5Wyy&u0>qV$(rM-%!YmBpQ7dC)asxdAVX(hCRhtUa?)V(cKimsv(jR35@ zE>R6t_Q6)EYP1|uoUB;Pb-K500PjE$ziHVUg4EL(B~7esHpa;ITNLb!aJ6)+5Lcw7 zp~&HKciSr-5K}2G2i1E!od-NB6*MxruaOVd&M8GP0V|HjSXvdjO0s=7Ea<~rl2Ml> z5~B=kQuP$kJS1xG3bjD3y;Z0doR%rn3Ib(7lD0-1!imXjDTPHiuTdbU)Jbw$vC=Bj z6-KKJD9|c}RH&`JR;m&rTCUrLNGrC*QLQv3)d{UZRY~JExK)W(?iEL>h{IS@5?E8E zRTu)8#1M=HIx)`*tx&EwDzySyFsf=vP}MF>Vp^Kgl*HyHqZ(7#tAzF}>{cS6RO+=} zdaGC|3XckvV6VAUtklA@LT1&>YGDfTD(*xImj%rTu=J{-R6MKo8mUy}5)CcESE$8c zlmsfUkFgnEhS=aN3sIH1Y*tnU;>lGnX<=yEgcp=>pggs4l-DvsW#t^AV{oohk3V@y zQL0w$?Fm?Qsliq}x2qYNJ5asT(bxP9kc;l+9 z>sA7I^H19!b)8x6E;1y_?MAuj{UjnrE_Ik}x!;rnE6M8C0m08`o%}}tcMH;8T$i-4 z_9Ne-^Yofn!p?T|>+H4UJVv1K&Ovkzd*|u(S(>#Y<=e}IlHz?J4v`bpdQTAOg{~yY z4`_2+(qkjEj$vWk2SD=73<(X#tgVhb;c1^RkrM;}4^DdGW7ZWL`w;jn$m$QLZ+}Ir zsZ%ZlncO^~fP*<7l)#RdAvV^=AW7X4TbTg*@$>Gjq^$iafj(3DNiOpa4?sFdo}p<_ z(lRp>KBx7M@VyqC$v>2Aa07^kH=k+2&QMnYZb3W8r)6*bq>d*dJpyu#d1{&{2bm&Zfe;DDxDYu# z6?*nz@aBI(aLxhS$&cqn=(P+7GPeMH*Wj{`JH(~$JWdn86M2dMdlN!IWfTmW_kdhmMj z^iE%=uRCK);I-Qu1;=xoS_8WP4`YcPk~In>WDUjgNZZH!h%*K=$XD;{7neSKjwTCp zfJOn&PF0E&*jNeZJj0P4H$mLcdCW?!SM>S6H(JH6Suh> ze7l}iYP5sSz&y=6i0Pd1#sR=8DAd}@hj2L*-PD35!J;^E;D&;6z=E8hjayr&=D2xD zbAcm}&`wP9al|Yd+gjte2jJ`k$=oFF1Gh*rud?l-yXVXxv4$H=N&NL3`0}t%mzhet zwU(DO679e_;AO2m5ID#EG6O?H zoZ?A|fCzJ=&hPkwU6tqP*wLgK;z$BWrsL*z-^}Mhf=5R45rrCsQC15YNH}g_Qy3-N zf2q#I5iyJM=&S1*=aLU+T8On14Q$|GC@B1Sl&(Vt@UGp&&xkmcBSDbj4u(V>w%zeJVc&J zCxHXH^wf3QbqNjroDw*ne{Zi&>kikA{XbC+O-Oo zoJ#zE;vE(AY5?Yu8>B>VW7&UZ^=}R_^vxpC1*AaXemwki{{Tz%Ym({>Eq9otBbTht9e?DK}$D95vsNk;&>wocx!s@HhHG zyM4n=W|n~9To}VX-fYDC75h8z1I)OyIjrz=SV%--FTE9QPX@ea^L{{S++ zpQ}<0VLprVua7lN^+*SdExCc0UWdg8tgSsS@Z7zimaBdW(mo^F-erD_@b`yU)--Jj zy^9~0bw9(x$-?t@w4F~83-%_z;HErqK1<^sG5kF_$8_gr_!gse-FdsJlD+4NJR0_9 z3@-(LWai9;(a&1fo|N3wr?Erp6fL+~$4uyo)RlKNTdMV0>sYM=QydT`!VXnxfh4O> 
zYD?OM!U|i8VqHaQEzVZ->bX>2RNB?bo{`h(It|YY-)WUF7s|97ju+8%TZQI%m~`BI zs4Ba(@UJ(7TNee^So(NJ^^I1x0Fs zRf=sFC#KM0K~SisRMk_bsx_4=s`XH)*DB#5uT5jsP~>fTvRahJm07P&X<5)Wl!}SY zNve^NDoH?8?o|lJ6yQ~AZcl1vU@FBjs{pK0Q&ch)YQbA7tze)kIajI`r-fRoaH%W_ zyAqmJifdU(TB8uPTByZTgs5|Zp408RQU%&04EuVrLywDj9-sPABEnBAwBQ zJu*NXE5Z&vHxKN!*y+;#1o4vFe9t$k}P1~@(Ydny-X5JXHzPCa9~BdHSD;yG#Q?cE{t!1H-w z+>lKEym9lCYNTWhFdB>;Hb{@@9cDP)A!Sx+-0=WY25KG)fIe2VM;PRu3iraZ05|}id2W2sj-AxjzG$fDxByFoojDKk!$6Nn8%G=>u($ZG83LUx&`viXyE6jMmbpC7_!?g zssJWnHL>}LCji8Q{Y3D$ZR*G&v&oSY)9kRTXlWNxMlsY+)oxl`k1!FF^8@=Y9Yb(9 z1PI6(=hvZIS~1%@z|Rhzp19}ix%!1;R)yhfw{r^hrm9QHJxGJuRiqk0z&@V+We>tF z$qPD+dyfoiZjwZ{3=BlCxcCppoKt@z0QU#`FMwzlV8?D(tEGNsN86Po{Yy@ucLYKq zVjN)c{uji20r-IFEPI9w0&-8M%a!(RG*xp$feLNFw%sX>nD;*EcLBfC^H(Y9P9j`? z%C|>O1ER+DLav=*!q*l^sZszBWYDB5{{XW~sbsG(AbV0O%{CH&_Jpk+sA?1&DfY^( zW-W(hP`RcR%2>s@Y%JTCt&6%Ii!QC2+8Y_=Qo5$JYuK4fscfnTgz?2qv|M`vzb{=@ z-Cf?UU!}hno>@WNFn>k)t415DN$$1}vHlfkfRN~4akXSL1>nC7>;q|xbYDVfSlfZl z3p-NKTTmGpTJ)(aO<-C32gdwK_;3vQSI4|v_)bj2SL_P(8t&gkiK+NikLW&&PJEaj z&#w{q4fN*+3&Cm@rG&5EUNrnP1Q>61`JeF@z&i_wYaz)gyQPh#Y&PH(YMa-eZDnSa;-$4-lH`$eh*t<)rM)7dO?^O=@`cWbdRsY8Ap~Uw zYtiU+xL-)nYVf{Wqd$f8e-72*ad~-R)qBf+0(fGtr<^OntAg^+Ur?s>(2+@4V!^(q z6RDs~VN+CM2L&>rO{+*oN&t)~1w1P6R%$BFP6b-3u?o#pg}gvOQiy~p0;(lSZA7GC z8%ncGs*P4@s>Nc!2r8m!QBh5Xrr@TTT=1caC|#hM;FWHy7O4tV8nfQrN7je62fTqN+CRRhybJ6)RUQA&{}6urDb>qfKk-G}coBL|t~O z%y->itlTP}3oE0P)+VDu9q)vLNqSqYJ9A0xm6Ylrt4^-?O3GeJ^>r_~9hNtSs6_iE z>NkAt2*hVM|sDxU9J2cg*x=^;{;_~WlW=(BLoh8QeSpR$bt-Z`JkZV zAF_+5jk)w!A}T51$7uR7_wcMe7|&@97T0tq4@9(r*ws_J02p)Gfo50r70U(!st z)Ew-A!TT!a_y>4t}lw0OXk0I$T3! 
zoB(pX(|-~xRVIzCC+{xuxzHM0(c6aM>ka{DYA&U4sLs79t!n; zf&BjfKew-&j-LBb4jg z_8rhf&IU$$bTN^^Dx?xC>eavimXJC5ddFkcc~)E1u1S`PTjF3q0DfF#zcNW84|TrE zSrXZ4-7ef*KS@NILlcGYNP9GJ|wG~*n1%GMq;^%jX1w45N2TAPMx5m9a=TyJlSBoyRR5Gu-U{Y`jQCdRh=(Pox9pQHH1#!FeX2B74;S|&U|@!US#7Lg%YIUbhwKF7$5daYb(l*KDr)ktcB#{j=039*G zCMIK%-W58PBOrN*{Ko)#j-5ix)GwUoIjtm;Aom#)9f-;GQB@kXHMX;J2yufD&@e}~ zPCjWDnvgLZ5Dqdk#{~UhQ_!p))&~*^8717n%;&GoRBh2FLvjf_Of-o-V~Lpy8#Lat zPS(RPTuw2@JM^Eg_gceI-NgRIc*`w3yQzrG893@s?6&MKWzA;|0~|^D{{Ur`gZ1w8 zJCFnwyH4O^KSkteSMtUOtnjq#^+tjLJus&cV`ogPTUT)COn;U7WAN9;jeRE5bD`MJ zf9>;MnJw-|@BSB{(Y!|bPX(v9RhLMw*_1#r**sACFPDBEekBUKTH_EP$>gu9)4309 zw1e!iHFpfCm1(xevb9Fg1cNz@obl+GRnupbs+Pe4P3=k~D-~V>sN*?6Rk?DeO1)Xj zICAD}L`!j*C96qVJtGS8LZf z$>QA$=&LnjQ30-wqZMa_3_IhgHB~}T{6rOR>2=q=&@ZAgzEy8@L0*qc)*>>p@11AU z?s`_FA#1YqdM}Vzc#=A;T7E0XE0*}!zfRIB%N06BT47#GvkpYkl+MN*+w>MC_r$V&A}NKL2`mawrTp=bTw+l{)S+v#rdW#x*t*KMO^TpX-qfr=MUF^yrImZbhsm^8+Tw!m^I=y7v z8CaF<;-qxtXxrAxzh2HuPpa1$)x9pvHNK^J9eSI$5zw!oRHRzplJh#PsDk0o)l`hF zd0Seb5YltX)T>tqe zVB?X2gY;Qfl^PscaC>!H#Ik9ET<^^xuHz6R1L}`Wzuf?w?f(G!a8K1|TxtcCTKw%B zff0fZB*$|>UJZFa z8l=18JUy}^0sjEC`*XtfO`}9fAD5rde|7M0!;K}hpGu5*Spy&O{sr_O5Nf~fu5I+D zIqd%cb1i%65Z5xEK{9_V_g@X=@pb&ab2C zaw*Oox2YHl^AF+%rFE#myDh5Lk@=gRPc0rp)20{itG~njGe*;DY4=69XLVY(uX`Ku z8rMeFmoy2i!c5e`aN7V6DEw{WYjG!_oDWI#>=)jD7x*4O2Qo=5{_pnjyYgPE^Zx*e zycI^B7gVQi&D6w@8UtJ;$vq?vvmFOqN)K5Igq ztx_apzE0D`pF=PqY-qj~4RHX1XK?MFPyYa7m8|Vi)2y02t!E^XdisKTj+k2&bb16ElAYC0d2 zWWd2W$;`m)Fh)BpZBE^khUf$kJiW}~du={w?4N(*4x3Dna$+`t{aHI_rWcd(4~iz8 zMu_tWng0OTL4bCHEo)4XKR-mVP*?Y)hZ)+_CIp!s0O(2ez(HzUU^oN0qk$1TOmatV z>&|I4oMEoq&>|qnk-&C(jtGyk^%nPF9FAH6oNa8AAdlEHv$gc zuzkOog@u9XEN)D~b*1^=1gaWkGD#<}gm<`-Im*h!J)j;8XX8b$+Wh`q2 zOoM=BeX~XJojULudQYOrqtd$RSA9tLQSp^8_sy6*q+;17S5);*Y8s6QD$KYlrCJKz z<0G=>Hy^SMk5w$WXjh`CJ+Q7)#dozO%~p^+Ejgk-S@_|l@3nD^!TlHGEe)lY5OD|h zU%z~Fr2JcYgVJGs74c<6DZKI?}BK&6)=X;Vz*qm-tU#Eu}9 zGKEPfQXW-Ws>79cgyKl3tGKH#Ri3oa5>SG;xeHvCq$i}5=M<$hIi)p)D1>b&)5V6l 
zOUheHmlVMjKc8f>Q!0Nx$u1~VS+{b$!n0_VdYMZAo>e;KTOm`pDx^t4HGw2KKABn; z83-N|lAfTjw*-=!N>jo#U@9d7nBtO=7@J8X6v|~IhlJW?Box*qR~DG1=xHN+RGOtg ztGp|90xNDvCC^uJS>OOS~`7B5n(r4#LHPxZdJ-w^TNi(tKaav(~p^Y+uM9ED{)R7l!H&H z!q0oMuk=+aQ_A0xqo+fv^*T)e=4YbEvSuM}>GWeA*F>7bsw?W;4o3^k-kj|#ds1%0 z+$^1G>@AVV61DYVuPt#)&%oskSA);}DJ(8<`I+vz^fU-KJrAi&n;svi*vaPvtltw` z$q#9QCQdR3?S)p>qsR4QgYLTK>RX)j^jFDOg}8R-RB17d&2j6{h{wOlW?Q^5uFmHG z56GA%IU9M1;c0lQO_|#m;P##!?$<}6!*zY3&cmt(jK@E)`L2n{Jyj>Y{Rtdi#c0K7j8x+RBcB6`69fH*v_Lt-YLBp0)`WDm>f5;_IqG`p8kzM@_+ zPUe|1c>bN;vAC>z{{YL=r^fwz;XPAr&TxL-zbIZt$MK4TZZ7N8coiL7(WA7T&0{>h zG6c?eD`M-*UO}D19WYxXqK#%@+0yI5x@?H@CoJ&E7r9`m>kj83FMePOMQPuhhM7J-JPqj9$F-F4rj2O zVQpR23;-|^NFHoV53uroDwy{Th{7ftp&xzLq=Z!<-lVY@4}<0>#k)$ z*=tziGJMV?p2GkEAD4g$=$%JV4cZ7EU+d3#CVA_Ss|u<(m%1SEWDYxt0%xA*l|bpB zZgaNCz|2UQ0t6Tl_D#9fTH24PO@-3X0Uwm|B#H92WU6*n53C-thLE&|dOGiFP7@;O8peA(M&=UG-Hs)t&M_g;%YITMjA2;Pb(gi( z91i`l%uIF6<_f^qDi=F;004sD{&VFs&;Ut{@~PR^f&c@}xokd@o@Hx$t1=i0qQ3J0pHPjy(ds{Xo5C^B=Zg+Nc52h z9o8LkSogGV%Rp?7;YBL1cbM=9Gv7H`R`nYLAi?RMaq?P>yn$LxJO#Ac;7k(mK%*y$2O+`f1CZ{kK4faBU1 z)<50@8*@FnEU!38eK_J0+z?i)?t!(`CW7IB@~H>Gj*8>nPeo4UsmP&J5>EJ_M`PC?E_~$Y);Jg4Yh#N7!R=nJPSg%S7HWj(zEg-SPM+*oA z>Tz}3&2!Q!1otJpEgMR&33VEpP(4&CruM6PambXH1j~L^YdZHDO)yus(`{R~h4bAe z;d@Oo^UAq;>6LzfrsZavl^SZpT)b9`Qj9rO4@?eFty*i-fmG?aH{u{^#mGz29aSyW z5NieLPjUfKM4%zTS92AWI}xEs(nIjo{{Vu4)KQ^CgtDzwVyy6h8AQT`OH+ax(vDJ^ zQOczkQCE3Rs#RyX2|AON`Yu^?oU|x80b#{w+`U|JR4!JLMF%T1T&V)A(sFexz@^kE zno{Z&oduGV@{P%EQ=kq>Dd{dul8uEdBVj;TX-=k))aqqTUanG1a$O>vrBr$V_17ULl~u1Os|AvZ*|1U3 zVr@D(N|RFBs@Afrm5EKN-K9t?DsM$vjD_U2D@Y}Jn@g@el`T@xJA^`criA@FMtyg- zoiJSIU<6g|Na()n{+Ij?D^~c96Xu4>+I|M#;ivjceR^T_oaUJ6hW`L_^$Pz0j=$4K z`$ig!d3b1_Z|AE0XzPl0^!Au*S~>O$<9~`jjh2FiT4C*Tpo~K4;!p1VbM*Xms_Dhj zJq&w=;q_W1u=FkG<z&eN2l|m)35@iGrKQqffL``+pj9U<+u_L-}%DIZlvl*8TI`VIT{9`0FPcur(U@& zb0EPTJHd?4bgO-3k0cida8F1)$FfeLbS6B)0C4pYk|WbFRY7Am>d>~CY?4~jv=Vvz z#7|HnN6~q2_bm@-bos|lgnn;q4rh?PHlT939p(>)Bb;tg2Uc!=%?a<^(ht6W%x(_g|#+ 
zZ}eeY?KRzwJqYT1%KNv1zX5GD_Jc~BT=)=R{YgBZRmJ?D_~(PgKjpsH`V+<1_W2Ls zU*NC%DnjD3X5sCY2XvlY9XkQcEpHW8Yo7ArTt*|L$4_PYS5NShY1`Gft+WP%v`;w1 zPrCf+t62xR#E6#?06h;g@?KB;GtJ-fyuJDR-?KUY0BQapyyL>y=)V!0n%$!_0tCsx z{bhOWTGh-Z0g1^Th5-5f*V6+?8#n-BVodvYm4SU-zY-iG0WR|6aEQ-WdvLzDf5R0W z7TN9%6-6-90kb=%3EBr7HzyqOsXJ6Gd1wLyx-`g>{aquZg=Vtr2vFX(#PONxgLdtmYi7zc=gB+p)`YwJ6nKznz# zT2A6bjz-am;F-@EUOu*`N~wxRaBdp_#127m0LdQemDOht$Ozm-hJQ%n2?TQkIpr80 zmr(JEZVTMvKQTL6ARdiqi1%5I)P5tk5Mj$LLOU2GGVZ-V>DU6YsB1%c1ZD^wiRd7ld*xNBQKZy%!dxO63`izPBhbO;h*-5R zn&%SlCsy zq(O|k9Y`b|v)Hdq;r&IsOQiXEFlKtsZU>a|Pol=~aqUB#TH#I7I>#XOl4s;OS#|aD zz&ALV+yogiCMHe+kD6K3kHRjd@6RSO3F**qK5N2i&2yaBI|FgSnIvXs{v)?!rzx*d zqj=Q(X?Gk0Fny1=r*-uY4|uh%YlDapk=*_N0Fb@~NyIhX_?VoS;B(G53GV@1u(-Gl zEfa$TMo#D;xPd3P%`3!2eS)u!gQssZi0T37Am9#oUem!B*`3>gf+Ww;d>y(~aQ2Fu zqk_-?+djCNo*;U{`j>;Ny`yO)$K@~t4#(HkWqf5{qVz3rumaZ*d0zWQ{8FsUmjl2P z(SCi<^#=@+eyh;a*T)~J3og7`%D+Wg{wtRb$AkVCrSR8@n*RXI;7Rq5lKi`8;t>Rw zN6lNd@o{VxJcIng$NdU#*A;JD(2uImsi^M2Uz@Cd7oO7(c_K5!&!Fw^3(?vBI?)0g z?2g1ldZKt~zT;~^4COMF??cM?t$+0kYzO}UxIU;^{-CV${{Xm8NRQ{)KlHl)00H+u zh?d~KJ!}4=EQjW*IP_MT{-SIh#Z3Dn{W}Be4Ou=W(wkb7+ItxHUp{x^-KYH1Y5q6b zJNm-Yo+y4q{B`hO`+DWzM3m3MFBckaXeS@#eG~ET#CF#pmO;Y&{i14Oll>)ib>sL| z`_IAp$f&`di_=tW1@I5UUmG`TeBBq=Exb_IH;}xZxynrnw-8fl*I5Gnq^%n!O3MO{ zr*c&mxXRhSv6WJthE$T>lcXlNBeu7Iq38_z%Wy$oF!8U4gMq^O(F+S))3-F?Yt6+< z{9E{g;Yf3gFOKR~n@L}=zYu&cA=vJ}Hh9;=<*qnhmy5SY9Xtz)2PW2J5*2qjcgI|% zxeJ1q6$Glq!XCMfT=bIUkzRtv?iZ=C)S;`uTi07QmzZ4L#x?sCHA-Wcfq0tHz@ux@ ziZ&z(1(Ze9OfTI1!nH=>!k%yj8{1-1dQ%gAUIFxb~1Ou6pKoieGdn&EgTK%vCkD6UL5e5j3dO%cow(246kImV#F`m6W5ZgO&0|2)k zpp0iZ#~AE`PI+@=a%6muJfN*l;qG@K!=MKyGv5Q~s#3+u8Y5ofSnaOb41fp(5xbfD zafj&q#xC$KT~S>6|{=AE0j0MK33gPvW1oE@MH=Q6#8-q!$T35Xs3pSsyG&d9%HrLcsd4zHV)uzGg&0Y;U?iUsBfWNX*7L>OXPvUp3YAhdJf>g3%fIPonA8 zFE^<@tRytaEC+Trhhj$VCVL+1&I(@?Iz#-xmX1Tg228d@1Kh7yUrlQROm4{saos1> znGw(<5s#1nKwtx)5znf67!-7$f;N)Txv~I1m$w+tI5MV~8+SF$aTzTz)||wSOblS< zTzgvb;Zen)I~@a>dYQq1yxzC1Q)``0nJr^T+6#8bj&dZo13W5_mg!$^4)~QFEz5S( 
zEn`f`+E3;JIVbxnRc_5k)OPW;yK-FP94RLdh*d4O%#Qy-qM2YUCH*^gvA>8iT4q!Ll z57ezYi~4lSgIsok1Y`l}yNsq=yi;Lhxvp??V*$~X#W70y1k}2?%&8Q5g39e<>@O1^LcaJV?0UoI0A5g;@O;fpg`;I+V=qFfN8bB zv~+R7&m{2^?6E1(+~CNbu{klH`wM?u8-q!LT-Jjh@oqz6B<(y;?6IA0;_c0M7?N^l zA2SO`f7-i=4ygoyOyeI;(6?+aA%GG9Ck>EhBObi+uxUO9Kr`#nShsU`G&o2H9)ll3 zi#4Qs25W!@wV=77@<|_*AI#avgN$^_w{vXTxgtZ0hn53rj*}hzB#f^&Q)%1)iTuv% zKhg(sa5Ao5YJ?Al(GG@}7aVRJzyyPyaHrKZzol7*g6A{<(m^gDWBOg3e9UlHu(Z^v zQE4tAkJ~$d8=g55K*>Jw!uZ`!7QM0zhXc8;>zN$&`f#xCHJXv;sOMymNi&$t^V9P2 z5!pG*H`lcv#F{|irl5EpcrediGVEY?OQZOv;TtKm;txaUbNVlhJ5+gzGmWFrM0|=n zI@hVsxcw2)7j^Btuox8nt1Tu%UQ+mi!wYy>BbNr41_3AE(mVB8_rHwSPG}+9 zT@fwg9eKl#Zi~cQ8Gtz9GM$8o90F$^sbfpSe)sV<@#8fJJxC-I_x0c;pB-E}CZng^ z%BtHfq%?^HV4Q!dxZk`ubv^_#F{Z@~Q>wn1o zPxn?Sc;dMJw|!P8!d&`E`o&Hjka+63AD!P+qxgqhfBZ*(`j7Wm0adJ0$iGLjr}%+Y z-*r75KxrBt~^ z%Iz|~iE^9s_$HY}^~wP@8?7LHn!D=vPZY}0z7eO+3%sFkSO%9u=d z*I#-31o0&*alGP8$@Ko~^RA~Yl)VGSuB7M$kLRN4li~9|zU%mtc*>Vga6i%nubgAwxbnF{TmsB24|$yxgTo=Z=%-bZF+e1uYp!ZV-Uectw|xU``BZ=wx1Pe%U$PrnP| zY_Tn?B=v}2Vep+`DeZ9by^wSLLxQR0_xkcz_EN3oJy(dMSYsiikl<$y8NrSL?zV0B7(1X62n~YR zi0XGA{@v5CJ*|bqb{0JH3?A6;(5%5{yAlj=Vwu`V+J1A!7q>OnrLo{!;e)mw1-EqSK!>S4h1F$eWLucPaBO)LeioSBB_ zq>{SAj@FO(fr%0P_T^&J-IcC>7Eq{La_SUA+C$4)&;yC+?l(gA-X8FywhgTSM6?VB zh~Q`4P@?x#a1Jub8-VIQf3o^6muN$H#9?d9{@nd_sMGW|v8^XK`GfOFyrw`gft;L> zuT_3K!IGOD5z8ekv?gVr`{e%m@+#Mht@0RRIf+=Gnp z3E+<_XEbDWsrIYZ2ATD=-xLVhEGFyj5(&;GDF7MM8% zMi-B&!&J1xjFKXK&nF-ZV3_Q^J+7W9v^oBIjV2soI>tXVbIw(JdJm}RYk#UsjEHVT z^b!om%hGq`JovMI(MDHHdAn{TP|$v-N-S50l;@@?z%m`2ad4tty)FQa0TJnB#4qf zECJUJ&Oi(3ejD)nC^S~8=5G!B!wmvB9C?eR0x$$Di=B0^ca$}xl1z{bjyT;JIp^6g z#6BmMiflP88=Zm~-JnlVAUpy+RN5me#5(-B!G~=AceKlgG3JQ!_8nK1vbJ;F-LQ7x z$jOXHSsyjm6@#U`1J5}i;J^f&^Z{@9cE#IR14~?g<VcH2CY*?1j3#MC4KBp*By zH#$tqhp#+^z2YAeRF@YpIR60rId*10H^S#^@LVBc=(>Ss>YZmL!UT~p0OWJWWvrhG zuc>L_tu8MwV;BGo^KEp%X(Ry>W7T3 z?TM=EIXR3S{QAiN6ZUMa6TeHU&|G=qa3FlAm3n}0VJa8a^^ZgZf7dhLyV*p?d z!asGUZN|_z2MrONo}_U;=xM6gE+LsbxMF024h;H0FsoM7_IZf~$Dlc$>dnmn8brqu 
zCPqJ=>k6ZHJIVm)4<>qjdnB7yDpd~LUhJMHpX?PXwLv`N`RbWpNO%)az&%L-{Svn^ zts7cPgpx^-Cx`>kOwUzw5^Xztj-KeMKWRH)`o}7hZb{_-06Thp){Q4ia{vy%x|1fG zoO=3YT(qzxf`6;5{{TDo?7QKTKUeO?it9vDzG3 z&~X9*6QAlPbH-pT3mqu92ahh{k=1VLwk>Y&Fd%?I+V0%s1N22;0c)e7Jq&!Xmb9w2 zb|Yjv^9%)Q+Qc*xQBtBZK}~z*q$F6&HH4~F-U_rR#HuyhoF^JctNzm}(XhfAFM_nG zJQS@|jB2`asaH!Y*0S6t3m|$YqQ{G;rV-Gmi#6;HlskTkwOu#_Y93alm!d$Ry&Y>+ zp8I^KDlApDVOzRQLee?qP`9^Q_Oy{$Sp5z7L9NEwJ(uc#32RLtXQKQU;GYwGCk)36 z^rzwf0FDErE5q~n^6M)78~)-#2QNRO>tWz$qTi#H$AsX;&I-oWR^`wNsHnu(uK)#p zdi+e)$D8K9!{Z%B0nc^$hw(RAK5X}0zA5%EpV!8`XMZm~>&8_pOIy1xcgTGz8d&49W_LaHeYs0r|zo)7_ zUZaLj3kABb!*CrOaG%GC%5H&W_G|%M%pm8xF&5VN)25>T)5Z0>c5_kudpQ+RMjRu>xo!(+TK#!bq zlS5PZvT@`hM^Fx0doHs9qnR_w0%zsQVpS{MJQ7H81d!-!fCb}#d&p@8A--d{HX=c2 zGACJ*Hq01uk>15Ws@7$CYJ&r$R0 zqVWf@njNO?rc444F){lurP1fCUIls3Ws1Qch!kq&}+A2o@pY99&!024WMhi8}`{4I;CkY9rl z=sru#>X+M(E%L~7rcZdBeUpN*bz7FXu;(59GH1UW4nAwnYGZ3pGmgCczq-=8(?gzW zG=S4HiQGC7F%h3>Se0zw@mkN#Or6`D(T`4lGkFr2igm5r;t6ii<&nnXI)Tx-i9D^w zG%_4nxR6Kx0O~z~PPzkdfJ|ki{!nHzJH|(7D!#39*G9r!w1EA%bF+Xu2N-no04j#b zqh!&!cvH#w$06F@jEVIgDWd ze(#4PZB3?HABp_XaD5O}xofJue|jS%RxeLn^#rV7JOnapvR? 
z?4H%g$FvyQSO$z&7U)jz=B{&^7bC;If9P%=Xd84_eqy^1E8LM zev1|Kp67;+KnME>q`_dsZvXalsgZX>*(n(LPf z6fAU-T@E3;fs-GUjwjL69*H!xg4&y0TR+pa(oFut*;v%ILU%dQF74$Fi~ztVfFSNU zl2a_Un2`P0Adni_pQ#G97AqQon}o|}A-yB7%dz!IW#h;&N#-1IA~=tncU84G-0h%w zoJpt&dyk_;Cp_oAas89hDA7Ky9*3`EDxs?I;954p5sCYduZx#BZEiu% zPf^el`UO_%<>Q|7?fW1pzMHol+llB8A9SYxNsJhXj;iY}-x=ag#9ZvQEo@Axc)F%?NejBA%xnXLl7mX|1 z-o;WED=F?Iqhm^VHKO~KZqDO{n@);#GYdNvuUTd!Ejm`q&gr-D3w!{e~04v{d@;e zrgs%kt?hQ!30OogT3?RXTf!0(2umrQpa_tn3Bn0Ua8^rFOB2;&_i%a=%1L3# zE>9&T=m2u1a23)OYT;6uuL+cj8lg=xnoK1kp-m+wqIg79D>mR|XuPc3hb?&^(^u%6 zEc!)UrdFw4pyi=S%Pxt^(4&++>N^UX9ITOrGBdU3b(7DrURt$6(<|0$w554% zMUg9`lzU}+KLYrxX@_y|{ujAz+bZMZgJ|oSB$K8XCRIfd0tPpsrYYIbwlvX51h790S389i2}w`wX=a~$6+5PSaIs?o0(QO<$o zAQDM-3668Zt5TEP2oM-?JjtFwk8#S_uWbJSh|t01+6LDQ{JIiHGud{jrqlt9WNrM# z!;t46Gkt_YG;12yRTNpY09x4~0|rKV#5kUYern~F2R@@5T6Pc#Jc&8UF_1n>X6M2# z6rEBxZ7qNsl1YP(z2#|t|a&wik}P(z;%!~kQ!tY)W_Lv z=%%#ne+~SmdH2Qu{sdN5&hqUMO^zORzRRyu1f9LsGx6A~5KnCWS z2f6(gHRbm1hDhQJAS+(#AQ>|3$CPyU^!1;0m9&yekCtJe9)O&mE@$0XuVH`;fg`Rb zx3Z^yav87#<|OWufy6-n0Oj`is^^dky6ybD#$WpNFej2D3(ae~s3YMtncWWu6420GMtCv+D@(v=YB$ax00V&|^5B6Pbe>(9 z4aZcwZDaktCouWyRD1!q@~{ZsP=Pp{29imVyxm_E2E#_@IL6(e;Iz*{jx*aU$y|7j zqn_u~F7p`>;Vc033a1Lkw5_R%u3)QBmND23FxNN;^U1?UF+G&r_=b%sU}yw_OQuV^ zZIcjw-HQ41b-k_*{Ny;i7iF^%E&ygdP*T)4ZgF>*l6PS3pH2ZiqYGKeqITLj0K&AF}{4CD1xXttRX&fJpc7QM%)Y)qJ!K_Pjus5i^Cpt~eB#KHH^D_)&g z91Yx-fgz;MC$R!2)jWq6wB1Rdzzu26Pv#@2i8DXIVQ1cWiSSr<2qfY+LF1w67?f9+ z;0?HGammKu4j>c3JYiEgh}% z0yco(Gy}EAyw2etaJ-hUroK;6+)jIwfCsRcUqkrCt%{+N2{O@-{{SoyAbKTKwQWjg zWXKQ!a2uo7wtjpg*xxX(N*g2@BoWRAW1JHi^h>za9aYf|kOM$C?sz=8yh05V8DVD?zLz4g97^_I8^2XkS~jAU{$w3MsJ(ciSPe1MWitn}_aX=cW8 z!1sy%Lf4_Kv8^T4xd&-&P6Wu1Tl=j2OU3keG8pEFZsNxRaS&jL?j$NgwY?_O?Yl!VO$}z#&V0qeu4r$=fM!I_ zVbXxgG@$SgTyZ1k@=-?Y7Y3LF;CA*W@BP=Hw|z21Y%+Hk5J@8=*bxfVrh!ex<-|#M zaf1`rkC$ayoMYB(JkvQ27$E+bRIYZlXGwRHBK;xf`Q)5tJ#2*h^hg;JfNgN`wdzQ1ED z(0F#o%_o?jLPt5^M`W8weT_#DLx}~euPYGg=hbZ2q|!Koc}%aT4WbrRZ!qiG^hb2f 
z!d8Ry@MIyP0BLHuRjdz&jg-}@_RwW2;HpNEQmU@%$#rWanxPoZ_S3hnd#!pmMDZ^v?`A-9E}~kmDAT-q0??Tng8#;KBs$Ja$+Y+C$i9KShUDUxQzxe-3r= z13j1MUk+>H2eSMP;XfI=lYnuIuhAY1{6P+De8Znts2EYyN@-d{FqF5Z6rxq7TB^;sYSO5w)nd{_)mkBF(qgSjo>lr` zJ5nl5%D+jfr7@LyiCKeK71lQjqXN;PNWXZb4{{&5``} zNiRD>xBjrw27Z`Idzu6uxb*k^k}WI4+|w9#JqOKO)R^4FP6++|74jIjx;;3+8>fim z4ClIShLoo~Bqoy5WJM;Km{PSe=OIwxVs01SV0laCos!g6pp%TI6eud~9f zXfvLKbnp7*dcAW{r$_+O4_tKgM%ukO{}93(Q?O z7B$&`GEYJ=fsAv=C=IIKA_b*`G6`&OWBHXz&sJ{2{(z82p&0J4uBes<(_xVE0DoXV zG<6%enOL{=Urare+*({d5p-~3ON4?ZF*Eu?xdQKr{A3)%MvH-Y19O4P2sxin2Og^) z`u1rt?voPj+n|PsoORvn94h`F(y7a~f=d`g32|}eUI_C#Oh}H4t}dD-vrl%(aU?m7 zaOHuZIBsdiF^_e>FypnoLoOhhENhq=;f4|h?@<0D-FZ!3{v6O<1D<@ulQBJTBjmHS zEm6*XVb0n!fgz_Dk1Iz&3;;b<8r2L#jGka{K7O2GX^T_TFTCymbtG^>ob%l&)ky+* zj@aLhN$JEZ1LI`RR>AKl?Ro9G7+9pwZZp8liNShrw~ zB0F^p)7S9k8@q9ra$<3SOn2?wXlZ^CX)s{49%pZuciR#N{86fTeKOsR1@0~6i6_hm z#Ci_1)i%nD%ZMY%0!&9e@(;8xb8q3cZQjzy360Ge4j3{BJ4laHl$}FC0nmKlgXNz2 zk{chEFi#SZ$BWiBh5}ERZW!#>W1NE!s#!J9!^t?vGoF#&I>O(7glj#}16p`yHwol! 
z5+jeyNm^Q0gX$0nE^C_750numCBVBaCOS1aQC7681~Zp5643yX35XmMj)E4|4L4|e z+yL`5NIW)qf@7G^b=Ep1P#E`a-M48t^Lxmi-2}?k(}V)YG;R*VN1N1{aWV-0n85>; zhh?nlOb!m(dVwSvboCH897>B_5D?-C1*RP%lgN%F9GMH+f8zlFw8j8|jQ;C4Q_~*V zEFw#IB*8wTuY6@=D&8x^I&C(!!J(NlNjY&7^AYBO!uhrJawUyx0L<-@eI{f`^j_Oq zwKR*qAlz`{4j>$n4V=z;$X;gjg8_^!lLW{u0~znSd0kLz%icY}d5MUQG3~;j%6Sq# zt7h)}f?Ktxp)s6}odTsGwgHpSAMZ2vTH-X@XEYvWeK<@PU}I$CuU|=3EnZ9mfB_!< z@PWk7IR^vz`=D!9!r-;iAb0Kj3)AUt4rw@ccs=8w`L8K$R8KJ+`maOb#{eDBJdN9< zr)*;#1SF|6`s^&)MDCtC4!wEfg`Znj1f9(Ua^QVBj?leUuWz}HEs|O}+zkG)+7C|a z$=z9bnoTfUotGnUSK@!y^oqtTtU!*(_`#2DyIpQmEHt;qqc zW&y*4yPl?cM-U-sS!%9k*BLS(hkrOip4H3_^GJxor*nN6I76o0hcLkbdChLQ4&`Ts ztvA;I%&M)bH*&El*Qogo1;le1T9$qsugPfx)F%;ocByt26>FaBTTq}|gUZ9bs1-A{ zn}2x=E&7R`Q?IGY$9*#^US_hdQKV|?-RYXhAaK4#Z(0|j(zR$IUFH+sYP7cn;Ps1Q zy>*57aDu4o769Z&+b?l?Q=QBgqcw53-;KKV4 zhJO!a2@7Mu9t05vuk<=?W3{4uWhNz~oOQD}PW=(q_h^jC~L64N+e5Ahd;%N$@W=U(5W`Zt9< zcensb`j&~}4ZttTT3y4bpPKrYggj#q2~pPvAD|gtRu711OG>YzYvKKs{ZgncSWneQ z!Mt&4%J}}VMhW*{ck!*iE8?Car;@!76?F4neR3H@7hG=uN|w2Ef)Il=6)=}5!9l-k zu%k+1JwBN;yoZiBN)6%8HW$=|T$NR499pImo3c>afPwCorn5 zDfG%QD=jKY79uIBTB1YBbyZ!(X&^%6GPo6ZT5AfmO;c%B=#*%4+NxD5`@*GIBTSYR za+8$_%C$wwQ;UG4ye72^f`h7c0z-ncRaYvJj$&&Pno}5}fw?U)$wf(l>1fYQAVks_0;+EpI zl_WhO>92BAiqZryt2ErIgz9BYC5F9PrBPB1Po@$}N{iOLnOHOm+_6^~(sdd#7mU?y z;IDghFub0zLn~<++Vh1?D$qx|VWN;J!3#v%^y1%pBz9Sp@7+koNG1y0@cSjOvb9TH zl?hW&ol&K&4`}8w^_5#k)gD?~QwC1oQPDb%q45sVIN<)t*EYZwm=fImGLTffcGm(~ z$%*y#T~jc7j0AVL?x;XvY)q8fD{O+`aUwr;co%f4ClEZOOQi696}?kg_;iTO#{xcC zU1?CZa4|AH7nAXpr6lc`jP&{{44OKd>KT|MNfJ(bEn2i>3BmW~d-^Qz2vOeJjsTB7 z))?dglRo%c_gZ;u9^yT4s!b0#>u^4#$Md39rK^B*j0iAH_w??s)V1b%q>vid4?<+~ z{!{g#rgah;*ctwzfyej>ANhbCf=>KxjQZoK9M5%!Uhk-p<%dp0@jde#A+pp2PyYa< z@JD`fC-zI$hTYw?<){AuwBb;%XDpEH9ovEALFw+8FD5b$Am@^(-CXTqADhZxx18o< zta0*043|}ovjYHh6V+vUj{OdrY!4#b#OZ0^JHCPYYf zC%MlP)7YxoyBgNGZfM+NBRfd`p5$}rw&n+$zt`N;@fj>GCP5%TJYZp_Is$UM@ubGd zm;^d9I+AG(${oAP)Zk zazv}Y7aEj&KG`lWh!EgDScrfls9H{D(#U}=00MYqOp<>#KSh~yeY9YamjXr`(0Tj% 
zM^!@d?eBPKaV3om6C`of*^i$^mtAM;Y2&rK{_&fd3#4@;Bhbf4FkniwFTAC#b6LqR zA7-$?4G6_9CO1*nWkl;(k){w@Z%I8it^c?hAPUZYQ zxUy|}+7+p`n?tJnAn*=0mpt{y872ua(o+NV%To06q^R`$ALP+cX0LO7F^Yzb%FRhEnWmdEh2?xq^$pO+wsR*>+ z6jg^Y#E=JB>5u;aRIBwT>0LiqH5f~_7slc@22LOr&un_-I_m0_0x?ht9)OV2XRZqP zw}kw1cQ@i0oEJI?Z*G!6fWCoau7?+VKn@9(pXo3Gb7EQt+EzVsaq2JXu&`};V}}Pk z_dKsTZ?Dze4|s6~3r9PD!);DjRhDdM%dy(tzvz~T>=m#8u1Oj^Zj>{Vw z8Lr(np)%NU108$%$^AZS!Rxes96)q)1Q2~dj$^6ly>-s=f8@YFF^$`a@{j(o@32{o zrVc6+c@PNV4hcL+2d_oEze>Jwb#}=zKH6Df1%hxg zM_jB%TSx>PN%u4UR_x2AX6(iWe?pYefH_u~xEP-)z?1s+K8hJ=T650mkH5(v7FftH z;yCN}UXMkk<;;i~G0VIAEP6D6cQ}cTk<@#w%`)qtk^MZF`NBh#pV){dnZ8TemlHExpcpb`PgMh^MbXqTUb-M+DE+ zZ|HsgNg#ceymngnok1%$;@%eZ?w(j+f~eJ^y11iQZeluhV5i(tRB8pOy*8Js zLiIE&d_fE6R#PkLeh1OZw&HkP{oDs>UI;I6$5r=_!!N^~yGd_k-v)R%KqN1*cyCV( zj4vDcUE9sT_1+xt_+v&pFI{g;1)X7BLVK>)#P_>@RPA0ilDt--sSK@;5o-|GUo_S= zj#trt3w?f-PP23jh4ViXX<^d~b5YhZvmJH0dl41#O>agmI9ZmOfni@()%0NCSI&Gv zpccu6wE7;;!u~UelD?f|uZOdh`GSqmWqW?!VN6M3P29d` z9Z671iCmebsKHA_3KLT0z^IkVNMRFD1WiUN2?K&OE=6Ok%@Ivq-9}RwT&9>@QtA~iDNk~e!YZn+D$PY%>BGblmh@?FTSCQJr682*if!%p6>^bu z*iQ>W+QgNoW2FwuX5P#I6~=d_MGYlccANlORjL58uXR8Z#9oryb$--B^S}9#D_YL# zLcB$*)r!i#vYA>ebgpw&u@&SiSivfVTWDUJP@u7^(^MK-HN*uP-7Vttt-n&aX-?2ip}M_OGeWls;x$$Bm=y=}?_c zxF_46=lv|I&2TLZ!+CUr`AqhM%w(RWSb`)xk(p`s%n?7LuX^3>1=Ifkub?mpIP(s% z$P8frumr z3`~$YoMXEA7OhKg4FC@PCOfaue}=Y%+O^!cG-PmaAd%3K{VU74 z7(yAD4w zb3udNB!IAg)9$wR(72^BH3dQd8;Eu>9IU^CJ^A*(^kw{xzo+O=6L`jxWv-~v{jln*h3al1T%sLc;K# zxl-Qdja|esjs@*5Bo(;aRf+gJ;Xs6 z$Q!uik(EL5Z6i#nO1(S)9N@l~Zx$h2hsJX?ihe=`ROo?+GKpRdt z?7Y8+HD_Gk(Et%m)mnD`XEK;QO!P7gaXx{5lXv0MYqq!z{M70}6WT!&!Nh#mo&2BV z&lToh?Z1zmkDT~o_&KenU$iybTG@<0pKBz*EBU@xTG(+*>|42OBYr*z|pBL{%Aw{p+zRy@7SNu=z83Fcy4e<&T} zCy@u;d775+W=WA51D>KsMeVg6KVvIw0n84HtTQcq?rYfK2GY}rz#+@TYIS080U~B` z@(WI4Y-Qgu%>+b}e<=bW^_&%xQph*YG7o-^?fm`Lg?slCFfvCxOL6Y2)T()W8e~DI zh~z|L1m`h{TQjjt5Oa)l_3wnYKQq_AQ|PbW*foF<2nT|A^%KExRhe!EMCXb0=h+2? 
z(Yh1nKHa~<)U~7>3=mKBfsdEE&;I~q9tj+deE=9fvXekPt_>WYx)(k+scS`!0!#n_ z@BaW@6*{eKz&nA=0z?y-nC@29w})L;3&zZ5Mowb`89dCbtp~u(Y5xG);7s5Qg;rhmvU_oC^@dlqPUpjTHRQarb)Yaqfo(u7LfhV&24~INR90phA{SN4~ z1C{h&4tSevWgg6X<@y=qRB85KKJkBp1yDqCwR|<=K;g>uBA9qpI&)b3gX3?(%#zm1 z`OV+qi`q|={TJ^G?+i8EwpW?dzYf@cq%C>!Qhpu0{5gO8Ptjz*@a^;YO8u>Q_;kT(B7l27NV`oR-r%~tJAnrYNH!Y zj&($b)Jt_lw>eOvbJTB3C3L5Z{cHCdN?9d- z^!>>My(!4aOL8C~!B(ghrz+J|z*s0AQ9@hF(iN)DTDoPj$su|T9Pq9&wYh6f5b8H! zp)?p0yuXSrOf7kiYgyXIiDGiRZlU656UwWr+?D6rNh?{(b?tO}^6snXIv*KdKJdMi z!uo!nYqhE2jn9NFs8GEdLV63V$qNqh-z(7VRqm~lwAc({S-TPk)>CgKvc7-fpA#BJ zKqQWx_*I9Gq*y@Dmta<>jMM{m;hwJ?qO%2NZV~(G#1WejlCW7>)VxH&s1rc z?-7n$-2VX3h37@{i%Aarq-DH8A_SfZRce# z>B;3$iv#K}1a1In|n z^tV-D84jmX2GAqTkYtRJ1!#DGObm~UB+ol@C#(n)pFp#gDyu?6 z9$PcU4tvHy^k1=jb>Q(*o8WJwUXz;cjV*ANlj2AwvBh@Z~vk3vYv9S6Goo#2fed^)uY zWv!<6ksaD%eWq85@E?F#X-18`Fg&0F`13u{1;z(G0quqBKN);uOGnaN#}oeMx&Hvw zNvF>FfE-6gw=N{~9aqA6zn6cL{Eu1p?3}$GIh^wREk@{nytt{7emzKzAU(umh~gm1 z`n?-~_O-#&F}Qc{5g9!vh4Eb_jt<;gxViF7{%1JydxE~F;r&63-V0_=leXy3(=fi% zo_4!wzW-FwHk4_ zwZWc(1BU7Ew2_YGd5h~3!^tEAhW0#Q^^U7(%GM42xM_*Z4cWmxz51%Alby@}IQHnb zuWYBxdWa+U_gK!%WD)7iahSrkcD1aGG|_q7$7i4Fk0~9X{eEcebQc}X*!{qHbF>lA z=aJJKDAIJ&;sBCC$o~N2+sE?cJz7%^)oc4ZT zPGHAiJwzE0zJZ~5YT2!0-!SHaWU#@^?Tn26iweHJA0fMe1b|Kqx04>KgY^P2++=~* z-20sRsndF0X5?-noECsH>C?9gwZ5tcmn8Q*nOGLO*x8@WGY6Gs-s5nd@s6OZdYT_V z(ym6>SDMU);qku%U%& zmCkK za=iTwUw2ha`k#m2Y_OA_s0D$#0A ziS=E<1fr~~8j0$w_ZwWA3P351E9Tmo3LsL^G^wLTh!h)?sHaloGPsl}Bw(gAq=dkl zp`fWHEEH0Mi3r+(sjLVGQ3}8bG=i|FK@|e0Ra~m7Rtlh1dQnVzSu02q)cP(}+EZz{ zDjHPYqN9>&JsaGS>ZNpq`o&f#8n;F1<;tBhpD9x=tI`Qfs1lM&T2~bT5h>t9N;f6S zBtQogriUDBcUx` zgd8lbQk;dfV_zcH=<>64`$<1F>UH}pl}|{t&&_I=v+#DBBrkmXSPReiTHXLY%hc3* znsJF<-6%C_RS6GbONvFwHx)EnN_TF2yv$Fqj2xYBcgey`uSZ7t_%U+*mK%&sSQ|BtbFd=PSDU`PoOz7)XC)* zt8~yyOR^>-CIRGumE$jKzv4Ed%beyC=8`xLIrcpxo)*0E{!i8Qn{8#mh4{>ed5_9} z!tP9kt|g1o(@ON)n8aNI-8m@ot!kk?`a z&mnBk@WV&`%b5;o*xFe0akbLdktF1e#Cxv4*FGDZ#@JQt2td)(ZRxK|LK3wAhNA~tB3j1EGRLff9$OM8S4ti(gvCY+2cQJx}x_x`9 
z4@pZ|bus`kA20?ZBux7HD>ptXbE9#X<_{zLFE3$m<00Rhm_4|R^`41grh5{9pCn*= zT6&=^2WWO@Zv^p(#~m`O(W2sAyk~b4EoeOf=k7{fg#am%9g+@bsOCSy)YBeIhyrIa zA`B0)3RTT)=(;f=d1bO(!973{XCwpM^+B)dFww9zu5jaPUJP;^2#FowEecnk47bVz zwmI_xBO$m3O!VvXRBi|b?{iO;<7-Syn)%(9k^%JgLw}DoN|`R+SOgGoB7RfM{noep ze~RMmrySxzCo&HQ9hBU&0JV@yzbTSKLF3fIv!}eXb4&N+ksP;nV~7NC9*JuXk!4JW zHc4=P0Uz?*8UwmwdrS%Lw=8WX&!k|o2o8_y9-zz)7n!3~?X1Ak*)IeFNZ{@eBbN?) za7dhbeLq@jOSqEa@gJ7}?gYkXw2a19Jshol9h*+lW3=Ft+t8I`Q33EsoI^y*nqB=^ zjNs!us&Bt(aBGK^@b(_@!6s$Bpz^eA?wKXNR>_vn0p63`@VRVhD^tM#0Ep()w$kwK zw|ItBbdyB700)_-N#yWK^?ob-F1N2wH0jo+=`DL&)6b|mKQO*qt?2&%9G4Hw0o(w? z!$>hC;x^6)Ay&|QQ-5Gu+9!QC0$WYwxta7Z;0G(p{zvd_>-|p;UY&n`epbBFHNW(~ zOgd>-a{)MMU@_{7=%1ANMR6m~nzL)E>)+zvO>6kC*TJ$kWzW%zQcimn`X!>Xjc<##$sao`)o# zFKLDLZ3n^3rW(pL_K!yMJd%6=0D;j^AB~zdF($tA+K4YF(-;K*0P-MuuRp2(0IA^) zZk=ZOl?V0KhPAWC1|2;i+4Ni;{{Z5@lVt0E^~RD(`1>Wr#jc%6*H?8K#<8+V<-`dT z>9?0T-hqB(@vgN~S+4v0q3~G8ZM2~!-}?H!{ao@7cx({`dEIwaxxKjf+J$3QqYSo_ z4AG`tCIf#c+Z$vT9(YTyboV?pm|(~vc^tQH1G_|snO{l#FW__Gp4abJ{{TdF-n_=U zwvcfjl;ODJFeWo9G~1{xAk3I>Ji)~O01CU}Ta!a;L^xGzz9V9G_JIc`4ptQ>k&Y$1 zg!Z3!^2m1_tm< z!9Kaf0offU0?sFpWY0Z-$>W&9woEF8?gWQ62?uEgVEaKM*=k$Zu*2HcGF=X6mP1}zVwZZ^% zXCydtIP4dr@qJe^m~bXqHn_wzwjk~p5PPaW`QvWJRN6^`4KtEY4dOs)k=TyQ&G^g3 zUl@YYaKIeS8<^e5%ydZPWRtXyHtC6Q;21IvewZA`MBn_Imb`-d z>HvY2`Hw;IF=-iJNzi<3F(qNw4D5Zv-ry_Da{11k;^vsj^fsFKJ!N5dC#VWxWYX0E zR@rz8k5IGDpy_TC_n*7I|92dCS`s<~2rb zUq`*R5Wa2V+Q3TJpQn#Msp>;%Uoi0nS(Ur5YDXY`tH@u}BrdK^=I*&eN}{oKMOOH0 zeK^&#uC*CfRt33NsMb>2Emtaw+^q_UZl<8^e&7j>OGF&LZ(UkoD1l1_8x(3riKtNo zDa0X0P!&YvTax8YMB*rxlvNCoC=RA zZYbW8;;v9F$k^nK$eQIrcAZ3KqNvBFZbk=HdO;|v6q4j%S!30Pl}V0PnB^5w)dE2Y z?n*>W08=Z9iBu3=j5#UFLzQ7yJf#wiWD(CD7QFto=Du;PUOVDCqe(7^nexY^b@g3K#(-hqx0&kl3FdMjPhL=8>923a z00Z#z>Bnx%=XLc$9$w%>jE>R&086pHy){?1wQw38wxh#$WC4Q(-8lQg`X-&F)1)@i zjEOUsmp7=MC3r8xok%{X!XwL2?%w11O93ZuS$C;lQn#|E93EanS{z&&U_==vWFAKX zR#vd;uW6nN7d2~ccH+jh%zN54#0d>(#`c^Nz145SD=PHqime5~g{8m=Bylh}%Jbd@ zqFNbVVXkACTO@%F!RMX0D`QI6Npb2L;sNgxJrr$aNN3^YppX4*F@kt7J-Uz=k=MTt 
zXl{9JHnh3Y-JtCR;&zT>nBbM{wJZ1q+1n<4O7hipCzigZ`D+C8k_RK{_gYSa%WB_* zv|a|*7S$Qt$3&TudmYE8_E39LcHo!%nZcz zr?1r{7`GVS28 zuaM>)pq@6gU=f3f&$_)sP!5@BC9WWe6SR-b=>&Rb-C_rC1Ep3C#&Pc*=* zAh-d}Gw4KNret+7*=OnW{hp@ZRd?kQ2zKU?o$c#Q(qWF~OszwD*SPs(9Aj{A{{SD@ zVpoN<)f(U$T^o)_91tMSa~(u2XX?SFtq^8{+&tn+=1xm(~n@lvpllh~W$4rlrTDpC5 zLkWkpfWin69soO=!6Um*Uz*gf;m}(&&rotB{{XiD5k0yruSjCu>2c?1Xu-=z1au~5 zNLo5GTT^KSZUD&S^Nrrw`)3OMj*$&8Kp@X^lQ04A?6kfELwws>M^OXRNsgK0(ME{G z@jZdqA;a?-B4Rd@L`E|iJ(td?RSj{*4kPvGy)TQkjnDr8)?_vsPjKm<{{T-yyt8hJ zaXWAh+>gJ_Z_1@ys>md@!~vKC7!f$n%_im3w1^D@?=kmI6`|5Sq;f$SnMt^`X@Uul zmrpVm~3YPQyoCG#@ujyQxfBOmFH>T9*fgxhb!mw z?|^~W@V&o>?!kr4c#yy`4|hts@x zF`v5Mwegrg)W0>`Yt9{3oevce)n#~2KT1&XOHWmPmcG-~er&h#u^m>$pN|iJ=hbI? zMUT*w^+0;33(&qRVdKwV=hbi6_|P7zKhv;2oBru{`_EP5Exbw(Rc5BRBa}`SJRwxVcypeiN`%30U$uu%kt6xOeob{Y}1plTqPF-RyB zf~jI92`h_`5~(#LsFWnA1v4?OPH9zwa~5kor8$mO+Kf3#EwSm%dZgEakI1JvN(_ z2Gmw5)Kg7;j#jC;P^9&4o0e2+&g2BBE<-Cc)!RC2*l7SSDP+~@^tF7j>FX`(fC8(j z--v}$rQ8LFsA?SLbjLMP)-TtL)o$mi*wn72d3*Y+Xqv^px>%L2Pj!D#a>1)pE2EGL zokkW#$B6dzR_`guUSCzy-OwV#dp#S+WOZJzPw}+%Ul_5me69OBPX#Z zl#?qvQL){Col@X~%56)48|t;42g-6rWW*pyD^M4a~K3p6`m@r75S5G~zSLe?T)^$4UB%Q4eEdp^FCmiBO{{WKx6`@sO z5Yk;NB+E`*;W3}>h2dZLbm9L15OP?0v=ipQ=L6=wKZ7hw3=_QdFmv4Jf#{#=Yu0Gj zZfzQSj?h6c1dcl3N3a7bjp<4~)aWZEE4wD3o#H+fFnenM^JDXUu;nZkojqN96 zfjHVYktRAXFK1ZOr?~!X8fCl(wq!Ze0Dfr)?Fxf-wvt&*``k`Mc}JAbNRnrQ7B$s> zhFjAB2AC2(^Xr_gTYnV?h^Tf?T;1rp8dyg5@bw(V~8iN4mp|bj1BLK=hG|${{U-P zUeI`ec?LiEWM`toxT(Hmo$QY&1d-{`?d!osO0$ED+acUbN&O6g$D+4;bSp8n2y-qN z?XDsk9L@uqwE2gA-IS^h)HTv~ZFC7UB#tqdf)-w*ba#P}8e(J+;mCA!Ax#T|P4Zy( zFC;EF7NK{oSfFzO#k_2GLImf!NE{kb6NL zeFCFpP&uWt332LU&C*HWdcwa();f>MIs)Cm!SmqA<~pT^S`>AOq#E#dHNnIBNZt94 z=I0|Bh?ChR*R5-u-#KV@3HBTedL+?q*(IgJAefM6tjx)Xgt~U+#ta$E@zbxgbJYuK zP}VR*L5#rX)X#5{yQSO{CEu6{0LLd7lN=MzryQy_R$w%ecz`1tyPN_J;q0z|<9hIV zb;KDQfgm2emUT?cR@-PKL>L4|-a(I--CY3d#_<|!W}I> zU8G0LJ>_J0e#RI&?E{RH7~o(wdXtrX*8!!je4NMwnCc_9s^_!nX|}q4&|XXtmbV*E zzXUHm<8KjDbg(t9Yk}$k21Yms?z43-7neD}U;^MW2bZM(0A=NF>ThY0Bu3W*>PhnQ 
zU_hR~C3BR@rEqq*$&NA5{N@Z4%{{xb(syH+9S%^}xcGx_Yy^0mh&E-sW5gI4Ck2` zKHXIfTr>k^-3J}jYMPsb9zsKJi@dMnsaCCRgUo+Qs8)iNz$e{$jRw3Byls0oG2LUQ zvh-Rl$->9U6(3g9E~Eu@)fbu3FR-SU1#HqPyW^U90h}8#IH>2G`CqQ_D`$J)U_(TR*=0#TP2fyXoh!|6Yfu{Wj#QSsOwuK z3w5F~r*Ey1)oa>#n2x`7<&6|DrozY5EIeFCU%J$Pj7)OAPe$;CYySWuQuIZ7%}ZS` zJgi#IpQ_ELcO_nBNeSV%^lEtv*ibUn<8-Tth3s^a93#3^F*Uk4UK;d4Us2Zd!?k?p zTeKoli6{#r0QBz{DDWZq&CY08y;HEUMmUVs}3O1CWQM9JGWUd7Y z;8hsV5~a!tWkBMgrAa}#Ju?Dx3a-+dOsK6Q%U2SF6irew!O6^WOe1MbjwM#CQ;AjT zrwLY}NlkG{B{ZcNDO-_lPXX*I!xcJiR_T>N4^?WbPgSa?ppdOna;ge-C#a!yr7h)4 zT!N`n{es+5oaN4PSyci{l3wJ*r-g79Y9Mo$6$;{LOVXa?!V;bn(|`|FsXbHyIi+V! zp-Jl9CoJkuS17q#c8zB33p-Jw1Est>C;Y7?3^!T=8e&`tj=?KWcFL{?XfYW70Eyyz6?{=3 ziNFaL_M%+iCEIZv@!zkq*P0gDKvvz;Yls~`lDrR!^xuH@J3e4oOGLmRx=so5$3DKH zdfy0N7BGBC-e-4GB+txr=$WqR;04 z{{S$bvrJ{t{6LzN>TV4*%Ue;T344Ye)+f0gy;hE{=H1e&$}k<5{4s$K1fGK@E+QgV zn%6u%y`kWiy{w2Pc=Yt+)8@SAhde;~Rmo`}mhE#tq@U?={{ZEKkhGIZl|r3Hv5M@)n?JWW=SbDD7ABs+oxfIT3uReA+$cRj5;*(CXZ!7yVUY~~k`p=8&zwdcs- zdP!@C1;71xf+1-l9?WXA?r7!Bq*&RK-dy+)(|~3&4;Wrs#acUB(&;~z2##GHf3nW- z{n@7f0EJPldr2k;{M&NU+3P$HRqnLiLA6OAls&L#p@SyVJ*HGVe>SZ5bmNd(9lH5$TRiov;}`GHKGQ2i2i!z zbt*y8j6rabGoB}z$;zK&dFs}?Bt(wFcI`ZlpE1`7RqZpmt%i;dB2P)}>b*sK7S*OU zh=+3@KtRCH-BM+=fWl#o6XqD?;znRZnZ!a~{{Ycr6LBt>+5jEmXD8F3kM6ZBTR0^7 zq`*G8kWPMOT(r`JkQmYc5bfAK=c&hY3eB`M1Cr*K5H^7WyqP#1ebnBD7HJsT42Y5Q zR>Uw?<6&3sT=qbDBuq&S2*lk@}Q-8Oo=XnB8^aWFA53`YRS0!bLeB-XyB z3$}TDtlS!ZGpC+;`9i22rrTww;f@3uF%SU&aX;m1RkSWLZ~%xL3C`m_(T<^ETX?|v zp}#T&W21PFV1mB9_sKHDAD09XCOY89bnsWIW&3DSA-Q=W!0SLm>ad!5&H}Zyn4l;S`*LGD~n)@2ovSG24(mH#k>Q(+Arr>4lE)HaJ*9O6!<7tm| zbCOuP3@jNS1LoV0GmitjbyX_bM0vM$oafNWHdPyU2AC#0hWZ)n)>A5&nd72#2FdI#hbQ3X4U{jEjI)NLBIo~r_na!&QlV&krOSvN~!uL~;iiYLV|??UsJ&C2w*omZW{ zUEVsH$a_+@r&LHTUEHG+v3J2jDkchw!nMUUjd@9I-PBn%oV4YgWK^_J+ZC}++Eb01 zq~p&%$yBRt3({4#S(i+N(_<2&AtRtCM6p^&}jhHj*V z^2#+}uV>+{MhIBgA78lA!`VKI=3Xh#$eedyR`AWpa{{a4eG70f7CIlCFYI8d%xd>O zBGBNRFDF{w3f52tH9eszsi3OXlD}*2Oly^JXC3T-lynM|Z>O(~5jEGey17Lru7p&@WgsfEcEN-|PvIaErmLY28T 
zs;?@wG)iRzq6vd36>^zW*sW4PHOjS6tRz+lJXIwfHzhq4Hz~A5%5G6r$QV~tDotFu zcH}N7L018QsCYsgjFnndSGQ+l;c z>M5_W%bw(_PDJ-9!l7+wT6TJh#i&qHxPq!PvFe5DO;e6EgM{YG``d0u+? ztJPMaD;oCAZ5UUWt4%(~PLsmWZH$$@rRjVpq%9Xp;|teX+Q3V&vW0c#0yGg>QK;ab zb5Ug6T%h8fs=*5mh5pAY`i0&XkJT@rl<{w;X*#apw|*uU#x#Ev?Q}}|j*Y1Yv?|7F zxpK?AY=yINO=RATEcy>UR*q0qxALY|D678;;9*_UBLJ-yRF=dHD)s1t27S|Q=^l#U zX6jb1B4^~O+EaU~OQISwIG;u8E?&_9j*C9-fPzQSSY_0r*1R`@{DJ!Xlb=!~W3C4y zpSk5Btjv;JK!CgbuHffRXXPLPf?&t_T8N`Y!W=sK_2x|bdMLTlnhin6lo8C5c^L20 zpjN3-Bd3^r$J?T<1@#F30OOpDcTlLveI~<((C?;WCNrLM?7TOKd=VR}Fm4=}FhDck zm?8l&7+%`qk!#vq0fONp@_=~9QxH>Z=!iRp9W%J{IuH-)F~aENt$gc7&~6yloP07G z)`INh;v3wKdPk~@PX|;xr#no6*Ud6dp^kpb-`v_e;rfHYAV%DWeBgSjw>PgX6$Lnh z6*G;q1jzdR7LcAO_&v^H1P0V@X}gdo1*8n;kma+bcvs<(5=nH)CnR&y03EuoNa9)= z=9apf*)7K{0ls46{dwuaqkE{iu4^{5%R~l}OuMJJ9l24}4GVilS8Isg*R`%>k>z{1 zCzv_u+%G+I;ac=amhEV1bDm!<-GCzb81x6eWcw&-^Ad7ca9RoGI*y*{m-gnQx$WFq3xNQ6 zUAkWCH}`5P3L)Pf05xmFrww_il#nnBLDymuLl@gL9U3o?ye&)hnb+b-fmrIsx0v6T`S89S1o+imMwf4LSXP%IYr-Fi+6;#(GLsTb74; zeSTiZ3ry^15eL~^u^>$6A`Gf_hI7gFM9Z+*X$P4fx|Q<1dk+9KU?fth)3-%xgRvtD z&Ao_mn95BeZDjB;tX8TBgxc`dz~y8$#1%9(w7Y!adddL6p3BQ=HFCX6-Q^3HxN@Ck zQ!CS1oCV^oEBTkMv8#oJjiXdm!mII+i-l3umKT*asJcSWzG`)iWEaJA*r%Mn>acIZ z-@WB|`}eM|6cvqGQmaxC-Q7o3i*=}auTO}|++2Oy1E~6}D%a|*OZBERy*BQ3G>TU; zuqs}aV$SV6XS()Q4V88D7Dk(K!rJ(lQl?$th3B;@oUNTszbnjL){?BcQF4~jR%u&G zy2~#`dc~QsLyEmx-W3|58lp?q#*uvlEHYDQSAvX7?p_bp_!7RIN-8maZ}9H9w1xJM z2<>Ukx-^oDGn)UBA`)CO+uoYVP%00Lb;_4LY@V8pxnBn(o56A(}R(3NNbUAN|)*! 
zlH`}UP$}*}9EPKi2O(>0asHTe2q~vdUrpl;l+$bYz zy;P}DDy_<=R^d1rlrJG_+FeTWAMTaw8}+PV)O#k02^Th!66`LdEd~i%%I4}aE$s5W z2Ayh>%UrEnHmYQ_01B;Yi0f5Z*HzU5?<=oAeR!>E7fZ_OSKO+Q;xF%)mDOv^t2dXc z4)*P9BSv*glG44;g8X7|312$AstT`ZseCsKtMwTDKW%@#va4Q@JK1)A%B3rgiu0Qz zx9+fSuFGWh{$XnZ)9yAL;U`Y6V3Uo+$F$G1pWS&Y-F!QAmlD{J0=aE?Q_k4jTqn#U0yyV5k&yyDK8iyw zm6zrweF-!CAqD*?_-?^;5t9%xpG?8yqS3fyM8-)WxjdQ7_0By4t9M6sL4phu0O8z^ zy1sq}Mb*RH(jX1(cmy5C%gHerAaJPZtZP9eyYj)3Na7+7)kQl;vY-TRYhZwVPn+tr zHG5cyXe5&pfFuJL1mN+7dKNrAaY^j>6^Qt5J83wKHTf9q)4>N27AB)a(ynQd4`ZSu zkU+~x{)5sF=&RATbx96x*0LNS@;403?QSGa0Q)arr&OnW&w}C`drNl0@gEhR{{WAC zo_S-A0l^Sdb@Zdp-FSd9rq_aKJ--djk+w<0f#f^qlM2=4u&20Xta04RfX?c0AP%9! zf;;=HyW0wlbDNfJYk@9kE)#!E2fXbp~fW+Gb~-Rw!X>8w3Ztb78K` zNCa&>jAV8`%hlAnH5kCa7j!1ShAmRy(j$vc?mz24r zB2L}lKmbHHpa=4fUFuQV8kCD&$RWl#2i89S07QD0jZv=~n*fkZ2pf4b&(tmznpzqq zjV{~CIVJ#@o_!01F6(2j=aw0)#^-I?@=^@m~b6iFsVnLkxLaSM&uPw~C zkT~rl*VE_KC&U+IZ4DSM;zA$^k&)ZZU-U8*=Yn0t`ksUN)eG&& zJ3yG9Pc8CDGErLc5wn3Ks!a-h@0`|bkI0KG4VGXTg z`AIATkTb#jvCd=etgwb{E#$;|&+Xb0Z`uR5^d@36CoYpbNj(z*xVPF|;vjMaeEwhM zMccQu7|VHZ>OEG5&Eybd5PwhAQl|E`#2yLr4`c&s09#GMTP7VG6%$DhXPs~*Q!+F&5MR7AjVdWdxp~NYw46^h+c*6@~mABvq)NX-mvK9SEl~}GW2#=nE5X<@d=jK(S?>b zdY0ZHeSEA+cbQflaH7MHLt}8PUOjos8`Y;)=L?glsLj8qyr!*l%G%U!7mn7hxm}(f zRaknB`oyhvRNq`>P&F5!Bb%e0Q7z?ga)-HTBMVASpsVgor%^i+^qO_g3tRo}7nZZQ zg=(eh&c#YSM)79T5>o5elCNp>W)d~)D@Astz7?B$cnLQzLB>_n7Dd9;x~j7)1yTy2 z;Dot3?MZlAGN;*H!5Lpo@MgL}3*_zPdY=tmkV4PCekJ-_Nz_Bxm5btSFcE;Lct=r@ z3)bCQ?rPx7ruas`ra?GfRqTcJ{{Rx`{I8vA8fgyUbn{wA>jH_0N+z$Gk4p|}a{^Nn zny8Fixj-rCF{KzwYG_8OC<#PL2PR-86AD=<*n^8ym6nBGinPF>z~HM?R~!{ep#X;z zr#TafQ;G_q#VC|ca#1K|buzBeR4J8qi<8i@8i+MR3Nlhsy;W7^S*%rBiNH-lp@j>I z=B*$*ate=K2x>K48mm?5JCol@0HTBhIWeU`q>Zdu)k564G0jVGNKm=S6&{MCm1n(J z3!I`-+~ozy)tVlh%8HicVxp3(-yJ5kbzId|t}7)WI=#}Y>xELiL;|9$RxS#t=+_}t zVMJW2CdaJOZ`Z1@sW858r&nIxzpa z`l7V4bxVcgHA~)Cq1G#MygsX2m9Hv_H&?47?&zbrs%gHG`t_JcBEesif-9gHuecqdxG1om0=%3oEZsgr_;Rv95@*;fTy z6tJFAY58uDpIT*(j3j?i?GFfjH}xLsGaKOoB&P{ZPLD01+@d 
ztg8BlXGr2zK+mA|`KgS$WSHaA-EY!x0k|MWdYpDywt;{-<|K)btI)j3$s_8tgC36D zG=~$65(X#B$=l5OF1V|Pgq%$E(4&5+$00<&ONY-@}ILVp- z+S3G>I3uSgq-WpknvS7rO+PelA}6e*8YSBS0JI#;^c^B|J=TqdP~Zec;{Xx=02BKA zCD*%ac`crRjGly#x+1lOt59r2b&>(b&<-O9m_Nd)Xtobyg+_+%-?;J)&_DSfFfkA~ zPS>4Icb;SrdWjG~69z+8u8&lFF!AamsS(f5vMO5_md(AU`qaS{nA5$n`Pw>x{{Y9Y zaJ(LwZ!K>3kVteEwBWVG3E+swOs!2n#dRpW)M1B*4b3C~SuxieqaZIysai6#f85Xj z7dQU^Z7#jdMuEmZGkD;sD)V>r2@L~jagE1`4H4)kgFS-t^~{n1&hIwZ%y0z2&vLYD z>Qe76U=cmK#QK59_EoP7OE))Zdx##r$;YCx54fof-P{2fll07!>6K>A;jbQPFKECJ z9TUX+&V3ecyMEvOZE2F=XVN$m)=x!3&WSD}dK^bF6Z68##mM!|*I8F}8j-hY1mqTo zC(LI#9FDND^)CfInWXt^#E#-|9LbFKSX9~#S8jBI127LABzFGF?F~irU$T?n1Nof= zqZuSSw{GiFdAgQWH*SIq!3R0xxaph@3ahT#yauv7&2V#|j7Y)B27LsRJTIVHc%_#O zcf@YWjr^}51~`B5BaYxKs`^z;#mo(RPU9Y;GdYskksTA#d22FW(I<2cpyCb$d!WC! zcbf&nVH>0Y+o=Z~qx4$3)#GV-jWW}X&`1yME@*%O>D6Xe)>`08ph(MH3_%#cJ>%;LdU+WES)H;Z(K1c0*bO_Ji~WW8Net zd=r94yz$%*wEL0WSOw)vXFakU0c>r90~0%=%x9zmud>PI&Tw-@BN7_mOppdg<^%r# zvv9LM8%dw|i1$Cvh;?om%(%${T5-UX1&gg2?Sr(rrQSoCn2o%2$@K+LyP)PcNQmfu z{@n7nE_FwSLtH@I*F&M>pyl5&jBtYYSKM~Kn*$w(I5*87B4T5K!UB~i%Y2|1B>jbZ3DA*9fCY9pHscGy_ z3cE^k3JRK_71D(V3t8ft5gcjM3;or zs#>|NO@wsVo}?{eGfg?HO$=2iP;~?!1!+2xS)z+Vfue_Ey7nsXS=yy{H6o~1UR7$P zEedr?sZq+!a;ZG4lmNMLC?#|R7`oTVcf43suk?@R`R@trB9WwHBshn)^%xH7gW}D!xv{Yq-NA|t6sJhtAMjB zEd^xVj#bSYP;*Gi3bd3{WG7-@V(@mb9RQW?Dh2WX00Z3lhF8_?Dq}sDCt;teIw18y z@#Vax>NcdAS+@__fU|{>UZoKdaxC#Srv5$Zh&XGD&#f$o8ERExXAVQ=&M;& zam$RsmXd!j{{SP{q(4!U4j_z9(FsQ54l#ox-=OwdMs(*ex#FPAgCOrDm*S7soR zNy|y%V~&#@hh(jC;O-9HwV(`tP;TG0_=OjUc(B@Ad!n>H6 zF%s861DKJFXO!E@eet#wB`^5ho+OF-sMGuXn*(!5^!4<&LgVS`1EjB4Q_z_FCHW13(iFAT~}!oM*qf&(W!n=T(_;&PGX`|~&($(|s`mDj%suq?HPTHW=QvJd;y9L>nDkWjKN__E0Q*#$!rrcj9C~m)d0uAb z*anAUcnujR9FfEx6*k*$0msV{!aa$T>nEx1gT}t4aZ$wD z@e1y@Z~p+UXQFsD;V$kZoS~z7;ni2TeU5~^ z!(Fv$s3<12s6$ifwOpl3El;A=8xyH9RiZTPszSg1@ng2EAa&BisI6$%xm2F?v0hQO zlFZ;#IHnp&FKTjxZc++Tf>wz*lyuyy2O^c74O=UyE86&bT7ojZY$~liKU0D-m4W&b zMb$wI(%ZZd%J{y6;wT)iZ(-t*wQ%`3xA8BjlB5)^%0ritk5y%*KAV+BIX#6<3m~Z6 
zfECh`p@60|qym`Ij5uj199JPFO;8+Eu>*=GD6}onD(y0;Qdek|Hl8d1kyIzODxgji zI;d6Nt<`F(^;()tCb6hMPo~wRq!CRzog{5yYeGL#0M#U@eLxi(tK{$?91D-Pc2YEZ1CFSMq#s$!~RYNJBYa$B61Y9Vvb15~PY9<05qD=Rms7m)pDg!t2GL#P^vQJaVQIrIqHL)^gGgv*KSV>OfN~SB}1Qtt*EtyRv~8T8mee)Tu9Be7x4A74o?9myB5#PldgB79H~!Un^u>x>YAq zS1Syv)oMc8BGo#rRqCUuRSRCUrLDgTToCZS#iw6_(bo&&HWzzJ`ZtF!qy^37pw z`G1vE=ZHW)}EGD*x#^vvc@ zC_;4Vu;s25l|`k+?^^ayaLz)Y2XUcSl{I z5_s>P-Huds7aoS=NZ!!;FkAqTd5``+r?*w(?kSdU69vvP2O!R5bQps<$D-1!t-72M zfZhoX$(hV6Ok-~-?vqZmkVL?A&(iSn;vS~%r_f@iHPh0KVq)EyFCIAOAILZB%r4?(^_+_VdTaCo<2X_6=I;3h= z?IYq~$=pcbh=ZOadZc)U`Xm!1gE*2-Cv*tNf$~=BTY*82Eg55hZg!TJ48-w-09uhP zJT=&iZXKkIWla`}rlTIx3wz1?iSnM|4XuHPKo~r61Lkr4)?Hi3s0eWcHz4F-k_UP2 zQ)!H=yJ)T*1A9-FI0LBc4my?Vd_`qWqh$3BjGQ<@#{k3}_gDi_6<$s^4bF7-fuENP zTw52?jv&a0#|P*@}`gR%HRq09Y#W7r0Z6SK#Y*l<}ogw{Q>)|S6j&@Nth&jm#ERHw`2Wy+R|Ng?<+>~-Or>CCBM`JePL5CY@USY92n(3-r%qh2m!(+0|>Ob zF$eP;W9Y56iD@z;3YMNrbJQMIja!m4*SDeww$#(fl^VQN;UsMVy9v3pO|A#YIe91cqHWzeJJm0q&5vHOKcLJFB*I`mFreKlF7 zSYcSzo(h6g0-%(SMADcFKq-uJYBdQfr2%rzsmfST7a?oZ1?p~BD7j|RdbdiVSbETF zki4ZbqLTEEJ};NiEN^9c zD@xd2CrEb-*IK^|gSqv#H5*Yek6FU1eJYDFjR5r|qz9r8NiL{WD#cl-U8!?cnHRyDzYt-2{h3B-kuSG%FLzB9_y+L}cYN!A; z7SR=fS*BI1N@^V0D(x1+(JGfF3iVzT)M6KozIwT#-)&K1<$bqH)UXGlwebsyh2!)+ zN@046H(i%Ea}0Ip05i%(dO!^4uuiH8U2#hb9%*aHYF0uC7*%wtIF1kMw6$h9%p_5y zL_|z3pJZufw#R3VzUr;*Vn*O4!1+kyJddK%EsPKe%*v$gjh5%qdHZ2M)hfQj&nCfLRU@my3pMKOp!T&fJ06VQ*-*Z$Sn z6C>9>XV5JAmz!n7&SE+G;Z%nr#hYc$88BozM$jWO>_>k@jY8$18eqwEIF9j={W4ET zoT`-X3t9`L7ZU(re8;M7g_(U#&>%ON$o~Ke^96~ttLB0U?ADA)9QThzo7=Y#DYef7 zgCVCxZj8qN8nS(dHzQC(2AjcZnJ2sz}AnIlBYHzorf| zB4zGIRF;SgFJ=f?=<4a5dFoo6w3+t zZf~L5eBC1-bi12}09*+tBPXfg4n{q}_ga1x@dnefu`UkcK#xH=I5Xxjd#yBKI-RiE zBsyXs#^^tmU=9F)?qU{QrPJTGi6pg>=79jTH@u1Du&DQd z(9k_WK5W4tPB_nGCGKOT^6kVmtp}0U5hV7G%UP6!?f^DgL`0uLOkvQ4^IlekuFB!E zI3`38LFN~$)9bm;4Gqpji4y>s?9mV)?gYZf)v4@e_Mb2$7$4Erdx_{X_Ez+o&T(yF zxH=@XVWbEiUBq%adL*uvZ?xj_SU@Ck8Ijs>K0$d3nm%3ucyIDE1m%@eF*^0ySf^)=)X7&2mGt{irQ>N*8( zy%Op{;Bmy3ka0L@$&Te))wJ>&TIhn;0U`n1BybK!LG6y~Ho`+gDrB?{`5b)yOKmu= 
z+tScu5(Z{|(S>HMrdaK6myF*SKBmp=*q;n`I=8)O;B zZmT|n+$F9Y94(*&AQ;-|kvX5(Dm7XOhVe2pGwJM}B&%wf1J|mxc4vSvsmTt=j_S)b z3TMdao{=z&0&9CAxM=9D)wbNseqqFQSynZObRTS^4@0!bg%?z^t=!vfaBw*LF0WC9 z;WABwitTc-fha95060~3=N-JDJpDnEgf&*)n%Ad*+Kx~jmggXPas`1=F|}0|RZS07 zfo#@HrsT??zK zo794|^)t7k-jE659%Fe)T5S&*snqg|iq)@D>RVzlT6=zXRb(vF?g0I)0sk)limR3J-g18j0RLc2% zcOJr@Osb7?dmOW?i%oLkQrhJ^lxgC3B5RVjDcr0>_n>o-Z$js(v2iYXfb_EDJJg!( zH>))2EY?(3Db!B`NtF?rik&y6?@Ge1s^wClPkWVG&Qpn{KB}j2e+gIDP`uu!smE2n zkBwt$?xCx%GpXxdt1DF2y;qae^|k5vyli^ue^%GMSCzfiB~ZT9$yAu~S9In^a<}yR zmFx6ZFCnJU;d^}+-E;XYb#WfkM(Djw2d_J&(uiAu>&@-5*s82isIxvQeHzOO8x4A3 za;HwJdmg0}Pb!^Ktv$-6Ql^@%LaS6M^;JraNpQEhR;W{+dhvPt;I!pZyw0H6 zSa%dxqP}9py9=Y5(lM?b3o_=UtM{d6RBGsg#dbF;=x!Bo5k9sStgAlsa;aNaf~{UZ z3n`8}lCVnpKZh?O4@KKmdX)D%RcSEYB$)M8olLaPBylU!Rkqp;0m8z( zr zZ)xRTt-%h!nfd|@BP(V zmpDCy2!XUWne{%UN?T2$I6ViLv_pQro{GK81veNt#BxBN?IZ4~Txt&m+>FT_fOzBk zBDir5-7fP1j1Mev^~!QCDBU=hwY#Cavoi$#!D;^hwU-=#dJKI8b3eMNZ*s>!X^;fT z<2|v*zq(~cHLh#{1;Nk+PDdx4_f?6tv7_ESo<{kO;(L+~OL#IlSDB_$uIE&u0KjQr zjm5z50fOGX>jI@h`lZm&DW4z`OhM~7DDA6!H<*^Z88BdwM9J<5@7SeToqcL&s4OO2 z2$r1p;E!)*VV1)H88IhuKkzUpLtcyT6U6VfB6 zIarh}J{?ML4)VC1Vj6fIF((OXwd(I0)`CMxbW1}({Kui}R!*1XFAu|r+}uc${WHn$ z^IERCif3uuMhwI^r!krNtX)c@-R1@dQUJyWUOM9nQ)8%VJD;?W2S9QrdrAwfIv#T+ z&2Rz-Uc3$!N79!y{{WMDk&)k%$&&+>dtPhX@XYyQ37_gdoXHDWQp|L;2|0t0-7p+W zn-Bp8GD>}GNNnvABM$57DHoJn;0s#>dzkJcvbYqFoRT?c>Gk=bqyZ8)uhG>%)haSa5Iv9Les5?gXzi-hSx^#%p^}j3IfKRb-lfd zQYtm2+2vtT zKwa^nzN;4gkwy6u3D#YuF zQrD<)LIRGIvdW6p3&N-eB5Rg*7QHo)%jj#_faVq*YdKe zS6;6ii+00wE zr2r;AqL&y7txAiM-Dq#dPor69Zcd{0C0n#=tF+3=wy3nIxg;LtTB3WEdahGpsn#l` zLabIQomV3jDK#9cxTZbHE-Ay(o>gkDPN-E{j$23zs8Tm3RC224EnzIN$Mx{i9Q3L8K@2H+R zYTIh22Q2o~R%tyLms~bhHRZbAdscnZ5UBE3)EAw0vo)MDPc zptGu5E%~cw8Y+d%t+LozccE>pt10O%V3lU5RoKZYk&W#*0SjVq&d0_JAT~OQ{uPvMVo1r8 zll;Qpt3U|rJ=D;#5MT&bD0!FDkPOUD2ia4pMaOB0E-@3==(lUbU55ZYu|1QGFFRaD zBd2w|ITek*){_wsIp^6?t#K#Uvbm~PY1{$z1G!mW?W6){CMWx;M@Cy#hi=5Uk@V@0 zZdH4WhZjf(-Mo)!K+;387?9sy$jeB+qP)2{GH$shpSVh^Mqedp}F{nJe^ 
z%1j(jUz)Y0UL2i!Ad~<9{+(kYr%^}<*@h&9Qp_RdwDXuzlwu<0P_4roS&2w&$SH@7 z?VdRgQwpUr=ffz6imV)>6Vr5HN|MKP9RKtx>6I?I0iTZ%= zg(bC|nfLc<7Fq_aFUfBEc5S))4d)-%Cl~i9JzYwfa9At-?@^X)=x5P!b#yKmQgm3? zC3&N0ugcLpT<7-Uv1`Vm;q(FMIel zZ!~yYtw$(-H2Rue`uWr&*>7EKy!3n(_f&PAcxv*{iRBn4-7sTl@ZrNCm&nppRWZIY zN>p6`o97s6^FCE>yXNCjw@dlEPgDO)4gHd4NjVVP|C5B|KHZ8p zVt$x8OlbiwCJk98Cl4CDE*DmP@&C$wv@^=FXB)@ptnOF)ywm^Q*1R=xK4Q7P_2Sq3 zC#R1$tSCc&c%=%`KkRAmkJ{CAk18#pKmTU40yS;)O7H*t(^>U!Y~H^n{J_0!_al=3 zC*>M>{;BHy{kykIBF?U5m~TCat)E?P{puNMPRMYMJ0KA>qqMOzp*uf#9$SMU+DfGj zd)_&RZPT(NQ?|w8aq9atbgCjs+b+rG|F(c7|0}qY*c#rKuXx|4e{%KTf(eCJcZJWp z&XsG7rDJ*GK+%1eKlcyQf=Y{es@3kOiSUNHh8VDW_$!$$2tj1OM`mbqTeqo-=-hMn zq>YmO>Md!<8hIx*sa2SrP>vO!|5H(J$dv8_mbZu+rUC>|ShkdMz!EFqt)#U?q zyhHjH!XaV%110}(#{0+USa*GfK8A6;Kqtyaz-I_h+>q2}VXl-{kFbMoR%P!AP@QSSX3P(SPgZ24E`OT63ctS?EFBc?`5IS$ceGgDlvBlQbLSli0e6Z4+<@3JEIl8XU;N&BbTRQRwom*L)J_a z;Y|!=HB(D_->e-vYEqi<_+-Db)9cL9cCHdu=IgsKBGF@b(Wd3Rm-#vJ`hwrS@y*MF z@X4fj<-QfCI{8Niq1J-mQCZ(BPO}rdUwAihw?z@S591ty!cJ*yG)8@(V~;`h<=i67 zBxI#ZlE9~vN>AE>FemS=ff~;Q0BwOpRq*~S8z~|MoOIapqwLgGOu6)_QyKUY7x!|+ zhgT`q0pFI|CodoQk5%j7#yqaX;0=#%bN-o&&we#8Uitltx;LU@R_=2ee`%wxO>yw- zr`4LprwWJ8Zv!548n5eL7wsd|TvN6m-XVL;H11$jpxx8sR(s3K2hO6~W_0yE<6Qd7 ze{M8=1ae(+Uj5Bqop;p<$`?=C5RIVoBPxc7NJYE~-K;;C<XYWyW?MTy@sGa$M5aj91+)MZNo*aWjeSJUOHP&)%kSDShe|PPKd10UKT9 zAFOb>{B@?^`4y|~1G~H@-pJ)CT>GDt#N1t+teh_CL#yTVS)%)^2Oa&JJN3Me6-#Mw zL+!11e1}-J-ZdA64GAoB$4`EBLZtnt3Qq7IlQx?l9iPmspFh!lo*l?&K*T&rhxcjGO)C_vbV;t_rnx>g7XHB$kew?abynxK z?+w6534*J{_x*$-9m8mJs-G#gV)N_fa5LXE?ct7$AmOjw-%jsWuOAI5*Bzasz{?;-RZ< zU;|{yudAlofQRe+2wpS}rZ#(O6idNDv&zK|G-@9P00S&6Aa>|zz~L!L1n4kTutVl6 zXNuB9qt3f!kl@(iPb%#d1yOqcQ<2^Br=^U6D0Pb4JPsH!VL5i`dG02J8AhR3t(04n0L4Tj21uke$e4(|GCzer9#J%y=h=r zG0Mhyx^rSfO8R2(w%*N2@>`3}D1)rO5AP{G%K`b9T`|^-1p<_1F<>Wi?%7F72l_TQ z5$5G>-?9;v_cADX>hn9t;KEklu(OiFHK^FHn&+;bQ>KlVuvnc=ZM6AVYbR}4EJQJJ z_2u4q7x7jcepSHke43_=M#HX*05^qd*)mImK=@CY9i5XEV_z4}Oimng9I2YZFfz$$ z8`4{@@Z8rfdrhj_Pw#n;UT$ow9`yap#oc^^NeCTN+nu@_ez8SG{*CIXyS+XyLtV=i 
zwna3#zWLSG8Ss6#^Z0n(tfj5?MzmXq#GZaxjGK&9={>#2gCFHv;H!4*UyAcz2JAPc zFm@Yzcfe0%ew3|4%)Z(;^JmC2Cwq48H2h|gjK6<{WdBs{;A)r||6|+k(j2*-??jjO zljGedx(^&2{Net)?AfKo!@Jg;rwPgL3sR1Ej7YlPdINh;W_P^P%wP4F2_ye{K4_Ra zI;_qk%UU)817cc`EQXdX92=R|DWpfOWU*V}lnMjQ_bu#!`P8&))a%i5A7rO1VlQ zzWus7HLkkx_tIvtMc0nEJ8%E4xMv61xVRgPXzo4kQS<%AQSaN#)#2P7a9XtEw1!MW zf~3YJ?6#sb@+w9DP+aL>zq)nT(W|0EczhBf&(~t$W_^9QH@i@=-8}eY0+wz9IS)}N zC`&abAI0e1fR^M+BP&%gAP+ky4Yl4pEt!-8+OWG!UY|<^N?x8lqaH>;7~X1S#aCf7 zb|Q%#{A6+5sBWivT1r*7v6|X#ur))jIy_BB&Ly6r>Bvle#(lEd!_I?BN1e_<4Z8B|`A-%1IC zXs7wZGil6eZOyjp)a{0Q!SnFBCdriSQ2z7!oXc0q&xsQ4 zDW@9tK3TT`*tDlsef#~zNwD%!B|-&gVy|lOY{X%g`qn5jz%(Ct*w@uN-P#d|raFN& zccacAI|A&cqQJyV7J{Tpy4Y*Sec+?#QSfN#!3=vdR(7ZER6toq+ zw*hw+2ejR_qcD<^8A&%0Ko`6@1RibQj3clx)WW((vE{HUG?eD3WV$-fItQ5c)9*7p zPKjPk5wcj9e*5sRg!ZzRHg<(@c_L~%`bKt=f3!i`&{}v_VM((fxPU$B-w%BZjs359`8_}=cz7NL*^HnbE=elEzt(Q%2K^)N7Q{H0zM}@; zSnzUL`Dn{&ZfkK4ui4HiiTkG!XuXub9+=5h^U=QjBDk(0MdyAc9z1(C4?e)2a!#VT zDwC&jjV@irNb#;({ZW>>Sr%6uT_xV5z}FRNHKPGu>==@H#!Uc>)vvmN!@x>5lGWn7@q6 z>aEysGD=_5MqQp8tl(f8$4<`@d`^CG{6TaXuX`!|v?u2{=0A<2<`-2D7-nqdI1BL6 z0#=TuvGw7wOY0hyC#63Ws$K2Wz_M2C_N4@OyiJ|DmNi1Quw;J>_k3MM`1J40F|&g| z#HdG=cdeujr(L`6?v??3KE7*_dX>uDW;bLUXCpm$S2CnY+g8g2HH^{b!a%R16VLz289K!~T)`t8HnwEtytk62g?QEeD ziWudK@#8f`c{*muT?Q+<>#w@8nSzTSfqE}6Bz4ux4HW50dC!^YNZ0bF8llYjJIUiH z62hL|f!3&bOomGuI$I!?diFx$WWjF_f4=-<|Ke))xX&x!JT<;U`eEE}?J6CaThgw~ zdh| zc+T953arGTpnWo#diYNSUHnx6frw8oSL&07>MzOnN8O=pbM#zNrJR*UGCQi-;*b$| zPIUfDv3CO=BJfL1F7t@G6EsMXbXvF$QySu4Db7iZ>;JMs&*S)vg)J_4b71z|ei(P< z>w||O@2skqbh`>A^`0o78$xEX-yFMb3uwKB>2RyxUn^Q^Q~9%fF3Dl5dFSSXupvy*2SVIK(mwvDW*F ze2sf;0J%kS{`rN{byb=EF0nD@(^cH{i`8T3hA9D-O9J?@45xO)U!sMm19u{h`)I*% zDnMqI3+0o|e*#_POs2D0m&biP&=jD)BV%>061-|2fT=L_#YV40o|k-v{l>oNoTMC4!fpcZe&H8wyj z9k;8;mRc6j4r6iVmUpm{t`x#QGcUYvbHD#FqcTIhi1%#<83 zsElvZWz+&~gnjXhbMx!J>df$le?*-hO)Q*leE)?TehV(<<>(ZRCMl|QS=PV@KwSIx zpwbUUu=IXhmrY3b9=WZT+pgC&nhU=tI@R>Tc8(9EDo`zTp=4aH41a z8!^GJI?25-UfO(C>g`z?nHJUaku&EYw3MIf`1V%PP(rMwzcBOEDV+Vl=p2|kUZwKk 
zezLAlfU|Zs|C_AmJBFCwUiMW<4DQ-+fiBqoflGYu&JkzkIvlf!?fAEi9TM2xYvzR5 z0DW}LTY4~gTxsE;?}ND?Qr4gsg07<70;0ig!0F^Y#zo!#NhzGsz8d3jSA2jGk&}3L z9Ch>LX8Pl!(l)XubG??E&vQ1Sk4MQ)jNuxv_Y6yuHtwE4cK7(Zcpzq#&MWUhUcg-l z&TD#m8%$nr_UXI}tw48}*6vd_d%uck zicfL~677I_gEYP|K>AGB6L>WGZd$8EivO+;h=ZCPiL(3myvO-4vL2y>{ag|ulFN@6 zo?O*3W(*@x`XIw4!zx;BN6q+i+@~G!l6S@t=BvZC9}AZ)S)GC?=3Z)K3A7D>m7`f;h>9W z%kQ3g673YKLlbR2BD_peHiUn5YRh}@Fx2DZ=U=q{7knh zDC2*9zxHe@@s~<{?P|jz^j4_uERwLYYQWvUIXhg=`Z3OZv>tB%Z~Ozy#b~GK68f>- z5p80u7d>Dl;^^~&k3+GMykH`Tdmq2dU_}L? zP8sdGV)1Iay6quh1DFAnj1Px<`&@dvoN0b?-_3Y$>fy%b=+R)xuDbBd zk9ZYRRq_M3g9|YG6^1T2W)&{>H2SGTmx)t~M5z$UPn%4VllD3wko`Z8MzC;2{@Ouq z7krdPvv)NTxGfzi50k3jHXJ{Ng^z6Sifv8jfj?)izpZLMfBJ(7KXd`tUTH z&Hr_E2y&Hlh@_OPI)|OMFSr&xKG&+<`|U!ns`j|T19P<(_d?-cpZrhi?5X+KS91=F z*ycgUSN;kLoa2$|c?wr?Wg4qe(cH`*NSja&V!rDXilR3hrJjLl2atkhTml@|>GCHZ zc-Jfo^UB9Wt@7r_`ZuwAhz5#&T^$FE%)zkj!n=XMx=C$Q=8}@wYVlodRS2Ryd>^Q+ z=N7?G7udOG5+eq>L~x-;F>LRiAVt)UP1?l%;l6Yk_Gl9d8Rj+&JOgObg?&1 z=0#Vl{5eOQ#09wqdY(UN`-o69nRwn~b9z>$d4FQltK^G+2J5cWeDlu90)6dn;E;74 zhIQ^u+LjYo@4iLDo^<1?;B{Lo4d;x)=7@~xkzoBF49fwQ&rAx~%kd6=S%eL&!$2*Z z$tVFKDx{7a5m>Xu$oziJe#NdGX35tKMxD(n~vJM-( z(QI+-*Q&w&LV>rH2IF&cJ&GJvRre%z&ZvU}8gP+Dc0u}?(xEu++0z!qNhf)h&f{>3 zUdRU!cNOsfSB~>%$OUSV2CE~!Pk=;cQglilbbK?Vg#y!qd=6S_|DnpcLCasCdM_fg zxNob1gVlm(|Zk^LL4Yg2#6=w7f^QgqVB0}XqHbAHh z1#l@Tc^Tx_DV85X=M3KvGq<&%dPg4dOkiM+0t5cmJJ_XjIz~e?X0;qGt8^aJr)pCr zlD=K&4JqVzD>YQfdWT-XJ?_E$!ZFuA2jy&o_&u1LXs_yN6P;9_Kl-}Da_=o-nB^`x z$J3!W`QcKkrN&oX!_N=@tBy1F8mtVA26?iAn{ZLDU-bG7jZ|?oP!vg~>lQAUzM9kf zP_~jNvyF5MchnZaL)YTaVEfNYxqzV0?t_xvRI{8^<&dwUKCYx#UAuxYJ`zFHcG^4saQev%B8(^g?$jQb+bNXAc2cOc)wt<&|>{bAA)ZA?E5M`n` zEDkvtTnL+d$BH+{rZ8%<*~Jpgpf1s9(5>kZYpnquC7RSy8P7U;4_AbfH1b3SDxFtKVPPSWkbbacIobd-YF3|QjXhv3ANsOn z*9GcD3Hi_Im_z8Cy#M=gs^y<+KI*5$#G2fPyzfPi9&@5FW0!XO9N!GB%@PW~{>vDu z4Ej_;t7$8KEV4n}S^o1Q1T$4G3+_7`L+f@_~WwZV^JuA(qdMjML}`tRDkq*n?R z*4GJ*oVDD&-=l0_H%E;(U?ozQsg=baVtQ@5Tb&9&W`ujNQPLDppf)!2VBWFZr2<(@2QYh 
zBlTJOsPInyE75o@hthBlj`z6!gV@fJc{J(D4#dc`cCeMpwPJldjj?$G9{N4ALM9`< ztUtAX*%d?-nff3+LKI0Tjq32K@qS|4p_B4O?;$lUjk7uzDi$IpK`6&F?lEX4ZNS@K z8;h{ffhYD(dblWluRCX{2!*F<)!03SwFc90z}7xR3~o>D06kU$s%~IBuRpW&%#k2} zAjea_&2bmijl>jKIIP}5ysq5vd3^_`*QG?2**u)ry-$F@oF6I<8YXssaMU!&`yiR` zDyOd0QQ6b`Ffgc}uD_&k+4B|w!pMd;L6RhJ%QeTdOt(_{V&#aN0IMJYOheZpssHz4 zyHBft-i?!|Zn93kiJBOr9)dDfgm20lo4);ZldL()5APFbK{JDPnK=@%94|gjdUPV8 z1`0lh_@%Ouz1$32IhqArI#p6{aO`iT+h^hC@Hm6q89shN|3_&USWoopJBZ4@k2j*X4VZ(plPvUF@1{ zZ|P-uVZ`AVp$Y$!GJEhpseP+8H``h!@k7dQ;s5d<)t$}X3cVg6+0X+pxYg|A^${Og zlR<&8J6y4BX}1X)7Dm^ai#W>FYRlx=ls~<5lM&S?Q!zIx9GkR?SJv_Cpq+VNBQDzx z)LB9Q>V+<^5R*J`qw0o+hob@-l8hokLRihKaqPwgw3gE{WAJoUlA>4N@lM?Q(s+2id58$6kP$dVcxwKzt6bga;1*19O%+^# zT?n7QZUZjy5%dsUCb)sQ&08SMCSiA?6fK`~A~`@-qw63nHhZMd7_ZJTJ@$8HXMhfE zp5&8(jXrR+%a`fAvKS{V)E{Jhrj)K05xs0VLQr|eAu;$Mqcxph6C9gppu?Pe=;#wU zbF%k&bH{e!)u~3`h@>xO2Zi4V-y_mT`q7rb?LYJc(h;JXng*PYT5m$$6MUi(_pZVw zitm~20;#B&$)-g?u2U9ot)q*hIgsdFI~xs={&Vh#UfN(3{$ssiZxlWQJzf`zx^6_6 zO9@AXr)|0lv1WE2$ZF~bda7^D?KT~|fpS#E6F((bF>T1z)P=IB6N9T(P`A5flih&0 zt_7Ygj14;34OYH|26aTnA;+_qjZh|FOKPBEu}JI2`zo!`Mwse?hJg;excF(>eZS1E z;}NhS@UIMpTxCM8&VI(_Kq?|&yAx=`p-5ktpncK|lUUY|fQNt*~LPNquT=WZ(gLh-W2+RJ z&O33MK7=q$aAVVrtXNRg1o$7$?S}m&A6_{Ga4gpC2Jt{@wXHlw_m1B8bFJjAz$bcHLdo#sz>iu4%m09N((dG4fIM5a?dC zuaV*YagU+eHpSkmPxVGl$<4u$_lFmaX=$zh z41w!O7vjKVQADucj;AhUb0diOhynp(Ro~g=ff8xn)uI$XXUg1upd6V0$mu!>NU4!0 zVy8u3ml(qonQW!Y@Jyc+^vaDc&XmT}Y;k7#s$QIugNV_O0S0+L**!N9w zibdjx6m#Q&xSCwgQ1y-s1SW%mBHwgQ{#tixbD+Gi%dND4^vM5tTe7&P-$=p&)M|>k z;~&jHL>!{VJ^R5F2)$hpQ*h4Q1IjKpNZ?%^(=Lr|42QUOL-SkkGE^O&1NNg4nG#~* zFLIw9F|V~R+cj0WHiF6;zL(EsJY_Lnys=r)_{o;B z0Ye2Pt@o~e`Cc;6OBfgF7N4#90p@Ls@UJgFY_)OuIQPj^BMwSzh)Afl0KWTN8Vfau znndbGhx35?-w{mC!xkTceEih%KeILWJ(3xwtKIGJRYil$^FZ!u=Dbg0%eia=z{-W# z@IR?G%zq-K%`=X-JhJetX4iIKDrBODH&&6u=p1X~d$yKBn?(oz=1sBAl+N0f)h_pG zL;Ea`jh(aU3#mNW+_m_@Vm9XB*a=n*)sn0o7Nk9Jzg~2^U}Ys|VyY!FeHy7&m3Xag zce-&*67_xB&{ey!A&(`TK7dsc%NDmYXbXOWu6)u@GXG?~&BOYjDS{Pr;nMdISA6z& zN57po@ly*T|A*GV8s`|6>x8}*x%1UHwvkI**qm 
zBOp)85+g{bPDAmVOFXEVh8<|w$f;i}zwUC^b5V`D9yE>**$$T6AAp}Pu#Wf{lvGAv z<%7}UD^FAVc06+nWbw(mTKe1&CoTIU`B4bojuO>cw9Yncuz>QDNchf?mE2$VBV5ewj^Cc-3JiR-M5{#lse zqi#j9w7Vd0l+h;E7THS4&EOWyE8{WgE$I75y87(2UtP}&;uC@u39*bO7RSk1Ar~{` z^om(L_~a;byg9WvB?iS0O?nimf)+R1ZNybb0 z#HES@FA(?OTXoDdnnB#~lHc8f`(bIK5bLSWz2fdi9ksCQ=RsTaOQ)fu7b!Y)=@%ri zQ$$yP0IVF{?+lnHPZ?JVB9zV zgR2LOKH^uwp;fCNSmCrj>t-u+9IUQsAM*eum*2zcx+kTt!@fj}WwN3^SBHrznyyV#$r#a|%LiI4d1$+No zm+6pvAu&&YS*`S(C(i^!H)oacqmb2F)oOBS3?tLh^Lu^pI1?LVdL|2<=@g5)R^pk0 zw};XV>Kp4Ii}rhEl{4H$HUMM}6U686N^aDZ&_sQBsJ7{(5|b%{)!;AWLVrl>l^6U# zOyLOy5uRds#7lg%9l8Vi>~l5l72Xnn*S$B7Gxrcz&@|w8rqM3 zy;Q5^Pu37sorCmg>Q}BKd#^vol2b(csoIRQ?!*p=IlA{LpHpdC?x|>bFf?04=oQeUvizWWwww5`O4`QrL@bh`XYXsS<@VtPT*-%JlQTBXb>dQ1P-4 zQuXI6_YX|I=4*6?6?G9SZ{C{s9A?V}Dysc4Q3oo3t*Bh?SRMRH|tVso`23F;1zQ<*rVMfi206U$R%Ml{|s&9I00QS6)IJhf(1Eh7CBzR-Ga zCOu7QZtd~1N*MnEiKJI%SVem7?4aY)X-3|Ph-`r@@bi$z;2sV}bQ$==;& zK}~P%Egk1HS2aGb`)KLxiqvQIIZ!2;F%wotwGWAuI*df5VbX7E@vY1+nAyfaC7OFh zO_Pv{mE82BRpNZt0eIH*e%B;R$yTU}(HId(;!pMSGf^!Sa7?~bXMCa2IwUR8hH|6`im_wV> zd-3+_cBL(Aj=}Zf`&cHs0m;N3N3tAv|8jBl_@uM2AF4t8%(aCwfG;Uue2w-g#6mC- zy3oA_LrG$i!bl%X4+yPa1QgDU`YeN#!X|K#ga7Gf(!sft zBR7ql{luBSyi`nU(XaNut*vW-TDG~NGsm^YVH193Vn4dtqs+$cTSN4NTet`l32w5; z!!L8oe&I-zW5gA(Lh(eBNhW<94*_}Y@gx0=^fS?e@=WsjlHbDJ(gYKS!J(67z`Q3D zK8{aR$O{#PV>_T3tvBj@XXvXDB!<)5(*JbqZ++yAjg#JXI({K$&E0-G+2a=-9C;jQ z!T8MzC;o!&Y+bta0}Z3e@BtKvfo@)Vo>hni?A#Fh=$4opm_fRA5#gL~Bs?QTxvgqK z2`YsSL`{SbPe?2FpimhYB}%~|kUh-yIh@JJBG_g2)UYbO)jyAAM%dPyi!>mdXw9O@ z%ebh8Jv)EkBd2@Sx(hy+R_|br9tZZMb7hsX>oAjxJvEojGf4Y&oSIK5Qxb|(9MvBv z>E&LrZ70xA?9z4HzEMl{t)FRb4U^Ct(Q_I#nmO2=Ql8xf!Sv5yx9(yZ_y9dvORAvt zNeT|yDD3Dhf^Y_W4*h;U^T@btwS#K7_3oW$!e{a(Gmi=0?Jg>CmN1dE!DsFTzGOTT zSTmb!u!~h7Oy(i2!hKmvP-VK7Tut#kIAR4(DpnXTnl_OHY3MK6j}cW}o>l$Kt>mn! 
z-EQQj-E*b?X=54DjQdC~pP_pz ztdB^8ym*a%Ev`L)D3m@!pp4dw?}a4Cm4Nr4OuE3xJc2LF@JDsc(k=Jr+$f#Et=aLN zOWg&K5ojP%gH`WOV`x-3#s{Ur!1pPwP^gYk%nMXJ-g1sRsm$&Q`QveZW)i;}b(Yvg zB7N7+M=h^(LD|S@R{9vTek`b9OPjIH;7IUgi(4wVWFlu&-&>zIo+-^&{*-SrP{{@wd) zOFtQ3%iNCBM9|k=WEco+K@kWojC+(huDmz zt~|2-X7B~@eZgElYS8;79%OOATk$*lWsL~#qb^nYejy2l%U&J;;yUaOY|>jzS+py5 zTCX5|vIw4SF;w2Rca+7W^W;0i;>g9>?nGwO;2C6b&SrSV22nDGNZeHJx*v zfvAN8&q1yHAfB(b&`q81tgY>W{MtbqslmWyr?u;2Y-3_k5(i7-UJLiAsS3wjzh(Ib96C9-y0I(j&uN=pb?=J zI8X$!or>Ez+je2O`~x(3BY-j(-t~k5%E7MzV2~W)avq=+0P%-~;=rqQs|%5MELZ;V zzT2y*Mm<4njnH<630Qgn&?;L?$f9S);midR+0h%SixpUCW!7yolab)mQ-u&R6d*_k z0|0@b(6U+)<_Am`aL-1g#|B(;f(mkoiNf9jMj$YoU$4&*NIW;XT>KQ!%iQ9Sx9!PU~=_gu)VJOSrd%jAYk{2aYN%srJc zc73@;>W)$BVJ@{}9C>p-*pOegc;0EItzO-Ez~QJS2cOsZ^XsYNpa0c4)oAPih2BM- z2>BQ_VuA;8i^_kP3Mey^X_Mz8X;$Q5giutl6j1rf8>&TAW?QpzdiSLpnoC<8_os8* z1eU5luT-9rF~;%_svyGrj8>Vqt7cMNp3wcnwMLIhx**MUeYr+#dzn(CiX-HFBqO3; z>7VUTYkmgi`+`)ZQlw?Quu0lHU3jk7Rujd3au=uQVbW?QgA_ZAhxoC_<%~Vyfnx0f z(-$w9hakCCqTW5eoEpsa$~{j|AH#m0WF9Tmm*^$0pe)ts!)CU|Of6@i6^u(D6+sX_ zPa&;GJEzUInQYsu8tQ*IXHHW7C@~@p<8n8D#bSvPOJv@&u}eAr^0*qzlY(TB1YLk~ z?DvOi+|;tEU?`nT$aSbPwG?A%J_+<~M$}zI7P*8`;EX&ay5a0`Q-^~iKyX2RRK91` zaNo20Prd30cFRz(gPngPuAQqRk4ngg5q6;*>#sI`Vo zLLz*GO2NZ8<;ysyo)Z_bYj#maEr2``;0Iv=zC@vB%9I5HYmq*-j7|?U6W8f+(9!Zm zzxQQ2(^YccH%bCQ33B)pu-3ldgff!y&jEWjGT;(lM?d18C}Dp8SR)T~{g2O^!W zV2MrqIOhq+F>1!~)C6U;t*&LQpry8=3iHEa=8#)w*dyigF9e<$)-7nwa)ynwO1gus z&8gWGTtJ->)wk61|G||6V?sz5U%f6)`UHlybM1Lw>wdE)9M49hriER>;^MXRY9wHJ z;_5HiYix}Qx|yi6Fq4N^R&s$KINJL6F0Rz; zy}>tpD;m8m{}h_hSaqwOL}bc)QOszT4!4eL z$Z1qw6grU`6OCg$L{8&9c=)dLSaM2SOQnA!NoWdAkh~lUMPS7lcrd+%u}xpBbps=B zyF(2-RirT{y3nHzRmFJsX5Kc+_M9?~3_-$Zm9OaMPHSnsVw?of?LET|zBBhc{E!;0 zON7gpjiSAL5R!*=@d?SELM2PJ$w@M_5Z7 zEX>A+8(LC=VIh6--6P%TSv3JK1UV=#q5Hk2uTpZVjWo3;3FA-h-ZVj2Z}sG?CW~kD z268_UjL1(AP2R$M2|8U#azDkU)7)N&XZ znmUX#bjy@irK$|T`)<@sQ0k`lsv@IZS2h8t(Ml@-Y2-Kft(A&H%iKLyt@}HmS(*G0 zA?vJDgc|5(zp;s(*z`;lB zC5)69<0StIZ^~7VAaS(Oz5kEPvgX>B7vuZA+1Q(=lC0Qaq(RX%@POV{b&W0hzuCl+ 
zRoz{5b3$IL9fJMA49n&^uK9Ybkh7+;b+gp@j2j`WFvP^w2!B2<=m(+;vGM}?>36#D zLSxfV(w+^_0fP}2yJJ{z5@tS3FO4S3wEkP%APPuAw=r=VUkeb{yrZmFko^FX*5|0n zp6k?+V)Ei5v2X_@e3&KAiQtTRkjtC&qiUQa-?Z~(Gi>K% zE_5TJd}G|*Q~&L)cZUU>#byqbYqzlW#{16Gg3_KY}TR}d*a8kQJgR?2FPOFLPU$+E0lE&eicn)MOd1GTR;s}^n0B+7YR z@4YU{aEP+r=qDdlB&h+s(yPFFFE%euf&;E`B`>U|Cc6oXxZuEX~tOHMDs%$tVchbp3R!oWjd zOC;P7XVyU*3TGItklker`(BW03D;?sO6Sn1I`*mnZ|YnbAAUa!Jnv4g@FGODg2BtM zGTSXkyAAz6^o$|b_L=4p3H>KSC}UGq)bIWgSom6J3%?k7)lQyWO0Z_t8U+(TaLqo5 z%a<_{Ggw_lz)QQ`E)VrscOR`}hoDW*Y$pegze#v?#F}R&ZP!5>Ez%ov3Ae;%M7sY@ z#6qQoH?if~$qh=j*uvb~T1EisJ{^yA5GQ*@` z0lY6vo$R$Meci%&w6Y%gzEb+kff34kV@pZ3YV=dbb)>HP0TuU|WwNz=$cQiSFIt#Q z{PM---AlU{Px9c_gu~T4RN;#^sTxcAH|$pX_5nsKldC2%WXV8FT(I7MAS} z-0`#`#QL6MU~oRLy$U=Xt7y>$g6n?^u>NRohQy*VrS`ycKb^XdxD_kI!Y?4XS2&Yh z#Dtz`?`eotXVCeFURP(PDAehsMGo=Hna*`Qhf*I`LV(Rf?V_2I+qay5mr>i7)m29`onJ}nxSd&7{0q{&5~M)BKoWg6GAhR6AQhNdHjJw ztAOpMk}xUsj$8*##XNJnS|`dXoo}m5&y~nyPb=a*K=#0sZnm_#a*B_FDS%C>U>2vT zRVk9VKBQ`L%58|@jVftx za$4xquLL{zRZ@3$g%PFDUh@F_gPklXrxL#3JPd`c55H-Hz0cqvUM8eTbz>@Cal#58Ljl7TilTxr6)mo;MRyek|FU)1%pFlO%9h$0P#gdypfrLfN zIJ2XT_S1bc+~204JV6(F6^|Nvvk|8P?{l5<1UGcfp@m0;9XIXr>85vA?|G@Jce3d` z)v8G#$EFUHpF*3Yr%5)M%QXj*l2zdi%U_;WTDrege5Qr@SS!Q1#E7~Hh1$w%2^q^k z`HO9U_0vY1^qJz;%h2;R_?*qc+|Dak7yn;HR~wMj!L|jV0L>vnrdb2!=;5Gb<`?!c zIHD=213ai1Zdv)HIGduiX4^AFC^0o8&~oZT0RhXtw8T9%vs#8*Q`4HR_L#~1*xE;{ zwzt)um;F6|_;D`o`?~IP-Pi57WqVM!l}g^#oAXmV+p~3m!B8k$ZqYl7#Quv{c6Lkt z<-TQ^I_lHem3mex?K!`sb0i!&>h=fT(l=A^L+lZt48-T&0PYBb6b_vHdpOr8%|~** zvEO+mZDRfn_y5Aj`k8HQ>hklr(-t=$zsG-CIrhEvq5j>UDLZSI!>G;dgioi!5Z=t_ zyTYmN9h)Dkbu)`!4{yyp<5@wn=Y=mw%?Ymc-9HWfXnynOrsAHCKbP(#_ph4SF}TAr z=$H2vGBXxem_GM)jBv2?-{vzbADR+e$oEGwhwnbj^R%0EMx;7f1`ODQ^Vc#!LXl)Qcf4c@@#iCWzM5O0DhVZ6k>C#l z*fq6!7H)VU?9WnL9zi-a6!toT4#am86{@an+Tj`uAJrjBl*_6z)kC0SW! 
z!5uQA{G8=t#Ia+Ocfk~=mXVI#AZxNzvN>yx&cW4^HRWAW&ZF29dm&X1xfl-jH$yKh z{G`BBxstrMV$!~Q^0uh^i0WceHl?noMBWx1WBne*S=J=o9y%bA9!DO&_K$6@I2|Xp zjjvTb=U)7q`Dx1Wf8XBanhN}MyGl3F{Q=W+ENW zbu$2N0c!RWUkKeMyrOvY_BECL*WJ1$)faW5sZ3jWoiUjFQBug@iH(L*MQHpZkW8uW zQRnn;{6S)O+XI~8@9{&)t3YMV6GOhaytT@l*xJ%6)$~dp%%@kPT~TMoA$1s2FTJn~ zNqz{?TPQ5~!hhO3jj11s|A2m}{7l@>suWfFBm51y89SEDPEXzC-tR9pCY--QwA!1k zF_PJ-=6k6J3M9`bt|lC6lTsutA27onhm;IQ_FMPspa)R$;7UZ`-+k2{Iwh&%5e`M< zQuGOf91x!zbtQ25cE)Ly9=itERH@Ry$yccK@g`#8-#} zqXLh{ltKC4bnjJp{XC-{rEP-uQXz-NJlxc)hwMJQ^+t8M{?k_1ghI9XB@>ELZ#0+T z#kZn^z2T{AW2P@*58wW$>aU&dY-DBA`R&7E!rr)ZU_K7QyH#0)52OA)YjkaZNI+pN zx(@iu&seH%-?ZQgijhXz)okf0( z!*{2%iIy9(xLuXtVb`kIXIVQl=#>o8iC^D__yyi{DaBMfIqpi&{*`#Q$LrYCZr=SL zDD_6fL@0kds`IYbFl|43Sz}@p?1$@_gfiX&)vm&xjXd|wAD*hsLJz+dN-sf!O5b-2 z=Ot<>-n<$epZ3uU4ugE}t->AMbJpF(JM=vU$#>RHW5P#@J7-elT#2@2PuGr^m)tKtU!wgPej>aOyREN+E0@RY#2Y4j2I{T7%DnjMqTmpbKNl zXTj0|6=P`5A=lRC&XE}a&unrzffPoV5Po8-^-IWA6&o=`ja(^?^B8bTolHnQ+8lgB z&*slJVC6c04>gU;n+N$cZW0uvQmKiW6x=&#awuGr;lWDsIwk@8BY3$T7*f7`F^^Dz zQEe7EmomV$h@v(-G{^N_b}%;GBOGp7pgU8~JscspNWC-+t5D2eZSQ8s$#P_D=|pqb zu7gzC)|-rLh1CpV{XG3#DCM*5s+C=?H?F6n;yM5F^*4SfyqVLs`dNvUxCKyjcWpKL z$!MnM!`+T1L3Fb?)mS#84^h6U6FzNcvBeO zu#!jexCPzxe4ftlV3a-_F8S&p-z+4aF&Hg3CZ@jH9XCOc?VGTzNoOrL{rs_SgOgml zAy3q!2Kk4(y;3wF zDd~drgQVK~DTl}WS$X{%^0Mw-nlGu$MAYrEcdy^yFTfI^48*8rYI}GSE%!|-j-@)C zhyAgZtSl%M)_XsZyGBnG6e^YTn{{@CGj^}?A8{;)1=X@rd|HP7xscf$QpmX$6Jzv* zsbZmv_s<)@F^GUte#LIQQ0*jZC;NsMQiD%YdU zBSUC%Fr0~_5n_mDS{_azZb5k|c!-CI&X77{s^q~$OYXYS+C^a_TYgds^VCj+%9a~& zaV%+<{0*tgOCzmLk6~t0qpa z750Uo_GJ6@bH#H_-0a&3d6QvnV_!soI|{PTV_DG)q$67=hmV}(ECA;y_3s2N0K-3^ zYsmStghMt)OrX)aDc{eAcF)kQjq7b)$npj9UF&C(R(qN)$Z}@mlqS;HNxbsAwyw1O zLTuwNQ+_Rm-^i&3k6V!)1!-NS8=Z+c1uF(HCz+q|d`O;Ymem1VI+ijUN+{Y(#RPYz zwU1VbW$m@|KK8j9gl~Py-?KKT3-x|LI)&KOJA-c9IAEUoBx#8J<*)Vh43h2Ahddcy zG27S7HOPinZyr3Fxl&(IN%c^jxYIydO$oC%UCL}FUTCSbd7$UrUg)*y zt6TpZM7#H1i}qA)XQT`XFhMqWP@FSN_vpky28>ltH&w+D8M|y)?>s@rCm`Lhy!x!zg&=8U0X7m71(<|!dlgxoG 
zuU3VALo%P@zMwbz8YcO&7*l(cM_Jf1KBWfARm;I^Bz1+Al@jlU2+a*NE+{2JL?@jY z67oFH^CT zljp6jh?_`;d4!V(p(JX{3Nm2-H?(ePccF}YsXL)Xdo#7h>vCub{6qMAnJx48Ov={v ztq~L7-?)Ui#r8zsEj=wzCU6s#ne9!BJFMb15!b1lOgTddvtg4qNMcrqcDok>9&RTo zHts0M_z^mQ1h=vV4!JzuUvD?<-uyv1TX2>SL+?4uJSsasWnY#2tL*MiM%Scpz&Gl7VdVT1(l5V{0dQf|40egpOyQotHolf%GdpP89XaW@# zV@B&3#&28aI^wZq?d-isSToNuV<6-Ut;=R)*ek;>)jb_JPtQBE?pRG4E+wh>Cmat=GLhkNqOqcv-@}_M+ST=-dXh{J z27^Qz4#b81`2nJz1&5pc>4>$_9p#Jyy~fe%T{FHVQndc|P-u)br-c+>BPQEWUJWug zuaWtJd?i9x5gjQ>xJav>N0qNIUEzZM=&DYj!U(`6Bq5tL=Lez5B}rbo2>B_YBi`u# z2<+Hs17Oz(pDQG|M(D~8E5NfQHT?4^&ZCwblL{gn0!~brJEEXfbi$cx;XtntEdgN> zdkVU(1t=(YEVgP z4-q=NZP+!H)NIlK3q0q-6B7LyKElemNle(V;GRyk0xU=RY_xZp$gWA>CPqm#6Emlx z-1JoHAAPR9YNF*C$U`d^&XeXkBdPZDf#S-%sb43fXx)4K~#kIRz6a9OdW{=&y;#)6m| z>#fxZr(ebwf9G`)ow!UM{t2HmtyK2pmqz*7QL4_bd)^W6NP7E%|GrMG??jP%kocH+ z@>O?sHmQ`))F`@DXfidkq+|+>+05d|Pr(RUjhD}6 zQ&*6K0YWL8D;_W@dfna9eVd!4Rpid^$5B>>aJuEBW}29<4zmJf%$hv>^~qkETyfH> zjCY7`!&aQWuR!eWT7=)Pwpj;sMh{;fT{F=w@@0bB*6*bo^46 zj}5)eJOZ!SM!r$AdE4!5Z~cpSIkM7w_@3lu;Ct%A>|w#Q>S#^Kh4JEs3XA_PX!5iB z-AM)2^4g#o57&m--}QSdhtreWmp78rKbel=_riN}`(8294l(+Q@YU1c)l5(L&g-HY zyC=Wtp5Wu~E}MXnU#T&dH*dU9-}hH{Xzn+oA)?M$R|aw2Vyzg| zh#ash!hK&oQArf-PhJy53;{fAli&sDIn-r;E^+OI7TD#vM%!r_#xJmCOob_d&ySfH zQUe}0qY5FUPN)nwy4^Ya~?@v=Z z-~e7@aWm~fx7<3;)HApaE`(2X#CXZoquPI<_m42-OFIlVtSKiTS}K7Eq{0>is-4^~ z4N^N%NizY`GsBz|WI?))2d7R>+ zvruJOV2V&gD;4~egp#7?$o++lR3H^1=D601VAYv;ZHGR}M5)hod$V+CYNqutGhfW1|F@HO~!+$NImI-CDNa-e2xk_8qZoK&PJalt=3OGJ0%^{-k2nm{OImH zC&zRpkyBVN{w#X-Phsv+kVY&OKAr{;e;{PP=%eV6ym%EQ0RK5T-nkDd+>Fal+<~7w zLfMrbOFgg+$sBYh9M`rZX}SB+%c z9o|uddvWcX+v$R{XDhB;w}1AnwS)_J{43l0uaG?#dbWk1BfM1~pWz)ayg?NR>^uF^ zUUjY|Nd2>Z^IHgYKfIpTozNI$c38KhUmOuUZ0*E)*<5UYzu!u`|LwoAmbN9w%X$38 z)B3M7WVV_fx9`PCxK?ijt6;w3+30qrn^pr(C9+?NK=m>1GQ6a+_pjGZd_V7yO6&FG zzREp*9Nw3Vh_|ya+zPjJCz)7zxN)9?LlE;*g#&s#b;$kEZ>R$=*N7ELg?_kco<()| zxG+XNByzg(BblfHQ5o}LQdO_JV+PK1j8gS>meikSB8naIVmue;Tuj3-#IWkRS?;=B z#Fo2Aja1t|gkJ}04cL)XIFqRnI*=@tRXl+CQgcQO&t{V>-9iV?mh#*Q@JZ^zDv<*R 
z8dp%92d!xj|0#SGy;2ia`1mqtpwra35~>wc2whY{kU6obu?-K!G1OzHimUJQ)zs5Z z5M!wj?Ox-mvKk`1E?Hu%24H|8YsPvo+fV>I20Rl}`XvRh=K6*&KVspk8xdLNci@r3 z?kvt#z9_%e)91Pw`S3F&*9j=7|Mtmv zG>`1;k5cthV*g0qXd}cju%6OhawqLT+@o+tF>U9EqW;u13*d65-rI~zJo$k3U3}V~ zHE{ka(^8#BTT=TK@!tUeB|(u=*KXU;_#f2Ir_fQ_m7R@?-7)ZiU?{@{UzMW!+pbEo^gku&kGmY zDaqZhw!D!v6{u=;#{};UV|2@)is7SZ;{}VA6_6=Z)1j#!N*g|T(39%b2tq+EWPr5D zjSt2l`sfC^6476#l;sloDbVpOnIYAcMf~>6gsg{~XR>^$-NgbYDFpA}RYG4{%vvNE zcF(jsf^k`>5zzoOG0-*PDC0En2$Ydx4S?ba$s;~O^9bQUGP;H;D;2j;e3Gk1_Hg)= zo45>Lg$=;*j{IkUC7xo$GkV>jq-Zn6TUuo-7_7scCImGw?Qt_E%H z`swFxlyYlUhx%FyFO#I^LPAF8Bwr5*#DRvZlAJE757Bml1P58qB`hUX0Vy~U zmP{LQ&o26ABPl*it$vYb1j?kDNp6j-ymZj$^riUhm|TowVC8d7(F=<$^F!jC7V?Im zIdjc=hdRsKPv_h-!Ocx+^W#TW*@nPnmQa#fnw7scP+uH;k?+S)6|kS=PJSZ$ueYsO z6f>$x(avKxu5Iajc4zo9qhvto7%5_xd^2(5(!=n{ufk%M*H8Ym`8DRG`0;R@nml-V zu;3Hfu4M5a9S-*{-kk%!G>tmjD*u%8wDw!>gHGEmlJ>6dBt!&?*srjj|Hc2N;R?bS ze{C5bmz=tP_Rs8~CGNNSm%Bci=)HmSIvKy?#nRO+O7dTSFx!RE2mZ+Dk9zxpX2-kQ z;8xHw=GzLd{6cj$%@x(5?+o60cX4n0ic4K!lRx}E5BPDKej%Q7iUqyeIiSN-emzH)9@e*HHqR@)%l f0!=|dGzx#;hViSS)fTq%O63v6XYl#i|4sit3MdG( literal 0 HcmV?d00001 diff --git a/vendor/cloud.google.com/go/vision/testdata/eiffel-tower.jpg b/vendor/cloud.google.com/go/vision/testdata/eiffel-tower.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9c32b85af67c4a2e24799c977adb92375b2943f3 GIT binary patch literal 6832 zcmX{)1z1#3vrC7>F6^#^)DprHvLHxEEbNjBEU&QN=isANQb1lGzKW$ ztso#shrIQF@BMSnH#6tVy>n*f%-lQQy_&pQ1OT)&9z+V>-p`VmWfs<%JvL5a_@5_J+4|5q!KAt9w8CnJhPKqLTC zqGD3gYyU4N0g;mNkTW5W7zs&cFpO7)sF$er{na#pj<_!=hztZ!0-PR#F=hZ#$pb>QSb zM|9%AKMKQ2bZ{yEmJ`UGN>PTR*MHtVeHLpQ-PGcrkikdatf1;rFn$HN=0)*46q>im zAS;t{6s=TZThaH@EQl8^2k(c&a^VzUw49I%0%cf6+zFnej|)fW-~_|b;2#k1j{-vs z+5~J+7+Jpb3d++o#K&zFWNI!?J*c2HGqQGB*ucwU0qaIOSZ@<(|u2+OP6M-;EfB_CVP^w489`Fa~8%7>)-0 zpDz{UPUUDg4F*kGASa0PRxT?=lYlX34Ixl17feac*D&FX-4gnG`?D~2b)Mmeqo0vF^(YZWQ?$Pmv%j~D#22c!>y zzzu1`bp&ywLgLdUD4p-B4dV`L-=v?LZ(1x)Tl@B?D35x1kG;E*FmMPORQMCcT-S(3xh<1Dm1^; 
z9|MIORimYzugFnAEgkPLgD+}&kaaRz}*E= z2Dmf>qFsWOurxhma9kxUu3#jAWvo^Uz6tNmkn+{XMSNe;z`cX>J7r_R%Xp$sg=0NB zAMK_zfUL!PwD`UQ$~-B*7sZ(bMd_U!jhv1dyn5)fGvmC4h*0$Jlh;;plBqNfo`Q9P z6p4U0C_#3$`5hX?9BuL?Z|m~#CY{y3Gn`y6VYg*l+<#JA)7hOp61ZB)O0lN#)Fye~ z?r6pPJz3s_xrm4yLj5MRpHKR;oN4ju=(?t2PPkNd_4&%7zawZ$%ohWZDTP$tJ4eyl z7#3!MMTrGyg8>(Q#bJzyl`CqYk0Txc#@%Ej!4!;_XsF<0Suw-MSAcgTi*k|IFPf6E z=;ld>HL>mZ9fWO?!WV8Vn|vMZYrbq_?1u~K%Nc3T*mCt7%3PA!QRAU)s)FZqEF+Ky z7zlnyMJQY!XQTJuq{HK25oLw&{t~b#(YJawj1iXTW|I(_sThaCPJY)YgP=h~?~z7Q z1*AnV)H>!j89buee|s{T zvAz1M!^7=)9{s>QO}oJE@S73d9Gi1$1y!Rx3&WpY8sD`1C@r!e%S08BU+su*yW|NI zaYq^DK5X}pIq>`YrVKX~-_m5*<3&u*EC{zCFL=2Gyj-jzgf;~iMu4>7-Y7%*$&m!h z1QC}(q24rXk29puFa-mv7RCtc!ErErZyE-^R~``kHQZ<=%@8F_#bmbHCi0@8e{(TZ z8#>^gt{qICU$c5yNfmECt}rt!>cWcbozzqIWv5Bc@eZeI=GaQA7?g=MzMu5cykbIS z`nm@C;~c3e(#QLZW`i+|$=>G=;;n(d^sUEnqT%9RVnGdd9K~J_m=Cz*Ha5j9?NrTM zmgg$9o{O`lH%TCcN69i4&c`awo=qZ}{axWKVFELx1|&`A+)5!jhdqQxzy5rRy+Es3Tmi(TvZ_3u8naDHn!lsgr?wf!s`^HZQ00wfvwa`Q^lNeS=&et;b?z}u zV40}=1Q7g}*R;!VWU;0@Gr^n}i*gD#5l#qwM~sFHK`eCsV&tzwBphsWAAk55I0*!QZb({XK$)8?Zme z6zn>>;3~Z>Iq&uWY;c{0P-(%Vlt9Xl9@15>?yJRYXP0IA&<^fsjBQNp^z&y}+)+ts z52r*2SfAy6U!EVCt9P%S8sD7yT43TjC|6D14|RIH5OB!+$sbYan|Y8pZ)R~a!P2Ks zni2Bid(CqX!F3v3z}ZA7T8a5t<$*!MU6)y>-nJZ^cM)3~8=|0;4TWtP?TyGPgwHzL z>&eTE_Er-EYvqC{HhSBeV#c3C45$+^?~qwlEMq+`flp<{IZ8td=DIX1h6(7w5oSrf z)pM1wC$u*x>5BZ|;91#*WxixwRQP02f3;OfaBro_XEJt9voOExiR(;}VKuR~3yFK* z_JoV&#g*405%Q>6n`gBCL#zv4yts|c2K4QIyf%kX(=anDSed-$hI%{bvan8~z3wt} z%bcx`Ymn3XnYcwZjQ0xQp4*vwzGrrEO@ni7i+=>VliSUolBm5K3G9sc=6Ueecf7Cg zuS#ntzx1y2P8h|@WJ&oGN2voO-vwxg5bFQh&f&^L>B z+|z0&BT%27iE2>P0ae2m<9FNWFWGsHs-gKR(|Xf>l!h&Xj)1y;P|`9%`aGe3x{VV` z@~sR*Iz?b4K%&asuK=?wU;fbU|E%Ahvf9-tV*lVcX|K!H%!a5Ec$7m-&J~R+rh+OF zm;DBu9wq^n?swZBIZ6Bw`Q5H9YtYYehE&!T8)fLH5)=A~S-jaZ*1_+Op-E}oAree} zK!#ubs~0duhnj$iM6vWSwy~j?5lH>oX6`S6Mc5TtbM54#_bNv7e%h&zWC4D8A>D!Z zAMMUSW*Dm2-kg^n+3Ybf-mf!T&3gmLpr)_A*6mCl`ThXn z#PP|exP`Aq26&ROuKlbg1&Xg9@M7E=Bwg77d<0`%?ORnH!Rb?bw2z*`3v_80aI`5> 
z1Jxn>0+vq|EeLXx<6JDGrx~X5HSo4RgG$(6ys+)N5q=;|yj+4fS2_OOwsLojArqfk z(LGJVDm8QGChvG#{Y;|CuuRQFhAgU;E!F2w4wu+kn{iZHxHs~;{?rw~8~%q$u9+k8 z3q_CTsT0!3T7-Lf`bDbv;R4N0ur&TwwKB@8MtE(>zc;e$=)8;fzuL&SU8{DxwsNl|LB&#|5I*dTHtO+4n$-!R zMGTEHq@2M;$NM`WX*=-QTkIoN4(iB~hyAxYn%0t2M!U;T9_-GyT6b>-B6S&nU!2Y> zYu?6KG_Zd4mbvIDM?Rn1tx$>-5Za?meqiXOJ$YMaH{)HV~mRH3L zcR$k$0V+_F+VHMWmYeIxW|nxLhG|Q^{m*O^JbAOH#)%AXmW(!*#2LpbK1t=;&|G`1 z5;PO~bAhGBLEPqoKI(XT%Uo(&yV<%#TrbZ|81ex{&+BL>hB! zvrYe9_bj>U3eY$C957_3*y-2hBJ61D4Po`AxFG`{-%aMA1AYh)Cvg45(sNcU>bafx zn=wf=1}y)K6a4D8N^mEPzg{v+xSTox)66#|h&oYUe_&7RQ zm^KEQA^2I=>goQbx10oTe+Z*9$M(lO%PK!k9$h?ypflM2z&yWCw-02|MM6v+lC>yA zz3Ny3X4UQJG+t%PTWkde>p8gkUEJjov1$sc>~4r)4G|h9rJC=66nHZs#reVkS*wg% z>+IJ96WxlV!%Uj^bnboqy}I13{xM#mY0FGPhLC_8OufUSnRt`=*odRDviLKvQ2gPe z!+?7$%$(80HZw&{>b2Wiz-Vt34x_V#F*6w}*2iVvc*}5vm$ZLurx9Z{YVU1TaLc=E z>-cTzWGg{L?Fyh&O!Db5dr$RiLUszEoX&=2#e;IzA0HfSW#XMF*4~IZ3BJw-YwwDE zs-v>v<=FWN(G2R{?vwh_u4IV927G^kA2%%CavZ%7@NF8F0D6d2wu+OG zT>-Ey^W;oaQa#2}3|nQbZmpi`C5oq!a|5h(Tb@!n!oD+9*L(*0hx2QCe5RxN*rf@i zSi@Q}2Yxl_J(JuwITC4=p2Mag|yeC{+}`;JcpD3wFfExTM>Ke@Bg%bfQ6 z%l{^jHNQUhw2Hg}EQ&nxn0@i1jA|0gmSm$m-*cf`geRj^=6CcwdDZoGZ)xK&?%2-S z$=5ik9qF?Epd2n|g`l&V9I)}S=?naEVihhTp^(XFQ&DE*Cn#E}tx)e-xVG*j?mwG& z?(O;Wv6sywbc>G}O`?T8=ZZub}(^`*HV$sqpB zmnR)x+ql%8v%gH5+*6YpIC8Y8&eNl?6aQ)R2FZ_9QVzKS$ZeyYT&L#f=3bpU?qi8< zY}-7~L!`i_n~9Hr@iI_04)44&Zu%j@X1F$t^4sk0?cB+}mqz0G4nxNfNNztCBcclk ze6>-U=scV+32K0nlfjUjjVm4Pd!xsRxf;@=jGE-~K)Plp=H~hK^iQgsHkUD100{*f z`mH{NDeI}gl{W&ifk;1_ILv{7Iw^~X?hRUh_rn-cazV-vK_E(vVDt=XMx|-W9`jI= zJhBCb3U1oPy4)iD0A2>zbq}-l>lQ?CyAR;oHe&0Bd^q}Nf|GVTJV&=YOzisC1Ox;W z4Yq3V8pVi{fM1l>Y44eIPb&v5MA-*s#Q6SjG;^H4D*4m>(_-&wCb^}+DwA^7swaY( z5WIz*^O0N4NMGQIW3ti+Gi2aPmS?#6YN_y-=HY4hu$#_h-^M*z{pU0)483;LixEms z$M$B@bl**`-S7Nn@t0|=L$om2qOm{KV^!j_J{cW6N#Q^|;vcm#Xu!_tNx=Odrqo@a^ftQBNdg_^7g8FcSCs%wtD@bFZ>FD>JIzZykCF ztudeW!xDsyeBE&}@|=M4Pl4BrqSr8X)2IFdo6Ikv)vNTYH-8Usm!-(>S*b0=!zdga z#IWhhvLenIElaI1Tdt{v8~u^tnM{1G=wo_}e(-S77mliu#tJ)Ar(}a%x-m 
z`I(2`L3Ke>K^c5hJ6Y3;fdeLcqU&*JT6ROYt<0G-Sw6T@%43hOdy!F>_uHcYJoYDZ z_n~y7^y~Hl9=ce~tW^85K%HLyT|U*JPxw zK=s9h34mgtVKU!~v%ptx9TZnTWW4Lp8TQs#?hDt_7}s;f35G5y;n&i&Y;f0Mv1ZtO++R&$;D?k9tKu19v9 z@`NB@MXUuXL!y`D<=sn{UB)Ai!=V@JQ%$xy6BsjYfV~@v;ohb2gzE*w!-6jUp+Q84 z^2>wxgOo0xX$q5fR6u&d6#&Dc5=L4WO|880@P6;5lBA`~PA>wVlBQcYr(}Zun z-CV`@FfzvJrC+O8MP#dd%p{88a66`qAD=N-LPt9@C5feeBA?+FCOJ!cy8Fze*&VuN z-u&pdUB!91we*&3G&AT0fpfj#>Bzbrk`om03B`Tqwl&3K(KhK354!NV zm*O14DSV{rL(PLF_}|%|2PxJFk}VV@@j}y+#BbQQ zth^fBO;aH-kmttw{B@mkxy64fnSEf|$Q^&U~bpCT=qtna9nTvfN?&J6D-y_T8$3X)VdV zf{F=hSe|;NY$Ch0_#Ic37*Otk5U5L@nqzYWp-3U*`^!6`Hf?eob?@Qv8WVp!xwuD zteU(&Ee91F(5%?(y*AgZ`*jkI9#??ai(ug_-?z=$9(-;e)XQeP%`fZ;>U(y}{6%YN zVHjFjJ(__48-FgXgLhpVO-D>ufZuE?;X8u5@W~{^vkTcBN`7QQ%!2s}1Vo__UsIZ{ zdIf-#M`5~?77EHTnLi&q8`n45{vou+*<}~nsArNt8)~K+dbV&_(2O{jB2_Oz;X}_m z$V0xI6euo~AWrq@MbF*0wv>-f{m#BrNrYHiF9e3KxR)msX3#Z=o^NzgA_}h1eK~n@r3+K}m;%MEO m?~ZLx;{-v)0fo2}^1Hz3&Mm*Nh^9f~^~+}+(NPH~5~+aurk z?)&b2_y59LtW5IDOeUEmvl13B^DpZFR5>YGDF74{6hH>@19;g0JW6_4TL1vEvUC7= z00007K!t(^;6P%gkQ*Ng761i#dO>c2zv7=DaT+L?-|5hhI4cB?1bK!+ZZ=396Y@-e z+}1&_eq)Lu@n1nxOPo~P#2yGXA*EsA;4z z8w)EZ1jon5O3K2^$HC3V!VY)^0DOh}^&7y##?Aus^-qcG5E}69uXI)x7N~E3*6`)! 
zR|ceJ|BVwnHy;}dMEln-RxU1%e9X)a&P*oeAX6Zd8OWa5!^Dx9m5GHJARz4FXkumy zbRjhbT3R~@QlB=rQ;0W z_COaCQV)AO2WLJHLF(Vk`5^IM$;{Lc5ZK&;PfbGVF9?JZr2eZe?(XhP?(9q;uq87q zFE1}M3mY>V8zTh4=V&cK*;7ktT@P_m^4+)^N8Q9v<#Tw-B7cX4ld3R)mVSkfRt3g+>FoM#04nyt8o}vco^AOH6YFWyK$H}S=m|Hxfod> zZO$UV{5SHiQqHcXHb66%Urhe2g_w!uZ{knN&RR&^3J7!t3NZgt6GHw5{wDrH{$}}K zZ2koPOPBwHen5!9{-fc4>$qPzzkBXa^?s3jiXd~KozVYmd;-k>g8WkLH>slrbOzbE z{%TPnZnnQL|4#e|srx@6|DE^`^1pzZ4%RM0vJPfee=+`Fa{eiy4zh4@Hvt3xYDrRd z$arA-yEgx1{sWhQ^rMEg{hxjLUqj}1U;ZWOAILw2Bq0r?1q3_)()3rH^e+N3{Mi30 z^l!*N<)rLPoUK%>J^n8HH~mj}$msd2{NGuBL;v9+4zdG*)gi+R2x$!$AlSjgjuZ$6 zgTSOTt`4>iAa@5+HxoNoASo9oCp#zYU+n%>>K{&j>!l2~h73{@yZ;Mw{@u_2uX2zP z_1i@M@66D}+Twp>hQD(DWrn}V{|__#7vz^(zsZ04_|KvAj{*Gm%>S&;ka_8U9yxzc zdA|nEf6@q!Rv;IUvlZxn!}g!-f12YzVJ_?FV#WcfvN#9~{HNNGsm|>8l;-SW&irr0 zZxO#I+~2d?{~tk;9xnf9IS>Fd8w+HnWMN}uB3a7+D_-at zTo+DC3VB76gey0}ucff-Sb&8D0V`(YYMUgrKYqXN_j>JH1S>n&Hr# zn8+U=A*~1h%X$12;Njr`s1ySawJM9M`wvy~ZTc8lsrHOk2P8{>ie!Pbh=)huXu1%ld2o*BMDXSsFSjPbiZ zk2t6Ndp?%OkHLB4Fu#RD^GfSbKwM;qsX#$Ni;^mj8r=j0k+}94wFcgv7VJ!&?bVo- zSMtuBPmK}r_Mf}Xo0>~klj(iyn7&Ul6DXSjU#q_>i5!xOV&603<4}Dr|0VjrK#7vSv>>9_uerEaM<*{sHw#1b~pe z^SWTwxegIB`oXWcgrR~_;krFf*M53j2X*O_fGEm2#m2H`^$yU`uQH?gh`NT4(tG2O z#cZWpMn{Wb4%nn!B16I#L^ozn`9~7846ZHAsK@rQzk3R@r0h^zM>>p*j6V|=2qpgj z;bgh&%p9h%P=;U9Kyg^7;^u@0*Ay~+A#jD>pv{?QbvyV5kGbgLqf0=6v5x60d-G_k zPQTpBHCk>uq8+O)SS~OYkDJ>wM??gi%;7jXlx-PN&N=voXXc50C76@% zbnAQ3d59S~DufpGUhVsD9hwA*$)*|{XGBr@bzUO`eqF)Ev5KSJU9Ce!2xq~u~Cwc8vDCRO+)4+c6i7FSamwr*x@6C&AZKw@gjG1%)z=iSe z9sI;v<;#4yQ{`pA=*gGm=^8j>2s5>Tk4~5>N||8og(7IJ^A&$IKEhVd;`?yt_nLzX zYoM;XPxpnZsN)S+I+@Gp!-12C$8*Z#3bI-ju`E2iCea#b$@j&`yMnhIAY=Yxlu zPq}^u%EYsEAh5N9T{kp>xz|{F;9!iVc71wGZrWaZu%c-=D7kXcz=8y>7uA|$)Hvd1 zC-tfkj#)yRY8*|w?O7d^I;xl^-p6MtvAv{y;Tm$lp)jOrv6H9U8=?en#y|W-hC!V4 zmg$mxmbWPVXXKdV`6_(u4~v$S<@_u%4%$8kZ~2;74aOfZ_mMOi{gdoto*P&rO@p&P z_VR5Wn~ydsb`olbl&fZp3=N%^QS5-keo9jDmlPBGZxwkJKE{wQog1DHcs)*~HH$j$ zs|_BnBuTjfP5N{dh=I;7={}r0E=6notJYiiJORvv=Dl8Cw3SkdO*w6$r;vI75}k=^ 
zqb|8|ba`u>lu|)!`EXFHz{VS2VOx|tFVWpB_(_<+H{gm{*4fNtH?L|YQRs4*FS^U4 z)pJ+v`;{4MQmK*nu~Xu}$uN&=eJpm#Y#N{Igr11gu_xKx%;lXOrz+tG2`B3ecWQ>~ zz=h?<7Dg%XT4|aU-u}GsXld1>%ixU(Dw$G{}0ADfq(zc7Mdk=*yIP0sc+wo__t%hxI+<5UQRJ z*}X{3V4JP#(q?bKC%8=4v7rk)i)$sNA0I@uBC*rc(FvU>q74c|^^Lruk{8(b!0WAP z96o2`&U%z)@|SE;_r5qya4D3QQ~6y<)jj@PI) z@=G`SzNB*D!%wL!sLa!NjFl)(Dw8AcmfBL*JXUr3n9jBO>_d&$cbNv)K2kYkK2=e1 zo_ZzmMeL6${@~s%)`E`>vddT@it@BUXAks3mQr&gWpnf$UM3knQ{Ki(U(7PSNcer) zxs$22u~C#`2x5!s9MIAZ2DJjc7Zv5=m-s=(oVJ1~Tg6|~rkj+Anl>l#idPAY1YQ7Q zOCytpm8qA!@3ONtH|*)TLJ8mP>ZN1g+H;n>J>@UOlM*YQUKkeoNe!$P65piFH79-L z7*i5iG?+ygBtJ-*GSc6zi;B5%PTQ>Y*w@t}DghGcfUmMa8GQx|ZOeQvySnNb6-1!e zqHr#`nEjDM#h;TPPWn2pK0jm>#`%j&mP9;!iOMpnE7qbuv%2V^1>T-XL+9kZj>uSI z4N;0PS>4TT@N^;@%Zi(`5!<4YzJ%viwSQ&CKxK{KS(Nc;(Gy&ff{hvU7~aS(Au>l? zlkH@Y-k>#p)WE}aBxB3d48)lt@^;D<*go*DVlRPTA-ju7FO30)jqil)F0%8>(Y7mD zBizZZ9Yc)CmdTjBPiHNH(V|5THOap$Fd zhO|NIAlUW%Il9V&IDT#L#3AkVAa#zw+@TPtH1?n?+9}BKW3XvD&$WF%A2~8(K8l)@w80g-`iIl4(ewS>-U!NStrS^#_j2pXe0QfI zecb|sl9uEF3`?vTnTS@(-CUIy0G~S!HKs~LX%%s#8rAz&AMIr3Lno_w=Mrj`X+>9} zK+o~-Gl5q5c=lLlF97X~dm-VRZ+$B&HWn95jkAdf$ydcs-6-0uP-9XGZ$Km86p5%X z@`1VBf&*{pi)|P7Q9r3M)Tq<&)Wm(H%3G=4b5~BJDxycH4X*{}$w{g=I8+y4sJ%&j z(^|nuZjcqvOfPZcng5*H+j3zM7dSZ6ClaYho1Xt^gl8~n$IEP|EyAH3?_?DTeVIIr z5T~I`p1*i$F7Y8o~-FSS;CSjr<^p_mSa+%gHcvusoyPl@^~3u zTaX$@?1)`5i`Ra9HRyZ}f4?!5=MU1|$^r^`74q%YpsJnc=G`B6T{Yee=SzzeYDre7 zd~$kf2Y#GaldHyx^#W`6&)lhXiA;8lI1MER4TG+Cx`DU()O7^oyI~ji#G)kjOv{8L z5>4-E8rnV!>;q(o7nLLeTNP3nxl=s~JY5y&_@TWm@rdJ!q;b@FhFN0YjH3Uvpa$>n z9}s9~)lQ~+pAjc!@UeIym7@-3nB?YafSBzqN3Q5sg^RMB^+!C5i0h-0thqGwc5GU1 z*lHMp3U*F1w7S-nyac{`>)-H6#yll;G)0)0X0z3EGqUGzL6bWy$FtQsP7=NAkvHJV z&~H}g=)Dil&&RqnBoWynPonSfPl1k?B+^K&kYapDlroS|264Oqq!}`9ne)LhOD3h- z@yEcoByEKRLs+Qe-F8DfdN%=&qQ*IdBr}QdH3yVL$ zsDP6oo^MhQXl|RMAP+yi)HTjE{K+r3cA(#Ej!y3zOcpjRP}-7!<&$|)i7~$`U3Ftq zhbsCLj39EvdX7gS!7O1*qgsBMMWWu$G96oa z&Ty0Hx>6pAad47VACWz=vF&2ohouc?*?gBieyusw7)z@hRcb|QWLw52i`@eMWv=9m zAab5plA$^|oE>TK$mO>2!=utJTtU()KAa^7M)V#^%3|DffgY4j;Wtljg2FdMvW_R)sw| 
zjBhx+%DC+p9bIestj=|7R?pq*P!cN4EGvudG~qy;-Lx{*wW@ogSrh50Z(Nd;KQ;e! zsom=!(GH1g_1hL4;qE*Y&k=t09C8sbX|#{#X(fwcKR`pj9?67s*$_gJqsp*SZ5OM! zsyD1G9_Jcu8b>0sF%c8*RT<(LJqCq4L)w@>VA@_H{FQwt7EHc{z|KTaUib6t#7{p_yUl4%@heXO&FePNqr@c|0}kE% zN{4clP-uRDYv`mHVIwPfhD}eo^@#Gu^wq>v0oJr!GAW3wvw|*1 zIcyc374}i6ved)eN%Lc5kUrJn6iVwiOm@w{Z#Ch~503W-@a`?$4a!&86rfr6h7;kw zt@9MIgxdc}@3?8R!T4cNxq>j2$njZdyHne4QC>+`YGV)s2!kN%l#X|Th$*p?CP81P zF*H4}HD#5jj3C?SmbF}}O-FNz-{y9~6`xylhU|ZVsr=rMEwS`?k>_MCQ-e27@xs!O z*ttoo`~$6A?zh*FZ3Gs;fz7=)^w#I;^LjpoAAZDGk@M1UV74`DrkdbkSN7OCU3emU zjbJ#d*;m-&bg?ZPnjl^6L9eg2lr(*Twf!N1S@&*)y5=sdNiIQCnQ`M`Dx%08emQ@jJkg1Yk?l}+b|+IMVG{QrO__eK*Brva!>tTSfEx@eoqeHh=p z$l%*aGqfjb8bHp1C+I1h{3OZ3sVB>6jSC3xr0z~6*kOfTNG6`?bk&+vgz7dOC zQVZ7Er=`uL%SM=6obdGHg#-c!JBEUZKrs#*U9p&{Ivbj0kWkTFFdqG+mp?VnCK&@> z0zIk7{5=i<#TOXXDJ>!}yZvUGBCe=Ls6k37adlr|Y#b)f#|0gY+Za0;1Jkx3JX>D- zEYFhgi4%b-ZNx7~;eJqRLsWu!Icx3exuK;v5S=$jrRu0Pn{7HG~ zj=3=3U_0^&MU$>~m1PVhH9UQktChYs0i`|@MOL*}6HhH(X};?;E@P}5IWiHcJ-cC= zouBTgkI5__!Zs#By65&0R(vT=lZayJ;O#^T!cU&ak2%4cFwUBBy{-yw60Zq)2=UYu z_y8n_h?ADsh3V7j+EiafV9~Oi4{f@X`sHYS(6pC0{um*2TRy0^(dMgmKW95i3B}`+ z=YH3rd4Ri>6B;`yMa#=G8U)qDROdwTF*(WY6A!=#4dYORU&_Co-eWQ(a9hM=9u@F0 z#R5%C*~&@g>$rvnuD9rGRe5UH-DWMmtGLmm;=@cpO?<*kZHz;*ZrS}Zr|iV2+)gqg zPTCJU|F{VD>YCQvo%~NZ{>+$D9;&%JaXv7OP(6|&Z{hTgUrWVLjOmZbwejVDsuTfe zV8SP-&Ul>l6`YZ}_p|ZPok`mu zG;2ovdn1owQNZrpXLx=Q9*)VRL;~y4Dh*em;G9E=&~?Be-<*V|)LVd<^;w_{903_T z9I9EW(ur5{B;B{qNa1`WJ=V*q>a{6}Qc75_n+TNy>NGlX1FZcZJMbK+0H{|`zm5)| zVP8Qu+yRiyV#rYd>}xc1RE&3+WUO$cY*^&j6fEomlSloMGCwIB57a^7 zi8%|Omsn6fp-`hx9i1BxrRAJ4Z>i6F!ryg~yVLa(cM^rvV~^L7m!-V`Tn^C7W{U?|&grem@mXt{80VETfxr6LQnYk}jXt<++IZX^Is4t<&aDP@O+vA@1Ei7977dI;ljh}I zIkmrR3B6qvd!T!!?Dc!^JCJRiGgI?KBYI}PqHUE6=`0N>WR!B;_j;;}fTJ$yn(4O3n5_9)?7$i#qxRd?e@f@b8v750O=r?k2ZHj+6%1%p6 zkMsgKA-!I4*Xg{`Bi1~J>Cw7g>u9^FLOTnNmwpP5cH0D9oXPmE6}o14zlK(Ffs!+& z9O1uP8+6lWj7=E)uz?{iy5a9<8AvowK=(L9GWiO+i;e4Q8q1A)nyU2?spJ z#Di%5@GY|Kdi`upit@v$3z{B(DH$Zgc66MT^dz=wZr9{G&_0nHFF7&Hlc6k0!F1pH 
z&+duKDmbL-cEV`cC7#kA!t)*q`@-R>#51$bPov5doNZ}8Dtv3N58gZn=^8}E>gJzh zgf(3TzTqaNvBMtWJ0YV`W0Fs8x=J%@ZIeN1$Z|mBH#`KobQ2e^T}I4{$!15CG<=1; zw;8~JsLk>6df46Rn@sbd>)7cXPBP#R_9{^)u8F;BPV$5IK+ zm)II+0^KIE<|?I(n(ZFUPu>cc;!RuY0qJfFao%rLOO)txoPNa64Ec&&^s0HKLtZ7v ztYq9K0 zRVy3UzA#SmSg^Q2SG~=z_*GpMeoB5Hh!i=v4OSF%^JP22a3Y>Yca|nT?$~6jQtMCy zeB30KEe;uG4PY!brKy zc|JJS7jd`Fz&j^k4q8_BP@X&bL# zamX6;L{Ab^&p4SEdl{q_4@1EFwLGgLNlWHk?>6p* zmui(LIBmW}52l@S+U|sLS#nwAN4>UdAT?f=z5kkJoF@{JJ*D)HTvTY2i}(}O4-+nXJ2`))!`ca7_7m0%3Ygbfy#!x*G?DJaUM8pc+Nz| z#E}lHK0-;U(A|xUQ<+#R_La$Q!;*NIx1HE-X=rcUv=DmsqHO90zUXC7p|XlHNN%Kq z6>QN^it9yMEyP{6KPKDuXGJk%mcwL|+v1%ZHri zKgI`7FF;Wdh{@(l#<|rg%~H*0D^h<>{vs{R*3lAvwGo-;D-&W~UFWo5kY3;D?#D-_ z8hMLOvnWA4Ghky^V&=4R?L2uOE>^?+T5{Q7T&B8CN2@gh+|zXug<-_|t{$M#a8kdg zlL+un~TDi4D##_QnkcA`uR9-gAZc2VIUQF2Sk(G9a=aKz)! zJ&up0K`}5DarRTK8smzcTR?R8veG>oK-W@n;|84_ugJX`qmOnx3cz4 zLq`mb&;x&KbTGqC7lNsqO)vjY| zACm5&9c9P5;l-ya^p`a`pTgArp)Lh0&Kk}Gsr^b;8H!oMO-ZY# zL1Mg-Zt>9T%2tB6wM;x8Vr&TK2_(4kPJ0~avGS%?x~fYbYQ_)J0?6+pNQM#s^)_ko<38EUKes5%q*Q)v!m41h;9$`3;TTR=oSn0fGdYgsN#9j?Mu&%VX z7e6B|E~iWh2W~dSYcx-@rcNp*tXCSRV2QXEJ8=+L`U2QI3X6JXY#LJ9Bbl`KNlDaE z?T}PwCYD^8Xs_uC_3TJ09X|Z>$xqQ#DvFS`gtfB8>3Yfm^tE@fU!x)>hB9<|mMpYl z8ll;FJ9}V@V}-$6!_@$dO9k)F?M4ZuJweT(#9r~HBrZn;H9R(ok< zwQ{pnl%D6fMJIh1mAlpyQ0C;uwWD_y6b}qC!mBuVipOQA!$n*{(@Lz1 zb!x32*d+yJfid;Yt_yf1AK&;whxW5$&ABd;chr*Sz|8!KP%kX6{P zdbtmy%*iTQhjjAbCCMbmMo~=M^5~*rvTeE6zbh$c9Wf&+WYuQGt}NNJsH6-bk4h;! 
zj=T!W3%>VDC*<;CG?8=h3Pla0t~i@d&b?0$4X5{!>01gdZ!L1Vz;DW@CrO@}sZMG) z#?SHiFr~YrJLFp6O5|-Jr05(K5@u}ng{vWZeeocKhBIC)zP48K5u_?qS3GpPS@q0Y z+8$YwKlh!Dxe%)k*$--<@aprn?NVGm-Bsj->6z0s6yrAEbBZS-kl}>B!Mu+;fz=hU zYJ13MHPxN6ClLn0V5N+0j!&;#s#VM#Eqwx~cLbld$`=sQ<=Zk|oL8)(%g$m*L9!^>mB@XlHY#n+88Y`m0!0VgS^_>O% zVG)v3O*u43Hv7Hs_qoWD-@FpF+ga63$hS|0KR=7XaW1w-e=X5`{rzg~R)*yc3+14DR zmO!iOtrf%ds&0lf8PKZU>8^LMhhzwD2aVojk#{eA?`-M~UCi3O3r-&-gBBnp}7yV<3>s%`w2C_9n|;td!sisx;9R%>CqzXmQzYNjz-UrQQ% zIXr`zs$cb5j9+ih=kSI0kwvMh;Y6G<9C*)c%+{k{h#nrqD{<&%ts=XQkN&was$A%%T{8SBnV7T1H+@?}Lq|C`Th$(hxTyNJTc z+j`N*n$cXMIUIeveD|$>Gc;G@#jM1Y6?gioV{DQ7<`)3Ivap(fMnd-cVS}ssb(l&j zVeIi>Zp^Wb7Vb~R2s`cuxC!9sg}_1(F*~C%nzNz)<-N8TwB#? zyEpVlCUlyH<&xlyscOQc=KL0k%}Tn2CAuxyfhqx77kxid$iyH+(nG)?&G@6p66w#1 zfoi=QDqq#}6%il}~t#mv6Q42*i70s!^+j8!^49b1fIw7y6^=n{Q}QxNYLhF|Nq~k)74|+!M|;x(TaqAW!&As( z0`J{;{>qTzv776d+s+1i+2cvQ`Ko6Bd`TMT1r|qwSnG$8ui=>oL{aw2Kt2289tl`C zmFx+lN?z?LPlT+hqja&QhKFKMb=21{F97!_JZ8TrHrRyTu5``d)9FW?6FM?T z^|=vyDDcg^_On^C*nw;b*~EQ!#Z#TcT{flK_B!ZuW)oVx(l_b&9Dfmg-LHfH-P+V- zhDx8?DSe%JC8}F`JK<5O|2kvHcZ?c~Gsp$(5OO0C!FqvJkAt(awhY=X?(3x4*;LJ> z+S{LwzJW{R5y91s->U zq_g=fAfAAC>syuS{wOi8!!0;r6lX3TjVWJcnUd5p8=P@9z0#e%ey&SAE6^~b;_C(is==D_=4J&MHR|l>aTX@@uRTRFwpE`X(p!;-%vpGHy?0UMW zcsopVuStERo}q02c>z>?dQ81{)X2k2ZBX~dtT*9N^>~nbQx*I8T4u@IP2XjytWnrG z_9w|QnHlv!l+9?`plsyuTeBGN2*=Q88(Vbo*lI)Z)Idq1#oa)U;ZKfGsr0Di)>U`#@p!;U2E~TA0tT@q zxSqvAd`i}>pPN>^E2c(#GvxFPzneWvjX$4vuG?84YHWO)nl@#!bYj2oe>S=EiyL?> zikA+yGl;6SNYCi7+h%d-zu^S@gdcKqsPop_Td~8zP2E)lY^84u3;tY*K!CNn9(K`8!Y4ma+7*Xm@~HamYXPjjN_9ti5zTX%tS{uM zkhM$4X`S~JHCwH@{>ndy-(e(N*hr$8TJW?4Y8HF&C-n(`);fHi%mb<2;!fF zbEQi4X?O6y8-Ld-Q-Az%BZ+bR?lJp7I62GDEHr#^9#;UUw!s#@kiE5xN+t48FaH9F zl2V@B5Ei_p;WlfBV2H(2r1lHSylhX&QBRxRsntg=T74A@gzZ7SZFiqeDr@5I^pKJu zub0{b^{|6AiwD4VTp6D+rhw&T_loS~Cn`d{&!(=_36k4ha6ibf&b#}GD*O@sRU4bb zySfcWBsbVE2mHQFM3ecX=(-6m1nvZtVYHpR0KStTtg!MkD9O*w;w-dNS*1;hHo?bA z4V@|ZvfmVV=+Rf^4=6(y@$2~~+a)KgftY}GLwJt+{5AyPS<_ly5AbGNhh?4d_ee}p 
z$c)K%xfOSrKHuRJN48=39r>%*j%40wP@mu!=~h{cDEmyutej zA4iy7*5A(8*zG5|vHkpBeswJMOy;oPB8Jd|$j_fJFyua9t{3+~4)jo3rPeGPm|oyO z4{DJNh<8BY-glkmX?m!`hpi|htF%FBh``LEp0!r4m1VxsfP?CGCTRM5+ zr1c~<*N(-xf!m5RwAy(jKB0YZV}u=qG3o5DStyNOQ6m8)SaL>w)3!)ZdP$0btI6M0 z?Hj}gu1vLULHkxcE2OizRz->h%4QL$A~<<*r$|<^cL-b9jfzhf22?sj=U~n!hSm|-|I1(kZHeP&- z73F)5*N)ahDB`e<{wH#W{gghn#Siwi;To;YhHm>+3qmUb)C&&%kNlp-%wgF6Gw#>B z=BF0a_7V3`WapW=?SdpffiD1rCSe_Fx*w)c2`tf@>~3{fYaO&vKraV02}TtP?yqeJ z{)4KF(e~9^5%6b5MC^r#Bhi*|gx_L!JdAK6QyJSvW>4~&orOaut`}>2!^oRXFzp3e zc4}6C>@iPFRV1y%RwONOD@V2yz5niNo&iYUn`{siSHm704pqli8&sFQxbf=&!Qk03 z{$y(7L+Qt9>g$40;-v7j<#+wrLJh21up5~06!HF+hbVU*&5l`HdC^y_n=)FEFsxRz z-4v(UE;6sgX6ZKbc!wRjBtU zUHa#W`~KN=Q^;++wN6}BWo^hqd7JNz>aD>tY6`mkNxl6S!c_L7E!vaIo~)9lwcKmy z8g(E8L7m!o%_L@@(PeEMUNi^#4eX6dSj7BpIP^?*HA80NQz-~Zk`7gKXaGk)CD@u(c)OY@TUBb5g0Sx}{ z2}hw~m`ly_?;4~8^sCe=L`@}MXY?<6)G&w|U&_tL`>K12DQ(7axpZ&Vo9@>e7^>D< z?&SDVnbz&MU%FV>)m(ZNj}=cV&os45J*o)TkSsrMuLtA2nxKkcaT48S;Bov04p2;N zBS#^#7Q$c-TFV-!+8s{o4|@0hP;3ELj*wz@2dS<&kbv7cW;Bb5EZ?ngb2zQYTENT*J^8CDpJ6SZ;hn)h3DuyH4- z&teapO|4(H76myDVt{Cd1o4QZ)rs&} zkj`U!){2Y7H7~n&drrzB%Tow5R&dBFp@ej}YE21EF>1z+m{BP13m};oZlZK&Zwh3M zG5@2!O0VdWR`(R`$k*y~C#EOYW<=v64VE{;8v%6V5yM!mpuGAlJ_+dsDze*_icf-Q z-7-65p*^~qK~D&dO5|I2VU?&yv!R+|Tlf`rc2gNgLcQd3nb~A$gs$-^+3JhZYCrEd zKXhg%o@#Z*?3Q;xF%~5N(Q+G0%;SN1hQh~KA z+c0snVeU5f!e7BBM^4H#{o#-ASNIrh`Zc%-8yj_7UZsFbS}7uX>n(pRGBV}0xzGzB zKKliLW@^wa!H2q=Hy->v)^W%1jt{ecxKe;MRf5VA(z59?>04CdyDxs+77w&{Un${X5yUysMXyDtfBEpD>p3DxWSYcz8 zb)mi+pBUdv#vPBweZVUt8XQqnJd^J{8yM)1mdUl}iVsNL4|^JP@frK+uohl3&5LOt z;iy_$CBVKwdE}I5QU4sf{{pyTV7=2fM3UuD$@^M`S?GrIK?bm^pNH;w@@8;+r}) z6MJdgoxzNiszc|7i39MVlxr3%jxE{0oqlRbhcj|&Ve{_)p?8HSJE?a>8D_1^ex0rBc=}?<~t1% zYW^*&x|+_@W~7(JWHe!P^Q|V%`J+M{3=-)>~kVcFw1aMKt2dQYKUJER<&QK|-2EJxXOv z)tXYWS58cFS?o@&3;KQdU8bCw6>v`xOCuYsDB&(H@60rRiq<1`QrzN*{Zw6l0W<_} zX*#SsEf!Akp>KQB@qa8q#@5y9A%Ty^;O{e6Nyp}m2;ZBIZ+~4D)uQ@rN@nhI10JhI zpTxurk+x;Vh_9CP>q}tJvYLMCZ-d`J`cf|wwY|tZ!DqlO`pi()cTjjL=+OgTeezc8 
z9ky&t?Ep?=3@6ZtX-%qN^N}WByb6Z$b#mTYyZiV*0@=l{awh9_71z^BpxH ze@gYRh%?)H{L9Q@?~f-O!>v&?6)7CDN~rP05_uMmB2(+ScML*rkW(YN=_q3qFZ=f; z-BWuKo6)rPN$h*MXhXuVbsBq6GT@(UcyzBLzfVwF0ZqHf*catKM0Cw<1|E4#YLl-T z!={iZ-$cIv{OOvxLUpDMP$Ntd9JsL^xCy?nJ2kZEdKjX9`5HNGBlJq*Ql*E27F$Th z&&(x2m1mJ}_}0TS>N5uJ!nbxboDdT6>{?geOPU@RP&iSwkv_%b{?2*u?T4&gAA$u0 zBN8~%Fw+S5BdKKbP&7B4TwT*WzwriYX0hQ!UN1U}^28;dI3~oN)QBIZTS((mONbN= z-MUO8v`>u=!cUx<)eyJyxlDONP;va{b2o$Vyy+qOA-<1k#V5KDAbdcpsS(XQ&8F}I z?JRKOa@ znr?!KmX70j2rr558$`E7j}%)UD6KI!y(|RB%prMCV(~fGq1GIy60urzGW!f-YT*eM z?PP)$9=eGZnZC?4I@OMO0Z*o5jpFea@c}ZPGn+cG^`Tom9#;Fa8JoAJg7D$jDe6_B z^z4_A!7CYs)i?n^d5cEaBEaC6fk=em?ct+fozJ@AQM%vfUH5fzp`a$st%>34OTfqo zkv#ieq!IZlZeowATT4@CCpP32^XGlT3!txI#g@MP^PWrEPQ#VNbLL`q%LjtTFCk3u zJ7M=YW_KnVl4i5op9y-l^yN>jkEwGvx(FrFOIY2%1e8r&+FS1*6PE6!HlqKm8goy8 zbQB3vrMEJ?K?Sp~voDi@Dm=Nqs7yix!5e&2ogDG~)yjg)P?Q8lb|8L}JMr7VQ*35o zP{&RDnLx+ghl|v~VKhY)r2!wosD0ynLA-Z8sj&HDc^|N+z}yop33>D+JN*}Lt6E5o zq)P{!13uhl;0B+!_}mS((^T@WtKw6riqn2I2#b*m&N*_LXteem1A z9uT({q6l^&m)>73pq}3AQz|qa!pXEnk}!^Hr(Ul%$3u%i#tJ2$&-M!}*bE(B!_}<~ zqb66MA|J@QT)R#X%GSTqeTPDO$C(%)y_uQ4776ejm5O+(6t=rl3cgyTm zYkkP@;=hPpPvvk~Ev-w@^RwZ1vYZ~Q=@xkBt^ClXBVm|jvID&NDH#%$)1f)=A?jx$^kdkid^lOfr^1R?k`u&o+oAXj zGt=|JL($$G0_>$jSt){H=-9@ea9g~(B%EY1OHCgo63r_ujkphKClC-KQeqb)yWN9+hH#%f$5JZoonfj_Ugps1Hkzv@*k&LEmr}3K|kBE z;)6HasAOIMZv!RajX&gY3ZkeJPIrW%Inyq3HVM|w#ETDPhR8aq97W90owFs}5b{~w^EzQOQ^_~ZB z#gt|TO0SoUJf<|+_@t;Vy=vn=lSaO6$Bm%q8Q7~t_xqz_Q#kO;1lgl)AHJs>%bWDZSN0z}(ra!Dl+SreB+2=)?9f8l zQS#uE(^soA#85TVMS91SYfQN*cDUG%g>`5Yd*oj$T*WGX6ivQWY0bQ?XLGb?5c`yU z6$yk6=-vAohQW-@TI|Ms6NW({U16cUDQVH_%KvWRRmkk7IHuk0k`P;P%$`w6C5d_T zeD6?CVt~W;s-ekH3^P)c@s$ld*>x3u;sQ+xZhtNZrCm16-ov%@0)v9@Tv(?u@LWu+ z^94|Bv-$3;P*gU3i~*5=H2PELl|Nc#cmnEE4gqCBz~0o3X{6u2k69(1=^^3`Iv#QG zrnihylj+gdUgeq1;Le^7^M~HVW8(6KS^DS1Yc28xVy7vlUg@kM5amK~yH2agDmq2s zQ4CF~>U_2o+%YA)6Ls+5HzjR)jCBRS&+d;X2?9)m_}kI7eN_gTw`7J}m0k-t9p-^5 zv-Dg#gT(Z)4d@2Z8@@CN%p-5rD3}JB#vdpYPjf7Gm59hyH6pt?kKqD&3BoEjOh-o& 
z7rw_y6&^=>qa-t|tw-rTT(@ZdKLEHuN58I@FeM&EMO=6;k|n`DQKQA#7*n%X=+t*C zBk$S|l1h->pAfWH@;xu?*zQpKFu|S_3k5Jh46``+*a(M337dn`{*=3elV(T_1 z>Wansd}<4VaIy=UFKXI2C&?m9fpv+i+G?$`qr82K)~Hh6=t!HPPKT*@)cqu++5Z5D zn2Th&8zlIwXiB&?)SLA9__Ja0n-7f&DKd&S<6<@+4IMJ^M$z@^rsRogQX>BVRgIhI zmqT0$^F&|jqHdWkUViTEZrBsTI~uQ8A>&FaQsld5jXR5rB9jzNO-N~e=6}1SLs~QG zmTn(Cn!51O{Uh@t_}G84n%dD19Z!_cF~?6!V$SAU8});&LVaTSB`Y-l0Ach`_hamr zXIOpvBTt4%aiQTey)t-!hLJ^PJ!ei;B*o(H`Jn@Hx5=sQ67o(JVccD6g6UkKWL?SXoBYQh(~t@ly=1k)mDbF zA(}C{3xiTO$wNeuBzTa!Wb_uEqia82sInZvGF&rLfjcxOu#ERJSc0)@CbpyZNR)Q! zl1+UUS=mD1lSoAxZ|IoDu&iutjbSW(ji1-1i>8Qmczlx6p~RF;wo6^<@Q?R35xQ@i z9=nfZWxZI%7dJ+hpT!zHuiuKZ>(ygJ{{XC%ZbqUOnojK9z?rGhsi?HtQu`wtck1AVrOZzt?aV`YN{;mZxc~P^);w=+I)uI|sN13Ui7gFr-X5NE*{{a0K zjC9}1^$2QO;ReV=E8r%QHZ=^Ed@#EpZ{Qa&nFnf|vnq(jHp?#HF}Z6htF zjIKnZ#f;wCqvlE~G(9L>oOrY2hr_kLNW*M)nw}o_c1Oc&?3^Vvw2zGx?UAHzvx3Vl z7MWFtDi*Sowv=rjqhrThlKtdcLe)&~5j84g$ELIk5^4+pXal{t}q_(aw(FJP8 zu?{4OCC)bE=?kOp8gG+nC0I`JWsLeI!Km@3C&1NXzem9z2!Gh zN6Na(!z|n>BF3!xJe@}^OCu{7)r*LYZb!lWpBbme19)6x?<@8ZCEg*#u{Kc6u>m?eFE%%calr^~DzR9PLn5e>JPCaaX6|TjpG!Iuv$T0ybv8$d5_n|1sA)a2 zBjrM0Vv=hQf=iJi?kJaIEYs#C(uN6$F)$a#}a%PiC8CZ}Yv zr29K1ghlXbbaXSbqBvtNv9$P!L98{H!o>&ANgOn9Scm#6rtR;JTe z^p-MR7Ir+oj?Xt&V%j#-A>=-u5knpqB;N(&Nz{!_vmVF%lI0bA32o!ozg8qkWqK^U zRTYZE$BE>BudBU}$&RYDZybj!;|P+)mN&uWez}r>qDD0lb%r_-Lv}ZazDeSUkC>gr zirR*DHI(rEU#;Wo)`}4QG?mDl%`YXuhmfbq@>$ra5@U8YYU?c{Tgr2dQ7N&_np3HM zp>u{bHR;xSNM&L_w1~&}C`?O(p^xy_qRYZ`N?40%kCN9EN6TMmTCtF%hbtvCk0bv8 zk=e9yvQVNBw?tmg(!_m@67MUrhlqaENpnNQAL+!f#n<4D5hd+@%P9N9D#okQ@R@c= zZbOY?TpBvDgXEsPBrSCYY3r>=LzkjHZ|M1m>RF}C4Nk^+UT49_XZY_benWOHNgr5< z{aUq!gfuaK!P^leOVP3~7i8Hd_DWlZJb3W?#Zk#d+;(Zg_%wL5YQMA1{{SKX0Ou4o z-zWI!e11Cq$Py9IhW#1!i#iwchINW`Xac-gg5Fan|7p#5<=mR-FkE<#w3OKSM!8CEFrrl zJSB?5WVj<5=kU?d2-`HF_IVo1$A+r5{xWtwy zMHE%$^V!#{=!`9uLoSI(Y=k%SlutF9%I^%SG}5!;hwx8MF#WlUCEggi%J=UHj}|Tp z`@`U&4-=NuN2||y?C7#gawNz@aT!r1`4;{R?)w2{eA{VhD6mRgz;EI{C(s47a5NS-{OeT(HCTftlC06VtjHr z{uP?bLLksfmXq0Oki?J&@#FrRfk#&cNa*1|JC~GqHS+V+ch5TWD 
zHI|C@`}XhI^2mt{O3G^!Sw)W@UaRo*FUuUiW=n=&r+o!qX?>e8xJ0lA8ez4%(WTiuFlcBprU(qF={a*)bHJbP6)0HDp+`k-i zMPj`t*pUeRm*~ALlcEi{nzP%%rne*GNSYw1WA|i#>W&!wDC-ZiOsXsI*8M;K!~jbW z009F51O^2N2L=TM0R;d60RRFK0|XKwF%Tj_QDJctAToiGFhWvdp|Qd61r#GQa?#;5 zLsNp1q9j0LvhfvjgOkEURFu-<|Jncu0RaF8KLSq@c4UZqXO2nLDZZ{2Vs!iRIqO*6jg+=?rN*!LA>u)o@N5TCZrA9URH+sjtZ4c;5i1)Epvo- zsrM{HqlTE=UO%{@)wCA&u%07GM~tXn+P5a~(+35`L?TYy)~|198&y{s22-SY$4f{z zNI9!DN}}yT!7=`7lee%G-K$y5Y1xPBzjbrcRU6G&_%IIzQLPQ_P9uWJo(n2t`E=KA z>aP6$DZIM1+Uy0~)3suDDtuNOG?XAuS0$ir>F`?nj4(1asG&BRTIbHM~_WuBJPitaeJ0A~%Z01Dh9FSI_>x2EF zH>z6fTe(+e%r5eT-4mmaTNMowwY(PKwXrJc#_rwv92F>Wq-mSs!C)jV&-t3{^lLbL zbaM&Ea_&~P?yWu`DBn-wsh)ZpqPX=~f8E!z+FCHhuJ6m-RWs_S9!DH>Nh=JN(3tJH z9S&6k#8iDqYmd21;mvmCyMN5vTg9&SewrD{ROFl9t!>^I?|AqsLw(`yOy{5?=ySF} z=D!s$as5#ode6E$F1%AWwB6AYY#*N2%$E~UhxhN*--0T)QB%&N~00SGw1zFf}4yfN|KMy8JQtD(53iDfZaq`wm zpX$>-GSuYu@KkLMsP{FscD%p`4~qIup>Z^6-5_vQS8l2Yh=fk;RJUUXjL*RqmK9>G z>_869Qg;0hkvK1->+}@5d#CL#3)!8$YSzftvsI-`{u7`3)Erb@E?kcC z)D|ruX~V^0;&~v;k~YassOI3&JCrqzlakgH&+-L@Ajcd}M%`c0G=L2{br?Wvh35^Y zxmoUGS?o9-fAXl|1&njg4WAz;jw@$^z(nn>M!%59Gd&{3qUe z-$;EKQS$p;ce0dvL74bJRb}O^qUN-)aT1JdPC2#NuGfmF=pC%nan_7Vs?bOcJ(<|1 z<7bd4fq-fBes^fi$kTUZ;G_w5^P*e<7eEVDp{mboZd6(WhXPaWne>{YTH?A)|LBM zKlqvXFTY{uOfJ;}{{W3+^>=fp`mAVK5Ar+z0NG<@vmWPuYo&kDL!ybl4goURQjnrt z=L?|DMl|AXufALQwz?&ZXcUdm*_C8_rd>(1;F#%#&S?F!4|=BKQ_T{q4^XS#lVwt* zaJmjX>fmKYhc;U04%Y=mkHxHWfCuoEU4tgoc=qUbCG>ycKJ(VC^rN3rg=%dfqKOUZ z9(SsCre$bg>4D*q_o>))WU)b`Gm?GXYs85P=|tVY=2r2%DH=O5&1XEczRyaNt6ycp z8YeW$Hj09y#aE0rLAxK@?b6p;Dh6IK2K8(DLgz_keP=(UVVDHrJG6g=v%aYGvTupO zUbR~2S%;!E-T8Y_>1*065s124(0%Ao`n_>TJil7@a6cqSveebxP>Jzf8=GmO?^WaC zuTizu_8)?ZUr07iO{-27^GE9x{X0IoWqW%|tp{C}bLNfiJ%k0h|>Pwws*K0#uCCn%d55PHk=yiKyo|`o>dFq}6Bq04r-)2&&0! 
zgvwyVyRzi#NY=RbV0;%$^De0OIG+6uA+ zF4J>Ci0=3*{{V`8s)^_C(Edy4{{X^$=d3IL0QU78R(CWVZ|%H;{{V*Xejk!BvcstE z2=zT)qnFXYM|w_af8eJC{{VHt{Mv0aDPvTLXx>4eg2weao1qt-VYuy0rZt@601d>H z+P3D4MWfI?hJI)@fvjlAG(gZ;_J^?M3i`zYO_k<92L*wJ^~+KJ01CqwKZsCkKx;+- z4VpSzMMl!6F?q@2ol14?;3~5lrCRK-7i%)EJfjaJ$8I32Ydt10-tq9z?E`@!T3foT z_<*egdju*~4{ni;?~tB-CV~o~IAUkT2ENl#)f~(1ibC)ElcdH9ot^Et>IUo{IQ)9w zNVcsOwX`jDEQPZ-fWm#M(_yG@S|~T8ljjK3cYnTAgm>w~HCeSbw@#`ocFwJyoJ`ac z%QNm)yJl=3;i+yfAIEmbiT2=nA=9w)bAR$twt9>Fr+7&}ao2uS)msC5j!C>#vc9&^(y@=~8 zdFgM9RixS?BJ8GH=}X^8U8u&4Ur%p%%y*5c{TqYXGS>`?c6&O3*^p&l86C?5pwqkd zew?DLH~#=DLyD(Dkh@NsLs~d#p-fqwUdZW(SDyB;%(NY+6;0HQ`|P{z z{{WRi-m=Ng#s&)2K#Q&py@ptYW+Hn|Mnu4Z5e3&>xi+22AVwQh^ks*5iAJSuP}#ie z!}CD>WS>-1GKjFzZ3-mnm=1cSX1CJmx<}lW(}xZ^bsKh-QF;dbsP#83S1d}&YK;TY zIs$dI`(~XV!{;9IJQ_FkK9F+{Y&xGZ6pT<*tFz5HZwZg2@IldRHdgjq zw5b-FtZv+u-?V$DwqBymU!;#U>!Wt9O~*lV+#1oDE2nAuF@r~{tYz56W8j(w*jH_U zbP#dqgR$9+b3m{F(Ci_%aGtc7C_ngBn(t^%FdSoQkl@{fY(_(GDxkX(VDVJDLG;cW zlO#2{o89YFvF`j*seOI6mO8UAHg`r`Az8kwZLeO*t6GbhPgfi@vB4T|?MAmXbW^l3 z@522lZnwo$scia2wNOr|I^Ec#M#$^8Mzil~j^tZ;*GZEn7js=42N}Xg#WLM$A8Va9 zot%1(x+bMPcB#{vftJ;|aZsZ4DaJV=cNGegDpcs3QP)exZ5v9bPR(hgDYOQ$lMprx zbh+-xDz15TRyo(&0o9$J^;(gcrsCL9pgN1o9l)R-##G~yH|fL9;;D+~>}l)W*r7d> z+8ORdR0r{FC_Bsw&1s-+I~J0FmXpZ`*Qj0U>`Zl9Z+LUV40*gtbw-^)>#}#~1EFLq zWjrx$-~y*m3B1`yXEmCl-}*(C&>lc9eLZ(Kp7mp-+7xVb>)i&On;6!Q6v`HMI!GcO z>~P)}NZfBm8&~N=pmC>*?tbUbe^tdFGMlV|-DVg$_Gd}{ovWj(e#ujW(?62NRnXiEVOd=K3q{`z z7c8`0Q+u`rPJwkm;4-BoD-BlIU(wm#AJSYPo&M$a7Fw_AD@b)_6Lq_#V#~ZICH8`sqA7GuZ>0G?&((dUuCM+mZ|b^1PW4Ytp)Nk&p>v!X(TOKw zb)V?cS`G%#bN*k=8BKi>dO?&XzO_Dy`&&8zD=io-0>tRW3QfV)HD_?zR<5qIB0N4R z&#h(@YrFUKbzM966z8^)wpoT%toY%@xrL5tj?U@Hbe3wGuWnlk4WBs<;__8PZG#`1JJ^W4LAt<7Gj;^&;oJ5)~9YOFAE7a7dq z?vyH3acvjVbvMACBiyxJQ~DM2e(&K;rD!!pLfg~%5n$Z_3yXV`W(5zk^e7jUB zF0K75I_C-O^Zbx_>Bu5`KdQmDgWNwf`p>6alY?*=H^;qw)uX_EYwm3x1M^>TU)lMu z+3Wj1HTyMhXXd|Ut?Xa1)&8KrW2irvAm6G#o%C$8M8>mrBO9!I_5`TIZuavTav(b7OugzpHhg#Si^t7Bu)f3x(_0}`>>im_CFPSx@G 
z_NrrWyWpxfYQc0h4jG8$`W35J#3wU2U6opy)ac{BQ}1TVrH8^tv14=}YNxGq4dFuy zjrswN1q-I_M(Z)#iY@}_1{}{sqXJP_tGO&N*`QNy4}Jjd-cV=|1^^?+1%G8V#YXU} zGtqeqf~QHHs?Pxc%8mpDa;UoO)@wDM3p7IkdEgWl4Qw+~14$liseL!Mi3~(sV@4v*)56KRjPC8o$pjIydd5Qth-Tz za_L&)Cozh%eIVMG?Gtl3DV-U+9j-~FW5qZ|GQaLn`2M;Iao()Ex?7$gr!e-I^4wM3 zgp;DqeJnnFPKW5LQ#ca!9sOiord!H_$biUZJBb-I~-MGyy$<@l?=qTnD?CwGIf};SS!x^CUL~3 zx;j^kW#5vJ)ORh#U4TSH6*L_O(RM{c!D)%aB1Qy+BY8vstIb28$aT7@h0uEqsJ>~$NoxU{T5TY{=)UpS_dG>{{a zD#HG3_I`=Oi9?Dis0S9fI4Yyes@Ax3g-zT?w-3ujnx~;vr{SRTMaLkodAUa9M^aNz zTzZ`s?%$0*8{6^!0FJ0*aC`6vcN7@R0K=Am?Yj_Ap*ZNFWvKSjeLqWXKdQZWUFL}y zHwAHxlD^OfRGjxH-iC~HaZrUyc)pg+;Yr$$)*W$`GI3R|J_UC*nx{R#1!vYJ){pT9 zc3uPF3f}#_5gs^vQ^^bJ_kgGzbxw$NHlAT?S*gzf2>o4xpDhrYcG9Xn{{X6Tlm_fo zpl@O<^Th^`c@)}%2LV0cp}7!pDx00#$=t>THE5k%+2x!Gb-*2s&^m(hZ z$c=}3A=p%I4g*dY@=x?5@s?m8rYid%Xv2B6K{bw|F5?_2_(4W3u_4aH#FVG<20d>Z1e4;(;UQiu7Bt zsvWgPgTFx1S+sS!M$C*1Z3{W?(RXXGPy*h1fFmMyLUkz)X~nJxw?ICNp;Kw|j3I|m z=^7;+jczISO4_ac+xD>UR(ci9&VsE<&T%+R(8y3E&jn(m#n1O9pr{&+zR+<_y4rW+ z329vgI#xkh`hu%(h&$pJ(iST1qiQyK#jUGZ&IIEd0o|mH;ZZw;Xy%$#iQ8$t)QS%D zNGhd0owoP>s!D5I?MC3xr!|s;+8QqorQ)3OVpJRtQzeqQ8m{c{;I<;{#zUwy1*|7% zI}mt1oW10GRpE~HD(TJt0BEWDE!JE1t}DGxfzL-3f(oN5xVNR%!!*|lwZEu-N~7%b z1w#DIU1RM-a7|{)bb$D#jtD9oCEmrFeWIYe)@u+q1@xe4)u=i_-a-EW<)x~#KZjWS z`%rI=fz5QDYpO}-Dawn+C8pu!^9m+9k`(RgLKMM3dM?|P?>;IUkdxMoj>EY~)p6D2 zPyj#}b%{=Cb`Eac;9#asc<~Ad)HtafhalnEBLcceH$!t=LeEBObaCRi=mAu?@zJo{ z3pFyg6>B;DrOyly=c4^*To)i=#eMMbC^XasyKq%%eQC8Np^VkUG;BNecrQVAi;8L7 zt)OkX?`PPXQl@-;icfa7%de8O~)*ayXDhR(1Ps#;$h za_jy`IwmCLijo13a;V3-og5pd7@v@xNyn3rvef1LjZ4JL(lQD1uSaD4q9b#v4zI^B2_@)gSz#VO? 
zvfl;S9t*JF1==prnN0rxR)@R(bY@hOgoInYsmy(?&XXu^JK?PkWS*XWkWSTNe zJI7p*glJ~ALMLK$0OsAmr;&u^KR+_DYeYdDFj-v(XI9C{{^8dk8>bZ|x7#>xMb5nu z$q^?L+MT$2Q&|G=%~3eu@Nnju;BY$tSw{|_>IT-N;^DQsRmv3l5OaRi=DJSMR%gXF zl{#)T^CS$_>P&okH&ChdchabH1*lamjw^ z&I{IQ%pJTFvd%X97A&>&fJ{o$I^@?ZXHW0{{UEfk6V0eCcrIj z*eWx~hkO>S9*-UW06C)_ort~NhGJFo;Hy*O9H)O$R|NoQkiH1ic0oPyOoDI+U|Q5z zc{zQ+{-yU#AIzHSk#)H>sM4tCv|<-@5&?yS#$TRbu`?^v1d!{;SEtA;N6MFudDgy)b!qcVjyb{p+FDT9?kcgWLjb5ymxDc zkr{;2aCpJPj-DNEg3LAD+H~X>W8A*cgmX=(FrOz;^vpPjUwW}{JE` z%k|Rj4jZx1G>&~)7#-n8R+fy1o%8bpMbb^~YZ)%X453|_4cqhchKY9!;VBf?%wj?Ou1acHBGFk*#rc;Pa1mk>HM( z4I`~1Fr8JnR&FnD{y#sEQ?*=}e)L0fi4rm?s+A6mHtb@x!E1pL-cuf4 zfsfcY&z)<6S#!Rf(dDuDTVOCd5r`4r8c~o9%^Mq#I)^BsNn?c3Dvc) zZeK8s-p{=Tw`w#j?zu`K#Ch6t(Ae{TG}mr2fzh6qRP(ZLG+aq>ILf~4AS#t8~2Cr9q{Wth`GI=a)mRa z6`ErQVVNfbvjtL=!U69FmW-B5F0N}epHc1H?KZzw=>E`;S-kq4-a`8icJHuwqvQylLRFjSg$FILd-?2gqcy zTQu{~Dn;s!!kd07jR5HeZ#&X-Xp@jd=Q3oe+1GBK{{SUHXwH_c3 z;js#?b^5Ky?ndn~hS#c~K#)d(*KqIB?9bP&bJK+j{)TuUn)t-!sFL23V>w|DzyW9%A;<+de7P0E;~Vb(~K0_dhbZ=wR_$%MT$sBY5(KGyqPkiRBoo zVvO`o7lAE{{C)mY9UGPlC5#2DiQZNiy?ayI?$a0Rqhf6#jokU@odCdh0*xxveyLs| zwL^{B;)^k9_7xQx4<$&9@o&$9Z3=jS+vlZ}+nFi|#BbAzp0sFcK1{1s8bmgy)y>x& zrt^Lhg^|&E%}JEa+=rbX-Q>cFbWmQFF)+I|90M@+3IGYfF@-o+x=T zt(MAmQ*5YRPM1auK-MJt0{TLy`p$2L9}gmWo4Vw0Ox|RvdxLqD8BP7X$U7D-!>t+# z88g~!N^1$Sx3PVEQ5QT!oNF8i_XM`>Rkt{P*_U;o4aDG&hw0s#X8 z2LS;A0RaI3000315g{=_QDJcqfsvuH!SK=H@gOij|Jncu0RaF3KOvm#QANAW7F(jP zQe*G0h(r1V0JjqNM*8R%aC0lsJrPftc?er-$9a)ISH%ASvqengF#-e_`k6V!hHI;( zycw5T29Zg-F^ZXnLObq2#^%uRi(PMY%P)E{Q(fi~!-UFG%5xfK?__hh^fSku>h2~r;0%M|AMAT;t7*>hY-_9Ef?(d0eiZzbt zsg3K}IF*bpQJ688WQT6fu($?MqU-+v)u2gaRw=1bQmQh%s|_7?okjKx{{XabGQrkH z*~dXwqTrEe?$TmqQJelWKBKqN2oNCvvjLT4()-jmE08v`J)O{k- z@W8-h7E4$T1sVSUlhQA=U#ea3#C0^aZ2T|oU1R*UKc*|`6*LeaN{4mE zd321{osJ-enyMeTZ&YtB8Y8sdNP%}1O>Q{SEs=DWV8wd~4NqiNw~}7$l_hOo@@Fex zaBUG|3^eHh#wI!%{_7IQ+?1`nZZROGJb(H-JG(FS93z&iNQ5_G_myOhVL{n{*`3jP zC+>L=d`v`ldU#Iu_oL{W9bcKE5Ii-GVpbMz_eKIcM_+PS(Pd>KRh)PP8M!KAM@Z$J 
zCS~o&0~qk`P%eqfGR)fx5)6DpoHrbwVfB>=001)cn}jO%fyt(tanJt%r;OPA>i9eQ z=!8E108GnEpW-VlQTjk@vqW!ZAc;)2(01{Zu-OC(b5D!i&W;$kVM>A5qAb>a$uJ%-1Wb>pcI5~juc$a%- zoH>{aeEmxd&OHbBGYJ4R1OEV(*Z%-|sEAdyQa|5iaQQ5Cf7pUFs6JBk6@v94L9_c3 z7~$Huq$R@)a#YtY9++=U7t97HSQ$Y?p}n~Fj^M`bH<@bd4Tg?+hyDeovYc5{7MtzU zk@$hkIe^+RTHAsKz(Hc?l&Y(rGl4*(4c{{Z225mCW)DysW{5xRf&p|o?$J||0IFWq{t?j@YeInXMDR&<}H zkry-mn3ZEhU8O7DXNXmmu9`4Gs}^2tL-d%2kqr*0`4HcFaxuZ4l4dV5qGJ28Q?+wL& zW&Z%o#IufOdZTVtS9j9Niqwyr{{XR}$zbl4&anqQKkOjE6`3|Kh4<;fmKe8mghrDF zj03l^=b1BQGLCExg zS~bBSa77@|%+2r$H)F$p`K_sl?F+Z&5X}n|0j@J&mUu~W5^t?W8phJu7joFyG2LVf z)xW6Z<|v7U^~iJU^AaRq(e(6}E?i9zV+BolliC?R@QBE2UX|lNNn=#gwWz znumQ4?r*U*Zs+~R{mb+vAD|KK5E7uVu-mJ?@OJH{Opj;90`Yp1!c!C?X5OAqQeStA z7H3p|WH)Q&8~~;lnp{y6Lbej6K?{lF;WehSIboyHRsR6kCp^@B9`GCZXcg6StiY4< z!j@^HOYiV|{{X~tA|Ln+dOg83Tdq?yJWEu#*sY`4=N{b_R7J*n#Wq6_Z?kC9%B8p`-Q$u^k?blc)&Ou77Jc5o6k!5!aYRQ zW1J<0hnMO>uICPY#hRs=L>qF6K;AdPDY=OJ^eAzzf{)ccbB>O07DyPrxU5t(L#J$&W7Q%ho6(7HQ2f7}49XWs@>DatXgWySbzBVBVhlS4(ZT6OSX zO7EiPGS}fBzW|%?UkG~nGwWyE>38=2hrwHOaWwh>MXQSXVn52if<%Le12r;NcTmNQ z`Ml-HF@fgC@&_p*W!>+`gffTzIDMZ1zpgMHqP@SP7`(Qbl_@u&W;_D5wFY=R4U~FV zyYUz4hn>f~98Jlq+&<<%kRMCQA$mhB3!v`UiRXu$tkJaC`Av)$WgL)-a;^L$^-Ov>-Slwqo zn#}p84IGTx_CRR{>ld?77qf|B0c~RYylP-);LilbvmxR+KqXEr#^#8}l3JObSTCUM zIY|x`-D%)cH!%r;RhQ1OL-Q_5jcZ~WYCsbz3R%^dN;jWd9r_foa_!|5`Mztzf-3gw z7}JFoRk|Nqyn3qn^lT33a#yiY0 z=p4CWoY5Vv0!7x^kMTM=9=G8HAhAPopW-|g=~-~kL=!-6%%oXyWAH^wn}TO+9$-IW zFA{=4(8sJyArXe%8SF)S{{S)dGis?^YBy?iGJC;yY76OA?NRQROf^|RY@zpET`
V=0=FlZI>ZxT*_AHY-$ZZ;A$roZFm2|@i&OC2~ z@zO7+)O_X&ZwqxrK}*El#g1FKDWwUy7}p(f0xYpD0xuVG>APK_QnPZLYcK|pJ6QB_ z-FOUSKr9%uH>kauXyz(mmhy$QsjD7uSWTd>c^(oNGY$cW?y<8b1HQMWp7OBfLw~GA zTn5Va#wsdN2P@4mZTp(gp&d1W5N#Di#lm0Me|7%=kkd5lS=UFT(?&q@nxoYiW0j3! zG=lr;QA2vD*?cdIfnYm&_9OW$&?OCZH$QMGc|lc94zkhMNMWgP*iO$}RC zJAEDf=8PQRTV##>=8FaqWs^{(aSl;>T07%r7O9USdIkuW0Iy00*XkqDMelT%(7!#e zzpjMm-H-aRn*y7hygBrRndxo&&TB(rXs3$?lZ{%`^DoY#p_!Q(U>a=)FzQj8fR<9~(fDGyT5i&|4xj5fvtl_0hxY&h5uq(jn+3PA_xWMf^N5kUs zz0!)s-lZ|r@g8UiWKlu7%ehdkwzp64n1mV+Is24>s^Mu!)CVp?q}RFl zmla%KayLcF(;qT}Az4mAM(gbt<19D@t)8;{xyBX)8oTKZNb68b#jnaBt4E&1xi193+iNz;QXX<+dR}L(iF2$6(54yA znpAMwn#$aK<10|M^!yiov)WoIJ+e8P8AXc&RKqH;=XraEp;)0ASxiK%A|+ENB4i$76Q% zflx}@HBfs;P!5Cl2CV=wqlNeNloU#Wb8Pw}Iu5o6r^txqT_a`oTBR*mt?c@jg3%i+ zF-NNCOJ5g8Z1mDOu%$~(om-}1ZnO=3O>vB0SZsT$dk8d@=4Wa5>oP4Hd-ktN^MpFp zkAo5NB=l5yssMf;bU8zq%qAX)`SWd2?CgRP}+B@mXLTeOXti4e#Zeo=< zlz%vrWh3b@NHx0WFws`2Gn9w>DvEydJu5oOT=m27u%V*{Ur>|T%WZkJd2mX1;Bs^s zae7NQdvZzFm3#hw+#6H837x$sKw;I2J)-h#wW;jEezD%mvwqO1)ddActf}ls8#?tb z?$i1bot}j1ZYp^;8o&q$b=DoLq*dtFtRCo#!7cv);XdQ+CUuKj zwq=JODKx$1uD_{d=v_9<)pJUmRylMH7wVk|Z4aU>wZk^5dT?(0KPi)cv0qU{(9zv% z`4T}<#0GH7D|MsZ9|28REb8O`07Mp-dg}(HUty@8q5(*|IfB-^)VlshRWDWEYdT4F z7OEpgB2gQ`Qo=8=SSI}uN21GwQQ3FTd^{t(Kqafz9aVPu5v3RmSJId5FhYjyU_dzD zUDbTS^d9vpl)!8nM!~PKzwVPR(7x+*e7;eEoe=|UQMtT+fq3?*rJT9iVe_Q&@7R^L z34*Vf$lmDYQWC{bTp`Dsm^*fk5WUlAh^x{#gYa}ZM@++E8pcnw1F_3t1(v(Ycb2fP zcb(&79SCfGV7R5#44fJrCR>gjRb{&89V1gUIXS74vRP$5aHUsd;|uvIEYjMt&*U*! 
zSUMTrVq|8cuHMkSQ+068yVt};eMEZ6h9x?sOdx!tq1vx93+`Z*?Xu+|q%=Aw^i0aD zom}VET=%oelj3PQ1$_HMMDvt=h|51jbsNB+a3!lpzzL9a1-DR*rYO$6V+QFBav(y* zf9t%m!I*c-3X5fMCPQ<~dX{kv#x&l}J)&iU-O7zZu@(rOF5**JhPg_yj+1)*iOc5u z5rap%MM4?aKB2!@`Z3GYK7cggbyX|%T; zuHMYiDeD*JXs%l=^iBYVkmD59#Ba+O4G$n2`44)P3@tbk+!}S>Uhn64%{J=$P0VLCJDh^H>@XNUdAkL) z`qEU_%fxGsyruza@w9Dy>g=5Y&YFJG2YZfZu(vYDmCAK*ER#7tPz{Ajs%EH16OR2W z=46bVc7T~JCW@=myr$5WK(Df+GsAw04=0-C2+%)qNHMJe8&h*a?}@}jKy{#H>(xHA zA2`;U$}5f~n%-V}pP+tF`-_LSlcorxV&4z;2{_zD&oGn-)kz%%6vvL~=iea8 z-#a z3i4S8QIwLsviUkDVz>sV6INaw?P8kQWMnCZCf> zz4=NF*PhX9wki0Nt?AQn== zSV@?XuihJwkJq#RVxp_gO3;`mifaoej`E?Kd|HhsMmGP%H?e;j&8jkt07oBSf0as?fGEL?qnT$q=drm>rw8DN4okNBqivF z|C#9HDz&?IDJB~!$Km_OT4>&=LWOndf?+?AYHVMi?EOkC z6CeGa=7+1!X0iJ^t48Tsd0Iuj?nM17yr$F?2&sBkMZZiOHh+^G5n2!OHoe)P#yt!? z754**(pB{ri9Q3h-{$5~Mf-WsF75PTMsNS`xrN;Gz}Qnp?!Bg8Ax(!1An?H>_( z-qEF)EaqzYnLa^Y>8Ym^UOew6N##q=i=Lp7A@Xs0h~M)vE~pez6V2b-I}R&CbUWWN%mbz?irAN2(rN#$fd2OST%lf8Zl?FX z?X#40sIZ)*#jPq4$LB5Nm`nO{&>$Qhw<_mpo);YNW+0emWqiO?${=Mw(PR2iwBa(` zv3Zo6IKW%Ga>a^@DE6$yPHE%)Qf~8v{7SeJ0{5@UiGDH+m2T9^5{rDk()rPOq|SPZZj7 zD$_bntc?3K3t~!6hidlOuORykmMI=;PxF_R^77#M#uW#x?dZ{L{nd1zow=M*e%Ece zwaROo{^Sc2dfbAPfBd&|d1sv8JL-zd#KufS4gJse^ohs;kvR4x0lrotst?xgh-vCH ze>e8|o^|Ahj|_XaJG{Btm!H;`6&$P-scg-4UYkSl(N!^|wdU#*ZH^07|q(-e(n znUQ5RJ3r%a<*PS(#+Llvq!&BSq5xd8FQ74Lh{p&grRfAab3qo}3Wh z4|~1qs#^xPIGTEitxHI}eoL&wUFG7KuXb;}Qu-l%yP*%mKDgeRbdt8m(fL;N{mY|u z`F2?Cb#BqRX%TyD1`fHALHN{Hx=*i)-q=q0j2HzzNQA0r2NL~vXdxi_qZzoy!~)Zr zd;QDKN$$_i)0ib`wzqwV>Q^pI{7DYaXT;bJg#Va)(-t*>M4PC(`Ybdf!W zy`^Pq+WV#O`;*<_izNrUs92fW-m} z$DV%xm{ydNd(fxOZ<8skR{B-!LURwJ5kn)OMgIDlTWmRgyjJgSjm>A;os%hoHkP0l zl5MRW^yuk#GP<%}{{Z0_5TV*gN-%@DZe4OrB9&#pv=!WR0+cixvm zuwLWLr5(q(`M(m8;mQMVK<+m8(+;V077o|APjghvyV;AABc2s@=<1I5&1mL)X%k|m z01kM{91P7D2MBxc^v$EWvErz*pG%yO{dkxfd7Oej-T>UF<-SB&bqP4d0_HA8&Z8=3 z>9Or-gL{`|4WU#%A&Tpt-**Ij6`kLlvd0xDl?c(n6&e6|K+h1UsQ14g?_2nd>Fnu4 zRn^d!AQ_(^YmU{PyGcaCnBe?MA~jW$_-lANowk%&xOZ^JC(9q 
zZf0$!pT1lC1DKCe6O2A5zTLr5eo3{r`64#AehoIwGZ-w%Kq0=)6Uq`J@$UT(SY6a! z`0i@~f!eT*oIl-;3p(oe1ELzv-dB zV}>rgLX9ylSP~y?TR$!OF7#T&rq^qlDOFEtQ0=~F6Z}>)ajDEJnyu zZOu5~`aXH_g!cUS39qb9LKlsDr6!%A*BGyn8a{mwcUpl%&0X`f43XhHLmRU`OZO`W z?`ox76;0aL72o~=EP?$)W(;FV@rnNJ%Q7>Xnj3HDeBx6%Ni*jPl4a@>L}-_~Hj9|{zI5AvmS?g)y_*)3~X%wLvpw*$eGb{OsiR9ubBB}Z(tdP&> zn4LRTano)^JQuohZ~k_!G{!N&&$yPk`d$1-%caPMXEq3R{yVjRw=&NyLpIGuQ$AY? zCRx8H)-_i9uS>`M_1%uSa%5TgGoayiU!qtY$BqzdYpUgGYzKZM88=XSJJH2i?CW^O z3?Ke~Dpz-<`HB)Bsi&)2H(UmY!gstZ9ENn$8HOA(r@i|OmVDgsYloQ}9I;10KL)sQ z*jK|kcCRO`ovn4WRt;Rb@$TXA?qZ*4&tV-zd`IL|yCh5h#d_F~R>icCFrQXZzyTU! z1Z*D9f`+uz531Z)oZs&~Z%WQ8M$pyVT}CHi8BQ05DXUm}2LY|I3+s9pVl$1M+_ zrTzDbrRxo8U8+yIC>#5ubPKY3%*NYI3LGx{#y(Bsa5^T4#kWGfrdPcLQ-#j1Ps4LW zmFI3;P5ax9BfA}YiM{{}ikeOSi<%(zzDuqMiO>8W58 z5v`R2q45)UOvQOYgTmL$VSf{W4R1VJ)91yCN{$$OsYQf|N{R-xMNOpPf6zl?F9)}g zV~Jnj0~g$j$yUgmb4OlmOFNDp4kva_p>ASuorL2gI5n_R&Zc|)NcgMpNY=ACaH+XN z+Db(43rjnB-QW$Lvz`)i-s|$g5$c%ZlO`F-{Kz*M ztz z-qP^I@rdgLi*^35(hA=&Aok@ELhGCNwiASVM3{*;PRRsTH)CR#q-g4HKA1jf8(*PU z!u$PX$Rka*kLAtuCv90$v#Ig{M?6{L`N5X&#T+3Ps$TcW1s_#1$GuM!2$LckK7Y)6 zH*1-($a~_#{WIUpki@Z6d0Va1f9KO7pm{@xo6_Iq@zP;sQ_XWBQwKXsEJ9?i3=7FbmL1a$NSuXQMYA$Pp)*>lGR|K019 zQ~{Zc$Lh~|pu-3r$)7JozTj9vE)(r&%nXdOc%?)R=aGNF275p)KiBKE7&7i&xv18wH)nTLN{ zJK?1QN7@JTz0P}ZJ~dGZ2J?CukuIiR@Y7aaXScqcmCf(gF{OzWva|u8rs-^DCMuut z_iaj)i7k6ND>x&!3l2Jx?;c?V(Hi6Grte!t`g7jQqrHm#_yU?Hr>LcG=yyZufZXp~ zJI<533m;xXr4qm=^y&jyYq>{uOmr3gHz^bt-^K3FcZjNE2SWau<-s$ma=|QH^U{NG zR@pmMW$8we?yWjj^7bBGn~xUEf0F*ZG0NP9SETS>;G?Xg>*`<-5Ayl5Yux5{a@Oe_ zaj+PtA;s5Zcs2tYB1w=0zNviI)GEH{0OeTJ9 zB=&2ZRYErGf%&sMy7F}1J9(aTNv(6&Jd9Q?k*TR_bgSB<-Zi~#(aZxeW2A0gYm|KI ze=7Slf8Jouk924;{M|3tp)PJAKCw;_Iy9dO!S|n?I^upE&z;+$jvu~`TfCBUAV)?m zNJW+iyTU&GRcrb%o&Q&O)A6w{UBIAUCM1!wG;~s=4&fqQ@*9f^+!plSv~mXPcXg-q z<#bgh<^te&)L!A zfLj`rBeNVl;`6K>YVw-hSXy?|J|}XXmJ}6`Z{r2;DE3-gp&aNY*`mVeT|10E0RxIRiVti=sCNx-X_DNs(v&T=A>n_x$$+$gO_&-1p%Vg+bE0bz!^ zmDC}fJdMQ@qbJwQ5q4(;oB%~Y7Jw6L{vW_wo94XiG*qtNOH@(Mr07L9=d!)nW#dMKHt^n^0GoE*SPAe3#L+NAYvd#f^n 
zZCc$xlQUo0fRa3J%zc)i?3UHw&!v-f`{qewFsx3RNQeTAh}&$o^?C9)v7PdT?eXW; zo!W=L_};NM7hl`n%Ek_B##B=moG#Hu=+DaK@cJ2~pIY{^vX}huXf11{vI|cW|LDO4 zGELRL7}m8v&o+44Ql0X15NV}g(Z``3)7yM0x)tjJ!j_AlT^=Gya(PN$>EG4S946~UUUZ>&Q-t`n}aL@jJ z?-HeQua25j%FY{J%P}Zr$p%ks=QbC3=-Ib|;WiT<+=>!CF4cr1wZVH?-!a)4UC2s6sNFO>wl4}ZqKh&5!@+(Zmg)J0ZV6c>Luah6JFYybqO!% zo#*5E4ytiQILiJ*bp14xy(PM?J$CGsXh{vMwte{6%S!o*uUq{I{O2nJ&h=_TI)h_Z z9xO9chV`Q}x45MpER9Bhad%e6qfS2-2G(V6Xzwg2uyle!(Y5kAD4*!rX ze}KPNTgu+4rOPq|eg612@yf)uF6&)Z> za<-dulQ~n}DhjiS@MZ69H2q!>&k@FB8?!HC@*3e2c?2=%5Rb1NIz%#`go+j+%Qjh8!YG5@1he=75Y>x zrIr$c%*$h7&ph1d|OdxIPqs9J_%zB$a2EjVry(Y z@gu<--+}zE)Rn%CRL21H!p7ZM`)DBrV*AUU+UTFZPUxiPRnOo313ctzSmi><)E6Bu zHWFWlUjE$cf%1Jj|4{I8qyLIpL1{wYj);)OZE26Zpm{f+7ZU#SlD0@LWyx!EphbaW zIrm&S;V|i}QyW)cSTlD7SymDbc)*Wa;*}-ZgL6kT!wYg_b&4gc z|JU{XzuzTHM+q*bf&PE)Z*q+No7G1sTCsE^v$6m8>Dm8rfB&zSgd8d3ot6l{O1$fd zN)Q>k4}SNoB|MSNy4xxcH5$n=bJW`gK5Gip@BzCtxL?tOs^lf`M3y7W(>e^X7n+=S zBKFG7ylzg05gu&_uSrZg_rasqqupikU8cJ#Az6|D-U@p~^TB;yibiJIKI6^<$LfQC zrLz&j6B(p=lU>o~Gc8h7;1YO&uGBg{>5}Pm_4jp zHkP{t)J^XjT%0?s{LU@#fM@$GV-b5{VBRa7K+sMGII!;~z*_t!rcKRZ+9oOYU@bj8 za+^{bZG4$wkfd+M9s3;nsvHO=t+ga+8SiC@WME(%Vo0Z{1v7TXt^-3BWNb6RL+suc z@XG{|SMwWbBa0JzXq9{IbDA39Wj2p83lg!OA~&Ts1c!1kHY>K% z1RdqEiT2ic8mn9vNLbD%TZ{$XSs9Ku!8$x*CUfea9tLDFbeGAjDfakU)#oPY6bE~v z56A65SP4_`KgFZKcin%naks}-27}ya8bFDN{ zd;40b3BgTk?@1#RV82I}>O(vkzkNe+GtL#{h8|NV4yM(*j!5b+ArAoE`r1LRaB9iG z#VhnYXesZme*iKqSDO1_H1Z7k8D)03sa8^L)xd@bqsS`XA}4zb%*pnMtOiygCI1m$vXXrHSRMNGXrJ#b@FmmnC@EzZ>MNzmxQ0*%jOt zppm?Sc91x+%w~p>hVZP&^DaEAK;nV$RKfDte}J65VE9e)L+d|4%}RAN;z=5;|1c^z{-Ug3=Hi`i9h?N z`eM5-&}F@jil`OQ`MS+hBwDoC{O_GIlV|H+^aDdzbh<@+^3E(BvZzIhnDF{6#2&2?#ya4Lby<9q6%pU! 
zJ=WKKFw|&_Q?gZQzBZd7GPhGNIPR2MpQS?mBo-{oWeluXkY6v5AE!g8`|uU3XDUjh zYmVO+mVY+ri~{n63IyK&1Ngcz7;fh|niT~~fCl{CmFmPz#_VcaA_R4pH6|5PVqUL3 zDmo=8H7tt-|5UW`Zt7Uz65#78+4@sLBr-NSzz{!rFqA!6CQnavnMSFG_n zt3%OSljm!$NAkKgE9sD$Q;H#bXta`d{&>PqAB9q9?0*1RHtPmc!pF6L0BU$fFv48mBpEI|Eg;c{XIk1#>Eabt>|&7h)0>@pO)x5y9gw0 zEMbTU!v3H)N z-P`~NZV-Udq+askCmwT)aLpDT!30_9PC-RouXTX9S~hSe6ZyX726H>PhVys#>VNOL z+YT&TC#FgaB3S+bO5mp~*EqxlW5uA$2#J_!4Jn_hV}`t7{meg3PP4|MKm2!)mKBHz zokdGi5nTYuiNNGC41-GT$)AYs7!W$u#@zl7B^Ws=zE60C(3NO8q z@U|U^z)5I^zd9PE9O5cb4FzG}aU`gU3hY9{vl$L7t7hGpWSeFD#Od|C2Jy@nMccag z9*34>KQba?G^W++J0sRWI6Q0BeohDxj|P?RE1F2VH+NJk-R|eiqrB9h)z1 zdy_?8#Bfx@N?+RfO8QHTnAS7s!z`i7)huJZZIZ!(A+H+jTyYC`1}IeTnGCalQMxYa z@CXX0KOPJ1%?BET4znX+@hMlHxMSgildG(IebtQo@`DS7u?6H_1Ni7NCD~(Y%^Rj`p*_}C&Eix^vAPt?IRuT!svSE0>KuJL&2~BAf51lo+4P$6{0nq8rTu=KL zwWvR;qn0sI4?FQl@1%3cBbx;~f#ROK3BVb=!fqbbnd}+HBl;YA-=gOSvBc zJLwCmg7!StGHM1fETBaja1~GSB>wTt@Ljs^+Sd!|#YseuC3#6xfwfuledq$HV(VI} ziaR)PVA-2fHy}oS0%dI0t$-O9at8@1I_tyRtPJ=XACfxbfIQpY9G4m5c7;>nvA~e* zPI?6*;(=|^<7F8+j&x(;X=>Dc^m&-W&WW9%Wtc~mJhN=;0#Z_Qqm9w_DDEtJ?UBmT zV^>evkDCQlNPF&9iTH8Vb8PY}X}uX*;9}R!KX7>j?;u}T9%nq7VbMfeh4oa)v#?7s zif#gZ#xAr*PSY)~pv_xe9&HCxkaG*p&PB~bfQiFi2~(yWwv%~`@2kc+xPX6`cpnYN z7MSYDlYtb}=Ps}YpAnHMNsIDaN%rcWlhjI_r?tEd@~Tk@I7)@Vd-_*Wmor3cD4^aX zF~*B+*Zgq#ptqEp z4M&%ug3~(3M6RJFZ{52zq!@gV_8z?uSh?KrYmTr6kd8$I2T}Uxmv^}naygPZcig7q zPZo%YM{yv#RvJG5n6_7)Q?#pwZm_wyuL=K6;a$|qr?mS5n^9YE>#cHq zb^-wg9jnKq(ToCFk1B;=l`F>cl_aaTs0H_}H8yH$w$am`EyN#>xmt#!+`x^IEhD%b z%}nIAvM@<=jrAi;!{iTNc$5xFySU&9z}fbRyN;699P}O_P`?xF z`HNoMJnF@^@y`&hGY47`4I0v6mmBH8nYL@b1sfpJ3*_){b0uN;-Pl(JWImN(|0=#u zD>`BMCK%B+u~u_+nm@hl)ptJ8iQ|m|2CFym{LUe;hlS$L3Ww}Fz;EsNiV(C~;{tx3 znFy8aq|^J>P&;m$q8kO)sd!B*u3|c#gR`ApqCJ%MZp7!~xI;5usd7wPBB!kk)W_7Valxr$A!wIn_PkVC>I=S@{VB&)xgp-Yiw||zjwKw?= z=VOXcfcDCElA`Q^A|?@x&Ko%JD*)UFPLq1ECEA7yCx#WKbz``2Fe&N#kfhG5Gvqyg z8-trvq##H0m=&l9?n@#=^y6s8IfB?~Ze-SZ{*a;M&4~aqEK*QJ10-}~>wLpv-=V?0 zuo`h89ckTL9+pY!)NjGM{-`_H|IwsL390HD7Xw<0_JvWYgU-uDRULD(+7jX 
za6L6a;&`Ib)+l3I~BsGVDr%6R2X*^%L87a>+5Mr ztbBu#Mym&~5lbquYX54w{rIG4Qd296a)db2Hh1@?uD3=wHVU?H z&l)q{hDP!apUm2vr6$IA%A=8wQN6RFPJ@#Z(1yvMrh4lmjWCd(*U|5uz2!+oMs7#y z3l5sD_%p|QwtZ_D84@(Wzk8+HmAA;7uuii#>_WC6D z$4c~AQb}w*S}s5Y6gCqU@IgEgv^yEge^KL&o&rUfyx80t*SSH*Fc7aJOl*;a&$zV=U6XnQ{#q7P;$!dYv485scj)Npf?*{E`tjOHn8w@UOS!iN7kHdNNh21 z-!j@UuiUkkNXd2`o8M)~2+Ac%$w3QCreE^`8QcCY)i&}EkCdMS=GFy>{w6YqIl_CJi>&O@TiOn>V0d*Vxp7@Egp(ORroHN*&Ln0=*^INF>wX)O^o^vpM6#B!bpI z7v<2inu-}0$0#Y>|34)K2Y``E{-2WKv%wl&+K5q6GXIZ^^1nhZa(eY)HbMr(b%<1` zJq}6a@_#b|m_Zjml97W)F1rmMHC0WT<-{toBEdlz#E~laCZ(4^s37zAC^34t3`H+! zYIe_$Iu6?6p2*}XY#I~15D#iIMM;m+;ndmbwtvMj2#K(V632yVJyomPnXl|MzZD?Pk9>G9Y-@G6Iau^?fTRD5i`uz{8$ga{u4O%Bi$iH1 zIi(wS9^l@l{{V2lTNGT%{~FB_RDzYYtuN&e1@Gi1{FW*sYVBonnRu`e1oHrLdk#`t zP&?60mB=`~?x|i0(Y^mCn1c}r8G9WWn);iUY z;yvX?jq_B`9hJ?`LbpMRblu32#3>Kx9!v3KWdGi<<@Lt&vP18Xti(bI%-Si=orx=; zfb&5y=*pT9r6_I}>Im;8BJ%X@N2X+L(eWI~WJ?7&<(g?J@hX_N99k>#m67q0ndtLB zv>`mYJG}X);uFy>q9>g2P&Srn`tn`TzG2x0gJXN%)T~bYXkJY=gL6|co zxlTbO)hPw(7-mmdAY?JJ)Lx14m4&C*!uh698~nY!=Rl8GGqOO9tZN&LHdEWQW~e`~ z8lgf1+MGvX1#at5bY@8k*&s%?1N?vNJDSw?LWtUYx0>%8fM;aXWIVU{ixH)Qp0x|k z@6miD-C!SXTFqBL(l81Bn>+Xe!sE_Z$R0%;W^dWFrQmX?z#yxTwg@I{vjiwyk&vWv zRcz$q2ulRkOI>>->bD}f!^Thml7+4}^=3T(0Gc@LiDukuhKg9qH@a8RqzjVv5ntjd z((wo^OkLm2yrF^)xe*XUP3GuNBK4yb!so}CzlENLod$U zN*g}+#Spt6$65WB@Z3h3{y6~LDFXwz4xqQr<&71S3(JDW%&*}uc&$>B^o|h{*GxaY zEe$fZ9-+Ni7#_B!*Vz;8wZNHBLdE&Jf@&b6!U4|wVJ}kKfqC9$xX+J$sa2xvR!JcH-l*EkztNt z?In}AZ1ssNUY-6-SBK>yxE}&HJQZ;pIFXyG+p`6)*u`&yak9f16Fu%7MSjcM1^?7p zZo@w6ZKOE2Oawc(M>|-zdz2N>9MMD=?}`mK(EkJcxr0hto!li#MULEO+>37rKrWM1 zkDx@yb_6Lm{VV02LJ{TBE$_17ZFZu)FCyhN?CmJ{^K8ij*UC9mI`lx(`ylt~%|H^* z0B5-ZckU4xjM^928bDM(yNX9-cnnqyVMY(vwOd5&7*F;Qy5lE@uEp3+(qK1!ZLEPYFv@AxszEMNYi)N1?6%yMxnLnahH&k*0gJZw+B~ln<`YI^JPo%c$H#a217sIMthvOA zY!;2TFU0`f7zfK^_*fK20}s^GGL)W8mPd{(Ml`2=r;giu`dxDyI85^|FkI6_I@C!D zU*z4G5rcYYWNMMOY0WY73gXQ}OZ{9Kl4vQm&-{*MGZJH$WI0|u@OD(WOpMO1kiZ>a z{4_-Y|5K0N#_TpZ__M{4kC8AHg$wkCsJ#a&*^;rvxbZ&H*^y+V=Ye9bVQ{OzVJd3P 
zorsLB2x6BOZmQyBCzZjI#%ayybx!E*us0B{DVzn^9mcpo1|Ns6IC{|vDQEM|;%z3P_l9PFnwhUohvhI$`XW<1dtb)tEt-T9~g2+!n6WzW{$EzZL$DBYv zRwpcO3fw=Aj21{mRB`jJG{km;aBR#)7e&T<^!J%nV%Zt(m}0YRNHP@`5{Sjek6>^W z#ItCT$m)omr*^m&A(IIvA1M|^LMm-(fFbkih%~vOGaYcEy}Zi!5~Vq7VN2G)xwVSI zs6*GOd+suM_*nd70e<~~>4Q&#h`UXKAbd(X)`{#kuFy7O)z?~k5o6?oSXzDCuTHxA zWZacGc>IjcQQ<*hrTN6BLY0myF@$ViK|OT)0?~x#v4 zqlgm`)*+PkkD#LH89YWSWpzj4Z$An9G=1d*12UrY7VW4OWLUfN0U6L#@8nnz|c76uB&>ySu=#ilBOn6ed`##zs-WP#&- z4_0v`{DVkrV&GqSU1d)WN*YoT;KKKmd37%`gJ2)jcrdmDi{DWz%ZfH6-1CUj4U8PW zJKhg*7p;IT1hqMt5$|MljzDhPHkLci{rJYN2Nb)rLljFk@y^kLJaLD38MA}odYPu8 z=)D9>3HSVgjWe;h3wITkYjo9`At364HrmVg&IHO=A}Ab7!(w-!W;GhLdA z+|WCI{{U83;Vph%{r+ZF%3En=pmc4=McN4z32uC&z0JbZTWs+kGm!IrWTEk}EW1x6 zTY|pw!0q$vg&`;_bC&#?nIy`5@j82^jrgpY+1R8T!VpW#0a-~vHXsH)ccLKs3Xrta zDa4AWN9RH)baS~4DeTR*hP2(16}bw$M5y}1@Lm^jtyK#^^s?kK{_p^cO{}&>3%nWW zBYPC(b$H>$h~rCSPw8VPo{ZCS!XdYCUR>k6w7&e2bcgU7A<=4+bfhfl8^WXDnsk5O!>TbU~qs~33Ef7m9#`un$+^g z$%Z%mAn3h@K5D<5mA?w$O5YcyaCl5S9=~DQ^b3n>J8VWWt2#05dC;)ov%Muo=>|^r zlcEr*y>Cqf+2ZpGS4UW!@caKX4_Mrg%t49rxbv~DoAC%l_Q?bph+aX@@oNXk);V-J z?oZH`;<%{HhZh3NoHt=Yi~WluknRki!q|S6yyjo^5yU2UfAG8%Q${GuD6(MNb3k5$ zbq{pBT?8TUb3frKYO-K#g!ymc}SleoAyiu(LPoAD^tOW1!b2r*2f>t#{c zKutr`C)f?9J}Q`--+_7Y2^2WbKTa-IRK(Jf8ws*67#iWWIXM8~Z`bB2$lTj4;;YIP zd_!4MhaN#POyZc+cqrZ(7g7O!lG(X%twkMh*OM|SI`;?#dhv(%hqXv92hvcSSWk{3 zlr0xG7kh~dlP2+pB+9w`7baH|!HEpnGFegrK3xQVmQw?NjYZljBJujp_0_80;0|jq zc6YTc;?tr+hlZlZ(#=*{CyJ4tBMhEx{Ij$u|1j(;JeXUq5_HCaH=w`(6Sg;R7T!q@ zTAzNsnEz)?uZt*0TlnI?HZdbq087uJMad}lNu?IJP02iDd?mJ33phWekvu?xW@3XB10* zi!(`+=u}u_Log;)+xvPn$8CXu{}>6rOU#-+!CaWa&599|T2R@>3q+;n$QVj_daYfY zr6!_2!W)ARmNj3sG!EB-kB0Y|%s{GGCmm44>qi^eYsMUblhyLi&Af%ujFp#^lS2S9`OP)QJ-XJqqBH3Jk(U9F-3OCJ)(s>Ef#><+`EyWey=2 zn3!<S` z#{S*VMX6MO{s^=GUhqf@5G+YY+34u{Xz%k@ zIgAA^C~)#(W0?5Hl(N2ga` zR7fp9->lWB@AWceje4wgMwH+46t%w2RX&I7$S;0?OF4!rAiz6NjLF0cxiX&N(4mYJ zr_KBWc4~;Q`YML3TwH~#3eZIo`K9$X_+^$#Yx=;c5R+^Fmp=JF=MmNuY;25Vh7rvG zSQLQ&OP`GXKk!L%41H3F_5aZ)$?>MPp3p{@g}#s@*6PzeG0O2hN&zyr*3_G?cuP;# 
z4d!SZ4Wc$=UKf+U#z|Ft-?N#>q4ZGNuv^SX(=RpgB9e8yL##@g;Zf>^tbKy>)+@S} zpOb3sNwgJsNrkbB?5nJ7?#kjUlZ4UH$4e1SOnqx3SCe|QGw+ik6a-V>CFgiGp0!?! zeoD~*vCJU&ZwUfMH-0ropLRw9ZFy(U&w5pdqeyr_()T|ohcZl*xqb!cMMw?Wa+NQ= zRS5a{ikC}C((C(Ft%`Q8L^uAgFj$$zZ98jEnXoB?zufr>oZ(OJDtmzs!i5Ks#rTCF zi(jy6a$lE2QakcdV%X!7WnzTP_pR@Ub5M3-XX1u60y&X#tMPWw{wpCROuhS!r6OFUp_rgjKlii z8YNATqc=En@B4F*Ue@i@e6tad;DgWl>M3I3af`4>>N%u|Zv+$&;%+fbfUjDFSUWYexey3zEuPu%PzQ-oNvl__)!t1BpG?knlp#2I>Ky9xnwW8Q=R%) zYOP4A{ojQDx}T^h4>bsNeYDo<+n**%;%v%n%&STGwV&OE|H95Cm7uPHVXHQPIwr}u z6EgJ=Fsm7-Boq?YoSply`RU@yFhhQ1l|=rxy-45LWD}lGVuCuYCXGVrB&(MSXWrr7 z-m9g~+0_%1r%yGH{uW5=XK~cok5orJB}NCI^!1-BO=wtcM#rp^eq+NW48wP$@_#ql z5~}vNKKP5O8;(AGMXMmn`HJ<0D_Vs1<4LB>@GS4s{1?~TKyC$Z;v>s9`&);2{ci1t zQ$GedRg7RUE_R+0nG-QZlHxN+H(_$u=LfwfY$%e=S9muiCY`{i(YA@95*YP&C~U-( zXK+&~uH zLGIa>v@vVy%&}q=PDh^8wx~xsBlga&UiQI@1xU7RwPUAjcAJKZw1kyg1X)s^Ww1Jy zsO5391_#8P?Sr0R6YWMunn#3Ggr`c|lbtftX{ivTE=X6&oXxXBC1~C(j*<^iRK^7c z$>;RF_q5z%nPjzjDMxdEl?;M3uWG*^mP$m^K6`R(U)O#b$iQusFuc4q%IRHrMr~ckZ`HoE#i!#cf4=VbF z6~^n5uVo${VVuAB&k6=VIV!9E7c@y*d|2@AKk}vZ%!tq*(%2)Z-ofrFM-wX}GxA}Z z_SCHewvA#m{~rLHKx4nT<#hOQx2{gNBkmpMqDTH(* zTDNzfj72L>N7l0DAXM4u{{ZwU^pv~+XLg)c7@&D|{5WupW7vM26o!%a7ye)`!_V}q zjj84*_c-RHU5D9^+l5}?md&<53T4|YOh;aESU}oSdA*+B@P|o2F z56jOcB^pT_!+$q7QBsa1{{7^BgU$XhVeXao!?186)+fX7lgE5GG$8pfQ&{vL3!DAh zLhI`cz&$a^06duOP0(v1JqD)_pUJF^P(|u&es_!54*{&u^;h3UM>f_xLwZ_HRg4_= zeGY@uPrsdTxD{3=D?%)#136YU@8DDsw$ z&YU;j12|wzMJ|?x53=~9ycA6acPM{H7}6JIHIGE*zGt^sa&UAoQB~oN#ij4YY8BCs z(y7CS)$_(E*$-TPsj~HmXj9Ct3qc{Yd}0<_V@9_;Wec#S79L;qmBd6#u067X@{YU8 zPXl`I=zhj9#LM6n_5gmy2>3+d#nlItCI0~Aj#uDc{`^}g1+Uwyb zT?bpqc0|!NjA^_I*cwhu3-)oYu}7H~qt`W{tQtSY2ACnZ-R@MVhdF{W(@->x3Wc~2no zQ+#Jz+I-NM{{Tz{H}`{LC6a=;8aAQ4dp#&c28iuLM|r-`!WLL>L4BNdP=HyKK}sw zh3(!2bCxS-WWa#z2A9#t4s{mlVGuj;;t!)XP(0I&+GTv4<1CWkP*RA$Mgg9>>*>53 zL-5lL^)>mzku*T68-vTi>gLty0ze+>zc|p@Cr$pdpZ;V>&99;Ey1+EKfY;>LMdWk} zLQIXRI7`;vKl>lh0Q6ozj0N2Cb4!+{4A+#!-W;SLpasx%jp&}O=m&OguXnl{K6AN(;sbrSOl)89yAWQ&M6ph7875d@U9JWxpuyd z^i@MU@1Ifh=OKlv;!J)R+> 
z@s&|Ja}q~*FfC9;eCxa#fBF@_SQ#eCQyT#EBxcxq{{Zs2M57)J+u}c*0RTV|@v-#7 z^DV>xh<-*J=^JK%4LBFq8fefb0g=HeB~%cq$|*; zJil+8OoSIi{SZ&#iIhh<0jILF{{Y}zUqHy!Sx$Us#6R%4YklLtSaFBf@c1{D_z%3v zPywkMG}Nwz-Wj3vf1!9jzYJkZ3-VmmN3q!Ozd&my-?;}shVGT$b+*9|H^c2?CpOfnXN5YuUQPtbm zsfoOO073~lj{7pdgbJ|3aK9HKSJm^1Z`VFk;|3q*-4Xaax++*JEK6wT8VkJ_Rnh9W zL1+Z&Q$zjpi>jT@7`hIghH@KxbYdyI=95A4%wN#Jri$id;sumU5VeV{5U{fJe2vVC zo{??hf2Vjx3HHUB`UWRJT|b*JLQWy9`Qz&4&=&9e@szZnr;aj@{Y1vjBXJ-g(+*T5 zPX#-_1nR7Jn|x-mVhV|176L25kk&9O`dA=?dhv-cw<`RjAK?J+hP^Yn(Jy!%-05rD zV{_0}_3mf8yc-sV{EQtna97qBPKqWwl-g|Ib+YAv0|+1#o8{OFCkecltwcTXqtWjq zu#(pnO5?flhz4kcJ(w?6J#xQJW(hZQ(K!Zv$k zg``1(2#7!VfJ}#LRVco9&EwYs2c|5?)sR2lcV>{hJ#1;>JoTJo1=;)#3_;xaHhxT& zBMXioFO|ehvvF3yNE#pU3 zMQp`|^Q{f?UFOu&yW4>Y)5GHc=zvAeo^eC4+jljCo4Bg$UQ~2>a(eIUQ75~8bJWfQ znz5w1*gm-3*AoPoO* zcBc_9sjVuxxN5_j<)cHN>4;g8!P(!CzE4`koyD{Z4K4GfUJW-AVMIlClC9?*-hp#& zVhck@Cl4DsyETQ4k;~LU1R2S}?*(RuZ5$G9*ElyDpj{9DqdQ3nn+oPLZ_z;@{pfRwVWdZ z4~0JsW}I(kR+zv1Ia_%CA5i(i3e-)6$K3eGfGrJF%~(Q;?Q7O;egTE6k2w3@XE_in zq;fm{`r@;7IMyqo^V#s0(+UMst6shYwz=d z!5DgY^#XQ=Q@q#C5qNgRpIrye4nE`s-RN}q;}u{~Ghk5bf3C1A&8%C(R0(Z0ZZ`vI z@eM{+x3-SaW^h3C03R6^E%4*3dH1?UFQIs|+S(0|2%d1WY$XAl=%2aZxK6_jItqL( zr#2fHD0(2$0B8aLVuyHMO}V5B{{VZzfG;SIby_oX5DWUuFIn3iU%QT zNBcR%LFAC5z@zkl(qkRu`D8M!5llhVKi&#_nr{V5$-3*wVtNvQY-^s&IsX7ykv$yB z9qGIw{A0zsyEdQu{sX-2o_%o=w8%*2c{|kMk*M}?l@sH z%YU2{+<0842>XA5hbOF#a_ujUZT9Xm&mP_{qgJ;5L6bE|2fu27HszJmDf@dVe@bnW2u$ zP$jy)Fey^KXcJ6*BL`@I;et&juX|)Gr#Atr$?=}Ar^Y#I_Tt(*BHbg| zx0A0xsPdpy+SS|s0#_$n=OMvK(y>B`x}p=_06qv4(+X+|s?)FT@qt_lQK0$A1@~da zwE`vi!!QF0BlCkqGkef{8(%{k3NV>9OU35lZzV4{6zsA+7#N3Rh>^Ffo4g^~W9C22 zq66(y?SebRiUR`5l^!rUxi~melf^_6)8;zO#q10G`j{*^{wZyW+W2bNp9YwKh=PYnaj}pzz8f3%3PQyv39+*q3zmOnZ5zOn(9TYtP zEossCct;Kj%T8+Fqa9{afd+=vfO*8ZRHfrmr?GV)W)*7I0A_ocW)0+~j z6NfXOqX)^hlmV@|#Mc|;;r{^gr-s<|Qz6$}jnG;f1AKvU1t|apl>&V4a_2q1Qo%%3 z*$0>`ttR_h0{1UO3vz#kCr2gp4uzMc$Qirg} zqw|!8NFe=WL!@(64{Tx{m}4OmoGZxyI9N(3rFy~v(8b{7P6j*Ry(4n09Qx!qZg9>D z1qxQ!BnDI$vr1BhYfJCm{{UItjBSLd_ 
zuw$Bw9jq3VAg4#kgLU_86=Oage>j_t*fg{RqWB&+l-b1JAOoqX`_yb;iVt6GjPKxa zcal&XtBH{+;*F1&Z$iufx^Jkd9>R%>iWDgeXV+MW4aKz$+(g(R)xuR_x{V1{7vrqj zytyyF5qF&X)>(Ij#T^eOOT^a37sR10fG(k zJKvmQeGUqQ4#R?N?*Zo$DM2XRcJVsJC5b{+@>WnVgAC;qO4POG#c7dUi)E=r+8qr@ zKLP_|MwajyWJcta1AH@O*6}=jP^$zelThU|_fym{6oP@g!lL_M_oyFzToFCX%M%vWg`@XaI8zfIuLPqCMuma31NqiiKw3&b(t%O9(r`CWyT^tPs}@ z0-`0_<-A#1ea3$Cp_S>mE+hW*Hdy+PewhMjDz~gs`xy7wUJTR9Cq6J*_h1Mgd;b7A z$?_aDCTXfw!c^1MF-}w>T@MGtfWR^{rAQxH1R5gT;xsq2v`4XrwM0s8M2$5g0! zm|Nc8T(H7eqgP%uvGHI+07$Q3lU`AsrvnYZIX~tx@*|LGh)C_abz^(S#1fmiclDOm z(@?M);pTN^dCmm76H^*N41_xyD3EqeF%=fth#|o%FOkFvM`Ce?HXWG_Myt+3+W07K zUN?o&N}|VyIE8d!!=3T(l+#YQzPMPT7TTwxbLEP9Xs0EB$m7PZpV67~@l8H5rl5dQ$> z45xcYHhZk2ZQfwB{#S-~yuJ z-y1NKtuZ2PI-;azvEQvghyy|kv>kxe8|WG|ccfv;4L7WA&cq6u{d2tGW!RVcf6Q@x zANCXD;lxN5qCO{&U)E7rgU@c`-#K0(A43dk`ch(}&RYSvjIrRMH7#6<# z4x-jR`!in)5&#EtUFPM0+6JDo`+VS~*pt!%OYi-1kQiIdsZYGnl_V0_{_nAybk0Fk zBzH~((t-;R^^~-Zmy-saL@4DcK_t;4D0+M0d^POCCosti(&6#0)_#xcB@7o>ARF(I zz`VFKm6#G(4R(hP?wg}QNc!MDO9)3sM8|>`ieGDoH3MaQIcSjRFvaH&>p$dc(TzMI zh*TscQjFU9%GhX%%L*o9-S@LCsS8yRJFOkJk!`Sq3r#N;d||P`%3Xn*Lz(~<0dXPj zejz=dh}o|2?71N91Cd9LZV>M09#MULgg$U++U0+<23JI>zXOBqKg1oG9#6b%)fOpK zbO0*2dMpudnnb5*wwhtVLXfv@Uwd!XF>;$w>0N~6aC`@f8sXyj-175~Vt8beTbF0n zCF-pPEpRNy$E4Y;LZYGD!N!XkI+&?n3YcAy^t2e_(cr+P;R+Ar5g_yk%J_gV z_cD1{`8gc&gJftelX?#KH;pbZW|l9853FO+9m1NP5T7{8l|k1P;7oLzJ4|;`PKuwC zJ;)v8?pz)>{NTam!R0snIBsmnyj}hO0DeHUS+KJis8D9;A$=!X!7(B`VP2lxOpva| zn}B>cWmwCjHij1Ii=}`)UEm(zBGO5r%<!k2@!2I6T4UP@)Id*E=v#IaJZ<>)+FfDFJuh3Jq@jVeZE1=Rkcq>b~%; zQtuT0!<=Q6YUM%p+b{al)ve|1c+F2d;iw@c+jCbcfSj#B_x=r)Qrz7zl`b{{W*j(10$r-1)?cBuU4|>v&=0xM%~%roVWG5f5OF z$Nk|9Nx>3x*E@el7!r94S`O>*{a}CrHP)f}H|GjY0s~DCzx>U7CD@!E*PK-3FNV&2 z9em-Eksb_dCj9xDpJnBz%BATy=#9lo0(8 z4{`LxDcRO(N!>kDuZIx~hSJzxo5MG3))hdYK}SbMzFe_3s8|B@3fHd|CuHgO zCmNAjuV8*$F##dqb%}gafwXLH4T4GKeUI^l!oVL)6fRMo+gj)B@ zi`CE}HfT1u{1U}4KqjU0+Q;w^x^r&1*sU6 zeg3i8lnK(g_2T#0gQnefO&_~YoR;G#C3SzCGI&2q{i1r0cn`SH00-7oanm0!*$=-? 
zCX(LlLq|V>7$QDJ9u_z?luGMk_;XRd)`v&*-V$$M$HTetjYR=j9R>H$;tFrrfW@7=~vMc{)VaTu*9XeQ+5dU_^xp;+)bFZsEip!%OLx!NzQa zEYr4YZw93R2)d6RFld$oq1>8azA@ERi)~K8L|as`FcQnBXs=t*`RfQ7y^%UFJrmsP zVf?>Xp}QM)C-m`!*ZZ6ST@LTTo1`gDq1XHQ$ftmCSo$e5Ns|q>SE=Xgg3=&+1^4)4 zEwUTRlLcQQ1MBL?*MRFI=ld+ZJz+dkx{E8}pBYQrPze@3GuXIsYV$8M)h9kPt|$uZ z_xLd&#Wzl#Z(1-AMreD~2k-TcM}fUo^R4sV1o{i`*!_;;`N%*=Wa9@Rtue~<>909@ zprowG=Aau*>n2*NScMdH@Z_9foI?Q&AlUWz!6J|dB%#*&zS!>sC*^-y_`q16NbSEL z?ZW~CKsv*6CGeWxw)?9&fI{;RZ&$Az^|o-p1>k@Wtz(T1hb=Z%fCsMK=S<_=5Qb|H zNLm}duYY-bXo;<~-=F(|8Db~r4RPF}A`@>2Uf!`vz)%*y#|}GTQd%jad?z31sMP`{ z>=ya)nuA8bTR-4rkReqQhIzYANv<848)2M4!$_95hd0%tkvuvzbv`i&5h$E+Pon16 zDK{xm+53HAv4R2`*m;t-oJ|61+D#VI$7dX6BMOTzRIeJdMF9<&E^mCdS zT~9Aq^$pHfFVT(77hMZO;2z`7S~?GiAINGX! z;cuVmoe2WUkAE3zM~7yg?8#6bD~%lrrFNs?#;!tiAu4^JewYK3>uRcsv3PwOIB_?4 zDNtEnz^A71UX=k5S3L>No#ku@o}!7ytZ~GPvqNBK<(|jQ1tJ2_wNv8)6%s``n?GmP zF@x0&FQ1{nQK~hRsXRP$iB!w6y8b_S%5GZ}qE1?&*!e_bP>>pe9|i_%NS%s zbeT2C;J?!4Nl&PpTefe$@Ih)?E)S16Ht3^qAM$TQ@R&f*7MIJnEk>r_9)5p}?g5eE z;Qn)9r^2Kkyg}Y4TG!rf4YZLdL#>JQ;cK-AW5!+TW#J=+lJqnzQ_*qgnJv?F^%w}- z5USHpn0uSVbyK9^`QrvCt=kaZ_3wbx74Ca{;Q(b%NBgX#26$*5-<)L)4Lq9H@rLFt zuMfZPSQ7LvpLTJPj&Z;$a2TG?CL#&yC=X#iLGO_z;JgRIpPXOFiYy+A1oqyp6ntc~ ziEiLM80>IOvC40(Fjvs|Z{Li*)Hl4piOJspod7&=Oco+F`Uj6r`_`6H`X7v#N`~O< znB}caJ~-cfdd_H_`x<1oA!_1QrXjq8gb3*A;J)I z&g}FYsPUqm!g^Bt;74r+M0g(0)+pK(1GiOuTc^Grh)5#{$Ln7?79eRuLgBBkjNDM8 zwh|=#kH@AuNi=6-pI!cPGl}Xn2n%LeSnjHC{9x032X;3+Dx2SZi=`+Kc+;yX7ySKkh1EgHHJ#nJ^_v4n*s* zuUx1fQ+#IUPaT*|e&Z}D9R}h9W7bdJO&aQZ4gQ>B6K_Z>d-K*=E+xejHLkoL?Q=>J zUbz1Nn0AQ{c7yc77zUUhfkJ#Ue5SL)55UdG`o>H_b}~Il*0#8;o)8PlyL5OoLmaAV zrm7tL@7n}xODkQc>l!eq+gq|_P;8)RxDCnCtx@LSQD;YQwmEV^WqRfKzZu;qknnD2 zA>2~MQ}%vw148hcf7T(PCNhW4FkYP)Ce?FNZY94y2mUk&!2QKWZOa($#bYwBrdbB2% zc6?z%bB_fk3uTwQ5&Vm^+opOZqMUG`eGDRdb#aQRL*EziaS60Hulv>&umZV%gKqV3 z7Af`Ddm- zPDmX9)oO~@8+gbF;tIs2nJ7AYB90VL9ZF|t>F@s7LA3pUygKvEH0FEaD}*EpgzKL| zI%es??7g@$l_J%G+e5!>73n(|nT zH;$x{v;h#BX;snTz$MZN>qpXkL!1bN0t;Rx_|4A*swke#eXYqowQ1o89z*esB8Nr^ 
zb!U5zJcI5hy^_4f{i9ERdUq8GPjnAL%J+;|NNFK9^Za(8#qxPtOQdVJG{Q7y(En|*5h9`Z^d6qn5Ff8KGCquQN!u03!lATN+BLnhX0<825V zfi#}IbC2^2q-_;K?SQYbEwHi-{nX)d!(( zc#whUcm3x85J}&B`(Xuk16#PHifvpk7;Bth>y9#Wk`09O{NT_EVPT;CpB~r?G1j0i zghiz8#vm~vlp|wF*o&y;#p6euMduy|PrmXMs$$47;|a5)yI0^tU6k(+n!f6vD{=H# zOn~VD1L|dXPhOlOTuc$Cez_Uej8qkf!zc}f4lvDo^uRB@(^_iN&kX^iOu;K@qgn@M zQWJ10bkn7q?@6aM%D{k~!@=3_A*#Ovb_9fdq{;JDBPe!9eVq6xfFW#(bH)VE;0tH~3lXHwMUeE#Iv$wxoH$o#uqe1bYFQyj-`o8$ zG!C4HzV&#{A@Zbeo_;cvQ6EEixNh1BMyBrC{{YM^$gPyOc6{YHwpR=SDR=jiNVOe8 z8GO9~{rJf8@L&}Pfd@h09QnkEJ79Q>Q^g{VLO5|IZGw}baH?J8fcfcJj)985!vccs z9g(=?9*!$V?{TAPnD)`{gCxQZ7F(*k18ikZyOO#r3^$g0(?dsRTBz1!fqr z_N#^^&5VOV0qhjz)k$}e=9$kgT(XfTlCj|x$FctaaUxTS#;S0L7mbhJ1cUybGm@Ww z^=9N9)_+S?hXn%{_}%T8u!G` z!*Z(tC1%$TqmcGn4^LO`0)$`#^l$N$+!Cp)!}va&S_1?v0W=!>POuVaD}kzWU{k{S zSvrdau=o}fI6=5AVZ8Q*oGe9@c>f3fxj~G&Lb3P-~hGR7O?cr4E2$RqHLu)u2Y1iL;9o@ z_Qw_4Vw_NtT^#^v2f9{Eh__rSi$Sb9cM*~bIeR_<>zp%!Ce`Q=UNx)_>ZNCra{Ln{ znI1^+GxASNko7?>zTrFM`RfrFiABe&aM|%8I?KDglNW5Hh&^ulxDrQ>YVuqE0CK_@ zfCuYqt#1m)ejwi-*&aaPFx$8d%46ntC=_R+2hM9vIw4+uoO{bD`Bj~i3KQ}vshS8?YDO3{w; zDk&oMaHQb$Gm7oHTmUm{*^z`zB@>9@428AQ^Z4%|1t1%YUK8#pr`3Pmz#DFm;FP*R zy>^+UkPOcR>%sH?hq^pAO9TkkUPd}~>^T6*@tsB-Tt zcYnS;SekHgklz?+O%AkNfH^ae$eX`7Dx;ST5TPV!_~(EGS#6l;&{`rW74$@Q`nVDv zr6QuZ%DPRWZ#SbUO*jZd3W4FhV@57rIH|y)bYvUBV)~duiZ$l*tmAof zSAuJS$$hbHEtNv-udW#+c{sx!0w^dQls4~GJY?7buX`-kU5i*3w!J^U&PGN`7npKg z&zzUI72E0O5ul6g0flp?&TSka01;dx$mRBffnVbkPhf6sQxS{va&G4M;s^17_exXU z43twnFi<5C-BI!N!FHHrkaz>I6s(?i>rN#VPF}&lIYJr@pi6@qq+PuS=Nk=cA9G=ef@oM9IxYbB>f~WYiHfb7iU(6_!vKJZWC~QV_}(0--QaA7ksZ$e01Sk+ z8pMrkFxtKQWTg+!8M$C9FdL`Qo}zo=uc@u#3$U2G44q@Bk3rs-U(PFas2w(q*%8;g z2D4W{AccyJ2daAK7xnpIH$|#{-6`2x0IN@0CYt+Wp?yy|vw(b_I{ljN3$RpGuG{%u zn(r8HDmFiljDiBIZXhZ3$CUNs8hnF84WK-x0W(5dZfq#K>X^e|3S?CH3=0H_b*wPz z(B^r`lN~r^eR1Pc^?|~UQ+K?W4=iax7hP-@*B!Otccf%D9bp^PtvrJi^)pCs>yX%P z$qUVxlaIy-ybNWg^m}0#R1=Yh?qgw~7Iv5Pn5!g8ewoX8hlxR=j1VxI;WZ;v@UIP-(8VdTdGD~9O}-=&w+43|ziA^@9M?y})rrj0=-97di) 
z#unf+Qd$sB0mR-r0kA17Hu4`#cV^`;gT~kwO8{FE!hmcod^nNpHX>31? zTp-m$5cU&V%?n(XF2apda}uM{FvI|I1+RuUdJl6LvJOt4Of@&2o*~+>4=Iem>0SY3 zJX<+j4HYF%*?MEc2UN!l+uHmi5E1Z#a;sH_aowDFSB!KlWYlG*iJX0E49b``LWtND;wGBO7PqdI; zFbGu#e7gS3Q1BgYx_1bZINyw1KI)LDstB(q+%e4kVJbC?*T6?WU22~&;8CXEM+1heba_QY{)KBF@ZcJ zLmepL2=93UL_>j7t){h_%dvHjM~{4^q49Da8dK1m&sQZIt#guvP83$K(M@C_;&k?| zAhqvjvPMpzk*sY^2TdNDOKi(;gL_f}(b=PPA?XBADyHE$4TQoPhybwj0bgKOq0Y@T z2dUPcmbZIH4V&3K;jmCwNS|-W$1OqXaDhh6INg@xQ4S0E#sDYfglbSg!nhAEvyK)n z8ne;p!C-oLDdQ`M*I?_1M+eK}1?+lEGExVOBA!XV_m&R#&Q}EMjBO~1LYWv)Kqeu0 z`(P5U)*CHg`FsW-?1=BqL#hWr?K&K*j2;D0nExH`z7Yfkh$@{<88$_97&{g)CqX-waLZ;t9z;uq@x~0IC^WHFp zzKzoWWJ||1##fd*^y4_;={GB(rAZM`B6tpsXpg9vhLf^m&fJQE`^=Dno4~3&q;8}0 zFotzfr8J5Pm7rbiN0_J`BNO4)0yozN1oHai!u$w{fU(eKLUYHQ1iIdGAy2#{6ov7O z0E$+QnM#OGZ?;tA60ZUd*Kg~P9afe27pT%5@Y8a*8J(+F7@b90SI3*?`|!>*I1Od- z^{l!aJ}B6aO~4}Xhyc*>RQWo?+n1LtZ@r(+1AAH>@BaXPvqm659Er%$1B?=ZqeHoz z!dt0@!Xvk*9Pzf3I}=8)N2*2a?vLad%HxVi=0~qiZcHr%f&FH>7W2G70ABZzjfK=P zBDdodVIcYI3m-b;9uyNI!(VK_=tn5yTj0w?zmpIpFI#eu$GMK3r4V`3 zIn_9N;xsay^^7UWI_Ba~@_gk`QNRfiI7hPpMKJ-O4iEuAYlG`F6L{osrnxM4%&Ft; zg?uWIfC+tw&DL)T1}(z{x0jhT9!>(4M~33;l^PJ{JYrqSuu$pd0{adynh+f$u^f52 z5fgwLM)&Q`Yfi{kZXdU<@LRsvkO1vgWi}FE8c|5fQ=OhnW-&@tw`YVZ`RfNy_z9?+ zw!rQBW9UPdV;6Oz)K{khF($<;0hyV&w*ll6gQSo!9g}QCX!GgfdCS4o+7KrCCcW{E z-$^oT-SiQAA6lEbP7DjH+HgGK>6zq*t`346w>hwNBup?fv(pNeIJjMszVvAi56iD9 zc$T86wSMplq&!@6Xiuj90M05DSla6!N8U{utk$r}%j*oS4~At}qsAKHpG+17et*^q zn?3G00&VaA0C9jd<&S-R<(ndq$44F*;@u9_Q|9D0?x|#x=D*H5D6wH3K<2qS3a2Mn z4W|d=Ix_9@ON$VNqlbCC8qztKq59!H6fM-qss?E2OHaD&i4N`3jW|ch0#u+)}bqw6XiYkDQ_MZ%fdB ze~e-~7$G}nr(UDjW3fSQ(*FRH4hh*w);TP@CUg;y#kki`ENL6BWOHz6@xY~Vn|i0o z-_}MR8y$F_f&7i+b$A87r?>IE)j>ts&(20mHN7#PIY45K82G}8>*dW2yW-@NeEKeE z0DLBJ+Mh!P2(ot!mu9bAgHOh4@pa=7(1(Y^))GJ;=?D44J>+dIIZTL|!swC60OI}T zoO9`jpzPV_&+(IhjR((j6|m&>#S>zd@?h(opCN)2TXlgqwaBL-zA@TJ+8T42%hJn` z5un!FfWQu6ZyBO#^T(!IheovH2y&i8*z#<2A|fD&gM%Fl3L+pP=!A$59WbgUKCP;$ zf{k8xzWrQp#~LCmyMg7LW0L_Ik?vseU_d0_dao1nI87F7LA@L^$?A_hWI`e$cttZ+ 
zTb&RA>jUQzeIC2?m)v{GQ8?-W6Wnz29_3FI1lFC8b(t7|7Esw|Z$R2WR5It4qnJx8 zKu9oY_l45B(ns*Qtw{m4PBM<=dQApIQ+(hm1E$!Gg_C@0@R671zhB zUOeZYB77J5$sH%6W{P|n%(Qq15E>`5oC+Ti$-tV>!vsc#bqunkyfVtrQRL>vlsvN} zxC723YTVokg?h0323q`Fs`?thG~h2n{g0d~PFuKuYjyLS^>Vz4liKlZ9$x(z7qy^OAkUGT?K9vwz2lMNX zE)~ZD1La@`Dxd{WxcIjb5hPGIgwvCP6UM!9_ZJgl@7FG;c-L2Xb|AC1pptw2m=Sk` z%52u9;(KPpVFtM}Tq(Wbzj>Ki=op?*yyHo1PK%m5;7?Ci1O+y6i%+%noINO;mGj{b zyg|>pY`lBD@eW)h*ZR#to;KGZ)p*u$L)nEqgX17F55^*11GiWwLYmqBusaV;zt(AT z9qq(r+3+v(n}{{z1O-Z*Wn055jb3kr56OgAhXpg1w!jfa_;<(OUYix3Y3^#uxOwxidb#loenEUyDi_+*X#%ZDCUw73l`!MtG;&QLNnJB{{UQCy||y#46lQ%-O(hWWQO_<1?+ZB z7nAm5kg{#%`2G$`#}_9sUe5Duoojesj$*N)6AB{Rb}o)TI1bTDyAHVH`_>##3tFG_ zF=-xJo8RLsZp7Bi0s8~~@#!$qul>a#UV4xBn;cW4oCCkjGnh}qkD5(tt`S9@>jG@O zQJYZCpSCd40u0evu6W3XM1j1Q*C)$U-->_fn{e{yAOyPJ2PEsd18-o#rl8IE{?G6b z4iXD(xIt?2Ki}-dwlwv{T^Zs419Ft#&JSwdGBs7$F3L0PRamf+4k1IYSh zJ`GC4Bkdo1oC{UF;S0aK0t^?NHI!3eF^A6ws$??fS=)Px(rt8FWf*U$s=lh>s4{uT zA;`equ1O|%=A!h#RftPZ)uiqL2&5Mt9B%zuC=dBA)KCx2>3M#!N4> zfpAk<!9R$~-{--#b8!tC`0r$L0`ftF0oE_hMLh$|I_<)_^+X_a^1WakC z&Oxd-lJUGCr8(XKLD;xLpo7Ta1{Ex3L>a;$hQL7w0XhzV-bismo|G;HW2v49KSF(G z#=rLAV2vv_hMFX{*W!06=sd(wZlL01kk`Qnfgl*9qyA;pYSwSJV7l7^3;s zEus-udJX>g$iP)%ha5k?a3&%pIy!!L_k@K33~WE{{{T3YhOHYN=stJtaRQmyXF?vf z;jBkWJZp}!MXf{{c4y98MzKRtYW@S`7pYerC9ef~^2KD6pnup|$b-($^@D;(tN#Eo z66w!dW>dFb9e!~)Z2DrIS?m7*82&b$frdErl>Y#E0DYI95Z%%kQEYwJ35}qe{{Uz? 
zuz{jMxb=9=eBOYt1?xik>#Qg;(JPLb@3uj(a)U1e>&Kj)(BjdgrSxK0M!`E=+bl>z zyjSluwQr21n8UBmK%3(p=X}mHXZyrI6@irR^kO_5_TgUiU}0C;4d3GtUMacU7ztK|3!F4UZk;hL)EOo>kw2Y&VIu01!MM8&&01Ra_I2xuGXsb4E^cS=-^o zM(fgJxESla5MEr^=n*(`TWAAwzxY@_l5L15MS3eD%ci z$SN`p1|~u!0{2Q-+}Vs99ww{n?~2m(sB9-Z#5&2@sdKL%tkCzw-$R^<77mcYAS*pT z<13Un{-!n$1|e7FKh9Nok9aTEqiLa+)%@X2mj*`ErtGo%%G5nTe>jC3N>MVni$0S5 z4gT4wt1ufi>R*dGSS{y*h34J_gQw06K5i$;s)32A}es0Su=<8 zjxox44A)Fcd>RK8P}4)H%*{I38dmup{A*sA@M?)weIKie70m$#LS~c)O@u@_&Lwy= zgS+|q$kD3bP6NfWUM6Y}m#8&8+WQ$$=1OBF_L(hTZ12| zI{ss~E0luJ*s+xF6)DuEhokY7(F9_;T{xf)x*xrn3GS2_lWI;#VY8xrf)}>0et|eP*v<|)p^PIy_#jb~2kcmP`%9NjvjIt1WYQE!-gVKk8?*m)QW4|tJ{xKAB z-fQ&5JYyNXrB>yEI8e;Kh|AD^7&5{<7} z%5dW6J@9sB!PFz417A5%kG8SrC$?;X^Ivly(8>X)28Q$wT{{dYH@*a0<*J&@|f3f95`h(I%@Of{{YN2 z8brf>GhCo(MY|5Y@c#g4NGa{h{{S&{hKN$=R0@Sh=heR%1npFIubdo-OkMEx zi2a^0r5N{PK=8LCviw_!rafh7L_zz;WJnf*SJj zzu(q5SB?vsSYk`(U(RoZ5t?Mv`^ltn52*2py}UL50C4HAZ#fAxd2mhF+s;Dp`}cqs zTf`JkYmD%Lf0Gw!I#Z+~6h_NqFqu*~rtE?*J7tn{#yeGUM7#IP0?!-kIh0RR5oVj? z0UlR9aFn+Tmv5E>(M5m8X-K&YG_Jj{{*y>l57(cJxD+oZ`u^_)!cA^|^H$q7sMksG zWe8Crel9zJJ#y-PvttF{zDO*q+sqHQ6mlSPL0DcI=o|iXMTzDA0IZzSoL{5pz5Pre z%BZt$7;9qrdQ1>mTRWIcqIfgO>ezVm!@n5;k}3%?&I*R_;JV|iPg_&0d8#54P%fx0 zY!b)NU@N`%#vW~4a9yw(>g3tL9+ItH5Hk8ExSpd+$+mCCIy6T$d9L+vyYav4oK>%_ z$7tolpU!CaX+KzLKY2%^8-=*frxZvP?_U{RSPJtqf{qfvONkKCab(~7^~W~Tg90<% z-f!x7JI#{odc}sN3BK_<>UZZ9Ajd84zHq?by5N2uaRV0meM~NA)r#d z=wNAQ7=&k7GZIFZisssPo~@Xy9&?w@2}HWYK{Db?r2~-dFLkOd`z%Aa(a5;TB7L(SZ@SC z)iA#A>(deP0P`^S*)akdqjEO^K>@!0TAlkiaR4wENkuyaPBIjm%R-uFk)zLe*x@=$ z<^HkT!mC#aW&4LOgI_NhxTGyPPx+Cfj4nsxHjSAyaYj6t4?6U5mMJKb46-NdDts5W zt}e>~!a} zeP-+^!aWmaD<(9`yH$IufFw~m$TS|8IOm^SyaHrp-b@^N+}P(A6av+*dUsezm83m# zu(v|X6Zrmdf;lv_1EO9y`^B_s747T4Q#evLr>Fj6BzHu3G$-5n&QP`F{NXgT1I_lt zgS+up!+pHqfZTj4C+!y>I@@kgzr0v591EB27%~MnvNB8+ffWtQ8V8Ge#t1#Hu5yU! kIDdJ_5EFF74WZK!Q5W@$O4oJslHa~DPDJGCbXPzB*@%Vsp#T5? 
literal 0 HcmV?d00001 diff --git a/vendor/cloud.google.com/go/vision/testdata/faulkner.jpg b/vendor/cloud.google.com/go/vision/testdata/faulkner.jpg new file mode 100644 index 0000000000000000000000000000000000000000..93b8ac3ad2f9c01609fbf66d0bcf884636720749 GIT binary patch literal 167040 zcmc$_bzED^w>L~%S|}~n0xe$L-Tf4IiUgM;A!vXkNU)x^6nD2kad-C?*WfM%0)$}2 zo%Z4U?)jbf-ut}r+`rznv-hmEX02~#_NMi`wQ#$ z8dkD`m%S|(mWm2H7A_VR)?+M+2amBH+;gh;H|5{F&OOii;L*SI5AS)t2M@8H->1g+ zw|x-yzx4MY|Dt=?f64z&l~gp|z2WB;<>uqXdW3})je~`KZ^6rdFMj=-=i}vl@by2? zfB*gul;3L)EA3z~S8*O5FqGTU#>EQAZSCUB<7Mf}!^h3bgC!~Bh1t7+|HcB|zwCd&{!7LF%K2B#?%m(K zk^D;(H`D<_U7Xzi@|PCjm*n~1l>b;7{oh&sXXQVZ|Cb>EY!8!G0bASs4gBA7{_)Ut zv4weBLV(grE-oM^;NRB&X#cTOxG%Ck5CZ+Hw14q${ub^VpYLy{|7`M)pQ4i`)b5?V z*S~%LE&tQ7*JpY8wz3@-)0m1hX2l*?s zelKFlVLf{I5DyRcp78FIe_i*?fAD{){-5X>J{~UqeJ1W-AP*j4V?TNP1n0a?y@?Yrrzvy=-7BTLF>xVgy9uQ+aB!2LS_`&aPEGjIl zhmY=i{$G9jZ^C}~=<$;WIJoz@6c4ceTPD_nhmWuyKl#0Y_59HTtcOI8i0=FKolM26 z!bjtutKDB76*fNJzBAZ?(0;;M&99jF^WUid2PqK^7EL%IHol+e9hMYrah|w_{VS}G z0~T4RKd>17M@U$IjV9l)3BtBcvG1r79>r+0j1L%MJ$&+iP>TO%^6&}Hr?Amp?D8Y&IgeLcq1)dceLH;+eER;|{}|$abY5+HjrHj08&>xn*8Bgt|GzzO zKAk@J^WVb%AL;*BBfOg$x3d0!Ef`_2kFW9EOrntQ^0rE$ak~a*QBg^5Gh3Au>2~kE zr~JETFT>0We=>T1 zY0Qgd%KNyKf&q(gGbomJx5Kz+e}Q2FK1jd&7Win_d9Z1+fU>Nn{L~VZI2R; zZ_cf6C)jQoWQ~FqKB`GQrMLPo5z0WH)LS>&5!W{Q4jX#vFABZ>$c>m;N{l+;nYcR9 z)bSoA7gVMnTO`^fbu!4V@GZu4&LDdBAp0o;Wq$LErHa|Bd$$!z!-Dzx`GAnwaL)*7 z>Jte!M`|)mmqDBhYlSi>%$>^yhBgngj#I0eeA$556mm&lnA}`8ANG_yh@zUT?vR*4 zEzCPE)oPslf%WG7$3Oq$g)wt$k4k_PI2sUGk5--iKj>Z@IXGHq^e@qFY2~c7h)|eU zs|-K6C5s-RkD{uW4i`q28Vvb+wv?R-{PZL6v0Q$YH(ESX=jAcHH;BkuqQ2a# zP&7aT)nSQOLRmCMs27+%$C14ZPdqse8+L2^^ic03u!Y>_8PF3tJsIY;c$+2^S?rtw zEgCH-EGwC`qO|XS|F6=KZ`?Mun-}bcxC?k-$;1V1vSiLvju-qGzEgBL!5KTe?&Y7@ z`;ikLOKq%_bDmU0Qoy9=Jzc%CC@u5)N-Jy|hM4#Juza!yycr>Shm$pp#kNqH&>Zuiu*w+3rnEG5Kv%qIb93f`>Qa&|~XXHm> z8FbglB4lG1L7`v_awsa;EUq8K=&G!6cSx?3X_#&v-+q!k*Fa~@#`b1FN;RYOy(bH*jR@yl>kzYn7CBuLZu>LvHlL;I%EI@w;;(RwF5)VN(lO|U 
zA$5O@-oDnx6^v<8=xIfE@r|?AkVGzZaaJj0Wi)w7zkm=hxZU^~Egjt`S3&^`q%U0L zHVIPKH1h%z1i=t1Oa58=Zr;5@I`W_7`@O0Bo}L)Dr6^{Tc#l$~dMX%{Y79>d9C{y1 z-YrhlwbWew#)`=oR-fAy))}c;&J^mu=$p7s$wrlJBV;5hwy4}`!u_+n`E^eK6A|3d z(8`u3aSIbf1w&oLD}{(tQS*F*tByxzgmVL7QC=`Hk{}m~fX(66|(Xeaj zG5MxO0vhLjEbYFj-(h;{pKr8OS2Qo(6d5X*o6Nf5`WuUFE_^%2W$Hc9y>4?dOc*-7 z40@s;=^TOhICIl#WDdTaC974lEl}-ydF^R)25soQL}$9KxN9;-WgQGRn}-;Du0ToF z=Jm}+cp34YAXrD#m2RGUER9SHq`8!g?FlAd5En+!N274vgRO+Ba7ukT8Vf5~-M zeVtk`Dd|C_nQY%l@`GG5PqrJ+reK&&c{J$F!rX*xXL^U+Gu8RzYmo|Ks&Y$RXBr`Z zCP*MV!hEiKz0H~WMV*NGi*mx?<7R%yBn_rfTzWWdi?p*XNB8ZY!-9K486FH_TaMyj z(9|BrOnNfAr0^i+6m=Yxw(FD4U&&q_>Or7dJP32;h-wi!4`&<=~t0(;n%zl{yJ@>+q?Kq;aWXdW~lJx=d0P zByChZh&X^e*q^xNElpRL`y$9QdtSjl$HurcX;&op}+iR`7i;zX$A6?X6*zuoZ|+>O&GE zsB=DnX-U(y>k#qmn=GHrNz& z$dk*?6JLpe+o1*JD&+Spq-vZC=r|cz5N&ojx>UzIw^_rs)Yy|9LLR#Ap}Jv2R@`%1 zBaf$@PKt!~)tn6!<-fGNrh8FBm;<@?7jlYo%IPkTO=Wz+1~T3Z zNu#ra=_Eo_)>FIj{gkp>jTHFRaMTAWBm#ap1lYI}pk@`9{HKjVsX7;Yv|r@#Jnd9d zO|&rqG`-yZ17POoIEXoG1gO5xvzdI99l@F5pJEU_)665*4XGV)C&Cx5C2J6|Cet(W~U5I zrdN3z4L$IB%wctafe&zAC!WBsJ4ICMw8i#$W8?Rewx_MW)9H^H%-oiN=)Xe%`O~6k zqJ>(z+1$A2WyPa{6g7_dR;%Svz!gmV&n*3~fFuu4`hxcMT%tnvm4JSO&qPs@!mC`9 z8csT6)zfOIQAXWr3b=?I*3wd3+?db&+~FX-`h2z_@5LrSAATbsKGP)*8EE4UodyY{ zgt@6?Zv(rzWk`A&v)M_C!_=xjJ})F{!bm1t3kuDug#1tI4NWhc$1hgyw z1Y2GlA3i;6=0lDJmLUWKICgByERoF3EX0N^CEkQg6edi60J;Y;DTUivnyPjqhB9oG zFRh~#W>f+Cp#5gXaE(+`Vog4eYe0G}Q3Fi2`SFxZe1%7&%CjbbS2k>pz38iM@;P@K zt16w{SG|1R+uvBy+n(K9?ysgUYdaXrbMxRrW-Lecu%_?g0S6=qs_`A@G}wVGP({^A zKOU43iM*VqGa?+xGo2k$9Vpn|VCNpWuYYp!k0lEgW(v5w z2y5sPu=%)4_}pnN>lh{{vntfb8S5J39uFSQW-Zl3&J4YZ>&dLnUxu1H?6AyaTcmV5 zK8JM!Yw+MsZBmf!&?C$my)3bYj9k zY1=*~{4m368B?6zR0g4nkX!*fyh891azv;ZO=0puPNLNTDFrXss=G16bz-_X3PvI- zkJa>$scjNm8PHhu>b~PVV1Js`%W3Vln&Ap>k+gUK#gJxg7C1%oP7@+A}g|=)c^9F+K@N4*7n31 zfmqIUI7JU{okDzN2z+@0i<&>>2`awJ)U_lxo~nu9_Gk}y5o>TdVK^*YPhvDUJAa*+ z!Nxn*n>7i*2;0Ov4tcOhj%VTOG7jW9*d^#0E;}bC_C9%K?BTE%5h>atAkv`SGsn@V zc=j~uDu2;onM-addm+}o3T@BVTp*{!BF|Nd{~HVF7qm^EQmVdFXS5SNaH88VCeb8( 
z_Jvs3);ez|2xN^aG`vX0V*>XA{nbaq-JKuVtn4Z=a|5kJ7eVvS@Uf3GI zxsgx8L8Pe+ce^-ZdIm&m$Ge`Iu9ae0YK;tfmhHn~Sw*QJIHy0Mn;a_9w;ljkXnd`!y`JZ`o^7uMbRj+RVK17JNuRdIxy{ zTF%ZP*=ARdT8+vvbXu9nDVkk|4!jlLG zp{CHeUKOr1-;+1zyWf3MD^ydM8_o!G%zo;fm|k15jA3}@y4LBLHjfUAO8@L$;oI5z9sp(TS*IN}r z7!_?!rs!xR$mqs%uEd??_?PrQ$Tr~Mm$&FEdamVLsJ5}LxZ69OluU2!k`?F|G)9o7 zEV@8!Wsl5En05TwOhz?@44dZ1!MhDDxa9GiyF6SZ^yP%$48XC$f*er zj4Dq6Ek2HkxL;gJTH>n_NSkf3-Pm6JXbiHQJb@csy19Emyj-~9TRJ-^^|hBozVI^@ zU@wm^)FT0=Rn(#3n`EP4q$kM!CCTSNw3uFFmcT&xeG#UFbZQIfu83@{IZ;DsHP#FA z)k;$Ph(Gz2YSO#uCKujTJcn>%r!ybi18R;)_1HmqykpY_THa*)=3Acl#A)ABl-L4%u70Gf!=M6;OI5 zV~Q78KAVW-^8GC=3i@qwt1$8^Y8ye@}r7L`u3 ze1P7})I1w>bskhSF+?jSq{va!c{@4|rT?l?of3E4ZOaeI7d;V}2~)#FuCq4yCO4e_71uP!t#`%cHK)<|#PS z_nC~@+yN&XTa*ekLWK3RJRRB^r*W;9iY za+>Xm6xD^4q&u27>TRe03^m^RY=-njX*vV)fG&PK2`RJwgHVc*1OG>@O(ok-y*1Cb zdelJ1M6L3Z3#TZ|UCOQzB85|ALrgr;q32Z9^ru5PX3_}m8WeHFeq zl6p6u6X69*Td;thu@aR;GJ2F~az8X%e8xLqS#wc0h746hE z(N`a^t-OYqD>dmudX?N;pq>==7&T_$CV`rXj!GXf&kKA7J*p_-)+9oZkT{djXu_Dw zXh{TA$9FRO#UysQf1d8SpTQrK-=h*6+^t8OE(?7#pP|=QW_M}TxIXY$BwS5{MEi(? 
zW=)2tEUpC8^jmDJ9%o#@aDu@x>QlOv!i`ivY>a!dt~_ zFtZ$T*`z;pzB5dNU#M5?;0I-iZGp9O1Pc|glyJepK1jg@Qhzp<1YrTXi) z(absHnc_$i3x2J*9?ay8mfqd7Q45}97d~~~*8N4KeYc52p%66nxTg#Tcd;ekvZvW( zp9j~_*q3CNZNbeVhx1d7)5YNL&J$k2Z<_RwXLZxM=>pGSpbi98`0>w_@f;E}B$;=w z6?$aIVLp`$Wq!I$p=euIH)(!GZVJ9E)M*hq{pl%F?vAD)QIr;mL)_jBoI(+x1wEja z%QLibFLTxC3ZC9&RK>*gqIj96g+zsE<9DbY)Zua|kqYr^_rQ>fCwP1v-FN1hNFkKY zFzvQ1fd;Mz0`%+BE#gusdIQPV_fE@+^)`OuQ`9BB{iv6Bg4}=cs3t@ER8Y=7VZp%o zHpPr2zKk4W3^$nt))7x?)^v}`l-;yM=!ybKT~o%lSd5jtjk>7gix2Xw{JFhxQx;9EW z#>F{GKxoIkYDU+FHMjx;YB5sG-(pn@2O+@ib{#qx)r zm!&qNtS8_pQs!A~yKl41KBKe9Y=w;%LLPa@jq5}Wf5QFKV2)e^J zGcS`t<&H@F&$9GYCLFf2`2H?U*TBA^Ol1z)a;sm1t3vN4#?}Fo|^dC+WFv)KiVaD#*5!w(`b6G>{%I& za(H6OlSD8yhpDgIMaKNlA3wDzhxM@b29`je3F`m)dw%kBMQ-saf$wviXIJ?W=hN7vvldnh=ZdswJx z#ly?2ee7Txxy!S3=d*pH3&|nY`|KdMb?Ge)?V0UA*-y(wokCC35xBKU7CaL5udg+e z`Kcc=F}W@!O*Bl1qWBiWi|0l)Ge1)wa66R`XZp~e@~gY#LpQ8b3G5GBuH6&eY#YMH zO4tp76&YbL@%Sb_y{&{cr-MM{&z_X2SL-NKHH~h2`#cih>6Bg4>uwR(X&?R@TCKI1 zqW+&WgH6A&yz3f);H)3~Fv)?)g&B}&fH#MaX8Nz-CR)(N$u*(5;ifgJfN*FMq)4Fl zePi*NQ|$Wgx_DorexOD4{$ti1Mf$zCs&jxZf$gSQCm)Z40C5=6X4>K@d27iE}k zCvLFtE4#SnZ45QIYl-F?)-gJYDgwf=v#-1aM zSbN7l<}B07YglBf$y?0Hef2COH{v}q)WWwZ9EK)NDmQDO)il+A1>%>ITv^aG!cP|d zSgY(8!Er_45=r%a?#rD-Nj!$P*ufrCTFrw_KR~cD2iCzC^3_9!hy3s^qNotpq}na% z&^B*@fbI9CP)&va>;3HIS7^0-1Fg}pcOa`1Z_=~u%fV}V{RNj}Gy*7mw>^^q`;EoF zII8|D0GR4G;wbFZ2@)}GX0Q}IoK>VWZ}pEq@Qo7Dk`hW1>L0l(_cIcQRjX{+bt#Ey zhofq{oCZ9dsp^|{9O`$)`Q7E(M1bNWsnFF7O${TSRJkIPVWW@K5v;A;>%L;+i{<8( zQ}|nJfyVQ?%p}e=zJ{w8;7zqwQIw~{kLN-L2qW9jB^1Xv=6*mgSx-BQpq1ii$uL@V zUx_P$hErc;uXKn|U{RB{5Ni>fYTg&-T#RZwb(`Je!z3GT~=0Pk^;qBOynRm-E z&83JSC^5i9a@=REniN@cL{Y69-q9y)I-3}NYVIjcD=nkh07{_8^FVqU>}wwBt;DUX zosCGQ?3qjO@^o{qetBwNZ76Cv3VSSm54dTI;U zT&`fZoPJV4?2=_PTvY4LE{1$eargOlGA*{jp|p3etljj-bFR7z^47(OGk$+&B#6t5 zLgB9&+{hqPJN(#8LZ=s+==$aUvbegFolaf68GqPbD|w_et|;X~_*_mR(CR`jxCif-if>P{xmFcg9ssxMCM4YM`#L$Z8 zheXb7Wb1Bk1B?&2wMObbJ?sk?*H#To?_@mFwyjTdiF&AOxRy1UZywjSZW*)e;{{#C zhN}Z 
zUy`@&o$*-O)&Z81mnq~QrVz04iuHK;cNzZxY%{jr7^F$8dD+f^-iFOrn`oByy?q${#c{FshvQW3+dhOr zGc{54!N9|rB`9)0+v%k5<(~Wg$$A*cIHt)uLx7*wWJH#_*)UFQEo%V2oV4I|5%MGP zmF+MTYJpyL)W10M|9GA`c9zJ>ZbdQzF`8~x$N7?QV;`2f;4u4asDgIX*J<{F(XPVJ zN1Pt_RZz_1&Q7f16OB|0j7d3H6W=M9oEZ{t5te|l*-h|!NuCt4LS)N9^X=d-m5xQh z@v2)tMxat;=T5c11Q4DubSNTKHRprUI2>d5z1;Kj%9%i`hSl=HTFdAFN8Ry`t{V;^ zDuTj3E8O1AX3NafpvXjuv}E&e%cabF>bhA*<2q(mZ)KG29j@(e&H^vp#z?GDZf|^T zq>B`PjWqPj?Sf_|hpVShN$w0mV2!vL=<6$yjdn-zz#(Bn%?iE|XMs?ts)1g&_MFuQ z-w4Mxyn2D#d6GkwOtGSLt@R=m4%+H#-ES|5Dk=-GhQ#Zs6>T}^#ZZXZ`-^Rkb0tF6 zAw38`Y6jEtq>@CqA!cuS(YEa=#(3$<(G9fsbJQKWy%yJEKkWXq#+$C{`eJOIsReJ$_A8sXE30nt&RBhg7}0?7Wj zRF}|MEpt(k#gkE|O$k*1B=ecwQVi!5>s)8$DaC*Y@+gCnRAHqYequ&i6X=^U0FKBj zN=N9O)+F*Jvu11fo%JG3n`;oG)$l8ybWcQeb>6M<1(zGD2Em|LcY@4M!&Eq$Xh=|_ zPSKE2%4MQ~hXUUyHks&0*a&7owP6q|HaECT{4r)4)t)#99Gafd4c&GIFU;mOQU#?q z{^XE{4NBPikF1bds(EXC8s8kum`BdU_iu|Lrg>}Y7ckbiTlx$0Ha(o^M-2k?brh&A zWNXUK{&2(U+`>x@XNRZu=qIj4`TDbvcrc;|jiIsEe7}tJmXTD=UnzA&)wwtbDWCRC z7qN#`Y0ddC?1TZF?cBFU)UfBe6KohnNt4QSk^%Dedf~N~x0wth`70idi8fMdl(i!) zFSqSo3Xsr_v?~{SkdCf!UQr9*`_=&jt%t__oPJ2#dVLHUa^JKzDbx2%j zcHG$|k6%e7ab}hbaPCOy0UX8UK?7jf!)F@L3?5~kdXpqq6KIuaq$lXX4AFaz&Oa

2*kGV{S_o^5C?+C3=~vo9Tpt=r+9lgWMQa*FZhoQa?4u?b;{I9>=md_=PnTtLCq z21U>m#1TdfuenY(rVhhV1#Q){O)n$0a*J1rw#1?`yrLdK9n7I zNW)~JOI%*n5EX4+c{fMi>h}5j-KHh6+51ghd_-6eupjxFy2KkUKdhrh8RKWu)cJ?#696{>Y<{Tr(f z^&4xAGj`SSH`aB&?#8%Ky_mu;6ZkG|U$D&2Y;{=}rvrv+@Xa5^?jL8bjs&0z(_e5g*o{ z9tuBdC#F$7##M0&USlG@GRl7w_vi}1aoosdF+kP*D32m%fjyKyJV8s06!9BN=k3lX zNnjo20BvJHEZr-1kp#Qe60Mzm@&Q5T-r$ZK&8~^xSl@C0p`Wud1GeVQFYdo@6;+Bk zY)beF5Bs~MKnl7lfE|Hfr*7-AmEnQ??7cj|}$uX!8@eWPUn zW6TF~V)@GCyYI(%L2gtG!Taw+Wrc1%Z&8Qahw$#k&$e&h?XjSk3H7 z0Mx&+#PlwgkkB$fZIl+j`fsf0A#xM6`thyxhKitX0plF%Drx}b^TEJ^{1W}8wSlv( zkuX|_xSSaKRy{zIL3iXyB20o9g1%FZx3)EnUEaRX^qu&e`=(*T zn7^LY7Ey0~2e7+sM7bcRV|RU9W%w`D9wj}+?e7z;!#&1{VSsF9@QZpNT+df^8|McI zfP)ifvmCar%T%!T!K2s|V$EK8-ET6nBmjXcr%x)E4#AKYBo#knwq|)x%v-0fVq&AJ z%{OoO!~n7qe)%oZHhDwE3|F+C$BP96?0IRIV&+PfNF(j?O^OhbD+z*V$EfJV9VCHi za85-we)8vJJq{>qZjJZtvEpW2{7oa!;B3oZHdX>vB|qz%y$~{d85h&B>1UWeq5Mif zz!$G4naoM^SWIvcCqd;EeWgCz%^+`rh-4ErKXs359A+?gDHgvnAlm8jer+?o{HWrV zKKyMtvzbing$vWcCo}cc9d>_y52m!oX+8sjsM08}NaBJ69(_qQll@vZl9CjlsE>4O z65T%4Mh!t#acgl-K6=__t%;VNZbTzV_2@RXF=j9iY~l%bQ59*S%Iv?U`i;d|EfcO- zvVmqeSR8e<)-RrHjexow?3o1~UUIfty#Oc}ihWW1-Y|j7ph4Zd(?Cx zMa!(F1GKIwah{oww>zfCNu?Bue+BFWQSJ=AgQe%&Br@=L05vpW`k=@m~WJJo-L@UtzT{) z)R?y2-FKQ5C9$rlkS%2(W7S+*(yt$+^rsEhxYda~_QwRt&^W#ZDDMs*>;i9%M)jmI z6WXH2jqUYa&pRT1&*M9OkG_iNrs?V)j7y^t&3F{c{$>^*oPH!vGZB{5nnT-;b{li) z_jsXg>LG!X^fcma#Xq5STm9LZX7T9_qhf3&R@_qr`1%ep5~Ox9ey(o<%fH(vt2%@UFm zREok$QbV59LY1u<+}_Wj*DMkw3a53*$kG)x$c?ez&M1!5O9B|9cf9)Kt#PcBiWZ^0 z!pWCUdi{%I^`}yXN_Gq^#aSy0=LBn?y!{wXGd>;(Pzbqty{>rhgj(werQW$Oq%}v& zoekHKUCJ4&A=^<5a*JBP8%8`eaIq(m6HWGnATl)~sn8FLCCxvt zpurLSvQa^iJ!}LI=vFrHkPm?3G7xvpGPzE12nmRD1K(DYOAzQX=xHmdNi z2lQ9tM>_=!a(1ocP!?3)m~QlU1U9~XtgR%XoxgEE3%n_Q(-ELUN8}h7N!&;*?-oZJ z)M_^q`F1>?S_Q`qWw+{VHqf_!_Ugri_F>W55+T&D2NAxTt%TB)JlTR)}0!?nT6tk2G{-_ zxka?E`9GayM9wQYDyYy;=}95vfkKjADpr`20Q$JFpicMp>(0_%%JL{IHnJ_6y*N$84otv4Bt9$&D>5#Q%xUwTmL;aW8Ra z?VeO_gemASyU~qh&ERmH`cnG{GfavKkhJm_DDQfA;Qs39b1w(B!&5Wt!hnP^)k5)< 
zI|=MsLJX>ySlf+>?^L0*F}1j0ossrDV(Dpl;N!P_+9bU0UIzi? zmE-tDovu{O0*-M}BNcp<$JJ^(7=EsgH1Ma7;fS0dQO6)n1&YGHr*Enwys8O_T-VAe za&X?9Ek%*%4lo3&Rhct%r*ZAJKsQ0ME%j*oGFm!z>MvebI7ogC#TK82xiZQn*f(^{ zpwO0T#NH2XEwPO$@R?JN?dxXO0bE4BD$kT-gC=7E?0zwfQG2M+*+EQEne(&eLdr(X zb$u_+@MlK)1n3rXx^=J7w~Jullh!C3t-*o3M<{f4q+We^tQUU(pEEeF)`G_ zVYAd1kLu?Y2N}Ccfktd-#`O1LnLS1j7{w74N1L;vzHMGjUS;rfv4I;94vjAQBg;cJZ@?v{NtUosDhZs|Y_9eolhV*#BRqW=sL(2Qanh$%Y*n&vRj9R0?Ee)A%M z3Yk;2oSl|`2XnQ%0MnG=G4iG{qLFcsQ9L8p;Rwvr$ zq-K)Y{fH3YL8WSS#T`jy@;q&kba+ZZ|LYTd4?z4^F%2~*vkyTolLE1d{|^e*R)3}O z32nx5g(n?<7^f+|H$1ug^C-dbJ(TbPms{1UJOg<7j4I#=?|WA|{7J2KPk7SLu6%u} z&qf9Ee9hk(CV6Wk3P0`nS}FdKhADDfh0*-9-TM|0bK~Fs!{9?{@*RJhmG`1yZNf{E z#$U*SLyz?i918sW$$CPLz7d?w?v(Jx?Zm>W^@Ax6(vbc6^C@ zc0>Lqg^w|S=WXNs6{HbFH z>vt_7Q<-FZx0bMLN&HlL`F3A6$05$3fAF(0pFA97)u<1~+u-V32h7S*?&vyf8u-e* zE;lp^Nc@IL5-=h+Y@Z<8wC{uyvd(VQJU~$!T5CGszuP!$tvRh200=QIS_jGp1q9M7 zhUL1UbRR)f3*Od^g>+TIWHg$5zj$UXIawR!y$!qk{y9HgNb_2yZ=RC%B0{cT+*w+( z1U*0zgOu1WmjO8nXTcd4j9d%X%4gjMr8Wpieq-TH2oh(1z2a3M)1;GfHfE}Qd$zuq zZjrv8@Fba%2>k|4tgf8HMXtmmW@gDT_w(gqjAF#K5^cQ-@Cn{{Ft4{u`y8rxT^mR- zz9%yp$7I)A>trS9tE4BMHS5o3P0=lh^R}oxZ3q5DFhPdkm&_W2j{LWdz5-OY;$56v zKt!rOaaX>w0*K=!uhfDK$^c3jw_+rGMHqF|wIbnYJ__7#)Z5719XiH&%3K^g-mm$+ z>*s3hdD(H7^dD`FqtWbT@;f(mjUc_D}ON z1nX+LQJ&_OQ?q~Vv-$QLtAeJ|klOWtvlmBR={5ny`C{)dvy#J;(sOlIW6m_k12Cbw z!4T&%kLSJc0-MQF;^osX!SFd%KNwXbH3ky3c(9F$F zYL3?VSz=J)##x3t#=eo$*nBFyx?g!O!jiK7D)Lk77o33x5f!mb+P$rgnYQ%$ z#YvvDcli{tkrs|TbmQ1PA7LMkLiv@liP(wn7z6bp+B;|?NRjagS3Yqq2g{w+z2!2` z)x6?1N$=Vv_vK{tHh(F7{K3`PlBS^YGx}&S7nTaOpq*t>D(agyJyr`k^w46n#>`+> zGj2fW_-6aRt%@9SlD8} zv^a-hTj^bL$ec>{cWZlHyTRSSa?OoM#tgY;{tQGY&T!GGF*CBot zHIGwwXU~FnZhm}};NXu_O6*sO&7#(`8xeuq#crq!scWD3h~ehR!X%2Lk|sw2 zDp(U0VpLdO^iKl-;jnc{d4c`uCj-TcV9l_9;Tcz(vYu}>n72#IyKPXC$&4-D_*a%&Gy)dAxB<|`FeH0Oo$=!T1CMsR->M7 z%H=rZGkPfNqo&Nh?VxClgxgE)$5wRMj$bSc`%AcOKmZ?zN#0z6#iNyi&-RAd2mtwX zi8X#k45)=K1$FO!8+{1Um4SwZ*xvmeB(Vf%xi6;$A{j zUiw2ZCJCXxOG1fMa&MpD7i=8cdZDf@#*Sqy*u{|EujSn0Wv1$5(38A^9 
zXYBhebmhfdp2Y9eS0>~OUZj{Fh&{~{+4Od2ny9+2vv%sgoPN<59W&BAksZ2+JK_Ge zl7_24w_o~27Ywea@{+gG7w$B{Y}lk^?=X^};{NQA;q)THDiZNbzt?d3={WKWX0eIA z=#k*UmHDf%Z=aNvkEd*Ycy4k-q?h(vl`N*&d9O(K_>c=mOAck8goC7v(_*I)KbVCf zdvLKx38oC0hmD`wwL%A`GubTHG>plOfhY6F|aSVV^C6+oGx_sra15^KeICw@rv$X*rvGg0O zF4orFLeu`&42qeXG*?um?LC~>@}^RBBN@U^Xy5m}oy_?};QmnOl0tY_?|?t6pJc(GoDRKV!foU93StM*iibz~V~- z{hN!JMFo>;pG&>Q)G{0$5#ZU^AA~g>75(-$71BDxCXEx=qUP47HdoZlC8I5Dc{?WQ zsf;~WF(o>k*@w45buF2TiYzA3XVx_(x@?V+R1DdB(o#JZ(^2a83%$T#fWDe4y0no@ zyIk^8slngs^?=QHN)bv|G5(00eXzXTz)m3_ZwT{l%RFbC*gKeY4(Y-Xozm5EscPC` zzW)&U+kS($)cXt{@Wv^dY$$=Mzp(~E+?JMysdFKEhWz~WQ_Adg{VntLx@;n4O+rSO zWGmDIhMX@uSkHm4Ctj^yqIs2U;$)2mfc;_ph64jeo%N~3dFoVtq@&SsR~jrwQ}tRI&4yDi36*8zGg9X*`3oNdLi0?@+mt;IkQ?1m90U&>ZVjd z9aIC=6}81f10B|J;&JNQ#81wJ7>)-&%>7IAeajNRJ7rJ#Y>F`8< zZOu_LIId=2b5-fjCP-f2U`bP#?=p(_JPKuW78qg`~+F> z=3pe0iii3MMa0`;p{^eRDnwN*rL1k4&k}r?xoY@kGgUnYIS%O^KxB&r{Zdzw3!c+L zOM`L@Dh!$fGNn5pT>!`LcP44HE!IE#U$M|wFyK=gOzWY zNVqq#LgBC9Sl08yol@$|CimxTei!?hilKJ zS&bb5igJmndiz9Tma?_wR)Mc~FeQUXltsdK%^{ufK%9o5vfeeF|J z5ETNUH%WoeFG!bi1(MJL1nE^F^d<;M=Yh1)LPAsOC80@^grWk1AT1zDXi}vGFI|iX zihy6&GVOi9)%enJbH8(%1V64x94qTc@d!k5vNx-qj?k`=@DiMmIOVde;B2Nw?-z_*K-a zFOqK3@00)^Q}uOT+padQGtx>)txp4-DM1@<0e>G#^?mYZMneH0jU9>2P82KrVc2KEg&+E&M;ss0J zzKS<|s`Bd&sDL=ygI;>3%BA`$w$CeSKoeZmxp$FY;M&F^!^r2axDMV5OW~sg%{n_O zdm6pBp_6b|qR-|{t@WE2R^9D8(T9E+=6XD%>X1T3T$E6t~lxP-G`l`zc_e#6Q}$zEtqU z{5IMxyuvp_qOx2Pv-R!#HdVTnp!A;KKh#7xSexs8gv9-igKRmkt;hloDrvv6P5~W75qzI^BFb7N6QX&V8>YtOCmzWOkm%XS(3^cW#pLt>CZG zar4=w4!LjZ+VJU9o;H!~Ki^C!8)0hlTlY!vnR9vyH4?ld?9RdtPumo%|C_)jyCHNB z{eBVLx=hdft1>>mem9?C>kBSbTnawwiqrg|lc7iycGAXWKQ!VN4uj$(GfU|;;*Mr@ zs{i~%*0$i}Y~_^`K7Y_R{AAb z_aR^HzOw(T<@L?MD+WoF`%T56owhX|zp)R!Li$SqwW(9VZyI%-->rdL@xwa?m$2b@m~8>v}2 z*YFrc9f>Is35ZH}SdEP&LSy8vxkT8_q^yv(zovY@Nceiv@ygS3rqAdQv~yk7XuN$Y zM*eJ&{w^>a98$PpYHj^aY&KOrSw+iNaVKrQa%`k>-alhT$mQT}UVc#v?}M{Y^lwC6 z#gN0jHRb3`o_KZn;cyV{q8iJ`g*<%KND)3S! 
z4K!vgZ6G^+dc#&dZ$xXkBe^SlW&T*ke8Ch^d4#rwRUZBDv>ut0Mztnq3!m2=mBDS= z_1^dCHvj7j@}}f}nNrPo@uNlFvMJ;fFHwxBuzL6g@FM&+9%89YTAy?vtp{`4^7=&7&Zxj>SOO- zRmPH1)SIfp1SfW`Z6&3MJ{OrD-98dKJ^d;B-tLjLuDwH#*uA1Z7s$`Ro|L2v!aEgc zfMTP7i4=;qF};W`Umy}kt!u=ud0vAx;+G0kw>}&dUehUZLzU>`ukHSL#@+VJEa}k? z^NSp{rS~)Bzp0Ucgg#!`u6kP*ih`Xma+ur73eq;CAt_q6y~sIds+$9MR0`8kes4en zKCCmVs|l+a*LhymuwirCG| zxtxlRJ)Q#Q4C|!15xY91`J4}=FI3=m`n0Tr=}@qhQFGk8mYZzlskW_kWx%@>zh5`R zbRUSVgG!?qlm|ks$ddeoev3+sdm)0Pkhum+=KNBp8zE105_5PRE?Weveb<8(p$ z-!Fc+DK>I%r7KpN%KY=iH~wE=;+MrzL&ul+_Y+a+z4ze?*3)(&T@FVY#X6rr5|t>9 zqn*AVeE%&30<^jub6$9i{M+^78mRuIOlU@4f!_Q5yQhSbmbS~EtD5dw;7c84LWfSK zUcX*>SMdCX-eVqa=<~nYceiHnMiPBwHpUY#TgZiA4bOi|X3SI$g>N)k2SKl}6waSMjWI{^8#u~4HTpwT5X(`3*=e6m3Hf_bsW1QYlu$6dIbG3+#zGfPxg+O? z$pJOkjU`VPb*@cE&yjbh7uUmg?5xYKi*0Q<&lO4&vVxUsEY+!BdbKMsT$V~cLvg}p z4yc3O^V9s7!B=A2NcRP4IH?!w1+Fe;MH=s)K4Gya+U5q_g|m~N_xS6-+yRf`F~s<<}Y;r)5ESIlJ;5ko=hKe z_!BS_-tgdWFU~;eGE5!_v1^A(_4#z(=6c2)?`cyA-^c)i*`h($6(1{^`=v`_2-AjJ z2xn^RaPC$Gyl{^Phbesk^5?>L|DLIw83i_=ZOV{au`~O>OkNO}0sm>Q;Z2Uo;w+a_ zyer#sj$1oXO*0;W<<;IZ*Ho5xdMG6kgC}dc9Y{Zi=_i?iY?}3XG6JFNySnH>{Ip3tQE@S=~9zky= z{qjh6afN8Oi9retJkN-yu~@tHC@&EqI3U~x7SWQ1H&`bcr|FVmE!ciEr2y6*A}9`k zy2<9`<6NZJ*N;B96e!4223x+#Lfv~x$|w)`>1-|WJLN{{mmr!*o{{~}0YR`3EV3fcqMiD8H3F1_ZK7N^ju z;hOlqdw6w~V~meTgFom}_9J13IQ1QcufZz(*txRPc2u&v{N53ri$TFPD`fZY_M0z& zFY{OR;yY3*b*%15<)<}RR8U}zxQfBycU}NFAQbBYYY@Lpb0ub9Jg8InAzl$zU`P2* zY#us^7x?+H5BC<=r<-(&T-OzNyQV&#D{Slr7dD1+Jwpnk^#t@rVK1gqqDD8J-3AAT z*DLLLLnTwRKxF$}Lkv#;d&N&J+bm%ET>o0IdS0-)@J;Z~kAv#hSpI|))>n=UfsBeg;|vE)c->c5daYIAWid@J8Jcc^PE$V&-tPu-cfeH*%bK%+vuLgP!!^v#@) za?nf;%O0)JQkBe*?YY(`^A6izCu~7f&z9u=@5%rE)a+V+I?N+3W7N$}cB3Vq7ruJv zBFEdM<}toh5h@{Jgk3f9$c7h$kaFnu9@qj^=WHvfYgQq${$?qgA;45{c}r=56z?4` zX$^0>>+Fl`u{*GeJaqp`uzJZ=?&K2YJbhr^E$ckz9!XbJOWHBr*d+6jhsj#5$@T9B zvMBxb!xUM{D1Y<3?y8qKW2AK50 z3;awUYZC=K)b=KtT15*CNB6<)aw{INi4(~K55hHh1sv4p-?p>hMnTO%x;fiJHpcEvv#NaurogIH-1U_wU461o za0tGtrO3-%Tc8a&5~t@D2{QQf>5TZiBh#{OF1}H!$9&&MG)=GqC?Ap 
zba=XB1)I0q*zTLOoR%G>?=Re4E6P%2iDWlOax$qB#++$R{{D3Jb$s`LcmuK`liV`? zSrFtM5t|#z6#XhlrG5L_;?(@~DCr(I@hC)Lw@cQw!7k&#L^Ks=Pt4GcaPhC_Qs7CS z9jc55Aju_0hF*Gicw5yg0)X%xK7Dsk_FSii9a%0L#S=(nkHS3Yf5Xwpmg%C&MJ8c) zJGkS9{nB|kfSq)U-H3fMFfPm6}Mf(AndI8NdId1QS} zUT*&MS=y#E&h!ORC|WvISW$cc@9McQq`6Bl_i}Sd=3C%uO#7;zcBpYJooh~Evtq#B zDx{I^H*nnPI{%Pls8!hWLFt(SC+Qx*5BZ!yN~{=;#J`vLGEG219@$MBq_fwRfubh#~7gPdoR{m~Fd{uk`bbs$%`oodq@ zMi2BrsZ}^TJNL5&6W+T7>$K_-@L3jGV?syL+s}ZEnM0le<3^V9ck6f2!Vzo9XF>zO zRAG56Xn16Blrn_-G0W6s6A`A+_|q_mhI)O276XfCV&pe#@CTW3mhZk*xOolGq-Ohf z^u69-m))Jx4)xtJ8Aqu2=IUjyfog|9tR)ThS>VqFIMAA76E|1SEBbkGgmP$60Ba%J zyH|P>k8Ee|(cl)X1Ba?c%ZWon=TJ5dHoqHOu4EZuzZ(^2=?YAXmb2@WEi}0GTjQE{ zhZOy(;m+*!{L@<0 zHG{5hNz$BEN>Y*$uidFPDH~S7wMihuum5Cb%x}u&W(#mXq=ys?^8&6Z*M_-Ax8R{} zg#h!Mwz*-qGWATaMj+BVR6ULK3lN$F(ch?)&N=*`!$>O57dE?OHQK6euvbWa<52`% z!(n%wacp0&W4anBAG)BGn*Gspn|T2ovE}0vP-$+;M@;*N>#ia)vK5HZ%qY-ael!=< zt3$x+D72ZL(>-XDR%qz&%HNtjILcK2U2^-Fo9 z2?n`VtSNmQIrwv7c)crF#|*1Cr=y?rjK@J!Nc5nX=$z&}TiUK{1D*pl;t-<><3nU# z4OV)HXp)Q1EQwODK!hBe55vQ?h1;EXbGz<=*##?u$$tlH-rD2S|2+I7MRzJ~I51`R z$a=MSM}%qfH>n~w33r6sNWX=oX$JP9)FT-|@MmuI-TF=MwINySwBXO_@R{bE+5TuD zg&Ul^UwJAZW}7xcjp8!m|1luy4UA9cgc|GTiwQ}N|GD6Z5s>1QAN)4=^?HUNjQj@p zj5a#K6>*PCQ#>Z+NS^92TPl1d{V1R%Exj6^AEo;RvzhsoC*?LA?NL6bm#eUw8Gffj zKCN$nJFWRtflJ^UB}CTfMqFxka(0S*(xH-G^uY^yH1HWuJ1qKL)Idpw3C7)hhHC&V z+27gU$xTm6CQFYW94Tb$FcyFfWdDfW$mm--pSde^Rhyq4l=4MvV5QH1w`o@4@qIAd z_@I39vryF-mtD*0OrLP*W)*L=`Zb7K@SE(Mr`VDJVdpi&y4eNLbNgWrS@BgoByE`@ z*<};~QB)cp#Z*gW(77pHXm|N8g+c;9R=c!V!CmZ|cBc2C%ark)#2Fx2E{n#p>*dwB zHXzm&o;)+sgazFJA1RH#Nk@r4HhnTg+&nENMfUKTGy+^B3i!W0&fpg128%Pp3to#E z0Y<1^keE!N;m@^Cai}!?-Ob#?7{7Mcc2zp#dD&p#?nZ-Ag~N^@WJ`D<*6pj?-&;5+GCFf}}`3suDxqf2rlBBjH zf)4|{1Zj>E&F4m0$-D>~$?Ojqvmf1eI9NjtRUEl#gM&o|aF$e-I8C8+jn&8xq4K+d z$=U^E$rG`3w}|+P4IXbdtLTBvf<|q=G<0=~=3}VeD3~ArjQieCN7$xH>xv6hhuGz5@F0i#4GiOn&8M=a5%zIDxL<71 z0{aKq_{f$*Vsp#rrm@RNwXzGh2Y!4SZ`(o(Jko&9c^B#hRgl&TYE27=B@b1$02_Sd z)8@W(sJMS-92G8?2i>fW;=@cH#>5Lbul{n~GVPMprRiny4n?d*Yd>&fsyA8m%OBL{ 
zg(4|~KQk~L@}t3$ZKsCJP7jvf>etuZ)NfeOc;LHv=%4F9QsKfjkcO%-tm&6w8KydcRoW3@n-n;R^eXkzg?)D3Am>~a9*j$TcR(3EbLo1~mog7pZW+&z!WkDRDAvt(4;(qK7tTIEywd?~ zqKN=wGQv~Z4082DbmSUw2602xIt}Cc4>>I#6S!QdJvEonZSoJ0&HTza|++I z#df2#*CZ)=oBEu?VfpJ!NaObZAdUZA`rpOWqyC>nJhMp&AbyEUkrQ;6g&TcK>49Ew z0ucPyZA<*$`W87A2mc3cT>Kxj@#n&~%KR)9W_>oU=z3gMg!>_me*Ua{((t^P5s&&M zR&|P-e1GH_9fFt%h+lm{?AY9Tj+<=H@L!1N%3xbytErRK^>G{pP$Kt-W{zF@nBie};3o||6a}<^y00Uai{T zc~u@-=7{+73G8NHOvQ;hS0+eLI*aHrx2@tjf7cyCglViC$)Ux}+tpR?s0BC^^Qx$@ zL3D?5=wnF?Upw_A|0Rpi20nY$hsLiU(aJ1xi3=C!(xU(|fTin#ISy$P#pdp?AAkmY zl0-BB`i~CQZtauqPjA0XCk~=jNBdZxyN?iU-|Nz|eWCYxbFH3($PlnjQSNL*NwNRo zC#dQCI4#syKMwr{5^xOl5%LlAo}O-+<(&K_>?68VY~|(y!EcNHix%H1B*f+SNZ@LM zC95~51}_Va>VjgqDVt%D;9PW|RrGD;FEau@xf7XK@0i<|G*vRl!K8Q3@Q5Dme?-6c z=YmM6{t2In+$ZZ_Liz;xQH7QHv6OkaEJ`wBeG&QkKrSeEx_be3$e5qzo5+P;zweUY zKv9%@658XYXy_~?7;w%J_zB|PYTDF7yGK=T-7L^t;&nWbm@)L-r_YyPFUU(}1Dz+0 ziXl$!pBE_EWTDvfPdRF`i%*)c9Q8TNLnuk7+-w7j-51ohVpE|TN%ystu?X6>ixOBb zOi8f)#dcJ1OzuSY)#^0|rO%gZ7?v<^!>p!EL@U=m@K`DJAj^KYx0{IFehdmIGEWF7 z;J8;exgd6>ticnzg+Npavnv^mlM+rtW3>bwIv)<81j~vzw76(~2`?HIOA5q^9jeQh z^Mb-PBDhJ$k)YivT(1-=0@#f@xA-Atw{eFe-`}6J1~2;Vvobx!9B8(Nw${E8R}l^( zQ8m%gZess1t#bKceJ|D72}Km}dNCCQQ1&vNp84o=!;aSY%waPN>5B^{O@65HpD7)q zXg1ME+nw#hh0i};Z4)t^hqQNKYF3UrEum1)b%G)lY!k;}vMs|U7Sdb0g@4XrP*Cjb z-Bb`L8l~!Lg-I8DEdHSnULUQ~y$zLHBurH$_&uI|W4Vk+g~Z!3Th4@%PSBnW?K03i#XmPs9?dL3Wz*f= zxF5Uc(hn#1v=P-?KP=U*1t0lL|RrFB+I+l$hOh*G=&($<$mi^EwBFu>lmbKp){vk+S?77n33`hU_0HowizYC0UQyt6l$pqbwSw%>B6(F8=7pjJT^Fi*y4={Ws5EMrrocU*Zora zrHy#eyT6ycCQ1rvm*UD@c{CvSEL{BjiqAHbsut2+Ft%HOW_dW~vuuW(kj+=Y)Vvnd z4MZ8i^r)TTX2xiEsI%-a4n%d9sXLiUtzT%3`B7-hO`H^B8ctZ|8Ujs5*?ZCh5nT&m zGBk%%2J#IQ~KrcA8D!slL@glL2ggm9QdB&&kP#QtFmo&7XxKm9-DkL zDB})MDlrdorY-;FJi7dC*`IbYgI}VJVi2tJlExrvmY$cniRQ z*u-Ih;(WAFSJWA=qdTlZ^r1xHEoz83lvcv(gyjZ@T8yW{p#YCLikDwZSR$8pAv+d;{&S;MxO__S}sI0j@ZXx;i zpq&@mFw#f3800gn90?$ASQe!dHI5ByxhPYW0d9A`F;}OqDI(?7?f#s;N97jBPIVFA<(H>;Z@K zw*#XpZE0TV8oYoiA&C}D0qQjw97vPR&o$d0>{60{4aM^$C>NUd+EGRrAWN#~)tHJd 
z0)28x=St~us2Tnj?+r+qfaVzp0Ut8ioSnP$@YSuCN~j8@4-rpmI-SJ<_1Qf*wOE~yD6`ExXZ+2&B!1DKnC}@nAak4C?(5JE#84F;|((#7~&6gZXit{kj>mUwuf*otXD$b9GkJ2kJiRhsg}bzF#^|D>X~LI z^q;O9!2T{FF@iB3>V=;dQj6Hapa~k`fd?8wNuvS)ts9ZsOQNWO^()>HpnhY?-G}gn zYFQ3Yeu!CQujjT^kQ3VsQ;9OyN8JY1Sj_8fL|f%;XqZ!O$PBbHDG?GBb_8XM8gxEp z5Br47H?XdA-&4;b;|^Td$>pz#Oap&z<^#=!<)z9-JH`3zRd|Pm#4gf|Ujin^h!zdF z?=*ObI@pbm?iRj@$$uCi1HGxSNt&olhtVnaAT)0r!8={BLiPb6IRv3Fyqv&*W5pY? z3oqfzy4Te@cxxWcr&TSwAaAR>zx}tEiy;+)mY-Be4DzMEvFl8mdl~#v)yOR?+mf{4 zlXsdssoITn^k*0g0};v$gk0JCa)`3joOxw_F`x~oY@#20cE>a@!~aqALv7b9N0K36 z?hdUY-(S$)msHVJgvpWGJ+kpEJ*@=WYEA!6^&&z4gJE&pP0qP? znh|>yS4Iculuy6#v8k$f87uKIQa&@Y-S=V2%#AstYXK|fy#fbVbE<%idGHV{X<<}* zF?VWF>b_HPvGRj`z~PAe0IeF)fiaijetyZc_ms>!vn_zy=1!zk>FC~#WMJF{3zJ#)#j2n1zJ?V zNSk-t=dZ{%s*6xN8gIK3kh6m07fu^}d!PX*mdvyJmTP`(E;>Z5SblKq6$KWd-`Otp zNsE)GsH3&e&d?Nq<6D5MPJ`;i#Le(#WrNmUYIjc=<5*Skz3xGUq~G$|a!zs8?}W<;oPAtuK+N6Rl-sXAvB8@b{jSXy4y4aN88m0;5`Sade|Y}ENSO@EZBfor9O18q ze6JPX)R=6$mOPIUY;|Clpr7hJjLy|#Hf1&M$=sz^d2{sQUV#jBiw@!=IMi8a^Iu{C z;#|}Bdx8h8Z8`_lO~AO#SNb}HMhKv?LHV3hV<4Z~fKgyBUeBsw$}dUb>h&N2P~#*p z*xxmPgwR2WC{v6wogE?l@@aVns)3^a6w7*cX9wIg*2=C9KpQ;2Z59tz>P%snT)wID zcFE-OzIx$BQhfO3CHy2t^os^flvX3Z9%z0MQF1R*#bc#IgwHTYLU2pCr0`|s7Uk}Q691>I2Myvh({Mxam3iJu;+pDQOr?N>DQOR--T3r;P zy!Ok(9X#2qA~Gv^9g;TK38<)n`%}&)RNm^0wOOvmsn15#9F0MqwX}D*B5a z6<|b3JFA0Kc!vage2&4PS0<+dKjlsgkjb53!l)34nuZb|ZV7fve!5anZa74R7W`fC z(0sK}zfMwmVwOafx>#-ABLe}A7=W-H_C~tn zL-cT(G`Q$tOs98_W!PIi$ zQrsq56qZpLT}^Dm=&Z*FYvvu8aG#>JrbUZU8V=wEK>;at){Jg~M;W`U)m;glC`tW& zssOpp4G;CN6Htj%flcmP*=+Hc<&eDRGP!=I&4apba^clmEe-RBi@RsbgBE=b)DrHt z($x1t(M76zP#yr}W1t#-%Q|~aFYZS6NW0J0V~?Wog{Sz*DohJr!iT!|@ydb?^7^Cb z2FqXCtqc=XplFe`Cus>n^c`zJUvFBKrDx(<)aH6d(iI^9SHdN-l8m5C-M z!|P@v6(06&LN?pLpoEv0!$~j?b?-I?d@Rya#XVDaDFiguZySIQuPs#e_35A(*NsMy zxU!1d>+#il``k(NwV>{H- zcfQvi0JcJ4jl|!79`pX1y*|~$;wv$&fjO$9xd@1&%vo%MflHKYR*Zns;!nA@V$DU1 zC9hOKM{`wL()1}5#ioVsfR$#<^@$Bnof*b6X`1Ctp7nH|nx?B$>_= zNQ=rF8##{suR)DMhI@u31t?i%3Y%9hvq2RoM5vdX@f_Jt}Lww&uq-plC&4a_1j{~ 
z=(OjHd0lzO*gLLIEM1{5CQ==r=h~z(PzzQ@%U7m|aMyV$T~PQ~ySy{LRBr$eXQ&G> zcj-p#jDf!&Sjgxs^Yi# z00yu!6s%$}IDp|Oo^2F6tNn_aVe3b_=l>HbTs$R;{*L%p15>S9n4mYwcV&lqrS{bl zAR383zdUcQ;0ufq=Jda}@WfK;p`}5KB?c_UQb~|AC=bm-q6E#j197&b*sTq=MY*DT z>qkNlJu~+(C@sP9Wx?A*r|F+|C4oViVXECXv{gp7$Eh*d2GQx@u`)Kip593zw5h

d#T zu=PG%-hmzmU~5ZkgYZ82eONX0KJVpV=SrKYF{Um|RiE|B;W@Hl54w_Dg~-i#r#i5G zEQnqZ+<}MA*sH@AKvYG%`@h5!*~K8zqVU`+s(z?1VgpkZQ0Y9Rg(VcQz5MtDFhc)5 z6T=x8TmMTiAL?#YXV50Ki`Z8AUB=8_miN#GHct0$)j8HnwEp%fdeM6W@f(xn@shn)+^6Sl(}RF!HPdrs7swJCt@TT&u;|IIS9t+7*ZqT7xYpS zw*Ia?Fg8KeP(@J3h+KxT099Lc?(BBu5Q#@fzVw&&khBV9-h$U&OPe(L@`;fOUmWX} zQ)vW*z5+MqGB)@eKBSAMA}^Zl!-}Jo?mD5U;SB`G5F>lm`zB1vCi)|6YSJDPx5$;a z;0nR4oxNtaclxI%5gqL7!fH$${=DrvmTvsWgh1F3t-j+G%pKubCHs?PZYh1IjNMU; zS|dC43RK#fr4r#@IyfxZNpu#fz)Y>oL_lj2rJLd7l&rh>72NoVB9@_Ql7wt_9UV_v#n$BuflWMP0ZS)4Q@^4I&l>| zlx*9VG?&d->a~w2+>WNcnLyNprd9Q$$cb<13nEyKwxX==%jt6Qph{n}$MZp$jbP7` zETl*$qUh|D(@}*RGj%letSUG!e%Ib6_yy*4uHFG3v=W?;{^(6)%)wNVh_$F#vwOlj z5_!jM{O4avybS}mrz?!d4o&V|d`zX*J}>sS<+j)$^@>%Uab*?@J z39s)5-0ew}-^xcQVp@7dRDY_}Kk?%Af0dh|yc2FpT~62za87Cb-85$XGEp#^#u$~2w$i@f0x4sOaJqv%FaW)`K>RCw6Lp~RzPO8ov{Z7m zTVI)GMyGzyz2AKM9Yc((2*Q)39ceWpj1D1mt~;ivoJILaTD*oF$dGgRJ@!GnvNeCU z?XO(rTUB}3Z5zO=rIkpx6B!cd#7{mqHGYS7^nf zh)ROD(>+Lu^tr)-L4^C`M8UAyb2chlTm^NKVl={x-vV7FrvMKo{S zTurV|3vdkP0)JxZ2UJ}LP(Q=NePu?JzkGpXGADZ$vqdKv=oT@8rlP&d%hzc3n4-xl zOKCX`!C>P_wc6ursACSM_#9JC5JPgG9^~*ba(Y_y+fCwp#%F86Kzwg7MH$r7&#=Bf zA0WuRd18zD1r-@=4Uer<5$-hd9kkMOt=}lV2bEi@op2&r*ixU%N=uo@h0bfrPCphI zTo634KY`7(PZtXh?>!+2C12T{twql_WU4*pN~b8aHbDF$+%xh}D9O;2PGp*0z7<}| z4QGDMRvfk8AqEGTPUW}W@t*Rn#}BnK85O(H^?qQBjKxtCv_gVjfaaeIEe?RpJU8Pt z>f^2w3`$BODpu~}Bf%CQn5FU|)rZ4$#k`7Vrb7o#BP(_{a^=g2`z+-YGmLGiYW+l& z{kSg$T;z|3;_}--K?dCE#XH^vVF=bkNpnv&-KL>&oMsXp&=_B<7(5ae*Lae*@8#tDu|!o+R9+4c3^;%$PS|1Yzqn+ZfKYq#$ll^Ua0r0`gQ=fPm4_{*w?^=M^>x&&3hrxx@o?WX10uEkN7q{qzzG`gGd`-Xy~?a zrWW{lSUhJN4Zr&Ote^O1{->((G5YJ>2Y9ofHbG5C-nbNGjckEEOz0|CH;(T|6Qw3$ zaApQVnory4Smi#&zlY56@2AIIUOeW?Yg>}P0fW^6|1#lMQC?}RA>5bummYFjdE)QJB z4>}O$xCEu8@njGB;FjQz*20QQPGFm1CtCLtWwd4z-M*Oxqa#oc{~`B zo98sQRQnz8q%LtH+B2~RQ zGtjNEn##JPVsp-_UyPBa_{LT*kR}n$9iwPHa5C464ywsjm{E07dv-vCHkJ1mG~dmp zAurhbSdK5lGdJLVUOU749BFGhR8M@-Exx!2|y_iCOG|A1Pd;BKMWafv?dhtJxB1DFUnUs^gU4SafT~Kk&{LpPa_= zLpj53H?5+FF~jraizaYYb*J!Q<9smvAuyc7JQRyK&<{Y9t#91bR8f)R4C(Y*$O~Xt 
zW2V2_1H*f$TP+$!xl?|yV<>$x(f?HFrJF?gki&_+WbYbu7D*s4ZCbg>@ zWl|z2eubENeBZ3f-ClU9gR44Ve^F<{G^NC1Vh7^?;Ton2G9We$wls!3cw#KoO_+S? zJ{f2DDPh1MyQ;;Nb5G4m-au#0@Q`AxdYToulAXbT!bV%8iz?H4E4`7wlb${;cify# zzvK!Jb?5&DW7d+K1+@)aX5=)?n~&dg2|XTyc9qQ?pnC4T#AybRxucTgD#%Zl#B8$`ANui^E;Q??i^-RO4~M{reGZq&*; zfnjVDlA18;E8Me-#=KIkw#X?rW0QOjuRp<9KMq<|@I%y)+V)GPyHXBF$$IVi@z*uri6z6_Z072J?*48a zdCo3dK+G0^Jx7A3=jMIL+NrPTuRyTJ6R;^e-J=P=TEU^g+CX%%1HbFok3rkj;+Sc zX6gUAaR0UPp9@Q;>}R-Rm2YgiRMNjcmBm0CgnxV^LuNiMF)br=00I01hEsvuwD zRO+Rz>RE<-yXBThAaegj>6-xmy>|Nvi~hYwKnwSr+!n_F7pA?U%M_+PI{UUQa(IJt zYkK*YvJ-8Cabra)`{ZTvzDwhZSm&P$ya(97m;Tn@z(Q+rOfg-`0*Rk{wJd z>lfD+c6J4oK6R#@w0=1DbIpDiDA%#Se}P~5qKoIG%?-nM(Zx>VPY`lmVT$F!es#Jj zUsBJov5f2vVDSaF3%ljJ34ogZf3Htq+4;fml0JV3&(?IUGWt}?@vzhID2%fL#bx<3 zSVq@qeEq-wJX#OG7zTYH*P(re^8{YSMn2Ga-(e7QSKgFhm@3U#9oLr(D9DIGOSN^&WEX-Xa8Lo-^4Q6ADpR3mv z>}?4(6--WurMB+b9&o;Vj50@jEDw2e{PpN+eW1vQ1M5Uq%Wvi1`AwKpwZ|97y(MED z{QnvJk`c{&BA{>wWG`EKRo525fTr9w95|?^x~x{T{n1GQZIs= zp`28x?6a?1;pSIeBAs@(G+kEV@vZD{S#+zLD_t_3iFefEpPQdF9dyBUxVTR+9lt+X z+Sh+Z4b5A|?n_JLkZk*-(BVU#=4XW67ZrMo&nCIduicaveINPeKNc5?nTTcl<(izO zKNnK|To}xM?Dey9+?#Q}dyA(3sgPvne;U^~-)*S(bCvyVcE6p~H<*)ag4g11^xsaA zmU%9_)~hkdv`%FMq~lA{RXpsVYnvAP=R*FrmU-8~ohd%`09D<+ZE$o1G8XbtKa6^N2N~{ zE_`JtLaw<^#$~0Z$?hUu!gPO#R{l;+JGh|#LU-=yE@Sh`m1l&4n_74N=1qKaW|Hc4*i7E$cuIzbV!4Iq4u1BP^7phXX zO4rivP3})+8d}Ij-gFkfvio=Aim7L_S$%6%CR+--lUjtY<7Gmt4N;=vYmhcc^9*^A z_rWspqX8BNHBRGLI{dY(rE%6RHOO1NxTGzlJy=JZCZj? 
zV>t17@a+%kg*4oG#|0<~Nl>KguR1^oiw6F)q$gVy;7hZ7uKwn`nDOi!gx3JTVKua@p(ksoJ5hOqX0U4|{HbTGb zMFS(c{{U|3D@`(<(YRF?UB#VR*ONT;Mg#Q2++0<+4f-|(L0FB>0A|ixE66s8O)#V@ zK;4_C;Chim;+g1LmFa*Z?Kvteng0N(KWroR)@QSJp!)Nb{;ZIml1kX!tQZ?v+Y7*8 zt=b__v_TS<_@cNlb=W;g)6bAV6yIAq`6wQigtNbzIQ|3^JcYWTViq9~T+w7;(_C6G z=)|~>6o6C*IJXyN+PT{YU7!hZ?#_vQEi_NjT3Z(iJY88XrP6@9YlVtQpcqhkkS|h4 z`%mghiBIY}5zwQ;A0jdNSPsb1UfuVQ9};%2QLMm4WT7cQ0?z*cTd_(a5uHy$NdYR| zoT$9`4{-kgO&)llqI*E7?N^nYpi3YTblU7gD+FKZkA@3C zEHk>0?S3QTMGEkf_5z3@@~v#HJVJ<}Kt=J6V&+1mTUF5eT)@3W3(-M=V0nHElNQ-G6l}mb(iR z4cf*%Ic;0g{B3LU?hxLslW;un2^U0Yo2ek8L`4^!OQ6LNJik2}YXqWrOoVM9+SMh| z(I$<(c`-o>F^HT}rHo)0F-<*x3^iruWYexy1%%xQQrj0e+2=muET7|6HWS|HHlPBv zxw&J#uXguSI_*G9bM90ONu2wI+mjnk>w^J)@aEWh&wSbJR%*MyxC$Y1*;!qkBuooJ)}p+ry0r&_ zg6XV9(+?9;kVF$8+ZVWufHamalt?iCp)RSrMvoE;lpySXclcr>YF!7}?3^gU%KmxG=T?>t-`pXzAt0=)r@|~&y1XmS> zej^N3o}@^8M64)>mElQv1u9F!&bluWue%sQ)UK+cEsUBG7Ra8xOGSZ{XFOnM47!J0 zcem{*tX=H;Cmb~?q?CdW78Xmu``??i?NRr!6pLNNW+2z~Lq|cNFB?i#zpsea4Jg@@5^Uq&%bN7IoWW zlP>q2tQEu|{{Uc&pwzQv>f82ccY1dfl(l1Laqd0H$|jTiA+KQxpg3K$xv3~h zD`z$&%)s7{4@iDg4T)Bt*xRFL>FEZ)x+Vna3tEq8l${(sr)GedC=WRIK=>;uv?YQv zv4TTA!GnZA5CqS~2&kMyuV_nX+}PNcL+)5mT6WXA`z}TcIwN_+;=S3j&&*cC`fBN} z+a@Z~`39p*!p9vwUtNE(1(M04z1aB{q*(kQUsH1CjRtyftVegT@Sme)FxVRFlNu7j zO3PYxJCjN&R;0Y~B1(ZOQq$Fmyz?#5rxf^n>o%yZ13K!!Rb(<-#q+V-%?0vH4-8hR zOf}SC6U6Zj(HN3QvP)Y#Mk2jMMm8pPHtqZdb;`=~y>yE>q>>A-Bp#Ugc%4!D1Fo36 z7NC_|VIZ&Ot`+kaVUMO7LUri-i^31{2SfuaPACnVTa=fCZi3f ze(i17_F>(Bud8u8+B+E0Fvu8ra&7|hQmiF#XewaEwmY(9*(?$X#GyeL7khwYsx8Y) zn|s)4emlF4>TB5jF#Y4n(#6_=JKe355uY}99aMeUZ;s5Gm9}qa(pXnqe9d^W$Wl{L zq**GrE#$;KGqKKhSr+8V!FN}9c%KYWCQLGcT#+g|WE~qas{kX0`u+}I$=@b;=q*xr zgG8cS1xZt@G=is^<+FU4ynH-2qvi-9DM5Hd)mQ*xPbBsheh5reZ<7?W1~I;`zOAp| z_hCN2ZteTBd2iW}R>{xsDX^RS$98O^xmhpKw{uv-il#SizWo0H0k=CD8$OK_3M6V7 zOKzs$r{U3;CRW`~F8rByB#uC~eSHdK!y&$Wn?vi-vr}~kixPb-PrF)=IudntXx*G$ zKCBQ|A2rq(GT-NWW^`VuxR4bWzBu4*BQN;!Y}WWToSGQxKCEp20Q~xOwY_n-&9E4F)S_mF zR^m@cR!W?q4>r@@`LaUK+N^PKE&?#Uh_oMWY3RV*x-rGk^r%)l_UgZ31TVE^x02fce&i2d9dUwm8 
zc7KCauW@6$2IR+e7T?3%mHn&4$+NmwEc}OdOrL;b-Mzvc(a&~`+1 zpl^9)gJ5HJ?U1E{n$SQ~80W~o*}x)GypsMESX zW}T4LlOeuLyQ`O@3|Ti0?c&~eeht6$I-W0?%plCTX;7<{ny>K%iohG<=yG`^zYq`B+eVevCM8QmwxcyRz}|7$ZpYv zyx42ej$Dffc?csf<_B_TZiYy2=+i_buJ`A3TKMI>G-7`69&lo#WNjPmQU&#E!n>%d z%&lbKg|qTyx9r&?CA^t>UA@*fFLK$YfRatpMp;kMlVuyHFD5Br7(#h9yN7f)SD$rV z(VK3@Erc~+PW7)wmwsFOO{j82*0y*sxBNONO?q&{Fk zu^R9-uEA$Ey9rn5*)`FG;$gg4Z6k(_<%6SpEW1=~*#>tN{{S80!DfMMkl(L%>{dIg z(dOmnyMu17;ZetS^>21muiv_^OQs;scD8lq+FKj07oWkUw@16*$**4F?*>?FF7OJoAxBde<}8CMVE475JVeosR$5S)xS@#DL1Yy zl5^egYhrTq-Rwz7LovRVHPh9qMJsdyCq7I~Qr_!pzvUjSM)c>s#gxh?_>nl>qh^}N z+crD9&TO@H#Nfsj!xdwqLnCcV51K9NZewSl)Oohips1G3G+V||ZNsZX*_6&Y!s{|# znl?>3mNncFV`W$|?%w%#`;eR6+bv#KZ5!X2yCICz+OIOh9^nbuR^!&TPSsuzG{ zys`BcS62NRs9)*^kzqL~RY>e|U}st)EM-@e0?n<8q%2Czu2jr5+T-PmMZV3}`0~YD zCJQ{#WusRmxE1G@lvcHc$!y|kxsbbh#-J-(Y9hEQL-6h&hFdKri%VsXX_#x?TIkJfoZ?8#(T1HD_6ZyVLK-&WmlWrQ_W0^mH)RxrO7I$)a&bYt|S zHx|=;yNAD);@E_pYg0`^6pRFF_U6-uyV+=}ES4H7R*-D>V{^s2L{|@5vhExE_ZcRL z8+f*jzf1WR&u$J$WDOZ6{{zXjv@& z&Ar~yw;vP*n$*|<%u+T;o@^<7^tHd1^emU^Sm6uH-rX21&#MeM#QHWUomooHpgju_ zhey$;Ff)KYe|9$YyQ3U8Sc?L zjr{FlYOkAW+urJ~-g~*Cy2mcE$%5X%#)_iaY*v=}_gup03mcwlz~#SI7Ztgv^xa{g zFW&w4@7)RAKJT(@vN4U#b8Z2+ghAfq*Gl}`DI?rTfF)X_=nEpQ*K6H&@kv+seRUi3 zY= z0H$ekgCDC~^ATgc!G@l#y0)8$-E8*$FGX+GeEr(*DS0$27ZmxqF08n*-u*Cb{GjzC zUgfy;BcseLLFtq$>ctS`< zx2rny%0QucM@qn55`h(1s6;>0ctsr|21Z^KPnlbUl%gm3od-A^W)Zfq*vz)1`NVN$5RyUp-sPyDsh8t-JpK1C-qu2;|1nk88z40Hq0{Kx&}LfFEWfsvKKGKhU{diDIDeM(vGw8 zsg4fhgZ}^)OT3yA^(%FV!x5LZH5T1U7~lT@l(CMtG;Y_~w_n-0{^mA&wShIm3>YE* z04c5Ao)S`wDP0$MhH;S-XM^IWP>~)9DTq&(h>#&9bLe78H06Ju$P0!ep5wX@d+ z#Q?9CR|baGO_wnqh8SsjpczkJHbp?TFWbwT1&r*wb7xXj^3@TSJkgEDi)Nj~Tv+mbc-M;Ow`0tx#=MH{sH&?4W^K`w7wSVdZP&NMGkeP(o+1GtL zu3;slfLVQs3t2Bpk+)lJzUWO6w+7xGt@=p9ueI&H+1It_QBYWb(3&1{{{SX7eZ1tAMUG!>!c1=DA`eCyYMR!^Jzjp7D7WfE`raN(v~JGu zzqES>7Xt2%k;u4tG-p;zu)}^6y;wRW5^wv&w9M%qo`(z?7BBIi+s&WNF5xy#cw;64 zr33eI4?P(xJX#TDP^$JX6^K;2tNdHF+tK8)WABS#Q(8VxFD6>3>V3l!O=(?$?rY7R 
zcJTpiDDPm^8c~gABIVtc?MCvGJ(gC-e4CbhUW7FxBMf3uC1XW*8}Mj4u(U!Dg3{y~ z4nKVwB!14&yUVmVG%5a`(vGgaUv%j9Or_K}(Oq3)>%l=_B3-=8HYRH|j^!y_$cVE_ z(YR?UU7+%Bl!!-%t$&%viQdl7)jKXX}Ym49j{FuK~*Y)XkXRcPwP-1FGn znQj<}Q5?MSYzY1_<~g>7h3P-k_f(Ww1B8`WDEN(W7MTzj2aCyK`Jg%@OIY=3WMVWh z88ilsSN0L+GrcU0_mA28Ho{R^+9a}+#OjBWFy@)LCoZ_;{M^=7h zhifpD1>YAkw$-n*dT)2Nc683RlJ%2c(z)-Sc755gY<}CtubN=nyVaKIV#juGR!!R5 z-`$2je{EZJHN(}nn{MLSGS(OTQ+KinZ^^lG-O^}n5tkoNF02LqO_x?1ue?fR^JUo$ zQp9K@WdX^(5zI)_zYLKra9OA;pzCLVCqk6Y2 zTXNr#A;`AwV{*28>D<4AdG4l#zKy$CYi+@@hV*6J`>55=h3_#vqEv~VK@Og=Fq!9AjHW7 zN4s!D4ZX>i9;`X!$-8^~_8Jy?Z#InK)8CU`(ur&1$VbVK)S8uLzkM5CTRJyB?e0u; z#kadzS#jT&cunp-*RLgQ$GCI-zik_Hn{Rh8C*&9cQFDYFJ@2alxUbtr7@zm_(6rJI zb9vb6tS$%DG!E$5E=oO``&jBZec5)kHNg`Ccbc^R;g@nheA_v1CXK@?QPD5v+xdsT zcK&Vqu@%T1TIAD<*<1N)PT_-d&wX{zk^;%K_xE98Q3;sADi9MoJa}43$#0!6-57Un z4U2PS{?r*6NZ_S8h~BUuw)NdwvR4po-Ql+gpGexQa2}k{HXB`bSq3}i$x`oLn1+Vy z^=;so#V%*JTW@SK`?8%0(-mRAO zMmqU23lCCQZpKFE&5~2caSgiLaEoQWt-Z~%)su2+tj@h#cQ);BcCf{_hq-RLw*GG< z(X$75H$1i?*8u~02@QIRn{_-Go8g*zb7b93bz8*f3vD=2)9S)%q|E#?gr>poh&x6p zU%yt!y?UT|68BI$d@W?Enb8j@BWLKX8@hY6ar5<7jK1U0G2EflHuf!L%R;S}AXp3u z!IH;U%``D?aL3V|dE>^EsEf=L%`9{x{8Z{_u|=Zoas=2d#!bsQ+3xILp~+A7VOt&| z5u&Ki3XGB$i+58)#46?>Fsn#$8@}LgMgnUOZj!d9UC1!p_Lwuf+}rfSLssV7vw64a zQ~bTPX<5_Iw>;@=v|wdmybmUMVesF4G-1nmv|E-6(b$_MLIJfrzRkMWs&CW;V~>}& zcIWnNyqQ~u+WoGtWD=rWoQx_1^RysdbSB+s_|M4_K3g=fJ`0O&5tab zb}(F%rieK(j4Vy=)?F^k4otg7zpGxpv0$4OdNCEI{M$tRr_r!n;n7vMmJJ5vGW#|= z@@xIPm7$L|OFY>tUFim&zR|ELzDgByn7f^KLlsXAbpkyl}}{m*wm1j5kvp9p(3D zdqE>Sd(yp5jUHiv1xK4hx`SNEH*+c`_KaP#tQ&D35Yi&>8jhB;)~-bpb@F1BXSsD? 
zF4b$WCj2aLCmM#T+KUVfGkW+3P;;d)tr}B`=0}$>#`ES5?qg|G#KY0tKyK2cLySRQOf~-iC;YTxYw-)9 ze86M7JpCIFM|_{;*Z6W1)e{wGUaZFi3R=W@+|}t%4Mf$i>bu=|8bcxP$)X@joV;9} z@^1}mp$b(qz7k}p%TW;ogl~=U(%OP_8zeGTshH|cU#hs?ZU`a&03&R$Ps~>*ir4Z2 z&l?p|>E^JqO!2MP4zyv+uykRGtgx_@7qu^&!Ym8a?(%l5e=(2JHo3MhGMqcJ^X?rj zVoStW;1k8>E|Ku4YAV;YE1Hn0knsloqoah*ZcW6CdNS9}-OAarEte9Q!Z@+0t?`P(Jecmw z8d9n@CAV^8Kp_DpVklBjtHcFG3=Ck6E-LD*yIp)FrVQ{AD4(jc>-6bq0I8}rToB%F zRAo|y(mzco8vYn;vZagqo{#xz*7Y<dQLk?lgMgI2t+!9>Q-KDJeWEsGn-yP=EC656XAD# zS?8@|lPgnRCNQp6H7d~n`>}gflRD$oSqkLe({wc!hjckh+X-OKy;z2giRBhgc00j| zsH8738idfXjo9atJPgDq`E(1AQWz?OG8QQb!lO-$krnH1uBmx3DGqS-=*pKZw584u zU0m26OYyuH84#Cd!*YKsBP_%j;&!Q>jf01g|vv$IjMIw-)@YVr*c)S4a*) zq3Os1b&Cl4X1zjTY|Ntfgb2aE8FH%E3 zbHsZ>=5=Z= z@(f9tD8k1FG9ba>7bogH7dm-*w7ESMQWMXPT&95y6iy&#W2lC#-Bgl%x zToWS(%hjt$Q43HL0zjCl%Jwm{pnKm2#pgrUS^le?jn5k1XR`h;(jeI&3oYlY*=58b zfhsj)6;*gD%n&?`s$+qCUMl3mMs=v+Dj-%Qa)r!eDDoB@8h2#JSzK? z(b&?s4!9D_Y*tkx5maG1;<_*|VCbn-{{V=pvXc~;swJF7bjJ|TMWY3^*=Ir%oMOb) zzcw>Z`5o?3liY}H!(HOZ51W~@lRBy4KqC`PP8G7vUM@zbYfNQa7EK?pzIP(Tteps6 zdlZ106}=H^Y*RFr&8?kwnQP?PXmH7|qOWAQ3|J8kwj56;iNeLcqk1MbOPfafE&Ckw zx(y>Np${;^(+n;5v6taKbKVV_MHII#VR9l2Ns*P}V=wl>r?c$JoL*FbmMFpp6sao+ ziE*^XNN*)@0Mf($QHW^Tf6e8}CZ+=0eZ7#$5DgY|u+{xe{joCStVi%gvwnL7EnH0yg?zjGzW(9@f64A zQlXSof5*zkbZ4yhHa3{~E)@*U4;qIHM;ri?8>jU>=Efd`a51$*P_GinHMi&oOz&!EDd6q$3grrbe4<=Dys>fZ9;+8aP zb=s>${{R#$re3M&#_+t#{{R-xNc*rAC=8~@p1~aBw1$Nr5vNQPbR7=a;ANiph!0wJk}8NXKbRAA3Nqks|YEJQH2uWLX(82V)M?D zaJt~KJSvzVJeb^s-1iR$Fbdw11%MMmbK2Cdr0V^z3 z_>Ry)unMzOi6-KJ3eC;DUgV*YWb%iseS}Fho^dzF!y3y%(+wA_?!AxG(0hTn3|)Iq zv1JtsKg-HHENejX-^A8b`J28j%v`q&_a-UnV)bZC5YX#(`CgHh{{C$6z6)@5GFalt zX@oMrDM6jI5cVc2a6~o9tD_`8njh`StR(Xg>N+q2t`QsreyjtFk(&J~bbOU2@&1ZPBG>(IoKrx6~lMQ1!Q;d86Lw`J!)DcyOXZZ1AKwfeIlCLZXqP zscB*%A`v`PK{N@0NI=%|63X>XI}iP^rdJfs=D;YD9wY)rq>AKO0j(;BKP&J{lLJb> z(rjdrG)y!0dr7p`i8WS%cd`0tik3SpLJ`DP+45ejgXy{>{{S4CP)b$taE`Ln#gr+6 zp`CX`M(xL1rMSh~{pNzI*#II|J;-{uv0a?`1wI4JI&I;2C}*iHt|StZed#1F3|M 
z(%SD-VwiKk1aHzKD0{~&*kY$O_14CO#|zSpCtz%f9W}leLeE>VY-5t2NO=a~l~XLD z`=Bfy%o6debdzFdnN;P;qIvmjd8EX*ee;VOO2*k#79mDrz!;yFE}KixbbjSCf)*yq zu9R@rrj2CaO3x3ltruKt)v_HKRsrZucwPf@=FX4HjMUZ=u0@>+E|aD?#{#T)T?9z@ zR2j-5_@(A8D%Por>A}S z57$;oZ1G~r`Kb9o;fz+sN+e?!Pd_a`0@pKhEkZc37WuL4#L)&7vO9;9P_fq>&bBRk zWWjtTL1HY2bQ(RulFs`3kTZ_h8OOEZaMgMqYz-K_Ogd^boIeTCAd!HZ1c$~G^B){! z6j-H=@B=SWFx9O{Mnxft8uFV6cVkaIS)=9SE) z(2c_;1R`E;jvM_{B?zTjj%;cwNx4MwB3Q|+g{X@OPBndl2agsXmnTw6ewx@)bi!)! zfQ*U)fh4m@nFpjp-I+!I0O<(@Bg)vPz5!1<_C03ve2DcQ0Wb4awU@yk0=pgdTSHKH7v5>qeT5n6yfmjMR^iHut40m zW$9py;fXIi2)+}Y!K%g{4Dr4pa>Dp%>vakDFjxGlnz9_X9)1*(Xu4{KrAdfn@TDl3mAosRb$Yw z5&>8Na04RA? z1~&M0KzRhslDzu$Lu8=QP_?0nIAI%G=Fak&?$!wk$c^{HU1Zh?t*D#I_PmHpOcaC@ z;=c8Z5^xuA?A$C?3KyNatn<EMk zWCckXWjDQnZ=0(XC?FlMq%E26>pqtiGYr#W-Q)9wl-x{D= zxCclH`GF~N0ue#DBYu}H$Rv{ z&nPJD&9RDGbOu#MLGh(3CKXq8ZQxRsTn*?2#UM|g-Y>yY&1Pw}AZFu$Ev>$)4yT-yg!-B2A>gVw5G-KWx=n?S+qMH0@uB_^aZ9l^I2UGJkuxmrEXW2+ZL`=Zcf zuwNbWVpb12h8DZjnmDeG-s!zR>OAQ3_GO9zMpwo&HI$HoGsv?6O`C>npJ=6pwWfw?=ef2+qnCP2Bp9R6N+c{Vhy1=z0=XNT!2ngfz6iCPD;k|Mkb9}jV+my8aSa$M2XjtKMUes zsdOg}Wp1O4SN*pXRf?r+q(`P~QV$4)x^Tnn&R(x-dRcekI`1;wt>rZ;cY7s4pVLOn zH#RQR_f~u9jv)TJb?F zcITV3sFM51Xm|Q(D<)+4r;;T}(nB9biyJ)WRkx;XDaz!Y6??@kF7-H1&}d*|AdxlW zc|Ufvn#NEZ)1hczAJTdpK}ni0ubW+8FK`EzVKuDV~}|x zf}B=RXhveyL-Q;bl#}6LlM=2j*o!`oCOw=JghJ24w<|_;fy>#6wk%EIlJ|2}K1^Hn zf2%qy@@~=VU^?_*d#MehFXE#0oFrPc@@?;7iD1Uz=EC?b?z{*N;uYn(sTwEqRX9`( zXsEi96BTY1h;WL)b>0zg&J@L1JYfkyyBQn^GRd8KNGQN8i2!I4Cgm(lAV>v&R94_l zOhgJ}Jh8GvK{+J=J zGKFzG=)(}tCA~fDWUiCKejZWP?KN9u#EAJpOJ5)2FWlh7*B(a2 zVuBI=Jy;nMws4@DR~Q=^YI>^_x;au{2{pr>{g{%jyFgJtmDhqG0F*dJSSlaYGr|Xk zLRFGXU?k5x`o_aR{U8y*|#h1i-rs*l0=1-KC!tqDJ7iITu~$)vmF(&d4;7OSJLpZ!^TXq=OH|Kkvz(cin39lLBW4LIjh7Hz_JF4H$t${ZQ!gg%DwBAM9g_J#y;Ff08fM zwp(~dO^^Q3Q>Q=$uwv|lw2xO;62&1&Z;87|T~wLmuIZvjG+3&{#jN}M+a$&a(}}=@ zj3Zs|e$cGH>J|bL1I?%^grjrRA*DccYK56|^c0#WW~GZnq5H@ZrWXCUaMDoO=)xk3 z5P=CMlh!3rRT0g6B9W}310_Qb;!@Yv96?B|$3ar)-*!r;hRPOt1R*uS zKn7#EMWyfJHYPE77O*X|A)1)pD9 
zm|G3xN24#o^14NHLHlj$&kL3k77XKRwILG_wg4jzSS~Dab#FkoR$Jnw;ff0N&c$vL z2NTMo2^rE{&Q~`J&Y^?dqPrV>G9Q)Mz{P~5gQy-wf^Ll18@f-)jqgJhf^!UPGtBkj zO=N|G*o@sIbVee#Wp@u%bmfae=6AGv!!db1A4Q7D z1#Rq};yN?8BLD?LzeHzW70($n{{UD1UPE8OKgG+@ms{5sR{^M{LWq)}u#9nJtXW#p zDc9Mfiq!+?#GVJC6pzxVN?23bDv6A6lhiDa&&8bRU>AoLX0BC$#;Ij8`H4!mq05CuR~aJh|Sy+$$- zhZL3|!IUNvqLQo3vEfNqqS=jLMHpMrOtTi&Wg=bt!gd*YS&3zmy*bPhRb?p_G-t%_ z28Qb(# zApm}0$|CKdA77}f4QOS2*}|lVI%8c3dF-Wu2#AhJ*`n6+$^F6#r?sMkNf zanXog8A9kz*maXeA50YuraPR`QV=UTCaA$G22f|=W0pj`5BRwAI-vn6isNV*b7NSx z=DTa>n?ka|Cv*-Vcuiyl8pOd>ClSEYn@zCUxP$aQ^^^ZxjCjx#o<=HT$u8URS~= zaz>=Q04v2~2@j~=7{lUFEU*nTF|CV-#sNcW)rC>`V+QQ)LU1n=M@M0daYareghWDC zh#4-K-Ljc$MJJ3;i5+%);Jhc|c_Z^tvO?$!AqXx^ix`q{3%!;LTd$?XBd6UM10)2> zu%tlnnEOFm1sxE*5+D+$;F?+eLsTOG56n77Xwb7*ZCkV=un$0CR0(H$F!$zofl}Ki zY<4T8XuF2%^K19NHsSGFWIcvUn@wJ=<8MS3NOLGNHlH@b@J2()q2BQP%g8Vo?>2PZ z-5CW{tF+Ay^`PPjG;i2fCuyL?V(W2ctTvMc}AzU1Y+L`HUeqqc14}z|;fT zuN3nY9Fd+5tT>Q-%Lq|CQko@>(MBdli@=GHUabwfEEz`>2}Cv&OE{i)z-^4tB%cXT zaikKFsR&ygdMuG4VM^jFNzSF50oSchLn~o@>_R6BY*$MeJBb3zvKi^3v$Nr<1Wbs^?s-AtlcJ=A{XqsxU5x;? 
zrPxj>Dw$mXX&fS{h_WFVuF@EkTXbbgmIkp>M>Axi!6)}CzD;ti+ue-1?hr(~`g@yNer=dzDwBHlDZ5Ssj9i z*%KwYl@oSslHdp)hs0j-FXc68&M8g|oe?h`I6$=)_nwqsH7R{kV(xX^y;is@SIwg; zuODj9s}w^EVkPB^FT`OJZJ9EfE`*FH`(RH005fpBY9SI9U#!?VWk}U-VG&l#pw5_+ zI=9jcfnY#dN~J4=tj`kY1~HB=6^hqX(-cHoUwFnMLfe{@3Yy9!2!f@h7R%rks#1@N z0y^r-QB=HfdLzhjN~=l-UO5yhCrVf%<1ogZVF-zb-Ex{TK_+nBx5{k&W5ZQx=^{Bw zo53G`I10V{;f6Np-Magx-in7Fy-DiFTl4WE$kAw zdUI@tBLfZU#nN4UY*~x7lSW_Es=GtNv@Vn?x4q$Kgy85EaXs2vm6wbM@mLp633M+F zRD?*H)dOy_gjazGNX{;)g%L@7Fs+{Oip+$_Q5%upc`E!AKu42WF9mlf@fVu4AM&3R zeo+w{=FbCIcxYgV5<2S=34D?a-w0I6)$Gl1jT$WX1Zhx-JW6389RXbkjPRl;14KNp zm@OE8)CMt>NCs%jN3+BJqQe)H`B=b*T~lE=N}5PzFvKEYsc4Z%CDvTUWiy;F7U9v$ z;mn^l8-k$!0Bu!(K#p+CtZ0)Atf`*DHybj(N(GI-Ovx~lK$Yv%O5z~bPT0E`Tx`9P zqy})PO1bE)B86)(M1~ZnqOpW%%pr8q5%Bn4oyO)Ow6?6lT`p^{ikxVQzU;RV{7bS) ztYThE8R$~tnJIu-szlCJNK~Ey)GwB700{+>5?@s1MyRsj?ee=vW1=kyAA(n%!nh3`!CyVRskl zEAYv@9ej%%&p&(_QYHo!NdkIBp1Ku8{g>UK6&*^evlWgKW0@jl=~>d$vJt$P{HY5u z_htD&k16S}!oosch8TdPsQGed0IAnkMd)FoMkqxhc#s<<8FdQEY(Op5NjL%esw%Qu zFYwG~7Lhx&B^^~5$|$pm{{SO$>KMCNw0pyItsbfw8jka^41`~^3{Ft4-sAWN;>+JA z`+f(vFYFcR!g*H|AxqF0OBU?|pgqTGk`?8hiy;6OrlCku zLn^pwIwx6ldoU?fJoR8JCC8viQ&+_S=#qetnIbbLNtg9(3&|ZQ0E}zAEhClYD&49W zh`KEVQi=AK;kYD*cOv1Z5NqJ3XwN#D7h3ltZ)8-L_uEw(M61Yt; zqefB{VkHzF{Ih4|uUb3~g9uw{RFE`?Wmr7zDBcIeB$4-Ecv09%>Vm0(TImx~))kPG zMv@yiUgCA+F%bx(+&f@Gwvt{v$`e%#6uw)C>hlR)Pofq(Mp)Zz#5EPn0^_{Rb zN#YPj^qHtE0L4H$zeRG|E(=N{iU@vwCw^M7^macHdL${d1^0sBKmv29a;pm2=Z9O; z!)1jUkTZ-9=9b$gb??mUxXQ>RflLwTqNfQEF@$A{rjR&TLq#xX5V#-tc}fg8xBG`i z-i??VZ*_j6VTAdS&ARC@PrgEM@c5Sh0EHn?TV}-M0KV0h%2s6vkE-9hS4uA}oG6xH z5*RhYNh>h(ZWv^zO(*Wk9{oZOVM>Y&qiVD0E3D0Rw{mNuvgT9JS{l^+z~?j@3~iL` zBu}MnMb$Eeeui3P-<{~ZZ~SLp!CP<9qurYy>?1nJVnB527ONo{lD0PB9<2GTtd+wh zuOkb!U=xILL2RK7#hn60^;?xnew9v)XTvU9MK&O9Jp?G|xZNNoRC9ZZ@R2jJZ3JK1 zs3c5<@&~OTq!)wr6rxEqp0Z%P72Qi8iYOl7Fcth&MH5MiZ+MbnDS5kUq~h>Gw1I`h zons2VAsr&638>c!=Co>hs{WKp)WB*cWFcC%6;?@+w(AH|kuCEoLI|tj@hONf2q}E! 
zSk((rWi&`#6(u-i3dZE}+OY`{9}*kmgkrMIi`N&V0Ut6L@lm9PP>Z2}ct9N{ZOR2q75lr z!hiz$cKHlIRkr7eEV`LT^^2M-i|43HUE$J?m1@Tg-f#7BqjbLd89M26n*6SRjtbGpFeSuM$u6 zf)dgpCVuzOS1T;>1`dF+b>H%_K3pTEuVtl5F~-2x--uOM7%8V;$Y}1WPeeP>Io`O2dviHcC1R0iuBN$$aGdf;-afY+SJC+3gQX^H8Yit zkfc%yB1_T)MQ0+23Y!BEl!6ysf>M=pN2;l6tfP}z)+r<)2wEx>SZ(5ptWn?~_dxg6 zMcm-m{3#G_UW=G`|Y)Y9UZj#DML%-|&gWhz}a8 zDa0;?+lZG`_=N5#Yf(0J&xpKFWdSgX;0gZ#%m{{b!=eUL!D5zXtpki{AcxAMqfwNbyO6}` z3}DU|jHx_K%@+l?omf2o0EtgDfzV_9i~j(zHvXT(Ouw1|S2gq zqB+}!5HC`#ZT|q~Orfuf6*YyTBO~s zG>yc(_R+Yf{{W1+MV5JV@fbqOS|Je51PsKolN&wde&o*|%xDj(&`GMo=(M~n3@D;w zUi8V7uc}6H=ct1z#vhmn1h+h7(TQ6YRRObHAhi17)e=mG@gK`N)e`il#iP`4#C*pQRzo}E@AN-xK#6^_Aq%WlJ z!p6=Vh%3(unJNxD99;o}qHzEZ%rN?`!GQ#D!__A=nOc}VX_!Y3KBn&QhcsUL4f;(KwDhC@DYeD0)# zeiKUa?IHmENjObDcTzM+;l4?N9A>QaSBU4XjFVo@AE%E}KpEW0BZ~RFWmpK;CROB; zKMd=TFw&`WN!)KN5$IMq@5zVM^egbY@Y=r#jB2RyOsmFV+AKw!<8TCo`y3af+6aKf z2)4w-L=p@f5_w%yD9eUZ`GVZV)LJvaA=-o0EQop`fGS-tJ251SuPsGXEp$zgUy4Qv zNIg!WUlCC+ixebobL;cSx6suiFaFiM?pe-?e$ z)B@lST!s%qw|rlj)pz1z7=T9W^eKLnkE&+QnIqO+ zU)Yv{B%p6xaIK1yOMg^ZRKj&tRNy>66rCs)fQK|kSC%;1*BM`jtxax!__BDunzOGC zZTTm{6XHZQ*IzlZ%;79Q<@aQ;t1h0ob17>@F}K$9CcC3{X6jg};nbFmshf5;b}kIa zFLDKRwU)rkyBInvA>CmL`ZK+ovx0d(Z>`(SDxECo5vb!|;Xv2-TpAzPII!3IDA9wl zhlfP}0LLk?kaR)Pei0%msdeEY;#=4kq1nsBAOb=0Gw~I#<%*}n81b?y{iDk!s9NE=S4hLQyeNNqtK8Y+d_F>vuAqEZVRQj>n3W!fzgL^9%pMWbT2c1MFy@Rvg>>SI_{ywY(*mihkx zB#&nD!bDiBJ8n}1)@Cm|`b9cVWF623mQMsvwMM2{)x%d(#8D_vZ%Fwvf$PjS5RGvX z2^fvtT_qemCc@&QrEGQL9UqjqDrkD7IX2ecOtEeoBMIS##bz9YW9G$b&)J}BY#o<02G}U)_>z; zvx_KMttpu80EAIY6gcgUkL*zld!hJ_C#ySv>HUziie3)>)S1Tw5|y)fyq8MSIW-jp zf;Z7gTG4TO2*}Unf2ax=*G5LT( zq}%eh8p9eUH7~+pFvqoeRLDP|1Ru-;U=KXOd?^bO=5h$Gr_(|a!P+yG%q*hp)m7-2 zg~ql7RpP!Vs$&O=Bp~n*izB0rXrr?x;}QY`8cA9NikBUN%Z@mLK{ox_AxeYQ0Z0Cu z&wQ%{wM}ee1rwr27ZCAMR87=mT%t`RSWzSh&L{|)R4OrnpPf?ZpB-xMz>Sr1i#+(X zE{IEvREb0_jOs>RJmcud_RQfF!;?CP;RS42S>g1WA>pM|i^`xIE~@MUB_cdfFo1R7 zDhM)zJZ*W@!i`LY2wlh7sR~q85$<6(n{fCEFJK^?&nB)B6&@+`MwIVgEh}n75}Rj{;7xLFo6n98-d#rBSX8)Di*63vou5zNMmY 
z7DmjDm$1`#u%s$Ke|95N&wkA7wvZXv?3nUK-qEfTV`6I($F+LcZov(RRXsI^joCV8 z^TgpX@0iV%qdPZWM!Wkd_+E1Nix2G+TG7#ZZft10bw$h}=gomrs_)62xCg|9f2i;k zAcRyRMpYsL5+Ks(>%+_jqaiSaqrxPE#6%uK=)MwXxJ!l9k7iJ-Bb1a*kN%w_W3N1wRZ5GXffvxIq_u~vcOaF`1F0o^kIwwkIRdN+i}%$i z3Nc&isgVFgAV?wMd&KdHKhqvlZ-U6tfJwp@l2V@`NlbNux;8iXI4s3MT~;Q3*5XrY^-UdZiBw;-&5%8)UnVFyc#o0l$o;p#<5l%FOCwMSzPPdC8^stp5mXbg=EdPyXF$aAON*@Z->xa0D!*Iu!~W6M+K@9&8Z)S*=AFJmiV?C z-`STf@1r9p_j_ZPG3ouNOmk>XFD0?-k;E`n7)R}&7A#UG_f!3jurix)Q6eM5O4(8X z!w?uVm@=zM}xx(!JD-!jcU zS8gygYh?HY#j5Qv1?pj9nbx+&KudtN&?>qWD=W}+O4o>l*DN(<3Gr6H0j{}Jp_Af) zPeTz!CnlM!b(=t{Okuq+Wv<%wpG;W1{9U5z7oA#V=RrXH9yOPv0ck+OV7bCmi-qEC zqrxpOLY9ag20uwhssZGz2`MUMClwW;S9TdhMc2VCpR$G!1?nJKIO>sFk&yU7rDYdQ zEF%Ey(NGtIEh);h5{t^brq`$`Y!9tgHX=&}^Ei3675uQXFFV$3p&pI@01MO9X=5lc z@TPF|7zne=;gR}G9zke>!uLZwpp=9nxF=aBKLhfhI7yJ=OOz7}S!h4|UITZs&?L%5 zGq0v4q7x+K+PZ&GMe}6viAKLzwN~pE31H^LG5S3Fp3SrJXK?aov2WGf@6`ABhDxLU zMS`>E^LGu}!w>kjokL~Zdxq+4*yH&7UHnWxvi9m{zLVz127csAq~EA@VV@vars7%1 z+5Ta>5mE?$?)#lfEp*SDJ^ujQMtN=j0B6`@f5nvo{{T?`0JA%Tr<;H6pYUP-0NY>j zWc-iy_d2${SYpdvFD7}M-_7VWZd!I>8+Wi3FPjlkyARe|PyYb1(b5clq5cg1ELh>@ z!9XyrpzEUm8U&0F=3uYCU&L(1fT}QOzN6Kg#PWHa!_lDg!!pN>?-$HkF@${2%^0 zGBPqh@gM#oa(~Q!?lM38On>e&Km3$`{#8H!04Vm?;|;2HHN`J4X$98~rXkMbDK zK5+buZsG6#vRFG<`!|#S0Kfzb%pZpde zp->gD@{k;;WB^1pA<5&C_6a1SB!eL*o!2Ce#V`K=5Psk9^Mm!sCcjKpOa0-0c)$CP zS^E9``NJ=s5AXBm7|PNs>OWuG$oV2f*Xj&@+?d2$+WDwXx4(?4DVVXJ1Q+5=pYvBU z(EeS2IH;l;jy`f0MIOZ;shzDf-{<$-X`6Lwf!1vkah^MiC4UJqZ_ zf1h9E3>-iDmh7US0{9aWS=5B&$JkPDC5;$B)>Uj_025!1p%79}`4WgjFR2+8(S0;X z?J0f?WD%BTEQ2Ho38qbzt6(T4wMzsx2~dZmwI3lrU>!!zn>{!F7JuFzdHTkFzurD4 zczXRmz<-PP^vOSWpFVrPelz6yW0?+5jQRGEMl1&E+UO)cW9J&e0vOR>H^+G&IytM@@6%Y$(DJS^LVR4e(fi{G zR}Js!v&}LjA<*;R`@lrPxgMJA;&+K-)dL;e=v^8WNQHLJ_D$Xew^Tc}7Tm90_m& zP$(*{avg(--=Y*S^^KNac3SOIoZ z%Ud9VajfuU*0b;SocmG_E`M#6&(qFu(a0owcNuR7@gK34Kfk;0mpN;z@i;xdZ#bbo z^`t-=3ZMwd7r*y_AET@J_|BQ*pU>a>a9~tzxA=k{rT3C$lj6Rg^Sit+SUe&r`S3UU zSv?+GZ_InvOu{R+IK{I+A5N)zImGS1|BC+eUtn32(Qr!U 
zWLa`U)#MPc;|$wDLReAK-L=Fd7~zp25&(DvFpG_%m4Y4+$$e6w{{RJEKj*CESN8k< z@%Y2{f8Iw!f8W}8^LRYR_0d1SocMoy@%vc+00Z~${bZjS_piv%U`A2*96 zRl#4sy!4f9bu&M<=imK)&zxWCzu)}xhV%NyFNu9#6pBcsAVG>6&xg7mko15D3 zYDp)XQDTq|5IG%$c$1U-4KLZZ>xPkGf4{YZ)Q^}yoNTXm9nABQ+{DGwcn0I@=)`ST zAJ?zn*BDW({8NHZD&|f-{o^MzAD;f7Pn<_<^>gcIm-)mGBuS}{`;vpMesW=Wm*%{) z<0tC=`Gd#j5rN@ujo{BGzg~y;u4JXf-!uH2arVk$eXjOOYF+8Ru*W+WTesi8lXy{o z?|;91E)S!Jk44M&uf8DY`+od?S=amLC0mL(?|E4t!|Ma*{`~9b>y;_^uD^erW@bKo zc#H{%zP_>7SpmNL2R7M-7H8h ztjRr7>S5nM_a?Is@<;f574Tovmo2-3MXE1R75SqVd-?wWC4b4mi~d|-oT!Rv4-*|< zeZWNtzXLFJD3~;SWZ*<+{S%REWvHWU*M$@|QAf?nyq zNL~x3U!0IRC~PE&9_b5IfxF?7z=n}h5mxPHIgKN)BhbUmcVk0R7sG!1?}&*fip=Mn-zvQ8@wrW}({O9vWggos7bClXB^={E=L07g;54X3uVA_PPy zIHgUDNUU!bS&E<};5{fn-b}kbplT0hVj5PG^z6d2(1ZyC39~{LQwDE9G{sf1VLD4H zp!3L4hjs|Ta(ETKTR3Bqsn9YSR3Q*H32MZp1fpV6_y{Xhs7*y81-3#D4UtoxC)K-U z31HqMK&oR5M*t8BKoJ1YYdb6^V)*n&*CV9*QA10*XUthqiMSkjzmI3B$Nf#-{wjaG zu$TJv#D3OMU%$WJZC@{^U%lbUest?nalft30iDt&ZNSF)q}SRBMOnC(C+g$xkU0?+ zVZDlrE0pglNgVr1M{Yy*Llti)jW3epU-iv?JNoPQ?T~;fe(mUa-p)7_I@%teTmJGG zqE#ch9FW>_fS|itp~0AhgDs|=v{_VlSvdJ4S9v71O{>LlJJ{{+v91BNEQG8xEJrUH zT07>>Po&mIEvMr%5=}f%XB?9pVE+JuTQ#qy@-B>Rzk=fcNhCJX4rP$}t>cpvCQ>jg zL?ud4X^wd%fTSei8yJNfxVXnL(-HHG2b#WqKYd~TW`DsP4q^R>Rx!M+?4pZ>y{}*k$B|)00jA6FEL;VjVyB&x$}>2^S(IA z3Ywkx@AbSbkrPd;E9AGj3NBtHWl#0{`O0ufn2L~ri+6jCdo0stBm%WPPM;8++e#Y|=>(;#WZZZ}%i zK3q*T7>IzRCY_^(E z!!eF1dZ3O@^b_d`JAv2fYdB|^k38?px*_q{m$K=6(^J!{7-5(tke z&>YFurF)q*kOEvfZ%yv&mmn&n!FAulUPv9omlEuXQdEz+Z$>Z?AOr#?6)+Ps)R&O2 zh^1!eMgDxsYb5J5D>QNeR6!e<@e(XQ4 zYzfTb4=ah_AT>B-(Xz`x2Ji8@YN(f~&ztc_HKW#5>10Nq`aj~?oFtt`G9tg6MFG`D zdV42$?sGNEjXQeX#b|q++v{Er69J<6(noueeL|1N^^=}w+%@lO_&%PWu%BFHYV*W9 zjOaNk7uotg%<4qo>1sg#0A8ah4#u9sdA6wUDTPxMKC+(#eFr7q%upy&L2Ih;32SsfmjkrwPLTIG`Y8sMmd#suO%yf4tVQQVStp( zRD?vx*dmK2dxA#QweAZ(euT3-Z4dhW@o}6wXYn=n!<0m(w}dK)JT<<|4Vp*+(pN+% zTf!q%FK3g)p&k@_%?X}pcAB$s<-r?c-p0-O8|b18^oiO*Qh^GTqRixnvxCf1t_jrL z#mN;kY_atoJw|N^Nk$T27FU9cSmvNSy;Cus$tDVea}uDqN%U$~dK0&5f#iZ@*2vf6 
z{^o=5E@t2P{_T)(hg<}bxJ^DVjYDO+OoXUwWn@{9i4ZiC7Yys0g7j-A^E2OL`@Fa6 z=#BpXzqi-*>YR9S8i6`$Ir+zFJe+->*zYx$>0CcYw5g1Q*hTlmFT0O8Qd(-#XUBb? zp#sfVaRkiX<@E0%-be%NDOYRPKU|H3t0d#hy*{~)3YlTK9hHJ1Vc&Dq3F3V(@>kT( z)P6X^2C$SVOEGGIHU99HMBK_OE;idKtKJ4iqm_VO!Ai)#ZNI-)L3QQnvq#B+& zSMRKaiG%hl?TsAW{sEsC@gF|7e^1}uaujofqhdEU4F>msbm(+3&qT@qS+O!kB@8M^ zAmK!kWC_LMcCCU^G+PsO%eG#K{LfOC`J%GK?fENyz0UAspl?$OJ_GLWsMwh%W_)q_ ze{v_*^*??6{OOS=)LPhyGxaJ?B|2!5td7KN=4IN4CTx9C?bAD-w>bkwXWVrjA^s;J ziB~lp97ojL*BL^1li=IY*IB2YYU}=U-d8fiCy$mo+i?3ZN3jn#eG+;5=O4^G6T@FW zIn0QT#Y;VR=DVCt5=gJ7IS@@toxYErZzA==k0nX_@s{)UkKey}!~6T^FXLJD{{Y|X zi2i?%Kff6`dY0NJ$LRAp#w|QIFP%rMt~BfImJC$MKfagqjUqN4Nq?k~-?t?Jb#h=U zC2+TM{L(%-n8P7Q`pfhpTMiaaS|MMYFOXCf%vWkCgtbsKkz&su3d#|Ez& zbkHg*;%X)or-V_`vfZVMiAKoa1lwUcQcF*ixQ!|h%%t7E{&F0a)hz>p>CtYz3=^B; z^>%iysgPAWNv35`_>OOD2d;4hERd;qpXC1lT%I@j8ceD5h= z5gLEzJ~6CnW3)@Py&X&9?odZCbVjxfyQeZ?n}g0~9YVwi3k!)5r;#m1{lN>l>B+$A zSA%hFmbu?{$-*9g zTIbG7gSb_FJRDcJ$-FgXv%X(kYB;Qp%j#43=i7xMZGXSl=Z-B+sCrdLMP6{4e&THX z??2@Alr21*ew}1v&-Xn40B$P576VeUSB}1T$-4JzDel;I|u0$)})4vt` z-~<=J^8P0y#`mja{uA=Q4-Kf%#)XG^?^wiUQpq;?QayLQWJDt%<}cyu{Nm)uan~2^ z(XXaN=V4F!@t)Z&`?&uAc#{1+o6W0n5O|rZzgK`s)`Zl7)WymSe3f7mq>fiJ_B*_U zeqI?r<$cS^N|K*J`qc04+L8ilWF>+q@zcfZfbnSf)}zOX3 z7k>3w13qN%Cf)6?%OLEt!D~gd^)4P~5;Z8#g|9Gt9OOa?q}1@s&h_kphC4u!?)q~9 zubz`5F&s!TookbXV9310a$r}~+`_7Jf<36TTyh~czTlfg1f8TBi~M7ojCONE4`$1I zQ?fvYEiG^#g*fsosj?KB7SQ^)ZM-AQn`4oBFT+h3rM(SKk7gy0m%QH2^m9Y>_{9!k zbrG}`Fi1lJPi0JFAMQQjiQLmXtWABLArr;>cV5YW8i*h_?JHk^4e9}Z@6I4% z5?M753$+XGYja5(BFD?mZC4dh1gFOeXS7tG5_sW&cvDZ3Cs4)2yS_Yjk@xs!9}(Lb z!7k;ECP&$Q@N>EPO*?VhOwr$_bof5%zW)Gcw1#4u1`^_U|PSC6;*M zow+{w!lweYpWWFAUe`d*2*6=TAhONKg0ivPXQ&{V%v1p>?dfY;zOS@0VX} zZki;g@TZ8qE;7EyA@c5ZeUXOYk0N(*2+qqR4|hk%?i1^AlM+Lum%r1Tst`~~uDtJm z^DL(*o*3`@ZaT=$4H3-uOZ>Q|pJERij(>N_!#?QOw~s0ChtDnw)AoI`18MSn7(sT9i_9H*L zhPz3w7<_2cpu-0UH<<7sR-&p z{?C7l$&3BJ)74@>pS=C={$ZZwMEg3y{rT74b4Yt$9z^_C>z`zP55&d)02PD{iy=OT z)(uV8Q$HiSm_B(AP7IAfCdw}h;9X4M0s)G11SDs$NSk#?Scl50}=S 
zd~XJkFTUu#M5CD1(rCzdvRX@St46#+L>wsETSTo@D)#Z3PeH;YL1_Gl5;44(pI|PE zT~d;(3rs=1t*FjjD|{N(3YH+vNVZ$C9{BT&1=x6VdP3HVVsT@&5j|`n3@F4EwKomB zZr59xb+wx3B0$xRr0~=`)lInpEfFjfsVEaQRlDINJ2I6~4?8 zh1$>GUu;Obr{1IHH2!!NV46B7h&2BIA53iHRAHTHHpexFbuV)qJu%J8h*)okLY^XK zb@k&WNa7x<7yJi5lb2r0@gF}KQ16%?l%68~D~@#@A}Eia-+A?^nVb3YeG)meP;3F) z0H2rRB&%uaL?3g{?Z#MbNGH_zZKuYC5)~*2@JuxBqgAf<;h{n)iV@{r>>J)>&}J)A!6}k~ptEzkfK_@8E~=`|bqmzcmbR zWp>s^YtPr;{DUcDzu&y%-+_wy{`)U0FORg5lTp+jDqdc45SKhceIxh#gM*M855Hfn za>rvNE7Lldi{`So7FP%3dY{H1hBrUY@0{LvO-0YYTt~eAFW=AX=Fg`WSuE5s_+MYQ zll?gK`TNdSsE>zJuBUx`V-c^PpN=cRmZXUw`f578ch(5J&}5EJ;l*C&sV84!+(i6S z<9T=hefhS3A6apYBjrus1|O_H8_VCP-QUyaHgyl)eQuAYep~se`h77}K7DaNnc4Gk zdPJW-zo^ z7vj7`O?vsij9DG>>+e|1c?j1306KN|-1C-+jq}u95Z4n88S3^%_@kC&_u~oxmS>kG^}d5$RO}e zs!QO}CoGWZ6EhvJUs>~p z8fv{tSEjhx(&tDMXxEH#p~C~ikb0scDZ^MVb7KVbd!l8Fubk|>_Z)NwP0y1G?x@lZ~;KBj|%pN>AY^S($= zVIFP#^WyWcQ~CWX7M{V`;MCXr@r)>rJJPp)kB_A^;bX12>&9zZFO7b)Kh6ZJ%>G*V z;o~9ler5in=UCnXpD*?wsFKH**q_Ay)c07KD;lrX{TVc#A$K;veERo~Pdrrr0C-z< zxQ{0s;&(F2&wf08KD>~m8`BEA{cbQ_5Wmx&axL)Htj>o;ez=Mp7tx@ys>b1j z%`lG=T{Uw2L__f>5#47u>m8+AN4Ma`_6=8Q>-&v7rI80#4HXc?Lt{48WlL=Z#4&7H zBS@NZ>2GdUJAYnDOlG?tFV8=QuOZI4`DdD+q_|TtAk_20c4>s2&9F&FbNjx(vZXcc zimH#KraunwtSSsP!t2NK{Vy%WBt zURs$jV%%9B3&*$Q$wz(c(iEdXYw>c#(H8M;x!OnQze66?3G>A2XCB^!oXTYUKQLuL zS(Ho#9@2c`U=pi)oo#w&a!~noc0_}Z&X3j0H>i8( zePlw5&3+pF`edT{J$3H7$4}V5d*9#uBS+{&=5(>_z^K{(7C1*t!0;&Qpg4LOkvH))9E21qIzjr12DSDJQ(qxeEy(pTw2(UB+kj_uCuEAY1Fe@S1fK=@ovr38yXto1!i|YaGiu%ZZ zQ#oqw`bZy2`{Bvw?SIcXQcjeDf;L58Jf55iv+K~@{9!d7o`-$eK-dW(e&qMx0f`X0ARwcNbY6;= z*?PWCn>PrBv}&bhGhORmmiZDQ^G(S{Cwvne@8iRtDcIrzxtDE4cGCg$@+;wUf#m); z{{VmYymqH&{XV~9)OCefC4MPicN@QhgxVZyr~6mNas|}a@fx0ZI=lnoOZWb~U(R+8 z=k@&M+3`B~z>$MrzNRx%iT?m^i2Hprh(At^zY~BcrmCyQ&;7&}PstJuSv+&+SV@vQ zHw_QV$E+xi4Rj+#Upsv9fP^;!e2gL6H|fjS#zf;beGTAI1)%-pVQERr1_eQs? 
zi00mq=$Pxif0TMg_$|*cACI^XuUZ(S#Jhx9OxjJn!Q>o=EKljNihW_uPFvBg37)xi zY{S|)q%@k=qPY>_e&0#j0;t!y=>BdHjr=^`i_r!C=zBqr}b%Xo<+_XKA2wIrg7VgWXVNTKH9 zWx>r6Mc?4zn``i>iyGLqqZi01B#*1nz=H+PApBnTShZ_X&# zdtPHPy%LA3yb2}R1Ro%BT*KjsA=RH!cq{9cpf#FeYFYXUAliIQU^7v*U&@%ISGf}! z$R7uRZnbu4WL!l?AX_BEK=pu-Y?zJ7vV<9nM?}^C08qvRN;n>fbW1F4xu9ks>Zh>! zx}Qfpv`h^|qp_ez;F?}h4=E=RUMX(U5va-}1&2@xx^UWYRGdpbIG7`n44J8^s^GmT zQ^Zp}B~aGLo#m|{$gyykZslTb36@cv!f00@UbIZdeCwF+=5cB`<&ZRa*4>)%f?pH9 zT^}ERoOf5|-@n@HS;9-)BI|YHlb4jiW{!A-yp=x=ytO1xLWPe^kWpshTW>2;6_AUR zMGlLp7z1ovnIe^;FAVIc*DkVIN3bJ0l{)QicLwg*pLlQXObER$@J>f$RxUg{ft!Wh=)E) zI9FXRx&HuoFs%&RSs)S8B#vOsyi%1$=!rYBz0s2ZI%?u-LQ2~bHe7;rD2pVt;~OF^*K)b~ zbB%PKKZwb6AK$J*LIkvayM0V^i`n~k^;fld#Lb{)Cwt$E`c`T1AEEUYf2ErZc6zVl z<7NMpFDoJvTd?zJ2v<)cP&-5jA) ztt5HA;->{xN>HP@)XlbnK}KBp;J#n1{?1B5HXa*)MO2ft8vESY8ou-ue;W_3!NZnZBmx5>hK zHpsoRSeM9irSLNA#dPFjqJ>d4Flnk!AvvTQ?57?0ee0^pPKpE0KrH5yhr$!Kn-Kvy zOfbvAwC2fDs38(7T8pVQ+9A*dx?!C|x0u2LmQ3kkh#Hba4nq_wBO28e4HD!gB3x8$ zI&y9$GE?Io1f~FQqyV`p=W;B#Fsqs^!CbZCSW-rtA}5DN{4~pL z>#6LCgl~zH3zN>u4R6c3P3&Pk{{VTpJCBx(`fd83kMAGb_Oquyzwgw`}AGc(RmY30m?~yq)AFk59OiQY_q{rk)vvQwj?X7KGz2a?M;49`;LcLLR*qe6I5QO zJ;fCoNvwx}idPzoeh7&^=lYDafRO_t20#ogv2Me>hA&Wv=I5E;rq_CaDHM3zf{lOz%}kG2!E$u5f9gihs06uzMD%Yf(qT66sScd-@u$ z(Lxv)k4-0mORa0EnF*Z_+}PK3ffNEP2H^*0#A;V8S#p~P3E9ad+^eF`6~RPgBV6rg z)W}^-lSgb_L1qo!xblN|waE>fj?yqpi9p=3S|A0=v#$|psEXqfVbH5%d=${U>5OQS zEIikCJej6RJ@*BE#~Ba=!FS71_!@V`(^_;x$*S=L5m$hqfHa7-z1{P8Nem*z*$Ody zdTwN#DDfe}8A3yG9#dDA8p{lgM9l7i=aah7`93F3?4Sr z(nC?DV62w`3s((dAPpfV8T22ZY4e)|rR*n}eQTKZHW1kCSFZXU!|Ia1fBE~oI$8Ix z+q{zN=BJA9Q}DU)!bCnD-|VNvEoqQ}C&tSzt8-uPoGp7_dt3Ma0B0Ix;qgKCWr%il zc3`IFi zVUm`FFRyk5R3dF<7Sm#QCp}s?L0Cv=0|JulLA~CZlQ@XKH%OPNWS? zT$}7geiiJFW{LJjKE_WL4*WHXE+j^3C)TIU$*RTdSo+E0N9<0gCtr)XK>Z;*eeSUp zSa%_#;S>|MzzI%`kP25fEiXzSWRP6x%D{)s;XEMp0dJqo0`yTG!Ph@do_pH}4{}d? 
zUcrW#YB$DGbSJBIeNPeZEoWNfv~283B{gvjh1e+|T)GRLA1(wQT0oNvU z=A)UVCYHHtRV=-bD-A=1cWQE_!c-t+ow*5t$KOx_EC?|~A|_%^Bm%0{ep_wr&z~Fb z?AhNWEIMbr)Qf3W>+>^^0#SM;UmQaZDUX~mJrG9?yHaTrfT-UsdpIPT^yg9 zU3SJXUH&Qk=UBT4{{Ub=ncoqbeH-E1Ur)X-wqB_IQTuBF;vdoUpMlT%A75M*yUEW* z{bE0VHGxSytoEwkfqLFGneu-V_xk-Po^SCV*X`xh$|r$FktWSs#v@Af0RR=CJ~?*e z)4|Wj^B=p5brt*cjEx^K@wVHS>-EB^22c?ZFm`34Q)$h1azW#n&A0vMSL}bP$vTx) zu!RJWg@Vw$m^hL|jW@~pqY`c!0mLywm*4e}vkGEwsrMV?nfHQ_syKW=7ZbT-ZPldj z>%MMcK4|2pk-`#0I1&?SG1uTbh+gGncqT?d8PSq)qXHrS01E|9=84qbIKGsAa#`9s zQUwurI-D<8gEA2*7f;ANbEnBBE2(__>>#9gZvBD!=dfZC$%pN~^^%FUPt`N!H^sR`;Dx}5r@)-z zwV?e1^{_cRcPivtO&*O-&x-E^>>r)KJg2{Pqs2{bYQDzJI+rJC4FuS+;b~u7kU(n* z9MOk~g8QvTNJ96pp3s z&PH)d3AmT8pM&VNO*a%Eezu<)$QH#R)Fui}#BF?N&y*KbCJloDf=WX3n94vkHo+SP zZY1G^mB=JifShsV%)l!E=9stz!sw_FIJw!z0G@=6g)0s06sG|Ql5}C{z}j|&P*iAa z0zm*sX9_oMMt}lAw+dkHbus`es}%rtb8V3&!s)aVyDS-l5!g0wP+rfN-;9|Zjft5F zj<<3pfdI}B;J&aW6nDVp&MLx?BV}uPC2f|?ij>D>9X!pEA3=HH6Qr`8wxn&MW31Z2 zQu0ty+SSJ9D-8>Fj!}Eu#FSJ|8DN{Eo`0A3W?Se*UZBq!4nSBhqC){jpUxFVSzoPcLrcqbZ1*3(L6F zRlCVw9!5$DbEQgriZ^QMT?{$R%`sO9~7}0q9TlL~Gi6HhKnT=JyPdKg}vneYz zz45f{FWgb^x2y_YHrl?^c_E-I%3Y!rJ6!R2|_Aye9`=k?C)^+ zMvOCRM00SY&on{GzF)i!<)xn}5Yfh1DpEwbDHL3I6R0^M1T2ZUJPOK?a(h!C8;(fx zb|TvfnNDE@(R@PHM2nu-w9O8}L`z|UlCdg`<&_a#eUd!+0K)-c))rlZj#mYpxZ+L+ zdR%w02wO}BvI++F4Hg>Gn}=+4C?GfmFnm=f67mHfB={S6-Fw_dI8yGcl>|99u5S3i zPRmNyGu3y_nqaNOD`Ij%=>Gr-(0aR5(J(zvxz-4XnYU!ycu!Gm=7z!<8+kFMX-#mL zX@=cbY!G*QG-#uc7EQJQwT~2TnCRaWlq1awE-uEj7lrp5`TJnz+8?qz5wYF^1R4>4 zC)H4AOpwUv z+_n=V$Gj9kD_YdfzO{&?Oy7$bjsF0#;}ifBLQS6`$@YkZmOw*LB)AiBm~4q!#?U1m zjU490Rok>wnL8#n^4rTF$IiWe{LTPKp8x<%$Ax$g$YB=0UfLo-(p!Y}_DW#P6QXUL5Y$Or2lt{s5^L!F;)|_w>LD?Xj<~B>YK* zDF*t1;BF=R^hQ$lwaqc8)pMdb&SRk*0Rj8N*`7K()xrKxdoDggel&J0{34 z4aY^&ob~aN1Lbf~KPN1CMC3WyJavCem8u#Nn?(MXSG=MphIJ{&z&*xaZCQ$uAIqrj za!Ke9dr?0r%%=5#%G`&?>Bm0vf2p6bKTorb6fK+%l3~!dfS0#4g8oY=N#;T+`Z1u5 z6U+k~5bkK%=S#NcYet`8_2AF0^F#+;-_IB%BbojCeEl+h52ma8=5gY`ZvbbBin+S3 z<~}P?R|gOw56wCUjmFI}^1MWl8ucW0U=p4l6|5+Hw=5KoWNKhnq&*E}QTN+)`%~ob 
zEa@p$G>A90ACZ)sr=oT}BjUB4f|?_lW9@=`MP%YTL{wFqmaX!!b=fuJ4IdKO3@1-fbBNbA+j9Nt#0m|2etIx9+ zvKE&xh~~&&PcA@HGaz$X3mXB%8sUys&0*YqL?muBX^G*wQL!WjF86Q}qHFp}-Xo$Q}XDI*}0Ue`X*=gc~3s0bRS;yF~`uZ)yUW zlR=4%Mw1b+B6we^)tvFMc%#JzE+-1`KP4=|jLQ~$L`9dkxTcJR)Tv0_0WhLxpdyTz z=mN~!yqbY3%p6R(0vJl}Dgfd$QeQEDWAWAoV59zlA0!N7iufQ0!H?ka^z-F)`!b2d6(-3U?phypE-igVQ}B(^{%V zO+sc8`4&BK>&_;fTy(&_wN4w&txPMc+0W9=m2E`cTWC-D3EXU|~@;e@ffzb|QiL>_>)>yB?H?2u1} zUbuXFVZA4^(EEUVV9&}r_<4C|n%)9{=$&uF#z>AB{C@s3{{3&Pi|PS)iaaxQA-sYh z@e%nH=j~G%AaZhn0IlC=Cjeqf5&>X<+YtwYPmwSW%<{o0@8xxPI+?V5ByrE!Fa?-C zwvD_?cjjayB8Rt6*M51tsI>tGf+TVWI=I3U%$k8;!SX6^DZwz-#E)K9-PPdBR)>Sw z9=;drlS|Ru*n0{aJ7gZv^22|*j|HbS5Bs1`qIW-6BV(3>>^57x7%u)7BojPC?3C0m zVKZKNCVVy~iSDzk(i>jlc&Rl*lL6!jb1*?JV3}sTPAbO1ZlXMMD;>|Qf=fpQ)JQAT zb)1I)=vax42s-nVb3#MG6!C05LgYl7lf@*I`cK1}**!eMi=|>q_p8WYIEe2=Ul-^# zhYt6oX$R;P5kz|~AdZ})q@4sb@eUmD*%!oSLO>2?RblC%DE& zL^ewKl33iyz;NL`J!pK4q;8HRwQ@yRfusF|C#&|buZTLBOTJaoa?av7B z`#*;*m5{!j&rrW7$(VeA?&-H<>kbm!#;jv~N6F&@kqrL*N$w-g9FZIz;P`rfCPJRf z_#0QEv(j2W>9Shu1ctd4HV?VQh1B{d>Pn&VcY1y|9Iw?hd>MK8JkcZdljF`7j^RG2 z`(Rf!e{bK;o@D!Q^M4q~H_zvfu06)T(?9Fu2}tP=s6+1lS-gY8!2GmH9C1F~R-@+)c@lX)XAt>)&oA$F4oXOHN2u?^^+s;+Uvz`VKcjD>_t znYJx++c~dp=H``Bwv$Ep&!dSS|q-AcV{zCCK&C11%qCy!ZQx3*XZH+uq5!}53X2#w%TNX-^ zkwCX6%Moe_%bEufERK~Mpls{QB~Q77?-(gKlKQLl4k#{I#?gLRkGuI}QxTiv6%IEW zf7i^p+C3l3{)`EcdNuaI4D+A9V+1Sr>;4bhhfmiSMAo02B>4dTf2`JjhfBu)0DrHx zC=>Ai03YgpF;Sr=Y1bAk{U=UuCRt5-a1xD?>qZ&dqZbedn}l=lVV?n!039=O3HlQX z#;dJ%^Ap~(Gq8ie4YkUC)RCYdk0soCmuG!salljDKghGzunuQ`O8gVs&BEq^vZ~1; z`H=Ds2kv=DCOLU8uNhQ4Bj9{iE%CS0YVG6K7nc-RiPsxXuXV)3Bvl%&3H%4=Bhf+O z-Pfb@mV}3WsF>!^oZh?)KqwxNhth)%PLc)0AIgWdiBzIcw;Cbc%XaULjoor7}fkmeK01s z0sjDfImJ{!YJGeiT>bfuhMd9=yoOFgO#BjC`F@y!7hlgYuInHgMK)CGyy^Vj5@VTr zp1z0PK5&iR*p{nCtD9TEO#)^62igM98KO&xt!_V$-a^QI6BHjWhXHO*rZoQNnATYP z_56PMz*hNui5+UZt2(!YTFiJ_V4GX|=J^v5XK&d__KX|$rvnX=&rBz?r~T%yJR5@kf;cIa5J|onM3DpRZj7-h zN$H~d_ISlvK-><`vwH7d@y0DRyP?@d{^~MBy_9Yde6YlL4d8$pYjG(K?3kTNGcv#e z(gANUGDl1}kmCLtY47g+87pYnJ%jRR@*?CbVROs!o^N8dqQ< 
zV$!vI*M9+08v%Sdoysb8QoGyzM5+`-ML66-N?DSD8UUh}qHF~OWLWd%>tC6&`Fp@H&So_p&OxSHbkj%TX51mZnVKA^>CzxH3 zV_Nw``mdAKyTrvLMX8vWIwVT9atSi?p(JQv<>+3B;|8z8F*M6hBe!u1+mmG0nsu*; zBdm!ArK6UjeQn~r92+N}mHz;OpuR2=3Ivm;gX;ePS|mKLSQ?(E`PN6J5-MwhsGd>W zT3``{!Yu&w+_&}R$SO1>_BQ)uX-MDPneyd3YK|!fwWL@(4LC%E_FgRs@htxUTAh&- zOWFzGho{V zd|;mc08qYof!~d15yKCu{(tP`2P8=LXXp5E5@%$65>M6-72X$?zIiCu9~|o-ot2*e z(Kqd!<&sal-1`-{t(l?0DxE#=o_gmJ64L(wF@w!~&e+IDL`$mxIX$_(W9e!&KS#Ow zW2|?Gi1qq+-mJzoUvBSTrYv6|JDq(zXkxf3L9}S(g+GAZ`_6|=TdJ>m9cpWMA!xba zt_{Zo_;5-{LKxygF9X5~@x+h>2-Ehdc(pI%QKmjXsDbQ2&h|=q{7icq@U!&yhRd`b zw%+0|JIuAqQ6CR5l6TLgQw*k^%ropC2*&5TJ_GqaBgDfREsX#rC{i`a^VXMhbD&n` zJCIBm_6Y_nGRet%1LVVO^42whA-O9&!|IuTC{ zX0|^kypG7x4Zf|zCFEuy%f8S!u3xzwk{RPU{#DtU(t^oXIY{DfLnGgTi}DEQaL zDHRddMgIU^FPOwBLB;_%cv3tku1|-R3jY9kv5`v@9>Dy6rT+jjgnYlR^Orx)pE$s* zH+;Ax@*tI|-5ve7!9Y%wzN!A)gGi~C)9n4R^s-1W9g>Fmq33*2@dPVSP^zE=tO3J% zVgz&4Knb>ITa-fBRQXnBL|YQ3hT7e^TCjw!Y+d(-$JI5pKbMqbl8Yj5XY$aF_mCKo z5e<>#G9XVX>dpASyxQZh<&md|dyvG{j8;C(N^QhlqON;^PsIb+)gj@OKH|DhSkD54 zJ0>J>?%i-JK1Aq{0TuOrS2^JTY_-c@uNsH#IU$&z^l#op4&}5&gk0zcZq3B!4FPAv z(&bCwJz?e!S&!#ZeDq@~x9C5S5b>K&cj`aI3wYY0!CyyQ0J~ifWYKL0h#~UcyGi0w z{KqHb&eyc+s(E61mHW52!$(qnRUZ?1P(SGp_khSOcDa;zNMgrv1}RQn`a;~Kka&+6 z+vR)$6zNyL7#+GFW!1;>W#u;uld;rHa^D)wBU}&V>HX#QK}!vI_cD4e=MyqW$yJoO zemDG8f`3Q(nC@e&kn?2qGaloPzM0V+ebZH~czt5aZsL+FJ;tttLqhC)rx||avqawk z3(RT>Om8?kgu^hG+)&*Nq9z1F0VKl_7zK|L!b5?nNRxFJ1z8J0z=1b}dW3udO-1VX zp_Ix?fe0!IQ$Lf5sK-A}Xna54F@e%_K0fkSleyc3L{x*#a`yza0TH`_WKqGCMK_{~ zVemqNSg@p2L`JSE77OlSE7}P$2fH#iI~<<{aJRpp0}Fb>;ko&z!hw{=16hQPB3Xmh z-%?jixT(^-tCLXIWl)j;UhV;7%!)VE}#u8FT?1w=|h>&saM1kFi9|1?4Ta!hA zpy4Vc4=a-^QGiWh8nUp32YaQ2pacg+DAL}lCpIAfepWDq}PMh+wx3sRglq;7SJCgBilLEQ|N>v zs)Mne)^vy~(vIpP#uuJ43|tTtLDFq3YrqJs1R%=nffjz0O->Myx%ef3R8Zn&n}25n z`u(}VBigftoRpBVw>3BK6%eDB^Yf_|lt)8hQg9t@bDsh&uL=H3h|fsh!fjv=l)I6Ln49rj-z?vbD2sem__y_I>l&~K zpb~WGDvP0r#Xv|c%mePL#K)JZRq}^DF)vlrBo4SabibtC7|F_6a+5H&Z)ENbP6f`ta3{+W!8cmO zSHL?hN2RqAH-({a!CIJJP%2Bli|9QY$(Rk0u*oM@ 
zg2G0IrzDUO3jkKQY}w8o2@l~SpwN*wofZNVSmQ^C{Bs+~DAm0x1@IOeR;wZ=&|NB% z@A)|(^T(s_?XJk$vRmDU^J5KH){$8x_Ar>#8{;i zlT3}GI6|yacn;B4VkI0Vi8V*$i$1cmP=XMUBtcfvo}D#?5QL+nJC7X_l@lyA&4&F7 zM#j<16dOi!Itzrfv_M>bxInN(+wg}b;BcBEQy!;F42vz3vN+RB6H<{03FO^`j#SgO zFMUc}-AmaseRF{fUGOV3UIg@_F;oIN*#spT#m(${FmVQ*2&ye~1L^n?lvN$XsPotB zCQ_XfHew(u=1;36P836>B>Imh=oGaK0f2NH;%Sa?rW?GGNl)_hx-g)Mn|V1lt|x$d zXMTk`;78u#@L?!urJ*@FNm}?~n=D?1IUc2DBL#)+F4#)6)e?{dQHJ1|GzMxTx)Q!;0xWb1UN86$iss?z;Rfsz@gF?eWWszzM9!Daw%yMe88#}0$iFp_1~;R9 zlkH{O9x{1N5~pdGsD%pg&Lpum@+Q(bLCPeQCM^K0Q^ zk^qnvLJA6KVnhLHS7mB0NVs)4DpB(^BOGi(P3)MOC<+EpWXluAFOfNBb-pvVO zAkbyWc5D(9H9pNKUPLfz0f``+K`7-Pr$GR%$!q@rbw(23QzLK0KTk$I%rzSgOln_h z!~{j+LLwR`lyA+?MB`5Ly0AWNdK_d&Mf5+>gg67_yN}1I$Vbcc{{TUg-Xwj-Meils ziW~iVE4;LybO*UD_wlzk%8@rrH}Ye>haLLU`_?t{B!i*ae)z-=yPe`g{V$XG`(~5y z-`5Z&daq+quF^*)grBN_g6g3h>EE+HnESDSik4Gb;vbSeam?*IH*-eh z5S($dX%M!&!_naUiNm2HffE;jL zz;@d!WGxfaKNe)3)Q!>P!oy?BJaSE^1$r`p0aPNCc9pn%?21 zS2;98$zBB$t;gi27k=cP1a35D8gaH4X?p-+o|i>Ltdj-P%gF@vBthWG3)GR!A0r|` zl|s4C?Fduy__R=!{^-^d@rTl91tYl2WD?wf;%ru#}zm+#(14+#`O4@$YvHgf~*WD zsr1tXXqFf(K&yBPBY}LwaRgT75+7q8T&n|Wd5JL|9;Z3YOMKcVTae<6M9D>F^`p~O z#{yl_myfhZjFU0O&IB?waTBDWrf<3d4N6N$4I-P?@*D?~P2C+g2x4MP0HlF+wAvvM zOh@sAR9V2^g4!s2*p^s*2oo4LZbifkECSP9LuCOLvam^b+-?gPz)CFByRj1K!OB?! 
z@ZZ1!8@E|FxdfVIB9Szjt#Z~wBn%8UAx-jpoYvS?>=I;+5NQc0!2(XE=vf@fG9H78 z$)px)z{La7fsGRZf?7zM6-mTIykG;B;F-%&asx<7KR_6m7cFmZMY2?Bt|m#3soWoz0}vqRXzYARxK}9T1Hh!8)nfp#;8ze`R2qzH!qf zBb8`yFD4O^qAjqDCImHxAe;n5U_l^s?Bz_ni4t{Wpk$$uG=dslE%@R+{xW-hrzkwg zD?_Y~k|@gZKzj-#E;wz_pJ(OK++tC}dzauc1FZi5EeE`H9VZ|4#q)BHPx>=svHKVI z`OoXTu2Q85gqZtIFf6RfMLkr!W z+vC>b%tjWGJf>XlgVy-J`;`zL29t5jz6c<^_(?A^lkGj^X8?U0eek5JdWZxXQko`C z8_P*qDM*Q++bBV{d2vbKc+z@1M#4#G7(jECPK1l~Ol>jL&?b;OuVN)Zk_FCcsZA;E zMc~t@L?5A92{|t{0>vjOBWjmrz*h!kW*p8uK$1w(iCcJ410qy}DoJcf52(&wv4S;A z<(NXqTZqu*CBzh`1A`(-txHIGGq~D;5MnII0f`GEV=%8jaa08jSk}o2K*WARV}+7c zYHXm1Z`uS;JWWJiaFA)p9l}in@*}ELWbWdeGy$$_z4pDV^B*Na|@=W>;0P| zV@KTC)qi@3%Y`-UA#TS7&4UJbn>Pn^Y?ouoX6WsIQM1w9EKU(l+g6uqQuI#8YtaL` zQ>`HJEo0UdC9M0X*2TWOUVLE?Y<6(ULR#*eazu4>gV8rjZ*_&1BtsCjwqh{_#0*Cj zT%&*_9Fo#Pcof2?a%q9AGwXVqq==PNw5H46p{cpTA#9hW5$NGvfc9pPR^Up!Z6K^k zryS@eT2yI~$gsWNIkl#ziVhM6pEqpbV%FGmK$1xitEWv2tc1aovm~Sis{yHrhm1%A zE|XElsFEO^WQ|^_KzJ=yzedfVBs&P+)$%OEA%!9(DYKu*_c4a4Av};NAs0ZNMd^@m zPv|q|$8j)OQv`)lQ50037$>4Lp+vD5YyhbhB3M~VjY>i^Njvh1gu$tk5IYfOK}VM< zcBM8pfzp1$TAIF(x)af^I&EYVp=)uLC$a&}I)4E#Q8p|{789VJi<+2NGzg2c+<+ae z{2p^*xGqPz{U^WPGFbS1=R8}V!{F}}e@Dst^uu3SFbQ_@{`icAjyCWSv*)}3qP2HZ zHSsf%DgdQY%bMek?~Ir5Ll}V*J6XzwuiqIg7vp)oKymx$2j}F}Ka(7ELzbN}&#t{^ zCklE>rcP=#Q-uRXt3(Q4f^SF?n<*+!Xadv#0GOqiDs3SqBH<~ti4(TDnFvRsB1J-M z4GPq;qDYn6dz5@L+}!DDZadfq&;YX&TS$x}oSBv%QVJYRa;^bq(5Otj3zm7DQ&X`X z#aC^l@i8>5Ou-_wC&N*EMWeWcIx4M%6J(wNiDFnvP88QChc-oI@1bQXE!T4q*&bZk zRD_vmNOl0)^cEPZtP|$~`G{zVR7;T@Bq5^Rvl0AIAmPm=oI%uiwV2jIX)OX$B_bmE z%upf=G@a}S4M>uYyA*2h< z)ijCuf;h{-RA|bEM8p!F2+brWQ9NX#hJr5mP6|U( z2s{G$iaS?$vAx$ul6U4}CVRn5Y*2)W5@O4}5^l6AhEa!?(GV)&zGs%il5)d_w9lTy zL^ecJgb00Jb#<)Ko}E;|PRhFoow*~Zr%u7(gbBoeBw?d(JV+4YsHN@b!q^NedPhSu zxu56(ZP=EUEeL{>MwkKA90(yC67?+=DnhPLvZOh63Umf@62fFgPJw2rlBd-fsv{` z;ekL3XaNM!k&BomJBugawS!x`Nr)lIU@TTlSmYO#4WOeK0vI3!9vUX96dY{Wq0)RWQ_ENkZD$;}5{KMBtOA%=s>tC6q%mAo7r$mJ(~0 z_jgV~^)pc(SXD+_C%cB2+q1cw-I5cuzcSG8Zi~i{_FZhB2MN8q%}9q&NylMuOgvNs 
zL;_0KYLHp=t;JZvHmyVvFGzY#^Q=Jxt^$+*b?o**kPy-a#7ahzguE>fsFN{t@x3a8 z)49h~gfLB&eqb*V(UvTXT4qL}8u{9z38^D|m#89!qoatef(k^*A1BFROe*AJ_)R{| zgcB@pOk~w*xEk&mmCC9Wu_ObOkAv*Zc$rd3AK?=I& zB*uwhbmZ0(i zML3NDM}d_vFe}J?6HWp^*l>* z!A^y9Fd=s|PQ++b`1OnFAYjd-GA_uxn1VSPMeQM7$-|Mn3P~jeJ%X(+S;0Wn#qB9= zET^dAaBn12;XOzQRU1U_Bg5tdsy794E=LM8QjX{HRrMV(hepsk9n?%(1z6^CMGyi# zjdLhEP<+Nli%$Zx*GRqQH2Uq z5E{7_sR{XqSkQe8zAMGqmQI-^MHZhSM z#-{+&3?nu0csZL1vlF9OlnY#6QNgA3lQR@tQdgk^L2#6-+#%e@pshd`^C-KHt3Lv=Izd`FPTK;|pa$ z^3UY!_{Rn2N3>7a{j!D9hwt-&PSVfVW3I6PVh%I)RN^O>%ctbPp~QU{o=tI+%MF>+ zFSfF~a4;pNuP;(P9GqOfx%t79p+2js8tyMRF(2eW`mlcSlB=Ni?&SU6PXQY!Qbnj(z%97=P!6iw#hsQfZGfUoMi~Zsb9Og2DAgT1purqJOJK!!ps#|+lEJ3 zKrF$kVA9U)i6sUO;w7vsgi9fI5NPyB;&vU#!juh!9dAaw8lc?mk(eR^(Rf9-5u!{c zBkhRrLB%E`#u3$y_bcUXr;??l0%d6NZa5>Eoq|2Wv|?6`0aHrplP3_ch=NdvSqe)| zAQ@;;vS3X|A`@04kRJm-D@#B`&a!wbS)z&yBBBjOuJK1@<|luIl(#lSxjayZdJ0D+ z;x!k&kwf52Xp(d;{)R?gfa?YVVu?o1Tp7zQNVTmxP6QDQU`Dts=@A||Mem$|(IaY5 z_#)m|icVK_C+?qA1J(1AS`<{|au>CD6qp&2ziLo4VF^hwI*gVdDqWcdNorn|Nk||> zXyxy>Q{fceY_XH)GO<6Q?UV!3K@i^V(Nx)to?F27#*-TmdLjm|C?@noIAJ<=X@+A~ z&YYx#B@nXFSeK$(Zj|m?DjprqN#xa#l>$60rLvq9%G3$OSRjNA_E=6KH0fqILScES z9v1>yq^l_cWtD;tJ}_8GdQAgpz*%QfMC|Fnl@wf?)FB z8zrJlK_#PBGqkgEus}e285Gm(UHJwqK!GU(zzEn=$t~j^f*h01dL?kN83Zr z{{RMlP)4VxkF&Ha+RX$G#EK9@FtNdbHnP@8D?llo2~=faNTy4BOA%+5=Hn2cN)(P< zpqtj1Bna(6G7ZrdQZOipoHgQ92+*fI)%rILZKO!sFHVo5QjD#OW715!>V~O72~d!h z%2=5W(L|+;LJ3L@-;6D=LT+O~FeV8{$m9?u#=uUbAp%JV6BZF;*+s4)THhk{N>H=F zdTn`gU{g{iRf$LtpuWZ_ERZVz5F{s?7za~0@?;E^p&*Wu3|J82JjXkU(VK7~99q)y zMrlKXhY)TKXmrkV!O%careP5>OC~GOW3cqastXzvQ>BFKqjg5kk^xJxsECr)@#2A<$MXOc2|P^%)AuGy2~lWfIXkSxuKQ9c+>%#V@N zNKnJHGN@7mh(J+2^Kf^0T~_VMP}LtHAU4sd9z-!>FK3!h-&R3X1`r8M4;+$jryObw z@q}g2T4ebyr16V*1(hMLY^K5|Z%`1+VFPkW1|74jSZzQi0xUjWVH-{4N~WPGu-#WI z#WQMf$PjE82`uhc43RJlBFG(wCromLp~N;8+CI^QM0P-|#Gw}&*9Is{q?jj$wq$c5 zE9fzTB!B@mZZ?jar$(O5dfU8m5nkd#R+>)K-HZqV3WSIjn#7+F2sJT+1+(C3rddaZ z<57vqqe{?kj)0)uf~c+^~rR017PaCs#%6?>7{c;<0%T zkQ~s#l64~0$}8P#i)JL-DFi@yB8G2 zjEaxz 
z2Smav1Sf10L=V)2Lu2aTN|3N36wn7UM@dwjL`ldBw=qhBJOUSBi@cE%N|2RQ6p4pp zNsR!i?C{Z%hPRNL6TDioirkZXpd%=M2nk1LRXm%k8qlW@*wec5`YQXs{{&oh*u;z0#y_! zH3ycAi`bc#sA= zluJxlRs>1h$*`+^@csq>DGo+F$R{Nso|lxOvH%eMnE6;@Z)i#c1q~!EJ-RV)gR&_x zG}xp7D6B71EsS=A0MMwdImiIS6T)-+7hQ~`gm~zRX)vi307#(S&~hySpn_HkFmNqH zcDXKG4M}q-QBnw!3DsF?vPjoy0OK>YHU|}PH21JbVaOzuJxR)H&T8E&IEBk9$c^{U zW&yJRQFx;hAeV!QZI~pOX{6J$l6!wIyot~qtSW9&S$u+UW?Lb&=}MT|T%KGhM>7c) z=PXL3&c!(-aqN+KEQ!)@mYpDP5)dQ64RTjpMocFvSg5~~Zv`n3h{_cSDB6)HQXzmO z@i@|mBv9II8cQ2QXo#oQwRGq2i%d1IPVJ^&u-16#7q)FKtu_=PNm_!OT?1^uSs-tivBMMPfExB81(7Cb< zBwoKc7L1kNrnsBP8}T_5=ws?9^gZ%G?)Z|!TUqEFY^7}Yr%k+$zlQz z_kt!v#yCU-1F<(faht&^d?3lb@uKa&oFF@9!zSaY{rJI_xmbvBhr%rUnlpk&1Ow7Y zvQ;ugQ-_fwozpzn4LIjVIMTIf%ItwAaABvM9)R^QFv+q65Ge1H44cf86>HfstI^y- z!U;xTHeQ`#?+qIjSezbs|X!Hg$N>9hQfh`Qruq(dG@8uaBn20 zWHd0KJp`1vbm~L>1 zJPLNxA$s;0DQM&o9El33uvmyVl00z-mB|R)tty?G5{%RvI0-%p%~;E0T?)7~OEwM> zsM{cM20*RJNxSossl~`4F$LMX0TLKE#KS8_{upUzV{BYvlQeV{K1>nJlBHPM$wg1U z6)&-B(Vv;sap(5Ee7}Syl3lyQ7E@2Cvwi~dng_j^TAVDOso1Bec z`p-5_e*XZhr}ls547@Rc(Ccq8^%$&7O;z+DKI`$3_)o}F{DbcyjM$IC4oN=I?BCaU zAn2!{Q})RdOGjHW{{Yti0DifDw0%eU&L7kL;4XaqaT}NtdJc$XR8K6_XMh6Z_Mi6S z2b0Ca=e&o(*UnOQQN>i`blE>ToMzYi#2}Vo@s7ts^z_HqDqO_AkM_;4O;xr%OG~fH zU;&~8h|0D#B=|rcKrC_BAi~m13%$;G1f&8Gmyx-DQJ30^q_u<1oau#dhL*xl5QkTMDN}xS{JBq_Wc#sr?iauc$_>OTZs=#Sr0sw#( z=O0*}$~QFk4>!SCyk6bP);qkqn^NBuxea#4rLWBvh{ zk0FYJigPNhGmoFivaxb;M;q%jtxfgPHr&Zl-w0*+qA^$HG+!SdtI@6RN>V{Z=J>D72SZKM{@mbbDHFRWA>M3T777_WcRs=Rb#3C1tgEKkYb zIXKvL%#Ym&ZoeFR$a}3|aW#IakE3~Z^SYm?Z?^t-&y8UafhU=4&KctEcMP!jdZTgv zChlN{ig!Mc_;=KwwZPanUwr?1sy|9xT_Pz$xRNV8=5zk_eDl@A$u{x7<^!;Og}2)@ zKX^`}&loQMtj(W>{-WBv`9Tn6!+XR=1B%fPul5!Yej^@>lSG+=Re0>lF?nuQ32y-i ztXPPQspZ0;O14^I*1!)FJf3cYH_04p$j52j>9#*HFYUBuR_`Oh2nfHt;1f+5EhRy-pxN6chH3a;3;kK2hZ&0GE+7h8C{T^0gnE znpVuu0hM7;x{1qNktmHazC}Y>lP57BlGy=xpa+g-fSs!-jkB0G!Xa!pKS5lDE!zhVkp!jgl zQE_Sni=7cpjrBcQ=%gt0Y6(RdKAivcUTH&zTB%Vc*>`JBQjL$(&N%8Cwh%f_JOt(ZDRlU*c z6Q;Nq4Q~qdwFu1f7g)tAl}S))VKKaE*4-=_2Cm{@ILg*U81wyaaX^x#&hrwLW7+2h 
z_ODF)p}fhpS)%&<+jiVqo^ zA~#k>7W8RUgKhjQ_{%0mGSuhSub)^IF>;8H@J}&bne> zKPu9IFTme^)P^rgH>XXDb`UCSdw(HJmv0zS+t3I*IeG{~1 zthyO!q(Ycno2a;!qmsqBXO@Hha~Vc|n2SmhGo2*zQ7NZ|eZa|FSc)D=prU4+X6ci# zQvW%RFpOXXa95TYqKk`i9?nBCE;z-1^iqlbVxxzzQYl=Urxi{ja(fptjm^?Hi6u3F zL%E%l!dmZ;uPmux1%p|LPC`KlOg`V&^66Ckd+w*y0x040s=A`{i~ z1xQITvXaZ5o{u7=WPFw@U;W~?7M-*sVC2%(_xY-1i$4exlV}}2QS`7)Rtb68U6@LN zYMQdDj)*1Tv<>VuV4`*rU5mTNzBy~IpAHQ1Fzc7&38kM@+y6vjdr7-mX}BA5P1tfI61yIa(6f``iha7{6?h_n3Mq;>h<9vQU_{Xg4iMX z)Cq;B%b!|s56FUuXBB)-uY$URf~7;6Bflo@iQX~u&)XXX9;zJPT^Dk}CicRKI`RbW zqQk<-zUz(=U!>$NKfb0EF}Jv%c!meQRj#=>7t!Qi|I?pzvwW~1Nd7WHb1my{dV|wt z?=5&!od)^yL#Fe1|9jCDLoY-zFP8y98n;uD&{hLgum(K2(uJz0D@5BAP6OskJ{Eyh&cJIzPa<+z+;-ZA%~c7dTK9k%CG-i3)g<0na1o2uIyC=F00X-zAh zdflL4+sB`U(A>DUF*;cV))FxVJa)*{wc}{oT<2%dXNrM^bpB+~gDMN@h-$Y}|I9c} z*6|AQ0WcaFFO%d``&$N?EoGfCDW%*d!wisfuRN0_IyeTXB-fJ&?Cd(^;j65g9gHz^ zzGa*+H-G)@dyOAO%DFyB8abDeLnhuO-Fw(GLF#Joa~~~R6n_;RSZ9$bqmJk_=@#gB zg>spB$H`&r4-I22zFJ7zWA%Zys6wl^VRoOO8odFb)=Xi?zF><`;xyy;nbDv1dm$cW z>KgXtrDNk+F{FC++S)W!!>{;-xmHYIZ((ykUKfs-@>*dYDG&a})e?fLekRvn5W2#t z!|37*#2!%$7O39Ef9sG=+b01&!m35PiGa$LtpEg0V#bl-+O^8n+&eZBEHW+d@^GnA zyrNgT-h^j?lOi!E)|a6I+_#(WXKYi6%HW_V2kBXt z=-ZUI3zC#L15}>V*$|kC%slTnjBE??E_u6rj!!~T=RSxg)d%TVP(ggQc zD@ro#%HNhwJMPI7vO5FY$qLCQ+(%*JCB##-lNpPFnOpw=Ojoue{{XRh7fYzG-K{=< z6)H50s)gTqjJHl7@-snl)68I#fc6Z_45lh=q4eUaSh?R;#rC2!DnH5@PC)h=v=g-N z3gPrDljsdl}@8C*?S(;oGchZ0kkiu(!Z7kJ-VLEHvQ zB&bv2#yYEWXZ3@_-qgyArFAlT^J6?{EqPK zGgUolO!QzhkYrN=@%77_UJ6VU#*C}sRGqEL4~X2`k`y1~(MT)B*I;YVVu=1XXHDc} z8rjpk)lr0Qa2aKOv3|TGt$FtX{R{wHBT_e*OvB2GG!iCmf^gf%6@8HulRh;55V?bu z7`KgT#5gyXxH0n7N0^b{OY4d8CO;jbAS=*FpROxEFa+Wwwn<{V3bvk_K;0f(DyJ`}d+(G1XeYnZ5!Fv*ATdWImf+yus2`H`RD^ z$>Pyl8@8N;7BO z?7xQ!-o8bTJw9{i)vk$oJv-BFzPBE3vVxjuvs~gRs_#NJJdM3MNgld-l=7|e<|Z(3 zC9mu{wzzG-S;2h>1AE1MPSg;o3iV)HdD#AN#Eb=>?6X^ z2&S)ELuyT5=amd%Ej3DD*&52+Xr)#0SAm5uts^+?2udu}HD03-Jn{e%f+RyzBtO~@ zf;K^y(ji~x&&znF^&9s=IGtyZ^>7cpN_nB8_>3Q*B$)Qc6k0U6)*yio$*1cu%%UfF z%knVL7Iz!zmSQjuuQimAXq-p4AM^Yzp%ujJ;w;VijoRsPpk 
z)>PEh*wzOd3tbVYGAK}@2V3~)VLDVt$UdUxktFx=1`kK8;$s`qE%4-KG&fShWRV>z zGAaWj(KKsHv+Dl>(K_TsG1K?#ijGKlXa5c_F5WLcw{fvb`79g`4h<0axmua*P@nt- z3QE`_h%uSdSE2@Ishvq{u};EZFkJ+<_!C2jA;Q46A^Q480{=GU&3^vA59V^k38a%v zq$I)pTGV=+iPTWNmUbQ!bt4SEe38^Apx2Agtg|?aGT;|VNsxXOisvD34@3dvXirxEFClGtCwnv1-jax zd-vyB%yt0=D6n6Nf0iuSW&$JQyDI%uFjFMI&4a(aZn1vBiKZgKg%)2t*6eFVWw*FH zR<1vO+fIoreKA$D)Auyb%l7Dw9d|_Xu8acbaWGike%*1%_QH>FN-dYG?R5{Wj_krU zTk{{n^QeCS?9)$idcl^#iN5d$++Q+IX6Lr($QpH<6>>5(#M!WNwdrZ*IoW=Xd$n@9 z1JMcmqg@xjZGLy(hmzZLfJS&OllQG)ndc8E>P?0Uhn3+7VoG zwc=FtJOt#5bYO}k)b#y2XBAEsWq^ngPRg3uu=Y&$2o*Ixm$gJOi&|77cel%#rYW!b zRx?rbe3e)LuqdY7ahk>nBn+fZIxn;tN%@7^e=|C$W796R*8eWEPP(xLbsdK5<>E(< zzuU5tD~EPr@zb>3hQ{&$eZg;a?}m<*;c2K0do|o zN_F8U5eUfSqZbAAEq|&69cvH7m&N#NUCxo<J5Qkx9y$HP(08lIz;t$`tfuJy3nx<<$C<-~)=9}IaNGGfg~ zG*=hhyEg$d=&IkH7TZ!a+lD(F?$V79KJD2HYCNR5P!mv4Sh=R@9sAV#jLleKIZg_1 zHy!kOp!?#_b%4z}8l{+{TvRJ3dy>=LpX)B5kstn+caH!`4M61D3Bgl@>}YjExzrew zCRzEPlZMR6$KwA1baZO&Q^Qnw28i^L(hq!aznVF|uyFBom(yzfE@Ngy1K>Y5lsYAe zGsL$__0Ih6*=RN7Il_*STOU!Wi(i0?Rb-;7cN1_I|~nFaGye86$x`Zt0YoQfz(ZcvJxd8EdCuD!%{uZ;pAZ?>uMdL#f^=5BxV32*ygkzhfi z!cgNh?86cs4FyOWu867hhs;@EoWOkIi-q$!;riO3$ti4cRsF0AAEcE`q;4px!hoOF z3WSw}0C6~IbYXI790=i&&ioRj7H$fYj3HUWee6>aBCUHFJ5mt!o|X}*HiycO=qt`| zuh**qu}ooBl>0M6K8$NIuQwN3XS8$Z@n?&n!Q$uIqRB7QiaCbU#z(kqomRt5f1n84 z!EuZZw0?hu!$MwPJI>v#JaGD>P{VWiz>~9Y{Y2)^-5L1ykJ}|-(yHrfHT(}Cv{iY| zWOw=HS^BPw*m;n}TZzl=-+_0R*GZQC++s#9E1x#qaa-?8#)qBS9Qi3M&ev>Q?6n4j% z4OwEm!id`^d1ruO{!>=KxWnODCAz3a0?oMhj%)2hmdX|_K_jb!7n1{uJWW|Jh6Kkx zuP9^_3wKuFC$Y{?%CvV-(#Mae2^G`m3w-=g0kf^b;T&|PAYsF9?J3aRR1N?f z{0-q2Alj2U^(-3Wy8qA(P6~=*)1%>%`3X|>r$FpXGwD#KyDAF1F^a-okQqYV?U?%u z31pb1`2dWJ%(M%lsHXQ7k@vnMTRUMj214KqF~fzWr4I?|E} z)Mi~v%t=+IWe;&O4sv=XVKymRRAj!U#;N%4lW`34B`18F!7{`31kzUC=}>jkB$)yq zjYaB$ZtR{8N1Kt42pDmKDs}-sN=Mmcpj4EVp3qZySQU^++i#S#B1QVdO_2GI(5f%U zBprZWJN7vhi9D@isPwH*B-J1?Y!WJRV7x(wI7Q=0-K^8kI6+-yJt_F~sW5flV$%J} zDa&X=p_f$#juWi4A#!zWWgZn*H*@qw6m6pIoD)l>3Ug<8a(+?_QsTaBUC1MX0SUW3_pBK;Tdrsh70N6~bkr`GLJ*2`n*Y=? 
zI?c_$u|mrpQL0JUxxc5+SfU=MUJIe+c~8z-WUhpXx|b0>x(+g@irOf7{i11~cD)nE z){NqS43{}(b=S{)?=!#E_9jV?*$_!qFkB@*Nk)055SDd3EDUa`i`$a8y!nMZ>bghuc&hVTb`T)^c4Yl8;rC#LP3eWMKQ@0AC`rfK z2m7J3Xw8SZ(E+z)`r9frB-6lS8r; zL3G{w&RlOjVoJ11c|O_+7zp^OkH%-JX?_CShcEK`@X?HujmnUeg{5}pyV)(~u(EG( zNTR+IU?V8iGbfjPRhTEsq-owAD{`E86KtupV}_;RMA8q)WML&(CrFmKZm5VDvY!tF zhAF8_s2^&aO4CwQv(XCHx^xuq$@an>wwU`9yH_?wi9UXqu*I=F-o zY5o%OZdc7Xa?rDaryww!8H6TNQb{wvCs)?BfpxkiK}R9i*t&RG=5PN4sI#+<#ncck;G+{d}vGw z!k^385b8#JciLJvyS8)3V213X3hQVb>hLnkFQ4RNNbW%CGl4TXHLOHED7X;a{wE50 zBJU=Odr+gqW`WB;JQdR?t>`;_AS8`%w|X(5h1R9a)r=*aJ{$+P3jO1!PiZmFAVV;j z2qZR?F$go4vi%Ba^vrg{^^DQ( zqu)2>ycAYMl%!~pej}FBHkAg0pTG&LEdf>*X&Z4O72Rckp)&TnSQ7DF0PCRxj~&I6 z7(HL_IDNMJ?t@9ap?YP>u*2waBkXIJ>w5{bW8W@%p||3uWn$+Fwo-47evG0C&nd2( zJX5Tyw;pV{Bi??Sz17@O&K)`HzVF}itKp#UAUgQ zT61x7s}7cR34IMi+lX;0yO4C_vUc19MJWtm`1d5PN?g9iq3<%`Jm?M~%#N{sN(6mz zR8lv!GFXlLF$b+A87H`iGg1*S-`DtjqMi>!KxvO8ikje<@8Ev5N5>-ZPAX_&KFiO=-+PdVj8UWm43czi5aqqu4u;flg_QMhpCF0 zPO`GiyBYCW~PSQr$A`8 zs~4(gqmsIq!8fxhs+`l*0?19LL8DXO3C*mTcoR2qLXB6o-+IGxOw9Xvgi~jN$lOu8 zRG$d}2uGMXg6<=d)J2t@YrDDZm6uVOJzcfXc|wF%x|51^1jSTeZRw-Z6kW~gPO_vr zUTe9sSv8n(zy``CV|vnVV&?awPY3Mz>+z0dp{%-);eGlay^&5>c3lbGp>=SwlfYCS zRW-`$!)v;xS@v#Up4DC}g!U({`+y_-&SdRG~t25JoKb;3b<<6Une-wn zIpVJ^y4gG*h^6kFmy0}@T8CT4cbjLDIwQW z8REhgF~XA2A1)PA5o;?c=Jw%Ky`_iMv+fX`SzM+ziDQCimNC)MD=ypcS~IBiSjE~P ztW<4NM;U=gV&7CYThdNS{V7m50R-Wx%g<{)FiF zP%u-_SZgzfp-4a#k;9W>*}*KCHsOX`hn&8J5OUh5(3WPes3)CCVfk4CNkY%$#S>;y zl-%pJ^qaWqMtPJJg=V*17!4d*u*OdeF(w`zO`ErgbP}1+IsWWo#qXmWZf=qd1w#+{ z=HW2cvMcH(xP1zVeW-!cS`w_AgvAK2QCu>=trkDJXRZ@{AxGu^;{`8piQw5YYFWa2 z1dM_N48lafbd|z#WX^LbB$>slq=cL*6o;PXK;FmRRJY2lnvDZE-x;)|aPMCA>ef#Ln#Pj#eM}W7&ja&Xi)S;w$b~uRM=23N!V`b3WCoR*T_{J+f6bsR zf;v|cjDQtw4z$D`_?U>sa|0@M@q3I&Io0uMnbP zwHc|qzo;WrirXRTs|56Juxf$7r;)h*S!l z0WSddMYf3FDH0AP*E2XL6VV*EQFXu%aGAw(k82+wt-;`Tvm7>pu32^=Z0%)_45%b> z!4`0pC_Pe`2ppPBCwKr!Yal6^cpSKZOexhWP%L9|h)$vIH9~C+nu7G4dMx!-Rn-VR zCo@qLD0Qj(#G>eOpiH8bKnRi%dj%n|Qx`Zki)tcMku-{y&7?13U{pOLMu1a_0;r~Yeu3#LV 
z=%U`BF~Q2>rqoqKgtzZBBFrYyE22DB70F_GrzZ)U?xI zxopbhZ-j@qXJ|yyGnGX@@?9_D~Qtih#GcR3)snV!tdAMEf9 zjiN}qqDHN(Fb1c}(h9|a1?uM5?vl!D5=#hI+I$w*N==`XM_wsK75#AH2=(93q2fDC zKich-ez_ux9e5$&ne{Wq1Vd#*ic)R*ze;KaPSVn#_M>;znh3!@lX<@s&`p*H(vHo>FTMhVjP^fO`Pc;~d!M44T@~)h0M~7QP2JWLB z_9&|gG(@C#Tn`9g2ZE4f4kg}|t|>)#ywuP0XxJ0gYJPRMDs=V#(2vL2OEm1-2VD0( zZ`f(!U#H*r^!Bc9t1ji=-xPtSnm4z$4xVPP#j4b4I-bLkJV0pu-o(U^<@_;_q8(6APY$=*huGvy)Ko&WP ztcQ5iNPz>aI@PJPa&7?F>1$<6rV?%6t7y}SED9LRsRsc zwGM|Hq)c>JqMrByAEL>*9<}3f#rI@^FugBxz^@Ie;Xc6EQ=v1;c(zrPBER|+09cd+ z&7_44TR$ft)vJsXsz^c6mhte5hq|zcLRPA?Vonx-O7~H5BvG#A6r^xb`dvk>aMDVc zhgZnC@(O^rG|Y>1vo^-zHLtI_Z2}J}+QAy3F_8RSLX4ou1`XwpCm6xKGzn$a*i7w* zvRb3~hV()vRTlF+Q`8=uO74FT0He0H?4qUVJqMP&mB@|g+o|LN7T6z48Y3Z#QPopI zhUxF{paLH?LD?_@{qMyk;ojDb&xfP{5IlU%IMqlCpiwZ7cr#uAr{9X*h^3{g&uz8(p(zK0=N?erK;p2EE z{}~_Ldqq?SFeU0+C2b-#(*<2QRIGJyKT()+`Gn|of0{mambCu#)x|~W z>ubHB8_xhn8KRSAX=?7?=L#)9rf+Wk0Wv49u1szJx-D)con!u5{T`cGkNdw5ELwOCnF@o09qKGfqp&NG78SkSM4J7E^O12A!5o!)|#1?SrEk)JTDJ5tZ>O?=! z91u+^^&_xToYto@?$E%A>U%Qd=yPz>R;gy)rZB?K!BhqYv>fE>TFIcIBacU=;CK$8 zWvoF3&xa7u5!M(qX~N(l8MlKr!HJTibp08z>t5y4^nVY3$1ggJ+j$4LHa zK_(#oQCalsvL-00L32GBhn9?ei;WCovB2@(#wXK`@L2+_EDn~r{O|Pk98j4-q|M9v zNB{+g+aROAGg3Fr1gg(;de{&}L#|H1qg%Us4`rGlV~pP}ZDHq)*qOh3POL38&f)D6A`qx-rX9R^;8d%3A0iJc78 z^HVrgV+TT3iWBjpjlvIo31%&M7cE*1sXMJc(scCJRfH$i4Bz*!e)=@KC)urfMI`Wteu& z(U)ke^MRbPqQ56Ww+3VA4kThUA(UV87rC79Y)_5t+N)%T9F|MkPIAkLgg*1HKo zrd}K`Qm?MA7_)A_=f)&KzO|?WxrUox2QjyTh!t9l!R_2-3aQr*ygoJci)ABjV}}W> z6DdJwVHGXs95$@WZ-TjKd6+S7*3$);0Y`Si1;Wxbhw9UsaYI3QpipommgOr%O4SO@gri!XGJD3ezM=QH$ zQ;aqz#M@DB`0wrW)GAt^0_?5YA=U7I8Fwml-5jiua);NbZU z;xU zgdS+DwQJAUxyp((m66hr(UG{gN+94)vi&Y2FdB&^G$PlC{oKUcaFY?9MYsizS~(Wf z5={lfsX_)c(vn#^Tn~JRpY?P`Nl`^nhgSp*f@r{aC?|DXbcPI58OoQFNibqUZ!p`B zwfllv{qa|(R1*Pi{a_jJKz&ALbE3pL1I0IUv9lj7HmMZU9RZB5CDTW_XkLw+gq>aF z#b0@uN)Ek>E^Yqnc#_)pbl6cElgU%&EP{Mfv&-T%KVPcbmIYn@xJ�wYWQackJ>>s_5v# zon^wG*4M&MNALW&?7I91pv!9&)<425x5%9P*7ORo%Uk_XUNbujE7J=m{uCEgB65Se 
zQN2saHS_nKf;wlJa0fttf`)Ohdd~cX&^P(W(_H=XoA5!4WVzO5;_+WXGAT_VR%fcd z_d7$_DsqL|ZEtR*PJ)gdI%Y@$a(GbTkLrESCxXbMi3lQ13TT}r?zsnfjVAXIvfQHN zIUC?sg-JVkI%2na$V^4~x+2WEm6K>l80NCm@y^>{f7lpb#ajD8?-E*fWwAc-46$I7 z^ZfR%9ZFy%!9VP5!aXtb^f>5Vpl)HN-kSQ!vx|FY9syKxk8fik9CD@~t1pt>v>u4O zzH`TqqltxKrK2EKo&LVZfQtSLH&z+d_X79lK0bZ_myi;(^&2@fdB#kApih?MrXZ*j4th>5%~YQG5^)Tkr-F% zHy}~xFiX&XTmMIcTBZ6sJ#z@_e^2=z)>}V%e9;e{hWwYq|KF-kIi8kWeDeQZ{XYl5 zLJ~fZFRD=gZ`08*Jc$JEf3^OnTlN3V`Ty-F`0wd}|LDPO{?GC1L}A}bE(#FoHeWJ! z5Pe2GL#qEhfV5LFANrG}+yhBppkesV{jMwT1S^kbC_wZ-eJ0;kS%7lg>lf!j2|_-M z*9ugtyEwd-&6AX3OTHf3 z{GGoP^RM?%USV0!{o^F>(0Uq&Q<;n(^e2~6eFWP6Srl#-T?@HZ6^O&uULFj%*sZv8 zigOSDm`M5p+=T=a#>SZ_Jas4SqekAS{1vzOdJ7TYI60~4%zSWuPvJYD=>tt78DV#n zIXPRWrWCOf{8Q9<3F~)Amktsd%d2@S=Z8+|2&*)pwEt>%0{wXme)j&c#?(_jxyJ`+ ze?%?KVXC@Ng%2mXLMU5ke`SqZEdRDV(SOp5W>0w|^!18YBdw>{^rKJQ>a6Z()YVq? zHBWUR`Li_t0CQ6x!vX?n`Aobo)RqnjxNsi5GLyxLxRPuf{=JLf7nfbP%?V`KbFS+7 zC=K*MroqhP3%PIT?-vv)r)@0T@j|E`+MBXB8>wWwFa=pL;q6~ws6O9Hfawf zuFbc^u^g{xi*?ByiYeT(X$V(6-7V5=-O$sC*Sp^uCK$jyRc$UsU3oPw|fVgjv2`_Y$_D=`7uqgtf1n=G2$7<>n+kn=3QhsE9Fn} zhn{nC9c6^~(!*p2{O;x4frSKJ>X7fN+gQx3#9DeguR#c`a&5J5gCH5VTYur9BL-X@ zs*EbCJ<0Zbje2#k8L-EPQR?}o;#BS+7=@efMhSrGzIsXuf5l}&cDA6QU`;uYQ~9ZEQ90pPaqhh2O-^uzSd zsdDzCiFM4mPDgc*paFVbWRqq-R1bMM-kV!5C7DKMAA~&C0&+M7_fAh4EJtwba=)9Q zdCmLPp1VWwOS5y-JBMeRg!^1nt_EI@XGvDujUO^bF|e|S{NdrW_{+n1(dKRaAdOFz z@jkJykZguY9noX^?)3^3L-k{xmfWIsSv$z71iu>CA9uOjPoOgxpxN`puj_?TYs#;; zwodr~*PNB~3vk$`I@LSopo?cqEV-_)5%q7&{zl5>G5q{F80M(8X-N-q{L3#iU|E(F ze)7pD^zHrItxlSJ^0@9FAUc3e=^tR-!4!(X8q8F6=!$@w%~G%Uk6-=+zbcC1?%_x&KJQ%0E#y$%UBHZ4EUn%>FGOCPQ@6@m=MLin^x zAFWZjy*?(lGU2k#R1=?dnxK^ORsX4;u6d)zHhje%nPa?XKGej&?P^pr)P+*u)?roH z>V26Id(PInUO%C|Zjf;T`4I<>N&Is>utyP2-;@7Bj&E$ij4tCrYu??@2KhH;q07Vo zHQxk_bx{ib;ude^5S~fD^WGsR^a~6Fxzv#F7vNBvGu_&Y$K;nXV@VGqO0`2c$00M2 z-h4~4izf#gR(B6YA2FY|+64EVf=l68>P(CVy^7!R(&}N4{81>p{k7Kcu6hs7N6ymj ze!-bmU_i-sRLx30m#NS8EfS)(TC~Er*?m0R0XRkVj4!rd(e(-*)-qo4&WxBVNqK;fNprHs5~h)6W(Q7QUv?51VBQf+b0k}1s8!T 
z>RWrlLX!%r4uw&ARvjU$N4F;j2#^p6RDeF-`?M>x%X>j~zLGZtvK)P|7i=4(LUulM zH=aNEB;t7aSd$;KbMuLx`Z95v>2U7w&fj4Enrp)~CDw?U{(k`Se}HY4V>^0N)`s9e ziuG)luGM3QL;SbBsDA)h_1G;+t)&0X@;?AA!}oJ9-G6{-p@F9=e;+?3bF37aIwCqA znUeYkP<~NC+_(C?TEgLpF2bbYA3$C5SJEc%I`3*1S1HT(OEP90rM*w`;C zjq4YTU#?&Bn^T%xrGLFU&9uu1YHXP1B)x*&EUhnSLbMpFznOjp4c%r>+MiW^7Ff7v zX7nc(JJjz(Z68ciMD~^>*9-MIZUyTASjJGcLb>A}JW-Ax%)F{O3pNIUZ}(hHU48`) zPoWQ0?p+%dIm%*v57__wZPp?M8Z&KH-w{5|5 z$UGp`^}s)XIaQHIuEJKIvBK}`hhKs{$)$Gs#Hz)f(*}J~e>r!@K7Z^D)}NtKB#iD@ zuBWY5n2cU&KbR`PvV>Bdt=c2wlkr4a;KH0>#eh436LD`K*#z4$Z>@-BMJ(5!H=-Lw z{bQ!%4QvsZ;HCk-e!=hVQMrD+mm&WElJ*1J;cbjTd)Vhg*3ACglNQLf#ZT}5R^B~S z$PTpo3;Xsqjg?XMVnAKb(^+huk> zta;06p$R4m|FyE+$;111;iuAb{9W=xer>V87n!g2*XaFst1h!D?W+4Kn)Y_xCn*)b z*F4$46m@$g8uG?E~FR(CtV zjZS-@>z7XAot#`8e0~!;a93Q9>*o#^EoklYW26qe zJ`n|SzpOq2?N#e+G}~m506w2H%`9mI*dGbm57BE01YGWq(XvdlQ&CV5UIVTieC`5tN#PTLPP_19lO$eG{0Bvcazj2 zt}7p~&ud5$A|e#Sv$>RPYbHC+31$3=``&|}=8+R6T1&FZa`ea$Pg!|Zok_Db*?)M` z)VLgw(8f|3z`b)Yr;Lb#Pu>|&?Dyx>A8?$`MLvng%IiEY0%gkXCj}6bvfD z@-K((X3ZbA_}H)yRNm64ir$z6jve`!dblb|)hvhpFpf`z)sv@B2);PCnr5rrnXsKD zP=3;|vpPh7fV6XaaussqeU6Om=O&T84+2c_p z6#L%92t}*5_)~^o_wnIh(r=NF0-Y%;!t=-Eus3sv9|#9Y{GxfJt%0fJo5-4M_ajG1 z&)x9>;pChc>s$-X*3kg(S<$Tp`V)%_0#6rS&X z`qqBTF()+rw%;e1Cmq+Cn$w$wiSUX1BgSWDcMUqZpctD&ClYY!vLm6K!{ga-ViNSdQU$h4SEIu?{AC*9b5FyDBX+R841GRKF-T zk{iB+&wgEC_!JgRzJq>IjV900F}JO(_KveAk^IIkzXu!f1BblA0t}S{EVE%lLe*&gSV^BV zk~3kJ=?fOFSGz$%!)@I4njc(CT?@wV-KeZ%S#H3*&7A2wpac4s^?FHGCcV8=Jy#FcG87nabuJ2ad1*$v;VB_C(w!>Sjm zecT1p{ifG#>N~FEdj&4*z2{tWpJI7;eEH3`xbE2MQ|vUVRj+13|O#F|Vv5mYO%}idWw+mJEEiq{_pu{|&j7{(5j)Qtg=EkE9y6KiKHi#o8I6xctCHHBTa$04t)w;0e%bY>4>*4!H z7-1$=X3G06Y}2uT#q9Z)ytT}oNj@=)v5OAudw1KT89yvx@s@0SSv<9aB8-ojejgNU zE+?(ahTZmE?XOGhwDthxjl6O(?2VK;B=Yl+rH$FECQV58`dD6Tusd&hSj4luo;`=y z@^i-0TFY>W$QDLU+<6^Xb^}=5xr>OpWlAKk8E$<(0cq7FX$$I!B{-y^40^ z0xO8b;>bzyw%z)=U(4YMV+C&mFF6kU7;NN4&n!4cEWcJZdh+_9eytfh*#Tz63FikWYpAx}wJ;LG$ERWlL1n05mCRTizx$w2_9gdLX8xwbu zL4@AR_D3ab*^yb 
z50>el1;Wj@J{u3as1wL_hi@-Dg|l0xewQiivDxe(doJIJx^4V6!DU7jgD#|z7-f>- z$$1U$B?$(Sep&FjJp_9_Y$h*Le~EkqEEdERI{~W0@8Etz_IS&t%;&?;FXkQNe?NmR zM6?d-x% zeZ&srBOe+sRtOtgwx!!M+xMItoifhLm%OmdoDk9=!|1XjzdwxV0uYw}?3e#Ze^L4yTC?4Ylp{9zz7D)RUJE$UuNovD@lT32AUH zFdSb&bmRcb8L{QN?D00^$TPARkaFC&Eq31WM>g6E8yo@jIvD)(WSk- zEZ#&MoJ zP-wFEextT7$7A_i6Oe~er(jMuT%PytJwkes&U_9;5(nAuWi22X!zb)Ob~B5&AO*1A zBb>l}fObXYqt- zxocUqkTTNg80hjHFogxPXA!4t6D|aZ@kqP%MMx7zk zZHfJ4EuuT9wm>?MvhkUxCC*q^Gb7GPyRy>LW_Ts8TyAE+5S(lcU@AWXcRc#D%=?Gb zvr_aSO-btI`FKd_fa5h$^?IBUtI0PO#GN<(AkWnDR}IhV6J$UuJ~0NcgmrzVoMXfO zuh-Nra(&wdve1FeehfDm@yT?ay92Dv)bQ8IG~03Hu8`*=x?~^rOz4qwTf8*BZ1 zLXJlh?0qG;HpXCI)ZRB<{07FYmYZfe+IeLl8pa!I(!^Um@sK?}4Rmq`r;xtgOb%W* z^2ZtdJ(T`;sq!>9N=`qk`IKP8^25LG=#7j*EBk1*6VsU>O~?nIb_ks@7l|fm{LM}> zf6`8u)uTeXfRhw;gdJTeOkiY#kt{573~kcg*JfKga7qpllNirn5^bY+tOi+ypHt3$ zSo|Fs22qB~UyvdLrQ({qCN(Uwc;svxZzKcdo_|-=xyS{jB$SuFNsxG5T8ndVc+QNL zv67vT3{4+n%=K*c7j3}y`k}^5%uYAaKT^Wzbc}ZoCFZ=_?1{%@KVHBvPrQx$U|~MX zy?uZ%a!a%Mx;`x2EuAOprg@ug7)JT^E>H1jFl`cKM@72Zu0#G1T|W)aehi(JCfH%e zXBZcbOKa{w!1jYS7~#S>)?btSLY$lu9tpwvjUUP&3~czYvynY6rd+6xKe%}3EgU<0 zZa+&8EgSxpIZSwtjh($S-PqV5gvrk(cs#ha+hBGW*gTJ64{nTu37EsyP-43EWKtB+ z{t?FgN0Rd7*W?|!xhKn$2w?z~=U07<`#$8G3>Z5O@+G3j?$|>Q!pZj#WueIo-}s)4 zFA^AmatDNiKXRd)1DPl9 z;bDhv9z+q3thC{te^159c7Gf2*}T0nb+h(B^7rAF)wh;sv#ztlmWc=WvEnfe=^OPpC#VreytldVP3`+K{!#~$CH^mDgMTTr^6}g3ux|6|Y_Y6rxl_|KKNLFK`b89eOa z6{rvRv;j4D%c_SHX!pt9FKxQFtjVYCuk9>w^pTo{O&xAO++AQ8HaZzj8SBXdXLjJ( z#fant@0N&EjMM5MOby9!U%0rik{F+Sii2=1A5z*O?a=uB7uMd|MBZ2#aNF); zem8i@p9$N`eiKFgw82b!V8`9Nut2LJ;|8!=C}qe*XY3%Q3_5Pp`{A=6U)+ z3H)904C(hB((TRrg9~tt5wyt9WxOGcrKEp0klW@H8+yNQD@GZ~eJ5O%<9hhpkG3Bj zzSdW~ct|pfu{{Wd{dB2up z_wv{BPWT%fGb9vCHa_ft<;ZFM+=Qs_`X#s@e1hhGaO02zABomK^nnYa;d>Lq8~c-A zTN=AF&$vg@@0a6k{P+wZjM`v;{>#uKBuR~zel}Q+xgYeq&EO@^ggdhKSj)5i0T7*g zD7gl+5+SE;2a?Py150N+93Myz7lAH3`n7GZyLg(0NCPyDX34}P%>m7rxho76sTE7X zf2iN+C-lK`aB&JR+Q1+_+|n3p?h0giT{y_Vy4V=R!hWA}@I1T&2MbK)uMCOrBb_^I z8v`Q)#GviFUPv7B0zeXGF8($~adWB7H%?`?@*O44-0Zcze~IupBQ0$VY|b!Pa{9=n 
za5v!p0E7et#ExC`V1bk$X+1X2A%TO{oCXU20IOJj%;^FjFBG-)`#P!Q%>)JK#sbR$ zxjF?oU`XFAaUDF2xSs&t$nd|U6^ysfEId7fc?La%(|9hO_J3#g%L1W2!TRO?q-LQ~ zA(5ufE5&RytFZV>kFy?mdb&656O{Z}g14=gZv5xV@tHJWTw@lEgwtVm4*vkS3e2g7!oo(mxbc`54 zcGI6$7P{U@czJ1v8I0|ZFB}Oz?h@X%x3~R~zS^~IAe#J@TRkM8e1Xk|FXf{w2vrZ< zP!7OhoxbA?y;t3f$xs%imM4jAhyMUUNf)v%Sms{Z5Nm`vA_SN{J7{pXSa(P%ud~X1 z?A;Pk`-}?MAJXwMbvD;exl-XJ0;<=9Z9xzeI_*ZVh9&U;C>O9$Qk-? z`XSDi9kQWMEl#!qUu40|6Xv%a`i=zt%bPy#@)x8(V+nJXrK~vKVc9oNgb%q-HoYt| z+qT5Ko_qvH8+~Iv^n8N-d~2PMka4(v94*sqw+(FgkMZl0?oYNIT|h~#UO_N>4uo+J zECk#6El%yYKep%8(*&6R0JY>{Wb#6v$J7dSgBP#J{ri{q?7;EZ6z#7MPFvz`HZ+0j z)Bd;K9VeeYqqN>PA44|K&%1u-I{yHrx4C1azgZ>Zb!Ys>rni&s-4aYqpVack+*x#o z?dN569}GJ!g_6bugX5M%eo^pAe4fEOhaqG0u-P1!+b+p?E_8b%Gk+QH;1UK|4sR!@ zVTSGWE}-(<0U-U}ocHcA&&f0+0mt(qJ3nQ-gEjjBjI309ijvLL1oTWFOjI%VI;$+SJzdXW3%Accq#29z^_P z7lgC1yQ^Pije!KRUgd`%TQ*zp@+^ZHW76pUQWpsPTbxOC->9AYxG~QyUReX<)7wK4 zt4%NG3l;Vl@3G#Q{a-@gW!zxXv#`W8# zJ{dwx`nDR`4g2<7dG#vg>A!NoJdolS?$=|zzVb{=M+C7*_8U)ECnkR@&_4Uf>ido3 zJ8SN9EeuJ}?~ovUT28=mvVP(C>=1vLUfG&KKi1Dp!=70kWDU!-B{aJpl=9sBuKmMKz3HjUddp!7Yk z!n(06SB;Hrj|VOe67=|0e~ZiL%!pcl^R#;an2ECDy_FrzZe;H|CqqeZ$w!z=neLN&>h7amF8(VsaivjkMr{Q!*+4XB=9X*$; zR8!a=o*{kSLW zj!Dj+mkR#E(9g(wB)bEw#$>x7Z+%1|Rfv)8s@p9S$o#l=LQU2}FTD5J}3j*8c!g;QhdzkmB)iZ-vl!wZbO)%rTH=Lm%aR2bMXJ#t=RD z376ytf2r}kL%#>z>At=M`p3cT`94R;o9_Df5i#$<<9+2mt~C0Y4C;Q7TlT zRMf;OQ7}tC^?&~WdcXB@<;#~B{{Raw=u4L`{{X{WzoPvyE9n0K_}iBs{a^n8h_C)D zmoNVS#NYnzU;hA-zyAQ$A6?7>fBdc*z2`Ld&+-F@#CXzCw1VaX2pEADQ|UUycL<)y ziD~};z!ua{)AXihTXLf;MWzkI5#lDt{{RL38zL)&*y9*~`_=`N{{Z5nkNp8mF)iGu zECZ$l0CNW6XbD!-!c-S9CYkDC`$`w=GjV>tmVGn?V6#(V7_Nz8azzMGF>wO0CGX5Z zq3-}LJ`G13m70XogkI-007T6YOeW=zTtb8^4j^N99o1)9n`2jTmVTmQCjS8XkI_Mh zyno|-z%?Gy&qR0hse(|CL=rgrw!_6@$783?y zEJC%6(0kMt^QhGbIv+CQW>HImyk;|I<4z9Hej-|p*GOC$fUw+Lm)Z{u$5{gM$1FL5 zDBND%c9bniE!tv#W@j0Jgb35pab^H0u{veI#N5V@{9pe7Fa%cAsL%d3GYrj>Fgf!P z`Wg;dG7Ap86cvE$GGg$AR3%(o!9+V*!oioaEVy$Vo7_e?g@z^YHMr%NJOKR4&=n6i z41Cic(g_*VM5jz 
z;wW4OqPc*pSy`wd-Z_;-x=kRceTHFct|Dd?{6fKrV-h6HCF><+YETx%(9!$N5fE?M zUAci(34;r%f>lH@%n7McBD`3sO=U~#0cAxI#^c0;6L6y#lrGQaRwmXeu#opF{rVK-CS9Y$%kkST0j;i z#bOhJF3CZ>0Jt-}tCSMqPtrpGDS$S`r7O1r zYHN?8gt1&gnCO*v4{aHlS8%YkSeH470jRas*SZW?HrQO-rz2r zO0T0VsO?CtY+;Ys)Xt$I8$a=JsI@T6vn^VU2r+*^flg;|L1~tb0V##$0anU}nCXlP z_``Dws!Bf5jBTBnHtH=^RADJpl$nR!D{_DY>P|vYmeTyqYotUYLo5FP01Jv~SM3$J z_LwAu)EXxD1&SlNXrROZOmi78QCet08`@0NmZ5UyNk}&o1yF832>$@sM0J7zmV}j| zuyHCetYekRB~a}AKnmtopj$G^N?lAqig=?e%$Rp@8Olq7K~CyJwl6!oFJi7 z7t$@}86aVXi0(;Pvg|`>L&H!;D~2S7Vbd`eGkT?hX@VMc2(}r_^@ynGl&y^9=Tin_ zyaiieCB3m4zoORT!7pWwksK680&IlhB|Naf6QWayWd-Ny)I7S=$2FF~MlG zM)MpOs0FS)jfqe>L9p*L0qX{L6a%%h9*t^du68rVy0SBTIDu{3V|uV zphZ_IrAnjeQNEC}R9$$PLqP;$>jyAv9QB6e2Lu>yE@IxGQc5gKi0?DH@2mVrU^eaa^)MFGCOk+`)I4#H>ego7~V%LIl4Vzonl-;u2oCt^@$U zEgotNO3jdN7I=`0HSG!q`i|*p_tdv2I7u>f|{3zL1e=pL4jG8W>K5O z46&J#>6U#3HHmS&31@R~l_`Xz0IQEt>wsBhpen_&%*cx|3ps!o{G!bS5px7%>14`x z0GQGfzF?haQJ7$Z5~~w84aVaT!~iN7GYVw-TV^^XkRqZACPZ*er3@0HxoVQD6_V{? zbxM&!VLH-?@@0AuHK~=TVn6tz z8iO{AP~gO))>>)|AT7Cc=jfSz3-oEY8X2&{608&jB|gGM6imW?dq2nd;{ph%YGwJw@siHU>sdi^XR(G9_F3{eaQ4jE_YV9L8kByhQIS(AeC0fq=(7;c%Q zSY_;yuGpR?MZxYOcZGKjK@Ei3KoZ3$OF;v(09lrjF{tcZbe~L>s8)uoA#)u?b1B5` zg&#r0w&!?2w*4)FCTlPfh=m}x6PYvUjS8JO3JaTy6Nr^f!kEP%SJQ(Rn1U-}H!~L~ zQGtI=p%RF*nO+Eu!7AY#rl6uOB@ivSlQCPkuUNo{Hco*U$p~0fYM0Es+;WM;0nDQ- zX?boE7M2oRwo#as3f?+lm;wzYbc=9HxFDtkaV-`yl#5%27?xO^2ADB!;6J8!)WRznxxQldvvQS1NnM+ZN$0rbP1cO>Q zg^@*DfUp&81!7e$BB50Zsg1+I5V8d>R973b+_yyj)|7VPOAMkuRbLWl#1#|e=GdWkFq0L{y&edTBw zVOJM~c3sLBVj5=C7HlILOPoTik!Fc(rbgl1NUO%8r5GVmI5jdZIK*9Y+Ams^Md+9LvbrI}bGn48?;Zf!tMpj#|z zP$Rj@%N$H$LlBn_rFe>BlF@Zhrl5x4<;Eina}ci((3_4%=(bTV?0@fWZ)2U~w|C$f1Vi=4E1`sN6*{ zX74vyMV^GRpJ;Nm4a+E)*9Q|iULd&}YI>*kcMReeHXJ493YWyvF5=#5I3X5`s6kT+ zqCOFAQKm?OQk5)G$uDo1?#M0H7qoP@D*#74Os%2?+*nR*w(b#J%Bh0lU}jpa+!t0G zi&YvbIxq=F;b1qLoGzhz4^K!O^l-(oig8d#=nsype#%XfPe(l&2p&a)-w>5_jBhl zDy;g$4jA@=2##23`upZ(v&Yh`jCHP)F^-%U87LON@>nUj0-VevEXyh(#CSW7j__s7 
z;;ykTk_Z+AvM%gbo_~;Fi!JDax2}*1xwUZ;qB7C}MzAiaZk3d@oC$gi!Y#Wc^%9)J zDHd40@5(Po&Gh?u~v@1p}V^N+k zG8<#@91lpSy+bN2ET1l1AXp0ZkAPpDrrUjphDLLL>``0j{{SEqp9x?* z!e#9$UFHpTW2{0o!Kg7|GYAo)DuEEa*@6aTlv;W~TvnmcaSN7AR`CU8z#}I*nu%r4 z5{0l^AYXXED?CmWgO$xlI8^{;=_pEMmtd}$LfFhvEuQk%68S=?d6xBlW~%EJ(&}z{ z(ZO=LFrp`UZ5>~|K#Jcl&-oJ=;w=}<8!uMn&y!zlOU2{QKgdaMGvg{-S!u72^X35+ z@8{YmJ}a0P^w4?cVj9s@c)onhfc3=DW;uw;@JnISXW9TcXY(wr;VNzP^VEm~x7wna zt9bL~HAO)=$J!Nv^A^B%g0($<@zRlKg2ltD0^H&WrVp|V+D&S7<2%zH%@X2bI7YwO z&{eWE+fb`f=!~slsNV8p!I?wqrRthfBQtLf>L_yv+Ti^32G`{gTY71e`Vq3HNi!5Y*(0aG~F$A(HZ#Hr3 z2JP3Mta|P^({TY$>;U%{9* zh%6x>O9^8-$wHZ96ocXfj6^mu_m$vE7V1#7@%!`f310Yv!6=hjVNgpIbt#Od_c4n> zqcM(fEV_sxF>Q-w7%VpisEea8oz9f3U0T2qVz7qd1(6OM8&Y z=6RHW5A=XR4c^cQ2QchKw$VbSf4}xM_eEuS+ZEd=wV%XAJk&K_B3oT0OFSdYaT|eA z>)9{XVjUd+03s1-zVUQSpa`7Q5H7|gD#&UToMuyk{eRdsH89*=A~UX*ODN$on3FX{ zV~s~swhBkw*sw)ayu=2gl7d(|CE^UZdIU{TIED*@(hL|XlElgt!xCZOj1$w(h>Kzk zvx#7lebK_QiIXh$mzN4-h~;ns?mFIi=SUWgW0fd_7?kERhPZy+>QY&q^ zuslb}W`Kaf0=}qp+_N1CQn2dcC^CTJQ8yS`#caEket>~fX0dRbMKk9)A zv_)(c19u926eZlNfI&eLP??NS1V{*lUPd(h$4i~fvyQP^ZdB*e3AZpbhQypsAg7cP z{$&H-HHT2;Y9g&D=2#kP9V!y9Sm4GrEQHHtf^7^Qu+H(!7icLC3~e;S5do4*DK}(e zDa`_N08=2Zd1qRbQo&29X@tZcn1Ej^pHJuV7i+{V702ENtd4Fx^UTZvs)%qNP( zvyJ_xeZydM=b!U25zwFC#tDKj10})3-ej8NAMC%ZW+xSxH^6ZLWOIY_ z7*45}A4e?Ap|~WXSrj<>2oB`}hw0FmD30b9-exLS(WcOp13BpnL5h@0V&cdE%n|m- zo{<8V)>Hv^(hzN993BNi>sf0<6?*0rv``nRc-~^58iA(kJOoy?#6pfEZ)96mN4#!$ zR$vZ%ao4;m24vo9Um+C3m>^-c*m7m*GJ_npG0;kQ?<^8gM@cb)T`AOCwV}8dZ3b-4 zKi||FkpYQ?<}#Iq{C<(Syp0u`@)%3Z@7cfec3u zmSR~~-UygLeM&F_(X$?T@hj0>b%j9|Biafo1XTw9k&5QG0UgB1aTdvH#HqF`3dG93 zA!W``loV>^%T6KGW(FX~q@jA29@&UooUt5@R1^6w>{Q=`KrP zw5Is<>yI@69P<$-RiawQv_YzWFc>$(*B^OabTHkfjr#uparR`ul1M%S6KG*+67sQ+%@|@aYN5OvypX~R@Pf@rl3)J zaeZE%bcouoE+Yw-o_R#GQk3*2F*1&7Q!ZRrVqx{}P+dYHxP>{D(-h)1u;-NI)G}fy z+vy8119?H<+uFx6E)*45oJ&C{#oPQz5yqaKR#_1~!8!p7NziA1kH4Q2K%xHs z0J9u;5B348s##8CJVqiKK79Frs8AWWqWbT+up=tvg-T$Es#YaT(qAZa#jQh3z~LOD zW};_t+Otn&YYj_r^v2oO2vE2ImMkC4PN1*8YyFmBshFbcpZVtO;+c0`1_q+H9(nqR 
zwDpKv)`-QLh$+qfq9c+D6J~#LGY1&&7#^Fx{&~b^h+}^DUT@+u9Dx20nTqV+Pfu@L z!sm8S0iV=ZEw&?*FtC{Ko^! z_GNc*94VKqTvW~99)BZ@JN@h1{z95)Z(f@HMmE!a-`~7UV19i60A>n|N*SF_^fqa6M?>CSnKi;8dCFl#6^j=SfL>zifkcmDb7#-cZIoa4{5pysjXpJ?Vh z^HR43_F;ouk38{pdupHELt4Y2?XQ@HP!0|DyYe?8&SJVs5rJ}< zj>}T?m=Psyvbr*(Fm3t_ax_Zvg|L|JTDhzx$^=sEMz{F>M~jJl`2PTW{6-;Hd-RH+ zT66i9Gf&zaPIHe;{w7)_96hyr`}KhIpOeq>S*i?s^{H=zo_pV(@o-kP(!JrGFF&8u z0IcxFsLJo=0dwd+q%|i005K4o#HzMj`RDF62YT(&S!b}l{3oOh9pxBz(|<8!o+0-_ z-(LRN^A;~mYjT0S?js{TWsRSBZcFW`BTu|1M{0_;A~r;H`%Eiws01E7^NCCjzI^kD z4zC|*%jWlhDQnL=%W?HDtJe_;pp3M-$?4V$vF(rcQ?V-}W(;ld_KI1fQzq!=&Gh+% ziL+eS^UqinT{qvU%t}q~{=EBDV8&U6RKvHVET3DrLoGkA@10>lZD{oJzG1e@!7Q~> zn(Vsm*UOal7=Q?Fi;Uc6hEp_BwhTs9KwSp!8cC8^OYDQ2mf~DtTr{{kLEx2f0jil~ zyKE5JP-;&+{>D&m@vqb!*`~4I?ba&T-=Dam+hSLpJh(QO3?UtF)?TLCTkD@b^Qu=y z4uYxv{{Wxs2I0uR2fm0FC*nRB3Ul?dRq- zT+|8%VImrDF%lD)w!jv{${YgfT+S{lYGRbjNs`!Bmxn&WUz zi~ND0`b!5d9(~|M^USya+RfzDMPU%hg`sTXP}W`jVHos$$Lb%rq+2@l{{WJtR0Gmj z7L$CNl`SUu!EcEMt*c3OHj=l3w5g6gmq*d0(Za<8*S-ba$25U>w8FE$i_U#gm zGl)+G+BxT#rbdq``P{BT{PX@sg_PzLEOb>sMtKgi`-Ium^SRn0I{EVum3N;!{+Yv? 
z_LgvP*n982bmAeg>)-vaE(KA_IPF$@@vriV4E5xxXEb;s8B4#WQTQW;u-K>ogt?U5eqw5&5HhYN3{)1%5XB^NTIc8%z{zu{ei-M> zOj11GSc?_qnaIug=V^qQS>OKveEiN$;;+v=rMMV;=@UTIz7~Bv@rea7;-HN8xk9!v za4mG;=N^6H{wt(OmB~FJlzVG#{NA-LOK?_!^KzBY@e>e5UOe@Ts+f>52n_d)OAd1e z^Kqn&P_@3=lw?kl!bRL+8B#AWf9!Zt%9g5Whm|%w$k6D6kDfX}4Wq`V7REWYPx-J{fFkP6om!dMnGDN+R(+#i!9)6;= zWMt3LdJ+U#hgJ0!Di#3@FO(#3ck6z6Lny{_{iaHTVRh!epLu{c$A3OQ6DO{n@^$gzTqtKN`u_S& z1Apu<@?JMh;~Dckqj_iFuRQDX2d%^Q{ml3pj2bc2^!;AGW6|xeJHse({D~tS&i??C zt{OP`<|^(Q1LYbsOV)SOSK$KaVydO1GcfC^Qoz(!RMO$7s5}#(W-tQ#dD3E5d8tt8 z->+}Hru^4V-+rIKL4d>6iVGYarOxj)DNjmz$_c0U9dFE3=;i`+MyqDoe9QrZ1MTS) zZFdAS+RuLo5;p@MLrl#Wb#GBw=`D0jU?eMQCCnDVh>+a4GU&LNpo3;Uj)qYMH#fM9 zT6)Wch0E&+aMp$5?Z34`SZ4nKXPNuNunMXo`bx*{4YhGGR2#)$F5AK9{D{X>1Qy@4 zB}bJh%fZZOG)EobzXSIv>F1siQw@zl188FS^NC%e$4BS>_k>C6H`n>+Fn4+B8%x|F z;npDcYtKd?Er$I~)&BsK5V+TWKJef-PrlFcJd8X2t~P>TQln!EG;X&n+qPK)g_oID zHkhL861GB({{WDdI)AVbRrBYhQhmLlg{wP>Bk5|w86&JY8EaIL#{{SL1 z#K>Tgn1m-cHqKU8OwJmcYq9gB%y?F&1og*!L{pCPt z<^{QV=a(3RWgfG03;BrGk8EGWSiCN(ch_3+~RBP0}E?#Tuxa4brEo`*LR

{X%pe~?H%k;h23Dd`@g zGJr1=)M+^98)M{-;r9{{V4X8%Q7} zAGc5WD&g%Zo~YxPn5kP~vL4_DwcN3Fb>|b zr1Y(M=4H@(f7rs8&+amjPQB~OiZ1#f817g*KZs;-rVjbd#|`b<-^^pXskR1fCWt)F zum!i~?o|g*GXjMUkBj!WlhS=WxlP)W{>m57{{VdZaer`nY=B+Mt?$ww@;LiJz;`AG z4K>=P4XAB%9O4ap@g5r@U&f%WHoNAzKVH(Leg66DamtoD9DMV>CSU+sYwaq#$3F70 zY_ZB+>HR>=hNOk;`Qk& z{m;FJ>sagS)#Us4g5FhLW2g37Y2Utl{{WJ;4m|$=diR&)8+&W-9FD>T!bqu@$H|N( zEoGMVmxgdk7;Hm|fmJbAF5?)$sjS1QUM1B_&5^4u16J2jvHPdY0Ewc5eDBWORB-Yw zYPaUSW!pLB@A|ILTB^4=-x#dgyXn#~)%AZhry+aCIu z+RMA+zdiAfc!c*IA{!ZWkWCz6uCd%)=g;{ox5t~`Xd-;y+g=e>_c!ksL#{mY?KJWW z^oG0C^mAW7IO0?NIKBB%om1f}mK*8s1x<+7Kc6u%aPr~tP-|4X@nRZ7@qToG?Xvw7 z)6yhNACog0%Rn9Q{$M37is!po$F_Klry=~GuARMRB4LTb1-QxI$F{uiqvYf76;n_u zQ7M57XmWzqvk|KiDkX*@BQUlPNNV#k{{Zo(CB%%&=`?B}YAY@fnl`$MA>4V)QKK2> zUVpOV=o`j;lR4^d?o9IU-3#)#_mpc@{&~;l4L6(`#ed&51X!xmoL9RV;{&#;1-En0 zIp!e&uJXe@WBT}%(B$*aZeezK2i*Qf8EcN6JobQWHRiqL#T1wDr|Lf&EqBk_8*X=b z*9CdsQy?;CiC&Cvye$0V@#diFczo9D_u&%rRDKGA(+9U%Yx(6^${?+%YN4o!K`}9n@~n#{BGk_`@Hk`z-{x+CB~=T6y^IL$eCHV3KzP! z%l3{#lh(iH{v)E8HmiM~8Bj9y@6Wa?xfr|3^n@cOeO;k08{7hhabD3_6$Mlv3Tf-+ zB`(8t+o|UGiCPwk@yswB*T)~YFr1!i)>)x+W>$J>ozwpS8I^T!yPs=}Gr7#cb0}gX zwSmGKh<(Tfss?D$L@7?D4X}Nbtw<UK&93hP@> zDDi<^G8TEN$E+AzMloYnZ;a}_dI^NW*yEv(m*N&tYTh@;Zjmrz<9z4&_>2IQJEY5N z&Ubn9zUFKxt$)deOj+lh_ck!}_Jj|)5Dq1I<{;SPv!0jp`i7LY^S?FtmQlhL?=JP* zkIcHzbnpD_6gl3t>5pvXh^5g5!Rh?{Up?W@2$l#aRJKn49{zO#uFcn+k3M{Mf z^Q=fy1LwpkOjbSEw$Yp(mFxEWY8<^Dx%*dl^DnW@eY)P@1?U?0ipX=W{?LL&;Zr#s zG5Ef({EqEcJ^S>GEyDbN*tHEe0SBtI-&vfGY-``&@$3!dSob`yD~X}I>)In@4}@U- zz>8zBM&*ks9V#oA9fWNr^kKFKX`B?eU6>5`w9+u9+DgH^2eHHff_U2|V8_B#5@ z#V{H3e}8zrDzAT^=gc9lZ_ht)5{#WKBf*VjiO;G%nK8$`d{WV2qiV~Ej9RzL$TLr07fu1 z+lcqRDenm6RCGJz=clw81=Ux!>)u<1<51D$H{k~UPMdaKT1@KPZ zyZ-=^${f1?0Gj#Sr9`K$f6q58FMa1|*F2tIui7`M)&hdv!#YQjZF6J^iP}LPfX%4IvL?QHOc1?3OxS+WkSuwA~iH-Jo4osaz0c1gi&yI zUr*8g>_m5IN2i~enz~cXdrM)%?HorvUU!zoj=EQPHRV4tsNbQvZyd0E;%^<1!LLvG zFGj`c57|2stuq**o65~WeR!Gmc|AQLWqE|F$+#W#$J@p!WaZ2ycYEKAB>>g?M*~mX zzX#q>Cj&Dq!*NkK38%sXg^%73Whe*6CAq=5Ld6aJy?XuNEphscXf=b+KGMiNYp&t6 
z-XLMn2ljIP@6)7sXAWMmYmsz!l){DCyYJ`6T;a?Y8U<*IA!AT#ZZKFk66I9y1{VPe zFp3Fb9FS&KX4fjBa3F0|#ox>W42FJq?3;*|#C|FwyHFmIqijT;vFGMJA%WlP%Zykx zTy@{(8*ttBUeJAQMdq0C`{>sh--=J46s-lSMBHKD87pA7=F5f zrDoYZBcp%opOYT*Mh8cq5|zLz^OfI~?JI=~l>R^8Xz<#(T+D@FIM<$cf~$Og#siB_ zPCn3rwrVXO#4Mtpq$=AGWpNnlz!p{j)IOL%$@GoOub>=EQsI_SF4V9YJDEyxf8<@+ zIQWh@ZBrE+1^RRTA26cO&%d5gHFD*TL$@-<`?^N1zmY#aXRZVuZY2WZKw`fi_NCDI z(1JI|K6(3$+mvypa|Z}wn)QUGwv6|DL8f3~j~!sWK`IHs69nSMA z@HjeeGP6Bz&n_?Id_~l*EtUb2=GaP>@tr_p*jqj(vG%PVed1Fw+kP<<7{4dKW*(6KHp5a1wPOl5kC<~W2Z z0^r1TUrL0kCZ=X$eGRXeoKK#A*qM-jUV6$q1TWuzjLp*~@Em8Fh{+u1e-jy4FEg;! z-?dk6asL1&m5se%GY2?@cCS+w*e7|x6^J^lxAh1Ck-te%wQ1ry1gGSLL2X1B3#VT? z#Asu?Qij7P_s?i3ZmsJSrN*F}Cz}3TLMg7d{PkzdN*AcFX<;@E$~5Lyz{ft`y4)lf z9ar8Z)2-$G%jiuGtJkr)?({&@4BN{vAO0FxjFwdUZAZt?tz zWwnoJ)|0QI`1FMAjk2XgiEzOli9s$G09g>lt0iASmtd<9&(LOQi4|fb2x0*maqGkR zfpfRZJRY8EP|HqU?>r-|j{a+x@OyQZG6!#e)k}RL=o@-h-f@iI_7BGO^AJ_BF<`KB z_~|H`3_;xWO378OkDhguF^lJy=LD-WeLvZ-E%TTGPqPM=dqGPSpih_+W5fqgNPy{Z z4Y1E892}N&DM8ND&d@C4-*1@4jo!Qe0K0}vVxuW~9(nzgb^ib%@^`;nN|w!A_kyyk z{CGiTH|OpR;G*R+z(xAXa9L+nsZx~*Z90SsmVB-+@#`zw zGYGi9NntsE9utHp^Q5XVaW0`635{EK1fN0lsK)pu24I+pf-%e#GZoAd*;GD-{{WO` z7=dw*Bulx@2(E=|?GV~~@%sDA4K*m(dW-K;$ruv7FTZ}BI?dxZpKmoBGYqjgydLm_ z$1CT&Rp8e|oq0Hv-~az-WsGf%v2SCLG4`Tl$qZxP#=eBczNA7(S(+I;SwmXX7~5E* zC`lQ6B*rdDL}e=yEt2=o=ljp^&-<@)o%_Ddxz7DOugBx1;qb7c_0|JJO}wghr-58p zP%Onn%vzt|alQj5b35Zy+`zlb8E-#X3HiAjy{Q2eAv`X9epY~Jvz2BesWab_Aphd}>!TY**aFB5R=pE7A0Ct~0 zUhrJk)<0#iE{2nMJ*-_+di~WGxMq@;&gZbJI(|l<+O#f4?TaOX=$!W8nRAon=GnN* z?kBk0eq^}ya*?txOF=}394|leDLJK?-YvCMU#q*%;luZ6ahG}-yh;>29xjmN)w_Pz z&M+_K&z!f|>mTrP%Cf{(4_{=JPP#!^t;Rx<-V5@4fNSn0STKizs`k$x3$l5odD*u+ zj4Jau6z#tYm2>^n?KLZ#Z_}SHH_?li({oOKsEY^BXhOxr_e$=5@O0{``*V?}AUwsE zD=_)P3q7-n_K7d0YR6OTf838hAUx$gj6dk<)9TnQJaOszFu5{EN9tN%@`w5s$B~XB zF7fzECVu?t@GH%FSsDGkLWGP)v)C1Jcw^~)KmJl(g`J6^`k(8uRWC7bl*O(mFskei z?m;EnEtv^iBsJa$k<|gPiN*;{SVK`pa%G@Kpp(K!G3q&EQnUCIGrsrdp5FJYT3?>{ z_s?`^_}qO@i>Vv&_(?Goesx$w&Cp)q@AX${=c)gGCZ9bowcs8xHT+yGa&=}lb!~0q 
z@cY_#w|e&n!;i|uPiU5UiWcu%t*H4M4QNA+^8Gsunco6m>i?pCQ*QjoxCi}p_`TPw z=7~csl60-0{|X#&^&fG#?8or&zV^owRBbiqoV(dwYhC|xraac$3I$>hz6+-xRw~E~ zr`@vt0zC{!xu&%7oz%Tc2%of6D>**D`lJ5Qd{p-9Lp6^B%)8!HSrOU!hMa)GSXzR< zz%A0#F~|uOcrQ#u&j<&S^mJhR91rBdCo&T_fSQsV=kykLOL_G>8}n_q9(Q0REEC#> zhi}w}eIv#EkoE-B^_sU?8ypLw-%opLka$^5DO)=Eoxo{Hypy-=lk04%(b8$l1K@g;k1|?`4?~Aa9HWD zE@k!m$7h8VzB}_KX7heTmHNKPsS&P^$QNh6#Ru?mg&T+8j+wkHY)(6saQVvv!H&oQ zjf@D@%Sz5_jEqpvv&k25PRyIAR=>$Yd)(Wg)n#Fwo0>`IpU|Ih-W|9f2oz#&hIIPt zZ=Fa`-5rp*eYq%BM*Kg3AEl7vS?7<4V9(2IM>j+P&_40y<)<1AJ-?_UjEGNxmiBJN z5AzbY19);y&p6)psH3;_>BnD`+P4k!8InrFje#y7++{sJp zep>flW>-zbUSA41NqpY^pacc318yhM+t1( z`(0Q~-AvAY8Q;&ia{cM{?`)$#K_3%eCY|~|EjEvL{M2Q?rm@?}RqSPG-!gpfwO_(@ zMB$J655*=;>N%ZlO0E{|zYO`uK6Bf3fXZCTDUD6Z>cju@03!as1rP!{G6VyF0kVL{)5rPL1Cf86`$r-9mB)jm6cCN=*#u0a7k%>yIs4z2ZzV5 zH->-L(dYjUz}InZS7z02ms3%3U;myV+wef@hz-g(q#QLx`46zjsd%L3eDLSNp_kzS zsLp@rmYwxP>5px0rw*^&C;p#DqaBod&0i&MoFke&|0`}Ro$#ee-GnP^Q>-ct zFB;;A{i^fX)+v=*X^K)171Q;I10QG5dSh;Y;q1t0Cizwzd8kkKVCCq27_ z)6KH2>kmw1bQ5$xf}*eh9!HnQjUQg>B~PN1E1>#OTN}CXT#v*V_k+&UVM;Qk4^zAT z<_-u_UW^6}2PPcFZCzKJC|jhByrkx_-T2(jiByQz#Nn?}K#sxj1FaX>QM2CMpT^sH z*P{-O%q?ufKfitVy~C{k0Pn&rVOgRlh2P7b3IBxh%|jVwiJEmA=o?NJ^5ZBI|s;B%|kdQ@V@~yOeXSWc59P|D8?erMXY;AgCFrilj5oIXQ~;eq5t!U(+9yQ%)bD<8!44Y^aV4XA2no7G=1S9g<=tHt;v9A5~5 zm$sM6G1sDjKOb{5YtkPygV&>%;{zRMnNPMKoP zH}}jPk;eIf2HDA?f%gmRO0HF9XuSJ$nklyilwn4%XzJ=B9?_FPoMk%%HBG6gH+mV* zpA&v>HHw3vDfRkW7v&Wb!garziFa-8hPiJE<-dIKFMZ(qhsw{>#Wv@N*9~|Au%AACIm(zN{yT6hi|j?MEGR8@4XB} zsx?#>G@MR|hRT!awUL4fxqaCqhmC?giaUblSJ|Cb| zK){f{hu8VG3eN0xK4c_jJ-hlnA$j4(-ge@p50b0z-ikl;3Of5|Lcp+Muspi|ebJ!^ z?zUEf%6Qtw!?a!XrNivU9TgRS7FRS5rI=U#oEoj)72@AUt-rZM`+^9g#(e3Ik$V53 zVX59{P5xYQ-t!m(>4+Zb2f&|yGwRy>Hkr4Vf0@bL{b{$=1yfs}*#EH5nVami2FbRO zO?G{mdy8ZP z|3wSO+j#{BZKw1)Zp%oQpV5CH8D}plcko$$9t}V0gpJ-wl}KcDR-<(}7@W=uO8KGf zYkj@YDcG)_g_Ni{(<8f)+_-dk5bEJ*7Qa3g*$RZ7?&h)Z&o`Qa&4; z%dU>lGs1kD2^Yn)x>UC{8NBk8m*6-~Nc|%Zg0$Sn#nm_OhmS<0jZtwbH3 zzJA1=?o{M8V3E!}fC?b#>}YwY^4D|XcRv1I^&I<6aLMXQ-B;2OeYI{)u>Z 
zjDFzn9ewY$XZvAkev!IS*L3U2Ri5h-u{rh|qlCG0gWo?4AP3V%wm#e+7+jC=xXTRi z*ND!I5k3FmjH5S8Ms_ab>2#2k8$?2W^ylc-blIsd`+}#I_oJmg^<93SD01S<^71Qz zs0cfWxz5Xm4sk{kjnCIVOIV55T1 zjOsgQ7q-5fTk8$8y*EaS;WtJZ&X)Bvqz*2>fT_PO-M+?g6RIbe_(f#>M*LWfP z)yuc@wV^X~v|0_EC9-&Nrh@gemb;;H%&t3V=5o}bDpq1#P-k`-zBsUH=LK&_vZveL zW-@s6%Q=_idd&0s#w!^=g#p0TTj@G2RTLHB+q|-Z<&#y&|W)4x+u~kmkwXb?_p@m zeOvt8eY&OQ@5#Yy)3m4CcLgj{G+umOg-o7~G9auvuj0gl$X66rooVTH$xIuMyPvAg z{MDBia`M|d_gDa(vIKTWPGKAFalXIj-S?pPanIumi4H6}8cv1$tYD$S3_Xh$BN6#Z zmI1E~8)Zun#_OJ(fnU`ZjT%6*PSs5Bt#@CRUWqWAiYf2|nZ%By^kUx=zYpi-#~z9u zjOl*=7fAO#kV;f2&A)-jwgRhimzdIl0c^ zG}qCIuR0+rYMkK9hUz2nAJ)F$EdJfv4Sg>5z!9Nl)DXRtnlV}SlguI-rcTz89Ab45MXifrWfi`|lt(tgHCmmruFwfrH^f=UV9Gk5D)!)^~ z)#%PL@4{7fOUSe3#~Jx+M*mLO@c-Sv_wVh_b-U#p1IDRe?B8ETBDc&<3dG)>g+*E9 z)Rl!R%X$yOT^WjYcl!m(;Qd1;+Lz0-%z^p7<0c+qk{_CfdSzRodwl!9x0au_X?xyR zB>{f_$gYo;yCFSrvD-1ucX+omx^d>oR^RnJ#x(5J8C>qmz4ux8 zQ6jw;S*fYw#UIOjHGg8F&2Q|)uV8x&?V8x5VDZ!!GqF!s{{8Yl(y?yrjefg+X+ULk zN9O2IK79vh`uc6@rFYxsTg(832Xc%Q8zx}%UWJBf-6m(SihkJm?TwX4F-(+xL!~S1 z+8ZzE8N;a28>JD`yLwMCdoFr$|CGCxPK_r>n}C%LK3oOs3i0<$-H*O7NLXIU@1JMP zgZK8=BP4S87%z0~yuYvQWtxQ^<|lz)`j(ZIwV|%zx7w~WMvnSbohuEy{r%pq`pBJm z#eduQ?%1slKmG%plRoc|co;v^seV+B$r{bcO_Ldz=xE?8b5Ho)i z!|SJ(8XK<0tH1j<#KxKo#v3 zPrdkUu(@TCweToMO~W*Y{}3ND-(sJ1DzQ#Y-%drKwE2Q$YL4P5)|JUlF?_Ei>Gt}@ zATaR4r-jEF>?iA(T=DNfoiy>HolKY3LCp)r=}PiCDD?Iqqy}SzvBzWU;E%Ilimxmz zZ)Cko@kRgKo#dIN$mHQUrlhDb<_Ftfo|r%Ps0sdC`{`pBj-s2s!zq!OZi7g!vfkX> zx|($pu*>b}wR=C!69T$7YEgG#)wVWsTZrxM{NCPp&Y(q3-wpU$oP)E0P*#tmby-?V z0pNm*HD)aok`xr_k&_1oriCtO>pBmd=jAbx2UL8@)9Yq;zvyOqesxq42*kT#nxvG- z^%pj|0oak$iPj5a^;==Fo0o?8c7BB)d?6I6(BgZcFUqGkrDzt`>shr~qS4_a&ms=l z+nyt!HQoUvXk`7!{L(n6+a~rth1)@3)$`1`X)Z-$?{5cuxJ)0%MzMROBpx8Qk%%M@ zZz;*GfXy)Xp2f4v4Cg-|50KXIrj7C9`_2nGhAbmR13|eAOH4!S;k%#j_IEUMTBCn8J+OQc=QlDmbLrhm5W65;^u&piYD&Pv(NRPH z(kZEh?P+#(QcHP5b+5AdyT&W}zhMGN@zSj>r>-ln=o}sPW%Ya@#U=sNSDPH*R86^_=;Ipww`zccs}$ z$nb4$OFl)<(Y9*(EzuiE)M$)_6FxaJ0esIO{#|e|GTXxlo}pWoM~!+ni@4m+;4z+p 
z9?x2ux%EU!Yb+5NMuC@gGiV-bAo5qv-gj5WeAjV*MrD*F7YkQEetWZ3=`*t)S=zQx z{_onx*8a91eai2*h>JR&wb)(B#-k(I05MM=rAzc1A()p4nj#PC2B>f24|xCJ_sfYT z-GP?1%Ug^KONkPvj^?`e0U0$gv;5s!_wZi_{0VoW{bj}NPeQb-7O8(NUA;2U^uI}+ zcMJ_qNn701Xne^1j-B}RVMkH7Sr`DYKU z4UM7KIi!6X=V!x1U{jq6sQ^~A5jX?y-Yvj2lAfNycYEZb!HVo`Gu`}|PUA!-g>_s` zKX>3y*6JmXv(_ z`BF@d%jnn_ncM4uMMFQo-k>W_*$YGZUfABD6A9^+;v9pS^iBApy;Fpx_{SKU6^ALM zjgmkXF6mndjj{I@Yau~NHV-rlgX6bfpXy!hsWAWL4k`?z^VnwsndJ?l$=)1Vsj0&n zUq3ThkDm6zS|2Hr>To*TLtq!Iu25sY=$plOCm=*vSD4d9HVD#ASw)xJIq612?wG&K z1A`oyz;Phlj2(p{d<#60kkp}NdqdE2%!>5a;rqy`^Koj zlBI|HR!FDRQSo`FVVR-hgXNh&2jQaEOAk9e|9;s1_rv_s*pK~1`lYPuT?5C3B=fva z9Cd@{0wC8)-}cEGJ%_D^qjkCZO!vU<$MT=p&J!D>U!H#Ww`UgpM|>~;XY}js(FbR) zpRpT7DO|tx^=Ocdp^eLLmw&wdeb^D{@nP-P)|aCnAmz=VzR3RorIE+=zIwb}`STH8 z=pkj*B`;0Um2@jdxCzq6pHvWf?pY7ll-9`W`G{8oI=>n4U_j6)uoL;?P?z-HJgSe0r#SPoP;Q;j}GW3f^DyIX3fI zCyg8w=#w%ZF$pb4tWBel4YBg_rZ9x|j+jP{{Su#r%?a67&a;w|qmMUuT*~s)NqXio zoY(3AoCkaF-j3{9Zm?GOkEq0J$8lN!+f`Y<4gKGzZo@WA^6J-SSBXLsTC=rWevm?j zeeDbpJS*+^8S}>C)RGrZc%~~b;AVsIBdNeLEv*|N1h{1&*Q;uK$A0B%Hb=a~=EbDk zei>Fm+iQQ>u@b#_*{EV!aGy4GQW1Ctv2zyxNLO=42q*HRFJ+c~hUu~R{eG67MwMrV z+tj?z)Uw+-wbKD0E1NVDb+s%uU{F@&xX^BdK(7eKD+B&`epGjOfLb#|aEu8SE;T^k z?T=AD4F>!amDjKaB48_+J62BC7M%5p1-hClY+A0>yn}AETtzyE01N}nXoq?i0E25B z-FB4`0JL>D5}la`%8|xuBuhQ0+%_S|(X~;24IrGFfwIWh$mchem`CR$X-k2&V!>4D zsUir_H}|4lH3s5)?0z%Dy6h6D?QWuVaA|IKkbv=D^o$+I4Q;{c^^~p-Dq1A_%LL?9m3O9A!d*f z5xNSdIm6NerG6?#x!OAl;mg)JXYkp8cy8g%MRN;-W_P(XR30BORHX(`;Cf!xnFp^S z<8CkG#ER?IE}7w#jfI#Ij5s7_Pl{G{g|i(DdQ^@G1Sb1h$ZW^2@+#pYZY^H+=Ha}cj< zvdm-0X)GW21)(4kT*;LIc)CsM{XR7fEwrE{Cm+QZgseE_fpKMQOP)@eNPch7s$d`V zg2U<9;xz8nYnwS0RlXB_;sgdSPOp7u99S7>MJYst|7G9ShFReG9{G40L3z-XC@0$@ zSE~*y@PlfymZfIkzNHe9<&t%iz%_sUczAeTsK7;(DBrPrPBF<6y)LCjKdZ}?2Du!d zV*t2+|5mX^3((r|GEaXOU^{8wIkO}u6N-VG!0z!F0C<{U2DCUMZMRIIEmIjEESe_7 zp$d?t471r3%UVGVgqDHE%CTgN7F9WKpp_gc*r@d!C;0c9m*nahNXRw5K0pEeY{X>4 z6J|?=%EY6ev-T1tAeuG3KbW?(*xhy1?4!lH7L??g$E4(|xcS=>jyQpqJmp~Wt=T@I 
z#bZWJ0>v3q*0rIF-`#N*Z@YM|ur&GP&w@gBvEu;77v&{Q9d9~X$f~DR3t^e^I+zdm zo5FSsEkp9fQ7g=raCaRpd~d#rw{4$UYk_O2RJJ z=x9)`wlqbOqBqjW@s+@UY?H=b8$~*CvxvetAzN06k~8z&f*)aiBt`% zeQYf`HIKLf&NErlwKu2T9IPGaO)a?_uRb7)cMnynQAw(lF6(VGwk)o>W`cbiudK*MPOQyqNtHX%pK*?h8Q_RQ1S$gDGgNCDP!J&S7<=JrfFaiK-Z zJS@R^Nj|E!b~+@2-ogdikvW4RC-}d9=XdRN6FUn2IqF+A#k&aA1t|k?oGjEssE}_K z0FNQ>5PPdXPpT|yJKF?q`HRNC6`@})EYFLBnAqeCH|45}Tj$9-{{|;X<=C<0P0R~W zNBi~zIcg)IQbU-i&pT1-@7&5m!D7nUe3GomCT)DB`bThPm~)vsUP7*-lAhJeL(%01 z5U`LYem_c^5uB6`pXqfHT}n5&)=(sl=pkzEF481_zkC-*c`J-0 zfr2eFU73&;(Z#CTD)TY8fUA}fj7{eq_MzYrQZ}CO*;@{fBjndNMaYPWhGG?rlgTQn zJW0I}#OtbOB!|;x+M^DYzGaK7VkUTlERgZk$dMISKXcZ%?*+wt{8G+qLPZQuU|vj( z)Z241J6C5CUhc$VO#oLlxP~w$wyINj6)7zmKnZi1h!9diPr)4Saa(jLdI$+=X0U~# zLlX!F3&e$;z;h` zE38#IL&j2Ev^~N7k+m#a)@5wTgwqx|AU62R#mr$!#bXuHcpfdV^k zTl45;n9gN?6A12cw1D$m%e;G=R!sNlck=^Zo+t{M+4DH?-g=9oT7`cskCRj>e`an% zDWlZsaa)}%YKwlCdsK1DHDw`hf48wgzkftbNL9qlvH5`+msyA$pa{9D0e+l;*c-#K zc!hzW6v*u9R>&-o3y0MQCF?R9-lzLa;~L#w`^>|l#=m6&396&&R|{8@hwoOwdd&Hz=M(z-(0xUyM!g+C$a$4ZOTJ3(gn}Q zIj^CNe5_S2nwx%D73wf+-0$2CinWGb?0GxxVB;u9nM$n)@Y_zupG=Vi%?KDL0l1`@ zB|qk5x?xP>$(F{LHZ4EM?W$y4EsSrBlVU73#RS=F8{-RajSz%l;*`olZ=tQ>g(^VN%wgv!KYlq zPsn;4Kw~;|`#BB7pOOTU4T_SRbJ*^^WG-hZ3IX6@ghe4(n1u-!&ivhz?1piUlHy={ zcS|qn-`6-U$dTl-9YNUhFLV2-;eWnX@c;gFQj?LS)O*X5~hNls-~HIR?%;Lmp4|&?TIEOkhhi27U%iGnut{p ziN2Y9yOkzt>iuz%!Q1$`1Nj1$QIYWQy`7TuP=Ba)a0m-R$e)8}VEtWF&%#f++WTAj zK8bQhz<&GRJ5CDC$)R7qOo(vmAy^w5n=)~p%*qXuRZVU=HQCnw>i=40LYl-9^ z?B906n>E`bOmw?~f6+484(tOww=^yDdx?)iL9u_Nxxy77Lg8ntxEqRmWjrDZ1|98BSTlO;z8}VOY!9I zaf+pyTZT$*F`&#Bq`wT=j53A1pf zFw3&L5M)W-hw504o%iKEKK)+cau7`P_(^bDj08>7y<8fta2hn8hn+&478jdkrMSqs z7&)s(Y|R~Ye{m}_O7R5ooRGI(Ki{D8+oQ=BK%yZ85f~_&7D@JKp-EH^VO53EJ4OdstxJZ9Red2xstO>&Plxih7jS?b+>5Ot^*zX>tLpUZ4n zlinZ4jvs|smuHg?XCHVCsCH)m`K{{vMdjhQOP6j20Mrt5H3Rfk1Gc{GXuThJ$e_VQ zA6G)dHSf_);VCIlG;FngQ#UBlj7bC+sc1a_v@{^#y}@^qQcI$CBsC)JZSvbd$l6*D zz>y*(ea0r1KaUTK?xi|)WQwYm-NX4wnrEs^V%jmklIcnpd<%I?hu%g>TEa|Blriik 
zDnYvkZ*gU?5yR#x$uToPx}r7Jqs`%IAoQEb9zw(uW6mqj5%{0Cd6AV90li@gAOf&L z%r`kc7Dz2^M&K}vRmeGs4Tlz3aI&-4T5P~sup7=Oc>M-gfP{Kq|0*mxIayvRKX#Q_ zYXLb&2XlMhOlc!enZQ22TbD`F9tQKyRwhZMom03uW)@s{4uvZvmtt697Cbc<83)b$ zvVf;f7tja?zD)jGcl=d^b1E>a)1s%;@#x^6@{Z+n6O9~M1|O;>$3v0~AYBvB*Ahe{ z@!^+1F|rOEp!d51p8!3&p8TlLREoUgNNPSd9kqs#bP=Gq{Z15-|ITf2c-8N*l;GN_ z^LD`cnFt@Y^4wzWGs+}8_WUKe`a%b6ZkIw~c&h>i9ehJL8|I<)dUB5Ts!frU3j2}H zWoP?^YEfK(ABcsy+|S`z<+{{$$F>&%3!u;;Xm)xIgV0l@qE;g2OCrIpb-i~2p_HN7 zXIE)ES1hc8pJ}35g_{JC5?8!fQ_1GbKfMSopULz}RWZPXG|&Fvf|DwW7oc%pp0}W& zH#G^4%@9!M>otL4Z<1do*`^RUFz=6%#-WnV<6hPSj2N#pqAMObffoR8eQB^6) zL5T9Cv})e8r_@T7vqE<_v_yOh#q8Zl$N%c`&BI1xE?w!=QF}IlYvqWq{Q)0HeR?E0 zz_%CqfvmQ93#AxB5z4w!Eh}1TSEZBsYYPmX3r_$V5#@k}2e6PeF7zjO_w%({^zOv;lWuv)*U z2Xd^Qh*VfFvt*}JvK%;cid&>^HkSk+=wJ4>9&_h15_?E)vNQc<@*Sr;kL^b$AC%^GxhDDKd*vD-|64BobmJ_<<4P-xKPq z?PnHv`Ef*DGm_E9L6+2zDV8x2M6o{M9y3^ii#mBGVP#T`@kfIX(-;0G1aTT;os85{ z_nZO(ZL+4!EVINQ($c$rW!iO*;~=mE1W!3-wYA}G)3_E9Wl?8~+yhjS?vehg?kY;! z*HGR0{Zu?q6yfxzuTtR6j_@_(rUlJ4z+lp_Fubj@X?aDP1l(pDsc{y`o=G?0ep{is z3gRmlu$*ji#~2V%REX9X;1@)eAt2qakBGYnw%3a$JHdiW+|s*oOT6s_)r_%M`3drs z)R1ScH(dS$C`F70W(YkxQS+;b;<|MU%=kBht|GAl9um48er>!PF!w(?pf9de{9!c zniLzR=~=qUn+0pmd$_x$(3gxQM|f#KYL43a_&%57)%t18v#;xPpNAFr;1x`YR+3u+ z&mpvKQ-s0JT73`(1OmFo0XbdE4WLlnu~iI>riGQF3Nir9$Ir`d^Z|%g>?%F zvAPRGIEKPEYddJwD&-)BN!fD7#OX(D3s|Ni^;>4tg1Ajxivm^Q?6JZ$;9~$$=^9*= zG(S$cOoX$FtBza=m_^UinXzk*5ReG`78IPR(|sf*a?b<}-0ggi+*T66q_?Ie!;igf zP9Ukk_j7Wzv{ga4O*K5Xc(AQHFk9_z?OS|x7m)6>H*l@if$fk9uj=q&*l5w zS5F6b3`law$SsBWZO7()syvOPm1tmY+DRqMc88aM0f=lBGR!u~iXNk;-kk(V-~Y4w zH-2N|q0Y%A0U3^w2VK{E-qo5}aiBxgbc)sh*A_={P0mQ@Z_HWQR9n%Fk7IJax!saO zn#*Fv+&>jRoENBR57Br)fH#!etN6C_?Ranj{tXiN=p=RLVSqca zoSQ8l6(228!T~Mb4OmR|UL-sjv6CHCKrazT(nIg~gjoaLR*OOfE`2i9&bRa330MrJ}EiNKp^-UHg{_R9%77d~?G6;Hc|J554JxGOZX(fBQH8=-x&0oZa z*JuR`)Q_<6MG90=1P~7 zQXaFWq%QpB9?qlz)?+a$d1}eZT132Hb-Q44Lr8%&ckCs4dwV`;iKSBCd#Wdmyb0E% zEWCQ{ED8-#?y1=W3`V{*1UvUn$U7t8jHMm 
z2lG(8vX%mXok>?-ax0iaeR_P7oNR>Agk#gpCv@5iYO)_pTNpS&ZIOk32++MqGVYGHMXn}y=S9iNRRTkLAkYM<`Z&2lB(Ur# zr0FxJ%>2aJ64YxJX(I))@ukaaboTA{{a-!Y-o{o2Pi#9)$<=Be`S_yu`|b_*A$ zlaGvRBJN38W92z&vZC`%SP{&RgocQ)?(TGUwNMoF43+#*l}qT4Gm!MRTq@eJeWq%1 z@#M1#aqo?14)=wmP0UF!w9WRI@V4Uc3V#;bjo(xh1^>KirC466qQUn*a2TkrR8K1E|iK5?;5{O}r4iFkW81ij_Ts=`hp~Q3v`GXFARH@;N zqE1_7|8-b6;|f*}Z{5xugx^>;^)x?oYjBeHl1a%6yrxr###!j3{f$_pEFa6U(i8@w zl2HB3x`20W;^BhX$CLBU=esmSN-S*J3kjPU41(l63k`wOcUOK3BGXiJ?B zaGAY{(C$STo$!0Zgy`sC>)tYvsC?%}3=JBhWmZn_%cylxWSE9Wq1Rif{`{tQG{wW8 zvW$1m=;F~fZc(%Id`gxBj%v7yrzOYMLpd6sY40El`9vjsMZHc^5Q}mWlpV-s+SJE= zXDyJInG6JNqu97m!`=*^Ohkj~6vF8-xQ4|iY|HW@Yt4&7O6h%KyD6*as3KEw!Z(Ft z4w%cT(h^D@VG>&7Nr`++{ExumBQjQO98 z4~i6p@&v<51ro#o=!R<3MjFCD>~e`}yOV6eO3gBRR6wc*ZfEnlG=laJ4IbaCB@ES0{eYT6fYT_T>yT1G4uW>g= zRB44LI(e8zp%+i_4973hNxVRiCPF>(vVkOw*=@rZw)1*2448fwNQ82H>tzKc% zq5#ux8?EKc9h_vDj9(O^Ak>?u-7i4Q`KDrBILp+i`DP7KDC`duvN?N_X;cBV(a|kw z_4jkN9+QoDEvZTPdY2w=t7Hh9)CaxyHv0@B8T zF=WaEgdoa~m;!AgC(S?7G+b)Z&mj2ab=|0$NT2|j^CE9~6X_=oHUx-0)73@rJuMBR zDjV<6d0mWIC@t%+%vYDva_SHygE(A|#Zw0%;v1noV@qTYH(=K_1ua^3xvYZj$Z53K z*BlkjH&%}@qy)i+XhWhx!c8B=&6bGP0=g_+uP!hn$Gn?WQ7bQMyg8H|YtdO=KP%D% zH+g_B1eJIM1Dv#)VdeIVR4tGR_eBC)9QI4@n^=?(uP2%Vi-yOLi&wRe6M4jWB0w1- z)+Vh;h=7%cEREGIytMkw{>BAv_;Yl zlplT*H0XgA3I@^P4bmYbZAT7;K8S;=Ql%0Lo-F6o^xenxO3YrxTNvP5Jh<{TWV>0x zQz&@+hzb0VHhk|cTJ8c>>FXxvT!&ZuN)$0QAHz^qA`I8`)U};Uk|J^=R@-u-0&e_u zm6%C_=!{S+wJMXP__S1F#1=2v>y3K|R78CX(Zy7gx)4NV2@6D~EGQRNZHv$>AJ?v# z1)zP&z#8*i%iGdbUsy$Pw>&voI?sy<^obP=Az+nPW;sxR%wFh?&FLH}?x)F;_-BBu zs&Bppf9gV;5>|}i4bA!U<&0LJai}(TRWovOvoTWNk#hTOe znvSdz4}H1l>RqkD&+v7CDrQ@FAkNQj_uIG3Tz@Ld5xCFadT_p2R^#k1V7i2)8NR zU%4p56F2#oVRf7)-q+BoBM?1p(z!g!BY=DDiQ{R;R!Yr1{zX{1(VWKobpFOwJR2$P z+C>yVRs2CGN4F~+qm?GzvV_r4(6%DhIfAheU|*xM@OcGiB_(AX1$;}rCrBodA}>&6 z*Y7IBBl>y+Jlr6m;TjFRvOFeH1hmnrBG3y6~ExeCP~r92MnJdhGNb}_?@Ba6-jT=>?T3P|C`28^C5#Fds*bU+3W zs*UA=uq>eTJ?vdU8Sp}hjdYEDqLEP<@V3defS|N&C541xG`6wCVjU}C`$Pz{1hVIr zM+4Pt<^ber6h1Zb7R)FL%1+_oGd&Y$p43_Rkw$5X51Ql&yK5or@+k`InJtqwi2$e| 
zYIdJ&Kb`O&H?9>*PEqiX^cEx4s9z7cz2XAp4MSy!hSmv)AwQ6C)GiH9(F{fG{qTo(EX36TgHH z(VbjTagj3z_Er^UnNuJV8TtS|7;UnR`(0C?H9rI-$_u)ra%n!#zMYDTQN|t!7qWxz zxdVU>0KS$M1+Z-h6zNXqRZ7h#+TD@@m4mkU$i94jkZ5xw^H$avM~Au~Mzhum3sP;gDudnkBA_W6-#yh)Kx_4|OClB$a z?3(h2KMO3?povOn%kqsU>AdExewBg)EqvZ{j$JACKkMcMf>}Xqh<16x$pPDjDxou( zWcD(kNaGBWj}xVjx?}yfYqA%751hGn}Efxg`ty12I#f-b(_ZJbdIA6NH*h z8x=jgLT|#-iNLzs{mN&_c&UsEKyoEeC(lZ)oGR9wHJw39R}n`iXCYbjUT>t7Qsu@< zXw+M^HX+?4pJ{n$bu1~SpA^gVXX!Ct3)Jb#SN@7yNdS;JfEs+_ozvs}wJH=f>u0Vp}IbL$aJ8 zT4uo4w8$a8dESNb-r$04)ixLCfqc*S(HeZ!Z1IXmNEBAA!Rc)moC1^>ry2o#bm6W^ zQ2;0wgdbYv(3{-n(Ljh2;1jhkegiVoi!?KDxsps%mIOt7GugnjTR&W@If|0%?(keP zsx&cL3dnT8marm=G~K<$vApZ+`H_Kj>CCnfZaEN(2aW}yla{(Z54Vw3SAI4B4v*5P zputUPe)iV-5JrIpObW(jB+_&aT4oZEOb08*CdV#kXes%sArCYy1!_mqX*tXuk_WEh z+YlxXr*566qIIfKWR1HyTCg^C52k;-8wn~Xg)$6)U=9~FO^#$=Hp5bB3~zo8Dvqp? z<+xv{8BvlCKW=nVv-rM5uT;KVTRw~dRTbYi&s5%!Iu!YHbb(WnIyqGeC}Qpu2H;tL z#xB#w_^gM?e(yc2EJ4)UuT6~5X7oI@vAmFvyT+n^UhPTkK4Y{i3+2Hq4cHw z%lO?_-bJjV*t}rpls#1zQPwK!7iC+R$63|m4$_eiwzRrwt?J7Lao(D>)5%RQoC(i? 
z!Ma!a>uO>2%|?Wk8~oN5x9O0Q`65(UXRd%74#@I!`Ekn)#U-msQDPw%UDD>DUd2|) zt(aC2fQn5d+)-5HNWBZnhpakDE^*o7)HeD~YAY6XBH*NKf#BC}7}ipzy%V%Mvxdh3 z5ite3FP{N2h>RgErp#uBa;puo`GB6nfO|Dz>AiG@9>jyAN@EkyO1yzUM0DMtv?ILV zC};z~Jkk$bkraXHfHxTeFjfSQa2b~>W62sX!W1_FSlhcT`V0+R^7XB~zk6eWosD1z!7T#3hP-;Mw} zBKfHHZ1c^<3qMbY0IIO*-$36kV~FX94^9yCV3SJ$RDsY86RwY4M2vT##1wyjODY?rP-s$1 zeMDR4W6V%`6^8|a*r37M8x84D!SdksX+f6Mlcfh|SJ_rMivgfV-%j#!B16ufw^a3+ zx9kZQW8prtZy<_LYCn2!r@fOG2V0c)1iKTk@Usdng5>slxaZpAe>Kd{SGWQ(Utfu= zrM&e(#Xd7Kg~>KfSr-ZYf>_9ULRqpZ7Y_Cs14SVy5S@*c`@-A z0AJq#TM>%+8c*LM7E0JT{>_LEWktGU0&%t zq7jE$8_h6#&$gw(j`g7Og%Gr`xcSb{}@ z+r@zPkl8d}m$P4fK6Zih+$fXp8jt%~er94Wg?vh0K}wTSDkM{h7d}Eci$F!L*08HDX6U)Y@SZcD(oF7 zx?x(N$~^Pku`KPR2CBN-91&%12eE{1&wWw7 zvh8?)^mFmkXnekwiJbv_%D67Ya*;a%dI9Ar!fMKyQsJ+yz@>jCsKHBMIuoH}Sx^0< z32mNJc%2G43!MdhO73nu~>mw=FB8LbRkdR-f5P*_ zb6wB-dB0xw-P{0Fc*=B0X70n74)R0YiM6&RXA7Z4~3_*o;Hf!MSLw*XKGg0 zNv4givy5QN&ve5kP@{vne2~3{IMKF=eF+icw1jB~5k3jsT~O{FqJ%uKk)a8QVsM6D zEFy}WVZ*0G5HG-+;n5_luW;`m3xY4%DHC~AgD-!G$P0cJ$fIsk5j8TD(p2aw)GN}P zzL6uB-dNJ=RBD?9+Mg*xjFiK=dEX=k`a?7e+Qcg{?!Qn1@ii>Q<~Zv6e~(tOq4%dO zFxdH*L$&!!^9*PsNx~qx+8|0dk%X=*GMz@0 z|9-f0-&60cK+q@^Nog*Q5Artus2yh2b0|_9%YPfu{F22+MVyFnP*_zFH%}h>DGAJ1 z@<~Xa+V6}&d03K2-c-HOVtr%q=r)K+%G7uEgHW@+dcNv;7+RZ~VXI%{@x`t43LQz< zg4~`6cDcPhb|B+34ez4&Z|>cdvE+rt{Xcglo0OtAPdaA(Gw36#T^K3>yQkX@?-2)%dG9*T$dMi22l zgezh^x<>h#Qq()$mSz*@P)oP zxJjSC4T(`At!LYO#6Eir1S^ltwpcl$bZ|l-NbuorV`M*Fm;6J;#i!+=A+3@tRzu9# zl1D`n8S5)=Y2V2DZ9w$hHhcE%3$XAqR|}Wq)mf%a(ou1F=e9aGZc=dZ2*1D z#KDVw_;i2Jn?W_&)Eocr!!9o!)fDeEZK<5VCe${<7ZrT`uEubbfeWuq6hjB9RzQ{T zO3aEuY0OzmbwFf`4~8=qnW9GUY&+Q?sBzzl*2JjIZ@ zb*aFmG(;@dIibA&5;7mn!Zz*&QwYJ96!n5z3SKC5!~xVM|X2jt1uFEPpk^9eNg8yEQUsOQs)s_H43FZ z4?48T_+!*m8pig-9O~8J4UcMzK6r)w34VK3poM3W@XVHjfj02BpZgxEx$y$s(w3n6 zbJU09Iajcl0i843M+rq^%&dFJOAH!Z8ES(JHD-<1>C}i)x9H(aV+7Fm-E-#3nkcm3 zw8B8bOQt{Lgk%bg+dpE8TM+Pw|DX^P|IC4;@n8mM#g^ZVws}x6+4Ri64xg2_w7)pO zn6uWWQS75?3*9=Z`tutATtaP)0btsOPi{mk;KHC(7-Jh?L6W{>KUkv3P5Uw+B7YQ0 zJ7^1KgyD-WcLm4yFqk19i9B3c9StWYuC?*W 
zJ0Ca(2g?_Bxp$D;$TEoP1VmNzc+{}TR9Q?j#?h)b9=8x@#$J8jun5wy7$yj;-|itp zO#m{Th}LWtuA@?LYsitmbsqbe z+U(t-b`dspm;g+ca)$z>aBzSAHiMrL87KDtUViJ=;^lPpDL~bH8yhe+8$#TKOab-v zV`N%rpqXeHz{nyfldBvob2AL{c_1CYkBv~+tAk#VRk`<)-$_pp zoW{=@H1~Z^;1y)hO!nu?2a}2&jpM4=JzB|Mh3E|M1q8}jIVNEePI>e;zPH&t?>_C| zOa+>$mGCSaaxp{ms(X3y*tAvdJ^Jf>)F@t{A5p!FtfpcB84C_xbrD#2QLGj0!NFaH zL9a`xysTT%X%m7y8V#|juC#9jB5~gACZykW4`@}5y3pgH0>QP{ zr72%eqxPiZx_+87$bJ!%`dRc2roBZ;dKmA-OG5BCC0iRxI93$k@1_Fa#zdmX0rA|f zaqTS(8Hh{uf{|RoU~LcU#eD6&bvrNjs8hE)zX4`Y6QIY|PNxw@@}CVFHr`O$z&Pz~nB7qGtAjL{i@n2Ig-R?e7{aOx#ucRncTMsnGsU|vxaG?XHD}sH;c;WI zw7KzAqc)2@-4~S#(V8!li*dTxB_!1h>|RZ3#|OCJp@^GV{%2D^%)*xW5a6nGH^LM< z{p!OWm;3|Gr`0G|m8)Y)CRYZX_|j$43FEWs?&Xt7jB>H-gL;$09Z=aXRYaPO|1Ra> zW)aXDh%P5yw}IRVST*} z40ceZTOYT4QEOfld?m-0x_>gsG_ks3ZTXS8Htl8PsmZ|HO`HJ6 zfdd_JBkn!~ii{sG}|q@pA%oAuEL`mQ_e7MMzBx_DHaekZpwC#fpI& zpc`TXj4kw;MG0D|=Yh_OpuGqi-{Fw&%fIhUIzr2djHeFZ-B3HZPPSyaMw2bGzOTKm z`h6ip^(&Xia*7h6!Lb)Mt3a_XLToSz6aFQf(Cx}-o{#eug?o%eIyppg=lL>)St;^M z85~T8YFb%(@|DfifB{eU14u5?jv2}1D#kHF19Y^9d2u544JNs*q{n(`h{Z!}EY*(#f;Xr8rEeu5OzEt-Bd z1Z&7p(%y_Cw_r4^cTFX63!aQefSbkpp2txL@E&+$xe0h|$oru> z?T99SGpH|k+}Ms^OFNd;sic_(I)FANV8~g9Fm8-s-Yp{CYF^?j*d{yA1V@u`TLYTX zY?)3O1*WQH{*)&*dJIFl)=-V&{&w$s%@H0rK=Jlw-}B7cNJW@~HT}hCFDpR^YK}~d z--o29r$_)>9xSm2|i&fouH@b_(}^Yus^WP@v&XDwK$&rKs%)C`37%-ZNJbQ*Fc& z#~r2~r`(LaMO-!ANm0h(x|tG({2Eqw;`t95w&@%;eNn>C+V^qawJh0LlGWWPxpwe2 zd`=R47`&6$$81}Y`3kk%6?QDv)DsGN#uViOEl5IRY~BXeY$-S}|C_r8br5U}hY?XIHS zud^EW*!IG35Xt#Bg5CkmbZY#av{~pPc395{DTmk)$@09hjQW90caBlo(XPfU26Ls^5^<9e8GI%$j55T z#QlL!;$I&6_D+=Lu#rB0j;ol5w^9+s#I2eNd7qu>Oftx|^yXV^P5n`UM!!`|M&m32WU zEmwX)z|y8qIALrPm9B#hQ=TLldyt#Mhr$15x|cCABE>l=?=6nG zSuaImE9}Fr){))sg`bm?XmcvIJm|2J{IfYw$GFVfbLsU!16Xqd2oDATq3wrg3`*Tn%Msy>H1(6=jJ|YycR*hYhRj%-pdXaksnR3Njc~KRi;!w3|BQ?^+)#M;Wns{$ z|Kq_R&lK;@)Hfc%5ITriWn;8gk)@GmaVopFwHmgWK&v89GlD-Pp#FjfP0hPCocaEe zqw_1qT#svr;$zICSbF*|S1x*YD=;Hk~fAq(c%7jp90Y+x2%G*0M5>kA5jg zfJ71_(CfTiZ;9TnNcXGrWi@)=g6eHatp1$n`73@*?%_N_W3npMms6BXTlT-iniMe< 
z(1rs$N4yXMf(4Urx9(bUjsOdPtkV-;4<&REynX*>28YwU(Ie{R(izG5 zF&73cXaARkS*+BKqIf%K$#mI;btH@m%ylHxSeV0w1Kem8tYce!&{*c;!opbUPD{RX zOW||Y0gbyiyzQ=N=D7JCx5h@7S2GIF-LrY9y6f~P{HN#ZjJH~&Wu%vFIuR=Jq&92U z$g{_a^{qe(;_CiV)Arw=1B`ZFpL{mCTDFdj&Jk-c?SIz}^la(hXgX?rzwuGbOOQ1o zVNrB2?WnHXBB;R}=7&c!K*-lvq|DT1C_qOU*qSRjtD*< z?ZD0vVhAdT7yWj=yXqzO{7fL_^YS_f_lWoO%jg>` zU*9r1)p-Bz=1%f)pj!VtbUEZGN{|*vVZfHKcWes)2j2{H4fo#RBWM(O>d6zPSmfqP zUb*nR9BHP>*u_VdBA#^%^b^d}1`p|Q=qLAiVPPOxa(7e-mwwN6197Xu0goX}yM!X8 z5qckV(m`=-?b`imun|PEy(gI)%2sWpkS{FL|ENk?(est$-aI$Oy+IzldEo3*YVCeU z7C&8=;u!H(#6BEjxLR<9TiyMnJUfB+jD7LVxn+-+UrOE@qwmsDJ3x1T9eH&zWXEm7 zvsVu{^j!RFVLmyoFZJYxZAw0G1Zy8gc3d#l`T45hb){Sxr54S~SFR=$kDVGSHd3B` zBr)&rd!MU13QNhJJ`%8p6+c=-G)HkmAKCRr@s|#kBr}n4!ui|=fSdYu&krVu<&017 zghgMLFTaB6EJHaPZcKpH=%qVMgP%l+RNXOYTuo@50jd(M{~McSua{5Q?xSX&;vl5@ zE9?w!dR&W{g2XkOQ^^TE?3ja07}Z=lEj;7#uwya+x}ogjF5Zxp|I+TUYCnW&@wvGvYHFBHHnY8+pS#=-p zCnAfkb@~f@2wdq!GWZzv*U*H+Y zdAT3{`NC^ss%qfl-1`EMX;($=)yE^SZJ!%?8DZ{>J-JNiQI9iV8W~&;I7PlRS$2~Z zm{wCZ7;Y+AbYV|Cev)}NAw^9~&|k?dW9o7Bw-KFiQ9R!!mm7QZk0nr#Ys)^H(agM= z5zOPQt8y&m!g&2#c<${+*XF(&Cw&ihKh~c5D|NcZy2E|tkr1{;7y{bFIZfWAbeKOF zzUZ!Q1D9b^0ij27Fe|}9^Fwl+fpYlH*Yoo@r$!#H2UV@OrCpfGjY^n0?P`W~%F#^i(D&z%mFLa_WUJYW#q-e;i`2zbVxDw6^PIYnWW>3${OYLn**1KS*X+ zvKZZGM{%`53iXlh9nmHhYZU_i!)q$-2V5DhfY=gzAvtMO4VeV`Jb`Ur9=z{(A}w`1 zprL~#ITFsEbP&I^e?~L@q{Gae%!y$hbaf2)D8oD6q~QSbiut_A^{|PnZ@^<$6I8CF z+)breD7S}}#^(D+e$4ilR2pLfOQ&@nDWR2X0ryh=T-#$*Hdk(70Y!);UuuCUCq#8f zYn`8_VWW)+Pm|qOPP%mPLsPsY|8b;=|5PFZC>7w6We}11xm|HdbadL9@#-+ha7vtA)zNc*t&iyvdTx~d> zAX=A@O`O`hw!%)o{AV3wd1CLt#SdTa0JT;pO#b-a`f9njWo7kb5!$@g^v2hL(-}D!gTjFxHe$dLFOVB;h1E(VDhYNDOyv3q275UR+2pWKW6v}(a{t9sLb zf3Z(#z(0$S0ZSv`y0+q;on!x1X%iddsA()33k2@u-wSmcMfBdc|8oDy><9N-Qm4_6 z;uVMSU|90O96{V-zdo%~<+KWEzBc&Djs7?i5=Se}ouu8)Eo!PehA8-@ritrIff2Gz zB5mAA>Kg?jk$Xq0$uSdf*H96e#CwQGoW|O};!%%GO3q_XeK-Er{PP|-`*GDvE!zY3 z%$ZE1*@hrDn^T=dN&_Gl7@LsPKm^_x(>(Hrs8(b({XRy=;-HQPlCewO7RVf8J)alBAU6h7brr^KbfKz&mVHj!2XdVN8-v;7{dT*e3hVFYjcn17RnPu7q}Z|t 
z#A&u2>ZwZ9-C8nLutiR9-j=45+fg_l3>NQNvW`t^A4r!)^Q)Ox`Gz)3P&a3k+l?|) zht+akTmCxg6K?Z()z?Ku3^gqqc0n2tG--2%R~R29M`VY%2IPFzyzI}akt$0Yrxqp& zU?TGP*r$DJubg_uM}G;UA~8Av)g3SsbnrvXL*k8iI7;J$Ckn;0XUJbzqCwJ#ZfYQNKRTJSX* zE8kvh-fF)^EY8l5>p&7|^H>V^Ut4(Jw1Igfi$}ntMF$!CvQP2ATJD_z9<)1F#)kzj zA@!?pfi(J1?KWKitTV4kCW2FqklsW(R~M)w6tgzBC3QlHzRKABxt{(iS?-<>wN5>m z!kJxaR+F)S@p$*}fT$}Dz?|WGzuHl8LfjYWVI4_(evM02o7?Sxg7*Lqy$If%!#Vg4 zP&2r%l0IIZcu0DC$(#uYOa;3TCJ=+#ST*^h?2pn;+&V_DY8*(OCD~Mu%wO^+M2Q53 zm!{dmLoV{G2lQUi(&cfn-H>rmXE0P+C=||W>GgcBgiR?mZStPtrb&(r+`eGX9R5t$ z{-lU@TDdL(kDjcVR^y@j$K%M#M$3cjjcg~{04+~Cf;A0NuK z+5h&5ru9X{c7X=1*M_e;69EOJHkl3AtxcMZ1qQm^(|w>EM!donX$zEdL&OF9?nZ&R zo2oZgUO>&RJ2Xc9=-Q7zSt|>CMc|7>>@3H+BsE9%Mt|mW*f!2FX&U+#Q zox5$A63Zh830dX@Hk5>d>?vM5wqPGKR++T6>E37oUXW8vhe*P}G_ByNe0igFxJ&T2 zN!P|2Xp5VoBJ0@I-U%erDr?M7U?J|JIitv<9<+JpQ7!2!y#B#bq|6aLRM3uF{C?^@ z?XWO}h+^{CHj;QZ=l}XIJy-dD@`7hYp+AIrWuz>OCX&`|bUO(1I@C6Ec}?M0wH>tg z{-K*iop#5LZFnju#tB%$AXm@)Bi{zpHYC?9?cQ;+QG;Z=xAkPof0a+4p4~C^PO$G~ z*-Ufa^J6Ff_Ks(mdf|7>_6w(X51)(}x}xEC?c>MHt2&{(KV12DD~IHQL|BfJY{skcNhLSn!D(-4@&C18?40F8B}RRK;wlfc_LdJ&JpC#ha~He z^_oqkk3smG^GKkZ&?0xW?nu7%_RJuwjTbqEyH5B*Yyw`O9HIyd(f`N8(&k0IW z>uf?Mbsm6gsY~_Nq6Va|ki$|A|DAxLL#3t4+WiBhaDUt;iy4?|_wlfE_}p~80Ls2t z*vwuC6C{^4!e0?jL4CknGSrO8KWn3`+wZDDcZu9*TUUQXdqRpstK#_>bbQ2Q^a#6Yf zV6!%N`@G+r#zhL;6vY+-614M?)DQ9Y~y( zr(Btu@j$7Ym>5tM|FFh>gnci{RJ)7Tc>1*JRNG2v?Tl}(KA1m75u#VkhSQ>8p4qr= zH%8M$r`A4~0Iil5&A^FpnK#Jj+Mft{XE0D(J?H$c?EwW@oFGlq|D-RZTe6n4o>4y2jl8xT&;ZV!H#?|%2< zUDn#)FHV+&*8Oym8{VX^*QbmX#V>@VB7NR$NvKgjYj^Y{P+CpaNU`fr)p1JUtoX!`R{1*v6d5u*M4R~ z{jWpoo>LI(jrvH6S_L5}(jA)sak=X6sR!X6^uGTiRy$=9>{*0ke&`s`dQxR$An{K+ zi1bf(b$4|wd@jhqP7xi(OSn@JgGlgs{)G{2kPpgr{>)#c4Mx5b`}vPFhm>3F*VFf* zj;y0G&vttGGhR|ZZR5W@lKSIl%}$CiyGC`lC0GS?ttyE7RRLXdQSO7>gQM5=0CGMn z@d<|4`@uuQhY3|KUQ{um*KLcd5tuzsRo1 zXJU5Y4jgUVl0*woIw8u6)UPwX5*JVp2NM26F~uT|Dfe5xZo z=+zd{VW66Ux+V=k;7!BX$?Fucti7X{;!lo!8hI(Zz?dKMoPiyFTe=U2h;2xp1Cl8C zw3&yvDxIsM+(>VL*H_MBXUUYj0#?+(di^;I+P=Qc5PR*X`>VOW67&N4<-RkgJH&W~ 
zX%b}wLRJ`2Qwx<@ai#j<3KE7G`N~09w<8$nME9H z)VLrDx#r!E0p{&93{7a3b#>!}m|@3cB{Ih=Z+b@ixJWLZJDg3rvVfm7BGc}YBxlI^ z_)jh9a#bg=wk*YTFdfkGHqV>Wl2XCS5`95B-M8x582{h}`o^f*S_Zh*Ym-!TRr7=I%8dKLgO)y%d)x0&Hi6dX;u zp`#8QjXoNI9ZM{!*xi2+8rj9KWzs0Y_Y{ba1-IZIQqQd1--c|UhPzdxVw4x+EDoph5Vcz=EUtSiCqDt7X}{Qu0}l+?ikEx!FZ zdf%UPu_Ef{xs!i|0i3Kiyld!3XaBu~a7<9Q%+em_I47)_Iwex9&89L!>dGAfmC!8k zy;ri&s{>J;zq$FTh%T5-ia7bIlKW{5R@RC|*VS4i`xh)a_|JmW49TOZDn zmT0sncNwtaDWc-o(3ol2AVT#%w+FneI#L4$3bcRC;!$phDz+i(h%Q6-(Q~9I+Z8F- z>I(pPN|7MO`NzVRI*2gX9*-m$$T6+@3HdvSaN+I28C6IhESFfohQCnfCzQLkM;M-` zum=wp*R!+SOyrnjH4}HmSrS77cxJj;M465(vSRS;Ziq<%vAS$AqQ;#!ExGp`;t^4o zo$Bgg6Iie%oePspRYJ1mIxlDU7F3$p7z2}0T)E|`0GIU;W0PR5HC8J;uyr>;=(shJ z*~fB!u^BI?Jw6XVLuEswJ3C4cJ(VwGn|Y~l$C?R1?LCI3@{)2>Np$&g2_+I^w1GgV z78^Vw*MiS*1XUpyBYnMLgNcm_CxU3&>+zfiaK|07-Vf3i2_K>wZ342KS)Vem1f!zL zvd8f;RWJ~nVX7zpJ>o68^j6b#yLeAjNPh)p)45H zm8+Sx>l_Qs1%zf)gL$(>Kmo?RRJFuwc-M`Vd2kX1w=KDZ{{RsJX&ua(Njdo7&`}

Hx4ja2;)Se^ZA#LaH70yxJk9Pqd`o1%A1ScPAhEAMLZ~ zZ%aa6I5`^?_MJ)u1@GS;hdg=VEO|+exyCpQ@DSXGST@OTA zJ_Y~`8sav2=Q&F>{YpI&3lfoL&%#v`F^T$O_h1q(3{}FI4CRxv`g^RLoA4F4``VkM z>i!h_q@$`Xd#F6m&4EzSg%)nwv9Xe8iqd)GzotogD*i zON;KFG>972m9HIp+Ag_#S_<8?_p8OS`{HtrqwvV`$(zFOeb4wlQ5y5zyLzy+(M-ID zB*bVsp+VOLX1Qv=dfuSm0Wpiy-Fia3Qn_+#7dG5k^T?M?J8bfRB{KA!mi5fM}0H^fX^UQ_2pF*4PJK%NFv(SqsKxmheeX4`X&e)K36iG7ZYQ? z9$l_yGwQ^wK}JFoI>s>&Qm;r1NOKX%$BLgjTH{0P+(0|I1lueff9u3tu+gTc?qu%x zOmJH8PE}u(kp996*G$eNnluNcdIwtV42g+u6LDvgxfpnE;a8TwpfQ2qFxb&hEYoe8 zL%Cz&_wzXa(v*vk@}1dDL+h^;YIWpcB9=i%Rd3#uN3s*B1oj4yLcI-5cxiuU(F90J z(9s)hwH}#JCUqv42Mkhgse@5=`F%C7c{NGMwQ=GD3Z4r*kxS#FVwjZq{xuu@C*58X?t*SD za_oCGIeU}Evtu^A0na1yDvyR%YzX_PbR-PwNkecRMg&pD4W@8(Q1FOLO(_e_Mta_? zE5#C#)+EOgCKMH*M4ROX9ITrU=dtX!8barR%d$Ym)_Np_F6x2~-=FCNp zrV7S;CrokZTv*4P@8r!e1TTon{#Kc>;@0no<|~d>_mdb#LgQY;ADP@I>Li2-ly#N) z0nZGuV470}pj>-abq-Of<*lqqyH13Q=J?W5H-0Z6AMVtwHKBxE!T<#QWPeNFpzuqT{2BQEif@P**q{*cGTp@02g`x(M&Q5 zMbymx6n(PURJ#U6QWeTw7%qyqFmL|c#H7Ck1psG@mVA9L#jcfZAXm)@EH&;aEz^Fh zTXJQ|)kP&AJR4^J{cd0zsI6!Z{rVaa8Y(Xv`0M(=^`^?@q6=ATZh!Cl`ZS87!K&v&kLotHL}hOUnzWAS6wHkbSpvxFfynx#vt+5}q~kD-6v zzfgbvOT6OHBiRyo9jGmd*5rsLm8fV6CFH3Vd*SX4YEMapcvkYd_p2LVxCO3W4 z4RpH0*rb0cF#ve`eIEDc?;{;2&-QdFYUg>d^H(fdgL0&`K+~lvD1Ndb^L`vVlnh2D zQ45n21m)dAI>W{^{}JjG70@$J4{%}U_4irE!uO~|7}QU)?_Pbz`MO1&fZDKYOfH6z zT~%i;+2KI3uY)Y0kh2}xnB~Vv>Y+jqI=8oMcaxAaysyk^%GWa?`y!pK(2|-3J>wG{ zQKD@G{<$qxkA2Z^ftds5r81tyfdXzA0j%;HTF{r^k9ZTRUJ$1fABU1sDZi)5YJ_Gl z_2zy}ng7UWaqkgTyV)5x#%?Z&*IMg$i*Rpe1dElMMa0`AYDN<7xJH<((4RSIE?d#E zZUwxnpS;{k3NY7(OdvZE+jd;l<(@*{AX1c&95BdW?sa#QQv4_J!|1WEdO#a!_S-?V zVhFf8Z!}^63#qOkG0|e0a!~(iTRRC4Rcef9h!t&Iswc1!r7|(_%&vk-(=#vfcyC1G z7I5^^fUtaAl}l#`5^%2rp+>9D-ONK&NEwWm>Vi^ZDCa}Z=-Tn4Gev!8)Y47n5Vc47 zSYexLXR4GrP|&~j-J}(w7m3@Z=FG3#N+r}c_Yb@rcGf@gw=%`xQ!@+94)sz#9y)W4 zUH)vag#~JCh;1~*Vm*1eN444eiz|8n?+$+Dg{Fg4<@1%kx!ca6z%8!Wb4}_=Gu3qH zDzB8F*s59TUr^B>@0w=NHRE_AAagwT&;Srt5lUb>SnIS;y-Y~aH);M z1;jhMI=Y%lQ>F<_KHZ@^>&ezcg_UlbVZrbAO)OCStI}6@=063W<`$Y>y3t<~{xfrD 
zc24`@wgdZfi^U1pJ8rLft2urR6#_%lre3)+iP@9kO(deIG%|lM@+&E_V4e)PM`U4p zkZP)$q&sCssM>bG(-}XX%xS!i6qtBXQ6FxL!Gea0d;@Jeuqcy}Q&vguTJ7`q z@`j2>e0uTV-V0hO;2+Gc+25T=Rs{0U3t~kuSjnA~$c) z)2#YY%P&}PmKoJO##Pmx4yC)A-_(W1C6Gj!8CPTZ`nZItSLOl=UtYTP7NaL16ijXm z2w-nhtGSNzZ>$*N`de&?9_q@9CmGT2n6DKOn|%df^NmnrilFH^qQ$n3Wx`&(RyI%1 zO`xJi6+$WJDm~1OFCp?Da-z zl!mz3Gl))B=rjClF_jAR2^0n;T8fSEt@7|?s{+}z2mI$p2sYPZ0Vx8;)NCAqn@q}A zL3119x%Sn?G?A%^4|WC{;g3^B>4&C4H5ysYHdKz1Lp`EDC=^v@0(AvKdf@FKp6^(8 z^7~0`T(|FLJ`qfGLwWiz6hYS{yv?hWwlhh*?)zb=QqafL;vgjQNrtx?_A2-8w|<~OU6d~1zG8GD=CSV zx#iu&?{(*s`=TSxjYPr!?EjXNVix1_fv5~l{VLj zmiSJ)z3M+`Id#L8@^17eWIAbmq0xYP!7X-gc2AxBwRRLgISRQa?&DWWC~{szw<756 zxEFibgLO%u$Ax@bRn+WfD1IH=aie>6>KxcMy-M$lgGsCR-#bFS-@ToG)FS4f@b{lE zr_%OBA2iIWgV?fK-G5_5;DA8H;Ho1FBEPEgZWz$pi zyLAwhkHRE|<<2zujjh8jXN^(H3-BJhnq%P62F3xl|Gj?(on-VNGx+yTiLes4Qpz$m z6b-pD^&Oc%4Xa+b&js)78@{B=J)f&~pu$bn$f0#3+%^Jf(TDlG+NA3b(}5x!3Tt(a zi!y_Edg#960a%J@KW;Q>GXX}7d;@OJDptw~_zu(p#sjRGheaP(YtQX#geB;U_}HCU z`Rz_hw+=zA+2BVq1DZW2?f%#igg$cx?ckf(r1KXSG1d-puwf|ZJT`aGO@w_l6hBM? 
z_0O5#i_0TietVk8j#In~Q$5oYvQSg7WTgHAmYBoV4|Y4Hlcf!&Yii%@6%J+sESL+J zW==CZx;WS9TP=z3N<7$0?YI0!PvFl;YHlMa4zyoU>L4buFqAsoe2o4<65C6#L7>Va zQw0g-Sk4beL@J?|WhGMP|6#UNZ9bTe6A4WWgyaX2g>vH5=IBY(2X(``%m?mQm_$J(SPd3(HCIkNCvS{3H<2VbX`Ex2H^3lutsu6~*R&%s5oeJJXzZ}#4tuEoiX6-zf4n)JMS|xqDE+gVaVK9nB6G!S z`MAE3QJo0d7CW&|_dZ5aPy-1pI#d5GNiU`83-8bOAsJgzK=>_^7tL-rdKV3Ym z_wtK4@Y2wsKPNT+bBL&2&?qXLn4Q>Bm9SiwNZ1Tc)uGNcsTu<)?T9Ki(tM;7&MJ^- zGkXugV?U7Pp|&U&c0)<<_C}t= zs^*qdTy441hV{HWDyiZ3oR32a#p{!w<6ZcJ32LqyL4ylF0!O~0>r0>k4{ zk9Rqjel@0iYTLNhg5ZPxKJX%0_7^AR$=)L1N9rjY^X>`d55|W*>zeAm(l?-Tm_M^b zzAqQ(E9Ztl-6s&2m$l!#x>lhvd6E3z&$**7?^omaKVVBv{4*rL=>X3nmWYmukA2`D zbz4W-BhdQe3FAx^!-9yZM+)Q6egV1IzbDRA$xR%o9q_m0_{kIeWf5wKt6A5i^g(6e zwcacoWjUS&xYCL(pBz{bTvRP{D~p-sxOalMu)Frkkp^4S)OC;R1;-}zNPirYpQ6{J zKNA%S4p64ikYAe12yHPJv(1&jc(rJmwiZOQuKSZQ7_Q~+MS-@q92*RS@hJ~BTbv@1 zxh;zt8<;RI*leU&Uv^(0PoQcw%Zh(@n+mMHnI98S`(=4?7+t4Mv4$^1efrQD!6|aP zqr4)5%is<7_|=wz_r1bMkh7Bgq^II>l(EZ(vuG1)nOaDvGbjQz@)Y92V^hhMwO8J)&AcZ!DBJOu5&H; ztuR&tC6M*oA$2y(v@E~^bv_2mR|DaXjy4Eo7#p>!W4QWTv?Yj0m{WtqzPlxKq<=^N ziUR|J2^~BdB3R9I7sBoa*U3$3gFt~CBemnAhN7Rb1MSJP5_E?@u}Ck{KhP-;=fi#QpK{$Vca4?bgQD&X@)fz z(c}rT8tuKg(YUYb_fBYh$+qG132|~XqnG%UPy zJ|Fb$a;8I@qSX5)Dxh`C1rfE)u0-AFUmwMp2IWKiPc-91ha~B%XVX7h1%C;Svvd}7 zSfv|A&uuw`#n^v$J87WtdSc+je|gwr)Xs^&8Vb2BD-x4mZs~u8O|}axcf9}gKcL-) z8)D?~kND)_FZqP5OOQ9>iHuJ3hpuaL|Ne14x$D;%UR5m0uI;bzBLmfvKz7}u{{gRa zZ~+WA)n|WwxN8(Ak*CN&RhpUIBJ85!+rYxo^lnp!z*W!^X$9Ma1&3QNN zZ~tZ>bWG}b#sM=ROmF{Kue3#(O;4$DAEW`-Yn_!K>zA+z5-% z8P#Oo3z$%SnXbRQwn>{Caz<*TdYzn=(hN=0w9o%`W@jYW0@t*Oi}hxEL6N3Se!$jK z0->Yfxv$5;BeUxCE&B-^+Tr?ELvAU`$hMqGO~Gzw(j%1KRl`-SJQRCs>;75tO#K=0 zwfZkUt73B$Xd`IT3WiLQ!D;ljh71(5ACO;IFwJ&S{)p)sw;@R9j+DoS8epqZ-BB}R zmb?SCk1yX)ad)_|7_nFeRUKwp#yoH~93>yzafKDw?HWeMpu<^GeTAgXrYfWuKfh%_ z3cZSw$`rN6-E&(@riq;1bH_>e#v&qBVZw6(OE!`v@GAs*F{V-e2=0IS22fsEy^OtS z%N0klO;9GmFNaWB^Q~E~LN6c#_C*>D49Q$H-bx$W3qy)G!F=>feYf$d+ex3*WHH+> zBbPn3jmC9^7?8KN4yI8=uG3cm#ZabrYn%Cbb)KTq#oG6i;J>Qt1HBXMz-OMw`FVkS 
z)F?f>DA*i~VZIxDxUD!;5{2J;k&-)kZW?)Fa(j{K0xJSfCZ4SF?I}xo>a! z*D|;)3k~nn352}H``AUr;N_q^i0~pjRObqQC@J3N4OM@fqTDc;anE6}$3MwYu)NQN zT79()L^cC3o5b9z^vG;>d2!Bmdw}4U-BSFX6Qx!+wq&Ft7@Yyk!5*`}UcdhF<|*xH z{C_Y0&H0l!TZ3$H`8s#U09C-(CyFT^bM1ynDF8gORAyx1;#(yW@^#H`DsG zvqdLBK|?$BVZl{@VJK%v)APr0@6>V_|LXeOHJBF^+ju2rdFN8(*|g)U$M^jY*w=f4 zWXT8j9W#9KI+X*QCr$mndT4h>#*Nxv@Q?re5BOU*^>}~dZN*7fZ)mNRf1oMY3$vG>f1<8U};Hd!Gu8b-3|oMRn( zOGS>oWmS~Q9(`0uRtr&9rIJv5e}4C$a6caR`+DEkb-kWXtG{z8g`7;No2$C8Zi0^C zHUMkL9}xa@_R?auE*mdXBPf+cd9C9C@cvji4`n$C}yn3IOL2!Z0z= z11)0L)q#qnT`i~5nL>vCjJZ2#-f+o$%#8%M2>U6U01PE+r&cqmd$dF&f71%QsWXri zq9%dz4>BcC^C$&Lz}J)er2+IVz@cTM9FO$dNA-y0UksHj}2~q(L*kOCWFs z&|y!u^+NL*?2CL{PaBGdO6JpxMovz2l-&e|$u|8a>FWFy(7wC)WWIxgEQ$%7Y>ZWe zp#%jha&gr<1W{ncjd)Y-wlaru<$jGc-e>tOby4GDd-1nD#PUKzNs?C|?xABnXq9O$ zUQZRrmWw`m+3enM3edPSv4usDAillun&)0z9fSn>cM14>x3aC9-RX-N0CMGlPK$ z-&@p`UXbX#qs4WXO^broE(T2xV2E_6Gzryi#WX=AcgscD5Ev377kCBwFhZKAQslgF zCl+W=OToDl*{(J8iPU8ImyZBl*_N^9S)3Jc6;0ZUKq{3$oA^ShU7-)6hPWL%>Fo$*LakkUTPQ zmi5<&7~-!&s6FN~+YS1b|o2##?Oq zN;6QoMlXz97>d94kaG#=y0COrQrip_&DS}9>0_8X*uHNdnCu`?FKS;=xktm4pIvSg z{R578FMIn;u>9*9N*l6ak-jUg=htj&vLc#l@sO;gWbx$redZk%5}c5B<+`|`U%loe zcG*|#*e!3Vg1Z3@wZy&eFNJ9{q{ON=WT)muJ_PZeCeDgb(BqeGF|WnJU@xRRtq)m^ zLJ*?1;iOiqp`A2H9p;l#oV3SbZXF|=6|`i=Dc?Iu;!>0=ti+gA_U6BABrMXaS`-of z;0p@j&zA6-Nvjdjm@wt}K`-Ud|K?bf2&Qxz>cJ-z(Qg4?G=M6Ca8CANmoZDt%8z$m z!eee4rrk`Up*00Ud@D{Q!Bbkol_N5u0g)I@tRA1q?dJm6+$OcsA0xWly)1N8Mk>Si z4YfFpUR`5zeO%i}clt$3UM&lqWc1it5R5u_EiID9H- zE}2QS*h@%p=&D$N-k=ZIW(Bww~l@73YuDf z+%KlpSamCgpFbC@hgClHaH*)Eds7GuGZ<*a<=z)=?8+H#NMkY@o>B+b$Y#`f^;i;ITDlI9V&@)U?~e z?_tZnGic!4W!=A+-bRFjaQa}!pE8w{>Bx=?TRR6a9DV-58emL$pS0aK9;D0iI^!=$ z5EF5vGh4ZoTGiRNvZC}V5dpX?+&8PZAFwfX=2KV}ME`%le;*K}HK5~JKd@JTcH1`+)O6~KOPCmYGLskqN3`7P>4NB*K+in9BVb>E=zubr=z`p6 zB#kM7l_qf~{b*p=oU-Fj3jISTW-4cvkseN zM7RWy-z1bJGlSq#2`GyIpFLT8tIdZTD(L=>I`@4xjci7lE*0XpxOs*NR607&hwj2L zIAN8%x7ckdQhT2kq+a{$x}^^dkWSQxjZ31<|T z?RM#Kaiu1jn0jzx%zl8EQq^rL*`WZqz-X^BB!P#faJk9nQT-s(oXElh`fMsj(aN}> 
zm|2BCaOKG3E1(iLmutr?L4)~4xncmH3l#$oq69GkL0Kz$%F;vgRyigl$V)^j0->Ls z=fdm6tpz3aoshO&b@JM@&$19Zl!AsH*suxd@%Tm|)5jzSvdso=mo9u42qZ%}O%s*V z2TY|6&E{|8Sh&XUX<8ekJW*~&_F#R5@#&6@$tA5KFvsi|6ryn!B;P*h$!=E8n15%uhU&GxE-m zP}*E@?66MS4T+=Rp9%Y!8As3Rj)VxfP|d@5J?e_eG?EjyrMFZ67u?m|LT&4)kn%4g?p2zLkAKmIfR-O}GAgoI{9SDw=O zR+jM65(&;(si7f23%@4H2QKX>U}mFIGf%73K>gOXJWNeO{_5OKdUru!;^2MMb6}6O zx6b(U1oAa+8HA*^7RlWk@&5TqQKG*{@XO(Li~e6Lurl z74j`Zyf{=iKYYB#cZiX0k8T^*apI=GjvX>w$n28t{#2NUMGRuzSP~pU4WPx-v~PSZ ze6L?375d5diGi4U*&m4Xk!;zCe^3y2_LyEv&LojmR$`AsHwz@_#DTY; z^0?%FKQ;*=?!Ico$HR)lH=I-ObREL@+KPeF`%5H3-M@d%p9;%zNe0%kkN_155_#7Z zhPs02Rbc2(LqFN2uIyZ_o{#gmnem>O;;xJ$R>G~>5!byr{2Y55m$a#_idth)m9Drk zEAX1l#l7&%R<4X5Mr;~!6L~v4&0LOCxR1+G7%Y?x12P)D6g3mqCeb;@Y^{yYQIO#u z^~!5Bf}iqHH{Uw}7T0|FP~-d>&rY1OFqo2UPn-1;XTf1oCnE-naQUoJ(?ybRc9l3i z@MJ3EKrx@6J(Pp!`hAKIUnA(6t+P1|A zY|`)-tx&Gq=*yft0|9<){gO$Xb!O@aPB*_%Y9;4isOiHPS%qb+tB-iAs0+f;#5_Qx zOb@D!n)YH?O^6jDzN$6`8V^z~C38F25itJXcZ@dVdZ%@b>e}Gc595?5>`7oJ zGus;_OsSDSM?L!<1;cD-w@+NO@=uM9IuneK|MfY@NQWzY#R!~#2*FrnTkM!YFOj~B zfJ{ZE>-8xrA!XbQK_aipy|k&q+SeUpUS%T>%W`H@GXCR!eR#+J;?1we*MPpGyt@IK zx})E*9r(ppjGN#6N-f7VQvLP2{9niU1UAxtpZYo^Br+%z{P+5gQZ3oUwhil;B93dV zBLcL#K8fi%JApHmb$0m8lD1&KZ`Fst0IsJ~KX=XNew5#IuXmd-{L>$oX`q4Kq}k-R zGRu#TAVY^hUfD1%6nq^|s&C|M8e0Jp zS+Ye>I(T7m0qU>{xgRZ32$wBW40n?T-R>7({0R-3&<|Dut`<=6LASaIJggq(08BAP^qsTx?&fk>{tP5n9UqQKe}LMD5=n`H2ZJ^Bi2BQ zg#wQC;8YShq()+#)*P)8i!n^On%Un7VdiH7cNwdktc5tvpt8KmF0_Y8TdFJ4*{*QSMRVtAp%8#o9O(|;+4fN%SAEm2U3+ki*Hw(j1>y}ZU`4c5uUSa%cy%1n zWKkY7Z$_>^J#C&}`ui67{@_&x<{S4`FO+Rb4sfb0nzp{_Y%-UaW+@fd=Y~9B9aomP z_6TJvXSE&@1qn4oe|pVgMfSuzM`Ehi>*To75J>9AFH#6KYB>pA| zXuj9zWOC`|6SXeu*-HLLr-ZevQr`kAua;Q0CYR={KTx+^!MmS6c8kUW9Qog)1qSuC zdSM0V;{^Y2sUO|Hb+MrnpXoofcoc32W|tue2f0+%XZ-X(y%)<~*%Cx=A7T-BeDP(U z>P-5O2D(j=Ho}e!`x}znnZS^kKL@_P-$x`mWu(t9z2En*le$2b{pd<+TFSzW3IVij6o+X&* zc~=XORbGjuO*KCWRCya^9x^6L)yQnar&CKwwW555`71S0PP~)x1bM0=kG+VQnJFWE z(dL{Y_LHr^sbWH`YI&}&TFJRZo=*F 
z5I>{T>354BnHJwTqx0V)o>#Oc>POfPISO3Vxbncyp(*NeM-5kvr81{#((zhR@b-4Ttnf>Y$vNq3RFAx}=oRjO_ zaB)6TSDA{b*VlTgP!W3rU)`9FYubn%HUNIX$U;*K0G?XQl-PI76@4^&U|{2`BC2W9 zO}kIbFe92Y3@JCHKWKa1CLu@9mAPkpO)=e3CLxGVmmes( zxOSnU=XI>&h41x$POuOLX9Vmpi33mNlAB)V?;1^=@AT85p3e=b&ypwF14xR9qbLTj zDm3uH&jnZ;cI|^D6E{4)Hqc|#@P2F$le+jO>v@jov z)Lm7l(ALcT`?xLdr9GcwXOqrToWy!Q)>Lgw{KNwMRe#wVQ7S_zuQU-xWgePU>38&h zbUkT->d(VE`T;ozObQG8jVQa=2ybYU5GpyG1U3r_&ml4WH z;rH%VW$9wYA-D#n(jSCeR?B55$k^c6A%;V$E&*C#47D_xD?_FqO3u|ps0>|srkWUj zy_}Kz0TJZzNLUZvS5176gY$I|;J$1#Li>g%UL~ zhgj&-BNY>$>?glDd2g4|QLU$cRQ0q0% zg$W@K_2=ahz1d%ZLeTx&%E}W>aTwz-jDGQed1HH(vr^4NiDNoc9#Z0<)=Mb|9(Fl$ zQvau#-hJ{s!of3qN6|;!ocdwpp|PxEoAdw%IV}PcF|sua&E^s-b|z!^6kOR&1m~?A z6QvvgjQqLU3f|k{o9*_=>9zN#Q;|c(3uX?}!c;2~3mafNkgftUqURGr*-9_DT^30Q z%1rP>12FvR6$_93;x2v%kGLzBjZPKhz2h|U*>9SMR_dYD{JW!qIsrTARlKq{&%z)W z1wDiX$vU~2_EBi_T?i;DBcMi3nqOKV$M{O4>X|4k4&BU)6;&Xi_21~j z15Uh(8LsF#BiQ>(X#hJCm7A zlP;dC-2i%rRJhyyXsX7mv*WQH>^eraeH%yd2z+*)HMrS@V=_^m1eU--7qa{1nRj9r z69Ey!lMS%S<0wvxpJ$Fva20n&)`J6%{~J}Q>QQ|;;=lE0uuti6(3dA7*Z&EX$_UHH zvw8%;W9NkKx>Mp_k9PbII2BbH&eHkk-mHGS9+GZ@;AFozYSMvL3+Jxw_h6~{k$=zu z%Z?z+Xb>ma)CJpnaqq5~@Bj_>KWiRKTi7zl#iu^_z*3CWaZ zy;YF>@JXh>fGQ(&b39kpGK89ZF?Jpel4OMgn3&Z&R;$AUZ$eg!F4}1pVe|yxfJmNi z8j|U_wBktnYt{BzR#Mh>VT9wC%o72I$qMR=3k1`zD%16l z*(Jm~zY9Xs>+tmVxwoRjYEtCq$N1(@|;BBTBkqUNo9InOro>1o-@P_FA~eBKim{9!!6+6t0n z8nMR6y{M$m>5Tx~$QEZ(sl}vK{X!ONA{a=*Bb%{FpL2?95Fx+@?=^Y2DN&3t8T`&B zpC+Dm<*up6%r1jVNGaxk=5$9P;OTkoeX`m)a)$NmNingyM56Q_w-csbHB}-gS*X&U^SnIro-zU-ki=qSdQmnD!E7+U zq`({))tq)p9n%UxoIyZTXxA@=)zQ|ISZc{OK^8+49FT&_;QV0kIy$*P+c0X^uxfjV z8>M_(;0`l%$IQS$wjF-SGtlb;Brt*cYEs#Y@7RadVC%Rm{!936;BB-$<&Dw$<+$IO z8o#Qou7j!{>~8L}%{=%T7}PY!+rm;8E`*09$Z9*tP9|5FmW!guzocGQyk06R`d9sD zMJL|t{OLR*-14%M59H)0_6L%xV9aMbzzl*>WZR^lK~E zk!ROdRk#YA385}e`cIjISyQ2LvqX$bl8yX$_OsV&x#aZq)p0zT>m{mEmR-2!^Vs>H4()o zqa~3Ik29BsDy#F+KN#z|xr5d*KawzyLMEg61tw-nLsG=VFqso}C)AVu`CQR zt}cwm7b^n488MY)WnR`)3H^4&b(>-L)nMho2;Ut`)$6wLb1t$z#dJE#60Iv!NYJ1b 
zuJmG|VAc5>z71tAh-+O~X^Q0j`wClAev2JKptQ7T4;^x?jYUz?7hmJX@y0nIwnGWrsa0KVZFq0NmVt`uFzEJ-gp z0J11E+#?VL-LJ8A$2^eEPDxQME~gGs^M8;9I$egVE1o|sf+@=HBn1ety7rT#NIr@4 z8)z_6G*lof2xt&0db1P|C*tZYAR;F80NnSEm`G)fR`;S$aPp??E7+}zTyIkel+L7u zEQoC>@2Y4T&hS2upg^apeCnIHssB|@lj(P)5T2m|kK!){0VVG$?jm5L;5R{ndEpX#5E7G?`4EGE| z2VX9ltQ~ztD!J!vuN5SLZQE~|?g!@BOs00L&i*(GM&V>0uuM`zz8e>>%m|*QFduF5 zzKSgsy=y}focXBy?DW&yqhs-|g1_|iJHGu7`0_kqT&icVFY4Lj8y}WsH#XotF%jBd zjP}lq3N@IreTBWF)()r5LAWGqY)DGg6%E#UK`2|~;`woQuQvR-Bh&=vWtNswbRmzo zFy$5mnj6h2E^M-|yl6uy)p>nWT27hOFhfs@o#M4iKW}X-!k_qX>3;xWX}N0WA_?Rw z_r^;hjD~(?PXEdpK5jHXzSDI%Kk4ui6y@#i2YDR_mJ7=$QMkgd<&O7=_C4B7`8=fL z2l7k^82Te~frS-I<_>PIBLBZDCxf$Q2PA znS3Ca*!jV>*flOczg;8Er~K6)>{Ltpt?u1VtJXT*68cElPs|W=50zLc7C5aqn&I9A zCg7{90&G|scFYj!fH(L)DbIK9G9*aXlZwO>eS0MMKdEz>@F>2*9!|P=ffz5nAZQ6u z#0E@Ofnvy0qE#bA&b!$BV2xP%0JV5yB!VPXN-e$n6`b!&7!XG2Necc+e29MQhzM%MoTgL{C} zxu*k>hq!-m6!@0@%j1U$W46mdn3g-f)3N_$>1Yt%^G%(*Lt;u2U!lIq5Wbn25~ju)Vl349LY}#gwJ{Cf3EOORRqMuuYtA(G;*Y)9Osig) zK<^PZU0jJq;)Uw`58mBPZ#EWHR}k^3y(B7ocJ03W=?415>@}~MDp6acLNbdjvrp?> za#G(F$HY2W&98U>-f085q7BJ1BwQY|)L!9qlqCl)#0Y!kp~3SysoQT5aA5UJ;0GGe zZ$d?n(weLq%@9NtV*1Wwq3b>pQWftoc{?;S<3nz55Z9Z*QQo#oMz)DQ*&S#>F@vJ> z(w8f@L#R-lud4ZPl%Rr{P>IrGrjEQ2d8c`>nV2YPcFU&_&cV_l4;W4fL#J=jo@_Gq z`V);zuX-)(I&{GUU074Twt`b!r?qjx<7Ob5=h&u%?X&_qsQo{|plE`uXs&IGA^^KS z1~4tr+j=42|J^0++4bell<=e8=!DOP`#X^HW+$;A2Pnvig6>!}rkgTsuIM;p2L>dx z+6Z~{wjZ&o6ZnFSmJ+k)L>u!o)#Ka=>^}>b zLi5%?pV5`|7rJO!R-O|D6Br{0~qm=a>0=vk0qyqwdBJpfG)R+rMNEYIFLEEp(rF{!^onc+)4)y>s{%s%bMbsyyhtgx%nH*04xEdgB$T zH(mZ&-*=i0x*#Gm@U5!po$$H%))f0)3rhS>j(M|4`lF_=Vc$n~M0c5e7WZradzU;( z>2kPswe=NdGxBs4xLzR^^;_Z6yDaC^*3=!k^d2+LLenM{>5}{|9828Xl%31l~G03Wa3`ey%!V{kw$Lq5d`$ zE<4{>Qqnr;gp7(9HynHlQiy!7UZTo$38D`vbQUR^?>2a$0my}9Zo>gNXU6--;tzXx zRENo>=Z2Sj5D&yr_VtFLo2M;3l^lhG0uq?E`=x`e3OOubA8DC;w^ zRtQWaKYO8|+bNv8Oqt&s$qDJ~B5$Ab!3poNfXWvZgw z9H{KqqpxV76`EsYq~_E@q=)fWhkv*-|A>PKp@CHEYosxldsQi%uWT%OSFW*nN={*^ z_o_@&b=bKCiY7?VK*MXq`sGRiQE+M-y015rlkpZIOhGt^4|wjz(^##!&zki~-DM`P 
ztcQo|<#Ne;21XzkLUJ#YUVz)~8-P|nl~T`uN`fhQ7?kR!yzJJMbh|rQLbB(_F1+u5 z`JUCgS7}@n!bm61fHa}?UGkZ<^;y}S+DY2ddILiln{rn)DSrek`QQ40(_+=^3AsPT zMVG;?l)%|Tp;eX4bkXNK$xJaZwY20BnP9?{Z4On+gR>&!+ZdyooAYr{ z>%#2!wVLB?z1Isv>bc*C&R>r|{prc4+<%+D%fJ7-a!7Gwh!ivHJnB|nX$8Do`-c`u zI3a!3sH|}0xE-b?Xn$)NW4NLpK&pxp10PWd2C@!~#HH%oxoQY0V{1rkc_Dq!Gv{O| z<|$jBGR1{|%cES7Lq9cfo=R}02*I%BK$or~icw`vXk*pU0PBK*ihl70Jh#?2_B4A! zL$2IH@dlgk1$XXzc0LIcVnD50dYrEo@7V^IQJ!+(BBg_vtg!Nd^kE+vnMZ76dLmEO z4enb@i{HO?vPYDcR@ka#OvkoQTjZ92y>17xW&;awMXEJb&Sy=a&QeM);&Z^_`|84wFqO_wZhxKu%J7@z4HD3#o?ZP3$nH`nl~rr}@THPi*Q3GzHP zOuB~$APYn61WhmI;96=B7#rRsD$e+ti-rcQ`|z-geM;#%YwR47ZuZ?2>T)h%NL_bL zxDffmY~GM~iqDp{f3ij}k!2kl)C-u)CgoP0nY@sk-Y1W0D%vC1$f@33tuJy}mx{3t z&zSHrtY~&IF0ySb+vIU0dvPYJQf5IyPE1r&#mmbz_Z~Q}VI@&a16lux3|hKz{?nJ_mce{C+gq-fWqF;Gn0A(N5qd27SVvfqbIi(wG4Fby)))+QoUbJdxk6L zkb08)%-DIbZaG1VQ=juM{QI&9tI~@jwd1JFC^6x{^1H{c^FV4wY)&$PxCr_7Rm_=n z1zVdOM)t@uiAR?BuvcTN>Fpe>1c6+DUD3q1R16tNzhWt8+p729AYL)SN-Ja~dHp+n zlpW9Y{mqY?H`H$WR+{`%X8-r6;7f`48`f@v(UF%kdPIV*{YKTHkpXP0{Iu=a&9bjL zN5}BSaQMN$zXwM{N`uFLjt%tVGHsM#cDbdK#5U}bv4yNnijbk%py+*@4aNo7*^=k{ zmR?({>2N(x?rV}l&TUGTS6wP%oH7D54qABAZ^9lw#=Ijb*C{scI=>Btyd9Gt<(^&p z-Ke^}&)v~WcJ}=m)i@^V$$OEM1gc=S38@L411$NI;daK^q&vnA=AHv4-DPM?4yS>1 z$Yh_Xqb2;ro!oNA0xa*O312$!CxtU*^k0g}T_?x{R`LWLxn z)E##D+=9HrWK7YO14Z2#)G@o~lWRlvrIHmIhN}Ab2bHm!NnR%Xx@Epb)HEvdlcLg@qs?B4*OMi;01XfuQhg zKb7tER4Ro_M#57GsUqw_;SoT}_g&-ZqhXnug)hzeh{mcp!<@2KGjJ9XgwZ$Mms8~Y z@~gfP0oS_|>0n3^m66e6U6jtO`QmsvjgI(;cTBogL1-q`m^+S4Wtq!efk0IkCP4OA zP1A4L*)Cp}J14DBS<}So^&io+>$euJK?&F8Es6k7M}|_4}PSyj}}1nyV~z_=R_ zYmoR-cjKP5-}kNK{habIX$etXI~+&;H*Oe=k5tX)IH{DY?tYa?d->~G$g7Z_-~Ppv z0YIC)K5G2zB|dl&X`1tZ)iz?~=_>`$BxXU)b8mK5EgCW}y0voT7k6*B>p=kg$Bl;{ zd$;3Ke+Bc<(6u)kO~d_J^D$9RIyPdUh9kXT^)J ze^LqQ82ZJ5`15Y*TtFT5Wv={DCkT4#)^lB~qp}S3fDpBolplagi8IOKTo&U79{eT; zDsZNPXMF+392XA+Zn&teK(t=pY)v|fiyfX@HjEu@dcHM8F@80v0&}Jba&qs4#15Lb z_g&-SvAb<%xhlg2a-w(j(<|GcS~}x_H^Xr@_sKH-G`le%$*Ui0BBYVW>N!MT`Bd8( zErA~=Q*iGZIJBBAgwpz6ii02qe$FR4_=O)Yy-ol#4LuJ9@32;xwg(GhiOg1 
zmKZOpqkx?icOC(}1VV+I5n>{nb0x*viCWOO92wr`ACeqFPYx4$5uoie_oa;&0^&jq zO4vq{-M`%7J1XexsRUujt4vPm*XG%5?W?};F9)rh`p8yl+Ja@gzxakoeVw~4VQ|sC8U@cXsYkzmW;F zPNwqqd;8g!GK*o%bBpQo#guNh#dTvb*y`t_A@F=|qHT<#qy?hzLc}`VH0=}mw1*i( z3LVISH-xHl*JT<1BsF;Iz~lU?p#YBqAJZs6yqN_t{x1F%OC^O-jI#;HfTmVONdzO$ zFn%-HbWy_)Ej_^`YpagU$0z zCw$3IH-tW(40rz|)RetThY(ZgaeDubV^+;7tFWPtltIVeFE%zRlG7FMW;pscI8ix? zH{wj&9ngPUM~d{R@URXU2QO%m&1_8<@4>~?;gV>K{=gk{N?i2Mf6TC)3XaBxl_Dv# z*Mp}X`H!FMa=loymsw1B%5Hji{uSzxQ+wt0IH`qacZpA*yprrNR`7beXyPJ9s9q09IA7`>A{3>)_(ur7# zURt~UvAciQvo5k}^R3jb4f1FCz;U_AkakfVe9f&q)_^H1^s&WNxXf#lz}|8D%{<;X z4V3G=;Bzz>BJK-%i(y7Ma5@<;)Pnp5>sqC*5HHPnpP+_JsN{3;ye%oUiM6%qOlc~3DJN%*KD93&r985^-4O6 z8QRcu7cE>0L)h&ZjjDcN--T}GoWBG2%OtL~clZ5eCoh*|KmY9BZ=V9fXR76S%6r*%8@drmoNh%?F(v?$ z_%g#{kS&IH64IVhfEV65;!>Z3`$>s*FRhPm3v1@ruJt4uoJH6V1wU zUq#&7(^0z7Ma~@5b{eTQ6P0oVASWLcqVM$M)6$fYKr8e1c4sovG>D5>8!K@)ry5M) zw@qO~AU^eUz{LpSUD%MkF+p4;CX3}ucf(j7&O;=!t3vDCbbrk?z&M~i+}1XNE&>_v zrYQ!*Q3{2$GQgmub2R<`0W182DE{nZu>Ng^1a>FSXWOCz<0q?65t#UeZ?@$m7I#0L zdfsn{=(Zzl-F#}}M*L41uX9Qs6CBJE3vaW(IQaBAGIB4n>}A!z>iDrOGwO|q)SXZ_ z{=J*~1|RRH9zSy~lYb_BZ}ZD`)UJHx6U~YrVXutTveezyy=$ur{>|YyTy_BED2KhL zZ#)xxC*(04z*LOldA2lQ{2ml_oQ3#oWdQ0kLgfAMtpx!KPCAB(#z{@p*pH~KH0 zoFV7qSG||GzqcGHogdl{x|3r)7I~)})H~*)EH^>wTtrt0nfVq&di7-o98a`XR?#RH z2i)?Qc8OCTYzR@EO%Xln?P~thd6h$~^FsS=VNZgqqv%3oJM>YAz~(Y8udl~>s+DGpomel$oTfV~Lafbe zANMg)hJGcbEz`FC^)7P4qTP9hhKY)@aLW15Jjt$A>{`-gAH7N!VoYI<>NZgw;XFc& zS^rZoUU&p*MlZjglvE0lGoX9muUxb+fOKO7uk(D+F~K*&#+7ZALMIxWK2RtxZ+$(w z1Kx(*N=q{KO!-+WAW=uPJh$_}(1?nLgsO0}$XX{}l2wx|p(lN@g&2ZGx1hq6q7~OY z3_C(!nz=6CkCMHu!IM60X*gA_#wpmwR)QVmC{mJnj%5cY$^ymIcv#r(1aSmci1h{^kS<=A@ic=fGIBuaKwA9y(Z?d_oqkEpYu zS@pSIc2Sq}48_Nj9e8N6<{?yWzIX~ri-gX)c~+8TYOgB;!ni7yY%P z>lKG26msDr+HVHL-M+~~Gc1hE7=ogcjo(EmVC!XMsI3g}dYiEi=7F~zCeN{s+Eovp z#JoH4rvqQlz|?9*Jlc3b<5ypG&E-feFJ($(|xYK4VTb z>+7qg(ZrmS?@ekwJN-ZiUmvS@**X^&N9K^%^muq-Ej(`dcu;Ck@%5+Kf45UlY>_Tb zW6!+z;yETW^N&rx>}~x$)@~E|Yk2-O?HQcap~+76@h)#S+}*(IMnvpEODspGFlp21 
z{phC~Hb+AlRrY;zpPqPke(C_$A-_!io#p*T+BgSNQZ|weG~; z^yBkwvi}28Uu`DaCt8c7?7HYT!(dM}pNuyKyb=)+=z^#p?hL{?U#&m0R)_qikn0T@I-`YMgG}2Jd zsWe!Ume`;(#g$ymqe4~wn;0HdGQg#t?t1GFrEMI>d-!xOeUYvA-3$uWV_0Z$$?Ty* zYgz{<#~UgkQ`x4W!8i>TXujeV)SGy}hOviEWq_sOg{n&2nqlKaZ+8h_S(7zuhzbLK zR+%1_Af;<-|K1Sbbn*oG6iMX@7}N~qKzbrZe##MXck`A?r%yarW4=>7p1ifZL3sRT zx4u1%&0Gw0`>7r7F6hD#iBq<+?4x=jNw#HBw|&~ zOH0&EI|W53N0rj0S1jKW?bj>Q+khnpD14i)oa+P)%mpqnyg98;`uXoQ@R9B`NVUJ{ z zW^%o{@*pYX4RX^v!7)1TtnNycc^7Iy?7h*eMvf~E2zutpXK&3tm4VG9_FJW5&^R!g z04v@qFx!x$%|@%sO2xJaTQe@OO_$Ab)nGTgK2m;Jt38!M&*L=rY70BzRh!iOxx~w* z{#@Xr7nG9O$XQj{e9Jv>{vOT;)hY0pIRP zE8I;s9i0usHgkO|Lrj4dskK@G_ECoO6;?sf5MmlsKaHZxoC0keB~K)*N| z7Dfmkk$v|GG`H^;MPlVb-M7WUZ2w0uer$YPm*bS!5+7c6Joyv?w)$d`?BfqsYatwZ22Ss*FTO4eplG9T29z$>G1gM-?6~k%*b?z9Yqrb{#^c5TW%y=19?S~5` z^C?orIULB2z*5-#^kqiw3rzCZ)l`eq?o^9DEu2~-50$*%+4ttXf4EKmBhel`b{kd# zuDMgwZ^RXP2c!JV^jfd)y~dVzg~jxsHT&W{*y8j#dui{`5`>NVdX!O>1QLtjxhu+a zG?riw;eAg|P|*klU7(VsJ+Ek0nwf@@sb6%f*CCEai92h`v193fxT*;#VQK&v5(`D3 z(;`qW@d`~6l=;M%5JI53riAK?Ffe@L{WC(+P+lt1%1I)_5rl1e0dZ@a?Jzx=G3lKU zLc!QCnJCJpMU4kiHegaf3POYD_Z5i;yi5+r0;RO1+tW4BgDIN@vG~Tv%C>)vp64pd zOV5AfAnMuA-siO?K!*=e*~Gm7Y~HPPBBJeMR^lnDOm!MJ<7pvbAipVPjTo}4YMn;V zYF=CPK7@IFlCTEIDCuDNHuZ~@zx!XFtC;F~MFsBwM6IpxQ++6bJGQ&sb>4T>ry!PI zY2b%QvSQv;&&lYwI3_bIZt0ApZJ`AA3%T%GJ$e)H=G=<9@3vhI`ULFtk3 zmyA!dJqjV!{~Z0Dqh>vaog*LgdJ(vVQH7@p?=AKcXd@D0#$k$ePk(TAoW;ag=D`aC@qNyJGXI*k#@7vaRZ9$b=UACvzr0RnCZOT{heCP-{;Bw#tDU`( zUx0t`QS{S}XA=IObe7zL_n)=@6Ees#XwaTJ)?1NF4gM3wL-Op7m+~)<+cm_!accbb z_gDY=LBhe!f3dID-F1$&A`(u0OzpmLDozP5{^t8$NaX&F!QX=tdD<(0?5By&Mi~)20Z+6nV zAt4)13JFB03Z5@qb1H_L3b$Ye_7UL*0}Y)|!VQ=AMTmzUtX|B{x%47|P=G8H4`2-F z1Q?5G#w`GJbO81MbR4U?w!30qm;rhN+0T+AG$dhE6cxdEaWRkqjDkZ&eK3F^1OODY z3>vz8zdZ8xSOO9mLXT#1wNBQbPj}N89{>OZ0O#y4 z?@@k5)h6!{D#W7GJn!9u7Cg8-LNV6 literal 0 HcmV?d00001 diff --git a/vendor/cloud.google.com/go/vision/testdata/google.png b/vendor/cloud.google.com/go/vision/testdata/google.png new file mode 100644 index 
0000000000000000000000000000000000000000..333bda93710d1668ca6452a507ab8a12c343996e GIT binary patch literal 5969 zcmV-X7p~}uP)eg5P{r~U(eijF2cCFhh?Nm(edZ%&MpJThhWwz@(-0^$F-{*_JUn17I z+3sLg+Vzi4cU5<18X6k!Or(1@AIzP~dE5=|vR(hr;A@RPeY$rIobJ5VAOhZFsY=FDobldUo%edYS z-0Z5CAd&&@`U|H!!Tu~^(9ke{cxrXD#Te7D^Bbq~eDJU;q98;in{3x#fcy&6(9i_Z z8pvH)`Jf<)pv8(e@v1*<}nQo4NV||3_dS;Y`KOWd1z>iS!e>8dF{|{-1XK-lxbHX z*~&CDG&F&5CpbrfJl3I%$}}`IG=U)ZA=4x$l9gXzfQE*KCJ^+x$Y?!4I1JFx(9i_J z^TC}|0zq#K12i-=G=ZQamfC5Xas4NB)unZyp$Vkg#omp@Q~}~EYV<)GpNp z=qP5*dBv=CcYzzfEoF^D$+jwmtTlvJ{2l);{{OC`ZGOF+H7$H!B0m3W*1oN;y<=N% z`@C%{+V39hOZ|51id121U#cShTor#F6stVg+xCq3x*v&kvH05K@qH`-z%VlI1Si61 z+jfIqxYMDd{I7F{@<9m?Lv(kGuldZ>E6ab|>IBl?dCZ=rtTjulMI~6Xy&#Grh(dOW?^`#xBDGk&PGT_t7M9%gZ$(3A2v`4`)7=9T zYWl2ll<$v=K&p(rtJrCrAPC`35Jb5BW?s3DPdT80@~!s_)u@Ca4> zT*w-aM6%axYl|R~1&Ct8UhIF>{_9^@Hxl_!xv-kfFZfhbqWUbBrqOFgu*-4%=aOHI4p z5lrK+DBB&pz^%3T#j9T8N%KZ2XQo+%5c!LY$X&dNn_Trm{$#MP&1Mk-H#m2^Csi48 z!wUzNnn@y%VtV2Paa4I;?OwCC6}fdLqri6k&k;kr&HtzU&?s^1oPup^$1l8D3qP-D zn`cFm>*!6LEshx5adS6o;pYj$`f^mc4j6F9~f=g&@96ZM~ z09NMKT>OG#TipCwgZYjNaYM(o1c}UTu;pSBcY_Vsm+uq;>F+d;E@aL1P2X!d>)g=@ zx2>W^*`wUTUnRnPDwD0@35F?N~#yWvyyEh%rMgWpa;zABK5E2%|jsHZ^ zwpQa6CA3D)snCr`MINi#CyQLfwg!$MYsKqh;^tq8SMgr`4dg_&ZClwsl}Vs;(a@gU z4W18sebE34?T7gDN_gI2j&jlo1Ol*P)?68KzildT>tXTzHy5~ht=Mn>QR3!f*gLZO zcJ0jL_8YE*yOby(yJ@Tw$VfxE*UjwMt7u!V${#n3r^N;Qm-RWLu21Y*d*X+S+?*rM zx6g*yAg`3CP9+JTL=|K&AqZO|eY5wboDaO_yfiJenlimpO$@Zd>(u* zx>t?ASe`VzS8+^-?2QoQ@wnJepM7o0l<~vuWgJZ)Dx<%r zxonu5)>}fH?Q-+jCy>gp2|h5mT#HIjLX zR|osXeX?4meSUnM`1(%^Qg~gGNUo!a9lf;VFtG|I15iDB@pT{)Ohrrzfj}Zqu#JuM zyhwi|R6VZt{_W5Ozq^(jOd{ zHfOvkS29Twat&R5wK;&xFf~!V~501JT6oBHum~Xb!i+cxq|A*k6CWTx#AV?hRLZB%xE&?fJ%}0sX{Q?#OsHzmXbukk0vPoJTnZsS6 zo{GT~0{sjs7J7ZcS``Rl_zg{$VSa}x06KGt8|g7t2Z$<&M~Q_@0gxo6MnCm)5`Lao zqJ1`Mr8IXc2Z+71wb=xM{23+ZhJ-tgyE2`-kRqUW3lF@}-k+4XM^r)2Jo|H5>j4|# z)5IgpE?MRc>0|dMUdTZlb(jL6%uD(VNT$}5bdd<9Bv0Y(fujynfNJmV2N6HRQ1zE= zpE}i-r-%vge`E?^=Y!juO(11%ev^Foh8B-%Hb+w9RC11H5O5(W8&{dlo&A3;9H| 
z38a{{zD-MLPCjOo==E^%Ue^fX7-*C<5U~_(;||zXAd;!A#JEIJ&UQ@fZ$ZQ8dkMOM z9dNM<)AJ%O(pMD|$h;7F5cIrUM680$VG4mf4DoaP+pbUljZ=w{@%<|8J~8ICi3EZh z{~k*66eZPq6%|xU;3X50oPCXWFkRB$dHnGAJ$Jq75{bqOSPKE_LrPsAgSqQG2^Ld+ z$*N6%O1AZJB7xjclmA#uym31e6UcO89%OOIDT&`?3IPpDGYJGXmxR*&GrNAzF_6fJ!=uzJ;AZWO?X!wfz6L8zi?U&K0IJI zN4)s=)ZDl~rrfv?e=&tXeubz(xv%Evc`@y1?ax+x1pd$FMj*y3lsp8E5+&X-9iF>{ z{uYvw6{51DJ?YRezohHP)tJ10M(fP_^}kKrgy;W%0x=(={Nf2L-4M{z5F!_Y+`~hZ z8&{=cH&9*1#u0Ndy)|An4lydWnL?nAqnQLk@zed^NW-?Ci|Cm0crn}YdF(UgPiibQ z9wA@QBGdN%NYuF;#cBX&YZ8Gdz5$_ZB{uH$)euM@rTW1DwFJ^e?9g3RQ^M_`J zAh3b0T%wbw@P43$zzgd)8JSBc7X^w@wf2dVCeeFh=vFV+v3LSSeR(ZBLD>}}ZImpYwRK5e^2 zaT)gx%ICo6-Nk_{zxfd2K75HOLT&saN^*wA(%H+Z zZ7czRdQp(tZ^=h7r*iVJ65#aYgNa?Z>%(;6Qf0{0w(i;|&F0m+9Zal#yd_%rm3)5W zS448uqkN9d%Xd4Z_UTWR7oZ5h6aX5NrYli%<|!eF!!L2`doW*)|0Y}*d_a)MddY6q zWf=0qV#Lg@b$f9)ctj$4dU~`5ENZu6Ict7NQ8V4Y5;xEv>N~Kvy+S0Ba}-Uy`zqz< z<8#(?KjZn}PGTPGRt58Q+fi|BHh~~%4lx%|^z~9PYkh@~$Zsw%a9Fw>J|8>`@?kMx zMt;+WKnRi!WH~kp&`Ur}RJ~bD9~J4`VBs+J9*G&diyE;qgcYHPLL&YAXy(H}DWdo4 zHN&W+PouV3pcEf*74NK=+WDbA{w3wQL+r>TL34q(j*lV{c~gjyfc>~Jn_RA)K}mGc zEgI!3AXr6iMQS{aDv*R<<*~ygFegd)lim>VtPLj(H)Wrk&vN|7qsqr�U0N z3m6JPKp0sW8@)7ef#2ByzNjH_8IB}mH>f&eAXG;q!hPy}p-9g4)~36>eW@M_@Y*0J zu()+zMC5S+-TB@6)yQVYsmCP7Eu0(m(S@+;dYRA#Wi`_Spy`oEjm1TS`wRsw9sEbOIWFBIc zOd}Gyb8#`kUPSRe7fBA8nmI}&FiV0!hA^gqg#-vodh*@Mwh^D8=m5uFB0s#G;JZlb zhac7*EK7i?GLFx##DnX_Mm%Ytb{3JxEyL+Z&?6&RD5xo?Io|6H%v%{m%Qfs1Q6*s^ zFuSWd4o4O4_&=!KOj66KoEM6j37#A^>vkl=q;2MG>xXjF)UDpp$O<<#Wa5%z9hN{r z8e-D_U6K>(R#HYs2vWFQ@>;L7^P4^_H*a_IN9Yl^dIEu=RmnEiNtAD2M{^yM1e)g% z#ZpKrbTpFg9+<#gZ%qWln+Bo1fcn#Rg0pjZZyJnzU>}Dg38EEy{>G2OVgT*j2w!bC z=%tVb;aGJU3yJa!;|N(BxSg|1FCXp6fNop3ONr=ZgCu!uq5805bb`n;Y?Yiu_m&`% zO9e5Ecoc;pGDK^Fq&7&t|5ki1MgVdlJRiI$m(=$*amQPP2jFN4cuXh2*C%hVly#ybtNS?fMsTH@Ft<4uUv_k~-QFBv8|nMP556&n<;Q8KqwAJD3CEs8Y-t zFNA#0=P)sVg$1PR-0^?XD#U@!VY@zUqY{oXuysqqaBa*v#pjXDPmolh>KDjkDLzjK zVb>RNUEfK=wbA&7Cb+ow9Fn#DogIfn(k5>^!90T1TdXVN1{*U@a1x6UkOvcdLX@ro 
zH-CntS6FWj_=LnfnSVLv&nWPCX<-5(~Bk~r}GnDnnkOjxmCE1_UNRbW&ERdtdG1bBJogx&BkK}AF@d6+j45~k4; z3Cg9m>%G>jhy&_Flzv&0Q8Ng65z4Aj+s>7;=9Q`&r9?hrFA*Iq!g$L_i39J4r~{M! z6#_G1Y}fx*R1dGeR2uzbm;i-9kf>KeAcQuH0=FKHnm7>Q6m!Nj7AK$tgz(FD{9Cvi zlp`jMZSW!CPH;|c!Fq+8Q@#nQ#I0^bB#lNK#jNSn%;HcE=sm=Z-`!|Lva^A%2l|j^ zcacNSQPVp^UKsy@u_X{KzO5gulXBEiMj(XBU^#1Kkdqr_2t3JC7piN#XX4C_5o#h= zVmrY-Fo1!iT%v}*#&*01ITB}1a3Vp?8Y_^e5a-4mawLVU_0(tsvhn|z?2nnFg`AaD z?eb2_kMy?XkRuUCoTo7QaRB>!ouGHXSrd;~fa(3-qC6$pWPC zf=ni&SE|U3)8JT#2%|qUX)>Z^#gT@Fh6WVO1Oi#bYN4T_p@Ad!Xas^hA8S2mXlNk6 z20^2$_l76y7@HTZp`kIOYv8@y_2**pAj+sJ%J}FACu&AcU>X`4$p<=eIjSqJw|Uo_ z*na>C(PmF4fc7N8iGB?Y4U}c!4Kv!LL3~YQKhvq0h-?V?kDoFP4UOaiz8yrp&}PU2 zSc8CNMJ6A-K%C3_&rCx@Be}@U-*^~Rc!;LXMyB;cqzDH*qR4T>tgh-FOhZE>*~qy5 zjZ&=+Av&@#-A=}F&vty5wFWdabkzkVRmnkTFJd?pqKc7vw&2T)F(l~dmgt)qz-MBu z0SyfeNYTUkdWakTQQPqs+pg~*azI4G9dCKK#yc#34r?uFXlQ@{;x?^9jszkz){4;3 z(8^P9XEwaA&)pjjXRQwn4IPOfm(fH5d5K`s-Ad5V z(7@OZIEkq}wgh)7bH3Bi(1_nKh&#b`Q9HCljS90XHuj;Rp#g_6?gUq(G~37|L!J-% z;95qQ@ITg|p`ih9Q`lk(BDsb;{^KJJX3ylkEl{7@u74XuXE0`F8X6iJZ=Ym`(reoF zkLFIL1OBMo^^W4M|9-9KXfh4`?D(XA;=ve6Q(J;~8p=P+xz{JAJ&dz?1 zo|BuCjhlsyo$cQsc)+bB#3Z*#NpG{!P}8vee}3J30a26Uo8g-g;BkWRsqqM?@oxG- z?0}p^c>j5U{`-Z8Pe2GrNqUQn9JrwI4hSEQfB+wmA5Z|eItch4L`Y3^_r914@jU|@ z5>8JV@vxL4QZCi{9$M%GmRrKsEBqE29X$gh6Av#RKUhFgN?Jztq1@vqYU&!AS`b4c zV-r)eXXbYH4lf;@oL#)%`1ro{^AC6*5g8R76AMdCOV7y6`jDMdTmnawmX%jjHhe}l zHZ`}jw)OV)4-5_se;q+hPEF6u&dsCOFzXvXH-Bwy?;IWfKKXO{7k764PcJ+W!GEa* zy#JSC{};We0ln}6w-A#2(+dy(E$~Y~O-OWKjQFmK0f~+0Jx=j3QX18iqWYd&ToO<$ zt*zGt86CIe8qd)`)&8T||2xIP|9@%rzZCo5dZ9rS1bBe+2&h3ypq)sN(HCP@JLsp0 zlWiYX$WP%K+rcwtKEhjnfWy8&VN zLt=vOJd)B1!IFhsb2gk0h;CX3NS=3E{|xa&6{Kd-j*9kM6DtOG+7vjW7ueJlx{%OPj-5lwBY=PAOFB!?Lbg&rOtmd z+alxXXAI$|lZC0^I~(FD$!gld(6>BW(j^Y1F6hY2j53{a7a3qo(k1SM33U%8D3N#F z5qU3&P7WT5m#Xt|mFm)~)Yyi^SrQh{_sBL^E~vY`v{QP<>2*zUYjcJ}l>J@xEiXpR zVUF`VE*^raq>&62WGLJ4sQuE63Nc7;D3_#R{bMpvOby6rl+|XpO8|Qv7bxVzs;uT^ zch}#Ud{{MAna}p4BS{_NNf|@c*E!F4_&%+-5t*eiD}g0SYO6F&tAie&{uck-@UZ6a 
z$R|S2ee>7IbsnBKeDw4$)tavfRA@!`9V)qBDyE|Z)M*4zE!1mZ-^pqId9^P#<}OLg zQ}JNWr^V7VYuB~;r4_C@wb>=ln3v42mq!*^{L?h=+}o0vx**F$nZ8JSJ^YC-UD)dv zhW$NMN$gK}{S7D({f++jJS^l=h>G+EG_GrXiC=I7imC3Z*FMWyPB13x9=Tpe49LRb zbXyyzgkE$texemz1@=xWxT+?hOv8fRY&~ zR#PTtn&zG2OL+X+BMLEl(@|{5Rj~6HV?0*o;Pgvl;kRa!Z?x~ft9&kuBhM;#jJD!G2DFwX;4^{PX z88O=nK@~VSXXWNwCMSHjB>HhH+X_OEEQq2}D~y50=)=5`RGy*UL^aceVVi}kj{|h$ zUZ!0rh><%r;NN6}fISiTf&RxyH6rLV_eQ(jYwih2aM63Ct~;aRS&3rI z!+JZ3AWH&U zF)EgbpT+>%8K5A2r0@YvHD-Zo}_3EN`)o4W<2_C3qx+thJ3uvc_RMm>1&nM{i2ZpQEl>(w30dR4(LEn2)DmYTS^{;{a7e-G0>jv6UAwcd8fNefN@n z{UZ0moy-NbJXTii_9oSA(So5rw}}N3_i^ua_BQlV6&Jn64$`pWgZGv&&|@F~lLb zNio}`lil@omQ;wtn8}bO>e9d3Iklt7v`eZxpTsYpb9my5iomM)(Ojr&90C=y3yTUKH!@{XzMy?^oP_Nv_@uxQBi8nD&I{26XAS7&6OyZM18e_wwUk z)e==2L;d*zy5Ju(A$Js>VMp~^umw?Xayu3Ku82xIrW4vbVk#{*sGCT>iH-R-1=Hfb z230RQVR1654XI1i8{KWwOErowG}Q!ep9YJ2JP;T6uSvr36`#zO^;yNnd-;=zbIhEHbaVppstIPiADC;G+&Tvv=qS$5vy^PN%e*f0<@&^g7 z`v!DNCR&Ij>e8)b!OUt|$4xkLDWfMJh2@!&pJvQ>_FRRqN>tZ%_XjIr@ua$tIqCHq zkeZaA-u^C)W6v__$MWCz27I?_7717)k}lr_8=ug1+xqB`>rXxjmw4;SjW_&#X~$Id z(p6yk*R!%m6OBv{igTjAok^@IMF$JGtWb13EZ=6Mp2*b=lBNyU?zHdYD(2AnsZfHH zUg}Hhp{1;g3tWjh#toco_M1%Vx|L*k*f820cM%)y56f*Q~? 
zW8~~L?=t^rDEeCXaiLaibA;rrS9Xmu_I9!$dli75Pms3 z|1jpc^64~ABoh1jxcAjrY~AwU#vfj3v+mP808CD{cbjEa6A#PvJNM7>Eu{|csI1E$ z;#vAVtG%>y<95k;o$Je|VH#`yo5{qOx;Xmzw?&Hhm+kklhwB!7rv-Zjgo9%3do@*y zjNMC%9P!O2tH0c`ca>vfv_ZjWA-=K}yz9dUrZ=KW>P~)1dkXMzL z!mC^Ed2h5=ZZ9tWaG`PYf850K(UC(P^sDs`LTKpD{vrMsebezHy}N#;7ro;yzWMyv z{h5^7xrM8uH)o|L+HfHn1ql)3Nl#F+xIAp9wuAL35skKpRbX8kOS$hm*fsx!QR#TF zQOUhiVENY?U0&55{%^gR`(~vq4-)miaynkr9H%PH(;nvhy$n6P0qvsot_Y{K#cENX z>%~U5N)KuS5!ii2?n`BCMj7lyYX>%|iX`fqMMr*+Pgjc-Ce9bV>YM0M}h{JPH2w(?xpW;Ibl zI}WuSMt5ig6}qIT**U!yE`eE#(>F$DlZ{k&2S`06P%yf9r7nCSefXDtuiVBI3F&Lx z3N*$jp|vr_(cvdS@`kR4Wm1z%JH%7>F9byBNf%m8e&}ZKTt^>yO9HbN!kY*h z)MFvJuYMe6@ixmZ#VD&TrU%8fR>{y;naJ%|isF8!+BNFGpk1Ald#?`;evz>`TWQBP z(GVGg5`zd}fAyGgeZAMWChwUX>DIZGT4-@Z$t2VHvm8^?s=QRV7fLBwjo}a8f7D5^ zb0Tz;eyx?-|=X`x1b@iDN8g$K?n0(;#F)NuFkOY-ktzT z<1|VW$8krx)-STl1$Hz!6W*D|X@%pv9k`3s zmRh`%mv{O*<}DF4rFP)Qmk%P$*;jIAW_Wci;K%%|SviX-q46~LO<#3NarMNns~M`K zzhf_O_iaMZ^ga0N%qBk!{Uny%1;ozw#Dg2R`!9*?kS%6cpTj7v}BY&A~Xw z{e^!6U$rgV!!zfU3eBv4G?U=47UbSbe1yE9^u5*bm=@0VrtSsR;e${*2ZD!WGH)JL zeGPFcNVowuBvtcH)nKfX%eQ$B*3_FL-B54A(sPw%KHv0DB^H+LZc1M9TG|?(;M5lFIvJKftWFr@#EE(J}2&VnAxY z?Cih}hSe&+f0N>r^D)7rQZ7TQMETcM^8BN!#bAo9Q#4htJDIa|Jt>Nkcg2Iff1kn8 z?%9AyshW~Yq~#rG%HTM>l)arcIQlq zMTU8k8&E(;b!_UFn0b(w{6{9lI=QT5cT&F8XQT=Zz3(YgntBI+&XW}GR{_oG@biy3 z3s<>(Q<8PCd_io-+cDl09|yJ;VYKA8x8lRLUM!UTw2^*%_-#kV2vgFs74I~1h-AGk zYm-RuEP6Jfri0bLWILZc@oyjf9Sg;xT4@B$%Ne~!(P0iH`Kzf7&)*kJe}NJPchqtN zs@1uy`c<%y+b=hhwU%^yE5~OmeCS!no9?6$jo+zUEHH6((@jBZMlFC*+<#msdt zC^xk5$we5;uXv*_`P}^#q)fbnUBM?a=O)4Z56iYn$Ktcv7Y{tw^eKD29*fRcY`TVuY9yGEUH+=Nu^2@ygE`J zr*TKo&Ji#7e9oV)s`j&6znxL09ikM5jDyE9M*Xl5*SXIPq7F<+OO;E0q*)`1*1OjA zUQN0R(YXN?4OrD*sO$!mMO~BO>MSN+=S=D&Cztkr>EB9-CGhyc$XLr{)`lz@C9(IN z>P3~G!5bA^{`M3wR!f%7yjB3O?Uqw3e3e`fHs0tm-ceqXeMRhn<7wE-N`$4JLM{g@5gyP$` zwPh6yeHj*RR907oCpw1x1w@wyMNUmg{^cvr&ZMzmP`85SXeQKSU6gs#ON*geVbwGA<(P zW@`N{RS=tH86-X0EBAaQM(Vlly+@tt4Ds+6`;OTQKVu&|Xt`rn@m(veqm`J@?8uOp 
zeyn)RUEk1+J?SBzTU44A&j#2VUR^U!C}%Abgu$8P1fAqR1)zR8Tt;}Xed&q3HKu9rXaAoAlW;_ z<_;%~VGoKn-I%*EAtAFYH=xk~%nj)EnCib34jncm0r@O-(hMavXrY~Uci8T?a4EOj z3l)+PRt=YP-U+m}+4IC_DO!rSYC&58W9P5A6Md8L9GK!G+#9JeCEc`MD}hff4M(>l zl6wNv^jLm253(|7#((*#d01dR`ECgcK!Kpoie|i4>dHx-Cx?IIPM<5|*6*h)0tD~F zMp--r`PPC4YwDb#ek=WQB|GLnVxZsy!~?j10+PBoQkjIT1VBy6g~(19+hm6lrfN*X zbW&J~`Nuu0Tfq}vDZ^0CdiC4uc}0eaiv{L?Tre&MeQJLkv$Uynn>e+L9~7+34)(|G zlRU*Le4)6?d1Sr{32J7L{&h6q|2OuJo5`bhswj~+w~&W)&dYo##B<(L{WuBcjN_4S z?HR={8Gh5Sc&AwHV?Pgc-9MrlI*gTjXWe-iDT&*=_tf6li7UC6B8;0LRf{|Rqo<|m zzMC^spfirOe6WZwtv|&A(pMcLQuEyCf|xL=qE2ad`lF@&LH1Sh#A#bm!1V@1 z{}X3}X=$X2wqkAi97K2Ynm4JxhD(Dn?C%4;&k6k$AMG7W)%>G-$M`VTuxoZF^slH( z!4e&W4oQ|9&rE|&M*mY=p5!OK8%67UyVj6E#~V=UqjkLtdZ6w-$=i*;itpY)E3n^y zENeqfDqRLM0@B@w_q|<^_e~aee#N^^25O@usi=3~29r;^y$Y1YWDAErc!X;*vHmsgtIhLPR$M z#|r(l*F`!@G^)`!ZDmx^o7mk#k+<%}CVUjE7*gXC?1W zJ5pr>dDa*md=e#(9IE8SzUbd?;ZEDOdb{Q(fgwrQCoWn2K`}p2(tVy_XwmL!cA)XM zB$j%^{?Ug}s#mk77ycFlZ?RQ%dhJV2`etCT3ER1b5kd^Gcneg@w0|5nO6_CCDhtuX)NpPq(jIrZIhlMCx*gK zyrnk0G@G?+=$e|0_-}nUQ;t7mRt?u>j>lJXziZ~V-a~I9xSq$Z{ao?drzzSH^?KQl zE4ypMt2!nzw~e@m@yoi@P6(aCv$`#yFVscj=dClvw(@j_XDS5wf(&j+7)eHul7dY2 z8{*BouoYyV9~$r~d6h}nlq17xjN0KPe0TK3n`}p`*>e<6tu$4kKs9NgOT4SK5z8KJ zHzi!d^T5)jdDi-6gOoLWGfme@ zQOAORW)EhDE&HhxN{xn)cq7MO*j1i9*oXe6X3Xy11 z5Wg(QXTXnSkY7YW;zDmnZ|&Wf*QiBTxZsi^CDVv}g}r}~Pa7wgrcThQR?oWQ-7NFb zf*~%l;!-hxa)`eBj`^2wQS(xdh|x17t-nU0M}@Bnaufg1{uDSwOE(=DlMaS&Xk87> z6)<)XmEkUWvZU`mN!gj%n%t%z0cE0osvBgqg=*qOtBMQ$sv~lVw!&s+Rm%~k9?Wy6 zXs87SehGe9SG&9J{=TXpnn~%(JLHOCdN%#*?~zAB-kYA5v3bfgMhK@mk0WirLb5D3 zUIDGvk_$PCz!yh{SMMe3uE=((uO}ZX%HmvjyEuOI4q2y?6(}EGWc<7=$t>tb& zD@yx^Q|Amfpw=7ELU&Q;1s~9rYX8>HZML`nF2} z`5S)8u@FdCwt|i`mxjAPOLaL{t`M+e!N_tk+qUAj(i1h}q40d|_^Df{6n~J?a`hM$ z;|(a>{|5Axu;9AyE&I8Mz~n^t#>JoR3u^is5TvNq`t;TjU-X-#uu1csw$lt751-s< zYRhGfd;4Iz+YIj$wp`|Z^`2^dyxGnsHY+E$tRGj%;=uP@1WnHTrsf5AW@ z``b4lPNi_5Z|K%|1DY^29nV2*nEPh?lIA!iG0Hs|K$nv2VI4W;x>qa>(1aXUQtt;-A!uHG!Dh z#)+uV^6F{*N`xJ%LSj5ic?~<6Pdf^vmm+qX7xxx!K=^gMD0iEwlPVp^;wE$XUB4d} 
zLPgPAt`pb9foj;=Myy5e=N+JZU)1gyQg(#T7n}`VPl&(6vmH~Vd^RhP~MI`K%@sJ3J%@_;YefD}_Ycq+IoFTg@KPnOHUXlR<1-;&-QyqLpw&zYPxYE#7oRDH}nyr<<;V89toa_lmH6n&Ww zz#wGLqz!1Pm)71peKmWs5=sBH%&*dC@=X%8b0!sAk<2Jj=ZSvI2FzqDch;F=j?sj% z&bM5Wr4DO%ObG`Jf z3d<4hVPW=}VDW(O+_Y(T<(L5I2G=<&o!qk)y`IkJe~+n616C&1p|vGb`^ao|gYGHe z?>IJ$?A&8l=Bs&zti`!u1}iVkx)5wqQV07wJL%(^XRZMu^RGF^zIj=4c7CQkThGuM z5kDqX%uoNwotkDYRx!|&HqAn{2duMsC)VoDcLB&FptJNC&w`|GYNg%Myc-bQ*(w98 z1jRiA>XxWeMJDWzMC8Jl_@L{D_59!Zk@hJae?>wfXcx`!iAh26@6$HrBKI@Pj`}dP zr+NPF{;u3@ep)pfch&o!70Nmc%PN>!J-^0wg`|A_5Q2BvEm3lnue5>m8@>M0_VzlV zwTc}%@uS~8Y9il=(%|TN9Cn9lW9WBvU3kv-q~c4bBX3uxny8PTo!b0jmMppN9|;e7 zw#2WaOL127y%mxDX!E`7G9xu%I>Q5pZridi8e`*c$OZV|CyA zpm8anu7}ZaH#A|3-uIf}{k(n{d)~cIU3V#g4H}AkRr97(g!xZ_VMC7Ni(cW;^ptm_ zL##yhmv@c^ldu=Y)0eE6g%)HLr8YFpV4_hlYW~&cE6JvVUq3ZWDf+lyQx0*FOzZ_d zTGipgyskg8zP@YW(>ZlAxA)80y_l}zuA6+=o?hn1%psDQRza2Ck~q1oG2L4w!`Gtd zOSQP6Wjr>ysY|JM z*$K^MS)n0D0;z&>7tu9qW}Nkqj_af8%mh!#fOwDl{&Z+Rm;$UMbFPOW>^okaZiFKX z3v%TeZ`1UnLmaGYs0eot^Ec+r2?|*k-$k9<%amE-(d282d6J4cwVUv$9wTRe4D3ib zc?fcyw&7e$7B6Lw_!8gn(?gDEYvL1p9yRP)TzE;3vh2uu*MErO@(_49;M4VBGMMS| z<)ngqaK1=mYtzIZ*+zM1A<^PdxscI^c!Z(e!(VI?r^7C^al5Kh9n|H4a@fr(*9u(P za}}o-$Yh;U*=NzYk^{Qbs;|GuawmgMj&0#+gT+7gDAUZLDjaDPRwQ&ph2&`yN8$~r z&HR{ZF63Ivw;kvq0=Aa-7EQ9gbf1Z$53gV|-RH(q-b^oxd@vylen~@F(%xnRH9BNw z7P{>Y+M4(5pRVJcjml^2w~(Q6T2Tpaq)JV zr+D^o-+*fLL_Vh1*VOH*M(_XrSNjnIJhEGGg(tXO^?&H6{Zv|rvmS4J=T4NsXFlme zK=gBTMEG^8wXiRrF!Cdnx3#+U|2;F-f%gL9ynn6CNgwWUnT}iTTkLb`POn-UHm;^$ zdk&q=j$hM_t7l+DX?NbK=Gj4E8+I+)+J4<1-K>Rzhi2b4FH2w(PUGM^-gD+orcCPf zA*V0t>J{h85)}G+UMK^l#H)bWIn-Ac1BHe*xDz7#!zw?kmXr?^Iu6rm*$J$3lv(Jy zAE-L@byOSktOnRye7VeDM1bX=UI_kXeJmdc?7-D^D&2szT*s~%G~GKy(34SARV&Lb zPg264%-)qNxY(%i{kIb8EZSO@!@+U}KbJX=34hO@h^vA}w9pmam7&f#InvG|A=*BZ z_}-_TOE0r~mLB4)boTH^xYD01nd@lU0HuAPh~oXdZhb-Ad>zraQGis6xqRe*p&7{3 z`FR%E9%o_kH3#EVyZ$DEJ}&EpM|VO1)xHd7g={5|;3xr+6^pdUjeWCehKai7dn=z6 zTGZe={K&8kCw2qke%BLnDmoXkEob^Yn1Fo?y>b;VS9YDLZMA}ZKd5-88K<*)@h*wh zNq>wk(mqxFJOVP@OGtRfg5y29`WZc#j`fmz!D5vv0!~`Fh9ur 
z%1l2`ymt4Ao<^Rzmzr_5MVZ-}knX8wnkIx<{z?ghC9SJ9haLnA;+Qe41qnhM-8wG4 zgBY~1I&t%{{&$QC!6qRi>G!=CiGrzh?Ixzb6Z1 z#HHSV1kUPCSG!RPcGw@P;~fucoCETUWMb|N5!GyXwGQOX$U;5Dm4E1}1+0WA3i%QA zvoSZcCk>dZ<$aAb!HgX2bF?%iQ%JfqCy%HMZ@*6z=l|T;yK}1OhZAVU8Lt7&PhGpS zobBsdm)uy#eqq<=N3(HA*XPP;m*37a(l0rZF_e|4Q6z!j=YzGcAiSYc_DJNIM4y%T z>?Pyc%AL_#zjxYA4XM4(yf8b)bcofXqjEyZxSow*sjZODda=mo)R)?;05ddB{;{dG z+>!pYILswuGE1mg$F}F*M-L_TObnQPxW+ z--XaR8S02c2BZw;L_GaINCE;ebElzOsphE$tQaOk9QAI^ zyC`x`>>U&*u4+;IofzEC&DI>rB=k@zq!Prf|MrVsx68M9P^)K2smMYGtf&4@jUe11 z=FiX4lJY?3uE~Sd_Qs5os6+4u#P=8Tor`ZF6x+i$5ZXYkIN4OinAePoA@@9Mxti=GwTQTKK;{#Aw-@ zeSN-R@57PuWfgYZJwMvt#nOP=KWaPpZH=m_Irw9t-jr!EJi!ZE+$iFsAlA?`&~#Kg z>buqrSu_ zv2U}zLA|k2a}0sgUv|CA?77=c!eQHah!Gt~itb`Yy*l{As;4(2{IUzSgsIPz|7yp_ zV}G2U`J%d{aQM5rX3yF7p=1*C^+X# zDPzCq7587;&M&~_j}kU+TO2)9N^r9c6Dw(vn0%r>h^?Q-e{qzF%41_*A$2%$G)c)@qn|1&q?`6SN3lB+?hr z3}8*)-+;D^#=EiBKu_e}b4rmFgrept(Zjb7YA<=#T(bigk|>=fwU8p)LUwkc4~QWr zAlni}YHU%ca-c-9(0- zU;#glwE%WV()FDmqy#Bz&&icesel?Kwl+5Upz9}*(?-&Ze`5aX4W?_&D{H~dBuLhJ z{osjzI`)Uh71F0yKj~&NtNXd>b2C&EMw%%}_-B>ExDz=;3jxc+`FKxW4Ps5|jtsvn zvnKT^vUFhW&yA6P?=Ey+7kEu#oR@7`;3M$&_gV11(D-HQvEnccdmTO)?OT!#DGpg$ zs8>2j849>~?DM6i@jJJ_g=#W>7kr)DjBYg{4$qQGEK96iAN68fjlz(GWB4~?2iauw zNBYL!&$RYo|9%;h$qD2te*HaUeMBMHY`_Ns_IxfL@6h&e3c;GKi$X7>mV|3K5|d2ul-tVp|>8a*rHGDat5)~FQKhe8RIK? 
zsg5Xe%0b5RR+Mc2J$UaTHP!$z3c&xe3f<0UA4gW8gCx`@UpY9Sf4I4%h_Pks%ho6( zk*&suHoxV~f4YuZxQ@PUYHx;N=o$QeD)00yvQc_hU~fP@w`8*ed5TF$xUOzJpqgZ; zt)nTGgs(HcjzIA5jcnglV;W)--~`bcy};WdQV>=GdYH9b1yh%_Ym~Y_Yuzs;;Fh{W z!h-1gFV8>PUrU9kO(64_2;AY$nZDF)5DV4%@E~0hsZx_DS^;X^ZJJ-a)$>_3PUg~$ zcL%DSP2|Ewm*^h|_=p&P=& zxrDy^F}1;tY$mv>$O-+K%OyuA@oz@ugY7>YBWt^)p-$?cWRyq|TSZ`5Pg zwS8U!PKJ%+z4hlK0To|1*dY-((O#t!fvpgn=zriE=iVqH@X&7V;m^e$Y~*aeC|*;& z<7(?A&sA21i*UJrKt{wNx9)@nqna2b?h|Pii({HehjwM4{!wDCbP~p$X4`~GXXL}d zK2rG6>bsa9%kg8}i6d|i-_?|qd{;g9MzHy&pTKR)F;?s6)g>d@Cf{eswawhzip>fm z>15XpLMw?uY+uOAD*fK7Op6c=B8r`}J`)Eb2ZVJ0wxrW~zT zQ5(y9h@i5$ojYgrr@ntZ9p(9A?JciJtNXF;5mBCNZWQ(}*7f2FE`6I%QRPMOzsm5L zZ(rhiu4!OunW7XF7x3O`pFR`^z-*mO)0-pfwvcQ>Zux}%dUIow{W~HD9621nJSfVk>kzw2k-nw zbH|6=rF8jJ;>J?nSj~d!JaBYwGs#9g8Q%`@$%}uzkS$zjmRQzhh$C-6lz!g2Z5EwM%BUKVd-4w9-(?paYe^X^ z+mhZZ@RVEUUaU|$)6+jWaJEOPzf^>;7Yw#hc0-;cWz}aso&9{b_$3X1O{RLP9)(ll zp4YQ#PVRzQ^s^4WHGjLrhk-IBa!<$xYwQ?FB|hRcho$8#3@8XInw{SEfBS$FTM!AA zNl=g|uG9+M5wgXrA0A#9yMxo~otcE60uL(`WlB?|YS=L~q>P8tzqi;yMnHf5Co##G zpd>r3R`$@fdCnoE52+Vb8~7DlUbsr=V)Y^<7Z7dr*zYOsav!T>opRI9-zO@OZD%`! z03tsaPlsouN&KOd(3{%79;+}zFZZ;_WO?2lZe_jAK%G-%x0XaXx5P7@6@8Lx5%EyV zrXhhfID!I(UN|W-$%VZ>CF0Gfm7obVtk*Rz^Q6uwh7ZtK3Rw-@H&5mva(Vrju_?y{ zVU^xey!wF5Rro2<{o?g{;t>;Cesk%`7HS&!0(2j4qxQvt!#tS`Z282mv|%a&x?j9* zFCUzqE8vy>;e4{CS`Efv6amdow{)f^f|Eld;SLC|m=QzOsWoS^@!0x1G;S5rlNF1c zPl)c*+<>|vbfH}MQM6!lWw2%OBV?|Nr9KfoJ}VO8@*L?_yfGI?g8|Q#;Xzg(!#vHC zh55}*`NQ*|c*-;fg49Ha1?a0nVR~}vh{8V=5CfPgHJdZ7%PXkc`Gi+Y1{gB6;9TU? 
zQvCpm`Dl_(W=c+Bjz77rI>|_Ft`F{URt1if*ybBLD3BS>By+MrWgzJ_91!RC_c_z~ z{_Z^Q1}<)?R#sDTN9C(=k%@?Nf<5&Xbmw(9Qt&h`8_RMmp{-9WoaUhlA&Z58efHrF z6$Z}ISNtv2PpTkl+D*wY5h$0SCS56KnmZ~kCN=<-Q6UYDEb}a8MH;mfXY^YQh@dmn zXzB5J5_8man&J%sdE$+VoK+`0IPg<4t7%FxabeWvssgs)LIaQbd%_|D`d|hFPqR3h z#71E}rP~OR5-cTGRKH-@HE{fEIh9fPXC_d*E@ZcZ# zXV!KXRpcTt!fUi@BQJO%7%6RV3j!Fnh8Jy0tkIN4%V932)-Rr111bJU7Z(ZJLH?mR zM#7MxuYt2STja`vwz;qm!p!_FF|hF}XqXB_gwV*gY+mapFrOR3xZ)1r1bks6-0(!~ghxFwTA?kAW)*2Eq)ZpyC@&10XaO3PO$m)6`~&jgPt?HpQ@ffbabdT? zq!qqb&eYC}k9{Z2jMl*MU`x{a$2?jv6Pg1;ztT#h_MmdG?mEM$&VV+I*4dJD1dt=6 zG!lR+#n8TR1sX zz$Tqq_B3Emjatt2P2ynv%22n8CTL{ws!CHjS(+O!`Yk!N0O0A0NM}nSjpl8wrcyx& zk$htReSSehGIfY4IkjzgqsWMX+DJVkn) z+RR0}@tbG4ahQwfMA5d&)3b++?A{@#*=b{oC zdHKO=gBnfYz$k9TtEyjB0f2Dv2qyNw;;(qdUktQ<5%>kL=Z$$oHla{zK*jL5$e0iM zbO^!6!t#+Zz7w!qc8D~p#Vp!QlI)X7c2iz&{|iIkslD#D(v0FYn-8TK6=ajJ$riKF z8W}g;-Za2ZYB1Wskq^SWU~i(Bqd%pXQh>t%yK_RR4e@lqYW-qr3>B(SVL*tN35}vz zI{0yk{SHp{6`}BXwcb7uKwNWhLCBgdaFVLR7Zs+vW{Z!!>NfW`Cj(*$h{`NCz^J6# z(nevfXo6}}Sd3yl6V&V-v=OSQ$ zk346RCtIpDnv!`Sm}G#gBme?}i3rmFV~8e$riNCN`vd&uKn3&9PGOq8h=milATaVf zCvpH0><|%oYVmP3O($}-0b6k}5x`#mcvW9b8)gZB%9^I-KR^Yt1^`1%y8ldDBQlcA z11?^7ij2eq*qR7n>zCkafmepUS76I3ilLd%MhGz0K1usLDzSIzjZP7bm*r>0C(pCc8UR70?*Ukir4Qi z=$7sMi%GLclW>6-$lml?I)(rs-9(nS(bLA9q88dVa_}!e@zl{Gg471El+-LY zdcv4U{%xrEI9wh#Ch zPRb$?fY{Xg=laD3P5+Vwz)%ZN+j#vC)Nbccz&)xHo^$blcu@WYzz&>9Jm@F*(9FM~ z9}80Otfg9GO%Ff|6Vp^QjlP2;e~zV3!zS4#1<;$XY=P{FF$oe;IOk z8VKV^E;3UfnGsefBLsoirGtn;ZIgNAVSSo#eE7l%0t`4*7-`fFuur|W13yqVFb7o$ z=9tB1MnZxB`j7x3HV3>2(DSY`&1DeS6Od2ZoU}qrQw`7v6K@pe^a!vmus|a;>_uS! 
zLQ->XsZcH?17ZgQM3@N>9JEok35^_uFjI1BHl!^AKg|ue)eXS({yEAP2wt;7VF+FL zKa81#HUbsY#5dy@PZe4)O{7zcS4t;2^cCX2;%`B3fDACRjrnurRo4_AAc}%Mk*1m~ z5Px^}fs`6kN)5q$8V9^aR0u@-OSG6C0FXFXA7r8l&IWF!gNwfo*a0WnB4a%Nzy%G75Dujus)1vh0ySDN=?lY_Y6&;MHsdB4gDGra zF+(@_008FtKPBuyEy)jnnDY@oP?)Fz&sWhbA>rdppCvziD4`)x!+XIQZ;)d-O%&A( zQAdn{C5a*i7!bmmHBdSxHHNa6QQOL9ulWardH$qOE1Bd~f zlN(gd8JE}xrqt5G|6ukStXYPq4Ok6uo+c45I5NsM52D74#_to;9gufNy@#xS>O&=y zrO_DDM!*+z2WVvoqcHJ?!*GBzUZc%@!M+zjxUvDoQ2Lr0{wEX?PYqlsLW+yxTiSW5gT2)O|MCJ3;2r7e@@sppZM71`J-V{#QY6XtSOj(qV3F7g)K&LHppi?wD1vO;TbUQjHc=nwf zsa4Sk;*8KwKQyfhmo&}uB5eoHpH4AvYePSMZH69JE>t|8#P>~25-$|i+SDfsKMC9E za+aBRA#|4BrzN$}&zn&9%C!n_ zNFU$t;B^0tTP{z>>4aVv*_Y!L<_OUVxx+>EHN^q8al(-k z0(}&UHC7v%S?=r&>9N&0S)INEw?dlP|J){@5&}k#wDmag&mi8B(EalDa)h?BRkG6A z%tVL|v6x}mDY7FZfA0`MY1};d9!eV3I^~~E6#H9C|17-b0|TYl%Po+=5mmW!PBBPeFI)+ zt@6)m_)pSNU>PWZ@`JLM*=<>GQDL{?_$dkGsPJeqH%52>C__hbQsM-`n{4S}PAQ%2 zQ%J;{_Y_e`LPjYgDGDK$mlQOBQ4)a=#A%Ynp#%_0ff`NQCuhITjJxG>!yn&)V&dY; z6^$B`eqIYNIiE`O@Qfg4M+%m$mQQj8lbyuvb^ejgK(yev&b{|@CQm140xe-0Hn@A3ZU;E*TJ^W67!U*~n6*LmfuhFeo6E7!A8zfXj<0U5Gu zH8B*@ypJA9MgW{T^GeN_bt^M%CUvysq50Y{)`JoCCBCGf=9+P<>w&RQfye5!$K09m z7S*oK&|+RSc{IIt<=?Z#Z#Q)rBt*)a_;_l^WtS!sYPT+OK4-b0pOxO>2}Z_A8PIwh zmc-eN%(-wlrPXnTlwA&^DTUwEgI}zGV`WXtBfe&e6v;N7jwgQA%Yvgo`C?#Mq7&*@ z89lw0>_K7JCpNH@zLuL0J%gwk3lV^}78+nF!(Y%=luaQs*c|_WU@G7_BV~9wp5k($ z7)}@oGX9T7kp*Wc1RRh;#eBIXrN(JC{-Tj3i+E64F;+Aw4HsvCn7pz(@f0{z9(AVY z#i3Zb3X`|1%v?ToD>G_wF)z;Nz|ESSHKLMeM=iRmcfdkn$BT{Om zm1Z3%B1t?gJS9yy6DA3*hA#7IGH4zN{v=6fq!eTB80Aopb_h*KCJljX>edYI!K>`N zcIu|yoj}#s%X8))VnDxE3(--O^@`08w_Cr-Ylcy(^HV0`>6CV5X#K~pY(tpn1_k9f zTd0XEqvNx(yjQ2Dn{s^uO+VLK&Rppmnb%ieABS4S_4h3`&99lb{6w{6@4(uSm5l7F zd63>SNJ|COfA4VPq&&SohHOhE!30rv$0vp5_{_8+BQ9%^qD{JsF4bNFBb&bdNaSpb z+Y4oNxhpA(3#9fg>7&z0b*TDeR4S*xY-z(w)fxM3am~0-PXf*uO>7_n2_|JTn%=rJ z5C!A$Em4MxVPJ3lhIOsZP7fgE!1hjNc!)-e48p?=p8!EOYBcUS)VS8hpZ{Xocz>;K zT2pkt8Vv{Tk`Zhg;r${a4pn+D2n2u!DQz$rbrs%dA*EZG0pw-gKVh||pR|Bi_Lk-8 
zYqhG^@7?^cvcPt}xOG%KBraPAy32CC*0Z4O0tZ}NAv+H*t;Tt)EdQX`_?53aCxieL7*o^vTq2ueB_NdoAIWy1du#S zu}Peb@kQ2Ru;Jy%b**oLm*6WYnC@#Rwc??ND5-9?G&w<`sbYH(BF_h42{#-O1LI8Y z=&O z=^d#~Qy^T9dme2;KUz`;n|@*weoN0Twza-J7=za?tmR=ZmzEfAhe^pQ zYZ+{=pgU0MLuZVzGEjmdB4@W-e@-r79rZovNpMSAw%uj&|krN3b|c{(UT7LEg`z($7{_3_sP7TwF zquHWQe5Y@Qz?uui;cuO6(zB_{Qk&d-K$C~A3?N)-LNBmfm7JCUJK+D}?&gLligx@)OHDO)* z7F@R-(1|7?HF=kCk-b~>=%w*lPeDbR_0nu|QVZN_HqZ}-p;BZ9%a?`Uk7YTZgTlsy za(E;Ui8Vt;ib~aoYcG;ArR4QAzaM-%5j>$Eq({$TM8e(#E*XL9|0K zK1<n7b^ zWBEO8cGYtxOV<`6=UVmj;iH~=2^f^{YcfVas(9SSra{DcU0tp~9D{sifB4T-({~yq z3;f0k#cD2Cu+aybBbC%~mEG7NTrOG3kfn*fsU;(uKnP=PHfDQw)x_*VoQc7l;T({f zrZpB{{$Q1<2>`1D>=0MEnpQqu!et0_hXw1xpx+#J!x;IYBqOw6lPk_tcSutXbSHm2 zuyir((Wpv4C!hcdE$q3=^0tXj+45;D@4`uWtgAeHnRQ4}BtLaE054LFH~qVCw%a+yBs8#bXO`#q0pyXEHdSjh#~ikiY?ly;UH@xBe&c^eh6teS{RHF#p)AG7Mz zT%)YVg?#kl#1Ha=BtZj-8hkPKEdBGFValnGQCrb;#wJ~rGgWn|Ol3-yLNZw_ObxMd zcq%A0Z%R4Mln{=}PjrSRT0%B-q-1Q(eDvG$OC8o=0>Yv(@`Na?q)A4(&@zzt;qTzs z9aWEfuP#4Fb$(bZ5;w?Bfr+*+g#2<<@L}iYhG50ZSk9OBV>PSarpTZ%96qro<7|BZ$7on)Ox8?RQx7TK;K^ z>WmId%ac+Hv#RTVFd5?=np}I zVGG-};Ap%M40COvrRYPcLH@eI<;q$TeLI;Il_{$|2X4e(3=>ti@cR!BW)Z=0zxE=; zX|}x!-FutZxw@vwyFHfaZ>FX!8WS3t);YeSQ#4TcTgNPQc%VKf-iSPMB#z*NLl76^*A-Y z=)9)BuFmpr64T*!^*;orKA@zJLBb^ogV{HfG!%t0M+82Tj$#oh<-FL%Xa)YB*Q=$N z*z1;-5`i-gOdAsq>@BpgP1{^U$cMNn?yKF*gvfhIXSP$~j2AW#p!e8mIR825lO`w4 zlUxJ@d+lJvg`U~Hh}l%$WW`ukmpNmg%%FBbehp3&5o=P5GCmnLoHM)ojbp9NoRBey zWgd}Mg%qTWF8r+sbv_c29$R_xtlteQOg8(9Ej>G^=nQfj(vgk*8w;!bTbiJ9HE*!U zPbH8Rvhirr1w(HwXyId+sKD~`g^<&LXWHmRG}^ zvl@k@jeNM`O~Yf?pRo~25{XLmHM;v~o4O29JO{m~zEZ}Q@!$ly07x017GfGEN)o{L zjf<}fxmAp-^FXJp?WI46F;~wN>laTaepHskSnFLxzj#0LmC|oFt{j`m-JW^o=dh8t z%KE&LWg#WeyBx)m)w7mj<&FoTeyr)|BqiTMd9-15>k%buh~m_vw396(!M99xqN-$8 zJFx5GC7k2k$wM+Vpm1e^2V>apo(gY)d>?L3>LX(~ZQLt-bd#tx6(6AlA((~hyz0?f zo@<%4BOSS!&~4LjEr#U0aSrOW)S;f>99nW!4~aI~nf|)-xH&{xlim__-Bid*U(I#- z&UpMmRoaz`N6wEI+Frv%b#*cf47}G3-=T*UVo|$o{g)zH6>8wx-$@EHUgsbr2{^5{ z!4UO}#`TPp)8`RuqQd-$3yb1;FEHll1*k7iaNath7&`R%Q5-=nZ}u0o9(Ts-LV%18 
zcD=(LvXs;%*zKoodEv(SM>jcnKO6L08%e~COO2Xta!}GGV>T-tMb!yUN z#{RL;yzwA0IpbpTf$I9*wOd5AL_)0%@~&Bsr$EJB6p!lR+N_v)kH|EXNV;JTRP)}& zp{%r1vYX#Cdz_Bd3+hDg;o6l&Bg638zle^Eh9zu}&1^Cm)en%{+|W7HuUt&W>(#QK z&1f16;Dp&m4AP)w&Q?+$x(1uM$7*#yyank@D=z^59{*JMJ&1n%E^gjY?+L#t7oNY_ zGyuV8dFNS4vaZ5N(AbSmR3osvM%cnF)G!duM^0F2vwFCO4!5d@&WdU9j25%>xU|*^ zum=VYg!sOEG<)V=p%cP~we54MycwdmjEIcG?yWtf1zQN3XHAIe43yN$f}_D7>gz<} zs@#jb>dYUcL36DM9FTvdG`JOB@COf9;jwjd4|zv9ljzUd20^y8F!Z6DO`H^B(^BP! zkm(wUV@&lIK8htA^$Nu}-92D^^)q*?b&o8mXy)GEM?wTz>eWP;G~@5tIuDeO>5ty; znHfSD>YS!{)c5vw>t_TfJR%4mjKmx0jMB_=OogAct4}q|&jflu>a*%Qx>u zy;hBBx&Y2E+~kwpyg602LIkBD_~2^a_X+JzsS=OV@pzOx04Ne7WqYV|N?EXRpeP(Q zbEyIfE>GKOfmm~@QT~d;Eei+k+qy`~oV zGcB%`fO6p=6%XAT55&=@{|A>b6FI0H2W=h{k3SweG6uQG4okll2SvxpnA}1o2pZo6 zK-4UWJyR_u{3ct;YXfHZN!qngq9#nTEz2rgS`FM;Fh{~ls`a6y3>fsAL@xXu0f*x% z4?vW0aM=|8>NwralUSH=h!hs;d77MZ;+w=Rvs#h~uqO}jLLzgdVxjWCKLcG+K@M?A zEElA#z-6_@ikuhIuF@!c=8njQ&bjnl(rR&c>BoSJ>#Lb6SjPXPPWJG?6A_LcaT1Bs6 zCpt81yy|ZJfftSqJ4am>>U~<8snKev0mnck_-Il-Smi8Zh5A8iEp1AExv_jDE@}=?G?_06)v3w=_2`CZnH zXj4=GVe2UWzkpU2_)RrBJ;R>+IUR^nCfLxeVgbpmHB>RdSc+pbP6L(Qx>g`3trJtj`PEiy2e$wi{P3R{KAT{)w{Uo>uOi5-qk%2m*}T`syrS^1oT<8G%*~? 
z60SjRyocb~M_Ox|gg4k51lA}|EQ6*NY}$;CyDjWsuJ*SGJzbX7^Wq_03pJ68aq_Rm z8aG%r7p$02$438Szc>xXc6I&XjgibgM(2T@i?`oib%sInBVL-3+u=<>bW z;OVj>(*UgK1xEp#eCZJ+Wo#it^jgkeXp{!V=G(!OwVx7Dm^K-r)T$+_EG6AhkT|?l z9-f;pY}jG2ZBkqP$f?+@B$k7FZNP9EuAO#Y7Fec^4}-=%1hx7>KI3uDj9{ffrbr%icE*PXm zu3`V>(+p2$$i8;G1r-xeRE1tBSz6bHu`%7>4vIZAl?U!wy|6v5X(jbwmzH5VNl@=Shc)tKp3hzPxn>trcq(rZ~)4+`KQ$r9l2?ZS2G0)(+jZ>SVC z=G?T4T##{r34GtWNwXkbECyH%n?_Kl(Au6S?D%UU^J#=WbxjL`IENARITQ9$Y@A73 zb{kMwEniZDY3q+=uDuQc^@baEk;fofYkD!-3nuC4Eku&E0zz?(#XNSt(laPy;2iV- ztF244zSy=}%xpFc(R(D3))7{JN@I;mDR`O&`dae=3y#O>iZn8R78LF?pv)=$E&R=( z&wB(Bay1@SyIF$>yy>W{Pv}p3_x-7?DRiT-O#6#J&WNed45T<(gXu_PDXGodFa{fp zVnImsyy=(bcl~KCWli@v>$l)VQ*Srp zxk+SYa@oxoiEWXhuQ)G#+?2J2TG|tC*hvlg(uml0an$& zR0_ZcBz9gw;kgz4bg}~IuV_8=hi?XyG%K^yGdrp_LAfCy@K+If#(kifAlHw604@Se z9WV+4;eZ5!q{3f3{6A{&OGb&ToVQhxk5)$+JQ;~soH35z<0Q?o+9sB#l*BSxU=v$dEl@{K`OO--x z!hK!DVvSJDpEbwJhqWTT_sW%7x#Ga_t*gosC#tJhb0iM_sAN>+9h|i&$e17~$|7R* zoYiCtOfxpxFPe49loR(hqiDD&&0IXH&oZ?_%SCjPB&~wlhB4l*zHI|J&^%ha< zS$QO`e>@5XDbD+9mL*&P8J8-!cgKRLCEJK&mMwkG=UmoTp~d{(0mu-ToY&#sn3Ek! 
zF<9?#ut_>jsUMXoa)DWy}onxrzU|~e@VJltTf|eKvq`WOrms`WM>pG zs%w~XD#Tewm*p<6{WQQ(p;cq8l0SfjX{<-UGxu@+Su;pN+`=gl{aO34jVXmNErAp6 zL%1V-nx|$U}!{k8M|3R&S*quRWbWw_Cm9Q!W@gAxU$tvKv>h zokr@H^qXq$1{pmRmE5E|IvTUvWiEYJ_2MjbT-rv^Y0e($_XGQxQrJhd_}1rky1zx#hEFlkFA!1NLRchazD2^eN)OPNiWGL0VMsNS(;6Ef@~M8 zqE$CXTc}y4AFFgZ^Hy5T_rU=G`=bnT7fn4$GGsI^8V_&(Sv+ctcE1 zx;ysFPc@rHlmDx!_fLl_fc8(9CpiL0uE61ZEhGBCft95R)bZ2>lC;8Ju@|u{$W!7_ zAtd#?MAyf4mZ|}UC|9+#ItD@W1vqVChpuxFYi^;S$bwdww`+VBy;)#6)18uuoeC#x(~Q%DFa%Gt!p5d!i|A?hvA3ijW~SO;m?9!8#9GY;G@-JR@D z{!q?+Y4jZAO}xbqd_;T$@DU;9>LIGTt@fWkV$GW1qID-;<{OC!^*1A%;zz!Tr~g^ry9=>zyqxmtYFK%F*8I5&O9% ziCaK)G!av9O78w#pK^p=2&!~O-*56AcZOo*ZV^Kw>v+WI)G$H7;R5_T^m4*&MBYK zv7LjYEAE#i?U}QjMxBF}aCm^#_=P)d@Fu^?X>Hjqtf}@L5h91rTFvdQWT!3*lG~Wo?2b}yVnoEdt<567<6JT zUr@lsRPMBv^697|7jy)mhw#oy4QLgbamb<*vSh|umPcoA>Pmnu0M6*3VzNx^fRsmR zw!zxegL~j*nQPY$nzUY!v^fhBJxHD$MwKV8-Cm!+Z%cFm5=a&uG1M6=N5WtH%RkVdZy2So{m-y1y@ow5gOHnm@+{68 z>_e*1a;4J%m<+9KO5C-7Bqr8tDS~h&B|Nuj+L%CdSr$n(0YVV0c^HSF%U?j2G{4@S z0Q=F~Z%>XI^cxs`L^r^wgZrw96$5y6-o7K(mn2zq4)^_Y&=*lpq(%xaC7HTFRRntA z-?&Z&%Kz9HIMW~D?Z?k72%lZWzUrn@oR$TwOq>wIuYH?T$N8;+=J{!=pWc@nx#nk~ z11;oP-liM=#R1nFNt@J{)98(AnCv(^$dSj(W>7rvpp^! 
zK}+34-8QI~e)%%@iQJnVfMjqgZ>*wPE85Ehp-C0|NP>s+%-UWJ5RY5^I_r)eu4@0f zoyGYQC*fg7XO&HlJi7Ps8_Nawk8DUA(Iq`n2m|8-aGI&$-LGqiawDv{xid2ZHg{xX1nQTw)u$rffim-w^Atrt5l{ zl_SU)6^2EHwLb9b(M=v}td!FvNRR&=Hvrp+-Qz^L-gYm8!EY%2z|g}?3`r)X)T^U> zT#|ITrIG6*HVCp1RROM+pleHf`F6quTVdR1p*z86M0SRUWFPsK_d%zgYwmgB#PD*h z@GMB%)SB=P;-T|`Inc=Kqje5aBGN>jgXSs<%5{GSRIGerlbMk({rcZz+B+c9Vpy3| z-))$=Sw#Sq>^H97a<`Y3MXVE&1+5cZ12SMz1osL?b~&yeX@PocK)U&k_B}ZMK*`AD z#7|d37Xwior*Fd=AH07{AE?aWr-Zoz^>=l*{Wc^!Y!sKZ49*Qvyr7s-PKX8D1;ttZ z(=`au5{4#t%8$p$pJ7nUW|@%UKI?DOO6555EQI!>S|4+h_Ss77&2y0aPM!aYuQfGt zqAb~dz8WZ*TOGfv{-Wi#Co~nO)cyV(o#buWA8$ULo!K~1@tF9h-t*xF>^e+io%(8^ zfs$TfvYr9F8=mc>W~yHrDskH=StZJ~$QDML%A?Xyhg>Yzb$2%csvcUU4pNt|$6G>c zlYi+|l^#*I=VzwNu)B;$0NQDxzB1>iI*5SUo5#%a={#+oQCZl7Jd{&CTugQR4aqhJozm^LN;%qj1C&#%9r)5-k=&DS5=w!qVn4sx6s#% z>=pVq0Sf9*p%N z%S3t{KZRU=4(ez&dA{6~rZASVTCoHJ{y&}EzMeHvCR~f*HXYBFEAEUw zdAx8m%u47fX+L5`TQx8w9j$|r8!oup-wCVdAVlItUs;j*pVe3JSGQY8cY|^8aNqkj~U=7r#Vna-=5iZl58U8QjLx|izdgO;G@+L`4@WP z2BFZgr%*4p)4`Ug`Z#zo9i>0GyY}()%<$16BGq>%7*qRk(h298;#oQ<$~t8dlnb0n z6i4Th_vfH*=OBt}{LkSPcb$q_t-&Fm@|JTD?*P1bPuaAgZkluZWCmYWhVtc11w=}} zCt%qFh4aw2Y$z@E-B!DVQBgLxgQ&6`M;=>@QbJNDx9nIcS2nXWgVkL5vFGBKKp9^2DH}}LF5nJ3podoSMo%!yJXeO3eT3s<17Q% zr2xk(%K)n^;oS-C>}L;)3Nzplylv~i!Tz+6N{G;HC|MvAzwDc|=K>6Cw)x3;mPNA> zU}GyuMvQJC;zMu?*ck*SQ#6^~GZ|fp!*Ayekan<4JisyQCk)BME@?^;>%eiAN-qxm(w6`9M0E=-4T)EmNxyVbk5zvorhZIh`acQ!S1J@Q z%vJPb1(sr_T~8w0<|)zb0Y(Eoo|u0c=H?HaxugL#xdxKOKZga;|_O zvT6iziL%mr!YWazvoSq1R%o8O#V>zGD>tcCSQs*-!g>yRl?u4a=WPWN+qdwVmCbCp z&xwiO0IRHtnW3}x9K&Yi?OjR^dfpQaU7vzc$#sU!9@S?%=b*NxvX>E*rC{L4$n;|3X4Y%y^pRMB{bA;3lArw% zvi^0 zygL%H8snR7Pddc{Z>r<7>EJX}Wa^ipiH+@QH;h$20xTX=8UY)g(_Bp`6Vhizf|UKO`&HLvSAXqV*M90bG%)l-t^!k_k>bI@I7 zJK|dQw#0_>%v*^f?L_PnnsFLEu$>J#2VIr$Iga~==AN?+&eBfx7Dd1E;-yvx5oOyi zxghOd^jR;@v{Z!7e7mALu2w-MhQl{ZR@%(Fx#3194vsg2=S$+YgLNvb{#p6h8pi)J zUq;xK#66632;yobxoz?u5O5*U+Mdvnw6fR-pscmJQ8q{X0u;w!;uBH=0c1?3 ztWTe0pE-WUw|rS>$tfxzIgff`Rk&lAp(?;Y9YYcZOkfb>_tJ_F4}tBQUjH~X#D|~< 
z?<#XY5lQoW(En|DU?VBSN#Ca}(=Rwh>oa=3Twe)gT-g6Y3)|>h);3WCMdd&e1~KK~ z*B$D_Vbn#WR61H7J+zMwN)MoHB3hdkY;T0WS#b%(QEO`5e;pxuRq{!$V?aindh9xP ze{g)Xx@^|yQak^6`qc1pAm%l?Y@5oMUew%*{_L((0B3aZvRn5O2-T_pQm_H!!NAmB z#&D|N^O4#1?ET3UqG~f0;u-f5Qnl(uDY%3VA#T=fFrN9XEX**joB?7Wn`rAj^sma? z+Y?Bg;vPO_XhdUP{xo1!Htk#}PET6U6 zs662~8A=sL7NAY~MB8U8jD_ESlG{JQ@Y!LVa`Ji=s>zDDi^X}~5`w@2e6DI$ zapSLL%Xld1@j#%T^6>Zgg^lu5>$fPsbCAlXNA2^TFoSxpGoJ>^=T3wupY3$SVOIR; z8QBA{EGfExFxOf*+G^h(45YyPG&&i#Jb?|K^+A);jR3N{xH<$r<}l!4jIX}^38^uI zQX2AlkF~(9L;(F_vVVc{^C)j{a@HHZ`G~qHj7Y=rWk#S+P=1o+ijK2Lky@iAVEt|T zAkY4x68z5SE)xen)Lp=NPa^7LH3LEe)gwM z9S5gQZ)=)N2-y}um+JdXN8W`T^mh6FrcSRNPZIShyEy}8!md5`LFMW$kP@aEjZ{_f z9*4qxPOgJ3qrq!6c9v*#0J+>%sLklaDSl$r#7#xH6;Q@VGE#D^sX-dAj{4-}4~4r! zbOQ4Hyjq zDbd>snMWV2yo|=eUOiu!M@b#;YK8e7B?imWE{t>9AF)gtEHG8(9R-Ev_&r}p@H?ER z+nK9AZIu@CIs_v4k9rT;Y*CA;?==l?z2<&#uv9bUK2tJX+|;D1=W;y*E|NwKe1Ky& zI8H93kjwChHsX#-tLWHr{Xzp}ySYv^gWUWuStmnh=g71<)d{yqhdpWAaCfsB`%@ad z*%mljr8@6mOHilw=r5b?r#jFY?X09aHf%Hpi#BZ4&*uH8ot(S5#V1Lw^VKpm!@Z!t zAz)U)YNflj?CJ@Rwa|i#As*`((gWQNP&ay@jyctV2jE`_JWfLK%2PB1XONhJIPrvh zR+jyy_bj-%-qYXw!%K!x$(O60^||2DtzZVnC&7gGcbcQtmbLV5-RrTq7;4yAE0=Wr z%xz4zVhC=l^XDKI<+qV+$ z-ko@bjHh3ml@d|1%8Xi(%*82}%D?$0ZJDZ!86zU^5v0um*k3`af0ZYTrT9Uy&woKI zJ(Gz!-P!ApVxJlL9^|1)blms!;eDm5m_77@T;Uy~`d6k_t`*|gaX)P$DxbUtqR1$Ax&C?hEApG_fHFtz!tgS2{||IQC|w3m|;<+>&LZ`)|I7eT3y_O z?3a>K8}Rb^+;P>JXTEEjrv2ni%TH%Y$78&6lff4NS=JQ#4Dpl*KVI+5eLzrBUZOiw z^y8^^_EBP9LrHVY{CDH-m(CZA*}n({deW6c`ek;Ug#v+sKeHQ@{Ht;{(Xnc33Wa&vZ8K+YAlSkTgphSYd&jcv-A8-H2YL++HsW2N2RCLI#v z8(3{Vx9<|(VVV=-KSZVDp8D=(6?hv?BVHJiIV<~SnJ{OIC@L1!dwI3OX{1JML%1V z#FuRdpMy%IBv(-9py?6xK;Uu)1sJ~8Uu%2wc)svm2S#3^Tgu8(cs4@(95j#1f`NOt3M|pRlX%zmt~%?$vnZf}$^Go$f(LkeNsVDBtMRN)I9HAa@%?!ZRy0PGu*;X(wWYL%Bwj$ZWYNzL~3qG#+lm$ zztQ1znM*SD6u!cpB-L>EURvGD%!(K;Q1=jp3-G zFsGGnyBx-g`Api~+@Um_Cb0GSS1php=S|jDZXSkwXI`LSj8!ox_X2oow!f1iQcLHxNGi(38ef!?;y(?#s`I+1&KPx{GHY%WL1z;PbXv|uF z+IkaBYw;9Kz?I~@TkGLji)Rd8yBR%HMnbU36TjbF*0#tAi9b;;5;MA^-!wikMyM&5 
zaVL&w8Sbse?_x{8p|8HwG2fNjTnLl!@U3|0B^1T=sxFXOP7I+Jd{M1q$lvPO;=WOR z3*AQ}=I<;YmY&n{;_q5`Lto&5Ry9LWpdac^Z6n~(&2r7aSnENYaqatSJuC(75pkeC zd6o)srto-heGkh{n=R zRqtVGQ{2>7>a9R)uQNomL;ST5I$%t@Ktj?VJJTJXZl>d_`<=NWZDVzAIb3Et%9w;M z)TEJbpATDaOwAp!3wnZFpAtd%_P4n*b;qe?$3Y8^2!8)+H@U0*xK8A_rLASJi`zX= zlfKZng-N{Z9p~Rz}k`nmo!7RRZDEXcG*D@vj zB_r0C&N|t)yY^k;0lK3eaUoTtYWI~J6%W*UnBhYM*Fi$9=KsqbQ&@l7li4@g51`1r z9&YolEcWnfQXZRi-H+5~Bf|os?C%M>pYKK@(6RAtb{Cc&471*cZ0s@ICodYsc%%ap z0_+)be5Wt}laRy1d{2bzx5e&qkMs1@@6MDDFaBy}MnYk>_F`d|l>7hCJwoJjchy-@M}S(C%dzbJNa)Jf=F` z++1Cpt8cbFzBrWrvUm9?IkUx|Ll22*3z_x#Ha`c9f6DL@!Q# zw?On$hLe1cJ!y{$;duUnjbCcYpY;M4d6`~%QfI~%86$;vHq2kdc+_!JrR??mRc68( zUvoEt5igLcZI88f>ATvQf)($V{uyvve;~HPHtZGXc{fFMuBhND9T!v~`Sr-;QM@d- zYL5!*_?cq4Mye(8{;(J1KvD0S z=VFNqa7ah-54syQLG+HQ-r8qP>5fX0+$vAlkOZ57q^-hq)%@bWbhoG8ezC^7-27!} zC{gXF->H?G`Q$=lOJlEXa`gl5pT8Ncet0M@u6(-i%04Rn(70X159TjY%svexa`MCtIsC}+0)Xex+m8q6DDvP21 zQ?+`Tjh2tk;md+gZ}dg)CH!@W=EYijX8s9EkKDC}YB!CCa>4vOKeV7Y zE9E_D%VOWSFR0N~Zu0javnp@uJ&}oy9Q64r>%lUjt56)%A^q`yudRx(jrUWQo`Tn; z>AMA7HM$n&5Tp=gdK@A8RTV&k*DO(~E(=VxKaupniPKf$+Gf$w((rymW2uFIHaeV*v8lE_9o_Us1x_^pZZt)PU86W4HqnDS~RO$blE!a52o|e7V9FM z_NfsL&u`M;cH~)I{?c`a}F2I4}<*KxuBy?cEBXz3d7bqTR8=c$eGrig$#WW=rDgk_}03xU07!r=*(+JQX* zB*vR2X&-k~6vo8szMttyMhEK`%pC{7_2pW1OH;sS-z}%*FDLwD%+&qbV>j;QnT z0e{fV&e%;AE|kJ^rSOm{ZNc8=Z;7;k&YdW}WPkn9l%4FcUxj9AwX_{z3`sQUR{UK2 zGEHR5pYw40tvypMW0W2DYn$Cm3U79dfcUUf^G5%!m-t`dOX0Nwo_fQs#0`o3woeVM zb!ch&m~@Lgj@e3S#=mZ=9xBUZ4u2&^jpk8Oh%~p6cGe8>hu_@ok$RF>@6;!5Etj~W zT~oDMU>+K;{;ota^US|oNV6L^aze$Ts7+*;p))bghQDYDlvBKK>>4ULQhS^oTHpns zzX4eYvDSDf&v6PPv#s^px7^Y~5x-}(EpRV(zr*Ed4E5iE@bxvvy_ZzoQvYDn zK8B}yFvb+V3R8(>r8`_Wo+>yV0}RrFxp!rtggQ~hhAGl_kIm2tFOSy8k)aNgYPQ+f4g7RLK@7=gUxq&yV|4d?$qwiw->_hH=*e*1Sc^0 z>E2$Pna`a1KzwC=%HHAM7h5H6Ib^Y92{GP|yJg{fC;qk0y(W|0-Y8!N>mG%K!;D23 zkooFPY+NCGV*K%kG2@h}+Z(>hIeM3y8H_r4F6>0TmovIyt1kdulv;f#E|VLRTspY% z3Pe|}Ln(AG__Qk=ret~XQmd_t%f3SVE%BV=vimo6^qqwao*(UOJVUA9!`FQAQh z$(JE>-7L`jwUS<;Tj5IQ*R9PudJUT8$TJwt@+alo`=F%iJBM;?Sl}{}EW>}=8IDzv 
zyWAvvaXPB zxe2N4DMW#I;?+4;m8Lp!puen+yB4{f22(xIe?0hB#7HySFS-t7s}xzaOLzIHF{?4E zJI;VSuD1g6Yf)}u`OJztIzp}`kC|(rLeDo@uI3BOGg;})_h)6Ko7eFzx5Vnvr1 zqJg;*{^mN=qWL>O%!f&b|6D`Ysqh2-o40pqsn<4Ck4esU!H+_O7HohqoJJO-b>e*V zZc(37bcF&5i<&IfobsXrI9dS*d<-lbpe3SJ4oZx6x#Jn8yFQU zzsPuY>n9LKho;U#@GlYWrMuBxXr_OM0aF_NGtbgzc8ePwopjS5JaZPTzsJGtZLec~ zb2qw&%C;jBTY8+)eHt>c)6houguC)0>dhug<}DZyc_$I&aGFchKqJIIv&Iwd!MEEu+k=6#uBd)-o&qJ7%8AvEqg88g5+ z{VnnxG=FopG`Z&$?HT_JEd_Kie0Irbr^@`d{o6C;#iWaq?~GaqdF^vpBW zABThpQkzPN0a*@>HJJ`|O^rr1MeM#;e?4;;`GBb|Xybk= z%X`WR*0XXWP=G=Ux{Ca7Fu6_Iuhe@<>fqU<=QT;>e-myCH;;$AMzixc z&ZH+c+*zFT=O9F7_=7Ow4_v)*{tLq{yVpyqalp`pyhK_1E5uL4%Vgx6g8F4LdqLeH*o9 zlfSfg6H^0UPR2;xHa{dQ^)UrWX5P;K$ZaXJZo9ql2QWgu*>Mg!yi^Ztj;C*Ep zQ=7CQJHy?Rc6W~zOV_dOvRS>@LP8}Jl>;Wc8sesR9tKxkYsm_I<#kessB}> zH%_%i9FN>unSbTxYbv0=#F4Hg41V3QgbkA4qya7>+G~F(IS=?)W>g6ux(R-9Tv`l~ zMN{rTIhf8tUcvv{N^*<5|KC;`7-{2kqk?m{4R?~da)9I+F;?YRJHfoQ$fa)$UV4h> zvT?-x;(rh?f9Yo2DbE-3pY3kO%S7t^sV{VYeu)lP4+*)&R&j53YI+OjBL9rm=rhUO z=nhYLHTqFndZ*KXICEaV-G%*6vGsImIR=8C6e7V(B&rX3XH9u>a$}zlrCcE?T z$HH-%75X0rZ~KBhyX$DrYt2KL8sUZ&bf6~&ZEpa~rhsTi`=R7C`d?b5&#VOc-VL8Yxg4aSg-G??)H9Q2G})G6`G%_{E?HB%g*9Bx@ucQW3p zNx3p28N=UjsyM|<1}f%#ch5nMlchkJB4x^cH;<}&l%Tp{4-8Qu1e6>xD^(oI-!=Vq zuj~Swt$6fOsXCSQfC0cboLh5C;&TCO_NSgvZEp&;=p9xW!_43Ec7_Nt8c*?_nh4q- zkG(YNXl`>@>oKFi5jn9b*jw4oKal5MG(IhNB%39rP>oQ%e-}x3Blk+ zH{Pq$$NKhT!6w1Y2_0m%SU+yc*98Y}PU;rlP{dZzgN`A|u+W=EyYf2qyTY+Z$?M0i zCzmD_kl9|h%>#_(!2(Ad{j_%eI+t_bf6;W9d zvG-M6J}=ez^Z#5Z+dDEU3C)kg!F#Cz)))kl}M^Ga|@;ZQA(82Fi5k?xB> z6azv`e}1gVxU5hPHMiwo1b6Rv&zgHX;VkIq&=<%>m zn_kZB`ED@}t?RsoW=C17x>~`{R2>-@;;l|%)U9nVAnN4c66Ek}Xd?E*JUKmXJ<{&n z##(_24S})_)_;iDyl*=}FTgECqgEMRv{^dub{b6qh!_*O=YKT{$ry6lywU`O(V4!% zh!57i=ri%t2!-hC*Ab>%&0}Lf2(rO{vWg4u!E7o}aqI}nZ9@>-S^ATLB$4HsDef?Hu1*PsFSKqOuiqgwNxru|~)LRBMTiE4ZhaZ-OEU+7Vw`QJwhQv^-C`_TO zUd88@)F*#_s|bETB2~Dis7*@;amZfdOfCz-r?%1TJVrZf><*ME#S|c_X2o$GV`pWK zuPYo~M<8?MH{e`Vk1Sq~`{A!K%kTw?-V5hKCXI%R15br)e&0}y9iT14b*V>>h9NVl 
z*_3texNwPja#A=mMJ)zDS&vC18A(Yr*(njx*`M9K**^)a#S5sy_)a~JZW>)%gUg1V zH@4#p(eYn?R3#7H{N+1a5hmrxOmyw(1hhs@JJ2iu;wxdZZyo3Lzd&mTzfk%`_V+{` zpUi<1_|D~?y?v{(?ae{b1qAU=Z`cJNZZPJbw7TNS7CKLJF9VQ9O~f2n)#JVJ!akLQ zfslrXADA{%P-;C0npx@DjH1wFC_XnVGohup)C7&WiaqO-(F7a&dbFP1`nu2?J))} zH|BkKKLD$jwNx3TLB@+OI-69vU=PTVdhRedS6L& z6)^KC?@2jc0#6Cw{R_F;{0LF4d=SR~2)8PK<1a)f=81O7Z~l+{e0ROSq9sv`os(^3-V)e87TQ4LMfVyx(fBXm=+HnN=uN9HIIubkYf9)fZ6cH| zGYVTdy%z!&=O8a^kRE%W3-ln^6%wZF1p5bQZNjOGjMf<_s2q8Y|1PkJBLP<=y_kYt zI|lBdYiL8l%pHYjE`JEAoLsY0=h4fJo6RFl$~&MqD9x>Xc1Jk(FXTG@1=BK=4Y81l zQU(gkO_wuTM0lp|Nd<4v13{K$WCB+7U+gLxwA<)DPCR9@L7w zSg42lsF{~~gbA{cvoFRF>}em|R|r1}hF_B!!0DJHjNtIt1Qe;X4<26;P0623$Hr}6 zoUcA2{L^IrX*|Dv>W%{iF|f&E^w-Wktpyrd{`Q;M)d`U*THWXxuunuorhqhIOfo_ zLw4w;DjmN6vX4ac{r4CV6ir&(9mGF68B^i^X+Mde{&QnbLJQ4UPX+S9VO%CT1$fWC z*)xVErCj>lRf(o=89xNJSwnF&4Rl`zDvHs(y+PMXxD$5S@ziT{pC9%0Ux>rjApNA# z⋘fonic@i+g{g1n4R6&CJ%6?iXeXo<>I$z}zRD&0})LX=)q(m62jBwCz0pj3SSR zn3b7kj`KP4%K#d|!9v|ZSAn7Cn)b3!quH1&UQRRH5Zan*u4JdEm60xC~A zn`CJ_>%(UT89GLI3Tti~fpi(MJZVa2`L6tQbdeDF6hC^k2D--tbLsrLQLqBtUR4I5 z$yLrpHf;%7vvKeYdk92}I9C8`Z+`d-L8@S$>jiR>*UUr!O`a%`jlY)t`ZQX7j zMS$zpMK`&BT=b$E+zuGuRsod@>3E0#1VojlJGX?X<|nrBs+a$D-R3{9m+rCzDX%#j zqucKpFtn9{It<~5zW)Imj+%b{GU}}seXC6+t?vMdoQvQjcNj_C&cllg(nAgH!Zk-1#}AHI zF70%jn^_*6>S6rpZVfNCx{~$TCGSALk$RJ*j@*1D{`I6}xesLt1guZ#N-n$k@a!q}ooDIP< z)2z^aY!o*1V#U8~jId`JrnPPW^>MKa!;N0^1{1A^A6s2@t24!Y{>9j3{-fHDdSgDi zuH`}AnVGaUiR#Ksu5&#)@k9A8$dqhp5StgmG=SCoIZN3t2Ga4V9E3vw8QXfHxiqxB zp-*7JVZynfM%i*P7lgbrGLsJD_a1y--i!9U)zFd8?PDr5fmVRw%exWCM83CRI{1F@ z*SqSHu&N}(BvOC>`2?hIjnmGp#X0U6#Pt8>y4~hCMc`~G(q%|0*Q`2xd>vi#7*U<^ zH2}lqttD`wV8u~rtnz)O8M#|M`*)0efK`fK+YUTq<R7Gcy zbm$`mOA(249L4PMjLu!-usJB-O!0%XL86BZv*g1wB=$dS&BTuzPiOeZ*hoLISuX0$ z-^jR%jaM_ZyRt!g9WwV~eH>!o+A0pokkAO4*knsGg;_smeen@;MzE1~R->D$%>P1U zCE$~{_Uh_BwW!@HeUMCR3-*ci8$3Qu#oed_WrdY~A=afxy+8RU@7c)c47V7aySL!q zBe3QuLT#hmqG|JW;+UEYce;c<{PtW_dc&@0^oOadkLU8LK49>94lzd5p}clCK?zS) zu79Uxn;q#8p+dU;1f}8rnBuEN#iT&&9c*5xG9{}Au0(AkG;`F#oQ`^2FL#*m6!ja~ 
z!>^NnXw5chUg?ffd8K5iO?eicP<;NE(_{}PMa&}Oqjjj<8>60KE6eGgO}3VISPWcg zDRV}FN~olUFOecwTYc>`Bhq7xKR@9f^J1|IA)oJolGCW6D6~VGV$$&vcXAXCRlC{;mp7&@A$@d49$b(((s6ZUPH;z1kTPX= zl|xfn??{o{^H%X3$REt)izqUtmwos7YVd8})D88i*B1{W-VVdEsmT>x6m}v!i{K#A z)g4bj{u=Wy@BFRatP`4e+dHY(?rW%<*llr1*tzZdMxphEk!XVNcfhFvEx^rjZJTQA_s(~W>E2$O|yr0=(OD{z)R1W}3oBxAows%68 zX|ABQ0-$PVd^~O|;nJ@1A#`slCiYOma8mKF_Ri-C8%7VwsuTD2{0A5x& zh;FD>nFjD!FLOI| z=i)MY|Lk~RE6I_Y6NPo8BHtO*5Bi3|u?6Sz%Kd#bgVH-=KX)mFVjbSU5$-D))y(glG$m+x3qlfs4)g0H%y24ra$ro_{40>%QUS%Pf z(wO1eyZp@Hy-&;x$It;2;dO!gp{1#yjfb1|UtKc|#W%n9yq)7N;BTE7^R^r`{q*_K zp*B7P!=*!!=#tP;nuv~9-E+Rk_n;r)ms#+$k)LY%4FirDG#%>O!l5a`ZfvPdlLgV* zc3gO9;fEdKi|X`CTG=~05!%3$^WQX554CyBVNxb7bJjmBPxeI5kNE9)vt!(({F5-Z z_>=T2JB;=tHY_$bo^T%Qf) z@G|TXdKGTLhTK4p0XsSB|0|>d%UeHT(qx?)u+rp+3ndYJpUpFejI z^}ei#KDAu-m-fyI-v|0_XnRXFHVff_IJ$&6s*2#Ct{OGs1vx@}xL%h{h%L+3LiTC~ zPhNQ>&|;|}aSpjmvzcqGPgt3B{~dXcS5M}Fh=W1z9eoy_$2pL_sGU-Ce@d)zf-7&> zbCZ#oVQaSs>8oeUGqz4SoA-F`r+kFQhclCJ)Kqx1s8dX8rVK%Wsfu@!xxw4h{fVbj zTUZj_3w_#NcG!lX21&;O7RP~_n0-J+HKpx0TN8Wm%#;~u4(}AuoP^&9_tZTJhB%-N z1nW`yWAzRF6=ES01!c5m&hDQvV*zyYLIBH1peJx40LnOi9Q=V}=sN+l|G5Z2vqM&H zw9DweBNRM6aTm9LYBh-t(ENfw1ytQPA{ft3DHVjc{u}Zk|3bDmu;}Flt1$s^ytVcV zu$BEQ$I#LY;^j0G?lRb-gmJeZFyyp(n%t;rnB`x{%}*Z$>w{fif6VR-IG*M8NnHMJ zeh7Z}`yYun2bjD;S~YsPdgeKESuRn#lc#Ak^a3?e-2AOuzz~_ADcy9?AtvIT4Kha} zrmnQtwHnuu^806h#+j7YFP;U7AMXl)gAt5rR#t#>QUtff>}W++XBZ4dq1QM(z`I(O zlpkG#6lNx*<*R#1tb*o1uL?V3W9&SddStxwq0S5X_LBYl=ZgbE?UE;lFDz~0xDWIx zj#;Jc-|o~#llrfSB=VFL%89s)<$Urd;Dlyu2Rs5PF5Sv3OT+!qq=)?-Eakpo5SQlmrV^Wp<@N?s<$jkgWyo&3O3e(bIpAJ;<&bB_00!gUv$ zdOL2OP(8~1s?zMj96MONV&`Mw1sfg}Yh9N|3UetLBLihNml-XnEJmXNtLsndXva=f z=x#lZEI5Ncl7HnB=23KNv}%|fiH90O6iTSI6RC7<6b!(6J175ow9yc1GI6GJ)6~|% zv_n#EY-=h{-JJSn9+nIH=*SsoL()lCXSKN@_3f~Xh30I$?P+w`9{}zvedjl2YoJ8M z?}L4=h0{i}9iI`EtX3zv+@>e%$zJ1GWj}vX-MSs6$dfZNFkjXe&t6V<@OCECdM6g0 z%T_1f*JsT$fedS4x7*S96BDIGS>;96Tg&?ub%#4rIliiy*Ptkuu34?7;GcV)kP!e%H#X1DeF-Rz2Fw5-f1 z?-q+3l;@`s>aieWBxTBb9{mZllxJ6nDMwG9@dY_zPoZN6PG>LgKfXR;sgam$iPszV 
z_Q$YNac1Q2q^Z3u59dVfiZ8oCiVLiqK&K$^ZQV}zC5lzNqMZ>1nz9{cYVyo0f6%ll z@rGUWdL?E3*;{cQk;_=)8nSZqovejnO9x!ng=nx~u!7oAzg1=G_Gkr2J;eTs)?ZD) z_+e4xFt$I$>-kw;#Z==?#`*dYk~GhxPK)k)bL^3Vm%&UuvURG^qX#Y&c(!a75h{eV zy7ZtjS+Q<<=wx42ROOYP&tfkmwP91k9#?M9obn8Ez;%QLtTL|T1DE#XUG3Abb2{|X ztD&+Z78Txv@w(4pM&AhDUX>)r3#bx%!jpSDcChA|GyAV0V#7Q@A*d8VGpz zg6XoQhF3`2a#)2vQ9{gmEL4MdxM(xWeDtTTq2wd!Of}Y1CVyhsCE|)PH>f3H9JOO` zB}c`gW7T5E1SiJ{v9%w)-u%2Rx(a#YT=GE+WL2oXfrN=lFOc(M?-o@o8R)PdV6i&; zRq{SWEBaUbs~RtsU#Y5vp~@io-G|MngZ`C&UNwNpqs!C#{8!Q$MK}*Ns(nVjI%N~h z?NQI?KKW&C;DY5yV}FB;{DMq*W-Gz|*;&|99-rmN_P{^Y17lvQVhS-!m5L=cF1~C1 zzV>1elvcvLa<~vx&2CSXlasT8Yv)$h$6@UK(I2UAd8*&S7eqZG)_AD8?L*W?Cm}ro zT~&3jXtQ`suQC3|LpIKNZHUMg#IGzgXdcZ;g&JLojaPL)^ZjGY(e-@7XBqRY`rNCBL}*RHcD_LG$PAP`#$bh9LF4F6GSlZdR4O zoo4`svi#Qi+c-V|Oh+(!%A=j4U*~6CA|U0G9&1F!I2THsCzAe5VGlRvCnEz-NN&ns znZ0H1NVV(ES-U^mh2gJTU6mz}E_^}x)jkWfM8&?my>;SZ<=7E(z3%(&qOutTy6iDI zy$_My-Ar+W=!ZpPwC7{b`?Qhz{Kfwa7uVPjq1r0*mgb#YY0OLuD|HBa_id+b*foCV z9JJ+qu|R-$!hj?n#fL8N7b1?HwZt4ae?B=j6`y{17e{Ns^(V&7DTfPAc}AIG94S8W zs$oW(obJ2~qV?7BK9=98ryeD-24E~fn5bS*=H5*r_Cm?%~*d~@I0;%>^XLq=9#l5Y?TxD0zrG;fgI;z^{F zwe?i&{7c0-y%ZU4_bcy`APC=yBkL0B7t500pcrAG(F%U&V$cD{TSuqmO=?t6%W1sg znzD3r%=o^Yf82mbzse8Z29`yU%Gj=a;UO)H62+8H?hPv4L~DB$!#mIXPzyIjgo7ht zHw^L?fc*@lT9zv4wGt};HL|F4TquI#<7nqftMVLIn@r@Ux%XEe-*ygL-nB(^!9WHy z%$PV+P(RRFWHMawzPP70Ng``S#UV$=`{jNxIg%60ybFPrP|HIDZ^R0n^x&)o5g)L7yU<+{Tn?1T=h}A!op7TH-?C#;fM8pynBrj350zBVvj%y{|EaSlnjxQ2D2Wv>lXe7AB>Pm_P0ovD<{>^QQu>ZN`i>}d!R zW^j|!-O~7qbTY)k%tWLWa^$)(;akaj+eF~74Jj$u(%Py%)r`)}c8#V6E3>Y<$CCTs zL~;u(u>$^&)^!rXLf@_8$6mV#OZ^{xX+71ZsG^>e&w6L(#zm=owh=ltAji)o z0QJ8&CiRND+rTE3h-^HWV8t9|8aq1UIqAOJN;_Y~2f*0HgPPBWe)gjjNDe`LZ zLG{^g-$9MiA1EO^@~}Gtiuz+g{u#x4E&Iap6+v^iS9;>O3%Xhadu|oOu+5yvfYSfK!t;_RHe5mBDjf<1IWrmOxwM@16spFl+c}X4E;PRr- zOXC+DPEQ#QwC6`mSt1yh`7KMSd|4<+ zQ9Efc9m&tsMsn!>*Y*pe&Ud*nknAbwjn4>%ip`D_2dTENVxtxzjKUawPgCMuG>>#Q z!8->v?P;|R18KAAoh+x~OUx6LS_wD;C&h{`$Sr}M4L~0_*B}Vyt&M{RGRE^h0@(PZ z?rYAUm}+J!qhoKlVDO0jtveqjT$897Ma^AjNf%p7?FxdL>;U#1z!q 
zL07b4737&TH;@IlH<28#Kx%=5*x}S-+3vUKlvJGXJ}MeR4M=Gw}v(r+_FcKLCtX7?_RLaVI6(NwCmgHtXRCg>#cgu?3381wPU5+CAi zP>zAL1TB!!cW66SLeT9IUAlZ>QC^Pq&w6r8G?GzdSnWi+&p}7t82=$fdv+3xCn(?^ zNjPo)G7Z+fapU-~o}tO@6wCtMy|Coz)lG%=)AGekLxJ+3ZR_rfPkH@f)E4n-Kr7e1F)05B?~Iza(aTRxH?e@MPv z9~Qb<1M205!W8ajqCdvXs#BCb z2V87yEF3&sJiPxFK9CTIk57P)hetq6KtM?J4|qTlQeq;K|Mvem`Oo}6tACRS9}oXO zjsG3~4gkpUF#v!8EDUx4COHNcImX`+z!Lxf0~_nV>;8Y>VqxRpVd4WZ00jT!N@M^G zOzeNa!o|VD#`>p(fr$mc#v#Y0U=zcmR58S7xAmqHj|5V4sAiPWa6(!JC3emT)Mn4^ zqRRi-GhzTR|A%`37B&VZ4geSLpOBXvfQgBLje-52%Ktqd1CtyJ`=4tS97=XWsz_V! z3~|-JYXBmwe}2fZ$N|p)^F57ua0l!t%=o^ur^YoUY0eGkD%K=;Q;&*(5;Gf#^B~wHjQFjbeAbnboW8+9(CjKV_Yni_G@^ zs~~qO{__}ZUinlADR??N)wZ5Vtip|oUevViLu!xG%Yl{Id~aLXleF* z&C+5Yzo+PhgW(yd?~nZR)i%Z`56NF?@ifxxBY$*WQ3Y&h*eeBbe>QgG32j(<>Q3$A zc72sh>w`Oz5))FGT}Z4qCW5zp$RU}uh9+w)$1G~*#H!?c^+l>vrB}=L&

G8s=CH z0q3Bq4_MuT+ULqCEW#3L|u<6PSo2MyZYcUhF{6 zA?83A1t6w)ig#o$u;8z>B%q((<=vZQ_*|=(V0qSPd!aEk673>b7{I78v!_n(y!Z@Y z)~L0ca1Tl8?yySOJnz(#aK6iNSoo5r80s|Z;w6kM&h88wVO(--Rr0$|tcdn!iK(xm zZJwg#vU+O|@**B<(IM~H9dKo45~`h`mcGzOXfULK66&-cbh!;jUQC9AL~G}VzS3Z@ ze{a5yEQx*QRR<=RL@&VGuT%1x65R&w(^i% zU#IC-Pk8Cl&sr-*XvHb>O>LRMMOPr%{eayth z2iV05bnxs5f6izUE0vko1w(+sP&eF-BInu+lS;~#_C0EkP)#t%koLpRIYqQc7_eJx z*7am>uy$SVJpXvX)BB0=Drns*%>KZoZ&z7ywg)JBU44Wv$bbYpX<~NmEw9GCZDLhUE%I+2VU9BK&-tpY%j7F; z-ax23VHQt5%5l1Mj>~A2m3!kg$adUr7|(4qaEgajSS$U)Lq=uRu!adX zybB55?DoU!?>uE(Ie1E`FOFC#cd0`4+i@6o{fee$laicI00vl%ay@tdBKfDnckUYa z(d|T=eLbUUf5|o4B;*iJZ0j$8oLsz6_~y=!Hmcw%9==#TK(%EZh3Aw`LDEBB={aOP z(BR;^^p$An*SvKcS-{f8arhE*uY|x%>dflMN>;zyA<^?pLG_amI?x8w05Q_pNG8|n zC$Nl-Gh{H(cTkP>t}---}P@2-nzuT z7?VXLNYA7@Sx=V6L}d8ZHP7pY6o#nVU!g!yMopt{dB!IYQpD5eaKDwY;ai2BJoO4h zT@0C#kCgX}hy642I<70yk zoXFzbg(%~gKywTUr z&3&DG9LJ<@)!ap6g-}SH?ttqKsGFwm)mhAj#~aWaN{U#Bwz=*FuQmSOEP)Y+--EzQ z%L!Ax4E@{Mp^fY6FjR8Zl(~gFD#h~(WlgNbB`7NixO-CxJTD}vAv-(W=aSn%ypRlR zZwz-1R1?a_xfW~hW_lQtL`g`trXzKK1V+@y^qJCDzbQ)!O<3UIGBlS#1J}2lkqwZ9 zuNiI#x5kihEhY6Tl3K<*3@){fXx*~H+QT|A{@+-Jb$HstQlGvfW8eFWE^zH&xvUJJ zqJyrXuaorF0T+z8!sf8a~r+k6S z_9%;oatbO~AUYDR4|=-ugHA%30&p0O`3-39M_UZtZng2T0c!JV|5B)e zYq|jv_SZUj%t|kF7ztEv834@jc52%_%lDI!?&__Oj%Fso~K549mG; zaxfAt`h@4V5#LnrejuRr1$V}AsMnCsngkdK*=5q#9Z|JQ^c-3&aZj?`(0sy}w9dB7 zd561dDs)BH{lK&Pc_&q9yg}MXCp~LrR75mY_jfdQ7akdZmUYb(mffcbnX6t+v+7$O z&OnWd8hzPZ1_G8mYw=+zksdc>mu}n`>zQM))C04A0q(;RMC%&$QbCmQ(t7C_kkm({ zSvOlBj!V?04VtxMb@M4|sCBuG81$>KoY1^#Pl$ItDL$}zs?zJM3YziRCaIm++8u*s zr?(J_epXT$^vr+1#`5`7)cwFh*WwNzH~8?o^wNliRpu79qh!5nHiDOF6|5h|CheFd zCurf#ld}!cZy{&X0UHaWPE^=u*330X9a+uz38}X!18NsY$%8Qb2sVZ_+OfY*WNGXJld7d4PUu?{UY(^h`dUi66%;WtL8w+W~Yf~F&&NtdHZ#K0m;DaJ1D+30%? 
z*JyRxrz48tKj|-U9a+$kHNCp!D8@4ZV14Hcfez0%^c`K%hsiP$3EqVQ1&)WVFn&I@ zcPN9X$ZqVPzH3|$N@tn=#9%1ZU3yV1lCYTdH3zS8qFKL2o&4MKo1e7$w9Q-!aSyTu z?s!%?j=+31*MHSXHKM<-T0+9L-b3s}Q0(epMnvNy2ll5jze(}oOx+}*>ooZ{L2T)} z2vTOSkFoH8*N?eO2`nnVUzA_-TWhKbb-RQU96azEHMQI25-fd?Dz~xkxqW5hMz8Mw z<%>T*{oYH`IFCZ486r*ccHVK@z1C^k&S~J|xwxlmI~QhsNg+OY)r~E$IN&Fy=@lOT zeOHR8MKbEmI#Z_sl6bqH#DsbHni{$#0}mJaeU;p}Vu4!b<#c#&C7K#;^m`~f(C08L zWkjl02d9dgJiBE4yb}cWW=^^-#yYD@$$;$$wp#ie=hEvR;!vY^EaC~8Q(Vtn(q2EY zc2!vxEVRV5+ZdrM>MGBf&fTQ_$nYv}Exv0iGCQ4?Jt-wwbMZjRj~H{_O}#N{Zry`LfWxe-sf7qlkbt~;G&L|!aUuN%}!=>ZG z)}GX?+M1L?auCZ;(W`4-T6!5_Q6;A|82%UjtD2E{MxH6Z?7XV;zX1HcbH~+s=oX%Q z;axcUA0p@PQoo-}Tww_p{{?IiKWUH{&x7DY2qf85>9rt_e6ALa$_k?mV?{tI9_c;f z5c|xgAKVODyr#$zBj4odxv2DctMqJ#PK}k;@r6E+^gt0gVpo&?ZNBC4 z1yuY})vC9aP<1|?H&bTgN_5ql?AV|Fhp}`(iF{iqPUc|e(k`y(GLt|a2I3Ps8#SEt z6q#YluCDb6C*c!RAvi66ai>dAbwkhA}2na@2kzH+gN3uq)}IY)bWw$E_V0XgL)0`lm6s3VJIL)8X^`%G z8?+oXG23PGi2htaznam9tUN7jxk{o**m>&K%{^{Dt&qLCe;hNHla({zzMb;&>dDVTUia!qZidD3bz~rmn8y z|2<6n?3n=^U|xyZR&#yfmZ|)cVf5x*5Z)u(FlDdu;00UFD$adlH2dR^QqqkuTYOi` zR?CMJmrKaXl!#N;!bubrt&g(kY?1m9lVM-&mKn)V)-->1QYKAOWt#Kg#Lw6wFXr4J zNmPTc=Cs@^L*Fx=ch)p>jDf@eemS`84pOEkJoX?lU z-26b#(D#di#_(B3Q)@$Tp&9GQrd)Z#jS9=J{Jsi1lJr->7xo32erm}th#4@<&%#YL z5c>66(^bxsYA4yeX4WG~qgy*KPu8ln=FCTXwLPqTEM=LiXRBBqk)LhQC;=;rCZdNTagxKfZELT|%fZ(2x!j&)ujjEAc#!vqYvcoUULSu$)&Ym+I4^78J zVlRmKJW$6JKcjZ^HOSs)AwT>DOw#(arn?G6_3ZT2tpvPFoJ4QrWP?8?X{xsK!C>ue z{35ThceC~OdQ5^pJ_)E3JQ;u(-a4i_cbT#9qdg!B+`L84-xQalC$(r6mgO)?L?_8` zYj{p;e`5EN>vH&}p!BLyz9?0quDo9ywu zSWXxQC(f~SKN|HKErw%owkVDY`IvZ5EWqR-ed_H+)DDXfMgWW!1!-#mw$iY&}rSTbO;ltCX*&7F7h$?Tb%>F2`!puEm7@W=M&h$MhN07QLItyNip7Ip>G6$JEYfBTY+GnuIQ9U?puo{ zQp@K}T8gL~$X|e5+`n%E)pl%>*{179A=%5js>%)Hly~-3K7FK!J%S9QkG{UguJ6Tv zw_Y`WC$LXnOH?+yMmhH<_7ks^3_pGKX*Wc*imt$1{8EQ#QsG2%bGEiHv%qqR$|6;C&HBh=jfa<0rL7Vi4G z8nES^J-YKRp!8Ec$JfO|p&mLDCmSvuftDraT{)U6$3j@BKu~OL7YfX!wO}no@5#ma z2C-^1t=9<$l8sd^8C7v^3CiYsA$62U5@_NUHDZ&O`iZ21s%}A2qpCR`3;$>Ar=hQ*ICPoNr`(|I!sPMp 
zmE_LI{QSxm*&!og1O=7C;JO4ws;%E;P8Im2mhlzT3WzJh+TdROVl&$_mqx_-YVEqS}Q zKRre7>5@x~HzG~EEfa@E*sis+YsX71u=`U&Q7s0RjRhr8-HsWnnH3xYE)4-Lbr&1Z zGM;RPd98`J&Z5*@E!I7qwj-dIaGJm3?($lD)^?2Xm7 z(&sd;u__OZ=*o8!i=%`uRsuVr_#Z5*cS7uoMn88SOb$`pt2PE0(kp{_HU&00cL`GI z3dH5Azc~@07e;#GRe|4qegw?nE*l}`3~i>x>*_1^3O4GkItNjbdhQ=Qw_y0Iang$` zv^?Cf6_$7&661r7yaI}}Nlrl=Cohu1ts38@^ImJ>UVkH5dfAP3Wr7Zd0*J8$KLVSt zoq5CkO6FeRSE}AQC8LocG{-!jFTmKcJe${5BY`0#Qz;{RIi5|mPS~NKqV1hHJyd6! zJ|N;R;Gk+;o&wLhnW?Y%hcF?!;F*3;Q45%JVl_NI@b_(B(W7&K4s8~Pa+(x;W5U_tF z^r^Ydq3ZgFS)Ast@xNRKWdPwVQq+bAl1i`*^3RuQ=eA*qWtVnn|Cw_ z5UN#n*+ShR>q+~3SgQXD?or)4J=@MTL+zBpL&hGWV4Ug%<|R`%c$PhM4*esD`LZ`YZf#kD4Ht#O%0687$CTVnQ5NFmPd^oFd^QVBc$fgc zQ>>2<3B4&^ASTjYT3|w~SM8=$p6q|881Zn-cUr#N4a@J$jj3K&?sEdQto$B<`PCdR zI<467$V_TU1DSW-h>cvTt7LRsOBXJuP-+v(7oFi|A>_K8v8~hbU%lP*ywD|*j>)}K ziVqZP2GdNA>THe*g=8w7l}25|u?YFZXrIFn`1Q;2w`Ly(IoKFe~&4PO0gH!xxLD^Me6 zPU(FyGJKPKXBel4*rdYf14D_HPD7fGbW9^sid}u|5YYc zHC4@Lf5!0Uh&opeS>)(`kgTd)qKcMkO3(F@(6qv|0NIX7bq#H?9-`FTJbP%av0kqv zn~!IHO(6Bjj(ny73ptm5hzrm?vv-vd*il)#Lb`FUMpwN7FWS4P);oJFk>H8_Mw4rn z>)uHCt%akyh5W@R@30h=;=?b|I-4@E@f@CRXOev^6MrDquDlW(5)85<@NGlVCt4*| zp`|!vuE~_QKp%QF!rtbEj~?sVZ8AN&zCD=7G5?eS>>Nya$dCI{XVKfhB*8K}Wxi=+v@aozZi6^*(*z;BG zAIOh`3?SjxKsSF_&55cEO?LnnpUgU@()^@`z%FTYYu?)uQSPeBY?`@r4?+84LN+&! 
zG6v1T^Ad{1ARd*1DiGh=TS{cBrC?ze`@78y9^e31kPwPwAT$y;98%=6YQ9LZ5xXy-mu3`Wc$Zg4P8tX7!OS!&yfH zJdq`-Xxek@_wlm1kipZ~ovk!*vEci_%($V(U=7%j|A!O6rgTADYUOQgjM8;R7@>v7 z`jUJ{i(BPQVhOzgef{EEy|A@8^uiULo0D$5rPf!WtrqesLEib@PolD}=z)+_=6q-- z-FYr7WXI)JhN3P?cL5zimlbRID=ERrrmzb8JH>|?s4oJx71YGA!=+ijmuAEd=eqPS`?}z9?}1Y^&&T@c z7WylNcY3i^IH6H&WR*aO8A9pPT9gpOZ*5z%O41@4?f%`ntw%R5B8oVkbx?*hs5mA_ z^YZg{kwBd2sU5s^sD3iH+R0q?O`fEcIdLY*?tW;5UEPSH;y#JaMDS?!N9pzNq;~R_ z0as~tiRr^9QQu)dr3ERJCySNr^iOSU1$zYPUrfmS_WKKHpZ~M+D%gV4`Crc=;2#q- zLDOaB!)(jlb0TH1tapF?ilZy&!QO*VU^KlyEcw)l?t$#!i7)@qQt-p1U+j8ktY-8< zjyK!RHqZ~HyEV5A=|qu^_i%ruSv52?y~^zhGueTcS@kku1k0b(7i5Y~ovX z&h*|o7(B*W$e+zu{i85^V3f&qq7xM8{3RA!FY*_}M24K4{ku<@fAyZTKbsl+9+I&% zgEwL=H+O5Q?LU#r_$>mYB|l;ZDe9=2Sam2zzL8R|o2ZPtVN?Si>5TjJRGD-0J>myN z7VpM7xx!s|5n6Jd3w}u&DEc)iHYLlqc69!84?@Rn#NsA$c?%NqcBb3Q`Jtp&V$H!4%L z{CP3`Q4-q@mpm{{UiG&vFs^iAm}e( z$LL~8x;dBb%c1W{KAy$|o4q95?*g%0iI>4Ah(<8<-+?h1cZNMwR?(IYYBHxOP5ia((+Zy+qTWKb#AE z%0>=VIdMq4IqO#&gddc)RCftSgRFa3AKsT#30@drT@h zPtq0B``x|#rNJtnG<_6E25Y{OoP$m=-=x5=Y$nPeJITr8n=#=DJY4Pfu>5<{PR_bY zSti>zauX2Zg3-tZh~NH+JIYlw8;jn2>#!>s8T?dSe@V zS{;ej{V-_4-|*+=zKCyp^-ckUCN1`n9dh8()&=g^ef^`}HCFKZ+WYv*j6_=bc|l!R z&SpdvD;ayh>r7V&Z-7DNzNSs(_kUSUlxU?Gp0W78qC)a0Dye{+aLU*yZqe77u-t@2 zDl_`RjiT*ZWZ1f8y90XiCDB$_7=5mgTFvgazY&(MUd4>+vSW&b_ zh41YfErMjot38rY)ULQ0pRMW6Aq~T;?+jr@Nh_tr6%MlcyEJ+My5?{3E#gY^gx}A7 z1Ih_Q6=;IMe8EDBrb^Gnzs@{KZ1a>7E!zD(BF(yMQ{S}>AJ9l0!*TYnspj{@;f-jr z+LlGVw;Ma8*&_1J&-6aDI~W?B6TzvimqZk*d(jtJ=>R39)gy3j7ogh&DP?(YlLf8ZdIq(HpbnWVZ-D!4!389dzC-#kyk zUGXA(9_C^Kk)mbaevG4u_C_!r>`+~+CW%fC<-@gaQMFXm0`Paz$E1DqaHTd6aFMcm zM1#*saTw^?CZQNXRo&nO0)5G(onZ3Ta(sdJk7UGM{lM#f$=S&j;##hNc`sqMg$Lt6usxW?;~RZva-_Y`Kx($ub-|vPna?BVhNO1jPSl zNeuo3q9Xs~oJ)L{r;^Oad}%7-s4HLICH*J5Uxa5zQ zsNBx12yjQ-Xx<7P>rJ4HlNT&ESMGd7h0- zJ``px#Kd$-@iLAQifGmD$X$2bKXC*pMA!#r$unmgi1Nrq^97Z+R)4KpHwhDQquZDZ zY1|>s4BGgd&j;K2F32ATIhKe>-*-0!Kn{S z@fP?baN2qD4B*}jW|y{8jPb3h<;-}x-Xz95kBPY@YGGx3Mb_=TIgT4Tt(3J`Agp33 
zO*3LCLCC*4cHoRi{9x2k)rBDW!kY0hg=)M_yUo#=?vySu&T$LoS_r3M_@OW&rCOaY zr-4oS(GMo7uqNP6I9$-!`*S3Ju?&8S-0tXP%8aX|~c8<;id3Ho)K#Cz3E(L=ye zPkXlCFiFkrerJID_#*Ef3ngMY1_pl_Zx$!ylBa_4cK)PB=*Nr9*RkNv=W{cg!@(Q} z%!BdhCUS;CTurNm7z_m1B;RlIkAHM%0G6A6`Z2w(A`CjMlUbu`m?f22G_)V`TiHZt zEUsX^QM`5S0IbFNq99Y6xxJUrPMEb@RIxz?q&t&=6$spZJwrAOIIB|}N!mYS^GcC#lM-VG^>%LFBL9s7%P1Vgj&K7!9Tk;VY zK&ZlBpAES-MvqDYR*nhLQ~Xvp6PKLMmC|v& zB;p!&F~_|6fmoaxkO&@p88-v$YyWM%U`*~Uf z$CPu_e*uZr@^gq#VzyU#a!{^ktsW(Y!e}zRJ&-5Y)68ac~)ds8ujXgwf}zK$gc z9C>F~~R9 zg2EOU31KJ=V9bPFWrI-E)f}ZqZB7-V@ihZ;K>XSiy!uE;o~ndz^BhU1WH?^{QXKX@ zFvcSzPWWzr;(%TPFILUkIslOp^`@NwKl61L~ahuPp}}ud>|o*joHH{&k<_#J3v^Y@O)Q|9ZJ=bTKX~Z_UrO z=x5KMzOCrsiA>5xUcJ6$F>;ft3vQ~Ms3EAgZ!g)WZ?**Z#;E>Yi9%2weT4i5OVDia zd9RzhOBwGNtk7eyO~)d&a!hybT}D_KDgiOEQY`daQ+ERMsXvPMqI1O`SEDQ~P77$b z4D%Dq%?Dtv@E|)9p2cW&#QY2$s;#aH_G`XL_|?$*TQI?e1j`H4bBS3SeDK7P5me0Tv?1-t?nuf`a>KaIVw^JGTt+M4Cw=xWR3*u5&kT&Rn;&9k=gQP&Q7{Oe zQ87nJ`_;|YC&VTN$L@$@(_vSxWM%>mfk@0pH_M?Tb?y!iPv(~|uuLuZb?@;9;h37~ zJ1+A}< ziZz_?`o%-X_#h^22z8RTEigDu{mCsSN>CX8yL|i|Uo&l2Uk7<{cO5oND0NK}jAKKz z<`O1F&i`H>(wE#?BX&IRhJU~Ol9zkzw|^R}EdYCi_=Ei@Wi#F})81!1?p*a-O^uZo z&J!N1tS`()^@4xN54#rqT5*-MLsd8MVPVoeRY} zas=qeHQB^9;dZAhOmbM~+-!7eu*e!239Z!O5-Wlmkzapk^vcveNJp;F`TC{R?nG?# zMp{lUI+?b1PSIjOhkGv&ScYm z-{m=~L;Md1hMi`db|ns4bEqIAD!mVq$yCL};~c102rIiKhR2MI@5JrBIu70Hd_2QS zc;gjrn9^wx{EYW&irs+2uZ(7%EWY@hX6gDM#L#wv28Xx=r&$uuwODjx&V)CX=ohWO z0CuGwW$yc5$vNOgoVc7hcCkLRhwkbT4Wd#&+95?){yCCE7ypwR)|1vDRq>ToSe{oB zj&pVumN(qa@x^zm&VdrvOUkfFMq(X)uFN$fpyA<9LI8=!(%i?OYxm^NZq}>5b4cFa Jr?$VV{|7i+_m}_x literal 0 HcmV?d00001 diff --git a/vendor/cloud.google.com/go/vision/vision.go b/vendor/cloud.google.com/go/vision/vision.go new file mode 100644 index 00000000..c9f2f0ac --- /dev/null +++ b/vendor/cloud.google.com/go/vision/vision.go @@ -0,0 +1,357 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package vision + +import ( + "image/color" + "math" + + "cloud.google.com/go/internal/version" + vkit "cloud.google.com/go/vision/apiv1" + "golang.org/x/net/context" + "google.golang.org/api/option" + pb "google.golang.org/genproto/googleapis/cloud/vision/v1" + cpb "google.golang.org/genproto/googleapis/type/color" +) + +// Scope is the OAuth2 scope required by the Google Cloud Vision API. +const Scope = "https://www.googleapis.com/auth/cloud-platform" + +// Client is a Google Cloud Vision API client. +type Client struct { + client *vkit.ImageAnnotatorClient +} + +// NewClient creates a new vision client. +func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error) { + c, err := vkit.NewImageAnnotatorClient(ctx, opts...) + if err != nil { + return nil, err + } + c.SetGoogleClientInfo("gccl", version.Repo) + return &Client{client: c}, nil +} + +// Close closes the client. +func (c *Client) Close() error { + return c.client.Close() +} + +// Annotate annotates multiple images, each with a potentially differeent set +// of features. 
+func (c *Client) Annotate(ctx context.Context, requests ...*AnnotateRequest) ([]*Annotations, error) { + var reqs []*pb.AnnotateImageRequest + for _, r := range requests { + reqs = append(reqs, r.toProto()) + } + res, err := c.client.BatchAnnotateImages(ctx, &pb.BatchAnnotateImagesRequest{Requests: reqs}) + if err != nil { + return nil, err + } + var results []*Annotations + for _, res := range res.Responses { + results = append(results, annotationsFromProto(res)) + } + return results, nil +} + +// An AnnotateRequest specifies an image to annotate and the features to look for in that image. +type AnnotateRequest struct { + // Image is the image to annotate. + Image *Image + // MaxFaces is the maximum number of faces to detect in the image. + // Specifying a number greater than zero enables face detection. + MaxFaces int + // MaxLandmarks is the maximum number of landmarks to detect in the image. + // Specifying a number greater than zero enables landmark detection. + MaxLandmarks int + // MaxLogos is the maximum number of logos to detect in the image. + // Specifying a number greater than zero enables logo detection. + MaxLogos int + // MaxLabels is the maximum number of labels to detect in the image. + // Specifying a number greater than zero enables labels detection. + MaxLabels int + // MaxTexts is the maximum number of separate pieces of text to detect in the + // image. Specifying a number greater than zero enables text detection. + MaxTexts int + // DocumentText specifies whether a dense text document OCR should be run + // on the image. When true, takes precedence over MaxTexts. + DocumentText bool + // SafeSearch specifies whether a safe-search detection should be run on the image. + SafeSearch bool + // ImageProps specifies whether image properties should be obtained for the image. + ImageProps bool + // Web specifies whether web annotations should be obtained for the image. 
+ Web bool + // CropHints specifies whether crop hints should be computed for the image. + CropHints *CropHintsParams +} + +func (ar *AnnotateRequest) toProto() *pb.AnnotateImageRequest { + img, ictx := ar.Image.toProtos() + var features []*pb.Feature + add := func(typ pb.Feature_Type, max int) { + var mr int32 + if max > math.MaxInt32 { + mr = math.MaxInt32 + } else { + mr = int32(max) + } + features = append(features, &pb.Feature{Type: typ, MaxResults: mr}) + } + if ar.MaxFaces > 0 { + add(pb.Feature_FACE_DETECTION, ar.MaxFaces) + } + if ar.MaxLandmarks > 0 { + add(pb.Feature_LANDMARK_DETECTION, ar.MaxLandmarks) + } + if ar.MaxLogos > 0 { + add(pb.Feature_LOGO_DETECTION, ar.MaxLogos) + } + if ar.MaxLabels > 0 { + add(pb.Feature_LABEL_DETECTION, ar.MaxLabels) + } + if ar.MaxTexts > 0 { + add(pb.Feature_TEXT_DETECTION, ar.MaxTexts) + } + if ar.DocumentText { + add(pb.Feature_DOCUMENT_TEXT_DETECTION, 0) + } + if ar.SafeSearch { + add(pb.Feature_SAFE_SEARCH_DETECTION, 0) + } + if ar.ImageProps { + add(pb.Feature_IMAGE_PROPERTIES, 0) + } + if ar.Web { + add(pb.Feature_WEB_DETECTION, 0) + } + if ar.CropHints != nil { + add(pb.Feature_CROP_HINTS, 0) + if ictx == nil { + ictx = &pb.ImageContext{} + } + ictx.CropHintsParams = &pb.CropHintsParams{ + AspectRatios: ar.CropHints.AspectRatios, + } + } + return &pb.AnnotateImageRequest{ + Image: img, + Features: features, + ImageContext: ictx, + } +} + +// CropHintsParams are parameters for a request for crop hints. +type CropHintsParams struct { + // Aspect ratios for desired crop hints, representing the ratio of the + // width to the height of the image. For example, if the desired aspect + // ratio is 4:3, the corresponding float value should be 1.33333. If not + // specified, the best possible crop is returned. The number of provided + // aspect ratios is limited to a maximum of 16; any aspect ratios provided + // after the 16th are ignored. + AspectRatios []float32 +} + +// Called for a single image and a single feature. 
+func (c *Client) annotateOne(ctx context.Context, req *AnnotateRequest) (*Annotations, error) { + annsSlice, err := c.Annotate(ctx, req) + if err != nil { + return nil, err + } + anns := annsSlice[0] + // When there is only one image and one feature, the Annotations.Error field is + // unambiguously about that one detection, so we "promote" it to the error return value. + if anns.Error != nil { + return nil, anns.Error + } + return anns, nil +} + +// TODO(jba): add examples for all single-feature functions (below). + +// DetectFaces performs face detection on the image. +// At most maxResults results are returned. +func (c *Client) DetectFaces(ctx context.Context, img *Image, maxResults int) ([]*FaceAnnotation, error) { + anns, err := c.annotateOne(ctx, &AnnotateRequest{Image: img, MaxFaces: maxResults}) + if err != nil { + return nil, err + } + return anns.Faces, nil +} + +// DetectLandmarks performs landmark detection on the image. +// At most maxResults results are returned. +func (c *Client) DetectLandmarks(ctx context.Context, img *Image, maxResults int) ([]*EntityAnnotation, error) { + anns, err := c.annotateOne(ctx, &AnnotateRequest{Image: img, MaxLandmarks: maxResults}) + if err != nil { + return nil, err + } + return anns.Landmarks, nil +} + +// DetectLogos performs logo detection on the image. +// At most maxResults results are returned. +func (c *Client) DetectLogos(ctx context.Context, img *Image, maxResults int) ([]*EntityAnnotation, error) { + anns, err := c.annotateOne(ctx, &AnnotateRequest{Image: img, MaxLogos: maxResults}) + if err != nil { + return nil, err + } + return anns.Logos, nil +} + +// DetectLabels performs label detection on the image. +// At most maxResults results are returned. 
+func (c *Client) DetectLabels(ctx context.Context, img *Image, maxResults int) ([]*EntityAnnotation, error) { + anns, err := c.annotateOne(ctx, &AnnotateRequest{Image: img, MaxLabels: maxResults}) + if err != nil { + return nil, err + } + return anns.Labels, nil +} + +// DetectTexts performs text detection on the image. +// At most maxResults results are returned. +func (c *Client) DetectTexts(ctx context.Context, img *Image, maxResults int) ([]*EntityAnnotation, error) { + anns, err := c.annotateOne(ctx, &AnnotateRequest{Image: img, MaxTexts: maxResults}) + if err != nil { + return nil, err + } + return anns.Texts, nil +} + +// DetectDocumentText performs full text (OCR) detection on the image. +func (c *Client) DetectDocumentText(ctx context.Context, img *Image) (*TextAnnotation, error) { + anns, err := c.annotateOne(ctx, &AnnotateRequest{Image: img, DocumentText: true}) + if err != nil { + return nil, err + } + return anns.FullText, nil +} + +// DetectSafeSearch performs safe-search detection on the image. +func (c *Client) DetectSafeSearch(ctx context.Context, img *Image) (*SafeSearchAnnotation, error) { + anns, err := c.annotateOne(ctx, &AnnotateRequest{Image: img, SafeSearch: true}) + if err != nil { + return nil, err + } + return anns.SafeSearch, nil +} + +// DetectImageProps computes properties of the image. +func (c *Client) DetectImageProps(ctx context.Context, img *Image) (*ImageProps, error) { + anns, err := c.annotateOne(ctx, &AnnotateRequest{Image: img, ImageProps: true}) + if err != nil { + return nil, err + } + return anns.ImageProps, nil +} + +// DetectWeb computes a web annotation on the image. +func (c *Client) DetectWeb(ctx context.Context, img *Image) (*WebDetection, error) { + anns, err := c.annotateOne(ctx, &AnnotateRequest{Image: img, Web: true}) + if err != nil { + return nil, err + } + return anns.Web, nil +} + +// CropHints computes crop hints for the image. 
+func (c *Client) CropHints(ctx context.Context, img *Image, params *CropHintsParams) ([]*CropHint, error) { + // A nil AnnotateRequest.CropHints means do not perform CropHints. But + // here the user is explicitly asking for CropHints, so treat nil as + // an empty CropHintsParams. + if params == nil { + params = &CropHintsParams{} + } + anns, err := c.annotateOne(ctx, &AnnotateRequest{Image: img, CropHints: params}) + if err != nil { + return nil, err + } + return anns.CropHints, nil +} + +// A Likelihood is an approximate representation of a probability. +type Likelihood int + +const ( + // LikelihoodUnknown means the likelihood is unknown. + LikelihoodUnknown = Likelihood(pb.Likelihood_UNKNOWN) + + // VeryUnlikely means the image is very unlikely to belong to the feature specified. + VeryUnlikely = Likelihood(pb.Likelihood_VERY_UNLIKELY) + + // Unlikely means the image is unlikely to belong to the feature specified. + Unlikely = Likelihood(pb.Likelihood_UNLIKELY) + + // Possible means the image possibly belongs to the feature specified. + Possible = Likelihood(pb.Likelihood_POSSIBLE) + + // Likely means the image is likely to belong to the feature specified. + Likely = Likelihood(pb.Likelihood_LIKELY) + + // VeryLikely means the image is very likely to belong to the feature specified. + VeryLikely = Likelihood(pb.Likelihood_VERY_LIKELY) +) + +// A Property is an arbitrary name-value pair. +type Property struct { + Name string + Value string +} + +func propertyFromProto(p *pb.Property) Property { + return Property{Name: p.Name, Value: p.Value} +} + +// ColorInfo consists of RGB channels, score and fraction of +// image the color occupies in the image. +type ColorInfo struct { + // RGB components of the color. + Color color.NRGBA64 + + // Score is the image-specific score for this color, in the range [0, 1]. + Score float32 + + // PixelFraction is the fraction of pixels the color occupies in the image, + // in the range [0, 1]. 
+ PixelFraction float32 +} + +func colorInfoFromProto(ci *pb.ColorInfo) *ColorInfo { + return &ColorInfo{ + Color: colorFromProto(ci.Color), + Score: ci.Score, + PixelFraction: ci.PixelFraction, + } +} + +// Should this go into protobuf/ptypes? The color proto is in google/types, so +// not specific to this API. +func colorFromProto(c *cpb.Color) color.NRGBA64 { + // Convert a color component from [0.0, 1.0] to a uint16. + cvt := func(f float32) uint16 { return uint16(f*math.MaxUint16 + 0.5) } + + var alpha float32 = 1 + if c.Alpha != nil { + alpha = c.Alpha.Value + } + return color.NRGBA64{ + R: cvt(c.Red), + G: cvt(c.Green), + B: cvt(c.Blue), + A: cvt(alpha), + } +} diff --git a/vendor/cloud.google.com/go/vision/vision_test.go b/vendor/cloud.google.com/go/vision/vision_test.go new file mode 100644 index 00000000..458004fb --- /dev/null +++ b/vendor/cloud.google.com/go/vision/vision_test.go @@ -0,0 +1,283 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package vision + +import ( + "log" + "os" + "testing" + + "cloud.google.com/go/internal/testutil" + + "golang.org/x/net/context" + "google.golang.org/api/option" +) + +func TestAnnotate(t *testing.T) { + ctx := context.Background() + client := integrationTestClient(ctx, t) + defer client.Close() + + tests := []struct { + path string // path to image file, relative to testdata + // If one of these is true, we expect that annotation to be non-nil. 
+ faces, landmarks, logos, labels, texts, fullText bool + // We always expect safe search, image properties, web and crop hints to be present. + }{ + {path: "face.jpg", faces: true, labels: true}, + {path: "cat.jpg", labels: true}, + {path: "faulkner.jpg", labels: true}, + {path: "mountain.jpg", texts: true, fullText: true, labels: true}, + {path: "no-text.jpg", labels: true}, + {path: "eiffel-tower.jpg", landmarks: true, labels: true}, + {path: "google.png", logos: true, labels: true, texts: true, fullText: true}, + } + for _, test := range tests { + annsSlice, err := client.Annotate(ctx, &AnnotateRequest{ + Image: testImage(test.path), + MaxFaces: 1, + MaxLandmarks: 1, + MaxLogos: 1, + MaxLabels: 1, + MaxTexts: 1, + Web: true, + SafeSearch: true, + ImageProps: true, + CropHints: &CropHintsParams{}, + }) + if err != nil { + t.Fatalf("annotating %s: %v", test.path, err) + } + anns := annsSlice[0] + p := map[bool]string{true: "present", false: "absent"} + if anns.Error != nil { + t.Errorf("%s: got Error %v; want nil", test.path, anns.Error) + continue + } + if got, want := (anns.Faces != nil), test.faces; got != want { + t.Errorf("%s: faces %s, want %s", test.path, p[got], p[want]) + } + if got, want := (anns.Landmarks != nil), test.landmarks; got != want { + t.Errorf("%s: landmarks %s, want %s", test.path, p[got], p[want]) + } + if got, want := (anns.Logos != nil), test.logos; got != want { + t.Errorf("%s: logos %s, want %s", test.path, p[got], p[want]) + } + if got, want := (anns.Labels != nil), test.labels; got != want { + t.Errorf("%s: labels %s, want %s", test.path, p[got], p[want]) + } + if got, want := (anns.Texts != nil), test.texts; got != want { + t.Errorf("%s: texts %s, want %s", test.path, p[got], p[want]) + } + if got, want := (anns.FullText != nil), test.fullText; got != want { + t.Errorf("%s: full texts %s, want %s", test.path, p[got], p[want]) + } + if got, want := (anns.SafeSearch != nil), true; got != want { + t.Errorf("%s: safe search %s, want 
%s", test.path, p[got], p[want]) + } + if got, want := (anns.ImageProps != nil), true; got != want { + t.Errorf("%s: image properties %s, want %s", test.path, p[got], p[want]) + } + if got, want := (anns.Web != nil), true; got != want { + t.Errorf("%s: web %s, want %s", test.path, p[got], p[want]) + } + if got, want := (anns.CropHints != nil), true; got != want { + t.Errorf("%s: crop hints %s, want %s", test.path, p[got], p[want]) + } + } +} + +func TestDetectMethods(t *testing.T) { + ctx := context.Background() + client := integrationTestClient(ctx, t) + defer client.Close() + + for i, test := range []struct { + path string + call func(*Image) (bool, error) + }{ + {"face.jpg", + func(img *Image) (bool, error) { + as, err := client.DetectFaces(ctx, img, 1) + return as != nil, err + }, + }, + {"eiffel-tower.jpg", + func(img *Image) (bool, error) { + as, err := client.DetectLandmarks(ctx, img, 1) + return as != nil, err + }, + }, + {"google.png", + func(img *Image) (bool, error) { + as, err := client.DetectLogos(ctx, img, 1) + return as != nil, err + }, + }, + {"faulkner.jpg", + func(img *Image) (bool, error) { + as, err := client.DetectLabels(ctx, img, 1) + return as != nil, err + }, + }, + {"mountain.jpg", + func(img *Image) (bool, error) { + as, err := client.DetectTexts(ctx, img, 1) + return as != nil, err + }, + }, + {"mountain.jpg", + func(img *Image) (bool, error) { + as, err := client.DetectDocumentText(ctx, img) + return as != nil, err + }, + }, + {"cat.jpg", + func(img *Image) (bool, error) { + as, err := client.DetectSafeSearch(ctx, img) + return as != nil, err + }, + }, + {"cat.jpg", + func(img *Image) (bool, error) { + ip, err := client.DetectImageProps(ctx, img) + return ip != nil, err + }, + }, + {"cat.jpg", + func(img *Image) (bool, error) { + as, err := client.DetectWeb(ctx, img) + return as != nil, err + }, + }, + {"cat.jpg", + func(img *Image) (bool, error) { + ch, err := client.CropHints(ctx, img, nil) + return ch != nil, err + }, + }, + } { + 
present, err := test.call(testImage(test.path)) + if err != nil { + t.Errorf("%s, #%d: got err %v, want nil", test.path, i, err) + continue + } + if !present { + t.Errorf("%s, #%d: nil annotation, want non-nil", test.path, i) + } + } +} + +// The DetectXXX methods of client that return EntityAnnotations. +var entityDetectionMethods = []func(*Client, context.Context, *Image, int) ([]*EntityAnnotation, error){ + (*Client).DetectLandmarks, + (*Client).DetectLogos, + (*Client).DetectLabels, + (*Client).DetectTexts, +} + +func TestErrors(t *testing.T) { + ctx := context.Background() + client := integrationTestClient(ctx, t) + defer client.Close() + + // Empty image. + // With Client.Annotate, the RPC succeeds, but the Error field is non-nil. + _, err := client.Annotate(ctx, &AnnotateRequest{ + Image: &Image{}, + ImageProps: true, + }) + if err != nil { + t.Errorf("got %v, want nil", err) + } + + // Invalid image. + badImg := &Image{content: []byte("ceci n'est pas une image")} + // If only ImageProps is specified, the result is an annotation + // with all fields (including Error) nil. But any actual detection will fail. + _, err = client.Annotate(ctx, &AnnotateRequest{ + Image: badImg, + SafeSearch: true, + }) + if err != nil { + t.Errorf("got %v, want error", err) + } + + // With a Client.DetectXXX method, the Error field becomes the return value. + _, err = client.DetectFaces(ctx, &Image{}, 1) + if err == nil { + t.Error("got nil, want error") + } + for i, edm := range entityDetectionMethods { + _, err = edm(client, ctx, &Image{}, 1) + if err == nil { + t.Errorf("edm %d: got nil, want error", i) + } + } + _, err = client.DetectSafeSearch(ctx, &Image{}) + if err == nil { + t.Error("got nil, want error") + } + _, err = client.DetectImageProps(ctx, &Image{}) + if err == nil { + t.Error("got nil, want error") + } + + // Client.DetectXXX methods fail if passed a zero maxResults. 
+ img := testImage("cat.jpg") + _, err = client.DetectFaces(ctx, img, 0) + if err == nil { + t.Error("got nil, want error") + } + for i, edm := range entityDetectionMethods { + _, err = edm(client, ctx, img, 0) + if err == nil { + t.Errorf("edm %d: got nil, want error", i) + } + } +} + +func integrationTestClient(ctx context.Context, t *testing.T) *Client { + if testing.Short() { + t.Skip("Integration tests skipped in short mode") + } + ts := testutil.TokenSource(ctx, Scope) + if ts == nil { + t.Skip("Integration tests skipped. See CONTRIBUTING.md for details") + } + client, err := NewClient(ctx, option.WithTokenSource(ts)) + if err != nil { + t.Fatal(err) + } + return client +} + +var images = map[string]*Image{} + +func testImage(path string) *Image { + if img, ok := images[path]; ok { + return img + } + f, err := os.Open("testdata/" + path) + if err != nil { + log.Fatal(err) + } + img, err := NewImageFromReader(f) + if err != nil { + log.Fatalf("reading image %q: %v", path, err) + } + images[path] = img + return img +} diff --git a/vendor/github.com/Azure/go-ansiterm/LICENSE b/vendor/github.com/Azure/go-ansiterm/LICENSE new file mode 100644 index 00000000..e3d9a64d --- /dev/null +++ b/vendor/github.com/Azure/go-ansiterm/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2015 Microsoft Corporation + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/Azure/go-ansiterm/README.md b/vendor/github.com/Azure/go-ansiterm/README.md new file mode 100644 index 00000000..261c041e --- /dev/null +++ b/vendor/github.com/Azure/go-ansiterm/README.md @@ -0,0 +1,12 @@ +# go-ansiterm + +This is a cross platform Ansi Terminal Emulation library. It reads a stream of Ansi characters and produces the appropriate function calls. The results of the function calls are platform dependent. + +For example the parser might receive "ESC, [, A" as a stream of three characters. This is the code for Cursor Up (http://www.vt100.net/docs/vt510-rm/CUU). The parser then calls the cursor up function (CUU()) on an event handler. The event handler determines what platform specific work must be done to cause the cursor to move up one position. + +The parser (parser.go) is a partial implementation of this state machine (http://vt100.net/emu/vt500_parser.png). There are also two event handler implementations, one for tests (test_event_handler.go) to validate that the expected events are being produced and called, the other is a Windows implementation (winterm/win_event_handler.go). + +See parser_test.go for examples exercising the state machine and generating appropriate function calls. + +----- +This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). 
For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments. diff --git a/vendor/github.com/Azure/go-ansiterm/constants.go b/vendor/github.com/Azure/go-ansiterm/constants.go new file mode 100644 index 00000000..96504a33 --- /dev/null +++ b/vendor/github.com/Azure/go-ansiterm/constants.go @@ -0,0 +1,188 @@ +package ansiterm + +const LogEnv = "DEBUG_TERMINAL" + +// ANSI constants +// References: +// -- http://www.ecma-international.org/publications/standards/Ecma-048.htm +// -- http://man7.org/linux/man-pages/man4/console_codes.4.html +// -- http://manpages.ubuntu.com/manpages/intrepid/man4/console_codes.4.html +// -- http://en.wikipedia.org/wiki/ANSI_escape_code +// -- http://vt100.net/emu/dec_ansi_parser +// -- http://vt100.net/emu/vt500_parser.svg +// -- http://invisible-island.net/xterm/ctlseqs/ctlseqs.html +// -- http://www.inwap.com/pdp10/ansicode.txt +const ( + // ECMA-48 Set Graphics Rendition + // Note: + // -- Constants leading with an underscore (e.g., _ANSI_xxx) are unsupported or reserved + // -- Fonts could possibly be supported via SetCurrentConsoleFontEx + // -- Windows does not expose the per-window cursor (i.e., caret) blink times + ANSI_SGR_RESET = 0 + ANSI_SGR_BOLD = 1 + ANSI_SGR_DIM = 2 + _ANSI_SGR_ITALIC = 3 + ANSI_SGR_UNDERLINE = 4 + _ANSI_SGR_BLINKSLOW = 5 + _ANSI_SGR_BLINKFAST = 6 + ANSI_SGR_REVERSE = 7 + _ANSI_SGR_INVISIBLE = 8 + _ANSI_SGR_LINETHROUGH = 9 + _ANSI_SGR_FONT_00 = 10 + _ANSI_SGR_FONT_01 = 11 + _ANSI_SGR_FONT_02 = 12 + _ANSI_SGR_FONT_03 = 13 + _ANSI_SGR_FONT_04 = 14 + _ANSI_SGR_FONT_05 = 15 + _ANSI_SGR_FONT_06 = 16 + _ANSI_SGR_FONT_07 = 17 + _ANSI_SGR_FONT_08 = 18 + _ANSI_SGR_FONT_09 = 19 + _ANSI_SGR_FONT_10 = 20 + _ANSI_SGR_DOUBLEUNDERLINE = 21 + ANSI_SGR_BOLD_DIM_OFF = 22 + _ANSI_SGR_ITALIC_OFF = 23 + ANSI_SGR_UNDERLINE_OFF = 24 + _ANSI_SGR_BLINK_OFF = 25 + 
_ANSI_SGR_RESERVED_00 = 26 + ANSI_SGR_REVERSE_OFF = 27 + _ANSI_SGR_INVISIBLE_OFF = 28 + _ANSI_SGR_LINETHROUGH_OFF = 29 + ANSI_SGR_FOREGROUND_BLACK = 30 + ANSI_SGR_FOREGROUND_RED = 31 + ANSI_SGR_FOREGROUND_GREEN = 32 + ANSI_SGR_FOREGROUND_YELLOW = 33 + ANSI_SGR_FOREGROUND_BLUE = 34 + ANSI_SGR_FOREGROUND_MAGENTA = 35 + ANSI_SGR_FOREGROUND_CYAN = 36 + ANSI_SGR_FOREGROUND_WHITE = 37 + _ANSI_SGR_RESERVED_01 = 38 + ANSI_SGR_FOREGROUND_DEFAULT = 39 + ANSI_SGR_BACKGROUND_BLACK = 40 + ANSI_SGR_BACKGROUND_RED = 41 + ANSI_SGR_BACKGROUND_GREEN = 42 + ANSI_SGR_BACKGROUND_YELLOW = 43 + ANSI_SGR_BACKGROUND_BLUE = 44 + ANSI_SGR_BACKGROUND_MAGENTA = 45 + ANSI_SGR_BACKGROUND_CYAN = 46 + ANSI_SGR_BACKGROUND_WHITE = 47 + _ANSI_SGR_RESERVED_02 = 48 + ANSI_SGR_BACKGROUND_DEFAULT = 49 + // 50 - 65: Unsupported + + ANSI_MAX_CMD_LENGTH = 4096 + + MAX_INPUT_EVENTS = 128 + DEFAULT_WIDTH = 80 + DEFAULT_HEIGHT = 24 + + ANSI_BEL = 0x07 + ANSI_BACKSPACE = 0x08 + ANSI_TAB = 0x09 + ANSI_LINE_FEED = 0x0A + ANSI_VERTICAL_TAB = 0x0B + ANSI_FORM_FEED = 0x0C + ANSI_CARRIAGE_RETURN = 0x0D + ANSI_ESCAPE_PRIMARY = 0x1B + ANSI_ESCAPE_SECONDARY = 0x5B + ANSI_OSC_STRING_ENTRY = 0x5D + ANSI_COMMAND_FIRST = 0x40 + ANSI_COMMAND_LAST = 0x7E + DCS_ENTRY = 0x90 + CSI_ENTRY = 0x9B + OSC_STRING = 0x9D + ANSI_PARAMETER_SEP = ";" + ANSI_CMD_G0 = '(' + ANSI_CMD_G1 = ')' + ANSI_CMD_G2 = '*' + ANSI_CMD_G3 = '+' + ANSI_CMD_DECPNM = '>' + ANSI_CMD_DECPAM = '=' + ANSI_CMD_OSC = ']' + ANSI_CMD_STR_TERM = '\\' + + KEY_CONTROL_PARAM_2 = ";2" + KEY_CONTROL_PARAM_3 = ";3" + KEY_CONTROL_PARAM_4 = ";4" + KEY_CONTROL_PARAM_5 = ";5" + KEY_CONTROL_PARAM_6 = ";6" + KEY_CONTROL_PARAM_7 = ";7" + KEY_CONTROL_PARAM_8 = ";8" + KEY_ESC_CSI = "\x1B[" + KEY_ESC_N = "\x1BN" + KEY_ESC_O = "\x1BO" + + FILL_CHARACTER = ' ' +) + +func getByteRange(start byte, end byte) []byte { + bytes := make([]byte, 0, 32) + for i := start; i <= end; i++ { + bytes = append(bytes, byte(i)) + } + + return bytes +} + +var toGroundBytes = getToGroundBytes() +var 
executors = getExecuteBytes() + +// SPACE 20+A0 hex Always and everywhere a blank space +// Intermediate 20-2F hex !"#$%&'()*+,-./ +var intermeds = getByteRange(0x20, 0x2F) + +// Parameters 30-3F hex 0123456789:;<=>? +// CSI Parameters 30-39, 3B hex 0123456789; +var csiParams = getByteRange(0x30, 0x3F) + +var csiCollectables = append(getByteRange(0x30, 0x39), getByteRange(0x3B, 0x3F)...) + +// Uppercase 40-5F hex @ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_ +var upperCase = getByteRange(0x40, 0x5F) + +// Lowercase 60-7E hex `abcdefghijlkmnopqrstuvwxyz{|}~ +var lowerCase = getByteRange(0x60, 0x7E) + +// Alphabetics 40-7E hex (all of upper and lower case) +var alphabetics = append(upperCase, lowerCase...) + +var printables = getByteRange(0x20, 0x7F) + +var escapeIntermediateToGroundBytes = getByteRange(0x30, 0x7E) +var escapeToGroundBytes = getEscapeToGroundBytes() + +// See http://www.vt100.net/emu/vt500_parser.png for description of the complex +// byte ranges below + +func getEscapeToGroundBytes() []byte { + escapeToGroundBytes := getByteRange(0x30, 0x4F) + escapeToGroundBytes = append(escapeToGroundBytes, getByteRange(0x51, 0x57)...) + escapeToGroundBytes = append(escapeToGroundBytes, 0x59) + escapeToGroundBytes = append(escapeToGroundBytes, 0x5A) + escapeToGroundBytes = append(escapeToGroundBytes, 0x5C) + escapeToGroundBytes = append(escapeToGroundBytes, getByteRange(0x60, 0x7E)...) + return escapeToGroundBytes +} + +func getExecuteBytes() []byte { + executeBytes := getByteRange(0x00, 0x17) + executeBytes = append(executeBytes, 0x19) + executeBytes = append(executeBytes, getByteRange(0x1C, 0x1F)...) + return executeBytes +} + +func getToGroundBytes() []byte { + groundBytes := []byte{0x18} + groundBytes = append(groundBytes, 0x1A) + groundBytes = append(groundBytes, getByteRange(0x80, 0x8F)...) + groundBytes = append(groundBytes, getByteRange(0x91, 0x97)...) 
+ groundBytes = append(groundBytes, 0x99) + groundBytes = append(groundBytes, 0x9A) + groundBytes = append(groundBytes, 0x9C) + return groundBytes +} + +// Delete 7F hex Always and everywhere ignored +// C1 Control 80-9F hex 32 additional control characters +// G1 Displayable A1-FE hex 94 additional displayable characters +// Special A0+FF hex Same as SPACE and DELETE diff --git a/vendor/github.com/Azure/go-ansiterm/context.go b/vendor/github.com/Azure/go-ansiterm/context.go new file mode 100644 index 00000000..8d66e777 --- /dev/null +++ b/vendor/github.com/Azure/go-ansiterm/context.go @@ -0,0 +1,7 @@ +package ansiterm + +type ansiContext struct { + currentChar byte + paramBuffer []byte + interBuffer []byte +} diff --git a/vendor/github.com/Azure/go-ansiterm/csi_entry_state.go b/vendor/github.com/Azure/go-ansiterm/csi_entry_state.go new file mode 100644 index 00000000..1bd6057d --- /dev/null +++ b/vendor/github.com/Azure/go-ansiterm/csi_entry_state.go @@ -0,0 +1,49 @@ +package ansiterm + +type csiEntryState struct { + baseState +} + +func (csiState csiEntryState) Handle(b byte) (s state, e error) { + logger.Infof("CsiEntry::Handle %#x", b) + + nextState, err := csiState.baseState.Handle(b) + if nextState != nil || err != nil { + return nextState, err + } + + switch { + case sliceContains(alphabetics, b): + return csiState.parser.ground, nil + case sliceContains(csiCollectables, b): + return csiState.parser.csiParam, nil + case sliceContains(executors, b): + return csiState, csiState.parser.execute() + } + + return csiState, nil +} + +func (csiState csiEntryState) Transition(s state) error { + logger.Infof("CsiEntry::Transition %s --> %s", csiState.Name(), s.Name()) + csiState.baseState.Transition(s) + + switch s { + case csiState.parser.ground: + return csiState.parser.csiDispatch() + case csiState.parser.csiParam: + switch { + case sliceContains(csiParams, csiState.parser.context.currentChar): + csiState.parser.collectParam() + case sliceContains(intermeds, 
csiState.parser.context.currentChar): + csiState.parser.collectInter() + } + } + + return nil +} + +func (csiState csiEntryState) Enter() error { + csiState.parser.clear() + return nil +} diff --git a/vendor/github.com/Azure/go-ansiterm/csi_param_state.go b/vendor/github.com/Azure/go-ansiterm/csi_param_state.go new file mode 100644 index 00000000..4be35c5f --- /dev/null +++ b/vendor/github.com/Azure/go-ansiterm/csi_param_state.go @@ -0,0 +1,38 @@ +package ansiterm + +type csiParamState struct { + baseState +} + +func (csiState csiParamState) Handle(b byte) (s state, e error) { + logger.Infof("CsiParam::Handle %#x", b) + + nextState, err := csiState.baseState.Handle(b) + if nextState != nil || err != nil { + return nextState, err + } + + switch { + case sliceContains(alphabetics, b): + return csiState.parser.ground, nil + case sliceContains(csiCollectables, b): + csiState.parser.collectParam() + return csiState, nil + case sliceContains(executors, b): + return csiState, csiState.parser.execute() + } + + return csiState, nil +} + +func (csiState csiParamState) Transition(s state) error { + logger.Infof("CsiParam::Transition %s --> %s", csiState.Name(), s.Name()) + csiState.baseState.Transition(s) + + switch s { + case csiState.parser.ground: + return csiState.parser.csiDispatch() + } + + return nil +} diff --git a/vendor/github.com/Azure/go-ansiterm/escape_intermediate_state.go b/vendor/github.com/Azure/go-ansiterm/escape_intermediate_state.go new file mode 100644 index 00000000..2189eb6b --- /dev/null +++ b/vendor/github.com/Azure/go-ansiterm/escape_intermediate_state.go @@ -0,0 +1,36 @@ +package ansiterm + +type escapeIntermediateState struct { + baseState +} + +func (escState escapeIntermediateState) Handle(b byte) (s state, e error) { + logger.Infof("escapeIntermediateState::Handle %#x", b) + nextState, err := escState.baseState.Handle(b) + if nextState != nil || err != nil { + return nextState, err + } + + switch { + case sliceContains(intermeds, b): + return 
escState, escState.parser.collectInter() + case sliceContains(executors, b): + return escState, escState.parser.execute() + case sliceContains(escapeIntermediateToGroundBytes, b): + return escState.parser.ground, nil + } + + return escState, nil +} + +func (escState escapeIntermediateState) Transition(s state) error { + logger.Infof("escapeIntermediateState::Transition %s --> %s", escState.Name(), s.Name()) + escState.baseState.Transition(s) + + switch s { + case escState.parser.ground: + return escState.parser.escDispatch() + } + + return nil +} diff --git a/vendor/github.com/Azure/go-ansiterm/escape_state.go b/vendor/github.com/Azure/go-ansiterm/escape_state.go new file mode 100644 index 00000000..7b1b9ad3 --- /dev/null +++ b/vendor/github.com/Azure/go-ansiterm/escape_state.go @@ -0,0 +1,47 @@ +package ansiterm + +type escapeState struct { + baseState +} + +func (escState escapeState) Handle(b byte) (s state, e error) { + logger.Infof("escapeState::Handle %#x", b) + nextState, err := escState.baseState.Handle(b) + if nextState != nil || err != nil { + return nextState, err + } + + switch { + case b == ANSI_ESCAPE_SECONDARY: + return escState.parser.csiEntry, nil + case b == ANSI_OSC_STRING_ENTRY: + return escState.parser.oscString, nil + case sliceContains(executors, b): + return escState, escState.parser.execute() + case sliceContains(escapeToGroundBytes, b): + return escState.parser.ground, nil + case sliceContains(intermeds, b): + return escState.parser.escapeIntermediate, nil + } + + return escState, nil +} + +func (escState escapeState) Transition(s state) error { + logger.Infof("Escape::Transition %s --> %s", escState.Name(), s.Name()) + escState.baseState.Transition(s) + + switch s { + case escState.parser.ground: + return escState.parser.escDispatch() + case escState.parser.escapeIntermediate: + return escState.parser.collectInter() + } + + return nil +} + +func (escState escapeState) Enter() error { + escState.parser.clear() + return nil +} diff --git 
a/vendor/github.com/Azure/go-ansiterm/event_handler.go b/vendor/github.com/Azure/go-ansiterm/event_handler.go new file mode 100644 index 00000000..98087b38 --- /dev/null +++ b/vendor/github.com/Azure/go-ansiterm/event_handler.go @@ -0,0 +1,90 @@ +package ansiterm + +type AnsiEventHandler interface { + // Print + Print(b byte) error + + // Execute C0 commands + Execute(b byte) error + + // CUrsor Up + CUU(int) error + + // CUrsor Down + CUD(int) error + + // CUrsor Forward + CUF(int) error + + // CUrsor Backward + CUB(int) error + + // Cursor to Next Line + CNL(int) error + + // Cursor to Previous Line + CPL(int) error + + // Cursor Horizontal position Absolute + CHA(int) error + + // Vertical line Position Absolute + VPA(int) error + + // CUrsor Position + CUP(int, int) error + + // Horizontal and Vertical Position (depends on PUM) + HVP(int, int) error + + // Text Cursor Enable Mode + DECTCEM(bool) error + + // Origin Mode + DECOM(bool) error + + // 132 Column Mode + DECCOLM(bool) error + + // Erase in Display + ED(int) error + + // Erase in Line + EL(int) error + + // Insert Line + IL(int) error + + // Delete Line + DL(int) error + + // Insert Character + ICH(int) error + + // Delete Character + DCH(int) error + + // Set Graphics Rendition + SGR([]int) error + + // Pan Down + SU(int) error + + // Pan Up + SD(int) error + + // Device Attributes + DA([]string) error + + // Set Top and Bottom Margins + DECSTBM(int, int) error + + // Index + IND() error + + // Reverse Index + RI() error + + // Flush updates from previous commands + Flush() error +} diff --git a/vendor/github.com/Azure/go-ansiterm/ground_state.go b/vendor/github.com/Azure/go-ansiterm/ground_state.go new file mode 100644 index 00000000..52451e94 --- /dev/null +++ b/vendor/github.com/Azure/go-ansiterm/ground_state.go @@ -0,0 +1,24 @@ +package ansiterm + +type groundState struct { + baseState +} + +func (gs groundState) Handle(b byte) (s state, e error) { + gs.parser.context.currentChar = b + + 
nextState, err := gs.baseState.Handle(b) + if nextState != nil || err != nil { + return nextState, err + } + + switch { + case sliceContains(printables, b): + return gs, gs.parser.print() + + case sliceContains(executors, b): + return gs, gs.parser.execute() + } + + return gs, nil +} diff --git a/vendor/github.com/Azure/go-ansiterm/osc_string_state.go b/vendor/github.com/Azure/go-ansiterm/osc_string_state.go new file mode 100644 index 00000000..24062d42 --- /dev/null +++ b/vendor/github.com/Azure/go-ansiterm/osc_string_state.go @@ -0,0 +1,31 @@ +package ansiterm + +type oscStringState struct { + baseState +} + +func (oscState oscStringState) Handle(b byte) (s state, e error) { + logger.Infof("OscString::Handle %#x", b) + nextState, err := oscState.baseState.Handle(b) + if nextState != nil || err != nil { + return nextState, err + } + + switch { + case isOscStringTerminator(b): + return oscState.parser.ground, nil + } + + return oscState, nil +} + +// See below for OSC string terminators for linux +// http://man7.org/linux/man-pages/man4/console_codes.4.html +func isOscStringTerminator(b byte) bool { + + if b == ANSI_BEL || b == 0x5C { + return true + } + + return false +} diff --git a/vendor/github.com/Azure/go-ansiterm/parser.go b/vendor/github.com/Azure/go-ansiterm/parser.go new file mode 100644 index 00000000..169f68db --- /dev/null +++ b/vendor/github.com/Azure/go-ansiterm/parser.go @@ -0,0 +1,136 @@ +package ansiterm + +import ( + "errors" + "io/ioutil" + "os" + + "github.com/Sirupsen/logrus" +) + +var logger *logrus.Logger + +type AnsiParser struct { + currState state + eventHandler AnsiEventHandler + context *ansiContext + csiEntry state + csiParam state + dcsEntry state + escape state + escapeIntermediate state + error state + ground state + oscString state + stateMap []state +} + +func CreateParser(initialState string, evtHandler AnsiEventHandler) *AnsiParser { + logFile := ioutil.Discard + + if isDebugEnv := os.Getenv(LogEnv); isDebugEnv == "1" { + 
logFile, _ = os.Create("ansiParser.log") + } + + logger = &logrus.Logger{ + Out: logFile, + Formatter: new(logrus.TextFormatter), + Level: logrus.InfoLevel, + } + + parser := &AnsiParser{ + eventHandler: evtHandler, + context: &ansiContext{}, + } + + parser.csiEntry = csiEntryState{baseState{name: "CsiEntry", parser: parser}} + parser.csiParam = csiParamState{baseState{name: "CsiParam", parser: parser}} + parser.dcsEntry = dcsEntryState{baseState{name: "DcsEntry", parser: parser}} + parser.escape = escapeState{baseState{name: "Escape", parser: parser}} + parser.escapeIntermediate = escapeIntermediateState{baseState{name: "EscapeIntermediate", parser: parser}} + parser.error = errorState{baseState{name: "Error", parser: parser}} + parser.ground = groundState{baseState{name: "Ground", parser: parser}} + parser.oscString = oscStringState{baseState{name: "OscString", parser: parser}} + + parser.stateMap = []state{ + parser.csiEntry, + parser.csiParam, + parser.dcsEntry, + parser.escape, + parser.escapeIntermediate, + parser.error, + parser.ground, + parser.oscString, + } + + parser.currState = getState(initialState, parser.stateMap) + + logger.Infof("CreateParser: parser %p", parser) + return parser +} + +func getState(name string, states []state) state { + for _, el := range states { + if el.Name() == name { + return el + } + } + + return nil +} + +func (ap *AnsiParser) Parse(bytes []byte) (int, error) { + for i, b := range bytes { + if err := ap.handle(b); err != nil { + return i, err + } + } + + return len(bytes), ap.eventHandler.Flush() +} + +func (ap *AnsiParser) handle(b byte) error { + ap.context.currentChar = b + newState, err := ap.currState.Handle(b) + if err != nil { + return err + } + + if newState == nil { + logger.Warning("newState is nil") + return errors.New("New state of 'nil' is invalid.") + } + + if newState != ap.currState { + if err := ap.changeState(newState); err != nil { + return err + } + } + + return nil +} + +func (ap *AnsiParser) 
changeState(newState state) error { + logger.Infof("ChangeState %s --> %s", ap.currState.Name(), newState.Name()) + + // Exit old state + if err := ap.currState.Exit(); err != nil { + logger.Infof("Exit state '%s' failed with : '%v'", ap.currState.Name(), err) + return err + } + + // Perform transition action + if err := ap.currState.Transition(newState); err != nil { + logger.Infof("Transition from '%s' to '%s' failed with: '%v'", ap.currState.Name(), newState.Name, err) + return err + } + + // Enter new state + if err := newState.Enter(); err != nil { + logger.Infof("Enter state '%s' failed with: '%v'", newState.Name(), err) + return err + } + + ap.currState = newState + return nil +} diff --git a/vendor/github.com/Azure/go-ansiterm/parser_action_helpers.go b/vendor/github.com/Azure/go-ansiterm/parser_action_helpers.go new file mode 100644 index 00000000..8b69a67a --- /dev/null +++ b/vendor/github.com/Azure/go-ansiterm/parser_action_helpers.go @@ -0,0 +1,103 @@ +package ansiterm + +import ( + "strconv" +) + +func parseParams(bytes []byte) ([]string, error) { + paramBuff := make([]byte, 0, 0) + params := []string{} + + for _, v := range bytes { + if v == ';' { + if len(paramBuff) > 0 { + // Completed parameter, append it to the list + s := string(paramBuff) + params = append(params, s) + paramBuff = make([]byte, 0, 0) + } + } else { + paramBuff = append(paramBuff, v) + } + } + + // Last parameter may not be terminated with ';' + if len(paramBuff) > 0 { + s := string(paramBuff) + params = append(params, s) + } + + logger.Infof("Parsed params: %v with length: %d", params, len(params)) + return params, nil +} + +func parseCmd(context ansiContext) (string, error) { + return string(context.currentChar), nil +} + +func getInt(params []string, dflt int) int { + i := getInts(params, 1, dflt)[0] + logger.Infof("getInt: %v", i) + return i +} + +func getInts(params []string, minCount int, dflt int) []int { + ints := []int{} + + for _, v := range params { + i, _ := 
strconv.Atoi(v) + // Zero is mapped to the default value in VT100. + if i == 0 { + i = dflt + } + ints = append(ints, i) + } + + if len(ints) < minCount { + remaining := minCount - len(ints) + for i := 0; i < remaining; i++ { + ints = append(ints, dflt) + } + } + + logger.Infof("getInts: %v", ints) + + return ints +} + +func (ap *AnsiParser) modeDispatch(param string, set bool) error { + switch param { + case "?3": + return ap.eventHandler.DECCOLM(set) + case "?6": + return ap.eventHandler.DECOM(set) + case "?25": + return ap.eventHandler.DECTCEM(set) + } + return nil +} + +func (ap *AnsiParser) hDispatch(params []string) error { + if len(params) == 1 { + return ap.modeDispatch(params[0], true) + } + + return nil +} + +func (ap *AnsiParser) lDispatch(params []string) error { + if len(params) == 1 { + return ap.modeDispatch(params[0], false) + } + + return nil +} + +func getEraseParam(params []string) int { + param := getInt(params, 0) + if param < 0 || 3 < param { + param = 0 + } + + return param +} diff --git a/vendor/github.com/Azure/go-ansiterm/parser_actions.go b/vendor/github.com/Azure/go-ansiterm/parser_actions.go new file mode 100644 index 00000000..58750a2d --- /dev/null +++ b/vendor/github.com/Azure/go-ansiterm/parser_actions.go @@ -0,0 +1,122 @@ +package ansiterm + +import ( + "fmt" +) + +func (ap *AnsiParser) collectParam() error { + currChar := ap.context.currentChar + logger.Infof("collectParam %#x", currChar) + ap.context.paramBuffer = append(ap.context.paramBuffer, currChar) + return nil +} + +func (ap *AnsiParser) collectInter() error { + currChar := ap.context.currentChar + logger.Infof("collectInter %#x", currChar) + ap.context.paramBuffer = append(ap.context.interBuffer, currChar) + return nil +} + +func (ap *AnsiParser) escDispatch() error { + cmd, _ := parseCmd(*ap.context) + intermeds := ap.context.interBuffer + logger.Infof("escDispatch currentChar: %#x", ap.context.currentChar) + logger.Infof("escDispatch: %v(%v)", cmd, intermeds) + + switch 
cmd { + case "D": // IND + return ap.eventHandler.IND() + case "E": // NEL, equivalent to CRLF + err := ap.eventHandler.Execute(ANSI_CARRIAGE_RETURN) + if err == nil { + err = ap.eventHandler.Execute(ANSI_LINE_FEED) + } + return err + case "M": // RI + return ap.eventHandler.RI() + } + + return nil +} + +func (ap *AnsiParser) csiDispatch() error { + cmd, _ := parseCmd(*ap.context) + params, _ := parseParams(ap.context.paramBuffer) + + logger.Infof("csiDispatch: %v(%v)", cmd, params) + + switch cmd { + case "@": + return ap.eventHandler.ICH(getInt(params, 1)) + case "A": + return ap.eventHandler.CUU(getInt(params, 1)) + case "B": + return ap.eventHandler.CUD(getInt(params, 1)) + case "C": + return ap.eventHandler.CUF(getInt(params, 1)) + case "D": + return ap.eventHandler.CUB(getInt(params, 1)) + case "E": + return ap.eventHandler.CNL(getInt(params, 1)) + case "F": + return ap.eventHandler.CPL(getInt(params, 1)) + case "G": + return ap.eventHandler.CHA(getInt(params, 1)) + case "H": + ints := getInts(params, 2, 1) + x, y := ints[0], ints[1] + return ap.eventHandler.CUP(x, y) + case "J": + param := getEraseParam(params) + return ap.eventHandler.ED(param) + case "K": + param := getEraseParam(params) + return ap.eventHandler.EL(param) + case "L": + return ap.eventHandler.IL(getInt(params, 1)) + case "M": + return ap.eventHandler.DL(getInt(params, 1)) + case "P": + return ap.eventHandler.DCH(getInt(params, 1)) + case "S": + return ap.eventHandler.SU(getInt(params, 1)) + case "T": + return ap.eventHandler.SD(getInt(params, 1)) + case "c": + return ap.eventHandler.DA(params) + case "d": + return ap.eventHandler.VPA(getInt(params, 1)) + case "f": + ints := getInts(params, 2, 1) + x, y := ints[0], ints[1] + return ap.eventHandler.HVP(x, y) + case "h": + return ap.hDispatch(params) + case "l": + return ap.lDispatch(params) + case "m": + return ap.eventHandler.SGR(getInts(params, 1, 0)) + case "r": + ints := getInts(params, 2, 1) + top, bottom := ints[0], ints[1] + return 
ap.eventHandler.DECSTBM(top, bottom) + default: + logger.Errorf(fmt.Sprintf("Unsupported CSI command: '%s', with full context: %v", cmd, ap.context)) + return nil + } + +} + +func (ap *AnsiParser) print() error { + return ap.eventHandler.Print(ap.context.currentChar) +} + +func (ap *AnsiParser) clear() error { + ap.context = &ansiContext{} + return nil +} + +func (ap *AnsiParser) execute() error { + return ap.eventHandler.Execute(ap.context.currentChar) +} diff --git a/vendor/github.com/Azure/go-ansiterm/parser_test.go b/vendor/github.com/Azure/go-ansiterm/parser_test.go new file mode 100644 index 00000000..cd4888ff --- /dev/null +++ b/vendor/github.com/Azure/go-ansiterm/parser_test.go @@ -0,0 +1,141 @@ +package ansiterm + +import ( + "fmt" + "testing" +) + +func TestStateTransitions(t *testing.T) { + stateTransitionHelper(t, "CsiEntry", "Ground", alphabetics) + stateTransitionHelper(t, "CsiEntry", "CsiParam", csiCollectables) + stateTransitionHelper(t, "Escape", "CsiEntry", []byte{ANSI_ESCAPE_SECONDARY}) + stateTransitionHelper(t, "Escape", "OscString", []byte{0x5D}) + stateTransitionHelper(t, "Escape", "Ground", escapeToGroundBytes) + stateTransitionHelper(t, "Escape", "EscapeIntermediate", intermeds) + stateTransitionHelper(t, "EscapeIntermediate", "EscapeIntermediate", intermeds) + stateTransitionHelper(t, "EscapeIntermediate", "EscapeIntermediate", executors) + stateTransitionHelper(t, "EscapeIntermediate", "Ground", escapeIntermediateToGroundBytes) + stateTransitionHelper(t, "OscString", "Ground", []byte{ANSI_BEL}) + stateTransitionHelper(t, "OscString", "Ground", []byte{0x5C}) + stateTransitionHelper(t, "Ground", "Ground", executors) +} + +func TestAnyToX(t *testing.T) { + anyToXHelper(t, []byte{ANSI_ESCAPE_PRIMARY}, "Escape") + anyToXHelper(t, []byte{DCS_ENTRY}, "DcsEntry") + anyToXHelper(t, []byte{OSC_STRING}, "OscString") + anyToXHelper(t, []byte{CSI_ENTRY}, "CsiEntry") + anyToXHelper(t, toGroundBytes, "Ground") +} + +func TestCollectCsiParams(t 
*testing.T) { + parser, _ := createTestParser("CsiEntry") + parser.Parse(csiCollectables) + + buffer := parser.context.paramBuffer + bufferCount := len(buffer) + + if bufferCount != len(csiCollectables) { + t.Errorf("Buffer: %v", buffer) + t.Errorf("CsiParams: %v", csiCollectables) + t.Errorf("Buffer count failure: %d != %d", bufferCount, len(csiParams)) + return + } + + for i, v := range csiCollectables { + if v != buffer[i] { + t.Errorf("Buffer: %v", buffer) + t.Errorf("CsiParams: %v", csiParams) + t.Errorf("Mismatch at buffer[%d] = %d", i, buffer[i]) + } + } +} + +func TestParseParams(t *testing.T) { + parseParamsHelper(t, []byte{}, []string{}) + parseParamsHelper(t, []byte{';'}, []string{}) + parseParamsHelper(t, []byte{';', ';'}, []string{}) + parseParamsHelper(t, []byte{'7'}, []string{"7"}) + parseParamsHelper(t, []byte{'7', ';'}, []string{"7"}) + parseParamsHelper(t, []byte{'7', ';', ';'}, []string{"7"}) + parseParamsHelper(t, []byte{'7', ';', ';', '8'}, []string{"7", "8"}) + parseParamsHelper(t, []byte{'7', ';', '8', ';'}, []string{"7", "8"}) + parseParamsHelper(t, []byte{'7', ';', ';', '8', ';', ';'}, []string{"7", "8"}) + parseParamsHelper(t, []byte{'7', '8'}, []string{"78"}) + parseParamsHelper(t, []byte{'7', '8', ';'}, []string{"78"}) + parseParamsHelper(t, []byte{'7', '8', ';', '9', '0'}, []string{"78", "90"}) + parseParamsHelper(t, []byte{'7', '8', ';', ';', '9', '0'}, []string{"78", "90"}) + parseParamsHelper(t, []byte{'7', '8', ';', '9', '0', ';'}, []string{"78", "90"}) + parseParamsHelper(t, []byte{'7', '8', ';', '9', '0', ';', ';'}, []string{"78", "90"}) +} + +func TestCursor(t *testing.T) { + cursorSingleParamHelper(t, 'A', "CUU") + cursorSingleParamHelper(t, 'B', "CUD") + cursorSingleParamHelper(t, 'C', "CUF") + cursorSingleParamHelper(t, 'D', "CUB") + cursorSingleParamHelper(t, 'E', "CNL") + cursorSingleParamHelper(t, 'F', "CPL") + cursorSingleParamHelper(t, 'G', "CHA") + cursorTwoParamHelper(t, 'H', "CUP") + cursorTwoParamHelper(t, 'f', "HVP") 
+ funcCallParamHelper(t, []byte{'?', '2', '5', 'h'}, "CsiEntry", "Ground", []string{"DECTCEM([true])"}) + funcCallParamHelper(t, []byte{'?', '2', '5', 'l'}, "CsiEntry", "Ground", []string{"DECTCEM([false])"}) +} + +func TestErase(t *testing.T) { + // Erase in Display + eraseHelper(t, 'J', "ED") + + // Erase in Line + eraseHelper(t, 'K', "EL") +} + +func TestSelectGraphicRendition(t *testing.T) { + funcCallParamHelper(t, []byte{'m'}, "CsiEntry", "Ground", []string{"SGR([0])"}) + funcCallParamHelper(t, []byte{'0', 'm'}, "CsiEntry", "Ground", []string{"SGR([0])"}) + funcCallParamHelper(t, []byte{'0', ';', '1', 'm'}, "CsiEntry", "Ground", []string{"SGR([0 1])"}) + funcCallParamHelper(t, []byte{'0', ';', '1', ';', '2', 'm'}, "CsiEntry", "Ground", []string{"SGR([0 1 2])"}) +} + +func TestScroll(t *testing.T) { + scrollHelper(t, 'S', "SU") + scrollHelper(t, 'T', "SD") +} + +func TestPrint(t *testing.T) { + parser, evtHandler := createTestParser("Ground") + parser.Parse(printables) + validateState(t, parser.currState, "Ground") + + for i, v := range printables { + expectedCall := fmt.Sprintf("Print([%s])", string(v)) + actualCall := evtHandler.FunctionCalls[i] + if actualCall != expectedCall { + t.Errorf("Actual != Expected: %v != %v at %d", actualCall, expectedCall, i) + } + } +} + +func TestClear(t *testing.T) { + p, _ := createTestParser("Ground") + fillContext(p.context) + p.clear() + validateEmptyContext(t, p.context) +} + +func TestClearOnStateChange(t *testing.T) { + clearOnStateChangeHelper(t, "Ground", "Escape", []byte{ANSI_ESCAPE_PRIMARY}) + clearOnStateChangeHelper(t, "Ground", "CsiEntry", []byte{CSI_ENTRY}) +} + +func TestC0(t *testing.T) { + expectedCall := "Execute([" + string(ANSI_LINE_FEED) + "])" + c0Helper(t, []byte{ANSI_LINE_FEED}, "Ground", []string{expectedCall}) + expectedCall = "Execute([" + string(ANSI_CARRIAGE_RETURN) + "])" + c0Helper(t, []byte{ANSI_CARRIAGE_RETURN}, "Ground", []string{expectedCall}) +} + +func TestEscDispatch(t *testing.T) { + 
funcCallParamHelper(t, []byte{'M'}, "Escape", "Ground", []string{"RI([])"}) +} diff --git a/vendor/github.com/Azure/go-ansiterm/parser_test_helpers_test.go b/vendor/github.com/Azure/go-ansiterm/parser_test_helpers_test.go new file mode 100644 index 00000000..562f215d --- /dev/null +++ b/vendor/github.com/Azure/go-ansiterm/parser_test_helpers_test.go @@ -0,0 +1,114 @@ +package ansiterm + +import ( + "fmt" + "testing" +) + +func getStateNames() []string { + parser, _ := createTestParser("Ground") + + stateNames := []string{} + for _, state := range parser.stateMap { + stateNames = append(stateNames, state.Name()) + } + + return stateNames +} + +func stateTransitionHelper(t *testing.T, start string, end string, bytes []byte) { + for _, b := range bytes { + bytes := []byte{byte(b)} + parser, _ := createTestParser(start) + parser.Parse(bytes) + validateState(t, parser.currState, end) + } +} + +func anyToXHelper(t *testing.T, bytes []byte, expectedState string) { + for _, s := range getStateNames() { + stateTransitionHelper(t, s, expectedState, bytes) + } +} + +func funcCallParamHelper(t *testing.T, bytes []byte, start string, expected string, expectedCalls []string) { + parser, evtHandler := createTestParser(start) + parser.Parse(bytes) + validateState(t, parser.currState, expected) + validateFuncCalls(t, evtHandler.FunctionCalls, expectedCalls) +} + +func parseParamsHelper(t *testing.T, bytes []byte, expectedParams []string) { + params, err := parseParams(bytes) + + if err != nil { + t.Errorf("Parameter parse error: %v", err) + return + } + + if len(params) != len(expectedParams) { + t.Errorf("Parsed parameters: %v", params) + t.Errorf("Expected parameters: %v", expectedParams) + t.Errorf("Parameter length failure: %d != %d", len(params), len(expectedParams)) + return + } + + for i, v := range expectedParams { + if v != params[i] { + t.Errorf("Parsed parameters: %v", params) + t.Errorf("Expected parameters: %v", expectedParams) + t.Errorf("Parameter parse failure: %s 
!= %s at position %d", v, params[i], i) + } + } +} + +func cursorSingleParamHelper(t *testing.T, command byte, funcName string) { + funcCallParamHelper(t, []byte{command}, "CsiEntry", "Ground", []string{fmt.Sprintf("%s([1])", funcName)}) + funcCallParamHelper(t, []byte{'0', command}, "CsiEntry", "Ground", []string{fmt.Sprintf("%s([1])", funcName)}) + funcCallParamHelper(t, []byte{'2', command}, "CsiEntry", "Ground", []string{fmt.Sprintf("%s([2])", funcName)}) + funcCallParamHelper(t, []byte{'2', '3', command}, "CsiEntry", "Ground", []string{fmt.Sprintf("%s([23])", funcName)}) + funcCallParamHelper(t, []byte{'2', ';', '3', command}, "CsiEntry", "Ground", []string{fmt.Sprintf("%s([2])", funcName)}) + funcCallParamHelper(t, []byte{'2', ';', '3', ';', '4', command}, "CsiEntry", "Ground", []string{fmt.Sprintf("%s([2])", funcName)}) +} + +func cursorTwoParamHelper(t *testing.T, command byte, funcName string) { + funcCallParamHelper(t, []byte{command}, "CsiEntry", "Ground", []string{fmt.Sprintf("%s([1 1])", funcName)}) + funcCallParamHelper(t, []byte{'0', command}, "CsiEntry", "Ground", []string{fmt.Sprintf("%s([1 1])", funcName)}) + funcCallParamHelper(t, []byte{'2', command}, "CsiEntry", "Ground", []string{fmt.Sprintf("%s([2 1])", funcName)}) + funcCallParamHelper(t, []byte{'2', '3', command}, "CsiEntry", "Ground", []string{fmt.Sprintf("%s([23 1])", funcName)}) + funcCallParamHelper(t, []byte{'2', ';', '3', command}, "CsiEntry", "Ground", []string{fmt.Sprintf("%s([2 3])", funcName)}) + funcCallParamHelper(t, []byte{'2', ';', '3', ';', '4', command}, "CsiEntry", "Ground", []string{fmt.Sprintf("%s([2 3])", funcName)}) +} + +func eraseHelper(t *testing.T, command byte, funcName string) { + funcCallParamHelper(t, []byte{command}, "CsiEntry", "Ground", []string{fmt.Sprintf("%s([0])", funcName)}) + funcCallParamHelper(t, []byte{'0', command}, "CsiEntry", "Ground", []string{fmt.Sprintf("%s([0])", funcName)}) + funcCallParamHelper(t, []byte{'1', command}, "CsiEntry", "Ground", 
[]string{fmt.Sprintf("%s([1])", funcName)}) + funcCallParamHelper(t, []byte{'2', command}, "CsiEntry", "Ground", []string{fmt.Sprintf("%s([2])", funcName)}) + funcCallParamHelper(t, []byte{'3', command}, "CsiEntry", "Ground", []string{fmt.Sprintf("%s([3])", funcName)}) + funcCallParamHelper(t, []byte{'4', command}, "CsiEntry", "Ground", []string{fmt.Sprintf("%s([0])", funcName)}) + funcCallParamHelper(t, []byte{'1', ';', '2', command}, "CsiEntry", "Ground", []string{fmt.Sprintf("%s([1])", funcName)}) +} + +func scrollHelper(t *testing.T, command byte, funcName string) { + funcCallParamHelper(t, []byte{command}, "CsiEntry", "Ground", []string{fmt.Sprintf("%s([1])", funcName)}) + funcCallParamHelper(t, []byte{'0', command}, "CsiEntry", "Ground", []string{fmt.Sprintf("%s([1])", funcName)}) + funcCallParamHelper(t, []byte{'1', command}, "CsiEntry", "Ground", []string{fmt.Sprintf("%s([1])", funcName)}) + funcCallParamHelper(t, []byte{'5', command}, "CsiEntry", "Ground", []string{fmt.Sprintf("%s([5])", funcName)}) + funcCallParamHelper(t, []byte{'4', ';', '6', command}, "CsiEntry", "Ground", []string{fmt.Sprintf("%s([4])", funcName)}) +} + +func clearOnStateChangeHelper(t *testing.T, start string, end string, bytes []byte) { + p, _ := createTestParser(start) + fillContext(p.context) + p.Parse(bytes) + validateState(t, p.currState, end) + validateEmptyContext(t, p.context) +} + +func c0Helper(t *testing.T, bytes []byte, expectedState string, expectedCalls []string) { + parser, evtHandler := createTestParser("Ground") + parser.Parse(bytes) + validateState(t, parser.currState, expectedState) + validateFuncCalls(t, evtHandler.FunctionCalls, expectedCalls) +} diff --git a/vendor/github.com/Azure/go-ansiterm/parser_test_utilities_test.go b/vendor/github.com/Azure/go-ansiterm/parser_test_utilities_test.go new file mode 100644 index 00000000..78b885ca --- /dev/null +++ b/vendor/github.com/Azure/go-ansiterm/parser_test_utilities_test.go @@ -0,0 +1,66 @@ +package ansiterm + 
+import ( + "testing" +) + +func createTestParser(s string) (*AnsiParser, *TestAnsiEventHandler) { + evtHandler := CreateTestAnsiEventHandler() + parser := CreateParser(s, evtHandler) + + return parser, evtHandler +} + +func validateState(t *testing.T, actualState state, expectedStateName string) { + actualName := "Nil" + + if actualState != nil { + actualName = actualState.Name() + } + + if actualName != expectedStateName { + t.Errorf("Invalid state: '%s' != '%s'", actualName, expectedStateName) + } +} + +func validateFuncCalls(t *testing.T, actualCalls []string, expectedCalls []string) { + actualCount := len(actualCalls) + expectedCount := len(expectedCalls) + + if actualCount != expectedCount { + t.Errorf("Actual calls: %v", actualCalls) + t.Errorf("Expected calls: %v", expectedCalls) + t.Errorf("Call count error: %d != %d", actualCount, expectedCount) + return + } + + for i, v := range actualCalls { + if v != expectedCalls[i] { + t.Errorf("Actual calls: %v", actualCalls) + t.Errorf("Expected calls: %v", expectedCalls) + t.Errorf("Mismatched calls: %s != %s with lengths %d and %d", v, expectedCalls[i], len(v), len(expectedCalls[i])) + } + } +} + +func fillContext(context *ansiContext) { + context.currentChar = 'A' + context.paramBuffer = []byte{'C', 'D', 'E'} + context.interBuffer = []byte{'F', 'G', 'H'} +} + +func validateEmptyContext(t *testing.T, context *ansiContext) { + var expectedCurrChar byte = 0x0 + if context.currentChar != expectedCurrChar { + t.Errorf("Currentchar mismatch '%#x' != '%#x'", context.currentChar, expectedCurrChar) + } + + if len(context.paramBuffer) != 0 { + t.Errorf("Non-empty parameter buffer: %v", context.paramBuffer) + } + + if len(context.paramBuffer) != 0 { + t.Errorf("Non-empty intermediate buffer: %v", context.interBuffer) + } + +} diff --git a/vendor/github.com/Azure/go-ansiterm/states.go b/vendor/github.com/Azure/go-ansiterm/states.go new file mode 100644 index 00000000..f2ea1fcd --- /dev/null +++ 
b/vendor/github.com/Azure/go-ansiterm/states.go @@ -0,0 +1,71 @@ +package ansiterm + +type stateID int + +type state interface { + Enter() error + Exit() error + Handle(byte) (state, error) + Name() string + Transition(state) error +} + +type baseState struct { + name string + parser *AnsiParser +} + +func (base baseState) Enter() error { + return nil +} + +func (base baseState) Exit() error { + return nil +} + +func (base baseState) Handle(b byte) (s state, e error) { + + switch { + case b == CSI_ENTRY: + return base.parser.csiEntry, nil + case b == DCS_ENTRY: + return base.parser.dcsEntry, nil + case b == ANSI_ESCAPE_PRIMARY: + return base.parser.escape, nil + case b == OSC_STRING: + return base.parser.oscString, nil + case sliceContains(toGroundBytes, b): + return base.parser.ground, nil + } + + return nil, nil +} + +func (base baseState) Name() string { + return base.name +} + +func (base baseState) Transition(s state) error { + if s == base.parser.ground { + execBytes := []byte{0x18} + execBytes = append(execBytes, 0x1A) + execBytes = append(execBytes, getByteRange(0x80, 0x8F)...) + execBytes = append(execBytes, getByteRange(0x91, 0x97)...) 
+ execBytes = append(execBytes, 0x99) + execBytes = append(execBytes, 0x9A) + + if sliceContains(execBytes, base.parser.context.currentChar) { + return base.parser.execute() + } + } + + return nil +} + +type dcsEntryState struct { + baseState +} + +type errorState struct { + baseState +} diff --git a/vendor/github.com/Azure/go-ansiterm/test_event_handler_test.go b/vendor/github.com/Azure/go-ansiterm/test_event_handler_test.go new file mode 100644 index 00000000..60f9f30b --- /dev/null +++ b/vendor/github.com/Azure/go-ansiterm/test_event_handler_test.go @@ -0,0 +1,173 @@ +package ansiterm + +import ( + "fmt" + "strconv" +) + +type TestAnsiEventHandler struct { + FunctionCalls []string +} + +func CreateTestAnsiEventHandler() *TestAnsiEventHandler { + evtHandler := TestAnsiEventHandler{} + evtHandler.FunctionCalls = make([]string, 0) + return &evtHandler +} + +func (h *TestAnsiEventHandler) recordCall(call string, params []string) { + s := fmt.Sprintf("%s(%v)", call, params) + h.FunctionCalls = append(h.FunctionCalls, s) +} + +func (h *TestAnsiEventHandler) Print(b byte) error { + h.recordCall("Print", []string{string(b)}) + return nil +} + +func (h *TestAnsiEventHandler) Execute(b byte) error { + h.recordCall("Execute", []string{string(b)}) + return nil +} + +func (h *TestAnsiEventHandler) CUU(param int) error { + h.recordCall("CUU", []string{strconv.Itoa(param)}) + return nil +} + +func (h *TestAnsiEventHandler) CUD(param int) error { + h.recordCall("CUD", []string{strconv.Itoa(param)}) + return nil +} + +func (h *TestAnsiEventHandler) CUF(param int) error { + h.recordCall("CUF", []string{strconv.Itoa(param)}) + return nil +} + +func (h *TestAnsiEventHandler) CUB(param int) error { + h.recordCall("CUB", []string{strconv.Itoa(param)}) + return nil +} + +func (h *TestAnsiEventHandler) CNL(param int) error { + h.recordCall("CNL", []string{strconv.Itoa(param)}) + return nil +} + +func (h *TestAnsiEventHandler) CPL(param int) error { + h.recordCall("CPL", 
[]string{strconv.Itoa(param)}) + return nil +} + +func (h *TestAnsiEventHandler) CHA(param int) error { + h.recordCall("CHA", []string{strconv.Itoa(param)}) + return nil +} + +func (h *TestAnsiEventHandler) VPA(param int) error { + h.recordCall("VPA", []string{strconv.Itoa(param)}) + return nil +} + +func (h *TestAnsiEventHandler) CUP(x int, y int) error { + xS, yS := strconv.Itoa(x), strconv.Itoa(y) + h.recordCall("CUP", []string{xS, yS}) + return nil +} + +func (h *TestAnsiEventHandler) HVP(x int, y int) error { + xS, yS := strconv.Itoa(x), strconv.Itoa(y) + h.recordCall("HVP", []string{xS, yS}) + return nil +} + +func (h *TestAnsiEventHandler) DECTCEM(visible bool) error { + h.recordCall("DECTCEM", []string{strconv.FormatBool(visible)}) + return nil +} + +func (h *TestAnsiEventHandler) DECOM(visible bool) error { + h.recordCall("DECOM", []string{strconv.FormatBool(visible)}) + return nil +} + +func (h *TestAnsiEventHandler) DECCOLM(use132 bool) error { + h.recordCall("DECOLM", []string{strconv.FormatBool(use132)}) + return nil +} + +func (h *TestAnsiEventHandler) ED(param int) error { + h.recordCall("ED", []string{strconv.Itoa(param)}) + return nil +} + +func (h *TestAnsiEventHandler) EL(param int) error { + h.recordCall("EL", []string{strconv.Itoa(param)}) + return nil +} + +func (h *TestAnsiEventHandler) IL(param int) error { + h.recordCall("IL", []string{strconv.Itoa(param)}) + return nil +} + +func (h *TestAnsiEventHandler) DL(param int) error { + h.recordCall("DL", []string{strconv.Itoa(param)}) + return nil +} + +func (h *TestAnsiEventHandler) ICH(param int) error { + h.recordCall("ICH", []string{strconv.Itoa(param)}) + return nil +} + +func (h *TestAnsiEventHandler) DCH(param int) error { + h.recordCall("DCH", []string{strconv.Itoa(param)}) + return nil +} + +func (h *TestAnsiEventHandler) SGR(params []int) error { + strings := []string{} + for _, v := range params { + strings = append(strings, strconv.Itoa(v)) + } + + h.recordCall("SGR", strings) + 
return nil +} + +func (h *TestAnsiEventHandler) SU(param int) error { + h.recordCall("SU", []string{strconv.Itoa(param)}) + return nil +} + +func (h *TestAnsiEventHandler) SD(param int) error { + h.recordCall("SD", []string{strconv.Itoa(param)}) + return nil +} + +func (h *TestAnsiEventHandler) DA(params []string) error { + h.recordCall("DA", params) + return nil +} + +func (h *TestAnsiEventHandler) DECSTBM(top int, bottom int) error { + topS, bottomS := strconv.Itoa(top), strconv.Itoa(bottom) + h.recordCall("DECSTBM", []string{topS, bottomS}) + return nil +} + +func (h *TestAnsiEventHandler) RI() error { + h.recordCall("RI", nil) + return nil +} + +func (h *TestAnsiEventHandler) IND() error { + h.recordCall("IND", nil) + return nil +} + +func (h *TestAnsiEventHandler) Flush() error { + return nil +} diff --git a/vendor/github.com/Azure/go-ansiterm/utilities.go b/vendor/github.com/Azure/go-ansiterm/utilities.go new file mode 100644 index 00000000..39211449 --- /dev/null +++ b/vendor/github.com/Azure/go-ansiterm/utilities.go @@ -0,0 +1,21 @@ +package ansiterm + +import ( + "strconv" +) + +func sliceContains(bytes []byte, b byte) bool { + for _, v := range bytes { + if v == b { + return true + } + } + + return false +} + +func convertBytesToInteger(bytes []byte) int { + s := string(bytes) + i, _ := strconv.Atoi(s) + return i +} diff --git a/vendor/github.com/Azure/go-ansiterm/winterm/ansi.go b/vendor/github.com/Azure/go-ansiterm/winterm/ansi.go new file mode 100644 index 00000000..daf2f069 --- /dev/null +++ b/vendor/github.com/Azure/go-ansiterm/winterm/ansi.go @@ -0,0 +1,182 @@ +// +build windows + +package winterm + +import ( + "fmt" + "os" + "strconv" + "strings" + "syscall" + + "github.com/Azure/go-ansiterm" +) + +// Windows keyboard constants +// See https://msdn.microsoft.com/en-us/library/windows/desktop/dd375731(v=vs.85).aspx. 
+const ( + VK_PRIOR = 0x21 // PAGE UP key + VK_NEXT = 0x22 // PAGE DOWN key + VK_END = 0x23 // END key + VK_HOME = 0x24 // HOME key + VK_LEFT = 0x25 // LEFT ARROW key + VK_UP = 0x26 // UP ARROW key + VK_RIGHT = 0x27 // RIGHT ARROW key + VK_DOWN = 0x28 // DOWN ARROW key + VK_SELECT = 0x29 // SELECT key + VK_PRINT = 0x2A // PRINT key + VK_EXECUTE = 0x2B // EXECUTE key + VK_SNAPSHOT = 0x2C // PRINT SCREEN key + VK_INSERT = 0x2D // INS key + VK_DELETE = 0x2E // DEL key + VK_HELP = 0x2F // HELP key + VK_F1 = 0x70 // F1 key + VK_F2 = 0x71 // F2 key + VK_F3 = 0x72 // F3 key + VK_F4 = 0x73 // F4 key + VK_F5 = 0x74 // F5 key + VK_F6 = 0x75 // F6 key + VK_F7 = 0x76 // F7 key + VK_F8 = 0x77 // F8 key + VK_F9 = 0x78 // F9 key + VK_F10 = 0x79 // F10 key + VK_F11 = 0x7A // F11 key + VK_F12 = 0x7B // F12 key + + RIGHT_ALT_PRESSED = 0x0001 + LEFT_ALT_PRESSED = 0x0002 + RIGHT_CTRL_PRESSED = 0x0004 + LEFT_CTRL_PRESSED = 0x0008 + SHIFT_PRESSED = 0x0010 + NUMLOCK_ON = 0x0020 + SCROLLLOCK_ON = 0x0040 + CAPSLOCK_ON = 0x0080 + ENHANCED_KEY = 0x0100 +) + +type ansiCommand struct { + CommandBytes []byte + Command string + Parameters []string + IsSpecial bool +} + +func newAnsiCommand(command []byte) *ansiCommand { + + if isCharacterSelectionCmdChar(command[1]) { + // Is Character Set Selection commands + return &ansiCommand{ + CommandBytes: command, + Command: string(command), + IsSpecial: true, + } + } + + // last char is command character + lastCharIndex := len(command) - 1 + + ac := &ansiCommand{ + CommandBytes: command, + Command: string(command[lastCharIndex]), + IsSpecial: false, + } + + // more than a single escape + if lastCharIndex != 0 { + start := 1 + // skip if double char escape sequence + if command[0] == ansiterm.ANSI_ESCAPE_PRIMARY && command[1] == ansiterm.ANSI_ESCAPE_SECONDARY { + start++ + } + // convert this to GetNextParam method + ac.Parameters = strings.Split(string(command[start:lastCharIndex]), ansiterm.ANSI_PARAMETER_SEP) + } + + return ac +} + +func (ac 
*ansiCommand) paramAsSHORT(index int, defaultValue int16) int16 { + if index < 0 || index >= len(ac.Parameters) { + return defaultValue + } + + param, err := strconv.ParseInt(ac.Parameters[index], 10, 16) + if err != nil { + return defaultValue + } + + return int16(param) +} + +func (ac *ansiCommand) String() string { + return fmt.Sprintf("0x%v \"%v\" (\"%v\")", + bytesToHex(ac.CommandBytes), + ac.Command, + strings.Join(ac.Parameters, "\",\"")) +} + +// isAnsiCommandChar returns true if the passed byte falls within the range of ANSI commands. +// See http://manpages.ubuntu.com/manpages/intrepid/man4/console_codes.4.html. +func isAnsiCommandChar(b byte) bool { + switch { + case ansiterm.ANSI_COMMAND_FIRST <= b && b <= ansiterm.ANSI_COMMAND_LAST && b != ansiterm.ANSI_ESCAPE_SECONDARY: + return true + case b == ansiterm.ANSI_CMD_G1 || b == ansiterm.ANSI_CMD_OSC || b == ansiterm.ANSI_CMD_DECPAM || b == ansiterm.ANSI_CMD_DECPNM: + // non-CSI escape sequence terminator + return true + case b == ansiterm.ANSI_CMD_STR_TERM || b == ansiterm.ANSI_BEL: + // String escape sequence terminator + return true + } + return false +} + +func isXtermOscSequence(command []byte, current byte) bool { + return (len(command) >= 2 && command[0] == ansiterm.ANSI_ESCAPE_PRIMARY && command[1] == ansiterm.ANSI_CMD_OSC && current != ansiterm.ANSI_BEL) +} + +func isCharacterSelectionCmdChar(b byte) bool { + return (b == ansiterm.ANSI_CMD_G0 || b == ansiterm.ANSI_CMD_G1 || b == ansiterm.ANSI_CMD_G2 || b == ansiterm.ANSI_CMD_G3) +} + +// bytesToHex converts a slice of bytes to a human-readable string. +func bytesToHex(b []byte) string { + hex := make([]string, len(b)) + for i, ch := range b { + hex[i] = fmt.Sprintf("%X", ch) + } + return strings.Join(hex, "") +} + +// ensureInRange adjusts the passed value, if necessary, to ensure it is within +// the passed min / max range. 
+func ensureInRange(n int16, min int16, max int16) int16 { + if n < min { + return min + } else if n > max { + return max + } else { + return n + } +} + +func GetStdFile(nFile int) (*os.File, uintptr) { + var file *os.File + switch nFile { + case syscall.STD_INPUT_HANDLE: + file = os.Stdin + case syscall.STD_OUTPUT_HANDLE: + file = os.Stdout + case syscall.STD_ERROR_HANDLE: + file = os.Stderr + default: + panic(fmt.Errorf("Invalid standard handle identifier: %v", nFile)) + } + + fd, err := syscall.GetStdHandle(nFile) + if err != nil { + panic(fmt.Errorf("Invalid standard handle indentifier: %v -- %v", nFile, err)) + } + + return file, uintptr(fd) +} diff --git a/vendor/github.com/Azure/go-ansiterm/winterm/api.go b/vendor/github.com/Azure/go-ansiterm/winterm/api.go new file mode 100644 index 00000000..462d92f8 --- /dev/null +++ b/vendor/github.com/Azure/go-ansiterm/winterm/api.go @@ -0,0 +1,322 @@ +// +build windows + +package winterm + +import ( + "fmt" + "syscall" + "unsafe" +) + +//=========================================================================================================== +// IMPORTANT NOTE: +// +// The methods below make extensive use of the "unsafe" package to obtain the required pointers. +// Beginning in Go 1.3, the garbage collector may release local variables (e.g., incoming arguments, stack +// variables) the pointers reference *before* the API completes. +// +// As a result, in those cases, the code must hint that the variables remain in active by invoking the +// dummy method "use" (see below). Newer versions of Go are planned to change the mechanism to no longer +// require unsafe pointers. 
+// +// If you add or modify methods, ENSURE protection of local variables through the "use" builtin to inform +// the garbage collector the variables remain in use if: +// +// -- The value is not a pointer (e.g., int32, struct) +// -- The value is not referenced by the method after passing the pointer to Windows +// +// See http://golang.org/doc/go1.3. +//=========================================================================================================== + +var ( + kernel32DLL = syscall.NewLazyDLL("kernel32.dll") + + getConsoleCursorInfoProc = kernel32DLL.NewProc("GetConsoleCursorInfo") + setConsoleCursorInfoProc = kernel32DLL.NewProc("SetConsoleCursorInfo") + setConsoleCursorPositionProc = kernel32DLL.NewProc("SetConsoleCursorPosition") + setConsoleModeProc = kernel32DLL.NewProc("SetConsoleMode") + getConsoleScreenBufferInfoProc = kernel32DLL.NewProc("GetConsoleScreenBufferInfo") + setConsoleScreenBufferSizeProc = kernel32DLL.NewProc("SetConsoleScreenBufferSize") + scrollConsoleScreenBufferProc = kernel32DLL.NewProc("ScrollConsoleScreenBufferA") + setConsoleTextAttributeProc = kernel32DLL.NewProc("SetConsoleTextAttribute") + setConsoleWindowInfoProc = kernel32DLL.NewProc("SetConsoleWindowInfo") + writeConsoleOutputProc = kernel32DLL.NewProc("WriteConsoleOutputW") + readConsoleInputProc = kernel32DLL.NewProc("ReadConsoleInputW") + waitForSingleObjectProc = kernel32DLL.NewProc("WaitForSingleObject") +) + +// Windows Console constants +const ( + // Console modes + // See https://msdn.microsoft.com/en-us/library/windows/desktop/ms686033(v=vs.85).aspx. 
+ ENABLE_PROCESSED_INPUT = 0x0001 + ENABLE_LINE_INPUT = 0x0002 + ENABLE_ECHO_INPUT = 0x0004 + ENABLE_WINDOW_INPUT = 0x0008 + ENABLE_MOUSE_INPUT = 0x0010 + ENABLE_INSERT_MODE = 0x0020 + ENABLE_QUICK_EDIT_MODE = 0x0040 + ENABLE_EXTENDED_FLAGS = 0x0080 + + ENABLE_PROCESSED_OUTPUT = 0x0001 + ENABLE_WRAP_AT_EOL_OUTPUT = 0x0002 + + // Character attributes + // Note: + // -- The attributes are combined to produce various colors (e.g., Blue + Green will create Cyan). + // Clearing all foreground or background colors results in black; setting all creates white. + // See https://msdn.microsoft.com/en-us/library/windows/desktop/ms682088(v=vs.85).aspx#_win32_character_attributes. + FOREGROUND_BLUE uint16 = 0x0001 + FOREGROUND_GREEN uint16 = 0x0002 + FOREGROUND_RED uint16 = 0x0004 + FOREGROUND_INTENSITY uint16 = 0x0008 + FOREGROUND_MASK uint16 = 0x000F + + BACKGROUND_BLUE uint16 = 0x0010 + BACKGROUND_GREEN uint16 = 0x0020 + BACKGROUND_RED uint16 = 0x0040 + BACKGROUND_INTENSITY uint16 = 0x0080 + BACKGROUND_MASK uint16 = 0x00F0 + + COMMON_LVB_MASK uint16 = 0xFF00 + COMMON_LVB_REVERSE_VIDEO uint16 = 0x4000 + COMMON_LVB_UNDERSCORE uint16 = 0x8000 + + // Input event types + // See https://msdn.microsoft.com/en-us/library/windows/desktop/ms683499(v=vs.85).aspx. 
+ KEY_EVENT = 0x0001 + MOUSE_EVENT = 0x0002 + WINDOW_BUFFER_SIZE_EVENT = 0x0004 + MENU_EVENT = 0x0008 + FOCUS_EVENT = 0x0010 + + // WaitForSingleObject return codes + WAIT_ABANDONED = 0x00000080 + WAIT_FAILED = 0xFFFFFFFF + WAIT_SIGNALED = 0x0000000 + WAIT_TIMEOUT = 0x00000102 + + // WaitForSingleObject wait duration + WAIT_INFINITE = 0xFFFFFFFF + WAIT_ONE_SECOND = 1000 + WAIT_HALF_SECOND = 500 + WAIT_QUARTER_SECOND = 250 +) + +// Windows API Console types +// -- See https://msdn.microsoft.com/en-us/library/windows/desktop/ms682101(v=vs.85).aspx for Console specific types (e.g., COORD) +// -- See https://msdn.microsoft.com/en-us/library/aa296569(v=vs.60).aspx for comments on alignment +type ( + CHAR_INFO struct { + UnicodeChar uint16 + Attributes uint16 + } + + CONSOLE_CURSOR_INFO struct { + Size uint32 + Visible int32 + } + + CONSOLE_SCREEN_BUFFER_INFO struct { + Size COORD + CursorPosition COORD + Attributes uint16 + Window SMALL_RECT + MaximumWindowSize COORD + } + + COORD struct { + X int16 + Y int16 + } + + SMALL_RECT struct { + Left int16 + Top int16 + Right int16 + Bottom int16 + } + + // INPUT_RECORD is a C/C++ union of which KEY_EVENT_RECORD is one case, it is also the largest + // See https://msdn.microsoft.com/en-us/library/windows/desktop/ms683499(v=vs.85).aspx. + INPUT_RECORD struct { + EventType uint16 + KeyEvent KEY_EVENT_RECORD + } + + KEY_EVENT_RECORD struct { + KeyDown int32 + RepeatCount uint16 + VirtualKeyCode uint16 + VirtualScanCode uint16 + UnicodeChar uint16 + ControlKeyState uint32 + } + + WINDOW_BUFFER_SIZE struct { + Size COORD + } +) + +// boolToBOOL converts a Go bool into a Windows int32. +func boolToBOOL(f bool) int32 { + if f { + return int32(1) + } else { + return int32(0) + } +} + +// GetConsoleCursorInfo retrieves information about the size and visiblity of the console cursor. +// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms683163(v=vs.85).aspx. 
+func GetConsoleCursorInfo(handle uintptr, cursorInfo *CONSOLE_CURSOR_INFO) error { + r1, r2, err := getConsoleCursorInfoProc.Call(handle, uintptr(unsafe.Pointer(cursorInfo)), 0) + return checkError(r1, r2, err) +} + +// SetConsoleCursorInfo sets the size and visiblity of the console cursor. +// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms686019(v=vs.85).aspx. +func SetConsoleCursorInfo(handle uintptr, cursorInfo *CONSOLE_CURSOR_INFO) error { + r1, r2, err := setConsoleCursorInfoProc.Call(handle, uintptr(unsafe.Pointer(cursorInfo)), 0) + return checkError(r1, r2, err) +} + +// SetConsoleCursorPosition location of the console cursor. +// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms686025(v=vs.85).aspx. +func SetConsoleCursorPosition(handle uintptr, coord COORD) error { + r1, r2, err := setConsoleCursorPositionProc.Call(handle, coordToPointer(coord)) + use(coord) + return checkError(r1, r2, err) +} + +// GetConsoleMode gets the console mode for given file descriptor +// See http://msdn.microsoft.com/en-us/library/windows/desktop/ms683167(v=vs.85).aspx. +func GetConsoleMode(handle uintptr) (mode uint32, err error) { + err = syscall.GetConsoleMode(syscall.Handle(handle), &mode) + return mode, err +} + +// SetConsoleMode sets the console mode for given file descriptor +// See http://msdn.microsoft.com/en-us/library/windows/desktop/ms686033(v=vs.85).aspx. +func SetConsoleMode(handle uintptr, mode uint32) error { + r1, r2, err := setConsoleModeProc.Call(handle, uintptr(mode), 0) + use(mode) + return checkError(r1, r2, err) +} + +// GetConsoleScreenBufferInfo retrieves information about the specified console screen buffer. +// See http://msdn.microsoft.com/en-us/library/windows/desktop/ms683171(v=vs.85).aspx. 
+func GetConsoleScreenBufferInfo(handle uintptr) (*CONSOLE_SCREEN_BUFFER_INFO, error) { + info := CONSOLE_SCREEN_BUFFER_INFO{} + err := checkError(getConsoleScreenBufferInfoProc.Call(handle, uintptr(unsafe.Pointer(&info)), 0)) + if err != nil { + return nil, err + } + return &info, nil +} + +func ScrollConsoleScreenBuffer(handle uintptr, scrollRect SMALL_RECT, clipRect SMALL_RECT, destOrigin COORD, char CHAR_INFO) error { + r1, r2, err := scrollConsoleScreenBufferProc.Call(handle, uintptr(unsafe.Pointer(&scrollRect)), uintptr(unsafe.Pointer(&clipRect)), coordToPointer(destOrigin), uintptr(unsafe.Pointer(&char))) + use(scrollRect) + use(clipRect) + use(destOrigin) + use(char) + return checkError(r1, r2, err) +} + +// SetConsoleScreenBufferSize sets the size of the console screen buffer. +// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms686044(v=vs.85).aspx. +func SetConsoleScreenBufferSize(handle uintptr, coord COORD) error { + r1, r2, err := setConsoleScreenBufferSizeProc.Call(handle, coordToPointer(coord)) + use(coord) + return checkError(r1, r2, err) +} + +// SetConsoleTextAttribute sets the attributes of characters written to the +// console screen buffer by the WriteFile or WriteConsole function. +// See http://msdn.microsoft.com/en-us/library/windows/desktop/ms686047(v=vs.85).aspx. +func SetConsoleTextAttribute(handle uintptr, attribute uint16) error { + r1, r2, err := setConsoleTextAttributeProc.Call(handle, uintptr(attribute), 0) + use(attribute) + return checkError(r1, r2, err) +} + +// SetConsoleWindowInfo sets the size and position of the console screen buffer's window. +// Note that the size and location must be within and no larger than the backing console screen buffer. +// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms686125(v=vs.85).aspx. 
+func SetConsoleWindowInfo(handle uintptr, isAbsolute bool, rect SMALL_RECT) error { + r1, r2, err := setConsoleWindowInfoProc.Call(handle, uintptr(boolToBOOL(isAbsolute)), uintptr(unsafe.Pointer(&rect))) + use(isAbsolute) + use(rect) + return checkError(r1, r2, err) +} + +// WriteConsoleOutput writes the CHAR_INFOs from the provided buffer to the active console buffer. +// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms687404(v=vs.85).aspx. +func WriteConsoleOutput(handle uintptr, buffer []CHAR_INFO, bufferSize COORD, bufferCoord COORD, writeRegion *SMALL_RECT) error { + r1, r2, err := writeConsoleOutputProc.Call(handle, uintptr(unsafe.Pointer(&buffer[0])), coordToPointer(bufferSize), coordToPointer(bufferCoord), uintptr(unsafe.Pointer(writeRegion))) + use(buffer) + use(bufferSize) + use(bufferCoord) + return checkError(r1, r2, err) +} + +// ReadConsoleInput reads (and removes) data from the console input buffer. +// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms684961(v=vs.85).aspx. +func ReadConsoleInput(handle uintptr, buffer []INPUT_RECORD, count *uint32) error { + r1, r2, err := readConsoleInputProc.Call(handle, uintptr(unsafe.Pointer(&buffer[0])), uintptr(len(buffer)), uintptr(unsafe.Pointer(count))) + use(buffer) + return checkError(r1, r2, err) +} + +// WaitForSingleObject waits for the passed handle to be signaled. +// It returns true if the handle was signaled; false otherwise. +// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms687032(v=vs.85).aspx. 
+func WaitForSingleObject(handle uintptr, msWait uint32) (bool, error) { + r1, _, err := waitForSingleObjectProc.Call(handle, uintptr(uint32(msWait))) + switch r1 { + case WAIT_ABANDONED, WAIT_TIMEOUT: + return false, nil + case WAIT_SIGNALED: + return true, nil + } + use(msWait) + return false, err +} + +// String helpers +func (info CONSOLE_SCREEN_BUFFER_INFO) String() string { + return fmt.Sprintf("Size(%v) Cursor(%v) Window(%v) Max(%v)", info.Size, info.CursorPosition, info.Window, info.MaximumWindowSize) +} + +func (coord COORD) String() string { + return fmt.Sprintf("%v,%v", coord.X, coord.Y) +} + +func (rect SMALL_RECT) String() string { + return fmt.Sprintf("(%v,%v),(%v,%v)", rect.Left, rect.Top, rect.Right, rect.Bottom) +} + +// checkError evaluates the results of a Windows API call and returns the error if it failed. +func checkError(r1, r2 uintptr, err error) error { + // Windows APIs return non-zero to indicate success + if r1 != 0 { + return nil + } + + // Return the error if provided, otherwise default to EINVAL + if err != nil { + return err + } + return syscall.EINVAL +} + +// coordToPointer converts a COORD into a uintptr (by fooling the type system). +func coordToPointer(c COORD) uintptr { + // Note: This code assumes the two SHORTs are correctly laid out; the "cast" to uint32 is just to get a pointer to pass. + return uintptr(*((*uint32)(unsafe.Pointer(&c)))) +} + +// use is a no-op, but the compiler cannot see that it is. +// Calling use(p) ensures that p is kept live until that point. 
+func use(p interface{}) {} diff --git a/vendor/github.com/Azure/go-ansiterm/winterm/attr_translation.go b/vendor/github.com/Azure/go-ansiterm/winterm/attr_translation.go new file mode 100644 index 00000000..cbec8f72 --- /dev/null +++ b/vendor/github.com/Azure/go-ansiterm/winterm/attr_translation.go @@ -0,0 +1,100 @@ +// +build windows + +package winterm + +import "github.com/Azure/go-ansiterm" + +const ( + FOREGROUND_COLOR_MASK = FOREGROUND_RED | FOREGROUND_GREEN | FOREGROUND_BLUE + BACKGROUND_COLOR_MASK = BACKGROUND_RED | BACKGROUND_GREEN | BACKGROUND_BLUE +) + +// collectAnsiIntoWindowsAttributes modifies the passed Windows text mode flags to reflect the +// request represented by the passed ANSI mode. +func collectAnsiIntoWindowsAttributes(windowsMode uint16, inverted bool, baseMode uint16, ansiMode int16) (uint16, bool) { + switch ansiMode { + + // Mode styles + case ansiterm.ANSI_SGR_BOLD: + windowsMode = windowsMode | FOREGROUND_INTENSITY + + case ansiterm.ANSI_SGR_DIM, ansiterm.ANSI_SGR_BOLD_DIM_OFF: + windowsMode &^= FOREGROUND_INTENSITY + + case ansiterm.ANSI_SGR_UNDERLINE: + windowsMode = windowsMode | COMMON_LVB_UNDERSCORE + + case ansiterm.ANSI_SGR_REVERSE: + inverted = true + + case ansiterm.ANSI_SGR_REVERSE_OFF: + inverted = false + + case ansiterm.ANSI_SGR_UNDERLINE_OFF: + windowsMode &^= COMMON_LVB_UNDERSCORE + + // Foreground colors + case ansiterm.ANSI_SGR_FOREGROUND_DEFAULT: + windowsMode = (windowsMode &^ FOREGROUND_MASK) | (baseMode & FOREGROUND_MASK) + + case ansiterm.ANSI_SGR_FOREGROUND_BLACK: + windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) + + case ansiterm.ANSI_SGR_FOREGROUND_RED: + windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_RED + + case ansiterm.ANSI_SGR_FOREGROUND_GREEN: + windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_GREEN + + case ansiterm.ANSI_SGR_FOREGROUND_YELLOW: + windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_RED | FOREGROUND_GREEN + + case 
ansiterm.ANSI_SGR_FOREGROUND_BLUE: + windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_BLUE + + case ansiterm.ANSI_SGR_FOREGROUND_MAGENTA: + windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_RED | FOREGROUND_BLUE + + case ansiterm.ANSI_SGR_FOREGROUND_CYAN: + windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_GREEN | FOREGROUND_BLUE + + case ansiterm.ANSI_SGR_FOREGROUND_WHITE: + windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_RED | FOREGROUND_GREEN | FOREGROUND_BLUE + + // Background colors + case ansiterm.ANSI_SGR_BACKGROUND_DEFAULT: + // Black with no intensity + windowsMode = (windowsMode &^ BACKGROUND_MASK) | (baseMode & BACKGROUND_MASK) + + case ansiterm.ANSI_SGR_BACKGROUND_BLACK: + windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) + + case ansiterm.ANSI_SGR_BACKGROUND_RED: + windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_RED + + case ansiterm.ANSI_SGR_BACKGROUND_GREEN: + windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_GREEN + + case ansiterm.ANSI_SGR_BACKGROUND_YELLOW: + windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_RED | BACKGROUND_GREEN + + case ansiterm.ANSI_SGR_BACKGROUND_BLUE: + windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_BLUE + + case ansiterm.ANSI_SGR_BACKGROUND_MAGENTA: + windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_RED | BACKGROUND_BLUE + + case ansiterm.ANSI_SGR_BACKGROUND_CYAN: + windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_GREEN | BACKGROUND_BLUE + + case ansiterm.ANSI_SGR_BACKGROUND_WHITE: + windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_RED | BACKGROUND_GREEN | BACKGROUND_BLUE + } + + return windowsMode, inverted +} + +// invertAttributes inverts the foreground and background colors of a Windows attributes value +func invertAttributes(windowsMode uint16) uint16 { + return (COMMON_LVB_MASK & windowsMode) | ((FOREGROUND_MASK & windowsMode) << 
4) | ((BACKGROUND_MASK & windowsMode) >> 4) +} diff --git a/vendor/github.com/Azure/go-ansiterm/winterm/cursor_helpers.go b/vendor/github.com/Azure/go-ansiterm/winterm/cursor_helpers.go new file mode 100644 index 00000000..f015723a --- /dev/null +++ b/vendor/github.com/Azure/go-ansiterm/winterm/cursor_helpers.go @@ -0,0 +1,101 @@ +// +build windows + +package winterm + +const ( + horizontal = iota + vertical +) + +func (h *windowsAnsiEventHandler) getCursorWindow(info *CONSOLE_SCREEN_BUFFER_INFO) SMALL_RECT { + if h.originMode { + sr := h.effectiveSr(info.Window) + return SMALL_RECT{ + Top: sr.top, + Bottom: sr.bottom, + Left: 0, + Right: info.Size.X - 1, + } + } else { + return SMALL_RECT{ + Top: info.Window.Top, + Bottom: info.Window.Bottom, + Left: 0, + Right: info.Size.X - 1, + } + } +} + +// setCursorPosition sets the cursor to the specified position, bounded to the screen size +func (h *windowsAnsiEventHandler) setCursorPosition(position COORD, window SMALL_RECT) error { + position.X = ensureInRange(position.X, window.Left, window.Right) + position.Y = ensureInRange(position.Y, window.Top, window.Bottom) + err := SetConsoleCursorPosition(h.fd, position) + if err != nil { + return err + } + logger.Infof("Cursor position set: (%d, %d)", position.X, position.Y) + return err +} + +func (h *windowsAnsiEventHandler) moveCursorVertical(param int) error { + return h.moveCursor(vertical, param) +} + +func (h *windowsAnsiEventHandler) moveCursorHorizontal(param int) error { + return h.moveCursor(horizontal, param) +} + +func (h *windowsAnsiEventHandler) moveCursor(moveMode int, param int) error { + info, err := GetConsoleScreenBufferInfo(h.fd) + if err != nil { + return err + } + + position := info.CursorPosition + switch moveMode { + case horizontal: + position.X += int16(param) + case vertical: + position.Y += int16(param) + } + + if err = h.setCursorPosition(position, h.getCursorWindow(info)); err != nil { + return err + } + + return nil +} + +func (h 
*windowsAnsiEventHandler) moveCursorLine(param int) error { + info, err := GetConsoleScreenBufferInfo(h.fd) + if err != nil { + return err + } + + position := info.CursorPosition + position.X = 0 + position.Y += int16(param) + + if err = h.setCursorPosition(position, h.getCursorWindow(info)); err != nil { + return err + } + + return nil +} + +func (h *windowsAnsiEventHandler) moveCursorColumn(param int) error { + info, err := GetConsoleScreenBufferInfo(h.fd) + if err != nil { + return err + } + + position := info.CursorPosition + position.X = int16(param) - 1 + + if err = h.setCursorPosition(position, h.getCursorWindow(info)); err != nil { + return err + } + + return nil +} diff --git a/vendor/github.com/Azure/go-ansiterm/winterm/erase_helpers.go b/vendor/github.com/Azure/go-ansiterm/winterm/erase_helpers.go new file mode 100644 index 00000000..244b5fa2 --- /dev/null +++ b/vendor/github.com/Azure/go-ansiterm/winterm/erase_helpers.go @@ -0,0 +1,84 @@ +// +build windows + +package winterm + +import "github.com/Azure/go-ansiterm" + +func (h *windowsAnsiEventHandler) clearRange(attributes uint16, fromCoord COORD, toCoord COORD) error { + // Ignore an invalid (negative area) request + if toCoord.Y < fromCoord.Y { + return nil + } + + var err error + + var coordStart = COORD{} + var coordEnd = COORD{} + + xCurrent, yCurrent := fromCoord.X, fromCoord.Y + xEnd, yEnd := toCoord.X, toCoord.Y + + // Clear any partial initial line + if xCurrent > 0 { + coordStart.X, coordStart.Y = xCurrent, yCurrent + coordEnd.X, coordEnd.Y = xEnd, yCurrent + + err = h.clearRect(attributes, coordStart, coordEnd) + if err != nil { + return err + } + + xCurrent = 0 + yCurrent += 1 + } + + // Clear intervening rectangular section + if yCurrent < yEnd { + coordStart.X, coordStart.Y = xCurrent, yCurrent + coordEnd.X, coordEnd.Y = xEnd, yEnd-1 + + err = h.clearRect(attributes, coordStart, coordEnd) + if err != nil { + return err + } + + xCurrent = 0 + yCurrent = yEnd + } + + // Clear remaining 
partial ending line + coordStart.X, coordStart.Y = xCurrent, yCurrent + coordEnd.X, coordEnd.Y = xEnd, yEnd + + err = h.clearRect(attributes, coordStart, coordEnd) + if err != nil { + return err + } + + return nil +} + +func (h *windowsAnsiEventHandler) clearRect(attributes uint16, fromCoord COORD, toCoord COORD) error { + region := SMALL_RECT{Top: fromCoord.Y, Left: fromCoord.X, Bottom: toCoord.Y, Right: toCoord.X} + width := toCoord.X - fromCoord.X + 1 + height := toCoord.Y - fromCoord.Y + 1 + size := uint32(width) * uint32(height) + + if size <= 0 { + return nil + } + + buffer := make([]CHAR_INFO, size) + + char := CHAR_INFO{ansiterm.FILL_CHARACTER, attributes} + for i := 0; i < int(size); i++ { + buffer[i] = char + } + + err := WriteConsoleOutput(h.fd, buffer, COORD{X: width, Y: height}, COORD{X: 0, Y: 0}, ®ion) + if err != nil { + return err + } + + return nil +} diff --git a/vendor/github.com/Azure/go-ansiterm/winterm/scroll_helper.go b/vendor/github.com/Azure/go-ansiterm/winterm/scroll_helper.go new file mode 100644 index 00000000..706d2705 --- /dev/null +++ b/vendor/github.com/Azure/go-ansiterm/winterm/scroll_helper.go @@ -0,0 +1,118 @@ +// +build windows + +package winterm + +// effectiveSr gets the current effective scroll region in buffer coordinates +func (h *windowsAnsiEventHandler) effectiveSr(window SMALL_RECT) scrollRegion { + top := addInRange(window.Top, h.sr.top, window.Top, window.Bottom) + bottom := addInRange(window.Top, h.sr.bottom, window.Top, window.Bottom) + if top >= bottom { + top = window.Top + bottom = window.Bottom + } + return scrollRegion{top: top, bottom: bottom} +} + +func (h *windowsAnsiEventHandler) scrollUp(param int) error { + info, err := GetConsoleScreenBufferInfo(h.fd) + if err != nil { + return err + } + + sr := h.effectiveSr(info.Window) + return h.scroll(param, sr, info) +} + +func (h *windowsAnsiEventHandler) scrollDown(param int) error { + return h.scrollUp(-param) +} + +func (h *windowsAnsiEventHandler) 
deleteLines(param int) error { + info, err := GetConsoleScreenBufferInfo(h.fd) + if err != nil { + return err + } + + start := info.CursorPosition.Y + sr := h.effectiveSr(info.Window) + // Lines cannot be inserted or deleted outside the scrolling region. + if start >= sr.top && start <= sr.bottom { + sr.top = start + return h.scroll(param, sr, info) + } else { + return nil + } +} + +func (h *windowsAnsiEventHandler) insertLines(param int) error { + return h.deleteLines(-param) +} + +// scroll scrolls the provided scroll region by param lines. The scroll region is in buffer coordinates. +func (h *windowsAnsiEventHandler) scroll(param int, sr scrollRegion, info *CONSOLE_SCREEN_BUFFER_INFO) error { + logger.Infof("scroll: scrollTop: %d, scrollBottom: %d", sr.top, sr.bottom) + logger.Infof("scroll: windowTop: %d, windowBottom: %d", info.Window.Top, info.Window.Bottom) + + // Copy from and clip to the scroll region (full buffer width) + scrollRect := SMALL_RECT{ + Top: sr.top, + Bottom: sr.bottom, + Left: 0, + Right: info.Size.X - 1, + } + + // Origin to which area should be copied + destOrigin := COORD{ + X: 0, + Y: sr.top - int16(param), + } + + char := CHAR_INFO{ + UnicodeChar: ' ', + Attributes: h.attributes, + } + + if err := ScrollConsoleScreenBuffer(h.fd, scrollRect, scrollRect, destOrigin, char); err != nil { + return err + } + return nil +} + +func (h *windowsAnsiEventHandler) deleteCharacters(param int) error { + info, err := GetConsoleScreenBufferInfo(h.fd) + if err != nil { + return err + } + return h.scrollLine(param, info.CursorPosition, info) +} + +func (h *windowsAnsiEventHandler) insertCharacters(param int) error { + return h.deleteCharacters(-param) +} + +// scrollLine scrolls a line horizontally starting at the provided position by a number of columns. 
+func (h *windowsAnsiEventHandler) scrollLine(columns int, position COORD, info *CONSOLE_SCREEN_BUFFER_INFO) error { + // Copy from and clip to the scroll region (full buffer width) + scrollRect := SMALL_RECT{ + Top: position.Y, + Bottom: position.Y, + Left: position.X, + Right: info.Size.X - 1, + } + + // Origin to which area should be copied + destOrigin := COORD{ + X: position.X - int16(columns), + Y: position.Y, + } + + char := CHAR_INFO{ + UnicodeChar: ' ', + Attributes: h.attributes, + } + + if err := ScrollConsoleScreenBuffer(h.fd, scrollRect, scrollRect, destOrigin, char); err != nil { + return err + } + return nil +} diff --git a/vendor/github.com/Azure/go-ansiterm/winterm/utilities.go b/vendor/github.com/Azure/go-ansiterm/winterm/utilities.go new file mode 100644 index 00000000..afa7635d --- /dev/null +++ b/vendor/github.com/Azure/go-ansiterm/winterm/utilities.go @@ -0,0 +1,9 @@ +// +build windows + +package winterm + +// AddInRange increments a value by the passed quantity while ensuring the values +// always remain within the supplied min / max range. 
+func addInRange(n int16, increment int16, min int16, max int16) int16 { + return ensureInRange(n+increment, min, max) +} diff --git a/vendor/github.com/Azure/go-ansiterm/winterm/win_event_handler.go b/vendor/github.com/Azure/go-ansiterm/winterm/win_event_handler.go new file mode 100644 index 00000000..4d858ed6 --- /dev/null +++ b/vendor/github.com/Azure/go-ansiterm/winterm/win_event_handler.go @@ -0,0 +1,726 @@ +// +build windows + +package winterm + +import ( + "bytes" + "io/ioutil" + "os" + "strconv" + + "github.com/Azure/go-ansiterm" + "github.com/Sirupsen/logrus" +) + +var logger *logrus.Logger + +type windowsAnsiEventHandler struct { + fd uintptr + file *os.File + infoReset *CONSOLE_SCREEN_BUFFER_INFO + sr scrollRegion + buffer bytes.Buffer + attributes uint16 + inverted bool + wrapNext bool + drewMarginByte bool + originMode bool + marginByte byte + curInfo *CONSOLE_SCREEN_BUFFER_INFO + curPos COORD +} + +func CreateWinEventHandler(fd uintptr, file *os.File) ansiterm.AnsiEventHandler { + logFile := ioutil.Discard + + if isDebugEnv := os.Getenv(ansiterm.LogEnv); isDebugEnv == "1" { + logFile, _ = os.Create("winEventHandler.log") + } + + logger = &logrus.Logger{ + Out: logFile, + Formatter: new(logrus.TextFormatter), + Level: logrus.DebugLevel, + } + + infoReset, err := GetConsoleScreenBufferInfo(fd) + if err != nil { + return nil + } + + return &windowsAnsiEventHandler{ + fd: fd, + file: file, + infoReset: infoReset, + attributes: infoReset.Attributes, + } +} + +type scrollRegion struct { + top int16 + bottom int16 +} + +// simulateLF simulates a LF or CR+LF by scrolling if necessary to handle the +// current cursor position and scroll region settings, in which case it returns +// true. If no special handling is necessary, then it does nothing and returns +// false. +// +// In the false case, the caller should ensure that a carriage return +// and line feed are inserted or that the text is otherwise wrapped. 
+func (h *windowsAnsiEventHandler) simulateLF(includeCR bool) (bool, error) { + if h.wrapNext { + if err := h.Flush(); err != nil { + return false, err + } + h.clearWrap() + } + pos, info, err := h.getCurrentInfo() + if err != nil { + return false, err + } + sr := h.effectiveSr(info.Window) + if pos.Y == sr.bottom { + // Scrolling is necessary. Let Windows automatically scroll if the scrolling region + // is the full window. + if sr.top == info.Window.Top && sr.bottom == info.Window.Bottom { + if includeCR { + pos.X = 0 + h.updatePos(pos) + } + return false, nil + } + + // A custom scroll region is active. Scroll the window manually to simulate + // the LF. + if err := h.Flush(); err != nil { + return false, err + } + logger.Info("Simulating LF inside scroll region") + if err := h.scrollUp(1); err != nil { + return false, err + } + if includeCR { + pos.X = 0 + if err := SetConsoleCursorPosition(h.fd, pos); err != nil { + return false, err + } + } + return true, nil + + } else if pos.Y < info.Window.Bottom { + // Let Windows handle the LF. + pos.Y++ + if includeCR { + pos.X = 0 + } + h.updatePos(pos) + return false, nil + } else { + // The cursor is at the bottom of the screen but outside the scroll + // region. Skip the LF. + logger.Info("Simulating LF outside scroll region") + if includeCR { + if err := h.Flush(); err != nil { + return false, err + } + pos.X = 0 + if err := SetConsoleCursorPosition(h.fd, pos); err != nil { + return false, err + } + } + return true, nil + } +} + +// executeLF executes a LF without a CR. +func (h *windowsAnsiEventHandler) executeLF() error { + handled, err := h.simulateLF(false) + if err != nil { + return err + } + if !handled { + // Windows LF will reset the cursor column position. Write the LF + // and restore the cursor position. 
+ pos, _, err := h.getCurrentInfo() + if err != nil { + return err + } + h.buffer.WriteByte(ansiterm.ANSI_LINE_FEED) + if pos.X != 0 { + if err := h.Flush(); err != nil { + return err + } + logger.Info("Resetting cursor position for LF without CR") + if err := SetConsoleCursorPosition(h.fd, pos); err != nil { + return err + } + } + } + return nil +} + +func (h *windowsAnsiEventHandler) Print(b byte) error { + if h.wrapNext { + h.buffer.WriteByte(h.marginByte) + h.clearWrap() + if _, err := h.simulateLF(true); err != nil { + return err + } + } + pos, info, err := h.getCurrentInfo() + if err != nil { + return err + } + if pos.X == info.Size.X-1 { + h.wrapNext = true + h.marginByte = b + } else { + pos.X++ + h.updatePos(pos) + h.buffer.WriteByte(b) + } + return nil +} + +func (h *windowsAnsiEventHandler) Execute(b byte) error { + switch b { + case ansiterm.ANSI_TAB: + logger.Info("Execute(TAB)") + // Move to the next tab stop, but preserve auto-wrap if already set. + if !h.wrapNext { + pos, info, err := h.getCurrentInfo() + if err != nil { + return err + } + pos.X = (pos.X + 8) - pos.X%8 + if pos.X >= info.Size.X { + pos.X = info.Size.X - 1 + } + if err := h.Flush(); err != nil { + return err + } + if err := SetConsoleCursorPosition(h.fd, pos); err != nil { + return err + } + } + return nil + + case ansiterm.ANSI_BEL: + h.buffer.WriteByte(ansiterm.ANSI_BEL) + return nil + + case ansiterm.ANSI_BACKSPACE: + if h.wrapNext { + if err := h.Flush(); err != nil { + return err + } + h.clearWrap() + } + pos, _, err := h.getCurrentInfo() + if err != nil { + return err + } + if pos.X > 0 { + pos.X-- + h.updatePos(pos) + h.buffer.WriteByte(ansiterm.ANSI_BACKSPACE) + } + return nil + + case ansiterm.ANSI_VERTICAL_TAB, ansiterm.ANSI_FORM_FEED: + // Treat as true LF. 
+ return h.executeLF() + + case ansiterm.ANSI_LINE_FEED: + // Simulate a CR and LF for now since there is no way in go-ansiterm + // to tell if the LF should include CR (and more things break when it's + // missing than when it's incorrectly added). + handled, err := h.simulateLF(true) + if handled || err != nil { + return err + } + return h.buffer.WriteByte(ansiterm.ANSI_LINE_FEED) + + case ansiterm.ANSI_CARRIAGE_RETURN: + if h.wrapNext { + if err := h.Flush(); err != nil { + return err + } + h.clearWrap() + } + pos, _, err := h.getCurrentInfo() + if err != nil { + return err + } + if pos.X != 0 { + pos.X = 0 + h.updatePos(pos) + h.buffer.WriteByte(ansiterm.ANSI_CARRIAGE_RETURN) + } + return nil + + default: + return nil + } +} + +func (h *windowsAnsiEventHandler) CUU(param int) error { + if err := h.Flush(); err != nil { + return err + } + logger.Infof("CUU: [%v]", []string{strconv.Itoa(param)}) + h.clearWrap() + return h.moveCursorVertical(-param) +} + +func (h *windowsAnsiEventHandler) CUD(param int) error { + if err := h.Flush(); err != nil { + return err + } + logger.Infof("CUD: [%v]", []string{strconv.Itoa(param)}) + h.clearWrap() + return h.moveCursorVertical(param) +} + +func (h *windowsAnsiEventHandler) CUF(param int) error { + if err := h.Flush(); err != nil { + return err + } + logger.Infof("CUF: [%v]", []string{strconv.Itoa(param)}) + h.clearWrap() + return h.moveCursorHorizontal(param) +} + +func (h *windowsAnsiEventHandler) CUB(param int) error { + if err := h.Flush(); err != nil { + return err + } + logger.Infof("CUB: [%v]", []string{strconv.Itoa(param)}) + h.clearWrap() + return h.moveCursorHorizontal(-param) +} + +func (h *windowsAnsiEventHandler) CNL(param int) error { + if err := h.Flush(); err != nil { + return err + } + logger.Infof("CNL: [%v]", []string{strconv.Itoa(param)}) + h.clearWrap() + return h.moveCursorLine(param) +} + +func (h *windowsAnsiEventHandler) CPL(param int) error { + if err := h.Flush(); err != nil { + return err + } + 
logger.Infof("CPL: [%v]", []string{strconv.Itoa(param)}) + h.clearWrap() + return h.moveCursorLine(-param) +} + +func (h *windowsAnsiEventHandler) CHA(param int) error { + if err := h.Flush(); err != nil { + return err + } + logger.Infof("CHA: [%v]", []string{strconv.Itoa(param)}) + h.clearWrap() + return h.moveCursorColumn(param) +} + +func (h *windowsAnsiEventHandler) VPA(param int) error { + if err := h.Flush(); err != nil { + return err + } + logger.Infof("VPA: [[%d]]", param) + h.clearWrap() + info, err := GetConsoleScreenBufferInfo(h.fd) + if err != nil { + return err + } + window := h.getCursorWindow(info) + position := info.CursorPosition + position.Y = window.Top + int16(param) - 1 + return h.setCursorPosition(position, window) +} + +func (h *windowsAnsiEventHandler) CUP(row int, col int) error { + if err := h.Flush(); err != nil { + return err + } + logger.Infof("CUP: [[%d %d]]", row, col) + h.clearWrap() + info, err := GetConsoleScreenBufferInfo(h.fd) + if err != nil { + return err + } + + window := h.getCursorWindow(info) + position := COORD{window.Left + int16(col) - 1, window.Top + int16(row) - 1} + return h.setCursorPosition(position, window) +} + +func (h *windowsAnsiEventHandler) HVP(row int, col int) error { + if err := h.Flush(); err != nil { + return err + } + logger.Infof("HVP: [[%d %d]]", row, col) + h.clearWrap() + return h.CUP(row, col) +} + +func (h *windowsAnsiEventHandler) DECTCEM(visible bool) error { + if err := h.Flush(); err != nil { + return err + } + logger.Infof("DECTCEM: [%v]", []string{strconv.FormatBool(visible)}) + h.clearWrap() + return nil +} + +func (h *windowsAnsiEventHandler) DECOM(enable bool) error { + if err := h.Flush(); err != nil { + return err + } + logger.Infof("DECOM: [%v]", []string{strconv.FormatBool(enable)}) + h.clearWrap() + h.originMode = enable + return h.CUP(1, 1) +} + +func (h *windowsAnsiEventHandler) DECCOLM(use132 bool) error { + if err := h.Flush(); err != nil { + return err + } + 
logger.Infof("DECCOLM: [%v]", []string{strconv.FormatBool(use132)}) + h.clearWrap() + if err := h.ED(2); err != nil { + return err + } + info, err := GetConsoleScreenBufferInfo(h.fd) + if err != nil { + return err + } + targetWidth := int16(80) + if use132 { + targetWidth = 132 + } + if info.Size.X < targetWidth { + if err := SetConsoleScreenBufferSize(h.fd, COORD{targetWidth, info.Size.Y}); err != nil { + logger.Info("set buffer failed:", err) + return err + } + } + window := info.Window + window.Left = 0 + window.Right = targetWidth - 1 + if err := SetConsoleWindowInfo(h.fd, true, window); err != nil { + logger.Info("set window failed:", err) + return err + } + if info.Size.X > targetWidth { + if err := SetConsoleScreenBufferSize(h.fd, COORD{targetWidth, info.Size.Y}); err != nil { + logger.Info("set buffer failed:", err) + return err + } + } + return SetConsoleCursorPosition(h.fd, COORD{0, 0}) +} + +func (h *windowsAnsiEventHandler) ED(param int) error { + if err := h.Flush(); err != nil { + return err + } + logger.Infof("ED: [%v]", []string{strconv.Itoa(param)}) + h.clearWrap() + + // [J -- Erases from the cursor to the end of the screen, including the cursor position. + // [1J -- Erases from the beginning of the screen to the cursor, including the cursor position. + // [2J -- Erases the complete display. The cursor does not move. 
+ // Notes: + // -- Clearing the entire buffer, versus just the Window, works best for Windows Consoles + + info, err := GetConsoleScreenBufferInfo(h.fd) + if err != nil { + return err + } + + var start COORD + var end COORD + + switch param { + case 0: + start = info.CursorPosition + end = COORD{info.Size.X - 1, info.Size.Y - 1} + + case 1: + start = COORD{0, 0} + end = info.CursorPosition + + case 2: + start = COORD{0, 0} + end = COORD{info.Size.X - 1, info.Size.Y - 1} + } + + err = h.clearRange(h.attributes, start, end) + if err != nil { + return err + } + + // If the whole buffer was cleared, move the window to the top while preserving + // the window-relative cursor position. + if param == 2 { + pos := info.CursorPosition + window := info.Window + pos.Y -= window.Top + window.Bottom -= window.Top + window.Top = 0 + if err := SetConsoleCursorPosition(h.fd, pos); err != nil { + return err + } + if err := SetConsoleWindowInfo(h.fd, true, window); err != nil { + return err + } + } + + return nil +} + +func (h *windowsAnsiEventHandler) EL(param int) error { + if err := h.Flush(); err != nil { + return err + } + logger.Infof("EL: [%v]", strconv.Itoa(param)) + h.clearWrap() + + // [K -- Erases from the cursor to the end of the line, including the cursor position. + // [1K -- Erases from the beginning of the line to the cursor, including the cursor position. + // [2K -- Erases the complete line. 
+ + info, err := GetConsoleScreenBufferInfo(h.fd) + if err != nil { + return err + } + + var start COORD + var end COORD + + switch param { + case 0: + start = info.CursorPosition + end = COORD{info.Size.X, info.CursorPosition.Y} + + case 1: + start = COORD{0, info.CursorPosition.Y} + end = info.CursorPosition + + case 2: + start = COORD{0, info.CursorPosition.Y} + end = COORD{info.Size.X, info.CursorPosition.Y} + } + + err = h.clearRange(h.attributes, start, end) + if err != nil { + return err + } + + return nil +} + +func (h *windowsAnsiEventHandler) IL(param int) error { + if err := h.Flush(); err != nil { + return err + } + logger.Infof("IL: [%v]", strconv.Itoa(param)) + h.clearWrap() + return h.insertLines(param) +} + +func (h *windowsAnsiEventHandler) DL(param int) error { + if err := h.Flush(); err != nil { + return err + } + logger.Infof("DL: [%v]", strconv.Itoa(param)) + h.clearWrap() + return h.deleteLines(param) +} + +func (h *windowsAnsiEventHandler) ICH(param int) error { + if err := h.Flush(); err != nil { + return err + } + logger.Infof("ICH: [%v]", strconv.Itoa(param)) + h.clearWrap() + return h.insertCharacters(param) +} + +func (h *windowsAnsiEventHandler) DCH(param int) error { + if err := h.Flush(); err != nil { + return err + } + logger.Infof("DCH: [%v]", strconv.Itoa(param)) + h.clearWrap() + return h.deleteCharacters(param) +} + +func (h *windowsAnsiEventHandler) SGR(params []int) error { + if err := h.Flush(); err != nil { + return err + } + strings := []string{} + for _, v := range params { + strings = append(strings, strconv.Itoa(v)) + } + + logger.Infof("SGR: [%v]", strings) + + if len(params) <= 0 { + h.attributes = h.infoReset.Attributes + h.inverted = false + } else { + for _, attr := range params { + + if attr == ansiterm.ANSI_SGR_RESET { + h.attributes = h.infoReset.Attributes + h.inverted = false + continue + } + + h.attributes, h.inverted = collectAnsiIntoWindowsAttributes(h.attributes, h.inverted, h.infoReset.Attributes, 
int16(attr)) + } + } + + attributes := h.attributes + if h.inverted { + attributes = invertAttributes(attributes) + } + err := SetConsoleTextAttribute(h.fd, attributes) + if err != nil { + return err + } + + return nil +} + +func (h *windowsAnsiEventHandler) SU(param int) error { + if err := h.Flush(); err != nil { + return err + } + logger.Infof("SU: [%v]", []string{strconv.Itoa(param)}) + h.clearWrap() + return h.scrollUp(param) +} + +func (h *windowsAnsiEventHandler) SD(param int) error { + if err := h.Flush(); err != nil { + return err + } + logger.Infof("SD: [%v]", []string{strconv.Itoa(param)}) + h.clearWrap() + return h.scrollDown(param) +} + +func (h *windowsAnsiEventHandler) DA(params []string) error { + logger.Infof("DA: [%v]", params) + // DA cannot be implemented because it must send data on the VT100 input stream, + // which is not available to go-ansiterm. + return nil +} + +func (h *windowsAnsiEventHandler) DECSTBM(top int, bottom int) error { + if err := h.Flush(); err != nil { + return err + } + logger.Infof("DECSTBM: [%d, %d]", top, bottom) + + // Windows is 0 indexed, Linux is 1 indexed + h.sr.top = int16(top - 1) + h.sr.bottom = int16(bottom - 1) + + // This command also moves the cursor to the origin. 
+ h.clearWrap() + return h.CUP(1, 1) +} + +func (h *windowsAnsiEventHandler) RI() error { + if err := h.Flush(); err != nil { + return err + } + logger.Info("RI: []") + h.clearWrap() + + info, err := GetConsoleScreenBufferInfo(h.fd) + if err != nil { + return err + } + + sr := h.effectiveSr(info.Window) + if info.CursorPosition.Y == sr.top { + return h.scrollDown(1) + } + + return h.moveCursorVertical(-1) +} + +func (h *windowsAnsiEventHandler) IND() error { + logger.Info("IND: []") + return h.executeLF() +} + +func (h *windowsAnsiEventHandler) Flush() error { + h.curInfo = nil + if h.buffer.Len() > 0 { + logger.Infof("Flush: [%s]", h.buffer.Bytes()) + if _, err := h.buffer.WriteTo(h.file); err != nil { + return err + } + } + + if h.wrapNext && !h.drewMarginByte { + logger.Infof("Flush: drawing margin byte '%c'", h.marginByte) + + info, err := GetConsoleScreenBufferInfo(h.fd) + if err != nil { + return err + } + + charInfo := []CHAR_INFO{{UnicodeChar: uint16(h.marginByte), Attributes: info.Attributes}} + size := COORD{1, 1} + position := COORD{0, 0} + region := SMALL_RECT{Left: info.CursorPosition.X, Top: info.CursorPosition.Y, Right: info.CursorPosition.X, Bottom: info.CursorPosition.Y} + if err := WriteConsoleOutput(h.fd, charInfo, size, position, ®ion); err != nil { + return err + } + h.drewMarginByte = true + } + return nil +} + +// cacheConsoleInfo ensures that the current console screen information has been queried +// since the last call to Flush(). It must be called before accessing h.curInfo or h.curPos. 
+func (h *windowsAnsiEventHandler) getCurrentInfo() (COORD, *CONSOLE_SCREEN_BUFFER_INFO, error) { + if h.curInfo == nil { + info, err := GetConsoleScreenBufferInfo(h.fd) + if err != nil { + return COORD{}, nil, err + } + h.curInfo = info + h.curPos = info.CursorPosition + } + return h.curPos, h.curInfo, nil +} + +func (h *windowsAnsiEventHandler) updatePos(pos COORD) { + if h.curInfo == nil { + panic("failed to call getCurrentInfo before calling updatePos") + } + h.curPos = pos +} + +// clearWrap clears the state where the cursor is in the margin +// waiting for the next character before wrapping the line. This must +// be done before most operations that act on the cursor. +func (h *windowsAnsiEventHandler) clearWrap() { + h.wrapNext = false + h.drewMarginByte = false +} diff --git a/vendor/github.com/PuerkitoBio/purell/.gitignore b/vendor/github.com/PuerkitoBio/purell/.gitignore new file mode 100644 index 00000000..748e4c80 --- /dev/null +++ b/vendor/github.com/PuerkitoBio/purell/.gitignore @@ -0,0 +1,5 @@ +*.sublime-* +.DS_Store +*.swp +*.swo +tags diff --git a/vendor/github.com/PuerkitoBio/purell/.travis.yml b/vendor/github.com/PuerkitoBio/purell/.travis.yml new file mode 100644 index 00000000..facfc91c --- /dev/null +++ b/vendor/github.com/PuerkitoBio/purell/.travis.yml @@ -0,0 +1,7 @@ +language: go + +go: + - 1.4 + - 1.5 + - 1.6 + - tip diff --git a/vendor/github.com/PuerkitoBio/purell/LICENSE b/vendor/github.com/PuerkitoBio/purell/LICENSE new file mode 100644 index 00000000..4b9986de --- /dev/null +++ b/vendor/github.com/PuerkitoBio/purell/LICENSE @@ -0,0 +1,12 @@ +Copyright (c) 2012, Martin Angers +All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 
+ +* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. + +* Neither the name of the author nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/PuerkitoBio/purell/README.md b/vendor/github.com/PuerkitoBio/purell/README.md new file mode 100644 index 00000000..09e8a32c --- /dev/null +++ b/vendor/github.com/PuerkitoBio/purell/README.md @@ -0,0 +1,187 @@ +# Purell + +Purell is a tiny Go library to normalize URLs. It returns a pure URL. Pure-ell. Sanitizer and all. Yeah, I know... + +Based on the [wikipedia paper][wiki] and the [RFC 3986 document][rfc]. + +[![build status](https://secure.travis-ci.org/PuerkitoBio/purell.png)](http://travis-ci.org/PuerkitoBio/purell) + +## Install + +`go get github.com/PuerkitoBio/purell` + +## Changelog + +* **2016-11-14 (v1.1.0)** : IDN: Conform to RFC 5895: Fold character width (thanks to @beeker1121). +* **2016-07-27 (v1.0.0)** : Normalize IDN to ASCII (thanks to @zenovich). 
+* **2015-02-08** : Add fix for relative paths issue ([PR #5][pr5]) and add fix for unnecessary encoding of reserved characters ([see issue #7][iss7]). +* **v0.2.0** : Add benchmarks, Attempt IDN support. +* **v0.1.0** : Initial release. + +## Examples + +From `example_test.go` (note that in your code, you would import "github.com/PuerkitoBio/purell", and would prefix references to its methods and constants with "purell."): + +```go +package purell + +import ( + "fmt" + "net/url" +) + +func ExampleNormalizeURLString() { + if normalized, err := NormalizeURLString("hTTp://someWEBsite.com:80/Amazing%3f/url/", + FlagLowercaseScheme|FlagLowercaseHost|FlagUppercaseEscapes); err != nil { + panic(err) + } else { + fmt.Print(normalized) + } + // Output: http://somewebsite.com:80/Amazing%3F/url/ +} + +func ExampleMustNormalizeURLString() { + normalized := MustNormalizeURLString("hTTpS://someWEBsite.com:443/Amazing%fa/url/", + FlagsUnsafeGreedy) + fmt.Print(normalized) + + // Output: http://somewebsite.com/Amazing%FA/url +} + +func ExampleNormalizeURL() { + if u, err := url.Parse("Http://SomeUrl.com:8080/a/b/.././c///g?c=3&a=1&b=9&c=0#target"); err != nil { + panic(err) + } else { + normalized := NormalizeURL(u, FlagsUsuallySafeGreedy|FlagRemoveDuplicateSlashes|FlagRemoveFragment) + fmt.Print(normalized) + } + + // Output: http://someurl.com:8080/a/c/g?c=3&a=1&b=9&c=0 +} +``` + +## API + +As seen in the examples above, purell offers three methods, `NormalizeURLString(string, NormalizationFlags) (string, error)`, `MustNormalizeURLString(string, NormalizationFlags) (string)` and `NormalizeURL(*url.URL, NormalizationFlags) (string)`. They all normalize the provided URL based on the specified flags. 
Here are the available flags: + +```go +const ( + // Safe normalizations + FlagLowercaseScheme NormalizationFlags = 1 << iota // HTTP://host -> http://host, applied by default in Go1.1 + FlagLowercaseHost // http://HOST -> http://host + FlagUppercaseEscapes // http://host/t%ef -> http://host/t%EF + FlagDecodeUnnecessaryEscapes // http://host/t%41 -> http://host/tA + FlagEncodeNecessaryEscapes // http://host/!"#$ -> http://host/%21%22#$ + FlagRemoveDefaultPort // http://host:80 -> http://host + FlagRemoveEmptyQuerySeparator // http://host/path? -> http://host/path + + // Usually safe normalizations + FlagRemoveTrailingSlash // http://host/path/ -> http://host/path + FlagAddTrailingSlash // http://host/path -> http://host/path/ (should choose only one of these add/remove trailing slash flags) + FlagRemoveDotSegments // http://host/path/./a/b/../c -> http://host/path/a/c + + // Unsafe normalizations + FlagRemoveDirectoryIndex // http://host/path/index.html -> http://host/path/ + FlagRemoveFragment // http://host/path#fragment -> http://host/path + FlagForceHTTP // https://host -> http://host + FlagRemoveDuplicateSlashes // http://host/path//a///b -> http://host/path/a/b + FlagRemoveWWW // http://www.host/ -> http://host/ + FlagAddWWW // http://host/ -> http://www.host/ (should choose only one of these add/remove WWW flags) + FlagSortQuery // http://host/path?c=3&b=2&a=1&b=1 -> http://host/path?a=1&b=1&b=2&c=3 + + // Normalizations not in the wikipedia article, required to cover tests cases + // submitted by jehiah + FlagDecodeDWORDHost // http://1113982867 -> http://66.102.7.147 + FlagDecodeOctalHost // http://0102.0146.07.0223 -> http://66.102.7.147 + FlagDecodeHexHost // http://0x42660793 -> http://66.102.7.147 + FlagRemoveUnnecessaryHostDots // http://.host../path -> http://host/path + FlagRemoveEmptyPortSeparator // http://host:/path -> http://host/path + + // Convenience set of safe normalizations + FlagsSafe NormalizationFlags = FlagLowercaseHost | 
FlagLowercaseScheme | FlagUppercaseEscapes | FlagDecodeUnnecessaryEscapes | FlagEncodeNecessaryEscapes | FlagRemoveDefaultPort | FlagRemoveEmptyQuerySeparator + + // For convenience sets, "greedy" uses the "remove trailing slash" and "remove www. prefix" flags, + // while "non-greedy" uses the "add (or keep) the trailing slash" and "add www. prefix". + + // Convenience set of usually safe normalizations (includes FlagsSafe) + FlagsUsuallySafeGreedy NormalizationFlags = FlagsSafe | FlagRemoveTrailingSlash | FlagRemoveDotSegments + FlagsUsuallySafeNonGreedy NormalizationFlags = FlagsSafe | FlagAddTrailingSlash | FlagRemoveDotSegments + + // Convenience set of unsafe normalizations (includes FlagsUsuallySafe) + FlagsUnsafeGreedy NormalizationFlags = FlagsUsuallySafeGreedy | FlagRemoveDirectoryIndex | FlagRemoveFragment | FlagForceHTTP | FlagRemoveDuplicateSlashes | FlagRemoveWWW | FlagSortQuery + FlagsUnsafeNonGreedy NormalizationFlags = FlagsUsuallySafeNonGreedy | FlagRemoveDirectoryIndex | FlagRemoveFragment | FlagForceHTTP | FlagRemoveDuplicateSlashes | FlagAddWWW | FlagSortQuery + + // Convenience set of all available flags + FlagsAllGreedy = FlagsUnsafeGreedy | FlagDecodeDWORDHost | FlagDecodeOctalHost | FlagDecodeHexHost | FlagRemoveUnnecessaryHostDots | FlagRemoveEmptyPortSeparator + FlagsAllNonGreedy = FlagsUnsafeNonGreedy | FlagDecodeDWORDHost | FlagDecodeOctalHost | FlagDecodeHexHost | FlagRemoveUnnecessaryHostDots | FlagRemoveEmptyPortSeparator +) +``` + +For convenience, the set of flags `FlagsSafe`, `FlagsUsuallySafe[Greedy|NonGreedy]`, `FlagsUnsafe[Greedy|NonGreedy]` and `FlagsAll[Greedy|NonGreedy]` are provided for the similarly grouped normalizations on [wikipedia's URL normalization page][wiki]. You can add (using the bitwise OR `|` operator) or remove (using the bitwise AND NOT `&^` operator) individual flags from the sets if required, to build your own custom set. + +The [full godoc reference is available on gopkgdoc][godoc]. 
+ +Some things to note: + +* `FlagDecodeUnnecessaryEscapes`, `FlagEncodeNecessaryEscapes`, `FlagUppercaseEscapes` and `FlagRemoveEmptyQuerySeparator` are always implicitly set, because internally, the URL string is parsed as an URL object, which automatically decodes unnecessary escapes, uppercases and encodes necessary ones, and removes empty query separators (an unnecessary `?` at the end of the url). So this operation cannot **not** be done. For this reason, `FlagRemoveEmptyQuerySeparator` (as well as the other three) has been included in the `FlagsSafe` convenience set, instead of `FlagsUnsafe`, where Wikipedia puts it. + +* The `FlagDecodeUnnecessaryEscapes` decodes the following escapes (*from -> to*): + - %24 -> $ + - %26 -> & + - %2B-%3B -> +,-./0123456789:; + - %3D -> = + - %40-%5A -> @ABCDEFGHIJKLMNOPQRSTUVWXYZ + - %5F -> _ + - %61-%7A -> abcdefghijklmnopqrstuvwxyz + - %7E -> ~ + + +* When the `NormalizeURL` function is used (passing an URL object), this source URL object is modified (that is, after the call, the URL object will be modified to reflect the normalization). + +* The *replace IP with domain name* normalization (`http://208.77.188.166/ → http://www.example.com/`) is obviously not possible for a library without making some network requests. This is not implemented in purell. + +* The *remove unused query string parameters* and *remove default query parameters* are also not implemented, since this is a very case-specific normalization, and it is quite trivial to do with an URL object. + +### Safe vs Usually Safe vs Unsafe + +Purell allows you to control the level of risk you take while normalizing an URL. You can aggressively normalize, play it totally safe, or anything in between. 
+ +Consider the following URL: + +`HTTPS://www.RooT.com/toto/t%45%1f///a/./b/../c/?z=3&w=2&a=4&w=1#invalid` + +Normalizing with the `FlagsSafe` gives: + +`https://www.root.com/toto/tE%1F///a/./b/../c/?z=3&w=2&a=4&w=1#invalid` + +With the `FlagsUsuallySafeGreedy`: + +`https://www.root.com/toto/tE%1F///a/c?z=3&w=2&a=4&w=1#invalid` + +And with `FlagsUnsafeGreedy`: + +`http://root.com/toto/tE%1F/a/c?a=4&w=1&w=2&z=3` + +## TODOs + +* Add a class/default instance to allow specifying custom directory index names? At the moment, removing directory index removes `(^|/)((?:default|index)\.\w{1,4})$`. + +## Thanks / Contributions + +@rogpeppe +@jehiah +@opennota +@pchristopher1275 +@zenovich +@beeker1121 + +## License + +The [BSD 3-Clause license][bsd]. + +[bsd]: http://opensource.org/licenses/BSD-3-Clause +[wiki]: http://en.wikipedia.org/wiki/URL_normalization +[rfc]: http://tools.ietf.org/html/rfc3986#section-6 +[godoc]: http://go.pkgdoc.org/github.com/PuerkitoBio/purell +[pr5]: https://github.com/PuerkitoBio/purell/pull/5 +[iss7]: https://github.com/PuerkitoBio/purell/issues/7 diff --git a/vendor/github.com/PuerkitoBio/purell/bench_test.go b/vendor/github.com/PuerkitoBio/purell/bench_test.go new file mode 100644 index 00000000..7549731f --- /dev/null +++ b/vendor/github.com/PuerkitoBio/purell/bench_test.go @@ -0,0 +1,57 @@ +package purell + +import ( + "testing" +) + +var ( + safeUrl = "HttPS://..iaMHost..Test:443/paTh^A%ef//./%41PaTH/..//?" 
+ usuallySafeUrl = "HttPS://..iaMHost..Test:443/paTh^A%ef//./%41PaTH/../final/" + unsafeUrl = "HttPS://..www.iaMHost..Test:443/paTh^A%ef//./%41PaTH/../final/index.html?t=val1&a=val4&z=val5&a=val1#fragment" + allDWORDUrl = "HttPS://1113982867:/paTh^A%ef//./%41PaTH/../final/index.html?t=val1&a=val4&z=val5&a=val1#fragment" + allOctalUrl = "HttPS://0102.0146.07.0223:/paTh^A%ef//./%41PaTH/../final/index.html?t=val1&a=val4&z=val5&a=val1#fragment" + allHexUrl = "HttPS://0x42660793:/paTh^A%ef//./%41PaTH/../final/index.html?t=val1&a=val4&z=val5&a=val1#fragment" + allCombinedUrl = "HttPS://..0x42660793.:/paTh^A%ef//./%41PaTH/../final/index.html?t=val1&a=val4&z=val5&a=val1#fragment" +) + +func BenchmarkSafe(b *testing.B) { + for i := 0; i < b.N; i++ { + NormalizeURLString(safeUrl, FlagsSafe) + } +} + +func BenchmarkUsuallySafe(b *testing.B) { + for i := 0; i < b.N; i++ { + NormalizeURLString(usuallySafeUrl, FlagsUsuallySafeGreedy) + } +} + +func BenchmarkUnsafe(b *testing.B) { + for i := 0; i < b.N; i++ { + NormalizeURLString(unsafeUrl, FlagsUnsafeGreedy) + } +} + +func BenchmarkAllDWORD(b *testing.B) { + for i := 0; i < b.N; i++ { + NormalizeURLString(allDWORDUrl, FlagsAllGreedy) + } +} + +func BenchmarkAllOctal(b *testing.B) { + for i := 0; i < b.N; i++ { + NormalizeURLString(allOctalUrl, FlagsAllGreedy) + } +} + +func BenchmarkAllHex(b *testing.B) { + for i := 0; i < b.N; i++ { + NormalizeURLString(allHexUrl, FlagsAllGreedy) + } +} + +func BenchmarkAllCombined(b *testing.B) { + for i := 0; i < b.N; i++ { + NormalizeURLString(allCombinedUrl, FlagsAllGreedy) + } +} diff --git a/vendor/github.com/PuerkitoBio/purell/benchmarks/v0.1.0 b/vendor/github.com/PuerkitoBio/purell/benchmarks/v0.1.0 new file mode 100644 index 00000000..3bbe7113 --- /dev/null +++ b/vendor/github.com/PuerkitoBio/purell/benchmarks/v0.1.0 @@ -0,0 +1,9 @@ +PASS +BenchmarkSafe 500000 6131 ns/op +BenchmarkUsuallySafe 200000 7864 ns/op +BenchmarkUnsafe 100000 28560 ns/op +BenchmarkAllDWORD 50000 38722 ns/op 
+BenchmarkAllOctal 50000 40941 ns/op +BenchmarkAllHex 50000 44063 ns/op +BenchmarkAllCombined 50000 33613 ns/op +ok github.com/PuerkitoBio/purell 17.404s diff --git a/vendor/github.com/PuerkitoBio/purell/example_test.go b/vendor/github.com/PuerkitoBio/purell/example_test.go new file mode 100644 index 00000000..997b9536 --- /dev/null +++ b/vendor/github.com/PuerkitoBio/purell/example_test.go @@ -0,0 +1,35 @@ +package purell + +import ( + "fmt" + "net/url" +) + +func ExampleNormalizeURLString() { + if normalized, err := NormalizeURLString("hTTp://someWEBsite.com:80/Amazing%3f/url/", + FlagLowercaseScheme|FlagLowercaseHost|FlagUppercaseEscapes); err != nil { + panic(err) + } else { + fmt.Print(normalized) + } + // Output: http://somewebsite.com:80/Amazing%3F/url/ +} + +func ExampleMustNormalizeURLString() { + normalized := MustNormalizeURLString("hTTpS://someWEBsite.com:443/Amazing%fa/url/", + FlagsUnsafeGreedy) + fmt.Print(normalized) + + // Output: http://somewebsite.com/Amazing%FA/url +} + +func ExampleNormalizeURL() { + if u, err := url.Parse("Http://SomeUrl.com:8080/a/b/.././c///g?c=3&a=1&b=9&c=0#target"); err != nil { + panic(err) + } else { + normalized := NormalizeURL(u, FlagsUsuallySafeGreedy|FlagRemoveDuplicateSlashes|FlagRemoveFragment) + fmt.Print(normalized) + } + + // Output: http://someurl.com:8080/a/c/g?c=3&a=1&b=9&c=0 +} diff --git a/vendor/github.com/PuerkitoBio/purell/purell.go b/vendor/github.com/PuerkitoBio/purell/purell.go new file mode 100644 index 00000000..645e1b76 --- /dev/null +++ b/vendor/github.com/PuerkitoBio/purell/purell.go @@ -0,0 +1,379 @@ +/* +Package purell offers URL normalization as described on the wikipedia page: +http://en.wikipedia.org/wiki/URL_normalization +*/ +package purell + +import ( + "bytes" + "fmt" + "net/url" + "regexp" + "sort" + "strconv" + "strings" + + "github.com/PuerkitoBio/urlesc" + "golang.org/x/net/idna" + "golang.org/x/text/unicode/norm" + "golang.org/x/text/width" +) + +// A set of normalization flags 
determines how a URL will +// be normalized. +type NormalizationFlags uint + +const ( + // Safe normalizations + FlagLowercaseScheme NormalizationFlags = 1 << iota // HTTP://host -> http://host, applied by default in Go1.1 + FlagLowercaseHost // http://HOST -> http://host + FlagUppercaseEscapes // http://host/t%ef -> http://host/t%EF + FlagDecodeUnnecessaryEscapes // http://host/t%41 -> http://host/tA + FlagEncodeNecessaryEscapes // http://host/!"#$ -> http://host/%21%22#$ + FlagRemoveDefaultPort // http://host:80 -> http://host + FlagRemoveEmptyQuerySeparator // http://host/path? -> http://host/path + + // Usually safe normalizations + FlagRemoveTrailingSlash // http://host/path/ -> http://host/path + FlagAddTrailingSlash // http://host/path -> http://host/path/ (should choose only one of these add/remove trailing slash flags) + FlagRemoveDotSegments // http://host/path/./a/b/../c -> http://host/path/a/c + + // Unsafe normalizations + FlagRemoveDirectoryIndex // http://host/path/index.html -> http://host/path/ + FlagRemoveFragment // http://host/path#fragment -> http://host/path + FlagForceHTTP // https://host -> http://host + FlagRemoveDuplicateSlashes // http://host/path//a///b -> http://host/path/a/b + FlagRemoveWWW // http://www.host/ -> http://host/ + FlagAddWWW // http://host/ -> http://www.host/ (should choose only one of these add/remove WWW flags) + FlagSortQuery // http://host/path?c=3&b=2&a=1&b=1 -> http://host/path?a=1&b=1&b=2&c=3 + + // Normalizations not in the wikipedia article, required to cover tests cases + // submitted by jehiah + FlagDecodeDWORDHost // http://1113982867 -> http://66.102.7.147 + FlagDecodeOctalHost // http://0102.0146.07.0223 -> http://66.102.7.147 + FlagDecodeHexHost // http://0x42660793 -> http://66.102.7.147 + FlagRemoveUnnecessaryHostDots // http://.host../path -> http://host/path + FlagRemoveEmptyPortSeparator // http://host:/path -> http://host/path + + // Convenience set of safe normalizations + FlagsSafe 
NormalizationFlags = FlagLowercaseHost | FlagLowercaseScheme | FlagUppercaseEscapes | FlagDecodeUnnecessaryEscapes | FlagEncodeNecessaryEscapes | FlagRemoveDefaultPort | FlagRemoveEmptyQuerySeparator + + // For convenience sets, "greedy" uses the "remove trailing slash" and "remove www. prefix" flags, + // while "non-greedy" uses the "add (or keep) the trailing slash" and "add www. prefix". + + // Convenience set of usually safe normalizations (includes FlagsSafe) + FlagsUsuallySafeGreedy NormalizationFlags = FlagsSafe | FlagRemoveTrailingSlash | FlagRemoveDotSegments + FlagsUsuallySafeNonGreedy NormalizationFlags = FlagsSafe | FlagAddTrailingSlash | FlagRemoveDotSegments + + // Convenience set of unsafe normalizations (includes FlagsUsuallySafe) + FlagsUnsafeGreedy NormalizationFlags = FlagsUsuallySafeGreedy | FlagRemoveDirectoryIndex | FlagRemoveFragment | FlagForceHTTP | FlagRemoveDuplicateSlashes | FlagRemoveWWW | FlagSortQuery + FlagsUnsafeNonGreedy NormalizationFlags = FlagsUsuallySafeNonGreedy | FlagRemoveDirectoryIndex | FlagRemoveFragment | FlagForceHTTP | FlagRemoveDuplicateSlashes | FlagAddWWW | FlagSortQuery + + // Convenience set of all available flags + FlagsAllGreedy = FlagsUnsafeGreedy | FlagDecodeDWORDHost | FlagDecodeOctalHost | FlagDecodeHexHost | FlagRemoveUnnecessaryHostDots | FlagRemoveEmptyPortSeparator + FlagsAllNonGreedy = FlagsUnsafeNonGreedy | FlagDecodeDWORDHost | FlagDecodeOctalHost | FlagDecodeHexHost | FlagRemoveUnnecessaryHostDots | FlagRemoveEmptyPortSeparator +) + +const ( + defaultHttpPort = ":80" + defaultHttpsPort = ":443" +) + +// Regular expressions used by the normalizations +var rxPort = regexp.MustCompile(`(:\d+)/?$`) +var rxDirIndex = regexp.MustCompile(`(^|/)((?:default|index)\.\w{1,4})$`) +var rxDupSlashes = regexp.MustCompile(`/{2,}`) +var rxDWORDHost = regexp.MustCompile(`^(\d+)((?:\.+)?(?:\:\d*)?)$`) +var rxOctalHost = regexp.MustCompile(`^(0\d*)\.(0\d*)\.(0\d*)\.(0\d*)((?:\.+)?(?:\:\d*)?)$`) +var rxHexHost = 
regexp.MustCompile(`^0x([0-9A-Fa-f]+)((?:\.+)?(?:\:\d*)?)$`) +var rxHostDots = regexp.MustCompile(`^(.+?)(:\d+)?$`) +var rxEmptyPort = regexp.MustCompile(`:+$`) + +// Map of flags to implementation function. +// FlagDecodeUnnecessaryEscapes has no action, since it is done automatically +// by parsing the string as an URL. Same for FlagUppercaseEscapes and FlagRemoveEmptyQuerySeparator. + +// Since maps have undefined traversing order, make a slice of ordered keys +var flagsOrder = []NormalizationFlags{ + FlagLowercaseScheme, + FlagLowercaseHost, + FlagRemoveDefaultPort, + FlagRemoveDirectoryIndex, + FlagRemoveDotSegments, + FlagRemoveFragment, + FlagForceHTTP, // Must be after remove default port (because https=443/http=80) + FlagRemoveDuplicateSlashes, + FlagRemoveWWW, + FlagAddWWW, + FlagSortQuery, + FlagDecodeDWORDHost, + FlagDecodeOctalHost, + FlagDecodeHexHost, + FlagRemoveUnnecessaryHostDots, + FlagRemoveEmptyPortSeparator, + FlagRemoveTrailingSlash, // These two (add/remove trailing slash) must be last + FlagAddTrailingSlash, +} + +// ... and then the map, where order is unimportant +var flags = map[NormalizationFlags]func(*url.URL){ + FlagLowercaseScheme: lowercaseScheme, + FlagLowercaseHost: lowercaseHost, + FlagRemoveDefaultPort: removeDefaultPort, + FlagRemoveDirectoryIndex: removeDirectoryIndex, + FlagRemoveDotSegments: removeDotSegments, + FlagRemoveFragment: removeFragment, + FlagForceHTTP: forceHTTP, + FlagRemoveDuplicateSlashes: removeDuplicateSlashes, + FlagRemoveWWW: removeWWW, + FlagAddWWW: addWWW, + FlagSortQuery: sortQuery, + FlagDecodeDWORDHost: decodeDWORDHost, + FlagDecodeOctalHost: decodeOctalHost, + FlagDecodeHexHost: decodeHexHost, + FlagRemoveUnnecessaryHostDots: removeUnncessaryHostDots, + FlagRemoveEmptyPortSeparator: removeEmptyPortSeparator, + FlagRemoveTrailingSlash: removeTrailingSlash, + FlagAddTrailingSlash: addTrailingSlash, +} + +// MustNormalizeURLString returns the normalized string, and panics if an error occurs. 
+// It takes an URL string as input, as well as the normalization flags. +func MustNormalizeURLString(u string, f NormalizationFlags) string { + result, e := NormalizeURLString(u, f) + if e != nil { + panic(e) + } + return result +} + +// NormalizeURLString returns the normalized string, or an error if it can't be parsed into an URL object. +// It takes an URL string as input, as well as the normalization flags. +func NormalizeURLString(u string, f NormalizationFlags) (string, error) { + parsed, err := url.Parse(u) + if err != nil { + return "", err + } + + if f&FlagLowercaseHost == FlagLowercaseHost { + parsed.Host = strings.ToLower(parsed.Host) + } + + // The idna package doesn't fully conform to RFC 5895 + // (https://tools.ietf.org/html/rfc5895), so we do it here. + // Taken from Go 1.8 cycle source, courtesy of bradfitz. + // TODO: Remove when (if?) idna package conforms to RFC 5895. + parsed.Host = width.Fold.String(parsed.Host) + parsed.Host = norm.NFC.String(parsed.Host) + if parsed.Host, err = idna.ToASCII(parsed.Host); err != nil { + return "", err + } + + return NormalizeURL(parsed, f), nil +} + +// NormalizeURL returns the normalized string. +// It takes a parsed URL object as input, as well as the normalization flags. 
+func NormalizeURL(u *url.URL, f NormalizationFlags) string { + for _, k := range flagsOrder { + if f&k == k { + flags[k](u) + } + } + return urlesc.Escape(u) +} + +func lowercaseScheme(u *url.URL) { + if len(u.Scheme) > 0 { + u.Scheme = strings.ToLower(u.Scheme) + } +} + +func lowercaseHost(u *url.URL) { + if len(u.Host) > 0 { + u.Host = strings.ToLower(u.Host) + } +} + +func removeDefaultPort(u *url.URL) { + if len(u.Host) > 0 { + scheme := strings.ToLower(u.Scheme) + u.Host = rxPort.ReplaceAllStringFunc(u.Host, func(val string) string { + if (scheme == "http" && val == defaultHttpPort) || (scheme == "https" && val == defaultHttpsPort) { + return "" + } + return val + }) + } +} + +func removeTrailingSlash(u *url.URL) { + if l := len(u.Path); l > 0 { + if strings.HasSuffix(u.Path, "/") { + u.Path = u.Path[:l-1] + } + } else if l = len(u.Host); l > 0 { + if strings.HasSuffix(u.Host, "/") { + u.Host = u.Host[:l-1] + } + } +} + +func addTrailingSlash(u *url.URL) { + if l := len(u.Path); l > 0 { + if !strings.HasSuffix(u.Path, "/") { + u.Path += "/" + } + } else if l = len(u.Host); l > 0 { + if !strings.HasSuffix(u.Host, "/") { + u.Host += "/" + } + } +} + +func removeDotSegments(u *url.URL) { + if len(u.Path) > 0 { + var dotFree []string + var lastIsDot bool + + sections := strings.Split(u.Path, "/") + for _, s := range sections { + if s == ".." { + if len(dotFree) > 0 { + dotFree = dotFree[:len(dotFree)-1] + } + } else if s != "." { + dotFree = append(dotFree, s) + } + lastIsDot = (s == "." 
|| s == "..") + } + // Special case if host does not end with / and new path does not begin with / + u.Path = strings.Join(dotFree, "/") + if u.Host != "" && !strings.HasSuffix(u.Host, "/") && !strings.HasPrefix(u.Path, "/") { + u.Path = "/" + u.Path + } + // Special case if the last segment was a dot, make sure the path ends with a slash + if lastIsDot && !strings.HasSuffix(u.Path, "/") { + u.Path += "/" + } + } +} + +func removeDirectoryIndex(u *url.URL) { + if len(u.Path) > 0 { + u.Path = rxDirIndex.ReplaceAllString(u.Path, "$1") + } +} + +func removeFragment(u *url.URL) { + u.Fragment = "" +} + +func forceHTTP(u *url.URL) { + if strings.ToLower(u.Scheme) == "https" { + u.Scheme = "http" + } +} + +func removeDuplicateSlashes(u *url.URL) { + if len(u.Path) > 0 { + u.Path = rxDupSlashes.ReplaceAllString(u.Path, "/") + } +} + +func removeWWW(u *url.URL) { + if len(u.Host) > 0 && strings.HasPrefix(strings.ToLower(u.Host), "www.") { + u.Host = u.Host[4:] + } +} + +func addWWW(u *url.URL) { + if len(u.Host) > 0 && !strings.HasPrefix(strings.ToLower(u.Host), "www.") { + u.Host = "www." 
+ u.Host + } +} + +func sortQuery(u *url.URL) { + q := u.Query() + + if len(q) > 0 { + arKeys := make([]string, len(q)) + i := 0 + for k, _ := range q { + arKeys[i] = k + i++ + } + sort.Strings(arKeys) + buf := new(bytes.Buffer) + for _, k := range arKeys { + sort.Strings(q[k]) + for _, v := range q[k] { + if buf.Len() > 0 { + buf.WriteRune('&') + } + buf.WriteString(fmt.Sprintf("%s=%s", k, urlesc.QueryEscape(v))) + } + } + + // Rebuild the raw query string + u.RawQuery = buf.String() + } +} + +func decodeDWORDHost(u *url.URL) { + if len(u.Host) > 0 { + if matches := rxDWORDHost.FindStringSubmatch(u.Host); len(matches) > 2 { + var parts [4]int64 + + dword, _ := strconv.ParseInt(matches[1], 10, 0) + for i, shift := range []uint{24, 16, 8, 0} { + parts[i] = dword >> shift & 0xFF + } + u.Host = fmt.Sprintf("%d.%d.%d.%d%s", parts[0], parts[1], parts[2], parts[3], matches[2]) + } + } +} + +func decodeOctalHost(u *url.URL) { + if len(u.Host) > 0 { + if matches := rxOctalHost.FindStringSubmatch(u.Host); len(matches) > 5 { + var parts [4]int64 + + for i := 1; i <= 4; i++ { + parts[i-1], _ = strconv.ParseInt(matches[i], 8, 0) + } + u.Host = fmt.Sprintf("%d.%d.%d.%d%s", parts[0], parts[1], parts[2], parts[3], matches[5]) + } + } +} + +func decodeHexHost(u *url.URL) { + if len(u.Host) > 0 { + if matches := rxHexHost.FindStringSubmatch(u.Host); len(matches) > 2 { + // Conversion is safe because of regex validation + parsed, _ := strconv.ParseInt(matches[1], 16, 0) + // Set host as DWORD (base 10) encoded host + u.Host = fmt.Sprintf("%d%s", parsed, matches[2]) + // The rest is the same as decoding a DWORD host + decodeDWORDHost(u) + } + } +} + +func removeUnncessaryHostDots(u *url.URL) { + if len(u.Host) > 0 { + if matches := rxHostDots.FindStringSubmatch(u.Host); len(matches) > 1 { + // Trim the leading and trailing dots + u.Host = strings.Trim(matches[1], ".") + if len(matches) > 2 { + u.Host += matches[2] + } + } + } +} + +func removeEmptyPortSeparator(u *url.URL) { + if 
len(u.Host) > 0 { + u.Host = rxEmptyPort.ReplaceAllString(u.Host, "") + } +} diff --git a/vendor/github.com/PuerkitoBio/purell/purell_test.go b/vendor/github.com/PuerkitoBio/purell/purell_test.go new file mode 100644 index 00000000..a3732e5a --- /dev/null +++ b/vendor/github.com/PuerkitoBio/purell/purell_test.go @@ -0,0 +1,768 @@ +package purell + +import ( + "fmt" + "net/url" + "testing" +) + +type testCase struct { + nm string + src string + flgs NormalizationFlags + res string + parsed bool +} + +var ( + cases = [...]*testCase{ + &testCase{ + "LowerScheme", + "HTTP://www.SRC.ca", + FlagLowercaseScheme, + "http://www.SRC.ca", + false, + }, + &testCase{ + "LowerScheme2", + "http://www.SRC.ca", + FlagLowercaseScheme, + "http://www.SRC.ca", + false, + }, + &testCase{ + "LowerHost", + "HTTP://www.SRC.ca/", + FlagLowercaseHost, + "http://www.src.ca/", // Since Go1.1, scheme is automatically lowercased + false, + }, + &testCase{ + "UpperEscapes", + `http://www.whatever.com/Some%aa%20Special%8Ecases/`, + FlagUppercaseEscapes, + "http://www.whatever.com/Some%AA%20Special%8Ecases/", + false, + }, + &testCase{ + "UnnecessaryEscapes", + `http://www.toto.com/%41%42%2E%44/%32%33%52%2D/%5f%7E`, + FlagDecodeUnnecessaryEscapes, + "http://www.toto.com/AB.D/23R-/_~", + false, + }, + &testCase{ + "RemoveDefaultPort", + "HTTP://www.SRC.ca:80/", + FlagRemoveDefaultPort, + "http://www.SRC.ca/", // Since Go1.1, scheme is automatically lowercased + false, + }, + &testCase{ + "RemoveDefaultPort2", + "HTTP://www.SRC.ca:80", + FlagRemoveDefaultPort, + "http://www.SRC.ca", // Since Go1.1, scheme is automatically lowercased + false, + }, + &testCase{ + "RemoveDefaultPort3", + "HTTP://www.SRC.ca:8080", + FlagRemoveDefaultPort, + "http://www.SRC.ca:8080", // Since Go1.1, scheme is automatically lowercased + false, + }, + &testCase{ + "Safe", + "HTTP://www.SRC.ca:80/to%1ato%8b%ee/OKnow%41%42%43%7e", + FlagsSafe, + "http://www.src.ca/to%1Ato%8B%EE/OKnowABC~", + false, + }, + &testCase{ + 
"BothLower", + "HTTP://www.SRC.ca:80/to%1ato%8b%ee/OKnow%41%42%43%7e", + FlagLowercaseHost | FlagLowercaseScheme, + "http://www.src.ca:80/to%1Ato%8B%EE/OKnowABC~", + false, + }, + &testCase{ + "RemoveTrailingSlash", + "HTTP://www.SRC.ca:80/", + FlagRemoveTrailingSlash, + "http://www.SRC.ca:80", // Since Go1.1, scheme is automatically lowercased + false, + }, + &testCase{ + "RemoveTrailingSlash2", + "HTTP://www.SRC.ca:80/toto/titi/", + FlagRemoveTrailingSlash, + "http://www.SRC.ca:80/toto/titi", // Since Go1.1, scheme is automatically lowercased + false, + }, + &testCase{ + "RemoveTrailingSlash3", + "HTTP://www.SRC.ca:80/toto/titi/fin/?a=1", + FlagRemoveTrailingSlash, + "http://www.SRC.ca:80/toto/titi/fin?a=1", // Since Go1.1, scheme is automatically lowercased + false, + }, + &testCase{ + "AddTrailingSlash", + "HTTP://www.SRC.ca:80", + FlagAddTrailingSlash, + "http://www.SRC.ca:80/", // Since Go1.1, scheme is automatically lowercased + false, + }, + &testCase{ + "AddTrailingSlash2", + "HTTP://www.SRC.ca:80/toto/titi.html", + FlagAddTrailingSlash, + "http://www.SRC.ca:80/toto/titi.html/", // Since Go1.1, scheme is automatically lowercased + false, + }, + &testCase{ + "AddTrailingSlash3", + "HTTP://www.SRC.ca:80/toto/titi/fin?a=1", + FlagAddTrailingSlash, + "http://www.SRC.ca:80/toto/titi/fin/?a=1", // Since Go1.1, scheme is automatically lowercased + false, + }, + &testCase{ + "RemoveDotSegments", + "HTTP://root/a/b/./../../c/", + FlagRemoveDotSegments, + "http://root/c/", // Since Go1.1, scheme is automatically lowercased + false, + }, + &testCase{ + "RemoveDotSegments2", + "HTTP://root/../a/b/./../c/../d", + FlagRemoveDotSegments, + "http://root/a/d", // Since Go1.1, scheme is automatically lowercased + false, + }, + &testCase{ + "UsuallySafe", + "HTTP://www.SRC.ca:80/to%1ato%8b%ee/./c/d/../OKnow%41%42%43%7e/?a=b#test", + FlagsUsuallySafeGreedy, + "http://www.src.ca/to%1Ato%8B%EE/c/OKnowABC~?a=b#test", + false, + }, + &testCase{ + "RemoveDirectoryIndex", + 
"HTTP://root/a/b/c/default.aspx", + FlagRemoveDirectoryIndex, + "http://root/a/b/c/", // Since Go1.1, scheme is automatically lowercased + false, + }, + &testCase{ + "RemoveDirectoryIndex2", + "HTTP://root/a/b/c/default#a=b", + FlagRemoveDirectoryIndex, + "http://root/a/b/c/default#a=b", // Since Go1.1, scheme is automatically lowercased + false, + }, + &testCase{ + "RemoveFragment", + "HTTP://root/a/b/c/default#toto=tata", + FlagRemoveFragment, + "http://root/a/b/c/default", // Since Go1.1, scheme is automatically lowercased + false, + }, + &testCase{ + "ForceHTTP", + "https://root/a/b/c/default#toto=tata", + FlagForceHTTP, + "http://root/a/b/c/default#toto=tata", + false, + }, + &testCase{ + "RemoveDuplicateSlashes", + "https://root/a//b///c////default#toto=tata", + FlagRemoveDuplicateSlashes, + "https://root/a/b/c/default#toto=tata", + false, + }, + &testCase{ + "RemoveDuplicateSlashes2", + "https://root//a//b///c////default#toto=tata", + FlagRemoveDuplicateSlashes, + "https://root/a/b/c/default#toto=tata", + false, + }, + &testCase{ + "RemoveWWW", + "https://www.root/a/b/c/", + FlagRemoveWWW, + "https://root/a/b/c/", + false, + }, + &testCase{ + "RemoveWWW2", + "https://WwW.Root/a/b/c/", + FlagRemoveWWW, + "https://Root/a/b/c/", + false, + }, + &testCase{ + "AddWWW", + "https://Root/a/b/c/", + FlagAddWWW, + "https://www.Root/a/b/c/", + false, + }, + &testCase{ + "SortQuery", + "http://root/toto/?b=4&a=1&c=3&b=2&a=5", + FlagSortQuery, + "http://root/toto/?a=1&a=5&b=2&b=4&c=3", + false, + }, + &testCase{ + "RemoveEmptyQuerySeparator", + "http://root/toto/?", + FlagRemoveEmptyQuerySeparator, + "http://root/toto/", + false, + }, + &testCase{ + "Unsafe", + "HTTPS://www.RooT.com/toto/t%45%1f///a/./b/../c/?z=3&w=2&a=4&w=1#invalid", + FlagsUnsafeGreedy, + "http://root.com/toto/tE%1F/a/c?a=4&w=1&w=2&z=3", + false, + }, + &testCase{ + "Safe2", + "HTTPS://www.RooT.com/toto/t%45%1f///a/./b/../c/?z=3&w=2&a=4&w=1#invalid", + FlagsSafe, + 
"https://www.root.com/toto/tE%1F///a/./b/../c/?z=3&w=2&a=4&w=1#invalid", + false, + }, + &testCase{ + "UsuallySafe2", + "HTTPS://www.RooT.com/toto/t%45%1f///a/./b/../c/?z=3&w=2&a=4&w=1#invalid", + FlagsUsuallySafeGreedy, + "https://www.root.com/toto/tE%1F///a/c?z=3&w=2&a=4&w=1#invalid", + false, + }, + &testCase{ + "AddTrailingSlashBug", + "http://src.ca/", + FlagsAllNonGreedy, + "http://www.src.ca/", + false, + }, + &testCase{ + "SourceModified", + "HTTPS://www.RooT.com/toto/t%45%1f///a/./b/../c/?z=3&w=2&a=4&w=1#invalid", + FlagsUnsafeGreedy, + "http://root.com/toto/tE%1F/a/c?a=4&w=1&w=2&z=3", + true, + }, + &testCase{ + "IPv6-1", + "http://[2001:db8:1f70::999:de8:7648:6e8]/test", + FlagsSafe | FlagRemoveDotSegments, + "http://[2001:db8:1f70::999:de8:7648:6e8]/test", + false, + }, + &testCase{ + "IPv6-2", + "http://[::ffff:192.168.1.1]/test", + FlagsSafe | FlagRemoveDotSegments, + "http://[::ffff:192.168.1.1]/test", + false, + }, + &testCase{ + "IPv6-3", + "http://[::ffff:192.168.1.1]:80/test", + FlagsSafe | FlagRemoveDotSegments, + "http://[::ffff:192.168.1.1]/test", + false, + }, + &testCase{ + "IPv6-4", + "htTps://[::fFff:192.168.1.1]:443/test", + FlagsSafe | FlagRemoveDotSegments, + "https://[::ffff:192.168.1.1]/test", + false, + }, + &testCase{ + "FTP", + "ftp://user:pass@ftp.foo.net/foo/bar", + FlagsSafe | FlagRemoveDotSegments, + "ftp://user:pass@ftp.foo.net/foo/bar", + false, + }, + &testCase{ + "Standard-1", + "http://www.foo.com:80/foo", + FlagsSafe | FlagRemoveDotSegments, + "http://www.foo.com/foo", + false, + }, + &testCase{ + "Standard-2", + "http://www.foo.com:8000/foo", + FlagsSafe | FlagRemoveDotSegments, + "http://www.foo.com:8000/foo", + false, + }, + &testCase{ + "Standard-3", + "http://www.foo.com/%7ebar", + FlagsSafe | FlagRemoveDotSegments, + "http://www.foo.com/~bar", + false, + }, + &testCase{ + "Standard-4", + "http://www.foo.com/%7Ebar", + FlagsSafe | FlagRemoveDotSegments, + "http://www.foo.com/~bar", + false, + }, + &testCase{ + 
"Standard-5", + "http://USER:pass@www.Example.COM/foo/bar", + FlagsSafe | FlagRemoveDotSegments, + "http://USER:pass@www.example.com/foo/bar", + false, + }, + &testCase{ + "Standard-6", + "http://test.example/?a=%26&b=1", + FlagsSafe | FlagRemoveDotSegments, + "http://test.example/?a=%26&b=1", + false, + }, + &testCase{ + "Standard-7", + "http://test.example/%25/?p=%20val%20%25", + FlagsSafe | FlagRemoveDotSegments, + "http://test.example/%25/?p=%20val%20%25", + false, + }, + &testCase{ + "Standard-8", + "http://test.example/path/with a%20space+/", + FlagsSafe | FlagRemoveDotSegments, + "http://test.example/path/with%20a%20space+/", + false, + }, + &testCase{ + "Standard-9", + "http://test.example/?", + FlagsSafe | FlagRemoveDotSegments, + "http://test.example/", + false, + }, + &testCase{ + "Standard-10", + "http://a.COM/path/?b&a", + FlagsSafe | FlagRemoveDotSegments, + "http://a.com/path/?b&a", + false, + }, + &testCase{ + "StandardCasesAddTrailingSlash", + "http://test.example?", + FlagsSafe | FlagAddTrailingSlash, + "http://test.example/", + false, + }, + &testCase{ + "OctalIP-1", + "http://0123.011.0.4/", + FlagsSafe | FlagDecodeOctalHost, + "http://0123.011.0.4/", + false, + }, + &testCase{ + "OctalIP-2", + "http://0102.0146.07.0223/", + FlagsSafe | FlagDecodeOctalHost, + "http://66.102.7.147/", + false, + }, + &testCase{ + "OctalIP-3", + "http://0102.0146.07.0223.:23/", + FlagsSafe | FlagDecodeOctalHost, + "http://66.102.7.147.:23/", + false, + }, + &testCase{ + "OctalIP-4", + "http://USER:pass@0102.0146.07.0223../", + FlagsSafe | FlagDecodeOctalHost, + "http://USER:pass@66.102.7.147../", + false, + }, + &testCase{ + "DWORDIP-1", + "http://123.1113982867/", + FlagsSafe | FlagDecodeDWORDHost, + "http://123.1113982867/", + false, + }, + &testCase{ + "DWORDIP-2", + "http://1113982867/", + FlagsSafe | FlagDecodeDWORDHost, + "http://66.102.7.147/", + false, + }, + &testCase{ + "DWORDIP-3", + "http://1113982867.:23/", + FlagsSafe | FlagDecodeDWORDHost, + 
"http://66.102.7.147.:23/", + false, + }, + &testCase{ + "DWORDIP-4", + "http://USER:pass@1113982867../", + FlagsSafe | FlagDecodeDWORDHost, + "http://USER:pass@66.102.7.147../", + false, + }, + &testCase{ + "HexIP-1", + "http://0x123.1113982867/", + FlagsSafe | FlagDecodeHexHost, + "http://0x123.1113982867/", + false, + }, + &testCase{ + "HexIP-2", + "http://0x42660793/", + FlagsSafe | FlagDecodeHexHost, + "http://66.102.7.147/", + false, + }, + &testCase{ + "HexIP-3", + "http://0x42660793.:23/", + FlagsSafe | FlagDecodeHexHost, + "http://66.102.7.147.:23/", + false, + }, + &testCase{ + "HexIP-4", + "http://USER:pass@0x42660793../", + FlagsSafe | FlagDecodeHexHost, + "http://USER:pass@66.102.7.147../", + false, + }, + &testCase{ + "UnnecessaryHostDots-1", + "http://.www.foo.com../foo/bar.html", + FlagsSafe | FlagRemoveUnnecessaryHostDots, + "http://www.foo.com/foo/bar.html", + false, + }, + &testCase{ + "UnnecessaryHostDots-2", + "http://www.foo.com./foo/bar.html", + FlagsSafe | FlagRemoveUnnecessaryHostDots, + "http://www.foo.com/foo/bar.html", + false, + }, + &testCase{ + "UnnecessaryHostDots-3", + "http://www.foo.com.:81/foo", + FlagsSafe | FlagRemoveUnnecessaryHostDots, + "http://www.foo.com:81/foo", + false, + }, + &testCase{ + "UnnecessaryHostDots-4", + "http://www.example.com./", + FlagsSafe | FlagRemoveUnnecessaryHostDots, + "http://www.example.com/", + false, + }, + &testCase{ + "EmptyPort-1", + "http://www.thedraymin.co.uk:/main/?p=308", + FlagsSafe | FlagRemoveEmptyPortSeparator, + "http://www.thedraymin.co.uk/main/?p=308", + false, + }, + &testCase{ + "EmptyPort-2", + "http://www.src.ca:", + FlagsSafe | FlagRemoveEmptyPortSeparator, + "http://www.src.ca", + false, + }, + &testCase{ + "Slashes-1", + "http://test.example/foo/bar/.", + FlagsSafe | FlagRemoveDotSegments | FlagRemoveDuplicateSlashes, + "http://test.example/foo/bar/", + false, + }, + &testCase{ + "Slashes-2", + "http://test.example/foo/bar/./", + FlagsSafe | FlagRemoveDotSegments | 
FlagRemoveDuplicateSlashes, + "http://test.example/foo/bar/", + false, + }, + &testCase{ + "Slashes-3", + "http://test.example/foo/bar/..", + FlagsSafe | FlagRemoveDotSegments | FlagRemoveDuplicateSlashes, + "http://test.example/foo/", + false, + }, + &testCase{ + "Slashes-4", + "http://test.example/foo/bar/../", + FlagsSafe | FlagRemoveDotSegments | FlagRemoveDuplicateSlashes, + "http://test.example/foo/", + false, + }, + &testCase{ + "Slashes-5", + "http://test.example/foo/bar/../baz", + FlagsSafe | FlagRemoveDotSegments | FlagRemoveDuplicateSlashes, + "http://test.example/foo/baz", + false, + }, + &testCase{ + "Slashes-6", + "http://test.example/foo/bar/../..", + FlagsSafe | FlagRemoveDotSegments | FlagRemoveDuplicateSlashes, + "http://test.example/", + false, + }, + &testCase{ + "Slashes-7", + "http://test.example/foo/bar/../../", + FlagsSafe | FlagRemoveDotSegments | FlagRemoveDuplicateSlashes, + "http://test.example/", + false, + }, + &testCase{ + "Slashes-8", + "http://test.example/foo/bar/../../baz", + FlagsSafe | FlagRemoveDotSegments | FlagRemoveDuplicateSlashes, + "http://test.example/baz", + false, + }, + &testCase{ + "Slashes-9", + "http://test.example/foo/bar/../../../baz", + FlagsSafe | FlagRemoveDotSegments | FlagRemoveDuplicateSlashes, + "http://test.example/baz", + false, + }, + &testCase{ + "Slashes-10", + "http://test.example/foo/bar/../../../../baz", + FlagsSafe | FlagRemoveDotSegments | FlagRemoveDuplicateSlashes, + "http://test.example/baz", + false, + }, + &testCase{ + "Slashes-11", + "http://test.example/./foo", + FlagsSafe | FlagRemoveDotSegments | FlagRemoveDuplicateSlashes, + "http://test.example/foo", + false, + }, + &testCase{ + "Slashes-12", + "http://test.example/../foo", + FlagsSafe | FlagRemoveDotSegments | FlagRemoveDuplicateSlashes, + "http://test.example/foo", + false, + }, + &testCase{ + "Slashes-13", + "http://test.example/foo.", + FlagsSafe | FlagRemoveDotSegments | FlagRemoveDuplicateSlashes, + "http://test.example/foo.", + 
false, + }, + &testCase{ + "Slashes-14", + "http://test.example/.foo", + FlagsSafe | FlagRemoveDotSegments | FlagRemoveDuplicateSlashes, + "http://test.example/.foo", + false, + }, + &testCase{ + "Slashes-15", + "http://test.example/foo..", + FlagsSafe | FlagRemoveDotSegments | FlagRemoveDuplicateSlashes, + "http://test.example/foo..", + false, + }, + &testCase{ + "Slashes-16", + "http://test.example/..foo", + FlagsSafe | FlagRemoveDotSegments | FlagRemoveDuplicateSlashes, + "http://test.example/..foo", + false, + }, + &testCase{ + "Slashes-17", + "http://test.example/./../foo", + FlagsSafe | FlagRemoveDotSegments | FlagRemoveDuplicateSlashes, + "http://test.example/foo", + false, + }, + &testCase{ + "Slashes-18", + "http://test.example/./foo/.", + FlagsSafe | FlagRemoveDotSegments | FlagRemoveDuplicateSlashes, + "http://test.example/foo/", + false, + }, + &testCase{ + "Slashes-19", + "http://test.example/foo/./bar", + FlagsSafe | FlagRemoveDotSegments | FlagRemoveDuplicateSlashes, + "http://test.example/foo/bar", + false, + }, + &testCase{ + "Slashes-20", + "http://test.example/foo/../bar", + FlagsSafe | FlagRemoveDotSegments | FlagRemoveDuplicateSlashes, + "http://test.example/bar", + false, + }, + &testCase{ + "Slashes-21", + "http://test.example/foo//", + FlagsSafe | FlagRemoveDotSegments | FlagRemoveDuplicateSlashes, + "http://test.example/foo/", + false, + }, + &testCase{ + "Slashes-22", + "http://test.example/foo///bar//", + FlagsSafe | FlagRemoveDotSegments | FlagRemoveDuplicateSlashes, + "http://test.example/foo/bar/", + false, + }, + &testCase{ + "Relative", + "foo/bar", + FlagsAllGreedy, + "foo/bar", + false, + }, + &testCase{ + "Relative-1", + "./../foo", + FlagsSafe | FlagRemoveDotSegments | FlagRemoveDuplicateSlashes, + "foo", + false, + }, + &testCase{ + "Relative-2", + "./foo/bar/../baz/../bang/..", + FlagsSafe | FlagRemoveDotSegments | FlagRemoveDuplicateSlashes, + "foo/", + false, + }, + &testCase{ + "Relative-3", + "foo///bar//", + FlagsSafe | 
FlagRemoveDotSegments | FlagRemoveDuplicateSlashes, + "foo/bar/", + false, + }, + &testCase{ + "Relative-4", + "www.youtube.com", + FlagsUsuallySafeGreedy, + "www.youtube.com", + false, + }, + /*&testCase{ + "UrlNorm-5", + "http://ja.wikipedia.org/wiki/%E3%82%AD%E3%83%A3%E3%82%BF%E3%83%94%E3%83%A9%E3%83%BC%E3%82%B8%E3%83%A3%E3%83%91%E3%83%B3", + FlagsSafe | FlagRemoveDotSegments, + "http://ja.wikipedia.org/wiki/\xe3\x82\xad\xe3\x83\xa3\xe3\x82\xbf\xe3\x83\x94\xe3\x83\xa9\xe3\x83\xbc\xe3\x82\xb8\xe3\x83\xa3\xe3\x83\x91\xe3\x83\xb3", + false, + }, + &testCase{ + "UrlNorm-1", + "http://test.example/?a=%e3%82%82%26", + FlagsAllGreedy, + "http://test.example/?a=\xe3\x82\x82%26", + false, + },*/ + } +) + +func TestRunner(t *testing.T) { + for _, tc := range cases { + runCase(tc, t) + } +} + +func runCase(tc *testCase, t *testing.T) { + t.Logf("running %s...", tc.nm) + if tc.parsed { + u, e := url.Parse(tc.src) + if e != nil { + t.Errorf("%s - FAIL : %s", tc.nm, e) + return + } else { + NormalizeURL(u, tc.flgs) + if s := u.String(); s != tc.res { + t.Errorf("%s - FAIL expected '%s', got '%s'", tc.nm, tc.res, s) + } + } + } else { + if s, e := NormalizeURLString(tc.src, tc.flgs); e != nil { + t.Errorf("%s - FAIL : %s", tc.nm, e) + } else if s != tc.res { + t.Errorf("%s - FAIL expected '%s', got '%s'", tc.nm, tc.res, s) + } + } +} + +func TestDecodeUnnecessaryEscapesAll(t *testing.T) { + var url = "http://host/" + + for i := 0; i < 256; i++ { + url += fmt.Sprintf("%%%02x", i) + } + if s, e := NormalizeURLString(url, FlagDecodeUnnecessaryEscapes); e != nil { + t.Fatalf("Got error %s", e.Error()) + } else { + const want = 
"http://host/%00%01%02%03%04%05%06%07%08%09%0A%0B%0C%0D%0E%0F%10%11%12%13%14%15%16%17%18%19%1A%1B%1C%1D%1E%1F%20!%22%23$%25&'()*+,-./0123456789:;%3C=%3E%3F@ABCDEFGHIJKLMNOPQRSTUVWXYZ[%5C]%5E_%60abcdefghijklmnopqrstuvwxyz%7B%7C%7D~%7F%80%81%82%83%84%85%86%87%88%89%8A%8B%8C%8D%8E%8F%90%91%92%93%94%95%96%97%98%99%9A%9B%9C%9D%9E%9F%A0%A1%A2%A3%A4%A5%A6%A7%A8%A9%AA%AB%AC%AD%AE%AF%B0%B1%B2%B3%B4%B5%B6%B7%B8%B9%BA%BB%BC%BD%BE%BF%C0%C1%C2%C3%C4%C5%C6%C7%C8%C9%CA%CB%CC%CD%CE%CF%D0%D1%D2%D3%D4%D5%D6%D7%D8%D9%DA%DB%DC%DD%DE%DF%E0%E1%E2%E3%E4%E5%E6%E7%E8%E9%EA%EB%EC%ED%EE%EF%F0%F1%F2%F3%F4%F5%F6%F7%F8%F9%FA%FB%FC%FD%FE%FF" + if s != want { + t.Errorf("DecodeUnnecessaryEscapesAll:\nwant\n%s\ngot\n%s", want, s) + } + } +} + +func TestEncodeNecessaryEscapesAll(t *testing.T) { + var url = "http://host/" + + for i := 0; i < 256; i++ { + if i != 0x25 { + url += string(i) + } + } + if s, e := NormalizeURLString(url, FlagEncodeNecessaryEscapes); e != nil { + t.Fatalf("Got error %s", e.Error()) + } else { + const want = 
"http://host/%00%01%02%03%04%05%06%07%08%09%0A%0B%0C%0D%0E%0F%10%11%12%13%14%15%16%17%18%19%1A%1B%1C%1D%1E%1F%20!%22#$&'()*+,-./0123456789:;%3C=%3E?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[%5C]%5E_%60abcdefghijklmnopqrstuvwxyz%7B%7C%7D~%7F%C2%80%C2%81%C2%82%C2%83%C2%84%C2%85%C2%86%C2%87%C2%88%C2%89%C2%8A%C2%8B%C2%8C%C2%8D%C2%8E%C2%8F%C2%90%C2%91%C2%92%C2%93%C2%94%C2%95%C2%96%C2%97%C2%98%C2%99%C2%9A%C2%9B%C2%9C%C2%9D%C2%9E%C2%9F%C2%A0%C2%A1%C2%A2%C2%A3%C2%A4%C2%A5%C2%A6%C2%A7%C2%A8%C2%A9%C2%AA%C2%AB%C2%AC%C2%AD%C2%AE%C2%AF%C2%B0%C2%B1%C2%B2%C2%B3%C2%B4%C2%B5%C2%B6%C2%B7%C2%B8%C2%B9%C2%BA%C2%BB%C2%BC%C2%BD%C2%BE%C2%BF%C3%80%C3%81%C3%82%C3%83%C3%84%C3%85%C3%86%C3%87%C3%88%C3%89%C3%8A%C3%8B%C3%8C%C3%8D%C3%8E%C3%8F%C3%90%C3%91%C3%92%C3%93%C3%94%C3%95%C3%96%C3%97%C3%98%C3%99%C3%9A%C3%9B%C3%9C%C3%9D%C3%9E%C3%9F%C3%A0%C3%A1%C3%A2%C3%A3%C3%A4%C3%A5%C3%A6%C3%A7%C3%A8%C3%A9%C3%AA%C3%AB%C3%AC%C3%AD%C3%AE%C3%AF%C3%B0%C3%B1%C3%B2%C3%B3%C3%B4%C3%B5%C3%B6%C3%B7%C3%B8%C3%B9%C3%BA%C3%BB%C3%BC%C3%BD%C3%BE%C3%BF" + if s != want { + t.Errorf("EncodeNecessaryEscapesAll:\nwant\n%s\ngot\n%s", want, s) + } + } +} diff --git a/vendor/github.com/PuerkitoBio/purell/urlnorm_test.go b/vendor/github.com/PuerkitoBio/purell/urlnorm_test.go new file mode 100644 index 00000000..d1b2ca6c --- /dev/null +++ b/vendor/github.com/PuerkitoBio/purell/urlnorm_test.go @@ -0,0 +1,53 @@ +package purell + +import ( + "testing" +) + +// Test cases merged from PR #1 +// Originally from https://github.com/jehiah/urlnorm/blob/master/test_urlnorm.py + +func assertMap(t *testing.T, cases map[string]string, f NormalizationFlags) { + for bad, good := range cases { + s, e := NormalizeURLString(bad, f) + if e != nil { + t.Errorf("%s normalizing %v to %v", e.Error(), bad, good) + } else { + if s != good { + t.Errorf("source: %v expected: %v got: %v", bad, good, s) + } + } + } +} + +// This tests normalization to a unicode representation +// precent escapes for unreserved values are unescaped to their unicode value +// tests 
normalization to idna domains +// test ip word handling, ipv6 address handling, and trailing domain periods +// in general, this matches google chromes unescaping for things in the address bar. +// spaces are converted to '+' (perhaphs controversial) +// http://code.google.com/p/google-url/ probably is another good reference for this approach +func TestUrlnorm(t *testing.T) { + testcases := map[string]string{ + "http://test.example/?a=%e3%82%82%26": "http://test.example/?a=%e3%82%82%26", + //"http://test.example/?a=%e3%82%82%26": "http://test.example/?a=\xe3\x82\x82%26", //should return a unicode character + "http://s.xn--q-bga.DE/": "http://s.xn--q-bga.de/", //should be in idna format + "http://XBLA\u306eXbox.com": "http://xn--xblaxbox-jf4g.com", //test utf8 and unicode + "http://президент.рф": "http://xn--d1abbgf6aiiy.xn--p1ai", + "http://ПРЕЗИДЕНТ.РФ": "http://xn--d1abbgf6aiiy.xn--p1ai", + "http://ab¥ヲ₩○.com": "http://xn--ab-ida8983azmfnvs.com", //test width folding + "http://\u00e9.com": "http://xn--9ca.com", + "http://e\u0301.com": "http://xn--9ca.com", + "http://ja.wikipedia.org/wiki/%E3%82%AD%E3%83%A3%E3%82%BF%E3%83%94%E3%83%A9%E3%83%BC%E3%82%B8%E3%83%A3%E3%83%91%E3%83%B3": "http://ja.wikipedia.org/wiki/%E3%82%AD%E3%83%A3%E3%82%BF%E3%83%94%E3%83%A9%E3%83%BC%E3%82%B8%E3%83%A3%E3%83%91%E3%83%B3", + //"http://ja.wikipedia.org/wiki/%E3%82%AD%E3%83%A3%E3%82%BF%E3%83%94%E3%83%A9%E3%83%BC%E3%82%B8%E3%83%A3%E3%83%91%E3%83%B3": "http://ja.wikipedia.org/wiki/\xe3\x82\xad\xe3\x83\xa3\xe3\x82\xbf\xe3\x83\x94\xe3\x83\xa9\xe3\x83\xbc\xe3\x82\xb8\xe3\x83\xa3\xe3\x83\x91\xe3\x83\xb3", + + "http://test.example/\xe3\x82\xad": "http://test.example/%E3%82%AD", + //"http://test.example/\xe3\x82\xad": "http://test.example/\xe3\x82\xad", + "http://test.example/?p=%23val#test-%23-val%25": "http://test.example/?p=%23val#test-%23-val%25", //check that %23 (#) is not escaped where it shouldn't be + + "http://test.domain/I%C3%B1t%C3%ABrn%C3%A2ti%C3%B4n%EF%BF%BDliz%C3%A6ti%C3%B8n": 
"http://test.domain/I%C3%B1t%C3%ABrn%C3%A2ti%C3%B4n%EF%BF%BDliz%C3%A6ti%C3%B8n", + //"http://test.domain/I%C3%B1t%C3%ABrn%C3%A2ti%C3%B4n%EF%BF%BDliz%C3%A6ti%C3%B8n": "http://test.domain/I\xc3\xb1t\xc3\xabrn\xc3\xa2ti\xc3\xb4n\xef\xbf\xbdliz\xc3\xa6ti\xc3\xb8n", + } + + assertMap(t, testcases, FlagsSafe|FlagRemoveDotSegments) +} diff --git a/vendor/github.com/PuerkitoBio/urlesc/.travis.yml b/vendor/github.com/PuerkitoBio/urlesc/.travis.yml new file mode 100644 index 00000000..478630e5 --- /dev/null +++ b/vendor/github.com/PuerkitoBio/urlesc/.travis.yml @@ -0,0 +1,11 @@ +language: go + +go: + - 1.4 + - tip + +install: + - go build . + +script: + - go test -v diff --git a/vendor/github.com/PuerkitoBio/urlesc/LICENSE b/vendor/github.com/PuerkitoBio/urlesc/LICENSE new file mode 100644 index 00000000..74487567 --- /dev/null +++ b/vendor/github.com/PuerkitoBio/urlesc/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2012 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/PuerkitoBio/urlesc/README.md b/vendor/github.com/PuerkitoBio/urlesc/README.md new file mode 100644 index 00000000..bebe305e --- /dev/null +++ b/vendor/github.com/PuerkitoBio/urlesc/README.md @@ -0,0 +1,16 @@ +urlesc [![Build Status](https://travis-ci.org/PuerkitoBio/urlesc.png?branch=master)](https://travis-ci.org/PuerkitoBio/urlesc) [![GoDoc](http://godoc.org/github.com/PuerkitoBio/urlesc?status.svg)](http://godoc.org/github.com/PuerkitoBio/urlesc) +====== + +Package urlesc implements query escaping as per RFC 3986. + +It contains some parts of the net/url package, modified so as to allow +some reserved characters incorrectly escaped by net/url (see [issue 5684](https://github.com/golang/go/issues/5684)). + +## Install + + go get github.com/PuerkitoBio/urlesc + +## License + +Go license (BSD-3-Clause) + diff --git a/vendor/github.com/PuerkitoBio/urlesc/urlesc.go b/vendor/github.com/PuerkitoBio/urlesc/urlesc.go new file mode 100644 index 00000000..1b846245 --- /dev/null +++ b/vendor/github.com/PuerkitoBio/urlesc/urlesc.go @@ -0,0 +1,180 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package urlesc implements query escaping as per RFC 3986. +// It contains some parts of the net/url package, modified so as to allow +// some reserved characters incorrectly escaped by net/url. 
+// See https://github.com/golang/go/issues/5684 +package urlesc + +import ( + "bytes" + "net/url" + "strings" +) + +type encoding int + +const ( + encodePath encoding = 1 + iota + encodeUserPassword + encodeQueryComponent + encodeFragment +) + +// Return true if the specified character should be escaped when +// appearing in a URL string, according to RFC 3986. +func shouldEscape(c byte, mode encoding) bool { + // §2.3 Unreserved characters (alphanum) + if 'A' <= c && c <= 'Z' || 'a' <= c && c <= 'z' || '0' <= c && c <= '9' { + return false + } + + switch c { + case '-', '.', '_', '~': // §2.3 Unreserved characters (mark) + return false + + // §2.2 Reserved characters (reserved) + case ':', '/', '?', '#', '[', ']', '@', // gen-delims + '!', '$', '&', '\'', '(', ')', '*', '+', ',', ';', '=': // sub-delims + // Different sections of the URL allow a few of + // the reserved characters to appear unescaped. + switch mode { + case encodePath: // §3.3 + // The RFC allows sub-delims and : @. + // '/', '[' and ']' can be used to assign meaning to individual path + // segments. This package only manipulates the path as a whole, + // so we allow those as well. That leaves only ? and # to escape. + return c == '?' || c == '#' + + case encodeUserPassword: // §3.2.1 + // The RFC allows : and sub-delims in + // userinfo. The parsing of userinfo treats ':' as special so we must escape + // all the gen-delims. + return c == ':' || c == '/' || c == '?' || c == '#' || c == '[' || c == ']' || c == '@' + + case encodeQueryComponent: // §3.4 + // The RFC allows / and ?. + return c != '/' && c != '?' + + case encodeFragment: // §4.1 + // The RFC text is silent but the grammar allows + // everything, so escape nothing but # + return c == '#' + } + } + + // Everything else must be escaped. + return true +} + +// QueryEscape escapes the string so it can be safely placed +// inside a URL query. 
+func QueryEscape(s string) string { + return escape(s, encodeQueryComponent) +} + +func escape(s string, mode encoding) string { + spaceCount, hexCount := 0, 0 + for i := 0; i < len(s); i++ { + c := s[i] + if shouldEscape(c, mode) { + if c == ' ' && mode == encodeQueryComponent { + spaceCount++ + } else { + hexCount++ + } + } + } + + if spaceCount == 0 && hexCount == 0 { + return s + } + + t := make([]byte, len(s)+2*hexCount) + j := 0 + for i := 0; i < len(s); i++ { + switch c := s[i]; { + case c == ' ' && mode == encodeQueryComponent: + t[j] = '+' + j++ + case shouldEscape(c, mode): + t[j] = '%' + t[j+1] = "0123456789ABCDEF"[c>>4] + t[j+2] = "0123456789ABCDEF"[c&15] + j += 3 + default: + t[j] = s[i] + j++ + } + } + return string(t) +} + +var uiReplacer = strings.NewReplacer( + "%21", "!", + "%27", "'", + "%28", "(", + "%29", ")", + "%2A", "*", +) + +// unescapeUserinfo unescapes some characters that need not to be escaped as per RFC3986. +func unescapeUserinfo(s string) string { + return uiReplacer.Replace(s) +} + +// Escape reassembles the URL into a valid URL string. +// The general form of the result is one of: +// +// scheme:opaque +// scheme://userinfo@host/path?query#fragment +// +// If u.Opaque is non-empty, String uses the first form; +// otherwise it uses the second form. +// +// In the second form, the following rules apply: +// - if u.Scheme is empty, scheme: is omitted. +// - if u.User is nil, userinfo@ is omitted. +// - if u.Host is empty, host/ is omitted. +// - if u.Scheme and u.Host are empty and u.User is nil, +// the entire scheme://userinfo@host/ is omitted. +// - if u.Host is non-empty and u.Path begins with a /, +// the form host/path does not add its own /. +// - if u.RawQuery is empty, ?query is omitted. +// - if u.Fragment is empty, #fragment is omitted. 
+func Escape(u *url.URL) string { + var buf bytes.Buffer + if u.Scheme != "" { + buf.WriteString(u.Scheme) + buf.WriteByte(':') + } + if u.Opaque != "" { + buf.WriteString(u.Opaque) + } else { + if u.Scheme != "" || u.Host != "" || u.User != nil { + buf.WriteString("//") + if ui := u.User; ui != nil { + buf.WriteString(unescapeUserinfo(ui.String())) + buf.WriteByte('@') + } + if h := u.Host; h != "" { + buf.WriteString(h) + } + } + if u.Path != "" && u.Path[0] != '/' && u.Host != "" { + buf.WriteByte('/') + } + buf.WriteString(escape(u.Path, encodePath)) + } + if u.RawQuery != "" { + buf.WriteByte('?') + buf.WriteString(u.RawQuery) + } + if u.Fragment != "" { + buf.WriteByte('#') + buf.WriteString(escape(u.Fragment, encodeFragment)) + } + return buf.String() +} diff --git a/vendor/github.com/PuerkitoBio/urlesc/urlesc_test.go b/vendor/github.com/PuerkitoBio/urlesc/urlesc_test.go new file mode 100644 index 00000000..45202e1d --- /dev/null +++ b/vendor/github.com/PuerkitoBio/urlesc/urlesc_test.go @@ -0,0 +1,641 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package urlesc + +import ( + "net/url" + "testing" +) + +type URLTest struct { + in string + out *url.URL + roundtrip string // expected result of reserializing the URL; empty means same as "in". 
+} + +var urltests = []URLTest{ + // no path + { + "http://www.google.com", + &url.URL{ + Scheme: "http", + Host: "www.google.com", + }, + "", + }, + // path + { + "http://www.google.com/", + &url.URL{ + Scheme: "http", + Host: "www.google.com", + Path: "/", + }, + "", + }, + // path with hex escaping + { + "http://www.google.com/file%20one%26two", + &url.URL{ + Scheme: "http", + Host: "www.google.com", + Path: "/file one&two", + }, + "http://www.google.com/file%20one&two", + }, + // user + { + "ftp://webmaster@www.google.com/", + &url.URL{ + Scheme: "ftp", + User: url.User("webmaster"), + Host: "www.google.com", + Path: "/", + }, + "", + }, + // escape sequence in username + { + "ftp://john%20doe@www.google.com/", + &url.URL{ + Scheme: "ftp", + User: url.User("john doe"), + Host: "www.google.com", + Path: "/", + }, + "ftp://john%20doe@www.google.com/", + }, + // query + { + "http://www.google.com/?q=go+language", + &url.URL{ + Scheme: "http", + Host: "www.google.com", + Path: "/", + RawQuery: "q=go+language", + }, + "", + }, + // query with hex escaping: NOT parsed + { + "http://www.google.com/?q=go%20language", + &url.URL{ + Scheme: "http", + Host: "www.google.com", + Path: "/", + RawQuery: "q=go%20language", + }, + "", + }, + // %20 outside query + { + "http://www.google.com/a%20b?q=c+d", + &url.URL{ + Scheme: "http", + Host: "www.google.com", + Path: "/a b", + RawQuery: "q=c+d", + }, + "", + }, + // path without leading /, so no parsing + { + "http:www.google.com/?q=go+language", + &url.URL{ + Scheme: "http", + Opaque: "www.google.com/", + RawQuery: "q=go+language", + }, + "http:www.google.com/?q=go+language", + }, + // path without leading /, so no parsing + { + "http:%2f%2fwww.google.com/?q=go+language", + &url.URL{ + Scheme: "http", + Opaque: "%2f%2fwww.google.com/", + RawQuery: "q=go+language", + }, + "http:%2f%2fwww.google.com/?q=go+language", + }, + // non-authority with path + { + "mailto:/webmaster@golang.org", + &url.URL{ + Scheme: "mailto", + Path: 
"/webmaster@golang.org", + }, + "mailto:///webmaster@golang.org", // unfortunate compromise + }, + // non-authority + { + "mailto:webmaster@golang.org", + &url.URL{ + Scheme: "mailto", + Opaque: "webmaster@golang.org", + }, + "", + }, + // unescaped :// in query should not create a scheme + { + "/foo?query=http://bad", + &url.URL{ + Path: "/foo", + RawQuery: "query=http://bad", + }, + "", + }, + // leading // without scheme should create an authority + { + "//foo", + &url.URL{ + Host: "foo", + }, + "", + }, + // leading // without scheme, with userinfo, path, and query + { + "//user@foo/path?a=b", + &url.URL{ + User: url.User("user"), + Host: "foo", + Path: "/path", + RawQuery: "a=b", + }, + "", + }, + // Three leading slashes isn't an authority, but doesn't return an error. + // (We can't return an error, as this code is also used via + // ServeHTTP -> ReadRequest -> Parse, which is arguably a + // different URL parsing context, but currently shares the + // same codepath) + { + "///threeslashes", + &url.URL{ + Path: "///threeslashes", + }, + "", + }, + { + "http://user:password@google.com", + &url.URL{ + Scheme: "http", + User: url.UserPassword("user", "password"), + Host: "google.com", + }, + "http://user:password@google.com", + }, + // unescaped @ in username should not confuse host + { + "http://j@ne:password@google.com", + &url.URL{ + Scheme: "http", + User: url.UserPassword("j@ne", "password"), + Host: "google.com", + }, + "http://j%40ne:password@google.com", + }, + // unescaped @ in password should not confuse host + { + "http://jane:p@ssword@google.com", + &url.URL{ + Scheme: "http", + User: url.UserPassword("jane", "p@ssword"), + Host: "google.com", + }, + "http://jane:p%40ssword@google.com", + }, + { + "http://j@ne:password@google.com/p@th?q=@go", + &url.URL{ + Scheme: "http", + User: url.UserPassword("j@ne", "password"), + Host: "google.com", + Path: "/p@th", + RawQuery: "q=@go", + }, + "http://j%40ne:password@google.com/p@th?q=@go", + }, + { + 
"http://www.google.com/?q=go+language#foo", + &url.URL{ + Scheme: "http", + Host: "www.google.com", + Path: "/", + RawQuery: "q=go+language", + Fragment: "foo", + }, + "", + }, + { + "http://www.google.com/?q=go+language#foo%26bar", + &url.URL{ + Scheme: "http", + Host: "www.google.com", + Path: "/", + RawQuery: "q=go+language", + Fragment: "foo&bar", + }, + "http://www.google.com/?q=go+language#foo&bar", + }, + { + "file:///home/adg/rabbits", + &url.URL{ + Scheme: "file", + Host: "", + Path: "/home/adg/rabbits", + }, + "file:///home/adg/rabbits", + }, + // "Windows" paths are no exception to the rule. + // See golang.org/issue/6027, especially comment #9. + { + "file:///C:/FooBar/Baz.txt", + &url.URL{ + Scheme: "file", + Host: "", + Path: "/C:/FooBar/Baz.txt", + }, + "file:///C:/FooBar/Baz.txt", + }, + // case-insensitive scheme + { + "MaIlTo:webmaster@golang.org", + &url.URL{ + Scheme: "mailto", + Opaque: "webmaster@golang.org", + }, + "mailto:webmaster@golang.org", + }, + // Relative path + { + "a/b/c", + &url.URL{ + Path: "a/b/c", + }, + "a/b/c", + }, + // escaped '?' in username and password + { + "http://%3Fam:pa%3Fsword@google.com", + &url.URL{ + Scheme: "http", + User: url.UserPassword("?am", "pa?sword"), + Host: "google.com", + }, + "", + }, + // escaped '?' and '#' in path + { + "http://example.com/%3F%23", + &url.URL{ + Scheme: "http", + Host: "example.com", + Path: "?#", + }, + "", + }, + // unescaped [ ] ! ' ( ) * in path + { + "http://example.com/[]!'()*", + &url.URL{ + Scheme: "http", + Host: "example.com", + Path: "[]!'()*", + }, + "http://example.com/[]!'()*", + }, + // escaped : / ? # [ ] @ in username and password + { + "http://%3A%2F%3F:%23%5B%5D%40@example.com", + &url.URL{ + Scheme: "http", + User: url.UserPassword(":/?", "#[]@"), + Host: "example.com", + }, + "", + }, + // unescaped ! 
$ & ' ( ) * + , ; = in username and password + { + "http://!$&'():*+,;=@example.com", + &url.URL{ + Scheme: "http", + User: url.UserPassword("!$&'()", "*+,;="), + Host: "example.com", + }, + "", + }, + // unescaped = : / . ? = in query component + { + "http://example.com/?q=http://google.com/?q=", + &url.URL{ + Scheme: "http", + Host: "example.com", + Path: "/", + RawQuery: "q=http://google.com/?q=", + }, + "", + }, + // unescaped : / ? [ ] @ ! $ & ' ( ) * + , ; = in fragment + { + "http://example.com/#:/?%23[]@!$&'()*+,;=", + &url.URL{ + Scheme: "http", + Host: "example.com", + Path: "/", + Fragment: ":/?#[]@!$&'()*+,;=", + }, + "", + }, +} + +func DoTestString(t *testing.T, parse func(string) (*url.URL, error), name string, tests []URLTest) { + for _, tt := range tests { + u, err := parse(tt.in) + if err != nil { + t.Errorf("%s(%q) returned error %s", name, tt.in, err) + continue + } + expected := tt.in + if len(tt.roundtrip) > 0 { + expected = tt.roundtrip + } + s := Escape(u) + if s != expected { + t.Errorf("Escape(%s(%q)) == %q (expected %q)", name, tt.in, s, expected) + } + } +} + +func TestURLString(t *testing.T) { + DoTestString(t, url.Parse, "Parse", urltests) + + // no leading slash on path should prepend + // slash on String() call + noslash := URLTest{ + "http://www.google.com/search", + &url.URL{ + Scheme: "http", + Host: "www.google.com", + Path: "search", + }, + "", + } + s := Escape(noslash.out) + if s != noslash.in { + t.Errorf("Expected %s; go %s", noslash.in, s) + } +} + +type EscapeTest struct { + in string + out string + err error +} + +var escapeTests = []EscapeTest{ + { + "", + "", + nil, + }, + { + "abc", + "abc", + nil, + }, + { + "one two", + "one+two", + nil, + }, + { + "10%", + "10%25", + nil, + }, + { + " ?&=#+%!<>#\"{}|\\^[]`☺\t:/@$'()*,;", + "+?%26%3D%23%2B%25%21%3C%3E%23%22%7B%7D%7C%5C%5E%5B%5D%60%E2%98%BA%09%3A/%40%24%27%28%29%2A%2C%3B", + nil, + }, +} + +func TestEscape(t *testing.T) { + for _, tt := range escapeTests { + actual := 
QueryEscape(tt.in) + if tt.out != actual { + t.Errorf("QueryEscape(%q) = %q, want %q", tt.in, actual, tt.out) + } + + // for bonus points, verify that escape:unescape is an identity. + roundtrip, err := url.QueryUnescape(actual) + if roundtrip != tt.in || err != nil { + t.Errorf("QueryUnescape(%q) = %q, %s; want %q, %s", actual, roundtrip, err, tt.in, "[no error]") + } + } +} + +var resolveReferenceTests = []struct { + base, rel, expected string +}{ + // Absolute URL references + {"http://foo.com?a=b", "https://bar.com/", "https://bar.com/"}, + {"http://foo.com/", "https://bar.com/?a=b", "https://bar.com/?a=b"}, + {"http://foo.com/bar", "mailto:foo@example.com", "mailto:foo@example.com"}, + + // Path-absolute references + {"http://foo.com/bar", "/baz", "http://foo.com/baz"}, + {"http://foo.com/bar?a=b#f", "/baz", "http://foo.com/baz"}, + {"http://foo.com/bar?a=b", "/baz?c=d", "http://foo.com/baz?c=d"}, + + // Scheme-relative + {"https://foo.com/bar?a=b", "//bar.com/quux", "https://bar.com/quux"}, + + // Path-relative references: + + // ... current directory + {"http://foo.com", ".", "http://foo.com/"}, + {"http://foo.com/bar", ".", "http://foo.com/"}, + {"http://foo.com/bar/", ".", "http://foo.com/bar/"}, + + // ... going down + {"http://foo.com", "bar", "http://foo.com/bar"}, + {"http://foo.com/", "bar", "http://foo.com/bar"}, + {"http://foo.com/bar/baz", "quux", "http://foo.com/bar/quux"}, + + // ... going up + {"http://foo.com/bar/baz", "../quux", "http://foo.com/quux"}, + {"http://foo.com/bar/baz", "../../../../../quux", "http://foo.com/quux"}, + {"http://foo.com/bar", "..", "http://foo.com/"}, + {"http://foo.com/bar/baz", "./..", "http://foo.com/"}, + // ".." 
in the middle (issue 3560) + {"http://foo.com/bar/baz", "quux/dotdot/../tail", "http://foo.com/bar/quux/tail"}, + {"http://foo.com/bar/baz", "quux/./dotdot/../tail", "http://foo.com/bar/quux/tail"}, + {"http://foo.com/bar/baz", "quux/./dotdot/.././tail", "http://foo.com/bar/quux/tail"}, + {"http://foo.com/bar/baz", "quux/./dotdot/./../tail", "http://foo.com/bar/quux/tail"}, + {"http://foo.com/bar/baz", "quux/./dotdot/dotdot/././../../tail", "http://foo.com/bar/quux/tail"}, + {"http://foo.com/bar/baz", "quux/./dotdot/dotdot/./.././../tail", "http://foo.com/bar/quux/tail"}, + {"http://foo.com/bar/baz", "quux/./dotdot/dotdot/dotdot/./../../.././././tail", "http://foo.com/bar/quux/tail"}, + {"http://foo.com/bar/baz", "quux/./dotdot/../dotdot/../dot/./tail/..", "http://foo.com/bar/quux/dot/"}, + + // Remove any dot-segments prior to forming the target URI. + // http://tools.ietf.org/html/rfc3986#section-5.2.4 + {"http://foo.com/dot/./dotdot/../foo/bar", "../baz", "http://foo.com/dot/baz"}, + + // Triple dot isn't special + {"http://foo.com/bar", "...", "http://foo.com/..."}, + + // Fragment + {"http://foo.com/bar", ".#frag", "http://foo.com/#frag"}, + + // RFC 3986: Normal Examples + // http://tools.ietf.org/html/rfc3986#section-5.4.1 + {"http://a/b/c/d;p?q", "g:h", "g:h"}, + {"http://a/b/c/d;p?q", "g", "http://a/b/c/g"}, + {"http://a/b/c/d;p?q", "./g", "http://a/b/c/g"}, + {"http://a/b/c/d;p?q", "g/", "http://a/b/c/g/"}, + {"http://a/b/c/d;p?q", "/g", "http://a/g"}, + {"http://a/b/c/d;p?q", "//g", "http://g"}, + {"http://a/b/c/d;p?q", "?y", "http://a/b/c/d;p?y"}, + {"http://a/b/c/d;p?q", "g?y", "http://a/b/c/g?y"}, + {"http://a/b/c/d;p?q", "#s", "http://a/b/c/d;p?q#s"}, + {"http://a/b/c/d;p?q", "g#s", "http://a/b/c/g#s"}, + {"http://a/b/c/d;p?q", "g?y#s", "http://a/b/c/g?y#s"}, + {"http://a/b/c/d;p?q", ";x", "http://a/b/c/;x"}, + {"http://a/b/c/d;p?q", "g;x", "http://a/b/c/g;x"}, + {"http://a/b/c/d;p?q", "g;x?y#s", "http://a/b/c/g;x?y#s"}, + {"http://a/b/c/d;p?q", "", 
"http://a/b/c/d;p?q"}, + {"http://a/b/c/d;p?q", ".", "http://a/b/c/"}, + {"http://a/b/c/d;p?q", "./", "http://a/b/c/"}, + {"http://a/b/c/d;p?q", "..", "http://a/b/"}, + {"http://a/b/c/d;p?q", "../", "http://a/b/"}, + {"http://a/b/c/d;p?q", "../g", "http://a/b/g"}, + {"http://a/b/c/d;p?q", "../..", "http://a/"}, + {"http://a/b/c/d;p?q", "../../", "http://a/"}, + {"http://a/b/c/d;p?q", "../../g", "http://a/g"}, + + // RFC 3986: Abnormal Examples + // http://tools.ietf.org/html/rfc3986#section-5.4.2 + {"http://a/b/c/d;p?q", "../../../g", "http://a/g"}, + {"http://a/b/c/d;p?q", "../../../../g", "http://a/g"}, + {"http://a/b/c/d;p?q", "/./g", "http://a/g"}, + {"http://a/b/c/d;p?q", "/../g", "http://a/g"}, + {"http://a/b/c/d;p?q", "g.", "http://a/b/c/g."}, + {"http://a/b/c/d;p?q", ".g", "http://a/b/c/.g"}, + {"http://a/b/c/d;p?q", "g..", "http://a/b/c/g.."}, + {"http://a/b/c/d;p?q", "..g", "http://a/b/c/..g"}, + {"http://a/b/c/d;p?q", "./../g", "http://a/b/g"}, + {"http://a/b/c/d;p?q", "./g/.", "http://a/b/c/g/"}, + {"http://a/b/c/d;p?q", "g/./h", "http://a/b/c/g/h"}, + {"http://a/b/c/d;p?q", "g/../h", "http://a/b/c/h"}, + {"http://a/b/c/d;p?q", "g;x=1/./y", "http://a/b/c/g;x=1/y"}, + {"http://a/b/c/d;p?q", "g;x=1/../y", "http://a/b/c/y"}, + {"http://a/b/c/d;p?q", "g?y/./x", "http://a/b/c/g?y/./x"}, + {"http://a/b/c/d;p?q", "g?y/../x", "http://a/b/c/g?y/../x"}, + {"http://a/b/c/d;p?q", "g#s/./x", "http://a/b/c/g#s/./x"}, + {"http://a/b/c/d;p?q", "g#s/../x", "http://a/b/c/g#s/../x"}, + + // Extras. 
+ {"https://a/b/c/d;p?q", "//g?q", "https://g?q"}, + {"https://a/b/c/d;p?q", "//g#s", "https://g#s"}, + {"https://a/b/c/d;p?q", "//g/d/e/f?y#s", "https://g/d/e/f?y#s"}, + {"https://a/b/c/d;p#s", "?y", "https://a/b/c/d;p?y"}, + {"https://a/b/c/d;p?q#s", "?y", "https://a/b/c/d;p?y"}, +} + +func TestResolveReference(t *testing.T) { + mustParse := func(url_ string) *url.URL { + u, err := url.Parse(url_) + if err != nil { + t.Fatalf("Expected URL to parse: %q, got error: %v", url_, err) + } + return u + } + opaque := &url.URL{Scheme: "scheme", Opaque: "opaque"} + for _, test := range resolveReferenceTests { + base := mustParse(test.base) + rel := mustParse(test.rel) + url := base.ResolveReference(rel) + if Escape(url) != test.expected { + t.Errorf("URL(%q).ResolveReference(%q) == %q, got %q", test.base, test.rel, test.expected, Escape(url)) + } + // Ensure that new instances are returned. + if base == url { + t.Errorf("Expected URL.ResolveReference to return new URL instance.") + } + // Test the convenience wrapper too. + url, err := base.Parse(test.rel) + if err != nil { + t.Errorf("URL(%q).Parse(%q) failed: %v", test.base, test.rel, err) + } else if Escape(url) != test.expected { + t.Errorf("URL(%q).Parse(%q) == %q, got %q", test.base, test.rel, test.expected, Escape(url)) + } else if base == url { + // Ensure that new instances are returned for the wrapper too. + t.Errorf("Expected URL.Parse to return new URL instance.") + } + // Ensure Opaque resets the URL. + url = base.ResolveReference(opaque) + if *url != *opaque { + t.Errorf("ResolveReference failed to resolve opaque URL: want %#v, got %#v", url, opaque) + } + // Test the convenience wrapper with an opaque URL too. 
+ url, err = base.Parse("scheme:opaque") + if err != nil { + t.Errorf(`URL(%q).Parse("scheme:opaque") failed: %v`, test.base, err) + } else if *url != *opaque { + t.Errorf("Parse failed to resolve opaque URL: want %#v, got %#v", url, opaque) + } else if base == url { + // Ensure that new instances are returned, again. + t.Errorf("Expected URL.Parse to return new URL instance.") + } + } +} + +type shouldEscapeTest struct { + in byte + mode encoding + escape bool +} + +var shouldEscapeTests = []shouldEscapeTest{ + // Unreserved characters (§2.3) + {'a', encodePath, false}, + {'a', encodeUserPassword, false}, + {'a', encodeQueryComponent, false}, + {'a', encodeFragment, false}, + {'z', encodePath, false}, + {'A', encodePath, false}, + {'Z', encodePath, false}, + {'0', encodePath, false}, + {'9', encodePath, false}, + {'-', encodePath, false}, + {'-', encodeUserPassword, false}, + {'-', encodeQueryComponent, false}, + {'-', encodeFragment, false}, + {'.', encodePath, false}, + {'_', encodePath, false}, + {'~', encodePath, false}, + + // User information (§3.2.1) + {':', encodeUserPassword, true}, + {'/', encodeUserPassword, true}, + {'?', encodeUserPassword, true}, + {'@', encodeUserPassword, true}, + {'$', encodeUserPassword, false}, + {'&', encodeUserPassword, false}, + {'+', encodeUserPassword, false}, + {',', encodeUserPassword, false}, + {';', encodeUserPassword, false}, + {'=', encodeUserPassword, false}, +} + +func TestShouldEscape(t *testing.T) { + for _, tt := range shouldEscapeTests { + if shouldEscape(tt.in, tt.mode) != tt.escape { + t.Errorf("shouldEscape(%q, %v) returned %v; expected %v", tt.in, tt.mode, !tt.escape, tt.escape) + } + } +} diff --git a/vendor/github.com/coreos/go-oidc/.gitignore b/vendor/github.com/coreos/go-oidc/.gitignore new file mode 100644 index 00000000..c96f2f47 --- /dev/null +++ b/vendor/github.com/coreos/go-oidc/.gitignore @@ -0,0 +1,2 @@ +/bin +/gopath diff --git a/vendor/github.com/coreos/go-oidc/.travis.yml 
b/vendor/github.com/coreos/go-oidc/.travis.yml new file mode 100644 index 00000000..f2f3c9c8 --- /dev/null +++ b/vendor/github.com/coreos/go-oidc/.travis.yml @@ -0,0 +1,16 @@ +language: go + +go: + - 1.7.5 + - 1.8 + +install: + - go get -v -t github.com/coreos/go-oidc/... + - go get golang.org/x/tools/cmd/cover + - go get github.com/golang/lint/golint + +script: + - ./test + +notifications: + email: false diff --git a/vendor/github.com/coreos/go-oidc/CONTRIBUTING.md b/vendor/github.com/coreos/go-oidc/CONTRIBUTING.md new file mode 100644 index 00000000..6662073a --- /dev/null +++ b/vendor/github.com/coreos/go-oidc/CONTRIBUTING.md @@ -0,0 +1,71 @@ +# How to Contribute + +CoreOS projects are [Apache 2.0 licensed](LICENSE) and accept contributions via +GitHub pull requests. This document outlines some of the conventions on +development workflow, commit message formatting, contact points and other +resources to make it easier to get your contribution accepted. + +# Certificate of Origin + +By contributing to this project you agree to the Developer Certificate of +Origin (DCO). This document was created by the Linux Kernel community and is a +simple statement that you, as a contributor, have the legal right to make the +contribution. See the [DCO](DCO) file for details. + +# Email and Chat + +The project currently uses the general CoreOS email list and IRC channel: +- Email: [coreos-dev](https://groups.google.com/forum/#!forum/coreos-dev) +- IRC: #[coreos](irc://irc.freenode.org:6667/#coreos) IRC channel on freenode.org + +Please avoid emailing maintainers found in the MAINTAINERS file directly. They +are very busy and read the mailing lists. + +## Getting Started + +- Fork the repository on GitHub +- Read the [README](README.md) for build and test instructions +- Play with the project, submit bugs, submit patches! 
+ +## Contribution Flow + +This is a rough outline of what a contributor's workflow looks like: + +- Create a topic branch from where you want to base your work (usually master). +- Make commits of logical units. +- Make sure your commit messages are in the proper format (see below). +- Push your changes to a topic branch in your fork of the repository. +- Make sure the tests pass, and add any new tests as appropriate. +- Submit a pull request to the original repository. + +Thanks for your contributions! + +### Format of the Commit Message + +We follow a rough convention for commit messages that is designed to answer two +questions: what changed and why. The subject line should feature the what and +the body of the commit should describe the why. + +``` +scripts: add the test-cluster command + +this uses tmux to setup a test cluster that you can easily kill and +start for debugging. + +Fixes #38 +``` + +The format can be described more formally as follows: + +``` +: + + + +