Merge pull request #2947 from tariq1890/update_versions
Update the versions of several dependencies
Commit 438b67feef · 209 changed files with 31657 additions and 7738 deletions
@@ -7,7 +7,7 @@ services:
 language: go

 go:
-- "1.11.x"
+- "1.12.x"

 go_import_path: github.com/docker/distribution

@@ -1,4 +1,4 @@
-FROM golang:1.11-alpine AS build
+FROM golang:1.12-alpine AS build

 ENV DISTRIBUTION_DIR /go/src/github.com/docker/distribution
 ENV DOCKER_BUILDTAGS include_oss include_gcs
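The Dockerfile hunk above keeps ENV DOCKER_BUILDTAGS include_oss include_gcs, which the build presumably passes to go build as -tags so the optional OSS and GCS storage drivers are compiled in. As a minimal sketch, not taken from this repository (the package name and log message are hypothetical), a file gated by such a build tag looks like this:

// +build include_gcs

// Package gcsdriver is a hypothetical illustration: because of the build
// constraint above, this file is only compiled when -tags "include_gcs"
// (e.g. via DOCKER_BUILDTAGS) is supplied.
package gcsdriver

import "log"

func init() {
	log.Println("GCS storage driver compiled in (build tag include_gcs)")
}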
go.mod — 22 changes

@@ -10,7 +10,7 @@ require (
 github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a // indirect
 github.com/bitly/go-simplejson v0.5.0 // indirect
 github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 // indirect
-github.com/bshuster-repo/logrus-logstash-hook v0.0.0-20170517171303-d2c0ecc1836d
+github.com/bshuster-repo/logrus-logstash-hook v0.4.1
 github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd
 github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b // indirect
 github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0 // indirect
@@ -20,18 +20,16 @@ require (
 github.com/docker/go-metrics v0.0.0-20180209012529-399ea8c73916
 github.com/docker/libtrust v0.0.0-20150114040149-fa567046d9b1
 github.com/garyburd/redigo v0.0.0-20150301180006-535138d7bcd7
-github.com/golang/protobuf v0.0.0-20160321223353-8d92cf5fc15a // indirect
-github.com/gorilla/context v1.1.1 // indirect
 github.com/gorilla/handlers v0.0.0-20150720190736-60c7bfde3e33
-github.com/gorilla/mux v0.0.0-20170228224354-599cba5e7b61
+github.com/gorilla/mux v1.7.2
 github.com/inconshreveable/mousetrap v1.0.0 // indirect
 github.com/jmespath/go-jmespath v0.0.0-20160803190731-bd40a432e4c7 // indirect
 github.com/kr/pretty v0.1.0 // indirect
 github.com/marstr/guid v1.1.0 // indirect
 github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect
-github.com/mitchellh/mapstructure v0.0.0-20150528213339-482a9fd5fa83
+github.com/mitchellh/mapstructure v1.1.2
 github.com/mitchellh/osext v0.0.0-20151018003038-5e2d6d41470f // indirect
-github.com/ncw/swift v1.0.40
+github.com/ncw/swift v1.0.47
 github.com/opencontainers/go-digest v0.0.0-20170106003457-a6d0ee40d420
 github.com/opencontainers/image-spec v1.0.0
 github.com/prometheus/client_golang v0.0.0-20180209125602-c332b6f63c06 // indirect
@@ -39,21 +37,21 @@ require (
 github.com/prometheus/common v0.0.0-20180110214958-89604d197083 // indirect
 github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7 // indirect
 github.com/satori/go.uuid v1.2.0 // indirect
-github.com/sirupsen/logrus v0.0.0-20170620144510-3d4380f53a34
+github.com/sirupsen/logrus v1.4.2
 github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a // indirect
-github.com/spf13/cobra v0.0.0-20150605180824-312092086bed
+github.com/spf13/cobra v0.0.3
-github.com/spf13/pflag v0.0.0-20150601220040-564482062245 // indirect
+github.com/spf13/pflag v1.0.3 // indirect
 github.com/stretchr/testify v1.3.0 // indirect
 github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43 // indirect
 github.com/yvasiyarov/gorelic v0.0.0-20141212073537-a9bba5b9ab50
 github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f // indirect
 golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2
-golang.org/x/oauth2 v0.0.0-20160304213135-045497edb623
+golang.org/x/net v0.0.0-20190619014844-b5b0513f8c1b // indirect
+golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45
 golang.org/x/sys v0.0.0-20190602015325-4c4f7f33c9ed // indirect
 google.golang.org/api v0.0.0-20160322025152-9bf6e6e569ff
-google.golang.org/appengine v0.0.0-20160301025000-12d5545dc1cf // indirect
 google.golang.org/cloud v0.0.0-20151119220103-975617b05ea8
 google.golang.org/grpc v0.0.0-20160317175043-d3ddb4469d5a // indirect
 gopkg.in/check.v1 v1.0.0-20141024133853-64131543e789
-gopkg.in/yaml.v2 v2.2.1
+gopkg.in/yaml.v2 v2.2.2
 )
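The bumps above replace several pseudo-versions with tagged releases (gorilla/mux v1.7.2, sirupsen/logrus v1.4.2, spf13/cobra v0.0.3, and so on) without changing any import paths, so existing callers should compile unchanged. A minimal sketch, not part of this PR (the route and port are made up), of code building against the bumped gorilla/mux:

// Illustration only: not taken from docker/distribution.
package main

import (
	"log"
	"net/http"

	"github.com/gorilla/mux"
)

func main() {
	r := mux.NewRouter() // same API before and after the bump to v1.7.2
	r.HandleFunc("/healthz", func(w http.ResponseWriter, _ *http.Request) {
		w.WriteHeader(http.StatusOK)
	})
	log.Fatal(http.ListenAndServe(":8080", r))
}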
go.sum — 61 changes

@@ -1,3 +1,5 @@
+cloud.google.com/go v0.34.0 h1:eOI3/cP2VTU6uZLDYAoic+eyzzB9YyGmJ7eIjl8rOPg=
+cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
 github.com/Azure/azure-sdk-for-go v16.2.1+incompatible h1:KnPIugL51v3N3WwvaSmZbxukD1WuWXOiE9fRdu32f2I=
 github.com/Azure/azure-sdk-for-go v16.2.1+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
 github.com/Azure/go-autorest v10.8.1+incompatible h1:u0jVQf+a6k6x8A+sT60l6EY9XZu+kHdnZVPAYqpVRo0=
@@ -12,8 +14,8 @@ github.com/bitly/go-simplejson v0.5.0 h1:6IH+V8/tVMab511d5bn4M7EwGXZf9Hj6i2xSwkN
 github.com/bitly/go-simplejson v0.5.0/go.mod h1:cXHtHw4XUPsvGaxgjIAn8PhEWG9NfngEKAMDJEczWVA=
 github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 h1:DDGfHa7BWjL4YnC6+E63dPcxHo2sUxDIu8g3QgEJdRY=
 github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4=
-github.com/bshuster-repo/logrus-logstash-hook v0.0.0-20170517171303-d2c0ecc1836d h1:DJZ/vtxEZ876RjwGfanOc5kJK65b4tGRFqUDJriKuFg=
+github.com/bshuster-repo/logrus-logstash-hook v0.4.1 h1:pgAtgj+A31JBVtEHu2uHuEx0n+2ukqUJnS2vVe5pQNA=
-github.com/bshuster-repo/logrus-logstash-hook v0.0.0-20170517171303-d2c0ecc1836d/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk=
+github.com/bshuster-repo/logrus-logstash-hook v0.4.1/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk=
 github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd h1:rFt+Y/IK1aEZkEHchZRSq9OQbsSzIT/OrI8YFFmRIng=
 github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8=
 github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b h1:otBG+dV+YK+Soembjv71DPz3uX/V/6MMlSyD9JBQ6kQ=
@@ -22,6 +24,8 @@ github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0 h1:nvj0OLI3YqYXe
 github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE=
 github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/denverdino/aliyungo v0.0.0-20190125010748-a747050bb1ba h1:p6poVbjHDkKa+wtC8frBMwQtT3BmqGYBjzMwJ63tuR4=
 github.com/denverdino/aliyungo v0.0.0-20190125010748-a747050bb1ba/go.mod h1:dV8lFg6daOBZbT6/BDGIz6Y3WFGn8juu6G+CQ6LHtl0=
 github.com/dgrijalva/jwt-go v0.0.0-20170104182250-a601269ab70c h1:KJAnOBuY9cTKVqB5cfbynpvFgeHRTREkRk8C977oFu4=
@@ -36,16 +40,14 @@ github.com/garyburd/redigo v0.0.0-20150301180006-535138d7bcd7 h1:LofdAjjjqCSXMwL
 github.com/garyburd/redigo v0.0.0-20150301180006-535138d7bcd7/go.mod h1:NR3MbYisc3/PwhQ00EMzDiPmrwpPxAn5GI05/YaO1SY=
 github.com/go-ini/ini v1.25.4 h1:Mujh4R/dH6YL8bxuISne3xX2+qcQ9p0IxKAP6ExWoUo=
 github.com/go-ini/ini v1.25.4/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8=
-github.com/golang/protobuf v0.0.0-20160321223353-8d92cf5fc15a h1:vvuznlPYGGYdJ9yHfkMb3l9wSHugsXmSH/XN08b9XoI=
+github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM=
-github.com/golang/protobuf v0.0.0-20160321223353-8d92cf5fc15a/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
 github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8=
 github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
-github.com/gorilla/context v1.1.1 h1:AWwleXJkX/nhcU9bZSnZoi3h/qGYqQAGhq6zZe/aQW8=
-github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg=
 github.com/gorilla/handlers v0.0.0-20150720190736-60c7bfde3e33 h1:893HsJqtxp9z1SF76gg6hY70hRY1wVlTSnC/h1yUDCo=
 github.com/gorilla/handlers v0.0.0-20150720190736-60c7bfde3e33/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ=
-github.com/gorilla/mux v0.0.0-20170228224354-599cba5e7b61 h1:4lceeSGcX6bSAeeTiqcGX4DX6pMLPG5fKHvGeWAP/5c=
+github.com/gorilla/mux v1.7.2 h1:zoNxOV7WjqXptQOVngLmcSQgXmgk4NMz1HibBchjl/I=
-github.com/gorilla/mux v0.0.0-20170228224354-599cba5e7b61/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
+github.com/gorilla/mux v1.7.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
 github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=
 github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
 github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
@@ -53,6 +55,8 @@ github.com/jmespath/go-jmespath v0.0.0-20160803190731-bd40a432e4c7 h1:SMvOWPJCES
 github.com/jmespath/go-jmespath v0.0.0-20160803190731-bd40a432e4c7/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
 github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo=
 github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
+github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk=
+github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
 github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
 github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
 github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
@@ -62,12 +66,12 @@ github.com/marstr/guid v1.1.0 h1:/M4H/1G4avsieL6BbUwCOBzulmoeKVP5ux/3mQNnbyI=
 github.com/marstr/guid v1.1.0/go.mod h1:74gB1z2wpxxInTG6yaqA7KrtM0NZ+RbrcqDvYHefzho=
 github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
 github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
-github.com/mitchellh/mapstructure v0.0.0-20150528213339-482a9fd5fa83 h1:DpXlMpJom9Cw/wuNxVv3e92kcCpGydKp2Rz6O5SrwkI=
+github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE=
-github.com/mitchellh/mapstructure v0.0.0-20150528213339-482a9fd5fa83/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
+github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
 github.com/mitchellh/osext v0.0.0-20151018003038-5e2d6d41470f h1:2+myh5ml7lgEU/51gbeLHfKGNfgEQQIWrlbdaOsidbQ=
 github.com/mitchellh/osext v0.0.0-20151018003038-5e2d6d41470f/go.mod h1:OkQIRizQZAeMln+1tSwduZz7+Af5oFlKirV/MSYes2A=
-github.com/ncw/swift v1.0.40 h1:0c+kzSF82qgP2TvDHwC534eoAMYTRS1jmr6KIMftTk0=
+github.com/ncw/swift v1.0.47 h1:4DQRPj35Y41WogBxyhOXlrI37nzGlyEcsforeudyYPQ=
-github.com/ncw/swift v1.0.40/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ZM=
+github.com/ncw/swift v1.0.47/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ZM=
 github.com/opencontainers/go-digest v0.0.0-20170106003457-a6d0ee40d420 h1:Yu3681ykYHDfLoI6XVjL4JWmkE+3TX9yfIWwRCh1kFM=
 github.com/opencontainers/go-digest v0.0.0-20170106003457-a6d0ee40d420/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
 github.com/opencontainers/image-spec v1.0.0 h1:jcw3cCH887bLKETGYpv8afogdYchbShR0eH6oD9d5PQ=
@@ -84,17 +88,19 @@ github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7 h1:hhvfGDVThBnd4
 github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
 github.com/satori/go.uuid v1.2.0 h1:0uYX9dsZ2yD7q2RtLRtPSdGDWzjeM3TbMJP9utgA0ww=
 github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
-github.com/sirupsen/logrus v0.0.0-20170620144510-3d4380f53a34 h1:kVLTAexkb0RpvzqHGdmxz80/bPLGuZn4qnUR0a4sW9Y=
+github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4=
-github.com/sirupsen/logrus v0.0.0-20170620144510-3d4380f53a34/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc=
+github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
 github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM=
 github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
 github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a h1:pa8hGb/2YqsZKovtsgrwcDH1RZhVbTKCjLp47XpqCDs=
 github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
-github.com/spf13/cobra v0.0.0-20150605180824-312092086bed h1:aMrfJ3kreAPpue0oog9V/QejkkfSfOIAFj1Okp0QFqc=
+github.com/spf13/cobra v0.0.3 h1:ZlrZ4XsMRm04Fr5pSFxBgfND2EBVa1nLpiy1stUsX/8=
-github.com/spf13/cobra v0.0.0-20150605180824-312092086bed/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
+github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
-github.com/spf13/pflag v0.0.0-20150601220040-564482062245 h1:DbERKPDk2nYToPmmXaUZpO0TFlw3wj9K7FIK2dUIXv0=
+github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg=
-github.com/spf13/pflag v0.0.0-20150601220040-564482062245/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
+github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
 github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
 github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
 github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
 github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43 h1:+lm10QQTNSBd8DVTNGHx7o/IKu9HYDvLMffDhbyLccI=
@@ -105,11 +111,18 @@ github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f h1
 github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f/go.mod h1:GlGEuHIJweS1mbCqG+7vt2nvWLzLLnRHbXz5JKd/Qbg=
 golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M=
 golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20190311183353-d8887717615a h1:oWX7TPOiFAMXLq8o0ikBYfCJVlRHBcsciT5bXOrH628=
 golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/oauth2 v0.0.0-20160304213135-045497edb623 h1:Ogcb1ofcvg+TPJ3U3BmnCFQxTzh9qidyMt5XgDWTrvw=
+golang.org/x/net v0.0.0-20190619014844-b5b0513f8c1b h1:lkjdUzSyJ5P1+eal9fxXX9Xg2BTfswsonKUse48C0uE=
-golang.org/x/oauth2 v0.0.0-20160304213135-045497edb623/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
+golang.org/x/net v0.0.0-20190619014844-b5b0513f8c1b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 h1:SVwTIAaPC2U/AvvLNZ2a7OVsmBpC8L5BlwK1whH3hm0=
+golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4 h1:YUO/7uOKsKeq9UokNS62b8FYywz3ker1l1vDZRCRefw=
+golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20190602015325-4c4f7f33c9ed h1:uPxWBzB3+mlnjy9W58qY1j/cjyFjutgw/Vhan2zLy/A=
 golang.org/x/sys v0.0.0-20190602015325-4c4f7f33c9ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
@@ -117,8 +130,8 @@ golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
 google.golang.org/api v0.0.0-20160322025152-9bf6e6e569ff h1:mk5zS3XLqVUzdF/CQCZ5ERujSF/8JFo+Wpkp/5I93NA=
 google.golang.org/api v0.0.0-20160322025152-9bf6e6e569ff/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0=
-google.golang.org/appengine v0.0.0-20160301025000-12d5545dc1cf h1:K3MmlQkPswneT7eBhEDAmDpCnNRKBX1ZHuRS7Sf9W+U=
+google.golang.org/appengine v1.4.0 h1:/wp5JvzpHIxhs/dumFmF7BXTf3Z+dd4uXta4kVyO508=
-google.golang.org/appengine v0.0.0-20160301025000-12d5545dc1cf/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
+google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
 google.golang.org/cloud v0.0.0-20151119220103-975617b05ea8 h1:Cpp2P6TPjujNoC5M2KHY6g7wfyLYfIWRZaSdIKfDasA=
 google.golang.org/cloud v0.0.0-20151119220103-975617b05ea8/go.mod h1:0H1ncTHf11KCFhTc/+EFRbzSCOZx+VUbRMk55Yv5MYk=
 google.golang.org/grpc v0.0.0-20160317175043-d3ddb4469d5a h1:zo0EaRwJM6T5UQ+QEt2dDSgEmbFJ4pZr/Rzsjpu7zgI=
@@ -126,5 +139,5 @@ google.golang.org/grpc v0.0.0-20160317175043-d3ddb4469d5a/go.mod h1:yo6s7OP7yaDg
 gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
 gopkg.in/check.v1 v1.0.0-20141024133853-64131543e789 h1:NMiUjDZiD6qDVeBOzpImftxXzQHCp2Y2QLdmaqU9MRk=
 gopkg.in/check.v1 v1.0.0-20141024133853-64131543e789/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/yaml.v2 v2.2.1 h1:mUhvW9EsL+naU5Q3cakzfE91YhliOondGd6ZrsDBHQE=
+gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
-gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
vendor/cloud.google.com/go/AUTHORS — 15 additions (generated, vendored, new file)

@@ -0,0 +1,15 @@
+# This is the official list of cloud authors for copyright purposes.
+# This file is distinct from the CONTRIBUTORS files.
+# See the latter for an explanation.
+
+# Names should be added to this file as:
+# Name or Organization <email address>
+# The email address is not required for organizations.
+
+Filippo Valsorda <hi@filippo.io>
+Google Inc.
+Ingo Oeser <nightlyone@googlemail.com>
+Palm Stone Games, Inc.
+Paweł Knap <pawelknap88@gmail.com>
+Péter Szilágyi <peterke@gmail.com>
+Tyler Treat <ttreat31@gmail.com>
vendor/cloud.google.com/go/CONTRIBUTORS — 40 additions (generated, vendored, new file)

@@ -0,0 +1,40 @@
+# People who have agreed to one of the CLAs and can contribute patches.
+# The AUTHORS file lists the copyright holders; this file
+# lists people. For example, Google employees are listed here
+# but not in AUTHORS, because Google holds the copyright.
+#
+# https://developers.google.com/open-source/cla/individual
+# https://developers.google.com/open-source/cla/corporate
+#
+# Names should be added to this file as:
+# Name <email address>
+
+# Keep the list alphabetically sorted.
+
+Alexis Hunt <lexer@google.com>
+Andreas Litt <andreas.litt@gmail.com>
+Andrew Gerrand <adg@golang.org>
+Brad Fitzpatrick <bradfitz@golang.org>
+Burcu Dogan <jbd@google.com>
+Dave Day <djd@golang.org>
+David Sansome <me@davidsansome.com>
+David Symonds <dsymonds@golang.org>
+Filippo Valsorda <hi@filippo.io>
+Glenn Lewis <gmlewis@google.com>
+Ingo Oeser <nightlyone@googlemail.com>
+James Hall <james.hall@shopify.com>
+Johan Euphrosine <proppy@google.com>
+Jonathan Amsterdam <jba@google.com>
+Kunpei Sakai <namusyaka@gmail.com>
+Luna Duclos <luna.duclos@palmstonegames.com>
+Magnus Hiie <magnus.hiie@gmail.com>
+Mario Castro <mariocaster@gmail.com>
+Michael McGreevy <mcgreevy@golang.org>
+Omar Jarjur <ojarjur@google.com>
+Paweł Knap <pawelknap88@gmail.com>
+Péter Szilágyi <peterke@gmail.com>
+Sarah Adams <shadams@google.com>
+Thanatat Tamtan <acoshift@gmail.com>
+Toby Burress <kurin@google.com>
+Tuo Shan <shantuo@google.com>
+Tyler Treat <ttreat31@gmail.com>
vendor/cloud.google.com/go/LICENSE — 202 additions (generated, vendored, new file)

@@ -0,0 +1,202 @@
+                              Apache License
+                        Version 2.0, January 2004
+                     http://www.apache.org/licenses/

[remaining added lines: the unmodified standard Apache License 2.0 text — terms and conditions 1–9 plus the appendix on how to apply the license]
501
vendor/cloud.google.com/go/compute/metadata/metadata.go
generated
vendored
Normal file
501
vendor/cloud.google.com/go/compute/metadata/metadata.go
generated
vendored
Normal file
|
@ -0,0 +1,501 @@
|
||||||
|
// Copyright 2014 Google LLC
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
// Package metadata provides access to Google Compute Engine (GCE)
|
||||||
|
// metadata and API service accounts.
|
||||||
|
//
|
||||||
|
// This package is a wrapper around the GCE metadata service,
|
||||||
|
// as documented at https://developers.google.com/compute/docs/metadata.
|
||||||
|
package metadata // import "cloud.google.com/go/compute/metadata"
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"io/ioutil"
|
||||||
|
"net"
|
||||||
|
"net/http"
|
||||||
|
"net/url"
|
||||||
|
"os"
|
||||||
|
"runtime"
|
||||||
|
"strings"
|
||||||
|
"sync"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
// metadataIP is the documented metadata server IP address.
|
||||||
|
metadataIP = "169.254.169.254"
|
||||||
|
|
||||||
|
// metadataHostEnv is the environment variable specifying the
|
||||||
|
// GCE metadata hostname. If empty, the default value of
|
||||||
|
// metadataIP ("169.254.169.254") is used instead.
|
||||||
|
// This is variable name is not defined by any spec, as far as
|
||||||
|
// I know; it was made up for the Go package.
|
||||||
|
metadataHostEnv = "GCE_METADATA_HOST"
|
||||||
|
|
||||||
|
userAgent = "gcloud-golang/0.1"
|
||||||
|
)
|
||||||
|
|
||||||
|
type cachedValue struct {
|
||||||
|
k string
|
||||||
|
trim bool
|
||||||
|
mu sync.Mutex
|
||||||
|
v string
|
||||||
|
}
|
||||||
|
|
||||||
|
var (
|
||||||
|
projID = &cachedValue{k: "project/project-id", trim: true}
|
||||||
|
projNum = &cachedValue{k: "project/numeric-project-id", trim: true}
|
||||||
|
instID = &cachedValue{k: "instance/id", trim: true}
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
defaultClient = &Client{hc: &http.Client{
|
||||||
|
Transport: &http.Transport{
|
||||||
|
Dial: (&net.Dialer{
|
||||||
|
Timeout: 2 * time.Second,
|
||||||
|
KeepAlive: 30 * time.Second,
|
||||||
|
}).Dial,
|
||||||
|
ResponseHeaderTimeout: 2 * time.Second,
|
||||||
|
},
|
||||||
|
}}
|
||||||
|
subscribeClient = &Client{hc: &http.Client{
|
||||||
|
Transport: &http.Transport{
|
||||||
|
Dial: (&net.Dialer{
|
||||||
|
Timeout: 2 * time.Second,
|
||||||
|
KeepAlive: 30 * time.Second,
|
||||||
|
}).Dial,
|
||||||
|
},
|
||||||
|
}}
|
||||||
|
)
|
||||||
|
|
||||||
|
// NotDefinedError is returned when requested metadata is not defined.
|
||||||
|
//
|
||||||
|
// The underlying string is the suffix after "/computeMetadata/v1/".
|
||||||
|
//
|
||||||
|
// This error is not returned if the value is defined to be the empty
|
||||||
|
// string.
|
||||||
|
type NotDefinedError string
|
||||||
|
|
||||||
|
func (suffix NotDefinedError) Error() string {
|
||||||
|
return fmt.Sprintf("metadata: GCE metadata %q not defined", string(suffix))
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *cachedValue) get(cl *Client) (v string, err error) {
|
||||||
|
defer c.mu.Unlock()
|
||||||
|
c.mu.Lock()
|
||||||
|
if c.v != "" {
|
||||||
|
return c.v, nil
|
||||||
|
}
|
||||||
|
if c.trim {
|
||||||
|
v, err = cl.getTrimmed(c.k)
|
||||||
|
} else {
|
||||||
|
v, err = cl.Get(c.k)
|
||||||
|
}
|
||||||
|
if err == nil {
|
||||||
|
c.v = v
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
var (
|
||||||
|
onGCEOnce sync.Once
|
||||||
|
onGCE bool
|
||||||
|
)
|
||||||
|
|
||||||
|
// OnGCE reports whether this process is running on Google Compute Engine.
|
||||||
|
func OnGCE() bool {
|
||||||
|
onGCEOnce.Do(initOnGCE)
|
||||||
|
return onGCE
|
||||||
|
}
|
||||||
|
|
||||||
|
func initOnGCE() {
|
||||||
|
onGCE = testOnGCE()
|
||||||
|
}
|
||||||
|
|
||||||
|
func testOnGCE() bool {
|
||||||
|
// The user explicitly said they're on GCE, so trust them.
|
||||||
|
if os.Getenv(metadataHostEnv) != "" {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
ctx, cancel := context.WithCancel(context.Background())
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
resc := make(chan bool, 2)
|
||||||
|
|
||||||
|
// Try two strategies in parallel.
|
||||||
|
// See https://github.com/GoogleCloudPlatform/google-cloud-go/issues/194
|
||||||
|
go func() {
|
||||||
|
req, _ := http.NewRequest("GET", "http://"+metadataIP, nil)
|
||||||
|
req.Header.Set("User-Agent", userAgent)
|
||||||
|
res, err := defaultClient.hc.Do(req.WithContext(ctx))
|
||||||
|
if err != nil {
|
||||||
|
resc <- false
|
||||||
|
return
|
||||||
|
}
|
||||||
|
defer res.Body.Close()
|
||||||
|
resc <- res.Header.Get("Metadata-Flavor") == "Google"
|
||||||
|
}()
|
||||||
|
|
||||||
|
go func() {
|
||||||
|
addrs, err := net.LookupHost("metadata.google.internal")
|
||||||
|
if err != nil || len(addrs) == 0 {
|
||||||
|
resc <- false
|
||||||
|
return
|
||||||
|
}
|
||||||
|
resc <- strsContains(addrs, metadataIP)
|
||||||
|
}()
|
||||||
|
|
||||||
|
tryHarder := systemInfoSuggestsGCE()
|
||||||
|
if tryHarder {
|
||||||
|
res := <-resc
|
||||||
|
if res {
|
||||||
|
// The first strategy succeeded, so let's use it.
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
// Wait for either the DNS or metadata server probe to
|
||||||
|
// contradict the other one and say we are running on
|
||||||
|
// GCE. Give it a lot of time to do so, since the system
|
||||||
|
// info already suggests we're running on a GCE BIOS.
|
||||||
|
timer := time.NewTimer(5 * time.Second)
|
||||||
|
defer timer.Stop()
|
||||||
|
select {
|
||||||
|
case res = <-resc:
|
||||||
|
return res
|
||||||
|
case <-timer.C:
|
||||||
|
// Too slow. Who knows what this system is.
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// There's no hint from the system info that we're running on
|
||||||
|
// GCE, so use the first probe's result as truth, whether it's
|
||||||
|
// true or false. The goal here is to optimize for speed for
|
||||||
|
// users who are NOT running on GCE. We can't assume that
|
||||||
|
// either a DNS lookup or an HTTP request to a blackholed IP
|
||||||
|
// address is fast. Worst case this should return when the
|
||||||
|
// metaClient's Transport.ResponseHeaderTimeout or
|
||||||
|
// Transport.Dial.Timeout fires (in two seconds).
|
||||||
|
return <-resc
|
||||||
|
}
|
||||||
|
|
||||||
|
// systemInfoSuggestsGCE reports whether the local system (without
|
||||||
|
// doing network requests) suggests that we're running on GCE. If this
|
||||||
|
// returns true, testOnGCE tries a bit harder to reach its metadata
|
||||||
|
// server.
|
||||||
|
func systemInfoSuggestsGCE() bool {
|
||||||
|
if runtime.GOOS != "linux" {
|
||||||
|
// We don't have any non-Linux clues available, at least yet.
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
slurp, _ := ioutil.ReadFile("/sys/class/dmi/id/product_name")
|
||||||
|
name := strings.TrimSpace(string(slurp))
|
||||||
|
return name == "Google" || name == "Google Compute Engine"
|
||||||
|
}
|
||||||
|
|
||||||
|
// Subscribe calls Client.Subscribe on a client designed for subscribing (one with no
|
||||||
|
// ResponseHeaderTimeout).
|
||||||
|
func Subscribe(suffix string, fn func(v string, ok bool) error) error {
|
||||||
|
return subscribeClient.Subscribe(suffix, fn)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get calls Client.Get on the default client.
|
||||||
|
func Get(suffix string) (string, error) { return defaultClient.Get(suffix) }
|
||||||
|
|
||||||
|
// ProjectID returns the current instance's project ID string.
|
||||||
|
func ProjectID() (string, error) { return defaultClient.ProjectID() }
|
||||||
|
|
||||||
|
// NumericProjectID returns the current instance's numeric project ID.
|
||||||
|
func NumericProjectID() (string, error) { return defaultClient.NumericProjectID() }
|
||||||
|
|
||||||
|
// InternalIP returns the instance's primary internal IP address.
|
||||||
|
func InternalIP() (string, error) { return defaultClient.InternalIP() }
|
||||||
|
|
||||||
|
// ExternalIP returns the instance's primary external (public) IP address.
|
||||||
|
func ExternalIP() (string, error) { return defaultClient.ExternalIP() }
|
||||||
|
|
||||||
|
// Hostname returns the instance's hostname. This will be of the form
|
||||||
|
// "<instanceID>.c.<projID>.internal".
|
||||||
|
func Hostname() (string, error) { return defaultClient.Hostname() }
|
||||||
|
|
||||||
|
// InstanceTags returns the list of user-defined instance tags,
|
||||||
|
// assigned when initially creating a GCE instance.
|
||||||
|
func InstanceTags() ([]string, error) { return defaultClient.InstanceTags() }
|
||||||
|
|
||||||
|
// InstanceID returns the current VM's numeric instance ID.
|
||||||
|
func InstanceID() (string, error) { return defaultClient.InstanceID() }
|
||||||
|
|
||||||
|
// InstanceName returns the current VM's instance ID string.
|
||||||
|
func InstanceName() (string, error) { return defaultClient.InstanceName() }
|
||||||
|
|
||||||
|
// Zone returns the current VM's zone, such as "us-central1-b".
|
||||||
|
func Zone() (string, error) { return defaultClient.Zone() }
|
||||||
|
|
||||||
|
// InstanceAttributes calls Client.InstanceAttributes on the default client.
|
||||||
|
func InstanceAttributes() ([]string, error) { return defaultClient.InstanceAttributes() }
|
||||||
|
|
||||||
|
// ProjectAttributes calls Client.ProjectAttributes on the default client.
|
||||||
|
func ProjectAttributes() ([]string, error) { return defaultClient.ProjectAttributes() }
|
||||||
|
|
||||||
|
// InstanceAttributeValue calls Client.InstanceAttributeValue on the default client.
|
||||||
|
func InstanceAttributeValue(attr string) (string, error) {
|
||||||
|
return defaultClient.InstanceAttributeValue(attr)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ProjectAttributeValue calls Client.ProjectAttributeValue on the default client.
|
||||||
|
func ProjectAttributeValue(attr string) (string, error) {
|
||||||
|
return defaultClient.ProjectAttributeValue(attr)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Scopes calls Client.Scopes on the default client.
|
||||||
|
func Scopes(serviceAccount string) ([]string, error) { return defaultClient.Scopes(serviceAccount) }
|
||||||
|
|
||||||
|
func strsContains(ss []string, s string) bool {
|
||||||
|
for _, v := range ss {
|
||||||
|
if v == s {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// A Client provides metadata.
type Client struct {
	hc *http.Client
}

// NewClient returns a Client that can be used to fetch metadata. All HTTP requests
// will use the given http.Client instead of the default client.
func NewClient(c *http.Client) *Client {
	return &Client{hc: c}
}

// getETag returns a value from the metadata service as well as the associated ETag.
// This func is otherwise equivalent to Get.
func (c *Client) getETag(suffix string) (value, etag string, err error) {
	// Using a fixed IP makes it very difficult to spoof the metadata service in
	// a container, which is an important use-case for local testing of cloud
	// deployments. To enable spoofing of the metadata service, the environment
	// variable GCE_METADATA_HOST is first inspected to decide where metadata
	// requests shall go.
	host := os.Getenv(metadataHostEnv)
	if host == "" {
		// Using 169.254.169.254 instead of "metadata" here because Go
		// binaries built with the "netgo" tag and without cgo won't
		// know the search suffix for "metadata" is
		// ".google.internal", and this IP address is documented as
		// being stable anyway.
		host = metadataIP
	}
	url := "http://" + host + "/computeMetadata/v1/" + suffix
	req, _ := http.NewRequest("GET", url, nil)
	req.Header.Set("Metadata-Flavor", "Google")
	req.Header.Set("User-Agent", userAgent)
	res, err := c.hc.Do(req)
	if err != nil {
		return "", "", err
	}
	defer res.Body.Close()
	if res.StatusCode == http.StatusNotFound {
		return "", "", NotDefinedError(suffix)
	}
	if res.StatusCode != 200 {
		return "", "", fmt.Errorf("status code %d trying to fetch %s", res.StatusCode, url)
	}
	all, err := ioutil.ReadAll(res.Body)
	if err != nil {
		return "", "", err
	}
	return string(all), res.Header.Get("Etag"), nil
}

// Get returns a value from the metadata service.
// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/".
//
// If the GCE_METADATA_HOST environment variable is not defined, a default of
// 169.254.169.254 will be used instead.
//
// If the requested metadata is not defined, the returned error will
// be of type NotDefinedError.
func (c *Client) Get(suffix string) (string, error) {
	val, _, err := c.getETag(suffix)
	return val, err
}

func (c *Client) getTrimmed(suffix string) (s string, err error) {
	s, err = c.Get(suffix)
	s = strings.TrimSpace(s)
	return
}

func (c *Client) lines(suffix string) ([]string, error) {
	j, err := c.Get(suffix)
	if err != nil {
		return nil, err
	}
	s := strings.Split(strings.TrimSpace(j), "\n")
	for i := range s {
		s[i] = strings.TrimSpace(s[i])
	}
	return s, nil
}

// ProjectID returns the current instance's project ID string.
func (c *Client) ProjectID() (string, error) { return projID.get(c) }

// NumericProjectID returns the current instance's numeric project ID.
func (c *Client) NumericProjectID() (string, error) { return projNum.get(c) }

// InstanceID returns the current VM's numeric instance ID.
func (c *Client) InstanceID() (string, error) { return instID.get(c) }

// InternalIP returns the instance's primary internal IP address.
func (c *Client) InternalIP() (string, error) {
	return c.getTrimmed("instance/network-interfaces/0/ip")
}

// ExternalIP returns the instance's primary external (public) IP address.
func (c *Client) ExternalIP() (string, error) {
	return c.getTrimmed("instance/network-interfaces/0/access-configs/0/external-ip")
}

// Hostname returns the instance's hostname. This will be of the form
// "<instanceID>.c.<projID>.internal".
func (c *Client) Hostname() (string, error) {
	return c.getTrimmed("instance/hostname")
}

// InstanceTags returns the list of user-defined instance tags,
// assigned when initially creating a GCE instance.
func (c *Client) InstanceTags() ([]string, error) {
	var s []string
	j, err := c.Get("instance/tags")
	if err != nil {
		return nil, err
	}
	if err := json.NewDecoder(strings.NewReader(j)).Decode(&s); err != nil {
		return nil, err
	}
	return s, nil
}

// InstanceName returns the current VM's instance ID string.
func (c *Client) InstanceName() (string, error) {
	host, err := c.Hostname()
	if err != nil {
		return "", err
	}
	return strings.Split(host, ".")[0], nil
}

// Zone returns the current VM's zone, such as "us-central1-b".
func (c *Client) Zone() (string, error) {
	zone, err := c.getTrimmed("instance/zone")
	// zone is of the form "projects/<projNum>/zones/<zoneName>".
	if err != nil {
		return "", err
	}
	return zone[strings.LastIndex(zone, "/")+1:], nil
}

// InstanceAttributes returns the list of user-defined attributes,
// assigned when initially creating a GCE VM instance. The value of an
// attribute can be obtained with InstanceAttributeValue.
func (c *Client) InstanceAttributes() ([]string, error) { return c.lines("instance/attributes/") }

// ProjectAttributes returns the list of user-defined attributes
// applying to the project as a whole, not just this VM. The value of
// an attribute can be obtained with ProjectAttributeValue.
func (c *Client) ProjectAttributes() ([]string, error) { return c.lines("project/attributes/") }

// InstanceAttributeValue returns the value of the provided VM
// instance attribute.
//
// If the requested attribute is not defined, the returned error will
// be of type NotDefinedError.
//
// InstanceAttributeValue may return ("", nil) if the attribute was
// defined to be the empty string.
func (c *Client) InstanceAttributeValue(attr string) (string, error) {
	return c.Get("instance/attributes/" + attr)
}

// ProjectAttributeValue returns the value of the provided
// project attribute.
//
// If the requested attribute is not defined, the returned error will
// be of type NotDefinedError.
//
// ProjectAttributeValue may return ("", nil) if the attribute was
// defined to be the empty string.
func (c *Client) ProjectAttributeValue(attr string) (string, error) {
	return c.Get("project/attributes/" + attr)
}

// Scopes returns the service account scopes for the given account.
// The account may be empty or the string "default" to use the instance's
// main account.
func (c *Client) Scopes(serviceAccount string) ([]string, error) {
	if serviceAccount == "" {
		serviceAccount = "default"
	}
	return c.lines("instance/service-accounts/" + serviceAccount + "/scopes")
}
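These accessors are thin wrappers over Get with well-known metadata paths. A minimal usage sketch follows, assuming this file lives at the usual cloud.google.com/go/compute/metadata import path (the path itself is not shown in this diff) and using a hypothetical instance attribute name "my-attr":

package main

import (
	"fmt"
	"log"
	"net/http"

	"cloud.google.com/go/compute/metadata" // assumed import path
)

func main() {
	// Package-level helpers go through the default client.
	zone, err := metadata.Zone()
	if err != nil {
		log.Fatalf("not on GCE or metadata server unreachable: %v", err)
	}
	fmt.Println("zone:", zone)

	// A custom *http.Client can be supplied for timeouts, proxies, etc.
	c := metadata.NewClient(&http.Client{})
	if v, err := c.InstanceAttributeValue("my-attr"); err == nil { // "my-attr" is hypothetical
		fmt.Println("my-attr:", v)
	}
}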
// Subscribe subscribes to a value from the metadata service.
// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/".
// The suffix may contain query parameters.
//
// Subscribe calls fn with the latest metadata value indicated by the provided
// suffix. If the metadata value is deleted, fn is called with the empty string
// and ok false. Subscribe blocks until fn returns a non-nil error or the value
// is deleted. Subscribe returns the error value returned from the last call to
// fn, which may be nil when ok == false.
func (c *Client) Subscribe(suffix string, fn func(v string, ok bool) error) error {
	const failedSubscribeSleep = time.Second * 5

	// First check to see if the metadata value exists at all.
	val, lastETag, err := c.getETag(suffix)
	if err != nil {
		return err
	}

	if err := fn(val, true); err != nil {
		return err
	}

	ok := true
	if strings.ContainsRune(suffix, '?') {
		suffix += "&wait_for_change=true&last_etag="
	} else {
		suffix += "?wait_for_change=true&last_etag="
	}
	for {
		val, etag, err := c.getETag(suffix + url.QueryEscape(lastETag))
		if err != nil {
			if _, deleted := err.(NotDefinedError); !deleted {
				time.Sleep(failedSubscribeSleep)
				continue // Retry on other errors.
			}
			ok = false
		}
		lastETag = etag

		if err := fn(val, ok); err != nil || !ok {
			return err
		}
	}
}
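Subscribe implements a long-poll loop over the wait_for_change and last_etag query parameters seen above. A small sketch of watching a hypothetical instance attribute until it is deleted, under the same assumed import path:

package main

import (
	"fmt"
	"log"
	"net/http"

	"cloud.google.com/go/compute/metadata" // assumed import path
)

func main() {
	c := metadata.NewClient(&http.Client{})
	// Blocks until the callback returns a non-nil error or the value is deleted.
	err := c.Subscribe("instance/attributes/my-attr", func(v string, ok bool) error { // "my-attr" is hypothetical
		if !ok {
			fmt.Println("attribute was deleted")
			return nil
		}
		fmt.Println("new value:", v)
		return nil // keep watching
	})
	if err != nil {
		log.Fatal(err)
	}
}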

3 vendor/github.com/bshuster-repo/logrus-logstash-hook/logstash_formatter.go generated vendored

@@ -4,6 +4,7 @@ import (
 	"encoding/json"
 	"fmt"
 	"strings"
+	"time"
 
 	"github.com/sirupsen/logrus"
 )
@@ -44,7 +45,7 @@ func (f *LogstashFormatter) FormatWithPrefix(entry *logrus.Entry, prefix string)
 	timeStampFormat := f.TimestampFormat
 
 	if timeStampFormat == "" {
-		timeStampFormat = logrus.DefaultTimestampFormat
+		timeStampFormat = time.RFC3339
 	}
 
 	fields["@timestamp"] = entry.Time.Format(timeStampFormat)
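The swap from logrus.DefaultTimestampFormat to time.RFC3339 keeps the emitted layout identical, since the old logrus constant was defined as time.RFC3339; the change appears to exist because newer logrus releases no longer export that constant. A tiny standalone sketch of the layout in question:

package main

import (
	"fmt"
	"time"
)

func main() {
	// The layout string behind both the removed constant and its replacement.
	fmt.Println(time.RFC3339) // "2006-01-02T15:04:05Z07:00"
	fmt.Println(time.Now().UTC().Format(time.RFC3339))
}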

3 vendor/github.com/golang/protobuf/LICENSE generated vendored

@@ -1,7 +1,4 @@
-Go support for Protocol Buffers - Google's data interchange format
-
 Copyright 2010 The Go Authors. All rights reserved.
-https://github.com/golang/protobuf
 
 Redistribution and use in source and binary forms, with or without
 modification, are permitted provided that the following conditions are

43 vendor/github.com/golang/protobuf/proto/Makefile generated vendored

@@ -1,43 +0,0 @@
-# Go support for Protocol Buffers - Google's data interchange format
-#
-# Copyright 2010 The Go Authors. All rights reserved.
-# https://github.com/golang/protobuf
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-#     * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-#     * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following disclaimer
-# in the documentation and/or other materials provided with the
-# distribution.
-#     * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-install:
-	go install
-
-test: install generate-test-pbs
-	go test
-
-
-generate-test-pbs:
-	make install
-	make -C testdata
-	protoc --go_out=Mtestdata/test.proto=github.com/golang/protobuf/proto/testdata,Mgoogle/protobuf/any.proto=github.com/golang/protobuf/ptypes/any:. proto3_proto/proto3.proto
-	make

56 vendor/github.com/golang/protobuf/proto/clone.go generated vendored

@@ -35,22 +35,39 @@
 package proto
 
 import (
+	"fmt"
 	"log"
 	"reflect"
 	"strings"
 )
 
 // Clone returns a deep copy of a protocol buffer.
-func Clone(pb Message) Message {
-	in := reflect.ValueOf(pb)
+func Clone(src Message) Message {
+	in := reflect.ValueOf(src)
 	if in.IsNil() {
-		return pb
+		return src
 	}
-
 	out := reflect.New(in.Type().Elem())
-	// out is empty so a merge is a deep copy.
-	mergeStruct(out.Elem(), in.Elem())
-	return out.Interface().(Message)
+	dst := out.Interface().(Message)
+	Merge(dst, src)
+	return dst
+}
+
+// Merger is the interface representing objects that can merge messages of the same type.
+type Merger interface {
+	// Merge merges src into this message.
+	// Required and optional fields that are set in src will be set to that value in dst.
+	// Elements of repeated fields will be appended.
+	//
+	// Merge may panic if called with a different argument type than the receiver.
+	Merge(src Message)
+}
+
+// generatedMerger is the custom merge method that generated protos will have.
+// We must add this method since a generate Merge method will conflict with
+// many existing protos that have a Merge data field already defined.
+type generatedMerger interface {
+	XXX_Merge(src Message)
 }
 
 // Merge merges src into dst.
@@ -58,17 +75,24 @@ func Clone(pb Message) Message {
 // Elements of repeated fields will be appended.
 // Merge panics if src and dst are not the same type, or if dst is nil.
 func Merge(dst, src Message) {
+	if m, ok := dst.(Merger); ok {
+		m.Merge(src)
+		return
+	}
+
 	in := reflect.ValueOf(src)
 	out := reflect.ValueOf(dst)
 	if out.IsNil() {
 		panic("proto: nil destination")
 	}
 	if in.Type() != out.Type() {
-		// Explicit test prior to mergeStruct so that mistyped nils will fail
-		panic("proto: type mismatch")
+		panic(fmt.Sprintf("proto.Merge(%T, %T) type mismatch", dst, src))
 	}
 	if in.IsNil() {
-		// Merging nil into non-nil is a quiet no-op
-		return
+		return // Merge from nil src is a noop
+	}
+	if m, ok := dst.(generatedMerger); ok {
+		m.XXX_Merge(src)
+		return
 	}
 	mergeStruct(out.Elem(), in.Elem())
@@ -84,9 +108,15 @@ func mergeStruct(out, in reflect.Value) {
 		mergeAny(out.Field(i), in.Field(i), false, sprop.Prop[i])
 	}
 
-	if emIn, ok := in.Addr().Interface().(extendableProto); ok {
-		emOut := out.Addr().Interface().(extendableProto)
-		mergeExtension(emOut.ExtensionMap(), emIn.ExtensionMap())
+	if emIn, err := extendable(in.Addr().Interface()); err == nil {
+		emOut, _ := extendable(out.Addr().Interface())
+		mIn, muIn := emIn.extensionsRead()
+		if mIn != nil {
+			mOut := emOut.extensionsWrite()
+			muIn.Lock()
+			mergeExtension(mOut, mIn)
+			muIn.Unlock()
+		}
 	}
 
 	uf := in.FieldByName("XXX_unrecognized")
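Clone is now an allocation followed by Merge, and both defer to the new Merger / XXX_Merge hooks when a message provides them, falling back to the reflection-based mergeStruct otherwise. A minimal sketch of the documented Clone and Merge semantics, using a hand-written illustrative message type (real code would use a protoc-generated type):

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

// Example is a tiny hand-written message used only to illustrate Clone/Merge;
// it exercises the reflection fallback rather than generated XXX_Merge code.
type Example struct {
	Name *string  `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
	Tags []string `protobuf:"bytes,2,rep,name=tags" json:"tags,omitempty"`
}

func (m *Example) Reset()         { *m = Example{} }
func (m *Example) String() string { return proto.CompactTextString(m) }
func (*Example) ProtoMessage()    {}

func main() {
	name := "a"
	src := &Example{Name: &name, Tags: []string{"x"}}

	// Clone allocates a zero message and merges src into it.
	dst := proto.Clone(src).(*Example)

	// Merge overwrites set scalar fields and appends repeated fields.
	proto.Merge(dst, &Example{Tags: []string{"y"}})
	fmt.Println(dst) // roughly: name:"a" tags:"x" tags:"y"
}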

774 vendor/github.com/golang/protobuf/proto/decode.go generated vendored

@@ -39,8 +39,6 @@ import (
 	"errors"
 	"fmt"
 	"io"
-	"os"
-	"reflect"
 )
 
 // errOverflow is returned when an integer is too large to be represented.
@@ -50,10 +48,6 @@ var errOverflow = errors.New("proto: integer overflow")
 // wire type is encountered. It does not get returned to user code.
 var ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof")
 
-// The fundamental decoders that interpret bytes on the wire.
-// Those that take integer types all return uint64 and are
-// therefore of type valueDecoder.
-
 // DecodeVarint reads a varint-encoded integer from the slice.
 // It returns the integer and the number of bytes consumed, or
 // zero if there is not enough.
@@ -61,7 +55,6 @@ var ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for
 // int32, int64, uint32, uint64, bool, and enum
 // protocol buffer types.
 func DecodeVarint(buf []byte) (x uint64, n int) {
-	// x, n already 0
 	for shift := uint(0); shift < 64; shift += 7 {
 		if n >= len(buf) {
 			return 0, 0
@@ -78,13 +71,7 @@ func DecodeVarint(buf []byte) (x uint64, n int) {
 	return 0, 0
 }
 
-// DecodeVarint reads a varint-encoded integer from the Buffer.
-// This is the format for the
-// int32, int64, uint32, uint64, bool, and enum
-// protocol buffer types.
-func (p *Buffer) DecodeVarint() (x uint64, err error) {
-	// x, err already 0
-
+func (p *Buffer) decodeVarintSlow() (x uint64, err error) {
 	i := p.index
 	l := len(p.buf)
 
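The hunks that follow split varint decoding into the slow loop above (decodeVarintSlow) and an unrolled fast path, but the wire rule is unchanged: each byte carries seven payload bits, least-significant group first, and a clear high bit terminates the value. A standalone sketch of that rule, not the library's own implementation:

package main

import (
	"errors"
	"fmt"
)

// decodeVarint mirrors the buffer-level rule used by the proto package:
// each byte contributes its low 7 bits, and a clear high bit ends the value.
func decodeVarint(buf []byte) (x uint64, n int, err error) {
	for shift := uint(0); shift < 64; shift += 7 {
		if n >= len(buf) {
			return 0, 0, errors.New("truncated varint")
		}
		b := buf[n]
		n++
		x |= uint64(b&0x7f) << shift
		if b&0x80 == 0 {
			return x, n, nil
		}
	}
	return 0, 0, errors.New("varint overflows 64 bits")
}

func main() {
	// 300 encodes as 0xAC 0x02: 44 + (2 << 7).
	x, n, err := decodeVarint([]byte{0xAC, 0x02})
	fmt.Println(x, n, err) // 300 2 <nil>
}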
@ -107,6 +94,107 @@ func (p *Buffer) DecodeVarint() (x uint64, err error) {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// DecodeVarint reads a varint-encoded integer from the Buffer.
|
||||||
|
// This is the format for the
|
||||||
|
// int32, int64, uint32, uint64, bool, and enum
|
||||||
|
// protocol buffer types.
|
||||||
|
func (p *Buffer) DecodeVarint() (x uint64, err error) {
|
||||||
|
i := p.index
|
||||||
|
buf := p.buf
|
||||||
|
|
||||||
|
if i >= len(buf) {
|
||||||
|
return 0, io.ErrUnexpectedEOF
|
||||||
|
} else if buf[i] < 0x80 {
|
||||||
|
p.index++
|
||||||
|
return uint64(buf[i]), nil
|
||||||
|
} else if len(buf)-i < 10 {
|
||||||
|
return p.decodeVarintSlow()
|
||||||
|
}
|
||||||
|
|
||||||
|
var b uint64
|
||||||
|
// we already checked the first byte
|
||||||
|
x = uint64(buf[i]) - 0x80
|
||||||
|
i++
|
||||||
|
|
||||||
|
b = uint64(buf[i])
|
||||||
|
i++
|
||||||
|
x += b << 7
|
||||||
|
if b&0x80 == 0 {
|
||||||
|
goto done
|
||||||
|
}
|
||||||
|
x -= 0x80 << 7
|
||||||
|
|
||||||
|
b = uint64(buf[i])
|
||||||
|
i++
|
||||||
|
x += b << 14
|
||||||
|
if b&0x80 == 0 {
|
||||||
|
goto done
|
||||||
|
}
|
||||||
|
x -= 0x80 << 14
|
||||||
|
|
||||||
|
b = uint64(buf[i])
|
||||||
|
i++
|
||||||
|
x += b << 21
|
||||||
|
if b&0x80 == 0 {
|
||||||
|
goto done
|
||||||
|
}
|
||||||
|
x -= 0x80 << 21
|
||||||
|
|
||||||
|
b = uint64(buf[i])
|
||||||
|
i++
|
||||||
|
x += b << 28
|
||||||
|
if b&0x80 == 0 {
|
||||||
|
goto done
|
||||||
|
}
|
||||||
|
x -= 0x80 << 28
|
||||||
|
|
||||||
|
b = uint64(buf[i])
|
||||||
|
i++
|
||||||
|
x += b << 35
|
||||||
|
if b&0x80 == 0 {
|
||||||
|
goto done
|
||||||
|
}
|
||||||
|
x -= 0x80 << 35
|
||||||
|
|
||||||
|
b = uint64(buf[i])
|
||||||
|
i++
|
||||||
|
x += b << 42
|
||||||
|
if b&0x80 == 0 {
|
||||||
|
goto done
|
||||||
|
}
|
||||||
|
x -= 0x80 << 42
|
||||||
|
|
||||||
|
b = uint64(buf[i])
|
||||||
|
i++
|
||||||
|
x += b << 49
|
||||||
|
if b&0x80 == 0 {
|
||||||
|
goto done
|
||||||
|
}
|
||||||
|
x -= 0x80 << 49
|
||||||
|
|
||||||
|
b = uint64(buf[i])
|
||||||
|
i++
|
||||||
|
x += b << 56
|
||||||
|
if b&0x80 == 0 {
|
||||||
|
goto done
|
||||||
|
}
|
||||||
|
x -= 0x80 << 56
|
||||||
|
|
||||||
|
b = uint64(buf[i])
|
||||||
|
i++
|
||||||
|
x += b << 63
|
||||||
|
if b&0x80 == 0 {
|
||||||
|
goto done
|
||||||
|
}
|
||||||
|
// x -= 0x80 << 63 // Always zero.
|
||||||
|
|
||||||
|
return 0, errOverflow
|
||||||
|
|
||||||
|
done:
|
||||||
|
p.index = i
|
||||||
|
return x, nil
|
||||||
|
}
|
||||||
|
|
||||||
// DecodeFixed64 reads a 64-bit integer from the Buffer.
|
// DecodeFixed64 reads a 64-bit integer from the Buffer.
|
||||||
// This is the format for the
|
// This is the format for the
|
||||||
// fixed64, sfixed64, and double protocol buffer types.
|
// fixed64, sfixed64, and double protocol buffer types.
|
||||||
|
@ -173,9 +261,6 @@ func (p *Buffer) DecodeZigzag32() (x uint64, err error) {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
// These are not ValueDecoders: they produce an array of bytes or a string.
|
|
||||||
// bytes, embedded messages
|
|
||||||
|
|
||||||
// DecodeRawBytes reads a count-delimited byte buffer from the Buffer.
|
// DecodeRawBytes reads a count-delimited byte buffer from the Buffer.
|
||||||
// This is the format used for the bytes protocol buffer
|
// This is the format used for the bytes protocol buffer
|
||||||
// type and for embedded messages.
|
// type and for embedded messages.
|
||||||
|
@ -217,81 +302,29 @@ func (p *Buffer) DecodeStringBytes() (s string, err error) {
|
||||||
return string(buf), nil
|
return string(buf), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// Skip the next item in the buffer. Its wire type is decoded and presented as an argument.
|
|
||||||
// If the protocol buffer has extensions, and the field matches, add it as an extension.
|
|
||||||
// Otherwise, if the XXX_unrecognized field exists, append the skipped data there.
|
|
||||||
func (o *Buffer) skipAndSave(t reflect.Type, tag, wire int, base structPointer, unrecField field) error {
|
|
||||||
oi := o.index
|
|
||||||
|
|
||||||
err := o.skip(t, tag, wire)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
if !unrecField.IsValid() {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
ptr := structPointer_Bytes(base, unrecField)
|
|
||||||
|
|
||||||
// Add the skipped field to struct field
|
|
||||||
obuf := o.buf
|
|
||||||
|
|
||||||
o.buf = *ptr
|
|
||||||
o.EncodeVarint(uint64(tag<<3 | wire))
|
|
||||||
*ptr = append(o.buf, obuf[oi:o.index]...)
|
|
||||||
|
|
||||||
o.buf = obuf
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Skip the next item in the buffer. Its wire type is decoded and presented as an argument.
|
|
||||||
func (o *Buffer) skip(t reflect.Type, tag, wire int) error {
|
|
||||||
|
|
||||||
var u uint64
|
|
||||||
var err error
|
|
||||||
|
|
||||||
switch wire {
|
|
||||||
case WireVarint:
|
|
||||||
_, err = o.DecodeVarint()
|
|
||||||
case WireFixed64:
|
|
||||||
_, err = o.DecodeFixed64()
|
|
||||||
case WireBytes:
|
|
||||||
_, err = o.DecodeRawBytes(false)
|
|
||||||
case WireFixed32:
|
|
||||||
_, err = o.DecodeFixed32()
|
|
||||||
case WireStartGroup:
|
|
||||||
for {
|
|
||||||
u, err = o.DecodeVarint()
|
|
||||||
if err != nil {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
fwire := int(u & 0x7)
|
|
||||||
if fwire == WireEndGroup {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
ftag := int(u >> 3)
|
|
||||||
err = o.skip(t, ftag, fwire)
|
|
||||||
if err != nil {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
default:
|
|
||||||
err = fmt.Errorf("proto: can't skip unknown wire type %d for %s", wire, t)
|
|
||||||
}
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Unmarshaler is the interface representing objects that can
|
// Unmarshaler is the interface representing objects that can
|
||||||
// unmarshal themselves. The method should reset the receiver before
|
// unmarshal themselves. The argument points to data that may be
|
||||||
// decoding starts. The argument points to data that may be
|
|
||||||
// overwritten, so implementations should not keep references to the
|
// overwritten, so implementations should not keep references to the
|
||||||
// buffer.
|
// buffer.
|
||||||
|
// Unmarshal implementations should not clear the receiver.
|
||||||
|
// Any unmarshaled data should be merged into the receiver.
|
||||||
|
// Callers of Unmarshal that do not want to retain existing data
|
||||||
|
// should Reset the receiver before calling Unmarshal.
|
||||||
type Unmarshaler interface {
|
type Unmarshaler interface {
|
||||||
Unmarshal([]byte) error
|
Unmarshal([]byte) error
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// newUnmarshaler is the interface representing objects that can
|
||||||
|
// unmarshal themselves. The semantics are identical to Unmarshaler.
|
||||||
|
//
|
||||||
|
// This exists to support protoc-gen-go generated messages.
|
||||||
|
// The proto package will stop type-asserting to this interface in the future.
|
||||||
|
//
|
||||||
|
// DO NOT DEPEND ON THIS.
|
||||||
|
type newUnmarshaler interface {
|
||||||
|
XXX_Unmarshal([]byte) error
|
||||||
|
}
|
||||||
|
|
||||||
// Unmarshal parses the protocol buffer representation in buf and places the
|
// Unmarshal parses the protocol buffer representation in buf and places the
|
||||||
// decoded result in pb. If the struct underlying pb does not match
|
// decoded result in pb. If the struct underlying pb does not match
|
||||||
// the data in buf, the results can be unpredictable.
|
// the data in buf, the results can be unpredictable.
|
||||||
|
@ -301,7 +334,13 @@ type Unmarshaler interface {
|
||||||
// to preserve and append to existing data.
|
// to preserve and append to existing data.
|
||||||
func Unmarshal(buf []byte, pb Message) error {
|
func Unmarshal(buf []byte, pb Message) error {
|
||||||
pb.Reset()
|
pb.Reset()
|
||||||
return UnmarshalMerge(buf, pb)
|
if u, ok := pb.(newUnmarshaler); ok {
|
||||||
|
return u.XXX_Unmarshal(buf)
|
||||||
|
}
|
||||||
|
if u, ok := pb.(Unmarshaler); ok {
|
||||||
|
return u.Unmarshal(buf)
|
||||||
|
}
|
||||||
|
return NewBuffer(buf).Unmarshal(pb)
|
||||||
}
|
}
|
||||||
|
|
||||||
// UnmarshalMerge parses the protocol buffer representation in buf and
|
// UnmarshalMerge parses the protocol buffer representation in buf and
|
||||||
|
@ -311,8 +350,16 @@ func Unmarshal(buf []byte, pb Message) error {
|
||||||
// UnmarshalMerge merges into existing data in pb.
|
// UnmarshalMerge merges into existing data in pb.
|
||||||
// Most code should use Unmarshal instead.
|
// Most code should use Unmarshal instead.
|
||||||
func UnmarshalMerge(buf []byte, pb Message) error {
|
func UnmarshalMerge(buf []byte, pb Message) error {
|
||||||
// If the object can unmarshal itself, let it.
|
if u, ok := pb.(newUnmarshaler); ok {
|
||||||
|
return u.XXX_Unmarshal(buf)
|
||||||
|
}
|
||||||
if u, ok := pb.(Unmarshaler); ok {
|
if u, ok := pb.(Unmarshaler); ok {
|
||||||
|
// NOTE: The history of proto have unfortunately been inconsistent
|
||||||
|
// whether Unmarshaler should or should not implicitly clear itself.
|
||||||
|
// Some implementations do, most do not.
|
||||||
|
// Thus, calling this here may or may not do what people want.
|
||||||
|
//
|
||||||
|
// See https://github.com/golang/protobuf/issues/424
|
||||||
return u.Unmarshal(buf)
|
return u.Unmarshal(buf)
|
||||||
}
|
}
|
||||||
return NewBuffer(buf).Unmarshal(pb)
|
return NewBuffer(buf).Unmarshal(pb)
|
||||||
|
@ -328,541 +375,54 @@ func (p *Buffer) DecodeMessage(pb Message) error {
|
||||||
}
|
}
|
||||||
|
|
||||||
// DecodeGroup reads a tag-delimited group from the Buffer.
|
// DecodeGroup reads a tag-delimited group from the Buffer.
|
||||||
|
// StartGroup tag is already consumed. This function consumes
|
||||||
|
// EndGroup tag.
|
||||||
func (p *Buffer) DecodeGroup(pb Message) error {
|
func (p *Buffer) DecodeGroup(pb Message) error {
|
||||||
typ, base, err := getbase(pb)
|
b := p.buf[p.index:]
|
||||||
if err != nil {
|
x, y := findEndGroup(b)
|
||||||
return err
|
if x < 0 {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
}
|
}
|
||||||
return p.unmarshalType(typ.Elem(), GetProperties(typ.Elem()), true, base)
|
err := Unmarshal(b[:x], pb)
|
||||||
|
p.index += y
|
||||||
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
// Unmarshal parses the protocol buffer representation in the
|
// Unmarshal parses the protocol buffer representation in the
|
||||||
// Buffer and places the decoded result in pb. If the struct
|
// Buffer and places the decoded result in pb. If the struct
|
||||||
// underlying pb does not match the data in the buffer, the results can be
|
// underlying pb does not match the data in the buffer, the results can be
|
||||||
// unpredictable.
|
// unpredictable.
|
||||||
|
//
|
||||||
|
// Unlike proto.Unmarshal, this does not reset pb before starting to unmarshal.
|
||||||
func (p *Buffer) Unmarshal(pb Message) error {
|
func (p *Buffer) Unmarshal(pb Message) error {
|
||||||
// If the object can unmarshal itself, let it.
|
// If the object can unmarshal itself, let it.
|
||||||
|
if u, ok := pb.(newUnmarshaler); ok {
|
||||||
|
err := u.XXX_Unmarshal(p.buf[p.index:])
|
||||||
|
p.index = len(p.buf)
|
||||||
|
return err
|
||||||
|
}
|
||||||
if u, ok := pb.(Unmarshaler); ok {
|
if u, ok := pb.(Unmarshaler); ok {
|
||||||
|
// NOTE: The history of proto have unfortunately been inconsistent
|
||||||
|
// whether Unmarshaler should or should not implicitly clear itself.
|
||||||
|
// Some implementations do, most do not.
|
||||||
|
// Thus, calling this here may or may not do what people want.
|
||||||
|
//
|
||||||
|
// See https://github.com/golang/protobuf/issues/424
|
||||||
err := u.Unmarshal(p.buf[p.index:])
|
err := u.Unmarshal(p.buf[p.index:])
|
||||||
p.index = len(p.buf)
|
p.index = len(p.buf)
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
typ, base, err := getbase(pb)
|
// Slow workaround for messages that aren't Unmarshalers.
|
||||||
if err != nil {
|
// This includes some hand-coded .pb.go files and
|
||||||
return err
|
// bootstrap protos.
|
||||||
}
|
// TODO: fix all of those and then add Unmarshal to
|
||||||
|
// the Message interface. Then:
|
||||||
err = p.unmarshalType(typ.Elem(), GetProperties(typ.Elem()), false, base)
|
// The cast above and code below can be deleted.
|
||||||
|
// The old unmarshaler can be deleted.
|
||||||
if collectStats {
|
// Clients can call Unmarshal directly (can already do that, actually).
|
||||||
stats.Decode++
|
var info InternalMessageInfo
|
||||||
}
|
err := info.Unmarshal(pb, p.buf[p.index:])
|
||||||
|
p.index = len(p.buf)
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// unmarshalType does the work of unmarshaling a structure.
|
|
||||||
func (o *Buffer) unmarshalType(st reflect.Type, prop *StructProperties, is_group bool, base structPointer) error {
|
|
||||||
var state errorState
|
|
||||||
required, reqFields := prop.reqCount, uint64(0)
|
|
||||||
|
|
||||||
var err error
|
|
||||||
for err == nil && o.index < len(o.buf) {
|
|
||||||
oi := o.index
|
|
||||||
var u uint64
|
|
||||||
u, err = o.DecodeVarint()
|
|
||||||
if err != nil {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
wire := int(u & 0x7)
|
|
||||||
if wire == WireEndGroup {
|
|
||||||
if is_group {
|
|
||||||
return nil // input is satisfied
|
|
||||||
}
|
|
||||||
return fmt.Errorf("proto: %s: wiretype end group for non-group", st)
|
|
||||||
}
|
|
||||||
tag := int(u >> 3)
|
|
||||||
if tag <= 0 {
|
|
||||||
return fmt.Errorf("proto: %s: illegal tag %d (wire type %d)", st, tag, wire)
|
|
||||||
}
|
|
||||||
fieldnum, ok := prop.decoderTags.get(tag)
|
|
||||||
if !ok {
|
|
||||||
// Maybe it's an extension?
|
|
||||||
if prop.extendable {
|
|
||||||
if e := structPointer_Interface(base, st).(extendableProto); isExtensionField(e, int32(tag)) {
|
|
||||||
if err = o.skip(st, tag, wire); err == nil {
|
|
||||||
ext := e.ExtensionMap()[int32(tag)] // may be missing
|
|
||||||
ext.enc = append(ext.enc, o.buf[oi:o.index]...)
|
|
||||||
e.ExtensionMap()[int32(tag)] = ext
|
|
||||||
}
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// Maybe it's a oneof?
|
|
||||||
if prop.oneofUnmarshaler != nil {
|
|
||||||
m := structPointer_Interface(base, st).(Message)
|
|
||||||
// First return value indicates whether tag is a oneof field.
|
|
||||||
ok, err = prop.oneofUnmarshaler(m, tag, wire, o)
|
|
||||||
if err == ErrInternalBadWireType {
|
|
||||||
// Map the error to something more descriptive.
|
|
||||||
// Do the formatting here to save generated code space.
|
|
||||||
err = fmt.Errorf("bad wiretype for oneof field in %T", m)
|
|
||||||
}
|
|
||||||
if ok {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
}
|
|
||||||
err = o.skipAndSave(st, tag, wire, base, prop.unrecField)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
p := prop.Prop[fieldnum]
|
|
||||||
|
|
||||||
if p.dec == nil {
|
|
||||||
fmt.Fprintf(os.Stderr, "proto: no protobuf decoder for %s.%s\n", st, st.Field(fieldnum).Name)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
dec := p.dec
|
|
||||||
if wire != WireStartGroup && wire != p.WireType {
|
|
||||||
if wire == WireBytes && p.packedDec != nil {
|
|
||||||
// a packable field
|
|
||||||
dec = p.packedDec
|
|
||||||
} else {
|
|
||||||
err = fmt.Errorf("proto: bad wiretype for field %s.%s: got wiretype %d, want %d", st, st.Field(fieldnum).Name, wire, p.WireType)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
}
|
|
||||||
decErr := dec(o, p, base)
|
|
||||||
if decErr != nil && !state.shouldContinue(decErr, p) {
|
|
||||||
err = decErr
|
|
||||||
}
|
|
||||||
if err == nil && p.Required {
|
|
||||||
// Successfully decoded a required field.
|
|
||||||
if tag <= 64 {
|
|
||||||
// use bitmap for fields 1-64 to catch field reuse.
|
|
||||||
var mask uint64 = 1 << uint64(tag-1)
|
|
||||||
if reqFields&mask == 0 {
|
|
||||||
// new required field
|
|
||||||
reqFields |= mask
|
|
||||||
required--
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
// This is imprecise. It can be fooled by a required field
|
|
||||||
// with a tag > 64 that is encoded twice; that's very rare.
|
|
||||||
// A fully correct implementation would require allocating
|
|
||||||
// a data structure, which we would like to avoid.
|
|
||||||
required--
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if err == nil {
|
|
||||||
if is_group {
|
|
||||||
return io.ErrUnexpectedEOF
|
|
||||||
}
|
|
||||||
if state.err != nil {
|
|
||||||
return state.err
|
|
||||||
}
|
|
||||||
if required > 0 {
|
|
||||||
// Not enough information to determine the exact field. If we use extra
|
|
||||||
// CPU, we could determine the field only if the missing required field
|
|
||||||
// has a tag <= 64 and we check reqFields.
|
|
||||||
return &RequiredNotSetError{"{Unknown}"}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Individual type decoders
|
|
||||||
// For each,
|
|
||||||
// u is the decoded value,
|
|
||||||
// v is a pointer to the field (pointer) in the struct
|
|
||||||
|
|
||||||
// Sizes of the pools to allocate inside the Buffer.
|
|
||||||
// The goal is modest amortization and allocation
|
|
||||||
// on at least 16-byte boundaries.
|
|
||||||
const (
|
|
||||||
boolPoolSize = 16
|
|
||||||
uint32PoolSize = 8
|
|
||||||
uint64PoolSize = 4
|
|
||||||
)
|
|
||||||
|
|
||||||
// Decode a bool.
|
|
||||||
func (o *Buffer) dec_bool(p *Properties, base structPointer) error {
|
|
||||||
u, err := p.valDec(o)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if len(o.bools) == 0 {
|
|
||||||
o.bools = make([]bool, boolPoolSize)
|
|
||||||
}
|
|
||||||
o.bools[0] = u != 0
|
|
||||||
*structPointer_Bool(base, p.field) = &o.bools[0]
|
|
||||||
o.bools = o.bools[1:]
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (o *Buffer) dec_proto3_bool(p *Properties, base structPointer) error {
|
|
||||||
u, err := p.valDec(o)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
*structPointer_BoolVal(base, p.field) = u != 0
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Decode an int32.
|
|
||||||
func (o *Buffer) dec_int32(p *Properties, base structPointer) error {
|
|
||||||
u, err := p.valDec(o)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
word32_Set(structPointer_Word32(base, p.field), o, uint32(u))
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (o *Buffer) dec_proto3_int32(p *Properties, base structPointer) error {
|
|
||||||
u, err := p.valDec(o)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
word32Val_Set(structPointer_Word32Val(base, p.field), uint32(u))
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Decode an int64.
|
|
||||||
func (o *Buffer) dec_int64(p *Properties, base structPointer) error {
|
|
||||||
u, err := p.valDec(o)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
word64_Set(structPointer_Word64(base, p.field), o, u)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (o *Buffer) dec_proto3_int64(p *Properties, base structPointer) error {
|
|
||||||
u, err := p.valDec(o)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
word64Val_Set(structPointer_Word64Val(base, p.field), o, u)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Decode a string.
|
|
||||||
func (o *Buffer) dec_string(p *Properties, base structPointer) error {
|
|
||||||
s, err := o.DecodeStringBytes()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
*structPointer_String(base, p.field) = &s
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (o *Buffer) dec_proto3_string(p *Properties, base structPointer) error {
|
|
||||||
s, err := o.DecodeStringBytes()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
*structPointer_StringVal(base, p.field) = s
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Decode a slice of bytes ([]byte).
|
|
||||||
func (o *Buffer) dec_slice_byte(p *Properties, base structPointer) error {
|
|
||||||
b, err := o.DecodeRawBytes(true)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
*structPointer_Bytes(base, p.field) = b
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Decode a slice of bools ([]bool).
|
|
||||||
func (o *Buffer) dec_slice_bool(p *Properties, base structPointer) error {
|
|
||||||
u, err := p.valDec(o)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
v := structPointer_BoolSlice(base, p.field)
|
|
||||||
*v = append(*v, u != 0)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Decode a slice of bools ([]bool) in packed format.
|
|
||||||
func (o *Buffer) dec_slice_packed_bool(p *Properties, base structPointer) error {
|
|
||||||
v := structPointer_BoolSlice(base, p.field)
|
|
||||||
|
|
||||||
nn, err := o.DecodeVarint()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
nb := int(nn) // number of bytes of encoded bools
|
|
||||||
fin := o.index + nb
|
|
||||||
if fin < o.index {
|
|
||||||
return errOverflow
|
|
||||||
}
|
|
||||||
|
|
||||||
y := *v
|
|
||||||
for o.index < fin {
|
|
||||||
u, err := p.valDec(o)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
y = append(y, u != 0)
|
|
||||||
}
|
|
||||||
|
|
||||||
*v = y
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Decode a slice of int32s ([]int32).
|
|
||||||
func (o *Buffer) dec_slice_int32(p *Properties, base structPointer) error {
|
|
||||||
u, err := p.valDec(o)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
structPointer_Word32Slice(base, p.field).Append(uint32(u))
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Decode a slice of int32s ([]int32) in packed format.
|
|
||||||
func (o *Buffer) dec_slice_packed_int32(p *Properties, base structPointer) error {
|
|
||||||
v := structPointer_Word32Slice(base, p.field)
|
|
||||||
|
|
||||||
nn, err := o.DecodeVarint()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
nb := int(nn) // number of bytes of encoded int32s
|
|
||||||
|
|
||||||
fin := o.index + nb
|
|
||||||
if fin < o.index {
|
|
||||||
return errOverflow
|
|
||||||
}
|
|
||||||
for o.index < fin {
|
|
||||||
u, err := p.valDec(o)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
v.Append(uint32(u))
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Decode a slice of int64s ([]int64).
|
|
||||||
func (o *Buffer) dec_slice_int64(p *Properties, base structPointer) error {
|
|
||||||
u, err := p.valDec(o)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
structPointer_Word64Slice(base, p.field).Append(u)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Decode a slice of int64s ([]int64) in packed format.
|
|
||||||
func (o *Buffer) dec_slice_packed_int64(p *Properties, base structPointer) error {
|
|
||||||
v := structPointer_Word64Slice(base, p.field)
|
|
||||||
|
|
||||||
nn, err := o.DecodeVarint()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
nb := int(nn) // number of bytes of encoded int64s
|
|
||||||
|
|
||||||
fin := o.index + nb
|
|
||||||
if fin < o.index {
|
|
||||||
return errOverflow
|
|
||||||
}
|
|
||||||
for o.index < fin {
|
|
||||||
u, err := p.valDec(o)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
v.Append(u)
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Decode a slice of strings ([]string).
|
|
||||||
func (o *Buffer) dec_slice_string(p *Properties, base structPointer) error {
|
|
||||||
s, err := o.DecodeStringBytes()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
v := structPointer_StringSlice(base, p.field)
|
|
||||||
*v = append(*v, s)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Decode a slice of slice of bytes ([][]byte).
|
|
||||||
func (o *Buffer) dec_slice_slice_byte(p *Properties, base structPointer) error {
|
|
||||||
b, err := o.DecodeRawBytes(true)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
v := structPointer_BytesSlice(base, p.field)
|
|
||||||
*v = append(*v, b)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Decode a map field.
|
|
||||||
func (o *Buffer) dec_new_map(p *Properties, base structPointer) error {
|
|
||||||
raw, err := o.DecodeRawBytes(false)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
oi := o.index // index at the end of this map entry
|
|
||||||
o.index -= len(raw) // move buffer back to start of map entry
|
|
||||||
|
|
||||||
mptr := structPointer_NewAt(base, p.field, p.mtype) // *map[K]V
|
|
||||||
if mptr.Elem().IsNil() {
|
|
||||||
mptr.Elem().Set(reflect.MakeMap(mptr.Type().Elem()))
|
|
||||||
}
|
|
||||||
v := mptr.Elem() // map[K]V
|
|
||||||
|
|
||||||
// Prepare addressable doubly-indirect placeholders for the key and value types.
|
|
||||||
// See enc_new_map for why.
|
|
||||||
keyptr := reflect.New(reflect.PtrTo(p.mtype.Key())).Elem() // addressable *K
|
|
||||||
keybase := toStructPointer(keyptr.Addr()) // **K
|
|
||||||
|
|
||||||
var valbase structPointer
|
|
||||||
var valptr reflect.Value
|
|
||||||
switch p.mtype.Elem().Kind() {
|
|
||||||
case reflect.Slice:
|
|
||||||
// []byte
|
|
||||||
var dummy []byte
|
|
||||||
valptr = reflect.ValueOf(&dummy) // *[]byte
|
|
||||||
valbase = toStructPointer(valptr) // *[]byte
|
|
||||||
case reflect.Ptr:
|
|
||||||
// message; valptr is **Msg; need to allocate the intermediate pointer
|
|
||||||
valptr = reflect.New(reflect.PtrTo(p.mtype.Elem())).Elem() // addressable *V
|
|
||||||
valptr.Set(reflect.New(valptr.Type().Elem()))
|
|
||||||
valbase = toStructPointer(valptr)
|
|
||||||
default:
|
|
||||||
// everything else
|
|
||||||
valptr = reflect.New(reflect.PtrTo(p.mtype.Elem())).Elem() // addressable *V
|
|
||||||
valbase = toStructPointer(valptr.Addr()) // **V
|
|
||||||
}
|
|
||||||
|
|
||||||
// Decode.
|
|
||||||
// This parses a restricted wire format, namely the encoding of a message
|
|
||||||
// with two fields. See enc_new_map for the format.
|
|
||||||
for o.index < oi {
|
|
||||||
// tagcode for key and value properties are always a single byte
|
|
||||||
// because they have tags 1 and 2.
|
|
||||||
tagcode := o.buf[o.index]
|
|
||||||
o.index++
|
|
||||||
switch tagcode {
|
|
||||||
case p.mkeyprop.tagcode[0]:
|
|
||||||
if err := p.mkeyprop.dec(o, p.mkeyprop, keybase); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
case p.mvalprop.tagcode[0]:
|
|
||||||
if err := p.mvalprop.dec(o, p.mvalprop, valbase); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
default:
|
|
||||||
// TODO: Should we silently skip this instead?
|
|
||||||
return fmt.Errorf("proto: bad map data tag %d", raw[0])
|
|
||||||
}
|
|
||||||
}
|
|
||||||
keyelem, valelem := keyptr.Elem(), valptr.Elem()
|
|
||||||
if !keyelem.IsValid() {
|
|
||||||
keyelem = reflect.Zero(p.mtype.Key())
|
|
||||||
}
|
|
||||||
if !valelem.IsValid() {
|
|
||||||
valelem = reflect.Zero(p.mtype.Elem())
|
|
||||||
}
|
|
||||||
|
|
||||||
v.SetMapIndex(keyelem, valelem)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Decode a group.
|
|
||||||
func (o *Buffer) dec_struct_group(p *Properties, base structPointer) error {
|
|
||||||
bas := structPointer_GetStructPointer(base, p.field)
|
|
||||||
if structPointer_IsNil(bas) {
|
|
||||||
// allocate new nested message
|
|
||||||
bas = toStructPointer(reflect.New(p.stype))
|
|
||||||
structPointer_SetStructPointer(base, p.field, bas)
|
|
||||||
}
|
|
||||||
return o.unmarshalType(p.stype, p.sprop, true, bas)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Decode an embedded message.
|
|
||||||
func (o *Buffer) dec_struct_message(p *Properties, base structPointer) (err error) {
|
|
||||||
raw, e := o.DecodeRawBytes(false)
|
|
||||||
if e != nil {
|
|
||||||
return e
|
|
||||||
}
|
|
||||||
|
|
||||||
bas := structPointer_GetStructPointer(base, p.field)
|
|
||||||
if structPointer_IsNil(bas) {
|
|
||||||
// allocate new nested message
|
|
||||||
bas = toStructPointer(reflect.New(p.stype))
|
|
||||||
structPointer_SetStructPointer(base, p.field, bas)
|
|
||||||
}
|
|
||||||
|
|
||||||
// If the object can unmarshal itself, let it.
|
|
||||||
if p.isUnmarshaler {
|
|
||||||
iv := structPointer_Interface(bas, p.stype)
|
|
||||||
return iv.(Unmarshaler).Unmarshal(raw)
|
|
||||||
}
|
|
||||||
|
|
||||||
obuf := o.buf
|
|
||||||
oi := o.index
|
|
||||||
o.buf = raw
|
|
||||||
o.index = 0
|
|
||||||
|
|
||||||
err = o.unmarshalType(p.stype, p.sprop, false, bas)
|
|
||||||
o.buf = obuf
|
|
||||||
o.index = oi
|
|
||||||
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Decode a slice of embedded messages.
|
|
||||||
func (o *Buffer) dec_slice_struct_message(p *Properties, base structPointer) error {
|
|
||||||
return o.dec_slice_struct(p, false, base)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Decode a slice of embedded groups.
|
|
||||||
func (o *Buffer) dec_slice_struct_group(p *Properties, base structPointer) error {
|
|
||||||
return o.dec_slice_struct(p, true, base)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Decode a slice of structs ([]*struct).
|
|
||||||
func (o *Buffer) dec_slice_struct(p *Properties, is_group bool, base structPointer) error {
|
|
||||||
v := reflect.New(p.stype)
|
|
||||||
bas := toStructPointer(v)
|
|
||||||
structPointer_StructPointerSlice(base, p.field).Append(bas)
|
|
||||||
|
|
||||||
if is_group {
|
|
||||||
err := o.unmarshalType(p.stype, p.sprop, is_group, bas)
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
raw, err := o.DecodeRawBytes(false)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// If the object can unmarshal itself, let it.
|
|
||||||
if p.isUnmarshaler {
|
|
||||||
iv := v.Interface()
|
|
||||||
return iv.(Unmarshaler).Unmarshal(raw)
|
|
||||||
}
|
|
||||||
|
|
||||||
obuf := o.buf
|
|
||||||
oi := o.index
|
|
||||||
o.buf = raw
|
|
||||||
o.index = 0
|
|
||||||
|
|
||||||
err = o.unmarshalType(p.stype, p.sprop, is_group, bas)
|
|
||||||
|
|
||||||
o.buf = obuf
|
|
||||||
o.index = oi
|
|
||||||
|
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|

350 vendor/github.com/golang/protobuf/proto/discard.go generated vendored Normal file
@ -0,0 +1,350 @@
|
||||||
|
// Go support for Protocol Buffers - Google's data interchange format
|
||||||
|
//
|
||||||
|
// Copyright 2017 The Go Authors. All rights reserved.
|
||||||
|
// https://github.com/golang/protobuf
|
||||||
|
//
|
||||||
|
// Redistribution and use in source and binary forms, with or without
|
||||||
|
// modification, are permitted provided that the following conditions are
|
||||||
|
// met:
|
||||||
|
//
|
||||||
|
// * Redistributions of source code must retain the above copyright
|
||||||
|
// notice, this list of conditions and the following disclaimer.
|
||||||
|
// * Redistributions in binary form must reproduce the above
|
||||||
|
// copyright notice, this list of conditions and the following disclaimer
|
||||||
|
// in the documentation and/or other materials provided with the
|
||||||
|
// distribution.
|
||||||
|
// * Neither the name of Google Inc. nor the names of its
|
||||||
|
// contributors may be used to endorse or promote products derived from
|
||||||
|
// this software without specific prior written permission.
|
||||||
|
//
|
||||||
|
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||||
|
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||||
|
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||||
|
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||||
|
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||||
|
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||||
|
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||||
|
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||||
|
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||||
|
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||||
|
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||||
|
|
||||||
|
package proto
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"reflect"
|
||||||
|
"strings"
|
||||||
|
"sync"
|
||||||
|
"sync/atomic"
|
||||||
|
)
|
||||||
|
|
||||||
|
type generatedDiscarder interface {
|
||||||
|
XXX_DiscardUnknown()
|
||||||
|
}
|
||||||
|
|
||||||
|
// DiscardUnknown recursively discards all unknown fields from this message
|
||||||
|
// and all embedded messages.
|
||||||
|
//
|
||||||
|
// When unmarshaling a message with unrecognized fields, the tags and values
|
||||||
|
// of such fields are preserved in the Message. This allows a later call to
|
||||||
|
// marshal to be able to produce a message that continues to have those
// unrecognized fields. To avoid this, DiscardUnknown is used to
// explicitly clear the unknown fields after unmarshaling.
//
// For proto2 messages, the unknown fields of message extensions are only
// discarded from messages that have been accessed via GetExtension.
func DiscardUnknown(m Message) {
	if m, ok := m.(generatedDiscarder); ok {
		m.XXX_DiscardUnknown()
		return
	}
	// TODO: Dynamically populate a InternalMessageInfo for legacy messages,
	// but the master branch has no implementation for InternalMessageInfo,
	// so it would be more work to replicate that approach.
	discardLegacy(m)
}

// DiscardUnknown recursively discards all unknown fields.
func (a *InternalMessageInfo) DiscardUnknown(m Message) {
	di := atomicLoadDiscardInfo(&a.discard)
	if di == nil {
		di = getDiscardInfo(reflect.TypeOf(m).Elem())
		atomicStoreDiscardInfo(&a.discard, di)
	}
	di.discard(toPointer(&m))
}

type discardInfo struct {
	typ reflect.Type

	initialized int32 // 0: only typ is valid, 1: everything is valid
	lock        sync.Mutex

	fields       []discardFieldInfo
	unrecognized field
}

type discardFieldInfo struct {
	field   field // Offset of field, guaranteed to be valid
	discard func(src pointer)
}

var (
	discardInfoMap  = map[reflect.Type]*discardInfo{}
	discardInfoLock sync.Mutex
)

func getDiscardInfo(t reflect.Type) *discardInfo {
	discardInfoLock.Lock()
	defer discardInfoLock.Unlock()
	di := discardInfoMap[t]
	if di == nil {
		di = &discardInfo{typ: t}
		discardInfoMap[t] = di
	}
	return di
}

func (di *discardInfo) discard(src pointer) {
	if src.isNil() {
		return // Nothing to do.
	}

	if atomic.LoadInt32(&di.initialized) == 0 {
		di.computeDiscardInfo()
	}

	for _, fi := range di.fields {
		sfp := src.offset(fi.field)
		fi.discard(sfp)
	}

	// For proto2 messages, only discard unknown fields in message extensions
	// that have been accessed via GetExtension.
	if em, err := extendable(src.asPointerTo(di.typ).Interface()); err == nil {
		// Ignore lock since DiscardUnknown is not concurrency safe.
		emm, _ := em.extensionsRead()
		for _, mx := range emm {
			if m, ok := mx.value.(Message); ok {
				DiscardUnknown(m)
			}
		}
	}

	if di.unrecognized.IsValid() {
		*src.offset(di.unrecognized).toBytes() = nil
	}
}

func (di *discardInfo) computeDiscardInfo() {
	di.lock.Lock()
	defer di.lock.Unlock()
	if di.initialized != 0 {
		return
	}
	t := di.typ
	n := t.NumField()

	for i := 0; i < n; i++ {
		f := t.Field(i)
		if strings.HasPrefix(f.Name, "XXX_") {
			continue
		}

		dfi := discardFieldInfo{field: toField(&f)}
		tf := f.Type

		// Unwrap tf to get its most basic type.
		var isPointer, isSlice bool
		if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 {
			isSlice = true
			tf = tf.Elem()
		}
		if tf.Kind() == reflect.Ptr {
			isPointer = true
			tf = tf.Elem()
		}
		if isPointer && isSlice && tf.Kind() != reflect.Struct {
			panic(fmt.Sprintf("%v.%s cannot be a slice of pointers to primitive types", t, f.Name))
		}

		switch tf.Kind() {
		case reflect.Struct:
			switch {
			case !isPointer:
				panic(fmt.Sprintf("%v.%s cannot be a direct struct value", t, f.Name))
			case isSlice: // E.g., []*pb.T
				di := getDiscardInfo(tf)
				dfi.discard = func(src pointer) {
					sps := src.getPointerSlice()
					for _, sp := range sps {
						if !sp.isNil() {
							di.discard(sp)
						}
					}
				}
			default: // E.g., *pb.T
				di := getDiscardInfo(tf)
				dfi.discard = func(src pointer) {
					sp := src.getPointer()
					if !sp.isNil() {
						di.discard(sp)
					}
				}
			}
		case reflect.Map:
			switch {
			case isPointer || isSlice:
				panic(fmt.Sprintf("%v.%s cannot be a pointer to a map or a slice of map values", t, f.Name))
			default: // E.g., map[K]V
				if tf.Elem().Kind() == reflect.Ptr { // Proto struct (e.g., *T)
					dfi.discard = func(src pointer) {
						sm := src.asPointerTo(tf).Elem()
						if sm.Len() == 0 {
							return
						}
						for _, key := range sm.MapKeys() {
							val := sm.MapIndex(key)
							DiscardUnknown(val.Interface().(Message))
						}
					}
				} else {
					dfi.discard = func(pointer) {} // Noop
				}
			}
		case reflect.Interface:
			// Must be oneof field.
			switch {
			case isPointer || isSlice:
				panic(fmt.Sprintf("%v.%s cannot be a pointer to a interface or a slice of interface values", t, f.Name))
			default: // E.g., interface{}
				// TODO: Make this faster?
				dfi.discard = func(src pointer) {
					su := src.asPointerTo(tf).Elem()
					if !su.IsNil() {
						sv := su.Elem().Elem().Field(0)
						if sv.Kind() == reflect.Ptr && sv.IsNil() {
							return
						}
						switch sv.Type().Kind() {
						case reflect.Ptr: // Proto struct (e.g., *T)
							DiscardUnknown(sv.Interface().(Message))
						}
					}
				}
			}
		default:
			continue
		}
		di.fields = append(di.fields, dfi)
	}

	di.unrecognized = invalidField
	if f, ok := t.FieldByName("XXX_unrecognized"); ok {
		if f.Type != reflect.TypeOf([]byte{}) {
			panic("expected XXX_unrecognized to be of type []byte")
		}
		di.unrecognized = toField(&f)
	}

	atomic.StoreInt32(&di.initialized, 1)
}

func discardLegacy(m Message) {
	v := reflect.ValueOf(m)
	if v.Kind() != reflect.Ptr || v.IsNil() {
		return
	}
	v = v.Elem()
	if v.Kind() != reflect.Struct {
		return
	}
	t := v.Type()

	for i := 0; i < v.NumField(); i++ {
		f := t.Field(i)
		if strings.HasPrefix(f.Name, "XXX_") {
			continue
		}
		vf := v.Field(i)
		tf := f.Type

		// Unwrap tf to get its most basic type.
		var isPointer, isSlice bool
		if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 {
			isSlice = true
			tf = tf.Elem()
		}
		if tf.Kind() == reflect.Ptr {
			isPointer = true
			tf = tf.Elem()
		}
		if isPointer && isSlice && tf.Kind() != reflect.Struct {
			panic(fmt.Sprintf("%T.%s cannot be a slice of pointers to primitive types", m, f.Name))
		}

		switch tf.Kind() {
		case reflect.Struct:
			switch {
			case !isPointer:
				panic(fmt.Sprintf("%T.%s cannot be a direct struct value", m, f.Name))
			case isSlice: // E.g., []*pb.T
				for j := 0; j < vf.Len(); j++ {
					discardLegacy(vf.Index(j).Interface().(Message))
				}
			default: // E.g., *pb.T
				discardLegacy(vf.Interface().(Message))
			}
		case reflect.Map:
			switch {
			case isPointer || isSlice:
				panic(fmt.Sprintf("%T.%s cannot be a pointer to a map or a slice of map values", m, f.Name))
			default: // E.g., map[K]V
				tv := vf.Type().Elem()
				if tv.Kind() == reflect.Ptr && tv.Implements(protoMessageType) { // Proto struct (e.g., *T)
					for _, key := range vf.MapKeys() {
						val := vf.MapIndex(key)
						discardLegacy(val.Interface().(Message))
					}
				}
			}
		case reflect.Interface:
			// Must be oneof field.
			switch {
			case isPointer || isSlice:
				panic(fmt.Sprintf("%T.%s cannot be a pointer to a interface or a slice of interface values", m, f.Name))
			default: // E.g., test_proto.isCommunique_Union interface
				if !vf.IsNil() && f.Tag.Get("protobuf_oneof") != "" {
					vf = vf.Elem() // E.g., *test_proto.Communique_Msg
					if !vf.IsNil() {
						vf = vf.Elem()   // E.g., test_proto.Communique_Msg
						vf = vf.Field(0) // E.g., Proto struct (e.g., *T) or primitive value
						if vf.Kind() == reflect.Ptr {
							discardLegacy(vf.Interface().(Message))
						}
					}
				}
			}
		}
	}

	if vf := v.FieldByName("XXX_unrecognized"); vf.IsValid() {
		if vf.Type() != reflect.TypeOf([]byte{}) {
			panic("expected XXX_unrecognized to be of type []byte")
		}
		vf.Set(reflect.ValueOf([]byte(nil)))
	}

	// For proto2 messages, only discard unknown fields in message extensions
	// that have been accessed via GetExtension.
	if em, err := extendable(m); err == nil {
		// Ignore lock since discardLegacy is not concurrency safe.
		emm, _ := em.extensionsRead()
		for _, mx := range emm {
			if m, ok := mx.value.(Message); ok {
				discardLegacy(m)
			}
		}
	}
}
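For context on how the code above is driven (this sketch is not part of the vendored diff): proto.DiscardUnknown clears unknown fields that Unmarshal may have retained, falling back to discardLegacy for messages without a generated XXX_DiscardUnknown method. The legacyMsg type, its field number, and the wire bytes below are hypothetical stand-ins for a protoc-generated message.

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

// legacyMsg is a hand-written stand-in for a generated message; real code
// would use a protoc-generated type instead.
type legacyMsg struct {
	Name             *string `protobuf:"bytes,1,opt,name=name"`
	XXX_unrecognized []byte  `json:"-"`
}

func (m *legacyMsg) Reset()         { *m = legacyMsg{} }
func (m *legacyMsg) String() string { return proto.CompactTextString(m) }
func (*legacyMsg) ProtoMessage()    {}

func main() {
	m := &legacyMsg{Name: proto.String("hi")}
	// Pretend Unmarshal retained an unknown field (field 2, varint 42).
	m.XXX_unrecognized = []byte{0x10, 0x2a}

	proto.DiscardUnknown(m)              // takes the discardLegacy path for this type
	fmt.Println(len(m.XXX_unrecognized)) // 0
}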
1182 vendor/github.com/golang/protobuf/proto/encode.go (generated, vendored)
File diff suppressed because it is too large.

64 vendor/github.com/golang/protobuf/proto/equal.go (generated, vendored)
@@ -54,13 +54,17 @@ Equality is defined in this way:
     in a proto3 .proto file, fields are not "set"; specifically,
     zero length proto3 "bytes" fields are equal (nil == {}).
   - Two repeated fields are equal iff their lengths are the same,
-    and their corresponding elements are equal (a "bytes" field,
-    although represented by []byte, is not a repeated field)
+    and their corresponding elements are equal. Note a "bytes" field,
+    although represented by []byte, is not a repeated field and the
+    rule for the scalar fields described above applies.
   - Two unset fields are equal.
   - Two unknown field sets are equal if their current
     encoded state is equal.
   - Two extension sets are equal iff they have corresponding
     elements that are pairwise equal.
+  - Two map fields are equal iff their lengths are the same,
+    and they contain the same set of elements. Zero-length map
+    fields are equal.
   - Every other combination of things are not equal.

 The return value is undefined if a and b are not protocol buffers.
@@ -105,15 +109,6 @@ func equalStruct(v1, v2 reflect.Value) bool {
 				// set/unset mismatch
 				return false
 			}
-			b1, ok := f1.Interface().(raw)
-			if ok {
-				b2 := f2.Interface().(raw)
-				// RawMessage
-				if !bytes.Equal(b1.Bytes(), b2.Bytes()) {
-					return false
-				}
-				continue
-			}
 			f1, f2 = f1.Elem(), f2.Elem()
 		}
 		if !equalAny(f1, f2, sprop.Prop[i]) {
@@ -121,9 +116,16 @@ func equalStruct(v1, v2 reflect.Value) bool {
 		}
 	}

+	if em1 := v1.FieldByName("XXX_InternalExtensions"); em1.IsValid() {
+		em2 := v2.FieldByName("XXX_InternalExtensions")
+		if !equalExtensions(v1.Type(), em1.Interface().(XXX_InternalExtensions), em2.Interface().(XXX_InternalExtensions)) {
+			return false
+		}
+	}
+
 	if em1 := v1.FieldByName("XXX_extensions"); em1.IsValid() {
 		em2 := v2.FieldByName("XXX_extensions")
-		if !equalExtensions(v1.Type(), em1.Interface().(map[int32]Extension), em2.Interface().(map[int32]Extension)) {
+		if !equalExtMap(v1.Type(), em1.Interface().(map[int32]Extension), em2.Interface().(map[int32]Extension)) {
 			return false
 		}
 	}
@@ -135,11 +137,7 @@ func equalStruct(v1, v2 reflect.Value) bool {

 	u1 := uf.Bytes()
 	u2 := v2.FieldByName("XXX_unrecognized").Bytes()
-	if !bytes.Equal(u1, u2) {
-		return false
-	}
-
-	return true
+	return bytes.Equal(u1, u2)
 }

 // v1 and v2 are known to have the same type.
@@ -184,6 +182,13 @@ func equalAny(v1, v2 reflect.Value, prop *Properties) bool {
 		}
 		return true
 	case reflect.Ptr:
+		// Maps may have nil values in them, so check for nil.
+		if v1.IsNil() && v2.IsNil() {
+			return true
+		}
+		if v1.IsNil() != v2.IsNil() {
+			return false
+		}
 		return equalAny(v1.Elem(), v2.Elem(), prop)
 	case reflect.Slice:
 		if v1.Type().Elem().Kind() == reflect.Uint8 {
@@ -223,8 +228,14 @@ func equalAny(v1, v2 reflect.Value, prop *Properties) bool {
 }

 // base is the struct type that the extensions are based on.
-// em1 and em2 are extension maps.
-func equalExtensions(base reflect.Type, em1, em2 map[int32]Extension) bool {
+// x1 and x2 are InternalExtensions.
+func equalExtensions(base reflect.Type, x1, x2 XXX_InternalExtensions) bool {
+	em1, _ := x1.extensionsRead()
+	em2, _ := x2.extensionsRead()
+	return equalExtMap(base, em1, em2)
+}
+
+func equalExtMap(base reflect.Type, em1, em2 map[int32]Extension) bool {
 	if len(em1) != len(em2) {
 		return false
 	}
@@ -237,6 +248,15 @@ func equalExtensions(base reflect.Type, em1, em2 map[int32]Extension) bool {

 		m1, m2 := e1.value, e2.value

+		if m1 == nil && m2 == nil {
+			// Both have only encoded form.
+			if bytes.Equal(e1.enc, e2.enc) {
+				continue
+			}
+			// The bytes are different, but the extensions might still be
+			// equal. We need to decode them to compare.
+		}
+
 		if m1 != nil && m2 != nil {
 			// Both are unencoded.
 			if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) {
@@ -252,8 +272,12 @@ func equalExtensions(base reflect.Type, em1, em2 map[int32]Extension) bool {
 			desc = m[extNum]
 		}
 		if desc == nil {
+			// If both have only encoded form and the bytes are the same,
+			// it is handled above. We get here when the bytes are different.
+			// We don't know how to decode it, so just compare them as byte
+			// slices.
 			log.Printf("proto: don't know how to compare extension %d of %v", extNum, base)
-			continue
+			return false
 		}
 		var err error
 		if m1 == nil {
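The updated doc comment above spells out two equality rules worth noting: zero-length proto3 bytes fields compare equal whether nil or empty, and zero-length map fields compare equal. A small illustrative sketch (not part of the vendored diff; the pair type and its tags are a hand-written stand-in for a generated proto3 message):

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

// pair is a minimal hand-written message used only to illustrate proto.Equal;
// real code would compare protoc-generated types.
type pair struct {
	Data  []byte            `protobuf:"bytes,1,opt,name=data,proto3"`
	Attrs map[string]string `protobuf:"bytes,2,rep,name=attrs" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
}

func (m *pair) Reset()         { *m = pair{} }
func (m *pair) String() string { return fmt.Sprintf("%+v", *m) }
func (*pair) ProtoMessage()    {}

func main() {
	a := &pair{Data: nil, Attrs: map[string]string{}}
	b := &pair{Data: []byte{}, Attrs: nil}
	// Per the doc comment: zero-length proto3 bytes fields are equal (nil == {}),
	// and zero-length map fields are equal.
	fmt.Println(proto.Equal(a, b)) // true
}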
326 vendor/github.com/golang/protobuf/proto/extensions.go (generated, vendored)

@@ -38,6 +38,7 @@ package proto
 import (
 	"errors"
 	"fmt"
+	"io"
 	"reflect"
 	"strconv"
 	"sync"
@@ -52,14 +53,111 @@ type ExtensionRange struct {
 	Start, End int32 // both inclusive
 }

-// extendableProto is an interface implemented by any protocol buffer that may be extended.
+// extendableProto is an interface implemented by any protocol buffer generated by the current
+// proto compiler that may be extended.
 type extendableProto interface {
+	Message
+	ExtensionRangeArray() []ExtensionRange
+	extensionsWrite() map[int32]Extension
+	extensionsRead() (map[int32]Extension, sync.Locker)
+}
+
+// extendableProtoV1 is an interface implemented by a protocol buffer generated by the previous
+// version of the proto compiler that may be extended.
+type extendableProtoV1 interface {
 	Message
 	ExtensionRangeArray() []ExtensionRange
 	ExtensionMap() map[int32]Extension
 }

-var extendableProtoType = reflect.TypeOf((*extendableProto)(nil)).Elem()
+// extensionAdapter is a wrapper around extendableProtoV1 that implements extendableProto.
+type extensionAdapter struct {
+	extendableProtoV1
+}
+
+func (e extensionAdapter) extensionsWrite() map[int32]Extension {
+	return e.ExtensionMap()
+}
+
+func (e extensionAdapter) extensionsRead() (map[int32]Extension, sync.Locker) {
+	return e.ExtensionMap(), notLocker{}
+}
+
+// notLocker is a sync.Locker whose Lock and Unlock methods are nops.
+type notLocker struct{}
+
+func (n notLocker) Lock()   {}
+func (n notLocker) Unlock() {}
+
+// extendable returns the extendableProto interface for the given generated proto message.
+// If the proto message has the old extension format, it returns a wrapper that implements
+// the extendableProto interface.
+func extendable(p interface{}) (extendableProto, error) {
+	switch p := p.(type) {
+	case extendableProto:
+		if isNilPtr(p) {
+			return nil, fmt.Errorf("proto: nil %T is not extendable", p)
+		}
+		return p, nil
+	case extendableProtoV1:
+		if isNilPtr(p) {
+			return nil, fmt.Errorf("proto: nil %T is not extendable", p)
+		}
+		return extensionAdapter{p}, nil
+	}
+	// Don't allocate a specific error containing %T:
+	// this is the hot path for Clone and MarshalText.
+	return nil, errNotExtendable
+}
+
+var errNotExtendable = errors.New("proto: not an extendable proto.Message")
+
+func isNilPtr(x interface{}) bool {
+	v := reflect.ValueOf(x)
+	return v.Kind() == reflect.Ptr && v.IsNil()
+}
+
+// XXX_InternalExtensions is an internal representation of proto extensions.
+//
+// Each generated message struct type embeds an anonymous XXX_InternalExtensions field,
+// thus gaining the unexported 'extensions' method, which can be called only from the proto package.
+//
+// The methods of XXX_InternalExtensions are not concurrency safe in general,
+// but calls to logically read-only methods such as has and get may be executed concurrently.
+type XXX_InternalExtensions struct {
+	// The struct must be indirect so that if a user inadvertently copies a
+	// generated message and its embedded XXX_InternalExtensions, they
+	// avoid the mayhem of a copied mutex.
+	//
+	// The mutex serializes all logically read-only operations to p.extensionMap.
+	// It is up to the client to ensure that write operations to p.extensionMap are
+	// mutually exclusive with other accesses.
+	p *struct {
+		mu           sync.Mutex
+		extensionMap map[int32]Extension
+	}
+}
+
+// extensionsWrite returns the extension map, creating it on first use.
+func (e *XXX_InternalExtensions) extensionsWrite() map[int32]Extension {
+	if e.p == nil {
+		e.p = new(struct {
+			mu           sync.Mutex
+			extensionMap map[int32]Extension
+		})
+		e.p.extensionMap = make(map[int32]Extension)
+	}
+	return e.p.extensionMap
+}
+
+// extensionsRead returns the extensions map for read-only use. It may be nil.
+// The caller must hold the returned mutex's lock when accessing Elements within the map.
+func (e *XXX_InternalExtensions) extensionsRead() (map[int32]Extension, sync.Locker) {
+	if e.p == nil {
+		return nil, nil
+	}
+	return e.p.extensionMap, &e.p.mu
+}

 // ExtensionDesc represents an extension specification.
 // Used in generated code from the protocol compiler.
@@ -69,6 +167,7 @@ type ExtensionDesc struct {
 	Field         int32  // field number
 	Name          string // fully-qualified name of extension, for text formatting
 	Tag           string // protobuf tag style
+	Filename      string // name of the file in which the extension is defined
 }

 func (ed *ExtensionDesc) repeated() bool {
@@ -92,8 +191,13 @@ type Extension struct {
 }

 // SetRawExtension is for testing only.
-func SetRawExtension(base extendableProto, id int32, b []byte) {
-	base.ExtensionMap()[id] = Extension{enc: b}
+func SetRawExtension(base Message, id int32, b []byte) {
+	epb, err := extendable(base)
+	if err != nil {
+		return
+	}
+	extmap := epb.extensionsWrite()
+	extmap[id] = Extension{enc: b}
 }

 // isExtensionField returns true iff the given field number is in an extension range.
@@ -108,9 +212,13 @@ func isExtensionField(pb extendableProto, field int32) bool {

 // checkExtensionTypes checks that the given extension is valid for pb.
 func checkExtensionTypes(pb extendableProto, extension *ExtensionDesc) error {
+	var pbi interface{} = pb
 	// Check the extended type.
-	if a, b := reflect.TypeOf(pb), reflect.TypeOf(extension.ExtendedType); a != b {
-		return errors.New("proto: bad extended type; " + b.String() + " does not extend " + a.String())
+	if ea, ok := pbi.(extensionAdapter); ok {
+		pbi = ea.extendableProtoV1
+	}
+	if a, b := reflect.TypeOf(pbi), reflect.TypeOf(extension.ExtendedType); a != b {
+		return fmt.Errorf("proto: bad extended type; %v does not extend %v", b, a)
 	}
 	// Check the range.
 	if !isExtensionField(pb, extension.Field) {
@@ -155,80 +263,62 @@ func extensionProperties(ed *ExtensionDesc) *Properties {
 	return prop
 }

-// encodeExtensionMap encodes any unmarshaled (unencoded) extensions in m.
-func encodeExtensionMap(m map[int32]Extension) error {
-	for k, e := range m {
-		if e.value == nil || e.desc == nil {
-			// Extension is only in its encoded form.
-			continue
-		}
-
-		// We don't skip extensions that have an encoded form set,
-		// because the extension value may have been mutated after
-		// the last time this function was called.
-
-		et := reflect.TypeOf(e.desc.ExtensionType)
-		props := extensionProperties(e.desc)
-
-		p := NewBuffer(nil)
-		// If e.value has type T, the encoder expects a *struct{ X T }.
-		// Pass a *T with a zero field and hope it all works out.
-		x := reflect.New(et)
-		x.Elem().Set(reflect.ValueOf(e.value))
-		if err := props.enc(p, props, toStructPointer(x)); err != nil {
-			return err
-		}
-		e.enc = p.buf
-		m[k] = e
-	}
-	return nil
-}
-
-func sizeExtensionMap(m map[int32]Extension) (n int) {
-	for _, e := range m {
-		if e.value == nil || e.desc == nil {
-			// Extension is only in its encoded form.
-			n += len(e.enc)
-			continue
-		}
-
-		// We don't skip extensions that have an encoded form set,
-		// because the extension value may have been mutated after
-		// the last time this function was called.
-
-		et := reflect.TypeOf(e.desc.ExtensionType)
-		props := extensionProperties(e.desc)
-
-		// If e.value has type T, the encoder expects a *struct{ X T }.
-		// Pass a *T with a zero field and hope it all works out.
-		x := reflect.New(et)
-		x.Elem().Set(reflect.ValueOf(e.value))
-		n += props.size(props, toStructPointer(x))
-	}
-	return
-}
-
 // HasExtension returns whether the given extension is present in pb.
-func HasExtension(pb extendableProto, extension *ExtensionDesc) bool {
+func HasExtension(pb Message, extension *ExtensionDesc) bool {
 	// TODO: Check types, field numbers, etc.?
-	_, ok := pb.ExtensionMap()[extension.Field]
+	epb, err := extendable(pb)
+	if err != nil {
+		return false
+	}
+	extmap, mu := epb.extensionsRead()
+	if extmap == nil {
+		return false
+	}
+	mu.Lock()
+	_, ok := extmap[extension.Field]
+	mu.Unlock()
 	return ok
 }

 // ClearExtension removes the given extension from pb.
-func ClearExtension(pb extendableProto, extension *ExtensionDesc) {
+func ClearExtension(pb Message, extension *ExtensionDesc) {
+	epb, err := extendable(pb)
+	if err != nil {
+		return
+	}
 	// TODO: Check types, field numbers, etc.?
-	delete(pb.ExtensionMap(), extension.Field)
+	extmap := epb.extensionsWrite()
+	delete(extmap, extension.Field)
 }

-// GetExtension parses and returns the given extension of pb.
-// If the extension is not present and has no default value it returns ErrMissingExtension.
-func GetExtension(pb extendableProto, extension *ExtensionDesc) (interface{}, error) {
-	if err := checkExtensionTypes(pb, extension); err != nil {
+// GetExtension retrieves a proto2 extended field from pb.
+//
+// If the descriptor is type complete (i.e., ExtensionDesc.ExtensionType is non-nil),
+// then GetExtension parses the encoded field and returns a Go value of the specified type.
+// If the field is not present, then the default value is returned (if one is specified),
+// otherwise ErrMissingExtension is reported.
+//
+// If the descriptor is not type complete (i.e., ExtensionDesc.ExtensionType is nil),
+// then GetExtension returns the raw encoded bytes of the field extension.
+func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) {
+	epb, err := extendable(pb)
+	if err != nil {
 		return nil, err
 	}

-	emap := pb.ExtensionMap()
+	if extension.ExtendedType != nil {
+		// can only check type if this is a complete descriptor
+		if err := checkExtensionTypes(epb, extension); err != nil {
+			return nil, err
+		}
+	}
+
+	emap, mu := epb.extensionsRead()
+	if emap == nil {
+		return defaultExtensionValue(extension)
+	}
+	mu.Lock()
+	defer mu.Unlock()
 	e, ok := emap[extension.Field]
 	if !ok {
 		// defaultExtensionValue returns the default value or
@@ -247,6 +337,11 @@ func GetExtension(pb extendableProto, extension *ExtensionDesc) (interface{}, er
 		return e.value, nil
 	}

+	if extension.ExtensionType == nil {
+		// incomplete descriptor
+		return e.enc, nil
+	}
+
 	v, err := decodeExtension(e.enc, extension)
 	if err != nil {
 		return nil, err
@@ -264,6 +359,11 @@ func GetExtension(pb extendableProto, extension *ExtensionDesc) (interface{}, er
 // defaultExtensionValue returns the default value for extension.
 // If no default for an extension is defined ErrMissingExtension is returned.
 func defaultExtensionValue(extension *ExtensionDesc) (interface{}, error) {
+	if extension.ExtensionType == nil {
+		// incomplete descriptor, so no default
+		return nil, ErrMissingExtension
+	}
+
 	t := reflect.TypeOf(extension.ExtensionType)
 	props := extensionProperties(extension)

@@ -298,31 +398,28 @@ func defaultExtensionValue(extension *ExtensionDesc) (interface{}, error) {

 // decodeExtension decodes an extension encoded in b.
 func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) {
-	o := NewBuffer(b)
-
 	t := reflect.TypeOf(extension.ExtensionType)
-	props := extensionProperties(extension)
+	unmarshal := typeUnmarshaler(t, extension.Tag)

 	// t is a pointer to a struct, pointer to basic type or a slice.
-	// Allocate a "field" to store the pointer/slice itself; the
-	// pointer/slice will be stored here. We pass
-	// the address of this field to props.dec.
-	// This passes a zero field and a *t and lets props.dec
-	// interpret it as a *struct{ x t }.
+	// Allocate space to store the pointer/slice.
 	value := reflect.New(t).Elem()

+	var err error
 	for {
-		// Discard wire type and field number varint. It isn't needed.
-		if _, err := o.DecodeVarint(); err != nil {
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		wire := int(x) & 7
+
+		b, err = unmarshal(b, valToPointer(value.Addr()), wire)
+		if err != nil {
 			return nil, err
 		}

-		if err := props.dec(o, props, toStructPointer(value.Addr())); err != nil {
-			return nil, err
-		}
-
-		if o.index >= len(o.buf) {
+		if len(b) == 0 {
 			break
 		}
 	}
@@ -332,10 +429,9 @@ func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) {
 // GetExtensions returns a slice of the extensions present in pb that are also listed in es.
 // The returned slice has the same length as es; missing extensions will appear as nil elements.
 func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, err error) {
-	epb, ok := pb.(extendableProto)
-	if !ok {
-		err = errors.New("proto: not an extendable proto")
-		return
+	epb, err := extendable(pb)
+	if err != nil {
+		return nil, err
 	}
 	extensions = make([]interface{}, len(es))
 	for i, e := range es {
@@ -350,9 +446,44 @@ func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, e
 	return
 }

+// ExtensionDescs returns a new slice containing pb's extension descriptors, in undefined order.
+// For non-registered extensions, ExtensionDescs returns an incomplete descriptor containing
+// just the Field field, which defines the extension's field number.
+func ExtensionDescs(pb Message) ([]*ExtensionDesc, error) {
+	epb, err := extendable(pb)
+	if err != nil {
+		return nil, err
+	}
+	registeredExtensions := RegisteredExtensions(pb)
+
+	emap, mu := epb.extensionsRead()
+	if emap == nil {
+		return nil, nil
+	}
+	mu.Lock()
+	defer mu.Unlock()
+	extensions := make([]*ExtensionDesc, 0, len(emap))
+	for extid, e := range emap {
+		desc := e.desc
+		if desc == nil {
+			desc = registeredExtensions[extid]
+			if desc == nil {
+				desc = &ExtensionDesc{Field: extid}
+			}
+		}
+
+		extensions = append(extensions, desc)
+	}
+	return extensions, nil
+}
+
 // SetExtension sets the specified extension of pb to the specified value.
-func SetExtension(pb extendableProto, extension *ExtensionDesc, value interface{}) error {
-	if err := checkExtensionTypes(pb, extension); err != nil {
+func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error {
+	epb, err := extendable(pb)
+	if err != nil {
+		return err
+	}
+	if err := checkExtensionTypes(epb, extension); err != nil {
 		return err
 	}
 	typ := reflect.TypeOf(extension.ExtensionType)
@@ -368,10 +499,23 @@ func SetExtension(pb extendableProto, extension *ExtensionDesc, value interface{
 		return fmt.Errorf("proto: SetExtension called with nil value of type %T", value)
 	}

-	pb.ExtensionMap()[extension.Field] = Extension{desc: extension, value: value}
+	extmap := epb.extensionsWrite()
+	extmap[extension.Field] = Extension{desc: extension, value: value}
 	return nil
 }

+// ClearAllExtensions clears all extensions from pb.
+func ClearAllExtensions(pb Message) {
+	epb, err := extendable(pb)
+	if err != nil {
+		return
+	}
+	m := epb.extensionsWrite()
+	for k := range m {
+		delete(m, k)
+	}
+}
+
 // A global registry of extensions.
 // The generated code will register the generated descriptors by calling RegisterExtension.
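The extension accessors above now take a plain proto.Message and route through extendable(), so both new-style messages and old-style ones that expose ExtensionMap() keep working via extensionAdapter. A sketch of that round trip (not part of the vendored diff; extMsg, eDesc, and the field number and names are hypothetical stand-ins for generated code):

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

// extMsg is a hand-written stand-in for an old-style (pre-1.x generated)
// extendable message: it exposes ExtensionRangeArray and ExtensionMap, so the
// extensionAdapter path in extensions.go ends up handling it.
type extMsg struct {
	ext map[int32]proto.Extension
}

func (m *extMsg) Reset()         { *m = extMsg{} }
func (m *extMsg) String() string { return "extMsg" }
func (*extMsg) ProtoMessage()    {}

func (*extMsg) ExtensionRangeArray() []proto.ExtensionRange {
	return []proto.ExtensionRange{{Start: 100, End: 200}}
}
func (m *extMsg) ExtensionMap() map[int32]proto.Extension {
	if m.ext == nil {
		m.ext = make(map[int32]proto.Extension)
	}
	return m.ext
}

// eDesc is an illustrative extension descriptor; a generated .pb.go would
// normally declare this (as an E_... variable) for you.
var eDesc = &proto.ExtensionDesc{
	ExtendedType:  (*extMsg)(nil),
	ExtensionType: (*string)(nil),
	Field:         123,
	Name:          "example.note",
	Tag:           "bytes,123,opt,name=note",
}

func main() {
	m := &extMsg{}
	if err := proto.SetExtension(m, eDesc, proto.String("hello")); err != nil {
		panic(err)
	}
	fmt.Println(proto.HasExtension(m, eDesc)) // true

	v, _ := proto.GetExtension(m, eDesc)
	fmt.Println(*v.(*string)) // hello

	proto.ClearExtension(m, eDesc)
	fmt.Println(proto.HasExtension(m, eDesc)) // false
}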
135 vendor/github.com/golang/protobuf/proto/lib.go (generated, vendored)

@@ -73,7 +73,6 @@ for a protocol buffer variable v:
 When the .proto file specifies `syntax="proto3"`, there are some differences:

   - Non-repeated fields of non-message type are values instead of pointers.
-  - Getters are only generated for message and oneof fields.
   - Enum types do not get an Enum method.

 The simplest way to describe this is to see an example.
@@ -274,6 +273,67 @@ import (
 	"sync"
 )

+// RequiredNotSetError is an error type returned by either Marshal or Unmarshal.
+// Marshal reports this when a required field is not initialized.
+// Unmarshal reports this when a required field is missing from the wire data.
+type RequiredNotSetError struct{ field string }
+
+func (e *RequiredNotSetError) Error() string {
+	if e.field == "" {
+		return fmt.Sprintf("proto: required field not set")
+	}
+	return fmt.Sprintf("proto: required field %q not set", e.field)
+}
+func (e *RequiredNotSetError) RequiredNotSet() bool {
+	return true
+}
+
+type invalidUTF8Error struct{ field string }
+
+func (e *invalidUTF8Error) Error() string {
+	if e.field == "" {
+		return "proto: invalid UTF-8 detected"
+	}
+	return fmt.Sprintf("proto: field %q contains invalid UTF-8", e.field)
+}
+func (e *invalidUTF8Error) InvalidUTF8() bool {
+	return true
+}
+
+// errInvalidUTF8 is a sentinel error to identify fields with invalid UTF-8.
+// This error should not be exposed to the external API as such errors should
+// be recreated with the field information.
+var errInvalidUTF8 = &invalidUTF8Error{}
+
+// isNonFatal reports whether the error is either a RequiredNotSet error
+// or a InvalidUTF8 error.
+func isNonFatal(err error) bool {
+	if re, ok := err.(interface{ RequiredNotSet() bool }); ok && re.RequiredNotSet() {
+		return true
+	}
+	if re, ok := err.(interface{ InvalidUTF8() bool }); ok && re.InvalidUTF8() {
+		return true
+	}
+	return false
+}
+
+type nonFatal struct{ E error }
+
+// Merge merges err into nf and reports whether it was successful.
+// Otherwise it returns false for any fatal non-nil errors.
+func (nf *nonFatal) Merge(err error) (ok bool) {
+	if err == nil {
+		return true // not an error
+	}
+	if !isNonFatal(err) {
+		return false // fatal error
+	}
+	if nf.E == nil {
+		nf.E = err // store first instance of non-fatal error
+	}
+	return true
+}
+
 // Message is implemented by generated protocol buffer messages.
 type Message interface {
 	Reset()
@@ -308,18 +368,9 @@ func GetStats() Stats { return stats }
 // temporary Buffer and are fine for most applications.
 type Buffer struct {
 	buf   []byte // encode/decode byte stream
-	index int    // write point
+	index int    // read point

-	// pools of basic types to amortize allocation.
-	bools   []bool
-	uint32s []uint32
-	uint64s []uint64
-
-	// extra pools, only used with pointer_reflect.go
-	int32s   []int32
-	int64s   []int64
-	float32s []float32
-	float64s []float64
+	deterministic bool
 }

 // NewBuffer allocates a new Buffer and initializes its internal data to
@@ -344,6 +395,30 @@ func (p *Buffer) SetBuf(s []byte) {
 // Bytes returns the contents of the Buffer.
 func (p *Buffer) Bytes() []byte { return p.buf }

+// SetDeterministic sets whether to use deterministic serialization.
+//
+// Deterministic serialization guarantees that for a given binary, equal
+// messages will always be serialized to the same bytes. This implies:
+//
+//   - Repeated serialization of a message will return the same bytes.
+//   - Different processes of the same binary (which may be executing on
+//     different machines) will serialize equal messages to the same bytes.
+//
+// Note that the deterministic serialization is NOT canonical across
+// languages. It is not guaranteed to remain stable over time. It is unstable
+// across different builds with schema changes due to unknown fields.
+// Users who need canonical serialization (e.g., persistent storage in a
+// canonical form, fingerprinting, etc.) should define their own
+// canonicalization specification and implement their own serializer rather
+// than relying on this API.
+//
+// If deterministic serialization is requested, map entries will be sorted
+// by keys in lexographical order. This is an implementation detail and
+// subject to change.
+func (p *Buffer) SetDeterministic(deterministic bool) {
+	p.deterministic = deterministic
+}
+
 /*
  * Helper routines for simplifying the creation of optional fields of basic type.
  */
@@ -832,22 +907,12 @@ func fieldDefault(ft reflect.Type, prop *Properties) (sf *scalarField, nestedMes
 	return sf, false, nil
 }

+// mapKeys returns a sort.Interface to be used for sorting the map keys.
 // Map fields may have key types of non-float scalars, strings and enums.
-// The easiest way to sort them in some deterministic order is to use fmt.
-// If this turns out to be inefficient we can always consider other options,
-// such as doing a Schwartzian transform.
-
 func mapKeys(vs []reflect.Value) sort.Interface {
-	s := mapKeySorter{
-		vs: vs,
-		// default Less function: textual comparison
-		less: func(a, b reflect.Value) bool {
-			return fmt.Sprint(a.Interface()) < fmt.Sprint(b.Interface())
-		},
-	}
+	s := mapKeySorter{vs: vs}

-	// Type specialization per https://developers.google.com/protocol-buffers/docs/proto#maps;
-	// numeric keys are sorted numerically.
+	// Type specialization per https://developers.google.com/protocol-buffers/docs/proto#maps.
 	if len(vs) == 0 {
 		return s
 	}
@@ -856,6 +921,12 @@ func mapKeys(vs []reflect.Value) sort.Interface {
 		s.less = func(a, b reflect.Value) bool { return a.Int() < b.Int() }
 	case reflect.Uint32, reflect.Uint64:
 		s.less = func(a, b reflect.Value) bool { return a.Uint() < b.Uint() }
+	case reflect.Bool:
+		s.less = func(a, b reflect.Value) bool { return !a.Bool() && b.Bool() } // false < true
+	case reflect.String:
+		s.less = func(a, b reflect.Value) bool { return a.String() < b.String() }
+	default:
+		panic(fmt.Sprintf("unsupported map key type: %v", vs[0].Kind()))
 	}

 	return s
@@ -889,6 +960,20 @@ func isProto3Zero(v reflect.Value) bool {
 	return false
 }

+// ProtoPackageIsVersion2 is referenced from generated protocol buffer files
+// to assert that that code is compatible with this version of the proto package.
+const ProtoPackageIsVersion2 = true
+
 // ProtoPackageIsVersion1 is referenced from generated protocol buffer files
 // to assert that that code is compatible with this version of the proto package.
 const ProtoPackageIsVersion1 = true
+
+// InternalMessageInfo is a type used internally by generated .pb.go files.
+// This type is not intended to be used by non-generated code.
+// This type is not subject to any compatibility guarantee.
+type InternalMessageInfo struct {
+	marshal   *marshalInfo
+	unmarshal *unmarshalInfo
+	merge     *mergeInfo
+	discard   *discardInfo
+}
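SetDeterministic, added above, only promises stable bytes for equal messages within one binary; the sketch below shows the call pattern (not part of the vendored diff; the kv type and its tags are a hand-written stand-in for a generated message with a map field):

package main

import (
	"bytes"
	"fmt"

	"github.com/golang/protobuf/proto"
)

// kv is a hand-written stand-in message with a map field, used only to
// illustrate Buffer.SetDeterministic; real code would use a generated type.
type kv struct {
	Attrs map[string]string `protobuf:"bytes,1,rep,name=attrs" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
}

func (m *kv) Reset()         { *m = kv{} }
func (m *kv) String() string { return fmt.Sprintf("%+v", *m) }
func (*kv) ProtoMessage()    {}

func main() {
	m := &kv{Attrs: map[string]string{"b": "2", "a": "1", "c": "3"}}

	marshal := func() []byte {
		var buf proto.Buffer
		buf.SetDeterministic(true) // map entries are sorted by key
		if err := buf.Marshal(m); err != nil {
			panic(err)
		}
		return buf.Bytes()
	}

	// With deterministic mode on, repeated marshals of the same message
	// produce identical bytes, even though Go map iteration order varies.
	fmt.Println(bytes.Equal(marshal(), marshal())) // true
}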
100 vendor/github.com/golang/protobuf/proto/message_set.go (generated, vendored)

@@ -42,6 +42,7 @@ import (
 	"fmt"
 	"reflect"
 	"sort"
+	"sync"
 )

 // errNoMessageTypeID occurs when a protocol buffer does not have a message type ID.
@@ -94,10 +95,7 @@ func (ms *messageSet) find(pb Message) *_MessageSet_Item {
 }

 func (ms *messageSet) Has(pb Message) bool {
-	if ms.find(pb) != nil {
-		return true
-	}
-	return false
+	return ms.find(pb) != nil
 }

 func (ms *messageSet) Unmarshal(pb Message) error {
@@ -149,36 +147,54 @@ func skipVarint(buf []byte) []byte {

 // MarshalMessageSet encodes the extension map represented by m in the message set wire format.
 // It is called by generated Marshal methods on protocol buffer messages with the message_set_wire_format option.
-func MarshalMessageSet(m map[int32]Extension) ([]byte, error) {
-	if err := encodeExtensionMap(m); err != nil {
-		return nil, err
-	}
-
-	// Sort extension IDs to provide a deterministic encoding.
-	// See also enc_map in encode.go.
-	ids := make([]int, 0, len(m))
-	for id := range m {
-		ids = append(ids, int(id))
-	}
-	sort.Ints(ids)
-
-	ms := &messageSet{Item: make([]*_MessageSet_Item, 0, len(m))}
-	for _, id := range ids {
-		e := m[int32(id)]
-		// Remove the wire type and field number varint, as well as the length varint.
-		msg := skipVarint(skipVarint(e.enc))
-
-		ms.Item = append(ms.Item, &_MessageSet_Item{
-			TypeId:  Int32(int32(id)),
-			Message: msg,
-		})
-	}
-	return Marshal(ms)
+func MarshalMessageSet(exts interface{}) ([]byte, error) {
+	return marshalMessageSet(exts, false)
+}
+
+// marshaMessageSet implements above function, with the opt to turn on / off deterministic during Marshal.
+func marshalMessageSet(exts interface{}, deterministic bool) ([]byte, error) {
+	switch exts := exts.(type) {
+	case *XXX_InternalExtensions:
+		var u marshalInfo
+		siz := u.sizeMessageSet(exts)
+		b := make([]byte, 0, siz)
+		return u.appendMessageSet(b, exts, deterministic)
+
+	case map[int32]Extension:
+		// This is an old-style extension map.
+		// Wrap it in a new-style XXX_InternalExtensions.
+		ie := XXX_InternalExtensions{
+			p: &struct {
+				mu           sync.Mutex
+				extensionMap map[int32]Extension
+			}{
+				extensionMap: exts,
+			},
+		}
+
+		var u marshalInfo
+		siz := u.sizeMessageSet(&ie)
+		b := make([]byte, 0, siz)
+		return u.appendMessageSet(b, &ie, deterministic)
+
+	default:
+		return nil, errors.New("proto: not an extension map")
+	}
 }

 // UnmarshalMessageSet decodes the extension map encoded in buf in the message set wire format.
-// It is called by generated Unmarshal methods on protocol buffer messages with the message_set_wire_format option.
-func UnmarshalMessageSet(buf []byte, m map[int32]Extension) error {
+// It is called by Unmarshal methods on protocol buffer messages with the message_set_wire_format option.
+func UnmarshalMessageSet(buf []byte, exts interface{}) error {
+	var m map[int32]Extension
+	switch exts := exts.(type) {
+	case *XXX_InternalExtensions:
+		m = exts.extensionsWrite()
+	case map[int32]Extension:
+		m = exts
+	default:
+		return errors.New("proto: not an extension map")
+	}
+
 	ms := new(messageSet)
 	if err := Unmarshal(buf, ms); err != nil {
 		return err
@@ -209,7 +225,24 @@ func UnmarshalMessageSet(buf []byte, m map[int32]Extension) error {

 // MarshalMessageSetJSON encodes the extension map represented by m in JSON format.
 // It is called by generated MarshalJSON methods on protocol buffer messages with the message_set_wire_format option.
-func MarshalMessageSetJSON(m map[int32]Extension) ([]byte, error) {
+func MarshalMessageSetJSON(exts interface{}) ([]byte, error) {
+	var m map[int32]Extension
+	switch exts := exts.(type) {
+	case *XXX_InternalExtensions:
+		var mu sync.Locker
+		m, mu = exts.extensionsRead()
+		if m != nil {
+			// Keep the extensions map locked until we're done marshaling to prevent
+			// races between marshaling and unmarshaling the lazily-{en,de}coded
+			// values.
+			mu.Lock()
+			defer mu.Unlock()
+		}
+	case map[int32]Extension:
+		m = exts
+	default:
+		return nil, errors.New("proto: not an extension map")
+	}
 	var b bytes.Buffer
 	b.WriteByte('{')

@@ -222,15 +255,16 @@ func MarshalMessageSetJSON(m map[int32]Extension) ([]byte, error) {

 	for i, id := range ids {
 		ext := m[id]
-		if i > 0 {
-			b.WriteByte(',')
-		}
-
 		msd, ok := messageSetMap[id]
 		if !ok {
 			// Unknown type; we can't render it, so skip it.
 			continue
 		}

+		if i > 0 && b.Len() > 1 {
+			b.WriteByte(',')
+		}
+
 		fmt.Fprintf(&b, `"[%s]":`, msd.name)

 		x := ext.value
@@ -252,7 +286,7 @@ func MarshalMessageSetJSON(m map[int32]Extension) ([]byte, error) {

 // UnmarshalMessageSetJSON decodes the extension map encoded in buf in JSON format.
 // It is called by generated UnmarshalJSON methods on protocol buffer messages with the message_set_wire_format option.
-func UnmarshalMessageSetJSON(buf []byte, m map[int32]Extension) error {
+func UnmarshalMessageSetJSON(buf []byte, exts interface{}) error {
 	// Common-case fast path.
 	if len(buf) == 0 || bytes.Equal(buf, []byte("{}")) {
 		return nil
634
vendor/github.com/golang/protobuf/proto/pointer_reflect.go
generated
vendored
634
vendor/github.com/golang/protobuf/proto/pointer_reflect.go
generated
vendored
|
@ -29,7 +29,7 @@
|
||||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||||
|
|
||||||
// +build appengine
|
// +build purego appengine js
|
||||||
|
|
||||||
// This file contains an implementation of proto field accesses using package reflect.
|
// This file contains an implementation of proto field accesses using package reflect.
|
||||||
// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can
|
// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can
|
||||||
|
@ -38,32 +38,13 @@
|
||||||
package proto
|
package proto
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"math"
|
|
||||||
"reflect"
|
"reflect"
|
||||||
|
"sync"
|
||||||
)
|
)
|
||||||
|
|
||||||
// A structPointer is a pointer to a struct.
|
const unsafeAllowed = false
|
||||||
type structPointer struct {
|
|
||||||
v reflect.Value
|
|
||||||
}
|
|
||||||
|
|
||||||
// toStructPointer returns a structPointer equivalent to the given reflect value.
|
// A field identifies a field in a struct, accessible from a pointer.
|
||||||
// The reflect value must itself be a pointer to a struct.
|
|
||||||
func toStructPointer(v reflect.Value) structPointer {
|
|
||||||
return structPointer{v}
|
|
||||||
}
|
|
||||||
|
|
||||||
// IsNil reports whether p is nil.
|
|
||||||
func structPointer_IsNil(p structPointer) bool {
|
|
||||||
return p.v.IsNil()
|
|
||||||
}
|
|
||||||
|
|
||||||
// Interface returns the struct pointer as an interface value.
|
|
||||||
func structPointer_Interface(p structPointer, _ reflect.Type) interface{} {
|
|
||||||
return p.v.Interface()
|
|
||||||
}
|
|
||||||
|
|
||||||
// A field identifies a field in a struct, accessible from a structPointer.
|
|
||||||
// In this implementation, a field is identified by the sequence of field indices
|
// In this implementation, a field is identified by the sequence of field indices
|
||||||
// passed to reflect's FieldByIndex.
|
// passed to reflect's FieldByIndex.
|
||||||
type field []int
|
type field []int
|
||||||
|
@@ -76,404 +57,301 @@ func toField(f *reflect.StructField) field {
 // invalidField is an invalid field identifier.
 var invalidField = field(nil)

+// zeroField is a noop when calling pointer.offset.
+var zeroField = field([]int{})
+
 // IsValid reports whether the field identifier is valid.
 func (f field) IsValid() bool { return f != nil }

-// field returns the given field in the struct as a reflect value.
-func structPointer_field(p structPointer, f field) reflect.Value {
-    // Special case: an extension map entry with a value of type T
-    // passes a *T to the struct-handling code with a zero field,
-    // expecting that it will be treated as equivalent to *struct{ X T },
-    // which has the same memory layout. We have to handle that case
-    // specially, because reflect will panic if we call FieldByIndex on a
-    // non-struct.
-    if f == nil {
-        return p.v.Elem()
-    }
-    return p.v.Elem().FieldByIndex(f)
-}
-
-// ifield returns the given field in the struct as an interface value.
-func structPointer_ifield(p structPointer, f field) interface{} {
-    return structPointer_field(p, f).Addr().Interface()
-}
-
-func structPointer_Bytes(p structPointer, f field) *[]byte { return structPointer_ifield(p, f).(*[]byte) }
-func structPointer_BytesSlice(p structPointer, f field) *[][]byte { return structPointer_ifield(p, f).(*[][]byte) }
-func structPointer_Bool(p structPointer, f field) **bool { return structPointer_ifield(p, f).(**bool) }
-func structPointer_BoolVal(p structPointer, f field) *bool { return structPointer_ifield(p, f).(*bool) }
-func structPointer_BoolSlice(p structPointer, f field) *[]bool { return structPointer_ifield(p, f).(*[]bool) }
-func structPointer_String(p structPointer, f field) **string { return structPointer_ifield(p, f).(**string) }
-func structPointer_StringVal(p structPointer, f field) *string { return structPointer_ifield(p, f).(*string) }
-func structPointer_StringSlice(p structPointer, f field) *[]string { return structPointer_ifield(p, f).(*[]string) }
-func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension { return structPointer_ifield(p, f).(*map[int32]Extension) }
-func structPointer_NewAt(p structPointer, f field, typ reflect.Type) reflect.Value { return structPointer_field(p, f).Addr() }
-func structPointer_SetStructPointer(p structPointer, f field, q structPointer) { structPointer_field(p, f).Set(q.v) }
-func structPointer_GetStructPointer(p structPointer, f field) structPointer { return structPointer{structPointer_field(p, f)} }
-func structPointer_StructPointerSlice(p structPointer, f field) structPointerSlice { return structPointerSlice{structPointer_field(p, f)} }
-
-// A structPointerSlice represents the address of a slice of pointers to structs
-// (themselves messages or groups). That is, v.Type() is *[]*struct{...}.
-type structPointerSlice struct{ v reflect.Value }
-
-func (p structPointerSlice) Len() int                  { return p.v.Len() }
-func (p structPointerSlice) Index(i int) structPointer { return structPointer{p.v.Index(i)} }
-func (p structPointerSlice) Append(q structPointer)    { p.v.Set(reflect.Append(p.v, q.v)) }
-
-var (
-    int32Type   = reflect.TypeOf(int32(0))
-    uint32Type  = reflect.TypeOf(uint32(0))
-    float32Type = reflect.TypeOf(float32(0))
-    int64Type   = reflect.TypeOf(int64(0))
-    uint64Type  = reflect.TypeOf(uint64(0))
-    float64Type = reflect.TypeOf(float64(0))
-)
-
-// A word32 represents a field of type *int32, *uint32, *float32, or *enum.
-// That is, v.Type() is *int32, *uint32, *float32, or *enum and v is assignable.
-type word32 struct{ v reflect.Value }
-
-func word32_IsNil(p word32) bool { return p.v.IsNil() }
-
-// Word32 returns a reference to a *int32, *uint32, *float32, or *enum field in the struct.
-func structPointer_Word32(p structPointer, f field) word32 { return word32{structPointer_field(p, f)} }
-
-// A word32Val represents a field of type int32, uint32, float32, or enum.
-// That is, v.Type() is int32, uint32, float32, or enum and v is assignable.
-type word32Val struct{ v reflect.Value }
-
-// Word32Val returns a reference to a int32, uint32, float32, or enum field in the struct.
-func structPointer_Word32Val(p structPointer, f field) word32Val { return word32Val{structPointer_field(p, f)} }
-
-// A word32Slice is a slice of 32-bit values.
-// That is, v.Type() is []int32, []uint32, []float32, or []enum.
-type word32Slice struct{ v reflect.Value }
+// The pointer type is for the table-driven decoder.
+// The implementation here uses a reflect.Value of pointer type to
+// create a generic pointer. In pointer_unsafe.go we use unsafe
+// instead of reflect to implement the same (but faster) interface.
+type pointer struct {
+    v reflect.Value
+}
+
+// toPointer converts an interface of pointer type to a pointer
+// that points to the same target.
+func toPointer(i *Message) pointer {
+    return pointer{v: reflect.ValueOf(*i)}
+}
+
+// toAddrPointer converts an interface to a pointer that points to
+// the interface data.
+func toAddrPointer(i *interface{}, isptr bool) pointer {
+    v := reflect.ValueOf(*i)
+    u := reflect.New(v.Type())
+    u.Elem().Set(v)
+    return pointer{v: u}
+}
+
+// valToPointer converts v to a pointer. v must be of pointer type.
+func valToPointer(v reflect.Value) pointer {
+    return pointer{v: v}
+}
+
+// offset converts from a pointer to a structure to a pointer to
+// one of its fields.
+func (p pointer) offset(f field) pointer {
+    return pointer{v: p.v.Elem().FieldByIndex(f).Addr()}
+}
+
+func (p pointer) isNil() bool {
+    return p.v.IsNil()
+}
+
+// grow updates the slice s in place to make it one element longer.
+// s must be addressable.
+// Returns the (addressable) new element.
+func grow(s reflect.Value) reflect.Value {
+    n, m := s.Len(), s.Cap()
+    if n < m {
+        s.SetLen(n + 1)
+    } else {
+        s.Set(reflect.Append(s, reflect.Zero(s.Type().Elem())))
+    }
+    return s.Index(n)
+}

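The grow helper added above is how the reflect variant appends through a generic pointer: it extends an addressable slice value and hands back the new, settable element. A short usage sketch (the slice and values are made up):

package main

import (
    "fmt"
    "reflect"
)

// grow makes the addressable slice s one element longer and returns the new,
// addressable element, mirroring the helper added in pointer_reflect.go.
func grow(s reflect.Value) reflect.Value {
    n, m := s.Len(), s.Cap()
    if n < m {
        s.SetLen(n + 1)
    } else {
        s.Set(reflect.Append(s, reflect.Zero(s.Type().Elem())))
    }
    return s.Index(n)
}

func main() {
    xs := make([]int32, 0, 1)
    v := reflect.ValueOf(&xs).Elem() // addressable slice value
    grow(v).SetInt(7)
    grow(v).SetInt(8) // forces a reallocation via reflect.Append
    fmt.Println(xs)   // [7 8]
}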
-func (p word32Slice) Len() int { return p.v.Len() }
-
-// Word32Slice returns a reference to a []int32, []uint32, []float32, or []enum field in the struct.
-func structPointer_Word32Slice(p structPointer, f field) word32Slice { return word32Slice{structPointer_field(p, f)} }
-
-// word64 is like word32 but for 64-bit values.
-type word64 struct{ v reflect.Value }
-
-func word64_IsNil(p word64) bool { return p.v.IsNil() }
-
-func structPointer_Word64(p structPointer, f field) word64 { return word64{structPointer_field(p, f)} }
-
-// word64Val is like word32Val but for 64-bit values.
-type word64Val struct{ v reflect.Value }
-
-func structPointer_Word64Val(p structPointer, f field) word64Val { return word64Val{structPointer_field(p, f)} }
-
-type word64Slice struct{ v reflect.Value }
-
-func (p word64Slice) Len() int { return p.v.Len() }
-
-func structPointer_Word64Slice(p structPointer, f field) word64Slice { return word64Slice{structPointer_field(p, f)} }
+func (p pointer) toInt64() *int64        { return p.v.Interface().(*int64) }
+func (p pointer) toInt64Ptr() **int64    { return p.v.Interface().(**int64) }
+func (p pointer) toInt64Slice() *[]int64 { return p.v.Interface().(*[]int64) }
+
+var int32ptr = reflect.TypeOf((*int32)(nil))
+
+func (p pointer) toInt32() *int32 {
+    return p.v.Convert(int32ptr).Interface().(*int32)
+}
+
+// The toInt32Ptr/Slice methods don't work because of enums.
+// Instead, we must use set/get methods for the int32ptr/slice case.
+func (p pointer) getInt32Ptr() *int32 {
+    if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) {
+        // raw int32 type
+        return p.v.Elem().Interface().(*int32)
+    }
+    // an enum
+    return p.v.Elem().Convert(int32PtrType).Interface().(*int32)
+}
+func (p pointer) setInt32Ptr(v int32) {
+    // Allocate value in a *int32. Possibly convert that to a *enum.
+    // Then assign it to a **int32 or **enum.
+    // Note: we can convert *int32 to *enum, but we can't convert
+    // **int32 to **enum!
+    p.v.Elem().Set(reflect.ValueOf(&v).Convert(p.v.Type().Elem()))
+}
+
+// getInt32Slice copies []int32 from p as a new slice.
+// This behavior differs from the implementation in pointer_unsafe.go.
+func (p pointer) getInt32Slice() []int32 {
+    if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) {
+        // raw int32 type
+        return p.v.Elem().Interface().([]int32)
+    }
+    // an enum
+    // Allocate a []int32, then assign []enum's values into it.
+    // Note: we can't convert []enum to []int32.
+    slice := p.v.Elem()
+    s := make([]int32, slice.Len())
+    for i := 0; i < slice.Len(); i++ {
+        s[i] = int32(slice.Index(i).Int())
+    }
+    return s
+}
+
+// setInt32Slice copies []int32 into p as a new slice.
+// This behavior differs from the implementation in pointer_unsafe.go.
+func (p pointer) setInt32Slice(v []int32) {
+    if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) {
+        // raw int32 type
+        p.v.Elem().Set(reflect.ValueOf(v))
+        return
+    }
+    // an enum
+    // Allocate a []enum, then assign []int32's values into it.
+    // Note: we can't convert []enum to []int32.
+    slice := reflect.MakeSlice(p.v.Type().Elem(), len(v), cap(v))
+    for i, x := range v {
+        slice.Index(i).SetInt(int64(x))
+    }
+    p.v.Elem().Set(slice)
+}
+
+func (p pointer) appendInt32Slice(v int32) {
+    grow(p.v.Elem()).SetInt(int64(v))
+}
+
+func (p pointer) toUint64() *uint64                     { return p.v.Interface().(*uint64) }
+func (p pointer) toUint64Ptr() **uint64                 { return p.v.Interface().(**uint64) }
+func (p pointer) toUint64Slice() *[]uint64              { return p.v.Interface().(*[]uint64) }
+func (p pointer) toUint32() *uint32                     { return p.v.Interface().(*uint32) }
+func (p pointer) toUint32Ptr() **uint32                 { return p.v.Interface().(**uint32) }
+func (p pointer) toUint32Slice() *[]uint32              { return p.v.Interface().(*[]uint32) }
+func (p pointer) toBool() *bool                         { return p.v.Interface().(*bool) }
+func (p pointer) toBoolPtr() **bool                     { return p.v.Interface().(**bool) }
+func (p pointer) toBoolSlice() *[]bool                  { return p.v.Interface().(*[]bool) }
+func (p pointer) toFloat64() *float64                   { return p.v.Interface().(*float64) }
+func (p pointer) toFloat64Ptr() **float64               { return p.v.Interface().(**float64) }
+func (p pointer) toFloat64Slice() *[]float64            { return p.v.Interface().(*[]float64) }
+func (p pointer) toFloat32() *float32                   { return p.v.Interface().(*float32) }
+func (p pointer) toFloat32Ptr() **float32               { return p.v.Interface().(**float32) }
+func (p pointer) toFloat32Slice() *[]float32            { return p.v.Interface().(*[]float32) }
+func (p pointer) toString() *string                     { return p.v.Interface().(*string) }
+func (p pointer) toStringPtr() **string                 { return p.v.Interface().(**string) }
+func (p pointer) toStringSlice() *[]string              { return p.v.Interface().(*[]string) }
+func (p pointer) toBytes() *[]byte                      { return p.v.Interface().(*[]byte) }
+func (p pointer) toBytesSlice() *[][]byte               { return p.v.Interface().(*[][]byte) }
+func (p pointer) toExtensions() *XXX_InternalExtensions { return p.v.Interface().(*XXX_InternalExtensions) }
+func (p pointer) toOldExtensions() *map[int32]Extension { return p.v.Interface().(*map[int32]Extension) }
+
+func (p pointer) getPointer() pointer     { return pointer{v: p.v.Elem()} }
+func (p pointer) setPointer(q pointer)    { p.v.Elem().Set(q.v) }
+func (p pointer) appendPointer(q pointer) { grow(p.v.Elem()).Set(q.v) }
+
+// getPointerSlice copies []*T from p as a new []pointer.
+// This behavior differs from the implementation in pointer_unsafe.go.
+func (p pointer) getPointerSlice() []pointer {
+    if p.v.IsNil() {
+        return nil
+    }
+    n := p.v.Elem().Len()
+    s := make([]pointer, n)
+    for i := 0; i < n; i++ {
+        s[i] = pointer{v: p.v.Elem().Index(i)}
+    }
+    return s
+}
+
+// setPointerSlice copies []pointer into p as a new []*T.
+// This behavior differs from the implementation in pointer_unsafe.go.
+func (p pointer) setPointerSlice(v []pointer) {
+    if v == nil {
+        p.v.Elem().Set(reflect.New(p.v.Elem().Type()).Elem())
+        return
+    }
+    s := reflect.MakeSlice(p.v.Elem().Type(), 0, len(v))
+    for _, p := range v {
+        s = reflect.Append(s, p.v)
+    }
+    p.v.Elem().Set(s)
+}
+
+// getInterfacePointer returns a pointer that points to the
+// interface data of the interface pointed by p.
+func (p pointer) getInterfacePointer() pointer {
+    if p.v.Elem().IsNil() {
+        return pointer{v: p.v.Elem()}
+    }
+    return pointer{v: p.v.Elem().Elem().Elem().Field(0).Addr()} // *interface -> interface -> *struct -> struct
+}
+
+func (p pointer) asPointerTo(t reflect.Type) reflect.Value {
+    // TODO: check that p.v.Type().Elem() == t?
+    return p.v
+}
+
+func atomicLoadUnmarshalInfo(p **unmarshalInfo) *unmarshalInfo  { atomicLock.Lock(); defer atomicLock.Unlock(); return *p }
+func atomicStoreUnmarshalInfo(p **unmarshalInfo, v *unmarshalInfo) { atomicLock.Lock(); defer atomicLock.Unlock(); *p = v }
+func atomicLoadMarshalInfo(p **marshalInfo) *marshalInfo        { atomicLock.Lock(); defer atomicLock.Unlock(); return *p }
+func atomicStoreMarshalInfo(p **marshalInfo, v *marshalInfo)    { atomicLock.Lock(); defer atomicLock.Unlock(); *p = v }
+func atomicLoadMergeInfo(p **mergeInfo) *mergeInfo              { atomicLock.Lock(); defer atomicLock.Unlock(); return *p }
+func atomicStoreMergeInfo(p **mergeInfo, v *mergeInfo)          { atomicLock.Lock(); defer atomicLock.Unlock(); *p = v }
+func atomicLoadDiscardInfo(p **discardInfo) *discardInfo        { atomicLock.Lock(); defer atomicLock.Unlock(); return *p }
+func atomicStoreDiscardInfo(p **discardInfo, v *discardInfo)    { atomicLock.Lock(); defer atomicLock.Unlock(); *p = v }
+
+var atomicLock sync.Mutex

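The reflect variant ends by guarding its cached unmarshal/marshal/merge/discard info pointers with a plain sync.Mutex, since atomic pointer loads and stores are not available without unsafe. A minimal stand-in showing the same load/store-under-lock shape (the info type and function names here are hypothetical):

package main

import (
    "fmt"
    "sync"
)

type info struct{ n int }

var mu sync.Mutex

// load and store serialize access to a shared *info, standing in for the
// atomicLoad*/atomicStore* helpers above.
func load(p **info) *info { mu.Lock(); defer mu.Unlock(); return *p }

func store(p **info, v *info) { mu.Lock(); defer mu.Unlock(); *p = v }

func main() {
    var cached *info
    store(&cached, &info{n: 42})
    fmt.Println(load(&cached).n) // 42
}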
368 vendor/github.com/golang/protobuf/proto/pointer_unsafe.go (generated, vendored)
@@ -29,7 +29,7 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

-// +build !appengine
+// +build !purego,!appengine,!js

 // This file contains the implementation of the proto field accesses using package unsafe.
@@ -37,38 +37,13 @@ package proto

 import (
     "reflect"
+    "sync/atomic"
     "unsafe"
 )

-// NOTE: These type_Foo functions would more idiomatically be methods,
-// but Go does not allow methods on pointer types, and we must preserve
-// some pointer type for the garbage collector. We use these
-// funcs with clunky names as our poor approximation to methods.
-//
-// An alternative would be
-// type structPointer struct { p unsafe.Pointer }
-// but that does not registerize as well.
-
-// A structPointer is a pointer to a struct.
-type structPointer unsafe.Pointer
-
-// toStructPointer returns a structPointer equivalent to the given reflect value.
-func toStructPointer(v reflect.Value) structPointer {
-    return structPointer(unsafe.Pointer(v.Pointer()))
-}
-
-// IsNil reports whether p is nil.
-func structPointer_IsNil(p structPointer) bool {
-    return p == nil
-}
-
-// Interface returns the struct pointer, assumed to have element type t,
-// as an interface value.
-func structPointer_Interface(p structPointer, t reflect.Type) interface{} {
-    return reflect.NewAt(t, unsafe.Pointer(p)).Interface()
-}
+const unsafeAllowed = true

-// A field identifies a field in a struct, accessible from a structPointer.
+// A field identifies a field in a struct, accessible from a pointer.
 // In this implementation, a field is identified by its byte offset from the start of the struct.
 type field uintptr
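In the unsafe variant a field is just a byte offset from the start of the struct (type field uintptr above), and the accessors add that offset to the struct's address. A small sketch of the same offset-and-cast pattern using unsafe.Offsetof (struct and field names are made up):

package main

import (
    "fmt"
    "unsafe"
)

type example struct {
    A int32
    B string
}

func main() {
    e := example{A: 1, B: "x"}

    // A field is identified by its byte offset from the start of the struct.
    off := unsafe.Offsetof(e.B)

    // Conversion to uintptr, offset arithmetic, and conversion back must stay
    // in one expression, per the unsafe.Pointer rules.
    p := unsafe.Pointer(uintptr(unsafe.Pointer(&e)) + off)
    *(*string)(p) = "patched"

    fmt.Println(e.B) // patched
}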
@@ -80,187 +55,254 @@ func toField(f *reflect.StructField) field {
 // invalidField is an invalid field identifier.
 const invalidField = ^field(0)

+// zeroField is a noop when calling pointer.offset.
+const zeroField = field(0)
+
 // IsValid reports whether the field identifier is valid.
 func (f field) IsValid() bool {
-    return f != ^field(0)
+    return f != invalidField
 }

-// Bytes returns the address of a []byte field in the struct.
-func structPointer_Bytes(p structPointer, f field) *[]byte { return (*[]byte)(unsafe.Pointer(uintptr(p) + uintptr(f))) }
-func structPointer_BytesSlice(p structPointer, f field) *[][]byte { return (*[][]byte)(unsafe.Pointer(uintptr(p) + uintptr(f))) }
-func structPointer_Bool(p structPointer, f field) **bool { return (**bool)(unsafe.Pointer(uintptr(p) + uintptr(f))) }
-func structPointer_BoolVal(p structPointer, f field) *bool { return (*bool)(unsafe.Pointer(uintptr(p) + uintptr(f))) }
-func structPointer_BoolSlice(p structPointer, f field) *[]bool { return (*[]bool)(unsafe.Pointer(uintptr(p) + uintptr(f))) }
-func structPointer_String(p structPointer, f field) **string { return (**string)(unsafe.Pointer(uintptr(p) + uintptr(f))) }
-func structPointer_StringVal(p structPointer, f field) *string { return (*string)(unsafe.Pointer(uintptr(p) + uintptr(f))) }
-func structPointer_StringSlice(p structPointer, f field) *[]string { return (*[]string)(unsafe.Pointer(uintptr(p) + uintptr(f))) }
-func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension { return (*map[int32]Extension)(unsafe.Pointer(uintptr(p) + uintptr(f))) }
-func structPointer_NewAt(p structPointer, f field, typ reflect.Type) reflect.Value { return reflect.NewAt(typ, unsafe.Pointer(uintptr(p)+uintptr(f))) }
-func structPointer_SetStructPointer(p structPointer, f field, q structPointer) { *(*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f))) = q }
-func structPointer_GetStructPointer(p structPointer, f field) structPointer { return *(*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f))) }
-func structPointer_StructPointerSlice(p structPointer, f field) *structPointerSlice { return (*structPointerSlice)(unsafe.Pointer(uintptr(p) + uintptr(f))) }
-
-// A structPointerSlice represents a slice of pointers to structs (themselves submessages or groups).
-type structPointerSlice []structPointer
-
-func (v *structPointerSlice) Len() int                  { return len(*v) }
-func (v *structPointerSlice) Index(i int) structPointer { return (*v)[i] }
-func (v *structPointerSlice) Append(p structPointer)    { *v = append(*v, p) }
-
-// A word32 is the address of a "pointer to 32-bit value" field.
-type word32 **uint32
-
-func word32_IsNil(p word32) bool { return *p == nil }
-
-// Set sets *v to point at a newly allocated word set to x.
-func word32_Set(p word32, o *Buffer, x uint32) {
-    if len(o.uint32s) == 0 {
-        o.uint32s = make([]uint32, uint32PoolSize)
-    }
-    o.uint32s[0] = x
-    *p = &o.uint32s[0]
-    o.uint32s = o.uint32s[1:]
-}
-
-func word32_Get(p word32) uint32 { return **p }
-
-func structPointer_Word32(p structPointer, f field) word32 { return word32((**uint32)(unsafe.Pointer(uintptr(p) + uintptr(f)))) }
-
-// A word32Val is the address of a 32-bit value field.
-type word32Val *uint32
-
-func word32Val_Set(p word32Val, x uint32) { *p = x }
-func word32Val_Get(p word32Val) uint32    { return *p }
-
-func structPointer_Word32Val(p structPointer, f field) word32Val { return word32Val((*uint32)(unsafe.Pointer(uintptr(p) + uintptr(f)))) }
-
-// A word32Slice is a slice of 32-bit values.
-type word32Slice []uint32
-
-func (v *word32Slice) Append(x uint32)    { *v = append(*v, x) }
-func (v *word32Slice) Len() int           { return len(*v) }
-func (v *word32Slice) Index(i int) uint32 { return (*v)[i] }
-
-func structPointer_Word32Slice(p structPointer, f field) *word32Slice { return (*word32Slice)(unsafe.Pointer(uintptr(p) + uintptr(f))) }
-
-// word64 is like word32 but for 64-bit values.
-type word64 **uint64
-
-func word64_Set(p word64, o *Buffer, x uint64) {
-    if len(o.uint64s) == 0 {
-        o.uint64s = make([]uint64, uint64PoolSize)
-    }
-    o.uint64s[0] = x
-    *p = &o.uint64s[0]
-    o.uint64s = o.uint64s[1:]
-}
-
-func word64_IsNil(p word64) bool { return *p == nil }
-func word64_Get(p word64) uint64 { return **p }
-
-func structPointer_Word64(p structPointer, f field) word64 { return word64((**uint64)(unsafe.Pointer(uintptr(p) + uintptr(f)))) }
-
-// word64Val is like word32Val but for 64-bit values.
-type word64Val *uint64
-
-func word64Val_Set(p word64Val, o *Buffer, x uint64) { *p = x }
-func word64Val_Get(p word64Val) uint64               { return *p }
-
-func structPointer_Word64Val(p structPointer, f field) word64Val { return word64Val((*uint64)(unsafe.Pointer(uintptr(p) + uintptr(f)))) }
-
-// word64Slice is like word32Slice but for 64-bit values.
-type word64Slice []uint64
-
-func (v *word64Slice) Append(x uint64)    { *v = append(*v, x) }
-func (v *word64Slice) Len() int           { return len(*v) }
-func (v *word64Slice) Index(i int) uint64 { return (*v)[i] }
-
-func structPointer_Word64Slice(p structPointer, f field) *word64Slice { return (*word64Slice)(unsafe.Pointer(uintptr(p) + uintptr(f))) }
+// The pointer type below is for the new table-driven encoder/decoder.
+// The implementation here uses unsafe.Pointer to create a generic pointer.
+// In pointer_reflect.go we use reflect instead of unsafe to implement
+// the same (but slower) interface.
+type pointer struct {
+    p unsafe.Pointer
+}
+
+// size of pointer
+var ptrSize = unsafe.Sizeof(uintptr(0))
+
+// toPointer converts an interface of pointer type to a pointer
+// that points to the same target.
+func toPointer(i *Message) pointer {
+    // Super-tricky - read pointer out of data word of interface value.
+    // Saves ~25ns over the equivalent:
+    // return valToPointer(reflect.ValueOf(*i))
+    return pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]}
+}
+
+// toAddrPointer converts an interface to a pointer that points to
+// the interface data.
+func toAddrPointer(i *interface{}, isptr bool) pointer {
+    // Super-tricky - read or get the address of data word of interface value.
+    if isptr {
+        // The interface is of pointer type, thus it is a direct interface.
+        // The data word is the pointer data itself. We take its address.
+        return pointer{p: unsafe.Pointer(uintptr(unsafe.Pointer(i)) + ptrSize)}
+    }
+    // The interface is not of pointer type. The data word is the pointer
+    // to the data.
+    return pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]}
+}
+
+// valToPointer converts v to a pointer. v must be of pointer type.
+func valToPointer(v reflect.Value) pointer {
+    return pointer{p: unsafe.Pointer(v.Pointer())}
+}
+
+// offset converts from a pointer to a structure to a pointer to
+// one of its fields.
+func (p pointer) offset(f field) pointer {
+    // For safety, we should panic if !f.IsValid, however calling panic causes
+    // this to no longer be inlineable, which is a serious performance cost.
+    /*
+        if !f.IsValid() {
+            panic("invalid field")
+        }
+    */
+    return pointer{p: unsafe.Pointer(uintptr(p.p) + uintptr(f))}
+}
+
+func (p pointer) isNil() bool {
+    return p.p == nil
+}
+
+func (p pointer) toInt64() *int64        { return (*int64)(p.p) }
+func (p pointer) toInt64Ptr() **int64    { return (**int64)(p.p) }
+func (p pointer) toInt64Slice() *[]int64 { return (*[]int64)(p.p) }
+func (p pointer) toInt32() *int32        { return (*int32)(p.p) }
+
+// See pointer_reflect.go for why toInt32Ptr/Slice doesn't exist.
+func (p pointer) getInt32Ptr() *int32 { return *(**int32)(p.p) }
+func (p pointer) setInt32Ptr(v int32) { *(**int32)(p.p) = &v }
+
+// getInt32Slice loads a []int32 from p.
+// The value returned is aliased with the original slice.
+// This behavior differs from the implementation in pointer_reflect.go.
+func (p pointer) getInt32Slice() []int32 { return *(*[]int32)(p.p) }
+
+// setInt32Slice stores a []int32 to p.
+// The value set is aliased with the input slice.
+// This behavior differs from the implementation in pointer_reflect.go.
+func (p pointer) setInt32Slice(v []int32) { *(*[]int32)(p.p) = v }
+
+// TODO: Can we get rid of appendInt32Slice and use setInt32Slice instead?
+func (p pointer) appendInt32Slice(v int32) {
+    s := (*[]int32)(p.p)
+    *s = append(*s, v)
+}
+
+func (p pointer) toUint64() *uint64                     { return (*uint64)(p.p) }
+func (p pointer) toUint64Ptr() **uint64                 { return (**uint64)(p.p) }
+func (p pointer) toUint64Slice() *[]uint64              { return (*[]uint64)(p.p) }
+func (p pointer) toUint32() *uint32                     { return (*uint32)(p.p) }
+func (p pointer) toUint32Ptr() **uint32                 { return (**uint32)(p.p) }
+func (p pointer) toUint32Slice() *[]uint32              { return (*[]uint32)(p.p) }
+func (p pointer) toBool() *bool                         { return (*bool)(p.p) }
+func (p pointer) toBoolPtr() **bool                     { return (**bool)(p.p) }
+func (p pointer) toBoolSlice() *[]bool                  { return (*[]bool)(p.p) }
+func (p pointer) toFloat64() *float64                   { return (*float64)(p.p) }
+func (p pointer) toFloat64Ptr() **float64               { return (**float64)(p.p) }
+func (p pointer) toFloat64Slice() *[]float64            { return (*[]float64)(p.p) }
+func (p pointer) toFloat32() *float32                   { return (*float32)(p.p) }
+func (p pointer) toFloat32Ptr() **float32               { return (**float32)(p.p) }
+func (p pointer) toFloat32Slice() *[]float32            { return (*[]float32)(p.p) }
+func (p pointer) toString() *string                     { return (*string)(p.p) }
+func (p pointer) toStringPtr() **string                 { return (**string)(p.p) }
+func (p pointer) toStringSlice() *[]string              { return (*[]string)(p.p) }
+func (p pointer) toBytes() *[]byte                      { return (*[]byte)(p.p) }
+func (p pointer) toBytesSlice() *[][]byte               { return (*[][]byte)(p.p) }
+func (p pointer) toExtensions() *XXX_InternalExtensions { return (*XXX_InternalExtensions)(p.p) }
+func (p pointer) toOldExtensions() *map[int32]Extension { return (*map[int32]Extension)(p.p) }
+
+// getPointerSlice loads []*T from p as a []pointer.
+// The value returned is aliased with the original slice.
+// This behavior differs from the implementation in pointer_reflect.go.
+func (p pointer) getPointerSlice() []pointer {
+    // Super-tricky - p should point to a []*T where T is a
+    // message type. We load it as []pointer.
+    return *(*[]pointer)(p.p)
+}
+
+// setPointerSlice stores []pointer into p as a []*T.
+// The value set is aliased with the input slice.
+// This behavior differs from the implementation in pointer_reflect.go.
+func (p pointer) setPointerSlice(v []pointer) {
+    // Super-tricky - p should point to a []*T where T is a
+    // message type. We store it as []pointer.
+    *(*[]pointer)(p.p) = v
+}
+
+// getPointer loads the pointer at p and returns it.
+func (p pointer) getPointer() pointer {
+    return pointer{p: *(*unsafe.Pointer)(p.p)}
+}
+
+// setPointer stores the pointer q at p.
+func (p pointer) setPointer(q pointer) {
+    *(*unsafe.Pointer)(p.p) = q.p
+}
+
+// append q to the slice pointed to by p.
+func (p pointer) appendPointer(q pointer) {
+    s := (*[]unsafe.Pointer)(p.p)
+    *s = append(*s, q.p)
+}
+
+// getInterfacePointer returns a pointer that points to the
+// interface data of the interface pointed by p.
+func (p pointer) getInterfacePointer() pointer {
+    // Super-tricky - read pointer out of data word of interface value.
+    return pointer{p: (*(*[2]unsafe.Pointer)(p.p))[1]}
+}
+
+// asPointerTo returns a reflect.Value that is a pointer to an
+// object of type t stored at p.
+func (p pointer) asPointerTo(t reflect.Type) reflect.Value {
+    return reflect.NewAt(t, p.p)
+}
+
+func atomicLoadUnmarshalInfo(p **unmarshalInfo) *unmarshalInfo {
+    return (*unmarshalInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p))))
+}
+func atomicStoreUnmarshalInfo(p **unmarshalInfo, v *unmarshalInfo) {
+    atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v))
+}
+func atomicLoadMarshalInfo(p **marshalInfo) *marshalInfo {
+    return (*marshalInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p))))
+}
+func atomicStoreMarshalInfo(p **marshalInfo, v *marshalInfo) {
+    atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v))
+}
+func atomicLoadMergeInfo(p **mergeInfo) *mergeInfo {
+    return (*mergeInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p))))
+}
+func atomicStoreMergeInfo(p **mergeInfo, v *mergeInfo) {
+    atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v))
+}
+func atomicLoadDiscardInfo(p **discardInfo) *discardInfo {
+    return (*discardInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p))))
+}
+func atomicStoreDiscardInfo(p **discardInfo, v *discardInfo) {
+    atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v))
+}

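toPointer and getInterfacePointer above lean on the gc toolchain's two-word interface layout: the second word of an interface value is the data pointer. A hedged, self-contained demonstration of that assumption (it is implementation-dependent and not guaranteed by the language spec):

package main

import (
    "fmt"
    "reflect"
    "unsafe"
)

type msg struct{ n int }

func main() {
    var i interface{} = &msg{n: 5}

    // Layout assumption (gc toolchain): an interface value is two words,
    // type word then data word. Index [1] is the stored pointer.
    p := (*[2]unsafe.Pointer)(unsafe.Pointer(&i))[1]

    fmt.Println((*msg)(p).n)                                 // 5
    fmt.Println(uintptr(p) == reflect.ValueOf(i).Pointer()) // true on gc
}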
454 vendor/github.com/golang/protobuf/proto/properties.go (generated, vendored)
@@ -58,42 +58,6 @@
 	WireFixed32 = 5
 )

-const startSize = 10 // initial slice/string sizes
-
-// Encoders are defined in encode.go
-// An encoder outputs the full representation of a field, including its
-// tag and encoder type.
-type encoder func(p *Buffer, prop *Properties, base structPointer) error
-
-// A valueEncoder encodes a single integer in a particular encoding.
-type valueEncoder func(o *Buffer, x uint64) error
-
-// Sizers are defined in encode.go
-// A sizer returns the encoded size of a field, including its tag and encoder
-// type.
-type sizer func(prop *Properties, base structPointer) int
-
-// A valueSizer returns the encoded size of a single integer in a particular
-// encoding.
-type valueSizer func(x uint64) int
-
-// Decoders are defined in decode.go
-// A decoder creates a value from its wire representation.
-// Unrecognized subelements are saved in unrec.
-type decoder func(p *Buffer, prop *Properties, base structPointer) error
-
-// A valueDecoder decodes a single integer in a particular encoding.
-type valueDecoder func(o *Buffer) (x uint64, err error)
-
-// A oneofMarshaler does the marshaling for all oneof fields in a message.
-type oneofMarshaler func(Message, *Buffer) error
-
-// A oneofUnmarshaler does the unmarshaling for a oneof field in a message.
-type oneofUnmarshaler func(Message, int, int, *Buffer) (bool, error)
-
-// A oneofSizer does the sizing for all oneof fields in a message.
-type oneofSizer func(Message) int
-
 // tagMap is an optimization over map[int]int for typical protocol buffer
 // use-cases. Encoded protocol buffers are often in tag order with small tag
 // numbers.
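The block removed above wired encoding through per-field function values such as sizer and valueSizer; the table-driven codec that replaces it lives elsewhere in the package. A tiny, self-contained sketch of that function-typed-table pattern with hypothetical names (only sizeVarint's arithmetic mirrors real varint sizing):

package main

import "fmt"

// sizer stands in for the removed per-encoding size functions.
type sizer func(x uint64) int

// sizeVarint counts the 7-bit groups needed to encode x.
func sizeVarint(x uint64) int {
    n := 1
    for x >= 0x80 {
        x >>= 7
        n++
    }
    return n
}

// sizeFixed32 is always four bytes, regardless of the value.
func sizeFixed32(uint64) int { return 4 }

func main() {
    table := map[string]sizer{"varint": sizeVarint, "fixed32": sizeFixed32}
    fmt.Println(table["varint"](300), table["fixed32"](300)) // 2 4
}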
@@ -140,13 +104,6 @@ type StructProperties struct {
 	decoderTags      tagMap         // map from proto tag to struct field number
 	decoderOrigNames map[string]int // map from original name to struct field number
 	order            []int          // list of struct field numbers in tag order
-	unrecField       field          // field id of the XXX_unrecognized []byte field
-	extendable       bool           // is this an extendable proto
-
-	oneofMarshaler   oneofMarshaler
-	oneofUnmarshaler oneofUnmarshaler
-	oneofSizer       oneofSizer
-	stype            reflect.Type

 	// OneofTypes contains information about the oneof fields in this message.
 	// It is keyed by the original name of a field.
@@ -182,41 +139,24 @@ type Properties struct {
 	Repeated bool
 	Packed   bool   // relevant for repeated primitives only
 	Enum     string // set for enum types only
-	proto3   bool   // whether this is known to be a proto3 field; set for []byte only
+	proto3   bool   // whether this is known to be a proto3 field
 	oneof    bool   // whether this is a oneof field

 	Default    string // default value
 	HasDefault bool   // whether an explicit default was provided
-	def_uint64 uint64
-
-	enc           encoder
-	valEnc        valueEncoder // set for bool and numeric types only
-	field         field
-	tagcode       []byte // encoding of EncodeVarint((Tag<<3)|WireType)
-	tagbuf        [8]byte
-	stype         reflect.Type      // set for struct types only
-	sprop         *StructProperties // set for struct types only
-	isMarshaler   bool
-	isUnmarshaler bool
+
+	stype reflect.Type      // set for struct types only
+	sprop *StructProperties // set for struct types only

 	mtype    reflect.Type // set for map types only
-	mkeyprop *Properties  // set for map types only
-	mvalprop *Properties  // set for map types only
-
-	size    sizer
-	valSize valueSizer // set for bool and numeric types only
-
-	dec    decoder
-	valDec valueDecoder // set for bool and numeric types only
-
-	// If this is a packable field, this will be the decoder for the packed version of the field.
-	packedDec decoder
+	MapKeyProp *Properties // set for map types only
+	MapValProp *Properties // set for map types only
 }

 // String formats the properties in the protobuf struct field tag style.
 func (p *Properties) String() string {
 	s := p.Wire
-	s = ","
+	s += ","
 	s += strconv.Itoa(p.Tag)
 	if p.Required {
 		s += ",req"
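The one-character fix above (s = "," becoming s += ",") is the difference between discarding the wire name and appending to it when Properties.String rebuilds the struct-tag text. A small sketch of the same concatenation, with a hypothetical format helper:

package main

import (
    "fmt"
    "strconv"
    "strings"
)

// format mimics the start of Properties.String: with `s = ","` the wire name
// would be lost; `s += ","` keeps it and appends.
func format(wire string, tag int, required bool) string {
    s := wire
    s += ","
    s += strconv.Itoa(tag)
    if required {
        s += ",req"
    }
    return s
}

func main() {
    tag := format("bytes", 3, true)
    fmt.Println(tag)                     // bytes,3,req
    fmt.Println(strings.Split(tag, ",")) // [bytes 3 req]
}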
@@ -262,29 +202,14 @@ func (p *Properties) Parse(s string) {
 	switch p.Wire {
 	case "varint":
 		p.WireType = WireVarint
-		p.valEnc = (*Buffer).EncodeVarint
-		p.valDec = (*Buffer).DecodeVarint
-		p.valSize = sizeVarint
 	case "fixed32":
 		p.WireType = WireFixed32
-		p.valEnc = (*Buffer).EncodeFixed32
-		p.valDec = (*Buffer).DecodeFixed32
-		p.valSize = sizeFixed32
 	case "fixed64":
 		p.WireType = WireFixed64
-		p.valEnc = (*Buffer).EncodeFixed64
-		p.valDec = (*Buffer).DecodeFixed64
-		p.valSize = sizeFixed64
 	case "zigzag32":
 		p.WireType = WireVarint
-		p.valEnc = (*Buffer).EncodeZigzag32
-		p.valDec = (*Buffer).DecodeZigzag32
-		p.valSize = sizeZigzag32
 	case "zigzag64":
 		p.WireType = WireVarint
-		p.valEnc = (*Buffer).EncodeZigzag64
-		p.valDec = (*Buffer).DecodeZigzag64
-		p.valSize = sizeZigzag64
 	case "bytes", "group":
 		p.WireType = WireBytes
 		// no numeric converter for non-numeric types
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
outer:
|
||||||
for i := 2; i < len(fields); i++ {
|
for i := 2; i < len(fields); i++ {
|
||||||
f := fields[i]
|
f := fields[i]
|
||||||
switch {
|
switch {
|
||||||
|
@@ -326,260 +252,41 @@ func (p *Properties) Parse(s string) {
 			if i+1 < len(fields) {
 				// Commas aren't escaped, and def is always last.
 				p.Default += "," + strings.Join(fields[i+1:], ",")
-				break
+				break outer
 			}
 		}
 	}
 }

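The outer: label introduced in the previous hunk exists so the break above can leave the field-scanning loop, not just the enclosing switch. A runnable illustration of that difference (the field values are made up):

package main

import (
    "fmt"
    "strings"
)

func main() {
    fields := []string{"opt", "name=foo", "def=a,b,c"}
outer:
    for i := 0; i < len(fields); i++ {
        switch {
        case strings.HasPrefix(fields[i], "def="):
            // Inside a switch, a bare break only leaves the switch; the
            // labeled form also exits the surrounding loop, which is what
            // `break outer` in the hunk above achieves.
            break outer
        }
        fmt.Println("scanned", fields[i])
    }
    fmt.Println("done") // reached without scanning the def= entry
}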
-func logNoSliceEnc(t1, t2 reflect.Type) {
-	fmt.Fprintf(os.Stderr, "proto: no slice oenc for %T = []%T\n", t1, t2)
-}
-
 var protoMessageType = reflect.TypeOf((*Message)(nil)).Elem()

-// Initialize the fields for encoding and decoding.
-func (p *Properties) setEncAndDec(typ reflect.Type, f *reflect.StructField, lockGetProp bool) {
-	p.enc = nil
-	p.dec = nil
-	p.size = nil
-
+// setFieldProps initializes the field properties for submessages and maps.
+func (p *Properties) setFieldProps(typ reflect.Type, f *reflect.StructField, lockGetProp bool) {
 	switch t1 := typ; t1.Kind() {
-	default:
-		fmt.Fprintf(os.Stderr, "proto: no coders for %v\n", t1)
-
-	// proto3 scalar types
-
-	case reflect.Bool:
-		p.enc = (*Buffer).enc_proto3_bool
-		p.dec = (*Buffer).dec_proto3_bool
-		p.size = size_proto3_bool
-	case reflect.Int32:
-		p.enc = (*Buffer).enc_proto3_int32
-		p.dec = (*Buffer).dec_proto3_int32
-		p.size = size_proto3_int32
-	case reflect.Uint32:
-		p.enc = (*Buffer).enc_proto3_uint32
-		p.dec = (*Buffer).dec_proto3_int32 // can reuse
-		p.size = size_proto3_uint32
-	case reflect.Int64, reflect.Uint64:
-		p.enc = (*Buffer).enc_proto3_int64
-		p.dec = (*Buffer).dec_proto3_int64
-		p.size = size_proto3_int64
-	case reflect.Float32:
-		p.enc = (*Buffer).enc_proto3_uint32 // can just treat them as bits
-		p.dec = (*Buffer).dec_proto3_int32
-		p.size = size_proto3_uint32
-	case reflect.Float64:
-		p.enc = (*Buffer).enc_proto3_int64 // can just treat them as bits
-		p.dec = (*Buffer).dec_proto3_int64
-		p.size = size_proto3_int64
-	case reflect.String:
-		p.enc = (*Buffer).enc_proto3_string
-		p.dec = (*Buffer).dec_proto3_string
-		p.size = size_proto3_string
-
 	case reflect.Ptr:
-		switch t2 := t1.Elem(); t2.Kind() {
-		default:
-			fmt.Fprintf(os.Stderr, "proto: no encoder function for %v -> %v\n", t1, t2)
-			break
-		case reflect.Struct:
+		if t1.Elem().Kind() == reflect.Struct {
 			p.stype = t1.Elem()
-			p.isMarshaler = isMarshaler(t1)
-			p.isUnmarshaler = isUnmarshaler(t1)
-			if p.Wire == "bytes" {
-				p.enc = (*Buffer).enc_struct_message
-				p.dec = (*Buffer).dec_struct_message
-				p.size = size_struct_message
-			} else {
-				p.enc = (*Buffer).enc_struct_group
-				p.dec = (*Buffer).dec_struct_group
-				p.size = size_struct_group
-			}
 		}

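The rewritten setFieldProps above only records sub-properties for message-shaped fields; the same shape test is applied to slice and map fields in the lines that follow. A small sketch that mirrors those kind checks on a hypothetical struct:

package main

import (
    "fmt"
    "reflect"
)

type inner struct{ X int32 }

// sample stands in for a generated message struct.
type sample struct {
    Msg   *inner
    Reps  []*inner
    Table map[string]*inner
    Count int32
}

// classify mirrors the shape checks kept by setFieldProps: only pointers to
// structs, slices of pointers to structs, and maps get sub-properties.
func classify(t reflect.Type) string {
    switch t.Kind() {
    case reflect.Ptr:
        if t.Elem().Kind() == reflect.Struct {
            return "message"
        }
    case reflect.Slice:
        if e := t.Elem(); e.Kind() == reflect.Ptr && e.Elem().Kind() == reflect.Struct {
            return "repeated message"
        }
    case reflect.Map:
        return "map"
    }
    return "scalar or other"
}

func main() {
    t := reflect.TypeOf(sample{})
    for i := 0; i < t.NumField(); i++ {
        fmt.Printf("%-5s -> %s\n", t.Field(i).Name, classify(t.Field(i).Type))
    }
}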
||||||
 	case reflect.Slice:
-		switch t2 := t1.Elem(); t2.Kind() {
+		if t2 := t1.Elem(); t2.Kind() == reflect.Ptr && t2.Elem().Kind() == reflect.Struct {
-		default:
-			logNoSliceEnc(t1, t2)
-			break
-		case reflect.Bool:
-			if p.Packed {
-				p.enc = (*Buffer).enc_slice_packed_bool
-				p.size = size_slice_packed_bool
-			} else {
-				p.enc = (*Buffer).enc_slice_bool
-				p.size = size_slice_bool
-			}
-			p.dec = (*Buffer).dec_slice_bool
-			p.packedDec = (*Buffer).dec_slice_packed_bool
-		case reflect.Int32:
-			if p.Packed {
-				p.enc = (*Buffer).enc_slice_packed_int32
-				p.size = size_slice_packed_int32
-			} else {
-				p.enc = (*Buffer).enc_slice_int32
-				p.size = size_slice_int32
-			}
-			p.dec = (*Buffer).dec_slice_int32
-			p.packedDec = (*Buffer).dec_slice_packed_int32
-		case reflect.Uint32:
-			if p.Packed {
-				p.enc = (*Buffer).enc_slice_packed_uint32
-				p.size = size_slice_packed_uint32
-			} else {
-				p.enc = (*Buffer).enc_slice_uint32
-				p.size = size_slice_uint32
-			}
-			p.dec = (*Buffer).dec_slice_int32
-			p.packedDec = (*Buffer).dec_slice_packed_int32
-		case reflect.Int64, reflect.Uint64:
-			if p.Packed {
-				p.enc = (*Buffer).enc_slice_packed_int64
-				p.size = size_slice_packed_int64
-			} else {
-				p.enc = (*Buffer).enc_slice_int64
-				p.size = size_slice_int64
-			}
-			p.dec = (*Buffer).dec_slice_int64
-			p.packedDec = (*Buffer).dec_slice_packed_int64
-		case reflect.Uint8:
-			p.enc = (*Buffer).enc_slice_byte
-			p.dec = (*Buffer).dec_slice_byte
-			p.size = size_slice_byte
-			// This is a []byte, which is either a bytes field,
-			// or the value of a map field. In the latter case,
-			// we always encode an empty []byte, so we should not
-			// use the proto3 enc/size funcs.
-			// f == nil iff this is the key/value of a map field.
-			if p.proto3 && f != nil {
-				p.enc = (*Buffer).enc_proto3_slice_byte
-				p.size = size_proto3_slice_byte
-			}
-		case reflect.Float32, reflect.Float64:
-			switch t2.Bits() {
-			case 32:
-				// can just treat them as bits
-				if p.Packed {
-					p.enc = (*Buffer).enc_slice_packed_uint32
-					p.size = size_slice_packed_uint32
-				} else {
-					p.enc = (*Buffer).enc_slice_uint32
-					p.size = size_slice_uint32
-				}
-				p.dec = (*Buffer).dec_slice_int32
-				p.packedDec = (*Buffer).dec_slice_packed_int32
-			case 64:
-				// can just treat them as bits
-				if p.Packed {
-					p.enc = (*Buffer).enc_slice_packed_int64
-					p.size = size_slice_packed_int64
-				} else {
-					p.enc = (*Buffer).enc_slice_int64
-					p.size = size_slice_int64
-				}
-				p.dec = (*Buffer).dec_slice_int64
-				p.packedDec = (*Buffer).dec_slice_packed_int64
-			default:
-				logNoSliceEnc(t1, t2)
-				break
-			}
-		case reflect.String:
-			p.enc = (*Buffer).enc_slice_string
-			p.dec = (*Buffer).dec_slice_string
-			p.size = size_slice_string
-		case reflect.Ptr:
-			switch t3 := t2.Elem(); t3.Kind() {
-			default:
-				fmt.Fprintf(os.Stderr, "proto: no ptr oenc for %T -> %T -> %T\n", t1, t2, t3)
-				break
-			case reflect.Struct:
 				p.stype = t2.Elem()
-				p.isMarshaler = isMarshaler(t2)
-				p.isUnmarshaler = isUnmarshaler(t2)
-				if p.Wire == "bytes" {
-					p.enc = (*Buffer).enc_slice_struct_message
-					p.dec = (*Buffer).dec_slice_struct_message
-					p.size = size_slice_struct_message
-				} else {
-					p.enc = (*Buffer).enc_slice_struct_group
-					p.dec = (*Buffer).dec_slice_struct_group
-					p.size = size_slice_struct_group
-				}
-			}
-		case reflect.Slice:
-			switch t2.Elem().Kind() {
-			default:
-				fmt.Fprintf(os.Stderr, "proto: no slice elem oenc for %T -> %T -> %T\n", t1, t2, t2.Elem())
-				break
-			case reflect.Uint8:
-				p.enc = (*Buffer).enc_slice_slice_byte
-				p.dec = (*Buffer).dec_slice_slice_byte
-				p.size = size_slice_slice_byte
-			}
-		}
 		}
 	case reflect.Map:
-		p.enc = (*Buffer).enc_new_map
-		p.dec = (*Buffer).dec_new_map
-		p.size = size_new_map
-
 		p.mtype = t1
-		p.mkeyprop = &Properties{}
+		p.MapKeyProp = &Properties{}
-		p.mkeyprop.init(reflect.PtrTo(p.mtype.Key()), "Key", f.Tag.Get("protobuf_key"), nil, lockGetProp)
+		p.MapKeyProp.init(reflect.PtrTo(p.mtype.Key()), "Key", f.Tag.Get("protobuf_key"), nil, lockGetProp)
-		p.mvalprop = &Properties{}
+		p.MapValProp = &Properties{}
 		vtype := p.mtype.Elem()
 		if vtype.Kind() != reflect.Ptr && vtype.Kind() != reflect.Slice {
 			// The value type is not a message (*T) or bytes ([]byte),
 			// so we need encoders for the pointer to this type.
 			vtype = reflect.PtrTo(vtype)
 		}
-		p.mvalprop.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp)
+		p.MapValProp.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp)
 	}
-	// precalculate tag code
-	wire := p.WireType
-	if p.Packed {
-		wire = WireBytes
-	}
-	x := uint32(p.Tag)<<3 | uint32(wire)
-	i := 0
-	for i = 0; x > 127; i++ {
-		p.tagbuf[i] = 0x80 | uint8(x&0x7F)
-		x >>= 7
-	}
-	p.tagbuf[i] = uint8(x)
-	p.tagcode = p.tagbuf[0 : i+1]
-
 	if p.stype != nil {
 		if lockGetProp {
 			p.sprop = GetProperties(p.stype)
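The block deleted above precomputed each field's wire-format key (field number plus wire type) as a varint into p.tagcode; that work now lives in the table-driven marshaler added by this update. For readers following the diff, here is a small self-contained sketch of that computation. The function name and the example field numbers are illustrative only, not part of the library.

package main

import "fmt"

// tagKey mirrors the varint key the removed block precomputed: the field
// number shifted left by three bits, ORed with the wire type, then encoded
// base-128 with the high bit of each byte as a continuation flag.
func tagKey(fieldNum int32, wireType uint32) []byte {
	x := uint32(fieldNum)<<3 | wireType
	var buf []byte
	for x > 127 {
		buf = append(buf, 0x80|uint8(x&0x7F))
		x >>= 7
	}
	return append(buf, uint8(x))
}

func main() {
	// Field 1 with wire type 2 (length-delimited) encodes as the single key byte 0x0a.
	fmt.Printf("% x\n", tagKey(1, 2)) // 0a
	// Field 16 with wire type 0 needs two bytes once the shifted value exceeds 127.
	fmt.Printf("% x\n", tagKey(16, 0)) // 80 01
}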
@@ -591,31 +298,8 @@ func (p *Properties) setEncAndDec(typ reflect.Type, f *reflect.StructField, lock

 var (
 	marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem()
-	unmarshalerType = reflect.TypeOf((*Unmarshaler)(nil)).Elem()
 )

-// isMarshaler reports whether type t implements Marshaler.
-func isMarshaler(t reflect.Type) bool {
-	// We're checking for (likely) pointer-receiver methods
-	// so if t is not a pointer, something is very wrong.
-	// The calls above only invoke isMarshaler on pointer types.
-	if t.Kind() != reflect.Ptr {
-		panic("proto: misuse of isMarshaler")
-	}
-	return t.Implements(marshalerType)
-}
-
-// isUnmarshaler reports whether type t implements Unmarshaler.
-func isUnmarshaler(t reflect.Type) bool {
-	// We're checking for (likely) pointer-receiver methods
-	// so if t is not a pointer, something is very wrong.
-	// The calls above only invoke isUnmarshaler on pointer types.
-	if t.Kind() != reflect.Ptr {
-		panic("proto: misuse of isUnmarshaler")
-	}
-	return t.Implements(unmarshalerType)
-}
-
 // Init populates the properties from a protocol buffer struct tag.
 func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) {
 	p.init(typ, name, tag, f, true)
@@ -625,14 +309,11 @@ func (p *Properties) init(typ reflect.Type, name, tag string, f *reflect.StructF
 	// "bytes,49,opt,def=hello!"
 	p.Name = name
 	p.OrigName = name
-	if f != nil {
-		p.field = toField(f)
-	}
 	if tag == "" {
 		return
 	}
 	p.Parse(tag)
-	p.setEncAndDec(typ, f, lockGetProp)
+	p.setFieldProps(typ, f, lockGetProp)
 }

 var (
@@ -682,8 +363,6 @@ func getPropertiesLocked(t reflect.Type) *StructProperties {
 	propertiesMap[t] = prop

 	// build properties
-	prop.extendable = reflect.PtrTo(t).Implements(extendableProtoType)
-	prop.unrecField = invalidField
 	prop.Prop = make([]*Properties, t.NumField())
 	prop.order = make([]int, t.NumField())

@@ -693,15 +372,11 @@ func getPropertiesLocked(t reflect.Type) *StructProperties {
 		name := f.Name
 		p.init(f.Type, name, f.Tag.Get("protobuf"), &f, false)

-		if f.Name == "XXX_extensions" { // special case
-			p.enc = (*Buffer).enc_map
-			p.dec = nil // not needed
-			p.size = size_map
-		}
-		if f.Name == "XXX_unrecognized" { // special case
-			prop.unrecField = toField(&f)
-		}
-		oneof := f.Tag.Get("protobuf_oneof") != "" // special case
+		oneof := f.Tag.Get("protobuf_oneof") // special case
+		if oneof != "" {
+			// Oneof fields don't use the traditional protobuf tag.
+			p.OrigName = oneof
+		}
 		prop.Prop[i] = p
 		prop.order[i] = i
 		if debug {
@@ -711,9 +386,6 @@ func getPropertiesLocked(t reflect.Type) *StructProperties {
 			}
 			print("\n")
 		}
-		if p.enc == nil && !strings.HasPrefix(f.Name, "XXX_") && !oneof {
-			fmt.Fprintln(os.Stderr, "proto: no encoder for", f.Name, f.Type.String(), "[GetProperties]")
-		}
 	}

 	// Re-order prop.order.
@@ -724,8 +396,7 @@ func getPropertiesLocked(t reflect.Type) *StructProperties {
 	}
 	if om, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); ok {
 		var oots []interface{}
-		prop.oneofMarshaler, prop.oneofUnmarshaler, prop.oneofSizer, oots = om.XXX_OneofFuncs()
+		_, _, _, oots = om.XXX_OneofFuncs()
-		prop.stype = t

 		// Interpret oneof metadata.
 		prop.OneofTypes = make(map[string]*OneofProperties)
@@ -775,30 +446,6 @@ func getPropertiesLocked(t reflect.Type) *StructProperties {
 	return prop
 }

-// Return the Properties object for the x[0]'th field of the structure.
-func propByIndex(t reflect.Type, x []int) *Properties {
-	if len(x) != 1 {
-		fmt.Fprintf(os.Stderr, "proto: field index dimension %d (not 1) for type %s\n", len(x), t)
-		return nil
-	}
-	prop := GetProperties(t)
-	return prop.Prop[x[0]]
-}
-
-// Get the address and type of a pointer to a struct from an interface.
-func getbase(pb Message) (t reflect.Type, b structPointer, err error) {
-	if pb == nil {
-		err = ErrNil
-		return
-	}
-	// get the reflect type of the pointer to the struct.
-	t = reflect.TypeOf(pb)
-	// get the address of the struct.
-	value := reflect.ValueOf(pb)
-	b = toStructPointer(value)
-	return
-}
-
 // A global registry of enum types.
 // The generated code will register the generated maps by calling RegisterEnum.
@@ -822,25 +469,76 @@ func EnumValueMap(enumType string) map[string]int32 {
 // A registry of all linked message types.
 // The string is a fully-qualified proto name ("pkg.Message").
 var (
-	protoTypes    = make(map[string]reflect.Type)
+	protoTypedNils = make(map[string]Message)      // a map from proto names to typed nil pointers
+	protoMapTypes  = make(map[string]reflect.Type) // a map from proto names to map types
 	revProtoTypes = make(map[reflect.Type]string)
 )

 // RegisterType is called from generated code and maps from the fully qualified
 // proto name to the type (pointer to struct) of the protocol buffer.
 func RegisterType(x Message, name string) {
-	if _, ok := protoTypes[name]; ok {
+	if _, ok := protoTypedNils[name]; ok {
 		// TODO: Some day, make this a panic.
 		log.Printf("proto: duplicate proto type registered: %s", name)
 		return
 	}
 	t := reflect.TypeOf(x)
-	protoTypes[name] = t
+	if v := reflect.ValueOf(x); v.Kind() == reflect.Ptr && v.Pointer() == 0 {
+		// Generated code always calls RegisterType with nil x.
+		// This check is just for extra safety.
+		protoTypedNils[name] = x
+	} else {
+		protoTypedNils[name] = reflect.Zero(t).Interface().(Message)
+	}
 	revProtoTypes[t] = name
 }

+// RegisterMapType is called from generated code and maps from the fully qualified
+// proto name to the native map type of the proto map definition.
+func RegisterMapType(x interface{}, name string) {
+	if reflect.TypeOf(x).Kind() != reflect.Map {
+		panic(fmt.Sprintf("RegisterMapType(%T, %q); want map", x, name))
+	}
+	if _, ok := protoMapTypes[name]; ok {
+		log.Printf("proto: duplicate proto type registered: %s", name)
+		return
+	}
+	t := reflect.TypeOf(x)
+	protoMapTypes[name] = t
+	revProtoTypes[t] = name
+}
+
 // MessageName returns the fully-qualified proto name for the given message type.
-func MessageName(x Message) string { return revProtoTypes[reflect.TypeOf(x)] }
+func MessageName(x Message) string {
+	type xname interface {
+		XXX_MessageName() string
+	}
+	if m, ok := x.(xname); ok {
+		return m.XXX_MessageName()
+	}
+	return revProtoTypes[reflect.TypeOf(x)]
+}

 // MessageType returns the message type (pointer to struct) for a named message.
-func MessageType(name string) reflect.Type { return protoTypes[name] }
+// The type is not guaranteed to implement proto.Message if the name refers to a
+// map entry.
+func MessageType(name string) reflect.Type {
+	if t, ok := protoTypedNils[name]; ok {
+		return reflect.TypeOf(t)
+	}
+	return protoMapTypes[name]
+}
+
+// A registry of all linked proto files.
+var (
+	protoFiles = make(map[string][]byte) // file name => fileDescriptor
+)
+
+// RegisterFile is called from generated code and maps from the
+// full file name of a .proto file to its compressed FileDescriptorProto.
+func RegisterFile(filename string, fileDescriptor []byte) {
+	protoFiles[filename] = fileDescriptor
+}
+
+// FileDescriptor returns the compressed FileDescriptorProto for a .proto file.
+func FileDescriptor(filename string) []byte { return protoFiles[filename] }
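A brief usage sketch of the reworked registry above. The Frame type and the name example.Frame are made up for illustration; in practice the generated .pb.go code performs the registration from an init function with a typed nil pointer.

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

// Frame stands in for a generated message type; real generated code
// implements these three methods and calls RegisterType in init().
type Frame struct{ Payload []byte }

func (*Frame) Reset()         {}
func (*Frame) String() string { return "" }
func (*Frame) ProtoMessage()  {}

func main() {
	// Generated code normally registers a (*Frame)(nil) typed nil.
	proto.RegisterType((*Frame)(nil), "example.Frame")

	fmt.Println(proto.MessageName((*Frame)(nil)))   // example.Frame
	fmt.Println(proto.MessageType("example.Frame")) // *main.Frame
}

MessageType now answers from protoTypedNils first and falls back to protoMapTypes, which is why its new doc comment warns that the returned type may be a map entry rather than a proto.Message.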
2767 vendor/github.com/golang/protobuf/proto/table_marshal.go generated vendored Normal file
File diff suppressed because it is too large.
654 vendor/github.com/golang/protobuf/proto/table_merge.go generated vendored Normal file
@@ -0,0 +1,654 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2016 The Go Authors.  All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//     * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

package proto

import (
	"fmt"
	"reflect"
	"strings"
	"sync"
	"sync/atomic"
)

// Merge merges the src message into dst.
// This assumes that dst and src of the same type and are non-nil.
func (a *InternalMessageInfo) Merge(dst, src Message) {
	mi := atomicLoadMergeInfo(&a.merge)
	if mi == nil {
		mi = getMergeInfo(reflect.TypeOf(dst).Elem())
		atomicStoreMergeInfo(&a.merge, mi)
	}
	mi.merge(toPointer(&dst), toPointer(&src))
}

type mergeInfo struct {
	typ reflect.Type

	initialized int32 // 0: only typ is valid, 1: everything is valid
	lock        sync.Mutex

	fields       []mergeFieldInfo
	unrecognized field // Offset of XXX_unrecognized
}

type mergeFieldInfo struct {
	field field // Offset of field, guaranteed to be valid

	// isPointer reports whether the value in the field is a pointer.
	// This is true for the following situations:
	//   * Pointer to struct
	//   * Pointer to basic type (proto2 only)
	//   * Slice (first value in slice header is a pointer)
	//   * String (first value in string header is a pointer)
	isPointer bool

	// basicWidth reports the width of the field assuming that it is directly
	// embedded in the struct (as is the case for basic types in proto3).
	// The possible values are:
	// 	0: invalid
	//	1: bool
	//	4: int32, uint32, float32
	//	8: int64, uint64, float64
	basicWidth int

	// Where dst and src are pointers to the types being merged.
	merge func(dst, src pointer)
}

var (
	mergeInfoMap  = map[reflect.Type]*mergeInfo{}
	mergeInfoLock sync.Mutex
)

func getMergeInfo(t reflect.Type) *mergeInfo {
	mergeInfoLock.Lock()
	defer mergeInfoLock.Unlock()
	mi := mergeInfoMap[t]
	if mi == nil {
		mi = &mergeInfo{typ: t}
		mergeInfoMap[t] = mi
	}
	return mi
}

// merge merges src into dst assuming they are both of type *mi.typ.
func (mi *mergeInfo) merge(dst, src pointer) {
	if dst.isNil() {
		panic("proto: nil destination")
	}
	if src.isNil() {
		return // Nothing to do.
	}

	if atomic.LoadInt32(&mi.initialized) == 0 {
		mi.computeMergeInfo()
	}

	for _, fi := range mi.fields {
		sfp := src.offset(fi.field)

		// As an optimization, we can avoid the merge function call cost
		// if we know for sure that the source will have no effect
		// by checking if it is the zero value.
		if unsafeAllowed {
			if fi.isPointer && sfp.getPointer().isNil() { // Could be slice or string
				continue
			}
			if fi.basicWidth > 0 {
				switch {
				case fi.basicWidth == 1 && !*sfp.toBool():
					continue
				case fi.basicWidth == 4 && *sfp.toUint32() == 0:
					continue
				case fi.basicWidth == 8 && *sfp.toUint64() == 0:
					continue
				}
			}
		}

		dfp := dst.offset(fi.field)
		fi.merge(dfp, sfp)
	}

	// TODO: Make this faster?
	out := dst.asPointerTo(mi.typ).Elem()
	in := src.asPointerTo(mi.typ).Elem()
	if emIn, err := extendable(in.Addr().Interface()); err == nil {
		emOut, _ := extendable(out.Addr().Interface())
		mIn, muIn := emIn.extensionsRead()
		if mIn != nil {
			mOut := emOut.extensionsWrite()
			muIn.Lock()
			mergeExtension(mOut, mIn)
			muIn.Unlock()
		}
	}

	if mi.unrecognized.IsValid() {
		if b := *src.offset(mi.unrecognized).toBytes(); len(b) > 0 {
			*dst.offset(mi.unrecognized).toBytes() = append([]byte(nil), b...)
		}
	}
}

func (mi *mergeInfo) computeMergeInfo() {
	mi.lock.Lock()
	defer mi.lock.Unlock()
	if mi.initialized != 0 {
		return
	}
	t := mi.typ
	n := t.NumField()

	props := GetProperties(t)
	for i := 0; i < n; i++ {
		f := t.Field(i)
		if strings.HasPrefix(f.Name, "XXX_") {
			continue
		}

		mfi := mergeFieldInfo{field: toField(&f)}
		tf := f.Type

		// As an optimization, we can avoid the merge function call cost
		// if we know for sure that the source will have no effect
		// by checking if it is the zero value.
		if unsafeAllowed {
			switch tf.Kind() {
			case reflect.Ptr, reflect.Slice, reflect.String:
				// As a special case, we assume slices and strings are pointers
				// since we know that the first field in the SliceSlice or
				// StringHeader is a data pointer.
				mfi.isPointer = true
			case reflect.Bool:
				mfi.basicWidth = 1
			case reflect.Int32, reflect.Uint32, reflect.Float32:
				mfi.basicWidth = 4
			case reflect.Int64, reflect.Uint64, reflect.Float64:
				mfi.basicWidth = 8
			}
		}

		// Unwrap tf to get at its most basic type.
		var isPointer, isSlice bool
		if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 {
			isSlice = true
			tf = tf.Elem()
		}
		if tf.Kind() == reflect.Ptr {
			isPointer = true
			tf = tf.Elem()
		}
		if isPointer && isSlice && tf.Kind() != reflect.Struct {
			panic("both pointer and slice for basic type in " + tf.Name())
		}

		switch tf.Kind() {
		case reflect.Int32:
			switch {
			case isSlice: // E.g., []int32
				mfi.merge = func(dst, src pointer) {
					// NOTE: toInt32Slice is not defined (see pointer_reflect.go).
					sfs := src.getInt32Slice()
					if sfs != nil {
						dfs := dst.getInt32Slice()
						dfs = append(dfs, sfs...)
						if dfs == nil {
							dfs = []int32{}
						}
						dst.setInt32Slice(dfs)
					}
				}
			case isPointer: // E.g., *int32
				mfi.merge = func(dst, src pointer) {
					// NOTE: toInt32Ptr is not defined (see pointer_reflect.go).
					sfp := src.getInt32Ptr()
					if sfp != nil {
						dfp := dst.getInt32Ptr()
						if dfp == nil {
							dst.setInt32Ptr(*sfp)
						} else {
							*dfp = *sfp
						}
					}
				}
			default: // E.g., int32
				mfi.merge = func(dst, src pointer) {
					if v := *src.toInt32(); v != 0 {
						*dst.toInt32() = v
					}
				}
			}
		case reflect.Int64:
			switch {
			case isSlice: // E.g., []int64
				mfi.merge = func(dst, src pointer) {
					sfsp := src.toInt64Slice()
					if *sfsp != nil {
						dfsp := dst.toInt64Slice()
						*dfsp = append(*dfsp, *sfsp...)
						if *dfsp == nil {
							*dfsp = []int64{}
						}
					}
				}
			case isPointer: // E.g., *int64
				mfi.merge = func(dst, src pointer) {
					sfpp := src.toInt64Ptr()
					if *sfpp != nil {
						dfpp := dst.toInt64Ptr()
						if *dfpp == nil {
							*dfpp = Int64(**sfpp)
						} else {
							**dfpp = **sfpp
						}
					}
				}
			default: // E.g., int64
				mfi.merge = func(dst, src pointer) {
					if v := *src.toInt64(); v != 0 {
						*dst.toInt64() = v
					}
				}
			}
		// The Uint32, Uint64, Float32, Float64, Bool, and String cases follow
		// the same isSlice/isPointer/value pattern as Int64 above, each using
		// its own typed accessors (toUint32Slice, toBoolPtr, toString, ...).
		case reflect.Slice:
			isProto3 := props.Prop[i].proto3
			switch {
			case isPointer:
				panic("bad pointer in byte slice case in " + tf.Name())
			case tf.Elem().Kind() != reflect.Uint8:
				panic("bad element kind in byte slice case in " + tf.Name())
			case isSlice: // E.g., [][]byte
				mfi.merge = func(dst, src pointer) {
					sbsp := src.toBytesSlice()
					if *sbsp != nil {
						dbsp := dst.toBytesSlice()
						for _, sb := range *sbsp {
							if sb == nil {
								*dbsp = append(*dbsp, nil)
							} else {
								*dbsp = append(*dbsp, append([]byte{}, sb...))
							}
						}
						if *dbsp == nil {
							*dbsp = [][]byte{}
						}
					}
				}
			default: // E.g., []byte
				mfi.merge = func(dst, src pointer) {
					sbp := src.toBytes()
					if *sbp != nil {
						dbp := dst.toBytes()
						if !isProto3 || len(*sbp) > 0 {
							*dbp = append([]byte{}, *sbp...)
						}
					}
				}
			}
		case reflect.Struct:
			switch {
			case !isPointer:
				panic(fmt.Sprintf("message field %s without pointer", tf))
			case isSlice: // E.g., []*pb.T
				mi := getMergeInfo(tf)
				mfi.merge = func(dst, src pointer) {
					sps := src.getPointerSlice()
					if sps != nil {
						dps := dst.getPointerSlice()
						for _, sp := range sps {
							var dp pointer
							if !sp.isNil() {
								dp = valToPointer(reflect.New(tf))
								mi.merge(dp, sp)
							}
							dps = append(dps, dp)
						}
						if dps == nil {
							dps = []pointer{}
						}
						dst.setPointerSlice(dps)
					}
				}
			default: // E.g., *pb.T
				mi := getMergeInfo(tf)
				mfi.merge = func(dst, src pointer) {
					sp := src.getPointer()
					if !sp.isNil() {
						dp := dst.getPointer()
						if dp.isNil() {
							dp = valToPointer(reflect.New(tf))
							dst.setPointer(dp)
						}
						mi.merge(dp, sp)
					}
				}
			}
		case reflect.Map:
			switch {
			case isPointer || isSlice:
				panic("bad pointer or slice in map case in " + tf.Name())
			default: // E.g., map[K]V
				mfi.merge = func(dst, src pointer) {
					sm := src.asPointerTo(tf).Elem()
					if sm.Len() == 0 {
						return
					}
					dm := dst.asPointerTo(tf).Elem()
					if dm.IsNil() {
						dm.Set(reflect.MakeMap(tf))
					}

					switch tf.Elem().Kind() {
					case reflect.Ptr: // Proto struct (e.g., *T)
						for _, key := range sm.MapKeys() {
							val := sm.MapIndex(key)
							val = reflect.ValueOf(Clone(val.Interface().(Message)))
							dm.SetMapIndex(key, val)
						}
					case reflect.Slice: // E.g. Bytes type (e.g., []byte)
						for _, key := range sm.MapKeys() {
							val := sm.MapIndex(key)
							val = reflect.ValueOf(append([]byte{}, val.Bytes()...))
							dm.SetMapIndex(key, val)
						}
					default: // Basic type (e.g., string)
						for _, key := range sm.MapKeys() {
							val := sm.MapIndex(key)
							dm.SetMapIndex(key, val)
						}
					}
				}
			}
		case reflect.Interface:
			// Must be oneof field.
			switch {
			case isPointer || isSlice:
				panic("bad pointer or slice in interface case in " + tf.Name())
			default: // E.g., interface{}
				// TODO: Make this faster?
				mfi.merge = func(dst, src pointer) {
					su := src.asPointerTo(tf).Elem()
					if !su.IsNil() {
						du := dst.asPointerTo(tf).Elem()
						typ := su.Elem().Type()
						if du.IsNil() || du.Elem().Type() != typ {
							du.Set(reflect.New(typ.Elem())) // Initialize interface if empty
						}
						sv := su.Elem().Elem().Field(0)
						if sv.Kind() == reflect.Ptr && sv.IsNil() {
							return
						}
						dv := du.Elem().Elem().Field(0)
						if dv.Kind() == reflect.Ptr && dv.IsNil() {
							dv.Set(reflect.New(sv.Type().Elem())) // Initialize proto message if empty
						}
						switch sv.Type().Kind() {
						case reflect.Ptr: // Proto struct (e.g., *T)
							Merge(dv.Interface().(Message), sv.Interface().(Message))
						case reflect.Slice: // E.g. Bytes type (e.g., []byte)
							dv.Set(reflect.ValueOf(append([]byte{}, sv.Bytes()...)))
						default: // Basic type (e.g., string)
							dv.Set(sv)
						}
					}
				}
			}
		default:
			panic(fmt.Sprintf("merger not found for type:%s", tf))
		}
		mi.fields = append(mi.fields, mfi)
	}

	mi.unrecognized = invalidField
	if f, ok := t.FieldByName("XXX_unrecognized"); ok {
		if f.Type != reflect.TypeOf([]byte{}) {
			panic("expected XXX_unrecognized to be of type []byte")
		}
		mi.unrecognized = toField(&f)
	}

	atomic.StoreInt32(&mi.initialized, 1)
}
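The table above boils down to a handful of per-kind rules: scalar fields are copied only when the source value is non-zero, repeated fields are appended rather than replaced, nested messages are merged recursively, and unrecognized bytes are cloned. A minimal hand-written sketch of the scalar and repeated rules follows; the note struct and mergeNote function are invented for illustration and are not part of the library.

package main

import "fmt"

// note is a stand-in struct used only to illustrate the merge rules the
// generated table applies to each field kind.
type note struct {
	ID   int32
	Body string
	Tags []string
}

// mergeNote applies the same per-field rules as mergeInfo.merge above,
// hand-written for this one illustrative type.
func mergeNote(dst, src *note) {
	if src.ID != 0 { // scalars: copy only non-zero source values
		dst.ID = src.ID
	}
	if src.Body != "" {
		dst.Body = src.Body
	}
	if src.Tags != nil { // repeated fields: append, do not replace
		dst.Tags = append(dst.Tags, src.Tags...)
	}
}

func main() {
	dst := &note{ID: 1, Tags: []string{"a"}}
	src := &note{Body: "hello", Tags: []string{"b"}}
	mergeNote(dst, src)
	fmt.Printf("%+v\n", *dst) // {ID:1 Body:hello Tags:[a b]}
}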
2051 vendor/github.com/golang/protobuf/proto/table_unmarshal.go generated vendored Normal file
File diff suppressed because it is too large.
76 vendor/github.com/golang/protobuf/proto/text.go generated vendored
@@ -50,7 +50,6 @@ import (
 var (
 	newline         = []byte("\n")
 	spaces          = []byte(" ")
-	gtNewline       = []byte(">\n")
 	endBraceNewline = []byte("}\n")
 	backslashN      = []byte{'\\', 'n'}
 	backslashR      = []byte{'\\', 'r'}
@@ -154,7 +153,7 @@ func (w *textWriter) indent() { w.ind++ }

 func (w *textWriter) unindent() {
 	if w.ind == 0 {
-		log.Printf("proto: textWriter unindented too far")
+		log.Print("proto: textWriter unindented too far")
 		return
 	}
 	w.ind--
@@ -170,11 +169,6 @@ func writeName(w *textWriter, props *Properties) error {
 	return nil
 }

-// raw is the interface satisfied by RawMessage.
-type raw interface {
-	Bytes() []byte
-}
-
 func requiresQuotes(u string) bool {
 	// When type URL contains any characters except [0-9A-Za-z./\-]*, it must be quoted.
 	for _, ch := range u {
@@ -269,6 +263,10 @@ func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error {
 		props := sprops.Prop[i]
 		name := st.Field(i).Name

+		if name == "XXX_NoUnkeyedLiteral" {
+			continue
+		}
+
 		if strings.HasPrefix(name, "XXX_") {
 			// There are two XXX_ fields:
 			//   XXX_unrecognized []byte
@@ -355,7 +353,7 @@ func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error {
 						return err
 					}
 				}
-				if err := tm.writeAny(w, key, props.mkeyprop); err != nil {
+				if err := tm.writeAny(w, key, props.MapKeyProp); err != nil {
 					return err
 				}
 				if err := w.WriteByte('\n'); err != nil {
@@ -372,7 +370,7 @@ func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error {
 						return err
 					}
 				}
-				if err := tm.writeAny(w, val, props.mvalprop); err != nil {
+				if err := tm.writeAny(w, val, props.MapValProp); err != nil {
 					return err
 				}
 				if err := w.WriteByte('\n'); err != nil {
@@ -436,12 +434,6 @@ func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error {
 				return err
 			}
 		}
-		if b, ok := fv.Interface().(raw); ok {
-			if err := writeRaw(w, b.Bytes()); err != nil {
-				return err
-			}
-			continue
-		}

 		// Enums have a String method, so writeAny will work fine.
 		if err := tm.writeAny(w, fv, props); err != nil {
@@ -455,7 +447,7 @@ func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error {

 	// Extensions (the XXX_extensions field).
 	pv := sv.Addr()
-	if pv.Type().Implements(extendableProtoType) {
+	if _, err := extendable(pv.Interface()); err == nil {
 		if err := tm.writeExtensions(w, pv); err != nil {
 			return err
 		}
@@ -464,27 +456,6 @@ func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error {
 	return nil
 }

-// writeRaw writes an uninterpreted raw message.
-func writeRaw(w *textWriter, b []byte) error {
-	if err := w.WriteByte('<'); err != nil {
-		return err
-	}
-	if !w.compact {
-		if err := w.WriteByte('\n'); err != nil {
-			return err
-		}
-	}
-	w.indent()
-	if err := writeUnknownStruct(w, b); err != nil {
-		return err
-	}
-	w.unindent()
-	if err := w.WriteByte('>'); err != nil {
-		return err
-	}
-	return nil
-}
-
 // writeAny writes an arbitrary field.
 func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Properties) error {
 	v = reflect.Indirect(v)
@@ -513,7 +484,7 @@ func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Propert
 	switch v.Kind() {
 	case reflect.Slice:
 		// Should only be a []byte; repeated fields are handled in writeStruct.
-		if err := writeString(w, string(v.Interface().([]byte))); err != nil {
+		if err := writeString(w, string(v.Bytes())); err != nil {
 			return err
 		}
 	case reflect.String:
@@ -535,6 +506,19 @@ func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Propert
 			}
 		}
 		w.indent()
+		if v.CanAddr() {
+			// Calling v.Interface on a struct causes the reflect package to
+			// copy the entire struct. This is racy with the new Marshaler
+			// since we atomically update the XXX_sizecache.
+			//
+			// Thus, we retrieve a pointer to the struct if possible to avoid
+			// a race since v.Interface on the pointer doesn't copy the struct.
+			//
+			// If v is not addressable, then we are not worried about a race
+			// since it implies that the binary Marshaler cannot possibly be
+			// mutating this value.
+			v = v.Addr()
+		}
 		if etm, ok := v.Interface().(encoding.TextMarshaler); ok {
 			text, err := etm.MarshalText()
 			if err != nil {
@@ -543,9 +527,14 @@ func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Propert
 			if _, err = w.Write(text); err != nil {
 				return err
 			}
-		} else if err := tm.writeStruct(w, v); err != nil {
+		} else {
+			if v.Kind() == reflect.Ptr {
+				v = v.Elem()
+			}
+			if err := tm.writeStruct(w, v); err != nil {
 				return err
 			}
+		}
 		w.unindent()
 		if err := w.WriteByte(ket); err != nil {
 			return err
@@ -689,17 +678,22 @@ func (s int32Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
 // pv is assumed to be a pointer to a protocol message struct that is extendable.
 func (tm *TextMarshaler) writeExtensions(w *textWriter, pv reflect.Value) error {
 	emap := extensionMaps[pv.Type().Elem()]
-	ep := pv.Interface().(extendableProto)
+	ep, _ := extendable(pv.Interface())

 	// Order the extensions by ID.
 	// This isn't strictly necessary, but it will give us
 	// canonical output, which will also make testing easier.
-	m := ep.ExtensionMap()
+	m, mu := ep.extensionsRead()
+	if m == nil {
+		return nil
+	}
+	mu.Lock()
 	ids := make([]int32, 0, len(m))
 	for id := range m {
 		ids = append(ids, id)
 	}
 	sort.Sort(int32Slice(ids))
+	mu.Unlock()

 	for _, extNum := range ids {
 		ext := m[extNum]
135 vendor/github.com/golang/protobuf/proto/text_parser.go generated vendored
@@ -44,6 +44,9 @@ import (
 	"unicode/utf8"
 )

+// Error string emitted when deserializing Any and fields are already set
+const anyRepeatedlyUnpacked = "Any message unpacked multiple times, or %q already set"
+
 type ParseError struct {
 	Message string
 	Line    int // 1-based line number
@@ -203,7 +206,6 @@ func (p *textParser) advance() {

 var (
 	errBadUTF8 = errors.New("proto: bad UTF-8")
-	errBadHex  = errors.New("proto: bad hexadecimal")
 )

 func unquoteC(s string, quote rune) (string, error) {
@@ -274,60 +276,47 @@ func unescape(s string) (ch string, tail string, err error) {
 		return "?", s, nil // trigraph workaround
 	case '\'', '"', '\\':
 		return string(r), s, nil
-	case '0', '1', '2', '3', '4', '5', '6', '7', 'x', 'X':
+	case '0', '1', '2', '3', '4', '5', '6', '7':
 		if len(s) < 2 {
 			return "", "", fmt.Errorf(`\%c requires 2 following digits`, r)
 		}
-		base := 8
-		ss := s[:2]
+		ss := string(r) + s[:2]
 		s = s[2:]
-		if r == 'x' || r == 'X' {
-			base = 16
-		} else {
-			ss = string(r) + ss
-		}
-		i, err := strconv.ParseUint(ss, base, 8)
+		i, err := strconv.ParseUint(ss, 8, 8)
 		if err != nil {
-			return "", "", err
+			return "", "", fmt.Errorf(`\%s contains non-octal digits`, ss)
 		}
 		return string([]byte{byte(i)}), s, nil
-	case 'u', 'U':
-		n := 4
-		if r == 'U' {
+	case 'x', 'X', 'u', 'U':
+		var n int
+		switch r {
+		case 'x', 'X':
+			n = 2
+		case 'u':
+			n = 4
+		case 'U':
 			n = 8
 		}
 		if len(s) < n {
-			return "", "", fmt.Errorf(`\%c requires %d digits`, r, n)
-		}
-
-		bs := make([]byte, n/2)
-		for i := 0; i < n; i += 2 {
-			a, ok1 := unhex(s[i])
-			b, ok2 := unhex(s[i+1])
-			if !ok1 || !ok2 {
-				return "", "", errBadHex
-			}
-			bs[i/2] = a<<4 | b
+			return "", "", fmt.Errorf(`\%c requires %d following digits`, r, n)
 		}
+		ss := s[:n]
 		s = s[n:]
-		return string(bs), s, nil
+		i, err := strconv.ParseUint(ss, 16, 64)
+		if err != nil {
+			return "", "", fmt.Errorf(`\%c%s contains non-hexadecimal digits`, r, ss)
+		}
+		if r == 'x' || r == 'X' {
+			return string([]byte{byte(i)}), s, nil
+		}
+		if i > utf8.MaxRune {
+			return "", "", fmt.Errorf(`\%c%s is not a valid Unicode code point`, r, ss)
+		}
+		return string(i), s, nil
 	}
 	return "", "", fmt.Errorf(`unknown escape \%c`, r)
 }

-// Adapted from src/pkg/strconv/quote.go.
-func unhex(b byte) (v byte, ok bool) {
-	switch {
-	case '0' <= b && b <= '9':
-		return b - '0', true
-	case 'a' <= b && b <= 'f':
-		return b - 'a' + 10, true
-	case 'A' <= b && b <= 'F':
-		return b - 'A' + 10, true
-	}
-	return 0, false
-}
-
 // Back off the parser by one token. Can only be done between calls to next().
 // It makes the next advance() a no-op.
 func (p *textParser) back() { p.backed = true }
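The rewritten unescape above routes octal escapes through strconv.ParseUint with base 8 and the \x, \u and \U forms through base 16, instead of the hand-rolled unhex helper it replaces. A standalone illustration of those three paths, with arbitrary example digits:

package main

import (
	"fmt"
	"strconv"
)

func main() {
	// Octal escapes keep the leading digit: `\101` parses as 0o101, which is 'A'.
	if v, err := strconv.ParseUint("101", 8, 8); err == nil {
		fmt.Printf("%c\n", byte(v)) // A
	}
	// `\x41` parses its digits as hex and yields a raw byte.
	if v, err := strconv.ParseUint("41", 16, 64); err == nil {
		fmt.Printf("%c\n", byte(v)) // A
	}
	// `\u00e9` parses as hex and yields a Unicode code point.
	if v, err := strconv.ParseUint("00e9", 16, 64); err == nil {
		fmt.Printf("%c\n", rune(v)) // é
	}
}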
@ -508,8 +497,16 @@ func (p *textParser) readStruct(sv reflect.Value, terminator string) error {
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return p.errorf("failed to marshal message of type %q: %v", messageName, err)
|
return p.errorf("failed to marshal message of type %q: %v", messageName, err)
|
||||||
}
|
}
|
||||||
|
if fieldSet["type_url"] {
|
||||||
|
return p.errorf(anyRepeatedlyUnpacked, "type_url")
|
||||||
|
}
|
||||||
|
if fieldSet["value"] {
|
||||||
|
return p.errorf(anyRepeatedlyUnpacked, "value")
|
||||||
|
}
|
||||||
sv.FieldByName("TypeUrl").SetString(extName)
|
sv.FieldByName("TypeUrl").SetString(extName)
|
||||||
sv.FieldByName("Value").SetBytes(b)
|
sv.FieldByName("Value").SetBytes(b)
|
||||||
|
fieldSet["type_url"] = true
|
||||||
|
fieldSet["value"] = true
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -550,7 +547,7 @@ func (p *textParser) readStruct(sv reflect.Value, terminator string) error {
|
||||||
}
|
}
|
||||||
reqFieldErr = err
|
reqFieldErr = err
|
||||||
}
|
}
|
||||||
ep := sv.Addr().Interface().(extendableProto)
|
ep := sv.Addr().Interface().(Message)
|
||||||
if !rep {
|
if !rep {
|
||||||
SetExtension(ep, desc, ext.Interface())
|
SetExtension(ep, desc, ext.Interface())
|
||||||
} else {
|
} else {
|
||||||
|
@ -581,7 +578,11 @@ func (p *textParser) readStruct(sv reflect.Value, terminator string) error {
|
||||||
props = oop.Prop
|
props = oop.Prop
|
||||||
nv := reflect.New(oop.Type.Elem())
|
nv := reflect.New(oop.Type.Elem())
|
||||||
dst = nv.Elem().Field(0)
|
dst = nv.Elem().Field(0)
|
||||||
sv.Field(oop.Field).Set(nv)
|
field := sv.Field(oop.Field)
|
||||||
|
if !field.IsNil() {
|
||||||
|
return p.errorf("field '%s' would overwrite already parsed oneof '%s'", name, sv.Type().Field(oop.Field).Name)
|
||||||
|
}
|
||||||
|
field.Set(nv)
|
||||||
}
|
}
|
||||||
if !dst.IsValid() {
|
if !dst.IsValid() {
|
||||||
return p.errorf("unknown field name %q in %v", name, st)
|
return p.errorf("unknown field name %q in %v", name, st)
|
||||||
|
@ -602,8 +603,9 @@ func (p *textParser) readStruct(sv reflect.Value, terminator string) error {
|
||||||
|
|
||||||
// The map entry should be this sequence of tokens:
|
// The map entry should be this sequence of tokens:
|
||||||
// < key : KEY value : VALUE >
|
// < key : KEY value : VALUE >
|
||||||
// Technically the "key" and "value" could come in any order,
|
// However, implementations may omit key or value, and technically
|
||||||
// but in practice they won't.
|
// we should support them in any order. See b/28924776 for a time
|
||||||
|
// this went wrong.
|
||||||
|
|
||||||
tok := p.next()
|
tok := p.next()
|
||||||
var terminator string
|
var terminator string
|
||||||
|
@ -615,32 +617,39 @@ func (p *textParser) readStruct(sv reflect.Value, terminator string) error {
|
||||||
default:
|
default:
|
||||||
return p.errorf("expected '{' or '<', found %q", tok.value)
|
return p.errorf("expected '{' or '<', found %q", tok.value)
|
||||||
}
|
}
|
||||||
if err := p.consumeToken("key"); err != nil {
|
for {
|
||||||
return err
|
tok := p.next()
|
||||||
|
if tok.err != nil {
|
||||||
|
return tok.err
|
||||||
}
|
}
|
||||||
|
if tok.value == terminator {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
switch tok.value {
|
||||||
|
case "key":
|
||||||
if err := p.consumeToken(":"); err != nil {
|
if err := p.consumeToken(":"); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
if err := p.readAny(key, props.mkeyprop); err != nil {
|
if err := p.readAny(key, props.MapKeyProp); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
if err := p.consumeOptionalSeparator(); err != nil {
|
if err := p.consumeOptionalSeparator(); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
if err := p.consumeToken("value"); err != nil {
|
case "value":
|
||||||
|
if err := p.checkForColon(props.MapValProp, dst.Type().Elem()); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
if err := p.checkForColon(props.mvalprop, dst.Type().Elem()); err != nil {
|
if err := p.readAny(val, props.MapValProp); err != nil {
|
||||||
return err
|
|
||||||
}
|
|
||||||
if err := p.readAny(val, props.mvalprop); err != nil {
|
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
if err := p.consumeOptionalSeparator(); err != nil {
|
if err := p.consumeOptionalSeparator(); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
if err := p.consumeToken(terminator); err != nil {
|
default:
|
||||||
return err
|
p.back()
|
||||||
|
return p.errorf(`expected "key", "value", or %q, found %q`, terminator, tok.value)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
dst.SetMapIndex(key, val)
|
dst.SetMapIndex(key, val)
|
||||||
|
@ -663,7 +672,8 @@ func (p *textParser) readStruct(sv reflect.Value, terminator string) error {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
reqFieldErr = err
|
reqFieldErr = err
|
||||||
} else if props.Required {
|
}
|
||||||
|
if props.Required {
|
||||||
reqCount--
|
reqCount--
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -704,6 +714,9 @@ func (p *textParser) consumeExtName() (string, error) {
|
||||||
if tok.err != nil {
|
if tok.err != nil {
|
||||||
return "", p.errorf("unrecognized type_url or extension name: %s", tok.err)
|
return "", p.errorf("unrecognized type_url or extension name: %s", tok.err)
|
||||||
}
|
}
|
||||||
|
if p.done && tok.value != "]" {
|
||||||
|
return "", p.errorf("unclosed type_url or extension name")
|
||||||
|
}
|
||||||
}
|
}
|
||||||
return strings.Join(parts, ""), nil
|
return strings.Join(parts, ""), nil
|
||||||
}
|
}
|
||||||
|
@ -772,12 +785,12 @@ func (p *textParser) readAny(v reflect.Value, props *Properties) error {
|
||||||
fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem()))
|
fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem()))
|
||||||
return p.readAny(fv.Index(fv.Len()-1), props)
|
return p.readAny(fv.Index(fv.Len()-1), props)
|
||||||
case reflect.Bool:
|
case reflect.Bool:
|
||||||
// Either "true", "false", 1 or 0.
|
// true/1/t/True or false/f/0/False.
|
||||||
switch tok.value {
|
switch tok.value {
|
||||||
case "true", "1":
|
case "true", "1", "t", "True":
|
||||||
fv.SetBool(true)
|
fv.SetBool(true)
|
||||||
return nil
|
return nil
|
||||||
case "false", "0":
|
case "false", "0", "f", "False":
|
||||||
fv.SetBool(false)
|
fv.SetBool(false)
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
@ -859,13 +872,9 @@ func (p *textParser) readAny(v reflect.Value, props *Properties) error {
|
||||||
// UnmarshalText returns *RequiredNotSetError.
|
// UnmarshalText returns *RequiredNotSetError.
|
||||||
func UnmarshalText(s string, pb Message) error {
|
func UnmarshalText(s string, pb Message) error {
|
||||||
if um, ok := pb.(encoding.TextUnmarshaler); ok {
|
if um, ok := pb.(encoding.TextUnmarshaler); ok {
|
||||||
err := um.UnmarshalText([]byte(s))
|
return um.UnmarshalText([]byte(s))
|
||||||
return err
|
|
||||||
}
|
}
|
||||||
pb.Reset()
|
pb.Reset()
|
||||||
v := reflect.ValueOf(pb)
|
v := reflect.ValueOf(pb)
|
||||||
if pe := newTextParser(s).readStruct(v.Elem(), ""); pe != nil {
|
return newTextParser(s).readStruct(v.Elem(), "")
|
||||||
return pe
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
}
|
||||||
|
|
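The text_parser.go hunks above change how golang/protobuf reads text-format map entries: instead of demanding the fixed token sequence `key … value …`, the parser now loops until the terminator and accepts "key" and "value" in either order (tolerating an omitted one). Purely as an illustrative, dependency-free sketch of that loop shape (not the proto parser itself; tokenizing is reduced to whitespace splitting):

```go
package main

import (
	"fmt"
	"strings"
)

// parseEntry mirrors the pattern of the patched readStruct map-entry loop:
// dispatch on whichever of "key"/"value" comes next until the terminator.
func parseEntry(fields []string) (key, val string, err error) {
	for i := 0; i < len(fields); {
		switch fields[i] {
		case ">": // terminator
			return key, val, nil
		case "key", "value":
			if i+1 >= len(fields) {
				return "", "", fmt.Errorf("missing operand after %q", fields[i])
			}
			if fields[i] == "key" {
				key = fields[i+1]
			} else {
				val = fields[i+1]
			}
			i += 2
		default:
			return "", "", fmt.Errorf(`expected "key", "value", or ">", found %q`, fields[i])
		}
	}
	return "", "", fmt.Errorf("unclosed map entry")
}

func main() {
	// Both orderings parse, matching the behaviour the diff introduces.
	for _, s := range []string{"key k value v >", "value v key k >"} {
		k, v, err := parseEntry(strings.Fields(s))
		fmt.Println(k, v, err)
	}
}
```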
19 vendor/github.com/gorilla/context/.travis.yml generated vendored
|
@ -1,19 +0,0 @@
|
||||||
language: go
|
|
||||||
sudo: false
|
|
||||||
|
|
||||||
matrix:
|
|
||||||
include:
|
|
||||||
- go: 1.3
|
|
||||||
- go: 1.4
|
|
||||||
- go: 1.5
|
|
||||||
- go: 1.6
|
|
||||||
- go: 1.7
|
|
||||||
- go: tip
|
|
||||||
allow_failures:
|
|
||||||
- go: tip
|
|
||||||
|
|
||||||
script:
|
|
||||||
- go get -t -v ./...
|
|
||||||
- diff -u <(echo -n) <(gofmt -d .)
|
|
||||||
- go vet $(go list ./... | grep -v /vendor/)
|
|
||||||
- go test -v -race ./...
|
|
27 vendor/github.com/gorilla/context/LICENSE generated vendored
|
@ -1,27 +0,0 @@
|
||||||
Copyright (c) 2012 Rodrigo Moraes. All rights reserved.
|
|
||||||
|
|
||||||
Redistribution and use in source and binary forms, with or without
|
|
||||||
modification, are permitted provided that the following conditions are
|
|
||||||
met:
|
|
||||||
|
|
||||||
* Redistributions of source code must retain the above copyright
|
|
||||||
notice, this list of conditions and the following disclaimer.
|
|
||||||
* Redistributions in binary form must reproduce the above
|
|
||||||
copyright notice, this list of conditions and the following disclaimer
|
|
||||||
in the documentation and/or other materials provided with the
|
|
||||||
distribution.
|
|
||||||
* Neither the name of Google Inc. nor the names of its
|
|
||||||
contributors may be used to endorse or promote products derived from
|
|
||||||
this software without specific prior written permission.
|
|
||||||
|
|
||||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
|
||||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
|
||||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
|
||||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
|
||||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
|
||||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
|
||||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
|
||||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
|
||||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
|
||||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
|
||||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|
10 vendor/github.com/gorilla/context/README.md generated vendored
|
@ -1,10 +0,0 @@
|
||||||
context
|
|
||||||
=======
|
|
||||||
[![Build Status](https://travis-ci.org/gorilla/context.png?branch=master)](https://travis-ci.org/gorilla/context)
|
|
||||||
|
|
||||||
gorilla/context is a general purpose registry for global request variables.
|
|
||||||
|
|
||||||
> Note: gorilla/context, having been born well before `context.Context` existed, does not play well
|
|
||||||
> with the shallow copying of the request that [`http.Request.WithContext`](https://golang.org/pkg/net/http/#Request.WithContext) (added to net/http Go 1.7 onwards) performs. You should either use *just* gorilla/context, or moving forward, the new `http.Request.Context()`.
|
|
||||||
|
|
||||||
Read the full documentation here: http://www.gorillatoolkit.org/pkg/context
|
|
143 vendor/github.com/gorilla/context/context.go generated vendored
|
@ -1,143 +0,0 @@
|
||||||
// Copyright 2012 The Gorilla Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package context
|
|
||||||
|
|
||||||
import (
|
|
||||||
"net/http"
|
|
||||||
"sync"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
var (
|
|
||||||
mutex sync.RWMutex
|
|
||||||
data = make(map[*http.Request]map[interface{}]interface{})
|
|
||||||
datat = make(map[*http.Request]int64)
|
|
||||||
)
|
|
||||||
|
|
||||||
// Set stores a value for a given key in a given request.
|
|
||||||
func Set(r *http.Request, key, val interface{}) {
|
|
||||||
mutex.Lock()
|
|
||||||
if data[r] == nil {
|
|
||||||
data[r] = make(map[interface{}]interface{})
|
|
||||||
datat[r] = time.Now().Unix()
|
|
||||||
}
|
|
||||||
data[r][key] = val
|
|
||||||
mutex.Unlock()
|
|
||||||
}
|
|
||||||
|
|
||||||
// Get returns a value stored for a given key in a given request.
|
|
||||||
func Get(r *http.Request, key interface{}) interface{} {
|
|
||||||
mutex.RLock()
|
|
||||||
if ctx := data[r]; ctx != nil {
|
|
||||||
value := ctx[key]
|
|
||||||
mutex.RUnlock()
|
|
||||||
return value
|
|
||||||
}
|
|
||||||
mutex.RUnlock()
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetOk returns stored value and presence state like multi-value return of map access.
|
|
||||||
func GetOk(r *http.Request, key interface{}) (interface{}, bool) {
|
|
||||||
mutex.RLock()
|
|
||||||
if _, ok := data[r]; ok {
|
|
||||||
value, ok := data[r][key]
|
|
||||||
mutex.RUnlock()
|
|
||||||
return value, ok
|
|
||||||
}
|
|
||||||
mutex.RUnlock()
|
|
||||||
return nil, false
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetAll returns all stored values for the request as a map. Nil is returned for invalid requests.
|
|
||||||
func GetAll(r *http.Request) map[interface{}]interface{} {
|
|
||||||
mutex.RLock()
|
|
||||||
if context, ok := data[r]; ok {
|
|
||||||
result := make(map[interface{}]interface{}, len(context))
|
|
||||||
for k, v := range context {
|
|
||||||
result[k] = v
|
|
||||||
}
|
|
||||||
mutex.RUnlock()
|
|
||||||
return result
|
|
||||||
}
|
|
||||||
mutex.RUnlock()
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetAllOk returns all stored values for the request as a map and a boolean value that indicates if
|
|
||||||
// the request was registered.
|
|
||||||
func GetAllOk(r *http.Request) (map[interface{}]interface{}, bool) {
|
|
||||||
mutex.RLock()
|
|
||||||
context, ok := data[r]
|
|
||||||
result := make(map[interface{}]interface{}, len(context))
|
|
||||||
for k, v := range context {
|
|
||||||
result[k] = v
|
|
||||||
}
|
|
||||||
mutex.RUnlock()
|
|
||||||
return result, ok
|
|
||||||
}
|
|
||||||
|
|
||||||
// Delete removes a value stored for a given key in a given request.
|
|
||||||
func Delete(r *http.Request, key interface{}) {
|
|
||||||
mutex.Lock()
|
|
||||||
if data[r] != nil {
|
|
||||||
delete(data[r], key)
|
|
||||||
}
|
|
||||||
mutex.Unlock()
|
|
||||||
}
|
|
||||||
|
|
||||||
// Clear removes all values stored for a given request.
|
|
||||||
//
|
|
||||||
// This is usually called by a handler wrapper to clean up request
|
|
||||||
// variables at the end of a request lifetime. See ClearHandler().
|
|
||||||
func Clear(r *http.Request) {
|
|
||||||
mutex.Lock()
|
|
||||||
clear(r)
|
|
||||||
mutex.Unlock()
|
|
||||||
}
|
|
||||||
|
|
||||||
// clear is Clear without the lock.
|
|
||||||
func clear(r *http.Request) {
|
|
||||||
delete(data, r)
|
|
||||||
delete(datat, r)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Purge removes request data stored for longer than maxAge, in seconds.
|
|
||||||
// It returns the amount of requests removed.
|
|
||||||
//
|
|
||||||
// If maxAge <= 0, all request data is removed.
|
|
||||||
//
|
|
||||||
// This is only used for sanity check: in case context cleaning was not
|
|
||||||
// properly set some request data can be kept forever, consuming an increasing
|
|
||||||
// amount of memory. In case this is detected, Purge() must be called
|
|
||||||
// periodically until the problem is fixed.
|
|
||||||
func Purge(maxAge int) int {
|
|
||||||
mutex.Lock()
|
|
||||||
count := 0
|
|
||||||
if maxAge <= 0 {
|
|
||||||
count = len(data)
|
|
||||||
data = make(map[*http.Request]map[interface{}]interface{})
|
|
||||||
datat = make(map[*http.Request]int64)
|
|
||||||
} else {
|
|
||||||
min := time.Now().Unix() - int64(maxAge)
|
|
||||||
for r := range data {
|
|
||||||
if datat[r] < min {
|
|
||||||
clear(r)
|
|
||||||
count++
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
mutex.Unlock()
|
|
||||||
return count
|
|
||||||
}
|
|
||||||
|
|
||||||
// ClearHandler wraps an http.Handler and clears request values at the end
|
|
||||||
// of a request lifetime.
|
|
||||||
func ClearHandler(h http.Handler) http.Handler {
|
|
||||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
|
||||||
defer Clear(r)
|
|
||||||
h.ServeHTTP(w, r)
|
|
||||||
})
|
|
||||||
}
|
|
88 vendor/github.com/gorilla/context/doc.go generated vendored
|
@ -1,88 +0,0 @@
|
||||||
// Copyright 2012 The Gorilla Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
/*
|
|
||||||
Package context stores values shared during a request lifetime.
|
|
||||||
|
|
||||||
Note: gorilla/context, having been born well before `context.Context` existed,
|
|
||||||
does not play well > with the shallow copying of the request that
|
|
||||||
[`http.Request.WithContext`](https://golang.org/pkg/net/http/#Request.WithContext)
|
|
||||||
(added to net/http Go 1.7 onwards) performs. You should either use *just*
|
|
||||||
gorilla/context, or moving forward, the new `http.Request.Context()`.
|
|
||||||
|
|
||||||
For example, a router can set variables extracted from the URL and later
|
|
||||||
application handlers can access those values, or it can be used to store
|
|
||||||
sessions values to be saved at the end of a request. There are several
|
|
||||||
others common uses.
|
|
||||||
|
|
||||||
The idea was posted by Brad Fitzpatrick to the go-nuts mailing list:
|
|
||||||
|
|
||||||
http://groups.google.com/group/golang-nuts/msg/e2d679d303aa5d53
|
|
||||||
|
|
||||||
Here's the basic usage: first define the keys that you will need. The key
|
|
||||||
type is interface{} so a key can be of any type that supports equality.
|
|
||||||
Here we define a key using a custom int type to avoid name collisions:
|
|
||||||
|
|
||||||
package foo
|
|
||||||
|
|
||||||
import (
|
|
||||||
"github.com/gorilla/context"
|
|
||||||
)
|
|
||||||
|
|
||||||
type key int
|
|
||||||
|
|
||||||
const MyKey key = 0
|
|
||||||
|
|
||||||
Then set a variable. Variables are bound to an http.Request object, so you
|
|
||||||
need a request instance to set a value:
|
|
||||||
|
|
||||||
context.Set(r, MyKey, "bar")
|
|
||||||
|
|
||||||
The application can later access the variable using the same key you provided:
|
|
||||||
|
|
||||||
func MyHandler(w http.ResponseWriter, r *http.Request) {
|
|
||||||
// val is "bar".
|
|
||||||
val := context.Get(r, foo.MyKey)
|
|
||||||
|
|
||||||
// returns ("bar", true)
|
|
||||||
val, ok := context.GetOk(r, foo.MyKey)
|
|
||||||
// ...
|
|
||||||
}
|
|
||||||
|
|
||||||
And that's all about the basic usage. We discuss some other ideas below.
|
|
||||||
|
|
||||||
Any type can be stored in the context. To enforce a given type, make the key
|
|
||||||
private and wrap Get() and Set() to accept and return values of a specific
|
|
||||||
type:
|
|
||||||
|
|
||||||
type key int
|
|
||||||
|
|
||||||
const mykey key = 0
|
|
||||||
|
|
||||||
// GetMyKey returns a value for this package from the request values.
|
|
||||||
func GetMyKey(r *http.Request) SomeType {
|
|
||||||
if rv := context.Get(r, mykey); rv != nil {
|
|
||||||
return rv.(SomeType)
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetMyKey sets a value for this package in the request values.
|
|
||||||
func SetMyKey(r *http.Request, val SomeType) {
|
|
||||||
context.Set(r, mykey, val)
|
|
||||||
}
|
|
||||||
|
|
||||||
Variables must be cleared at the end of a request, to remove all values
|
|
||||||
that were stored. This can be done in an http.Handler, after a request was
|
|
||||||
served. Just call Clear() passing the request:
|
|
||||||
|
|
||||||
context.Clear(r)
|
|
||||||
|
|
||||||
...or use ClearHandler(), which conveniently wraps an http.Handler to clear
|
|
||||||
variables at the end of a request lifetime.
|
|
||||||
|
|
||||||
The Routers from the packages gorilla/mux and gorilla/pat call Clear()
|
|
||||||
so if you are using either of them you don't need to clear the context manually.
|
|
||||||
*/
|
|
||||||
package context
|
|
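The gorilla/context package is deleted in the hunks above because the updated gorilla/mux keeps route data on the request's own `context.Context` (Go 1.7+). For callers that used the gorilla/context idiom, the standard-library equivalent looks roughly like the sketch below; the key and handler names are illustrative only.

```go
package main

import (
	"context"
	"fmt"
	"net/http"
	"net/http/httptest"
)

type ctxKey int

const userKey ctxKey = 0

// withUser plays the role of context.Set: it attaches a value to the request's
// own context and passes the shallow copy down the chain.
func withUser(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		next.ServeHTTP(w, r.WithContext(context.WithValue(r.Context(), userKey, "alice")))
	})
}

// handler plays the role of context.Get.
func handler(w http.ResponseWriter, r *http.Request) {
	fmt.Fprintln(w, r.Context().Value(userKey))
}

func main() {
	srv := withUser(http.HandlerFunc(handler))
	rec := httptest.NewRecorder()
	srv.ServeHTTP(rec, httptest.NewRequest("GET", "/", nil))
	fmt.Print(rec.Body.String()) // alice
}
```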
20 vendor/github.com/gorilla/mux/.travis.yml generated vendored
|
@ -1,15 +1,17 @@
|
||||||
language: go
|
language: go
|
||||||
sudo: false
|
|
||||||
|
|
||||||
matrix:
|
matrix:
|
||||||
include:
|
include:
|
||||||
- go: 1.2
|
- go: 1.7.x
|
||||||
- go: 1.3
|
- go: 1.8.x
|
||||||
- go: 1.4
|
- go: 1.9.x
|
||||||
- go: 1.5
|
- go: 1.10.x
|
||||||
- go: 1.6
|
- go: 1.11.x
|
||||||
- go: 1.7
|
- go: 1.x
|
||||||
- go: 1.8
|
env: LATEST=true
|
||||||
|
- go: tip
|
||||||
|
allow_failures:
|
||||||
- go: tip
|
- go: tip
|
||||||
|
|
||||||
install:
|
install:
|
||||||
|
@ -18,5 +20,5 @@ install:
|
||||||
script:
|
script:
|
||||||
- go get -t -v ./...
|
- go get -t -v ./...
|
||||||
- diff -u <(echo -n) <(gofmt -d .)
|
- diff -u <(echo -n) <(gofmt -d .)
|
||||||
- go tool vet .
|
- if [[ "$LATEST" = true ]]; then go vet .; fi
|
||||||
- go test -v -race ./...
|
- go test -v -race ./...
|
||||||
|
|
8 vendor/github.com/gorilla/mux/AUTHORS generated vendored Normal file
|
@ -0,0 +1,8 @@
|
||||||
|
# This is the official list of gorilla/mux authors for copyright purposes.
|
||||||
|
#
|
||||||
|
# Please keep the list sorted.
|
||||||
|
|
||||||
|
Google LLC (https://opensource.google.com/)
|
||||||
|
Kamil Kisielk <kamil@kamilkisiel.net>
|
||||||
|
Matt Silverlock <matt@eatsleeprepeat.net>
|
||||||
|
Rodrigo Moraes (https://github.com/moraes)
|
11 vendor/github.com/gorilla/mux/ISSUE_TEMPLATE.md generated vendored Normal file
|
@ -0,0 +1,11 @@
|
||||||
|
**What version of Go are you running?** (Paste the output of `go version`)
|
||||||
|
|
||||||
|
|
||||||
|
**What version of gorilla/mux are you at?** (Paste the output of `git rev-parse HEAD` inside `$GOPATH/src/github.com/gorilla/mux`)
|
||||||
|
|
||||||
|
|
||||||
|
**Describe your problem** (and what you have tried so far)
|
||||||
|
|
||||||
|
|
||||||
|
**Paste a minimal, runnable, reproduction of your issue below** (use backticks to format it)
|
||||||
|
|
2 vendor/github.com/gorilla/mux/LICENSE generated vendored
|
@ -1,4 +1,4 @@
|
||||||
Copyright (c) 2012 Rodrigo Moraes. All rights reserved.
|
Copyright (c) 2012-2018 The Gorilla Authors. All rights reserved.
|
||||||
|
|
||||||
Redistribution and use in source and binary forms, with or without
|
Redistribution and use in source and binary forms, with or without
|
||||||
modification, are permitted provided that the following conditions are
|
modification, are permitted provided that the following conditions are
|
||||||
|
|
407 vendor/github.com/gorilla/mux/README.md generated vendored
|
@ -1,12 +1,12 @@
|
||||||
gorilla/mux
|
# gorilla/mux
|
||||||
===
|
|
||||||
[![GoDoc](https://godoc.org/github.com/gorilla/mux?status.svg)](https://godoc.org/github.com/gorilla/mux)
|
[![GoDoc](https://godoc.org/github.com/gorilla/mux?status.svg)](https://godoc.org/github.com/gorilla/mux)
|
||||||
[![Build Status](https://travis-ci.org/gorilla/mux.svg?branch=master)](https://travis-ci.org/gorilla/mux)
|
[![Build Status](https://travis-ci.org/gorilla/mux.svg?branch=master)](https://travis-ci.org/gorilla/mux)
|
||||||
[![Sourcegraph](https://sourcegraph.com/github.com/gorilla/mux/-/badge.svg)](https://sourcegraph.com/github.com/gorilla/mux?badge)
|
[![Sourcegraph](https://sourcegraph.com/github.com/gorilla/mux/-/badge.svg)](https://sourcegraph.com/github.com/gorilla/mux?badge)
|
||||||
|
|
||||||
![Gorilla Logo](http://www.gorillatoolkit.org/static/images/gorilla-icon-64.png)
|
![Gorilla Logo](http://www.gorillatoolkit.org/static/images/gorilla-icon-64.png)
|
||||||
|
|
||||||
http://www.gorillatoolkit.org/pkg/mux
|
https://www.gorillatoolkit.org/pkg/mux
|
||||||
|
|
||||||
Package `gorilla/mux` implements a request router and dispatcher for matching incoming requests to
|
Package `gorilla/mux` implements a request router and dispatcher for matching incoming requests to
|
||||||
their respective handler.
|
their respective handler.
|
||||||
|
@ -15,7 +15,7 @@ The name mux stands for "HTTP request multiplexer". Like the standard `http.Serv
|
||||||
|
|
||||||
* It implements the `http.Handler` interface so it is compatible with the standard `http.ServeMux`.
|
* It implements the `http.Handler` interface so it is compatible with the standard `http.ServeMux`.
|
||||||
* Requests can be matched based on URL host, path, path prefix, schemes, header and query values, HTTP methods or using custom matchers.
|
* Requests can be matched based on URL host, path, path prefix, schemes, header and query values, HTTP methods or using custom matchers.
|
||||||
* URL hosts and paths can have variables with an optional regular expression.
|
* URL hosts, paths and query values can have variables with an optional regular expression.
|
||||||
* Registered URLs can be built, or "reversed", which helps maintaining references to resources.
|
* Registered URLs can be built, or "reversed", which helps maintaining references to resources.
|
||||||
* Routes can be used as subrouters: nested routes are only tested if the parent route matches. This is useful to define groups of routes that share common conditions like a host, a path prefix or other repeated attributes. As a bonus, this optimizes request matching.
|
* Routes can be used as subrouters: nested routes are only tested if the parent route matches. This is useful to define groups of routes that share common conditions like a host, a path prefix or other repeated attributes. As a bonus, this optimizes request matching.
|
||||||
|
|
||||||
|
@ -24,9 +24,12 @@ The name mux stands for "HTTP request multiplexer". Like the standard `http.Serv
|
||||||
* [Install](#install)
|
* [Install](#install)
|
||||||
* [Examples](#examples)
|
* [Examples](#examples)
|
||||||
* [Matching Routes](#matching-routes)
|
* [Matching Routes](#matching-routes)
|
||||||
* [Listing Routes](#listing-routes)
|
|
||||||
* [Static Files](#static-files)
|
* [Static Files](#static-files)
|
||||||
* [Registered URLs](#registered-urls)
|
* [Registered URLs](#registered-urls)
|
||||||
|
* [Walking Routes](#walking-routes)
|
||||||
|
* [Graceful Shutdown](#graceful-shutdown)
|
||||||
|
* [Middleware](#middleware)
|
||||||
|
* [Testing Handlers](#testing-handlers)
|
||||||
* [Full Example](#full-example)
|
* [Full Example](#full-example)
|
||||||
|
|
||||||
---
|
---
|
||||||
|
@ -85,7 +88,7 @@ r := mux.NewRouter()
|
||||||
// Only matches if domain is "www.example.com".
|
// Only matches if domain is "www.example.com".
|
||||||
r.Host("www.example.com")
|
r.Host("www.example.com")
|
||||||
// Matches a dynamic subdomain.
|
// Matches a dynamic subdomain.
|
||||||
r.Host("{subdomain:[a-z]+}.domain.com")
|
r.Host("{subdomain:[a-z]+}.example.com")
|
||||||
```
|
```
|
||||||
|
|
||||||
There are several other matchers that can be added. To match path prefixes:
|
There are several other matchers that can be added. To match path prefixes:
|
||||||
|
@ -135,6 +138,14 @@ r.HandleFunc("/products", ProductsHandler).
|
||||||
Schemes("http")
|
Schemes("http")
|
||||||
```
|
```
|
||||||
|
|
||||||
|
Routes are tested in the order they were added to the router. If two routes match, the first one wins:
|
||||||
|
|
||||||
|
```go
|
||||||
|
r := mux.NewRouter()
|
||||||
|
r.HandleFunc("/specific", specificHandler)
|
||||||
|
r.PathPrefix("/").Handler(catchAllHandler)
|
||||||
|
```
|
||||||
|
|
||||||
Setting the same matching conditions again and again can be boring, so we have a way to group several routes that share the same requirements. We call it "subrouting".
|
Setting the same matching conditions again and again can be boring, so we have a way to group several routes that share the same requirements. We call it "subrouting".
|
||||||
|
|
||||||
For example, let's say we have several URLs that should only match when the host is `www.example.com`. Create a route for that host and get a "subrouter" from it:
|
For example, let's say we have several URLs that should only match when the host is `www.example.com`. Create a route for that host and get a "subrouter" from it:
|
||||||
|
@ -169,47 +180,12 @@ s.HandleFunc("/{key}/", ProductHandler)
|
||||||
s.HandleFunc("/{key}/details", ProductDetailsHandler)
|
s.HandleFunc("/{key}/details", ProductDetailsHandler)
|
||||||
```
|
```
|
||||||
|
|
||||||
### Listing Routes
|
|
||||||
|
|
||||||
Routes on a mux can be listed using the Router.Walk method—useful for generating documentation:
|
|
||||||
|
|
||||||
```go
|
|
||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"net/http"
|
|
||||||
|
|
||||||
"github.com/gorilla/mux"
|
|
||||||
)
|
|
||||||
|
|
||||||
func handler(w http.ResponseWriter, r *http.Request) {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
func main() {
|
|
||||||
r := mux.NewRouter()
|
|
||||||
r.HandleFunc("/", handler)
|
|
||||||
r.HandleFunc("/products", handler)
|
|
||||||
r.HandleFunc("/articles", handler)
|
|
||||||
r.HandleFunc("/articles/{id}", handler)
|
|
||||||
r.Walk(func(route *mux.Route, router *mux.Router, ancestors []*mux.Route) error {
|
|
||||||
t, err := route.GetPathTemplate()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
fmt.Println(t)
|
|
||||||
return nil
|
|
||||||
})
|
|
||||||
http.Handle("/", r)
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
### Static Files
|
### Static Files
|
||||||
|
|
||||||
Note that the path provided to `PathPrefix()` represents a "wildcard": calling
|
Note that the path provided to `PathPrefix()` represents a "wildcard": calling
|
||||||
`PathPrefix("/static/").Handler(...)` means that the handler will be passed any
|
`PathPrefix("/static/").Handler(...)` means that the handler will be passed any
|
||||||
request that matches "/static/*". This makes it easy to serve static files with mux:
|
request that matches "/static/\*". This makes it easy to serve static files with mux:
|
||||||
|
|
||||||
```go
|
```go
|
||||||
func main() {
|
func main() {
|
||||||
|
@ -258,19 +234,21 @@ url, err := r.Get("article").URL("category", "technology", "id", "42")
|
||||||
"/articles/technology/42"
|
"/articles/technology/42"
|
||||||
```
|
```
|
||||||
|
|
||||||
This also works for host variables:
|
This also works for host and query value variables:
|
||||||
|
|
||||||
```go
|
```go
|
||||||
r := mux.NewRouter()
|
r := mux.NewRouter()
|
||||||
r.Host("{subdomain}.domain.com").
|
r.Host("{subdomain}.example.com").
|
||||||
Path("/articles/{category}/{id:[0-9]+}").
|
Path("/articles/{category}/{id:[0-9]+}").
|
||||||
|
Queries("filter", "{filter}").
|
||||||
HandlerFunc(ArticleHandler).
|
HandlerFunc(ArticleHandler).
|
||||||
Name("article")
|
Name("article")
|
||||||
|
|
||||||
// url.String() will be "http://news.domain.com/articles/technology/42"
|
// url.String() will be "http://news.example.com/articles/technology/42?filter=gorilla"
|
||||||
url, err := r.Get("article").URL("subdomain", "news",
|
url, err := r.Get("article").URL("subdomain", "news",
|
||||||
"category", "technology",
|
"category", "technology",
|
||||||
"id", "42")
|
"id", "42",
|
||||||
|
"filter", "gorilla")
|
||||||
```
|
```
|
||||||
|
|
||||||
All variables defined in the route are required, and their values must conform to the corresponding patterns. These requirements guarantee that a generated URL will always match a registered route -- the only exception is for explicitly defined "build-only" routes which never match.
|
All variables defined in the route are required, and their values must conform to the corresponding patterns. These requirements guarantee that a generated URL will always match a registered route -- the only exception is for explicitly defined "build-only" routes which never match.
|
||||||
|
@ -286,7 +264,7 @@ r.HeadersRegexp("Content-Type", "application/(text|json)")
|
||||||
There's also a way to build only the URL host or path for a route: use the methods `URLHost()` or `URLPath()` instead. For the previous route, we would do:
|
There's also a way to build only the URL host or path for a route: use the methods `URLHost()` or `URLPath()` instead. For the previous route, we would do:
|
||||||
|
|
||||||
```go
|
```go
|
||||||
// "http://news.domain.com/"
|
// "http://news.example.com/"
|
||||||
host, err := r.Get("article").URLHost("subdomain", "news")
|
host, err := r.Get("article").URLHost("subdomain", "news")
|
||||||
|
|
||||||
// "/articles/technology/42"
|
// "/articles/technology/42"
|
||||||
|
@ -297,17 +275,348 @@ And if you use subrouters, host and path defined separately can be built as well
|
||||||
|
|
||||||
```go
|
```go
|
||||||
r := mux.NewRouter()
|
r := mux.NewRouter()
|
||||||
s := r.Host("{subdomain}.domain.com").Subrouter()
|
s := r.Host("{subdomain}.example.com").Subrouter()
|
||||||
s.Path("/articles/{category}/{id:[0-9]+}").
|
s.Path("/articles/{category}/{id:[0-9]+}").
|
||||||
HandlerFunc(ArticleHandler).
|
HandlerFunc(ArticleHandler).
|
||||||
Name("article")
|
Name("article")
|
||||||
|
|
||||||
// "http://news.domain.com/articles/technology/42"
|
// "http://news.example.com/articles/technology/42"
|
||||||
url, err := r.Get("article").URL("subdomain", "news",
|
url, err := r.Get("article").URL("subdomain", "news",
|
||||||
"category", "technology",
|
"category", "technology",
|
||||||
"id", "42")
|
"id", "42")
|
||||||
```
|
```
|
||||||
|
|
||||||
|
### Walking Routes
|
||||||
|
|
||||||
|
The `Walk` function on `mux.Router` can be used to visit all of the routes that are registered on a router. For example,
|
||||||
|
the following prints all of the registered routes:
|
||||||
|
|
||||||
|
```go
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"net/http"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/gorilla/mux"
|
||||||
|
)
|
||||||
|
|
||||||
|
func handler(w http.ResponseWriter, r *http.Request) {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
r := mux.NewRouter()
|
||||||
|
r.HandleFunc("/", handler)
|
||||||
|
r.HandleFunc("/products", handler).Methods("POST")
|
||||||
|
r.HandleFunc("/articles", handler).Methods("GET")
|
||||||
|
r.HandleFunc("/articles/{id}", handler).Methods("GET", "PUT")
|
||||||
|
r.HandleFunc("/authors", handler).Queries("surname", "{surname}")
|
||||||
|
err := r.Walk(func(route *mux.Route, router *mux.Router, ancestors []*mux.Route) error {
|
||||||
|
pathTemplate, err := route.GetPathTemplate()
|
||||||
|
if err == nil {
|
||||||
|
fmt.Println("ROUTE:", pathTemplate)
|
||||||
|
}
|
||||||
|
pathRegexp, err := route.GetPathRegexp()
|
||||||
|
if err == nil {
|
||||||
|
fmt.Println("Path regexp:", pathRegexp)
|
||||||
|
}
|
||||||
|
queriesTemplates, err := route.GetQueriesTemplates()
|
||||||
|
if err == nil {
|
||||||
|
fmt.Println("Queries templates:", strings.Join(queriesTemplates, ","))
|
||||||
|
}
|
||||||
|
queriesRegexps, err := route.GetQueriesRegexp()
|
||||||
|
if err == nil {
|
||||||
|
fmt.Println("Queries regexps:", strings.Join(queriesRegexps, ","))
|
||||||
|
}
|
||||||
|
methods, err := route.GetMethods()
|
||||||
|
if err == nil {
|
||||||
|
fmt.Println("Methods:", strings.Join(methods, ","))
|
||||||
|
}
|
||||||
|
fmt.Println()
|
||||||
|
return nil
|
||||||
|
})
|
||||||
|
|
||||||
|
if err != nil {
|
||||||
|
fmt.Println(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
http.Handle("/", r)
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Graceful Shutdown
|
||||||
|
|
||||||
|
Go 1.8 introduced the ability to [gracefully shutdown](https://golang.org/doc/go1.8#http_shutdown) a `*http.Server`. Here's how to do that alongside `mux`:
|
||||||
|
|
||||||
|
```go
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"flag"
|
||||||
|
"log"
|
||||||
|
"net/http"
|
||||||
|
"os"
|
||||||
|
"os/signal"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/gorilla/mux"
|
||||||
|
)
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
var wait time.Duration
|
||||||
|
flag.DurationVar(&wait, "graceful-timeout", time.Second * 15, "the duration for which the server gracefully wait for existing connections to finish - e.g. 15s or 1m")
|
||||||
|
flag.Parse()
|
||||||
|
|
||||||
|
r := mux.NewRouter()
|
||||||
|
// Add your routes as needed
|
||||||
|
|
||||||
|
srv := &http.Server{
|
||||||
|
Addr: "0.0.0.0:8080",
|
||||||
|
// Good practice to set timeouts to avoid Slowloris attacks.
|
||||||
|
WriteTimeout: time.Second * 15,
|
||||||
|
ReadTimeout: time.Second * 15,
|
||||||
|
IdleTimeout: time.Second * 60,
|
||||||
|
Handler: r, // Pass our instance of gorilla/mux in.
|
||||||
|
}
|
||||||
|
|
||||||
|
// Run our server in a goroutine so that it doesn't block.
|
||||||
|
go func() {
|
||||||
|
if err := srv.ListenAndServe(); err != nil {
|
||||||
|
log.Println(err)
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
c := make(chan os.Signal, 1)
|
||||||
|
// We'll accept graceful shutdowns when quit via SIGINT (Ctrl+C)
|
||||||
|
// SIGKILL, SIGQUIT or SIGTERM (Ctrl+/) will not be caught.
|
||||||
|
signal.Notify(c, os.Interrupt)
|
||||||
|
|
||||||
|
// Block until we receive our signal.
|
||||||
|
<-c
|
||||||
|
|
||||||
|
// Create a deadline to wait for.
|
||||||
|
ctx, cancel := context.WithTimeout(context.Background(), wait)
|
||||||
|
defer cancel()
|
||||||
|
// Doesn't block if no connections, but will otherwise wait
|
||||||
|
// until the timeout deadline.
|
||||||
|
srv.Shutdown(ctx)
|
||||||
|
// Optionally, you could run srv.Shutdown in a goroutine and block on
|
||||||
|
// <-ctx.Done() if your application should wait for other services
|
||||||
|
// to finalize based on context cancellation.
|
||||||
|
log.Println("shutting down")
|
||||||
|
os.Exit(0)
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Middleware
|
||||||
|
|
||||||
|
Mux supports the addition of middlewares to a [Router](https://godoc.org/github.com/gorilla/mux#Router), which are executed in the order they are added if a match is found, including its subrouters.
|
||||||
|
Middlewares are (typically) small pieces of code which take one request, do something with it, and pass it down to another middleware or the final handler. Some common use cases for middleware are request logging, header manipulation, or `ResponseWriter` hijacking.
|
||||||
|
|
||||||
|
Mux middlewares are defined using the de facto standard type:
|
||||||
|
|
||||||
|
```go
|
||||||
|
type MiddlewareFunc func(http.Handler) http.Handler
|
||||||
|
```
|
||||||
|
|
||||||
|
Typically, the returned handler is a closure which does something with the http.ResponseWriter and http.Request passed to it, and then calls the handler passed as parameter to the MiddlewareFunc. This takes advantage of closures being able access variables from the context where they are created, while retaining the signature enforced by the receivers.
|
||||||
|
|
||||||
|
A very basic middleware which logs the URI of the request being handled could be written as:
|
||||||
|
|
||||||
|
```go
|
||||||
|
func loggingMiddleware(next http.Handler) http.Handler {
|
||||||
|
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||||
|
// Do stuff here
|
||||||
|
log.Println(r.RequestURI)
|
||||||
|
// Call the next handler, which can be another middleware in the chain, or the final handler.
|
||||||
|
next.ServeHTTP(w, r)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
Middlewares can be added to a router using `Router.Use()`:
|
||||||
|
|
||||||
|
```go
|
||||||
|
r := mux.NewRouter()
|
||||||
|
r.HandleFunc("/", handler)
|
||||||
|
r.Use(loggingMiddleware)
|
||||||
|
```
|
||||||
|
|
||||||
|
A more complex authentication middleware, which maps session token to users, could be written as:
|
||||||
|
|
||||||
|
```go
|
||||||
|
// Define our struct
|
||||||
|
type authenticationMiddleware struct {
|
||||||
|
tokenUsers map[string]string
|
||||||
|
}
|
||||||
|
|
||||||
|
// Initialize it somewhere
|
||||||
|
func (amw *authenticationMiddleware) Populate() {
|
||||||
|
amw.tokenUsers["00000000"] = "user0"
|
||||||
|
amw.tokenUsers["aaaaaaaa"] = "userA"
|
||||||
|
amw.tokenUsers["05f717e5"] = "randomUser"
|
||||||
|
amw.tokenUsers["deadbeef"] = "user0"
|
||||||
|
}
|
||||||
|
|
||||||
|
// Middleware function, which will be called for each request
|
||||||
|
func (amw *authenticationMiddleware) Middleware(next http.Handler) http.Handler {
|
||||||
|
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||||
|
token := r.Header.Get("X-Session-Token")
|
||||||
|
|
||||||
|
if user, found := amw.tokenUsers[token]; found {
|
||||||
|
// We found the token in our map
|
||||||
|
log.Printf("Authenticated user %s\n", user)
|
||||||
|
// Pass down the request to the next middleware (or final handler)
|
||||||
|
next.ServeHTTP(w, r)
|
||||||
|
} else {
|
||||||
|
// Write an error and stop the handler chain
|
||||||
|
http.Error(w, "Forbidden", http.StatusForbidden)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
```go
|
||||||
|
r := mux.NewRouter()
|
||||||
|
r.HandleFunc("/", handler)
|
||||||
|
|
||||||
|
amw := authenticationMiddleware{}
|
||||||
|
amw.Populate()
|
||||||
|
|
||||||
|
r.Use(amw.Middleware)
|
||||||
|
```
|
||||||
|
|
||||||
|
Note: The handler chain will be stopped if your middleware doesn't call `next.ServeHTTP()` with the corresponding parameters. This can be used to abort a request if the middleware writer wants to. Middlewares _should_ write to `ResponseWriter` if they _are_ going to terminate the request, and they _should not_ write to `ResponseWriter` if they _are not_ going to terminate it.
|
||||||
|
|
||||||
|
### Testing Handlers
|
||||||
|
|
||||||
|
Testing handlers in a Go web application is straightforward, and _mux_ doesn't complicate this any further. Given two files: `endpoints.go` and `endpoints_test.go`, here's how we'd test an application using _mux_.
|
||||||
|
|
||||||
|
First, our simple HTTP handler:
|
||||||
|
|
||||||
|
```go
|
||||||
|
// endpoints.go
|
||||||
|
package main
|
||||||
|
|
||||||
|
func HealthCheckHandler(w http.ResponseWriter, r *http.Request) {
|
||||||
|
// A very simple health check.
|
||||||
|
w.Header().Set("Content-Type", "application/json")
|
||||||
|
w.WriteHeader(http.StatusOK)
|
||||||
|
|
||||||
|
// In the future we could report back on the status of our DB, or our cache
|
||||||
|
// (e.g. Redis) by performing a simple PING, and include them in the response.
|
||||||
|
io.WriteString(w, `{"alive": true}`)
|
||||||
|
}
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
r := mux.NewRouter()
|
||||||
|
r.HandleFunc("/health", HealthCheckHandler)
|
||||||
|
|
||||||
|
log.Fatal(http.ListenAndServe("localhost:8080", r))
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
Our test code:
|
||||||
|
|
||||||
|
```go
|
||||||
|
// endpoints_test.go
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"net/http"
|
||||||
|
"net/http/httptest"
|
||||||
|
"testing"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestHealthCheckHandler(t *testing.T) {
|
||||||
|
// Create a request to pass to our handler. We don't have any query parameters for now, so we'll
|
||||||
|
// pass 'nil' as the third parameter.
|
||||||
|
req, err := http.NewRequest("GET", "/health", nil)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// We create a ResponseRecorder (which satisfies http.ResponseWriter) to record the response.
|
||||||
|
rr := httptest.NewRecorder()
|
||||||
|
handler := http.HandlerFunc(HealthCheckHandler)
|
||||||
|
|
||||||
|
// Our handlers satisfy http.Handler, so we can call their ServeHTTP method
|
||||||
|
// directly and pass in our Request and ResponseRecorder.
|
||||||
|
handler.ServeHTTP(rr, req)
|
||||||
|
|
||||||
|
// Check the status code is what we expect.
|
||||||
|
if status := rr.Code; status != http.StatusOK {
|
||||||
|
t.Errorf("handler returned wrong status code: got %v want %v",
|
||||||
|
status, http.StatusOK)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check the response body is what we expect.
|
||||||
|
expected := `{"alive": true}`
|
||||||
|
if rr.Body.String() != expected {
|
||||||
|
t.Errorf("handler returned unexpected body: got %v want %v",
|
||||||
|
rr.Body.String(), expected)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
In the case that our routes have [variables](#examples), we can pass those in the request. We could write
|
||||||
|
[table-driven tests](https://dave.cheney.net/2013/06/09/writing-table-driven-tests-in-go) to test multiple
|
||||||
|
possible route variables as needed.
|
||||||
|
|
||||||
|
```go
|
||||||
|
// endpoints.go
|
||||||
|
func main() {
|
||||||
|
r := mux.NewRouter()
|
||||||
|
// A route with a route variable:
|
||||||
|
r.HandleFunc("/metrics/{type}", MetricsHandler)
|
||||||
|
|
||||||
|
log.Fatal(http.ListenAndServe("localhost:8080", r))
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
Our test file, with a table-driven test of `routeVariables`:
|
||||||
|
|
||||||
|
```go
|
||||||
|
// endpoints_test.go
|
||||||
|
func TestMetricsHandler(t *testing.T) {
|
||||||
|
tt := []struct{
|
||||||
|
routeVariable string
|
||||||
|
shouldPass bool
|
||||||
|
}{
|
||||||
|
{"goroutines", true},
|
||||||
|
{"heap", true},
|
||||||
|
{"counters", true},
|
||||||
|
{"queries", true},
|
||||||
|
{"adhadaeqm3k", false},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, tc := range tt {
|
||||||
|
path := fmt.Sprintf("/metrics/%s", tc.routeVariable)
|
||||||
|
req, err := http.NewRequest("GET", path, nil)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
rr := httptest.NewRecorder()
|
||||||
|
|
||||||
|
// Need to create a router that we can pass the request through so that the vars will be added to the context
|
||||||
|
router := mux.NewRouter()
|
||||||
|
router.HandleFunc("/metrics/{type}", MetricsHandler)
|
||||||
|
router.ServeHTTP(rr, req)
|
||||||
|
|
||||||
|
// In this case, our MetricsHandler returns a non-200 response
|
||||||
|
// for a route variable it doesn't know about.
|
||||||
|
if rr.Code == http.StatusOK && !tc.shouldPass {
|
||||||
|
t.Errorf("handler should have failed on routeVariable %s: got %v want %v",
|
||||||
|
tc.routeVariable, rr.Code, http.StatusOK)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
## Full Example
|
## Full Example
|
||||||
|
|
||||||
Here's a complete, runnable example of a small `mux` based server:
|
Here's a complete, runnable example of a small `mux` based server:
|
||||||
|
|
|
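The README's own full example lies outside the changed hunks, so it is not reproduced in this diff. Purely as an illustrative sketch (not the file's content), a minimal mux-based server of the kind it describes would look like:

```go
package main

import (
	"fmt"
	"log"
	"net/http"

	"github.com/gorilla/mux"
)

func main() {
	r := mux.NewRouter()
	// {title} becomes a route variable, retrievable via mux.Vars.
	r.HandleFunc("/books/{title}", func(w http.ResponseWriter, req *http.Request) {
		vars := mux.Vars(req)
		fmt.Fprintf(w, "You requested the book: %s\n", vars["title"])
	})
	log.Fatal(http.ListenAndServe(":8080", r))
}
```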
@ -1,5 +1,3 @@
|
||||||
// +build go1.7
|
|
||||||
|
|
||||||
package mux
|
package mux
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
@ -18,7 +16,3 @@ func contextSet(r *http.Request, key, val interface{}) *http.Request {
|
||||||
|
|
||||||
return r.WithContext(context.WithValue(r.Context(), key, val))
|
return r.WithContext(context.WithValue(r.Context(), key, val))
|
||||||
}
|
}
|
||||||
|
|
||||||
func contextClear(r *http.Request) {
|
|
||||||
return
|
|
||||||
}
|
|
26 vendor/github.com/gorilla/mux/context_gorilla.go generated vendored
|
@ -1,26 +0,0 @@
|
||||||
// +build !go1.7
|
|
||||||
|
|
||||||
package mux
|
|
||||||
|
|
||||||
import (
|
|
||||||
"net/http"
|
|
||||||
|
|
||||||
"github.com/gorilla/context"
|
|
||||||
)
|
|
||||||
|
|
||||||
func contextGet(r *http.Request, key interface{}) interface{} {
|
|
||||||
return context.Get(r, key)
|
|
||||||
}
|
|
||||||
|
|
||||||
func contextSet(r *http.Request, key, val interface{}) *http.Request {
|
|
||||||
if val == nil {
|
|
||||||
return r
|
|
||||||
}
|
|
||||||
|
|
||||||
context.Set(r, key, val)
|
|
||||||
return r
|
|
||||||
}
|
|
||||||
|
|
||||||
func contextClear(r *http.Request) {
|
|
||||||
context.Clear(r)
|
|
||||||
}
|
|
76 vendor/github.com/gorilla/mux/doc.go generated vendored
|
@ -12,8 +12,8 @@ or other conditions. The main features are:
|
||||||
|
|
||||||
* Requests can be matched based on URL host, path, path prefix, schemes,
|
* Requests can be matched based on URL host, path, path prefix, schemes,
|
||||||
header and query values, HTTP methods or using custom matchers.
|
header and query values, HTTP methods or using custom matchers.
|
||||||
* URL hosts and paths can have variables with an optional regular
|
* URL hosts, paths and query values can have variables with an optional
|
||||||
expression.
|
regular expression.
|
||||||
* Registered URLs can be built, or "reversed", which helps maintaining
|
* Registered URLs can be built, or "reversed", which helps maintaining
|
||||||
references to resources.
|
references to resources.
|
||||||
* Routes can be used as subrouters: nested routes are only tested if the
|
* Routes can be used as subrouters: nested routes are only tested if the
|
||||||
|
@ -188,18 +188,20 @@ key/value pairs for the route variables. For the previous route, we would do:
|
||||||
|
|
||||||
"/articles/technology/42"
|
"/articles/technology/42"
|
||||||
|
|
||||||
This also works for host variables:
|
This also works for host and query value variables:
|
||||||
|
|
||||||
r := mux.NewRouter()
|
r := mux.NewRouter()
|
||||||
r.Host("{subdomain}.domain.com").
|
r.Host("{subdomain}.domain.com").
|
||||||
Path("/articles/{category}/{id:[0-9]+}").
|
Path("/articles/{category}/{id:[0-9]+}").
|
||||||
|
Queries("filter", "{filter}").
|
||||||
HandlerFunc(ArticleHandler).
|
HandlerFunc(ArticleHandler).
|
||||||
Name("article")
|
Name("article")
|
||||||
|
|
||||||
// url.String() will be "http://news.domain.com/articles/technology/42"
|
// url.String() will be "http://news.domain.com/articles/technology/42?filter=gorilla"
|
||||||
url, err := r.Get("article").URL("subdomain", "news",
|
url, err := r.Get("article").URL("subdomain", "news",
|
||||||
"category", "technology",
|
"category", "technology",
|
||||||
"id", "42")
|
"id", "42",
|
||||||
|
"filter", "gorilla")
|
||||||
|
|
||||||
All variables defined in the route are required, and their values must
|
All variables defined in the route are required, and their values must
|
||||||
conform to the corresponding patterns. These requirements guarantee that a
|
conform to the corresponding patterns. These requirements guarantee that a
|
||||||
|
@ -236,5 +238,69 @@ as well:
|
||||||
url, err := r.Get("article").URL("subdomain", "news",
|
url, err := r.Get("article").URL("subdomain", "news",
|
||||||
"category", "technology",
|
"category", "technology",
|
||||||
"id", "42")
|
"id", "42")
|
||||||
|
|
||||||
|
Mux supports the addition of middlewares to a Router, which are executed in the order they are added if a match is found, including its subrouters. Middlewares are (typically) small pieces of code which take one request, do something with it, and pass it down to another middleware or the final handler. Some common use cases for middleware are request logging, header manipulation, or ResponseWriter hijacking.
|
||||||
|
|
||||||
|
type MiddlewareFunc func(http.Handler) http.Handler
|
||||||
|
|
||||||
|
Typically, the returned handler is a closure which does something with the http.ResponseWriter and http.Request passed to it, and then calls the handler passed as parameter to the MiddlewareFunc (closures can access variables from the context where they are created).
|
||||||
|
|
||||||
|
A very basic middleware which logs the URI of the request being handled could be written as:
|
||||||
|
|
||||||
|
func simpleMw(next http.Handler) http.Handler {
|
||||||
|
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||||
|
// Do stuff here
|
||||||
|
log.Println(r.RequestURI)
|
||||||
|
// Call the next handler, which can be another middleware in the chain, or the final handler.
|
||||||
|
next.ServeHTTP(w, r)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
Middlewares can be added to a router using `Router.Use()`:
|
||||||
|
|
||||||
|
r := mux.NewRouter()
|
||||||
|
r.HandleFunc("/", handler)
|
||||||
|
r.Use(simpleMw)
|
||||||
|
|
||||||
|
A more complex authentication middleware, which maps session token to users, could be written as:
|
||||||
|
|
||||||
|
// Define our struct
|
||||||
|
type authenticationMiddleware struct {
|
||||||
|
tokenUsers map[string]string
|
||||||
|
}
|
||||||
|
|
||||||
|
// Initialize it somewhere
|
||||||
|
func (amw *authenticationMiddleware) Populate() {
|
||||||
|
amw.tokenUsers["00000000"] = "user0"
|
||||||
|
amw.tokenUsers["aaaaaaaa"] = "userA"
|
||||||
|
amw.tokenUsers["05f717e5"] = "randomUser"
|
||||||
|
amw.tokenUsers["deadbeef"] = "user0"
|
||||||
|
}
|
||||||
|
|
||||||
|
// Middleware function, which will be called for each request
|
||||||
|
func (amw *authenticationMiddleware) Middleware(next http.Handler) http.Handler {
|
||||||
|
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||||
|
token := r.Header.Get("X-Session-Token")
|
||||||
|
|
||||||
|
if user, found := amw.tokenUsers[token]; found {
|
||||||
|
// We found the token in our map
|
||||||
|
log.Printf("Authenticated user %s\n", user)
|
||||||
|
next.ServeHTTP(w, r)
|
||||||
|
} else {
|
||||||
|
http.Error(w, "Forbidden", http.StatusForbidden)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
r := mux.NewRouter()
|
||||||
|
r.HandleFunc("/", handler)
|
||||||
|
|
||||||
|
amw := authenticationMiddleware{}
|
||||||
|
amw.Populate()
|
||||||
|
|
||||||
|
r.Use(amw.Middleware)
|
||||||
|
|
||||||
|
Note: The handler chain will be stopped if your middleware doesn't call `next.ServeHTTP()` with the corresponding parameters. This can be used to abort a request if the middleware writer wants to.
|
||||||
|
|
||||||
*/
|
*/
|
||||||
package mux
|
package mux
|
||||||
|
|
1 vendor/github.com/gorilla/mux/go.mod generated vendored Normal file
|
@ -0,0 +1 @@
|
||||||
|
module github.com/gorilla/mux
|
72 vendor/github.com/gorilla/mux/middleware.go generated vendored Normal file
|
@ -0,0 +1,72 @@
|
||||||
|
package mux
|
||||||
|
|
||||||
|
import (
|
||||||
|
"net/http"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
// MiddlewareFunc is a function which receives an http.Handler and returns another http.Handler.
|
||||||
|
// Typically, the returned handler is a closure which does something with the http.ResponseWriter and http.Request passed
|
||||||
|
// to it, and then calls the handler passed as parameter to the MiddlewareFunc.
|
||||||
|
type MiddlewareFunc func(http.Handler) http.Handler
|
||||||
|
|
||||||
|
// middleware interface is anything which implements a MiddlewareFunc named Middleware.
|
||||||
|
type middleware interface {
|
||||||
|
Middleware(handler http.Handler) http.Handler
|
||||||
|
}
|
||||||
|
|
||||||
|
// Middleware allows MiddlewareFunc to implement the middleware interface.
|
||||||
|
func (mw MiddlewareFunc) Middleware(handler http.Handler) http.Handler {
|
||||||
|
return mw(handler)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Use appends a MiddlewareFunc to the chain. Middleware can be used to intercept or otherwise modify requests and/or responses, and are executed in the order that they are applied to the Router.
|
||||||
|
func (r *Router) Use(mwf ...MiddlewareFunc) {
|
||||||
|
for _, fn := range mwf {
|
||||||
|
r.middlewares = append(r.middlewares, fn)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// useInterface appends a middleware to the chain. Middleware can be used to intercept or otherwise modify requests and/or responses, and are executed in the order that they are applied to the Router.
|
||||||
|
func (r *Router) useInterface(mw middleware) {
|
||||||
|
r.middlewares = append(r.middlewares, mw)
|
||||||
|
}
|
||||||
|
|
||||||
|
// CORSMethodMiddleware sets the Access-Control-Allow-Methods response header
|
||||||
|
// on a request, by matching routes based only on paths. It also handles
|
||||||
|
// OPTIONS requests, by settings Access-Control-Allow-Methods, and then
|
||||||
|
// returning without calling the next http handler.
|
||||||
|
func CORSMethodMiddleware(r *Router) MiddlewareFunc {
|
||||||
|
return func(next http.Handler) http.Handler {
|
||||||
|
return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
|
||||||
|
var allMethods []string
|
||||||
|
|
||||||
|
err := r.Walk(func(route *Route, _ *Router, _ []*Route) error {
|
||||||
|
for _, m := range route.matchers {
|
||||||
|
if _, ok := m.(*routeRegexp); ok {
|
||||||
|
if m.Match(req, &RouteMatch{}) {
|
||||||
|
methods, err := route.GetMethods()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
allMethods = append(allMethods, methods...)
|
||||||
|
}
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
})
|
||||||
|
|
||||||
|
if err == nil {
|
||||||
|
w.Header().Set("Access-Control-Allow-Methods", strings.Join(append(allMethods, "OPTIONS"), ","))
|
||||||
|
|
||||||
|
if req.Method == "OPTIONS" {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
next.ServeHTTP(w, req)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
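A hedged usage sketch for the `CORSMethodMiddleware` added above: it fills `Access-Control-Allow-Methods` from the routes whose path matches the request, and answers OPTIONS requests itself. The route path and handler here are illustrative.

```go
package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"

	"github.com/gorilla/mux"
)

func ok(w http.ResponseWriter, _ *http.Request) { w.WriteHeader(http.StatusOK) }

func main() {
	r := mux.NewRouter()
	r.HandleFunc("/api/items", ok).Methods(http.MethodGet, http.MethodPost)
	r.Use(mux.CORSMethodMiddleware(r))

	rec := httptest.NewRecorder()
	r.ServeHTTP(rec, httptest.NewRequest(http.MethodGet, "/api/items", nil))
	// Prints the methods the middleware collected for this path, plus OPTIONS.
	fmt.Println(rec.Header().Get("Access-Control-Allow-Methods")) // GET,POST,OPTIONS
}
```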
229 vendor/github.com/gorilla/mux/mux.go generated vendored
|
@ -10,12 +10,19 @@ import (
|
||||||
"net/http"
|
"net/http"
|
||||||
"path"
|
"path"
|
||||||
"regexp"
|
"regexp"
|
||||||
"strings"
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
// ErrMethodMismatch is returned when the method in the request does not match
|
||||||
|
// the method defined against the route.
|
||||||
|
ErrMethodMismatch = errors.New("method is not allowed")
|
||||||
|
// ErrNotFound is returned when no route match is found.
|
||||||
|
ErrNotFound = errors.New("no matching route was found")
|
||||||
)
|
)
|
||||||
|
|
||||||
// NewRouter returns a new router instance.
|
// NewRouter returns a new router instance.
|
||||||
func NewRouter() *Router {
|
func NewRouter() *Router {
|
||||||
-	return &Router{namedRoutes: make(map[string]*Route), KeepContext: false}
+	return &Router{namedRoutes: make(map[string]*Route)}
 }

 // Router registers routes to be matched and dispatches a handler.
@@ -39,37 +46,125 @@ func NewRouter() *Router {
 type Router struct {
 	// Configurable Handler to be used when no route matches.
 	NotFoundHandler http.Handler
-	// Parent route, if this is a subrouter.
-	parent parentRoute
+	// Configurable Handler to be used when the request method does not match the route.
+	MethodNotAllowedHandler http.Handler

 	// Routes to be matched, in order.
 	routes []*Route

 	// Routes by name for URL building.
 	namedRoutes map[string]*Route
-	// See Router.StrictSlash(). This defines the flag for new routes.
-	strictSlash bool
-	// See Router.SkipClean(). This defines the flag for new routes.
-	skipClean bool
 	// If true, do not clear the request context after handling the request.
-	// This has no effect when go1.7+ is used, since the context is stored
+	//
+	// Deprecated: No effect when go1.7+ is used, since the context is stored
 	// on the request itself.
 	KeepContext bool
-	// see Router.UseEncodedPath(). This defines a flag for all routes.
-	useEncodedPath bool
+	// Slice of middlewares to be called after a match is found
+	middlewares []middleware

+	// configuration shared with `Route`
+	routeConf
 }

-// Match matches registered routes against the request.
+// common route configuration shared between `Router` and `Route`
+type routeConf struct {
+	// If true, "/path/foo%2Fbar/to" will match the path "/path/{var}/to"
+	useEncodedPath bool

+	// If true, when the path pattern is "/path/", accessing "/path" will
+	// redirect to the former and vice versa.
+	strictSlash bool

+	// If true, when the path pattern is "/path//to", accessing "/path//to"
+	// will not redirect
+	skipClean bool

+	// Manager for the variables from host and path.
+	regexp routeRegexpGroup

+	// List of matchers.
+	matchers []matcher

+	// The scheme used when building URLs.
+	buildScheme string

+	buildVarsFunc BuildVarsFunc
+}

+// returns an effective deep copy of `routeConf`
+func copyRouteConf(r routeConf) routeConf {
+	c := r

+	if r.regexp.path != nil {
+		c.regexp.path = copyRouteRegexp(r.regexp.path)
+	}

+	if r.regexp.host != nil {
+		c.regexp.host = copyRouteRegexp(r.regexp.host)
+	}

+	c.regexp.queries = make([]*routeRegexp, 0, len(r.regexp.queries))
+	for _, q := range r.regexp.queries {
+		c.regexp.queries = append(c.regexp.queries, copyRouteRegexp(q))
+	}

+	c.matchers = make([]matcher, 0, len(r.matchers))
+	for _, m := range r.matchers {
+		c.matchers = append(c.matchers, m)
+	}

+	return c
+}

+func copyRouteRegexp(r *routeRegexp) *routeRegexp {
+	c := *r
+	return &c
+}

+// Match attempts to match the given request against the router's registered routes.
+//
+// If the request matches a route of this router or one of its subrouters the Route,
+// Handler, and Vars fields of the the match argument are filled and this function
+// returns true.
+//
+// If the request does not match any of this router's or its subrouters' routes
+// then this function returns false. If available, a reason for the match failure
+// will be filled in the match argument's MatchErr field. If the match failure type
+// (eg: not found) has a registered handler, the handler is assigned to the Handler
+// field of the match argument.
 func (r *Router) Match(req *http.Request, match *RouteMatch) bool {
 	for _, route := range r.routes {
 		if route.Match(req, match) {
+			// Build middleware chain if no error was found
+			if match.MatchErr == nil {
+				for i := len(r.middlewares) - 1; i >= 0; i-- {
+					match.Handler = r.middlewares[i].Middleware(match.Handler)
+				}
+			}
 			return true
 		}
 	}

+	if match.MatchErr == ErrMethodMismatch {
+		if r.MethodNotAllowedHandler != nil {
+			match.Handler = r.MethodNotAllowedHandler
+			return true
+		}

+		return false
+	}

 	// Closest match for a router (includes sub-routers)
 	if r.NotFoundHandler != nil {
 		match.Handler = r.NotFoundHandler
+		match.MatchErr = ErrNotFound
 		return true
 	}

+	match.MatchErr = ErrNotFound
 	return false
 }

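For reviewers: a minimal sketch, not part of the diff, of how the new `MethodNotAllowedHandler` field and the middleware chain built in `Match` are typically wired up. The route, port, and messages below are illustrative assumptions only.

```go
package main

import (
	"log"
	"net/http"

	"github.com/gorilla/mux"
)

func main() {
	r := mux.NewRouter()
	r.HandleFunc("/health", func(w http.ResponseWriter, _ *http.Request) {
		w.Write([]byte("ok"))
	}).Methods("GET")

	// Served when a path matches but the method does not (HTTP 405).
	r.MethodNotAllowedHandler = http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
		http.Error(w, "method not allowed", http.StatusMethodNotAllowed)
	})

	// Middleware registered with Use() is applied by Match after a route matches.
	r.Use(func(next http.Handler) http.Handler {
		return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
			log.Println(req.Method, req.URL.Path) // simple request logging
			next.ServeHTTP(w, req)
		})
	})

	log.Fatal(http.ListenAndServe(":8080", r))
}
```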
@@ -81,7 +176,7 @@ func (r *Router) ServeHTTP(w http.ResponseWriter, req *http.Request) {
 	if !r.skipClean {
 		path := req.URL.Path
 		if r.useEncodedPath {
-			path = getPath(req)
+			path = req.URL.EscapedPath()
 		}
 		// Clean path to canonical form and redirect.
 		if p := cleanPath(path); p != path {
@@ -105,36 +200,44 @@ func (r *Router) ServeHTTP(w http.ResponseWriter, req *http.Request) {
 		req = setVars(req, match.Vars)
 		req = setCurrentRoute(req, match.Route)
 	}

+	if handler == nil && match.MatchErr == ErrMethodMismatch {
+		handler = methodNotAllowedHandler()
+	}

 	if handler == nil {
 		handler = http.NotFoundHandler()
 	}
-	if !r.KeepContext {
-		defer contextClear(req)
-	}
 	handler.ServeHTTP(w, req)
 }

 // Get returns a route registered with the given name.
 func (r *Router) Get(name string) *Route {
-	return r.getNamedRoutes()[name]
+	return r.namedRoutes[name]
 }

 // GetRoute returns a route registered with the given name. This method
 // was renamed to Get() and remains here for backwards compatibility.
 func (r *Router) GetRoute(name string) *Route {
-	return r.getNamedRoutes()[name]
+	return r.namedRoutes[name]
 }

 // StrictSlash defines the trailing slash behavior for new routes. The initial
 // value is false.
 //
-// When true, if the route path is "/path/", accessing "/path" will redirect
+// When true, if the route path is "/path/", accessing "/path" will perform a redirect
 // to the former and vice versa. In other words, your application will always
 // see the path as specified in the route.
 //
 // When false, if the route path is "/path", accessing "/path/" will not match
 // this route and vice versa.
 //
+// The re-direct is a HTTP 301 (Moved Permanently). Note that when this is set for
+// routes with a non-idempotent method (e.g. POST, PUT), the subsequent re-directed
+// request will be made as a GET by most clients. Use middleware or client settings
+// to modify this behaviour as needed.
+//
 // Special case: when a route sets a path prefix using the PathPrefix() method,
 // strict slash is ignored for that route because the redirect behavior can't
 // be determined from a prefix alone. However, any subrouters created from that
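As a hedged illustration of the StrictSlash behaviour documented above (the path and port are made up, not from this PR):

```go
package main

import (
	"log"
	"net/http"

	"github.com/gorilla/mux"
)

func main() {
	r := mux.NewRouter()
	r.StrictSlash(true)
	// A GET for /articles is answered with a 301 redirect to /articles/,
	// the form registered below, so the handler always sees the registered path.
	r.HandleFunc("/articles/", func(w http.ResponseWriter, _ *http.Request) {
		w.Write([]byte("article list"))
	})
	log.Fatal(http.ListenAndServe(":8080", r))
}
```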
@@ -160,10 +263,6 @@ func (r *Router) SkipClean(value bool) *Router {
 // UseEncodedPath tells the router to match the encoded original path
 // to the routes.
 // For eg. "/path/foo%2Fbar/to" will match the path "/path/{var}/to".
-// This behavior has the drawback of needing to match routes against
-// r.RequestURI instead of r.URL.Path. Any modifications (such as http.StripPrefix)
-// to r.URL.Path will not affect routing when this flag is on and thus may
-// induce unintended behavior.
 //
 // If not called, the router will match the unencoded path to the routes.
 // For eg. "/path/foo%2Fbar/to" will match the path "/path/foo/bar/to"
@@ -172,48 +271,24 @@ func (r *Router) UseEncodedPath() *Router {
 	return r
 }
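A small sketch of the UseEncodedPath behaviour described above, under the assumption that the request path contains an encoded slash (route and handler are illustrative):

```go
package main

import (
	"fmt"
	"net/http"

	"github.com/gorilla/mux"
)

func main() {
	r := mux.NewRouter()
	r.UseEncodedPath()
	// With UseEncodedPath, "/files/a%2Fb" keeps the encoded slash and matches
	// {name} as a single variable instead of being treated as "/files/a/b".
	r.HandleFunc("/files/{name}", func(w http.ResponseWriter, req *http.Request) {
		fmt.Fprintln(w, mux.Vars(req)["name"])
	})
	http.ListenAndServe(":8080", r)
}
```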

-// ----------------------------------------------------------------------------
-// parentRoute
-// ----------------------------------------------------------------------------
-
-// getNamedRoutes returns the map where named routes are registered.
-func (r *Router) getNamedRoutes() map[string]*Route {
-	if r.namedRoutes == nil {
-		if r.parent != nil {
-			r.namedRoutes = r.parent.getNamedRoutes()
-		} else {
-			r.namedRoutes = make(map[string]*Route)
-		}
-	}
-	return r.namedRoutes
-}
-
-// getRegexpGroup returns regexp definitions from the parent route, if any.
-func (r *Router) getRegexpGroup() *routeRegexpGroup {
-	if r.parent != nil {
-		return r.parent.getRegexpGroup()
-	}
-	return nil
-}
-
-func (r *Router) buildVars(m map[string]string) map[string]string {
-	if r.parent != nil {
-		m = r.parent.buildVars(m)
-	}
-	return m
-}

 // ----------------------------------------------------------------------------
 // Route factories
 // ----------------------------------------------------------------------------

 // NewRoute registers an empty route.
 func (r *Router) NewRoute() *Route {
-	route := &Route{parent: r, strictSlash: r.strictSlash, skipClean: r.skipClean, useEncodedPath: r.useEncodedPath}
+	// initialize a route with a copy of the parent router's configuration
+	route := &Route{routeConf: copyRouteConf(r.routeConf), namedRoutes: r.namedRoutes}
 	r.routes = append(r.routes, route)
 	return route
 }

+// Name registers a new route with a name.
+// See Route.Name().
+func (r *Router) Name(name string) *Route {
+	return r.NewRoute().Name(name)
+}

 // Handle registers a new route with a matcher for the URL path.
 // See Route.Path() and Route.Handler().
 func (r *Router) Handle(path string, handler http.Handler) *Route {
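A minimal sketch, not part of the diff, of the named-route lookup that `Router.Name` and the shared `namedRoutes` map enable (route name and pattern are made up):

```go
package main

import (
	"fmt"
	"net/http"

	"github.com/gorilla/mux"
)

func main() {
	r := mux.NewRouter()
	r.HandleFunc("/articles/{id:[0-9]+}", func(http.ResponseWriter, *http.Request) {}).
		Name("article")

	// Router.Get resolves the route through the shared namedRoutes map.
	u, err := r.Get("article").URL("id", "42")
	if err != nil {
		panic(err)
	}
	fmt.Println(u.Path) // /articles/42
}
```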
@@ -299,10 +374,6 @@ type WalkFunc func(route *Route, router *Router, ancestors []*Route) error

 func (r *Router) walk(walkFn WalkFunc, ancestors []*Route) error {
 	for _, t := range r.routes {
-		if t.regexp == nil || t.regexp.path == nil || t.regexp.path.template == "" {
-			continue
-		}
-
 		err := walkFn(t, r, ancestors)
 		if err == SkipRouter {
 			continue
@@ -312,10 +383,12 @@ func (r *Router) walk(walkFn WalkFunc, ancestors []*Route) error {
 		}
 		for _, sr := range t.matchers {
 			if h, ok := sr.(*Router); ok {
+				ancestors = append(ancestors, t)
 				err := h.walk(walkFn, ancestors)
 				if err != nil {
 					return err
 				}
+				ancestors = ancestors[:len(ancestors)-1]
 			}
 		}
 		if h, ok := t.handler.(*Router); ok {
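Since `walk` now visits every route (including ones without a path) and maintains the `ancestors` slice, a hedged usage sketch of `Router.Walk` — route templates here are assumptions:

```go
package main

import (
	"fmt"

	"github.com/gorilla/mux"
)

func main() {
	r := mux.NewRouter()
	api := r.PathPrefix("/api").Subrouter()
	api.Path("/users/{id}").Methods("GET").Name("user")

	// Walk visits subrouter routes too; ancestors carries the parent routes.
	_ = r.Walk(func(route *mux.Route, _ *mux.Router, ancestors []*mux.Route) error {
		if tpl, err := route.GetPathTemplate(); err == nil {
			fmt.Println(len(ancestors), tpl)
		}
		return nil
	})
}
```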
@@ -339,6 +412,11 @@ type RouteMatch struct {
 	Route   *Route
 	Handler http.Handler
 	Vars    map[string]string

+	// MatchErr is set to appropriate matching error
+	// It is set to ErrMethodMismatch if there is a mismatch in
+	// the request method and route method
+	MatchErr error
 }

 type contextKey int
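A short sketch (not part of the diff) of how the new `RouteMatch.MatchErr` field can be inspected directly with `Router.Match`; the route and request are illustrative:

```go
package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"

	"github.com/gorilla/mux"
)

func main() {
	r := mux.NewRouter()
	r.HandleFunc("/items", func(http.ResponseWriter, *http.Request) {}).Methods("GET")

	req := httptest.NewRequest("POST", "http://example.com/items", nil)
	var m mux.RouteMatch
	if !r.Match(req, &m) && m.MatchErr == mux.ErrMethodMismatch {
		fmt.Println("matched path but not method") // ServeHTTP would answer 405
	}
}
```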
@@ -380,28 +458,6 @@ func setCurrentRoute(r *http.Request, val interface{}) *http.Request {
 // Helpers
 // ----------------------------------------------------------------------------

-// getPath returns the escaped path if possible; doing what URL.EscapedPath()
-// which was added in go1.5 does
-func getPath(req *http.Request) string {
-	if req.RequestURI != "" {
-		// Extract the path from RequestURI (which is escaped unlike URL.Path)
-		// as detailed here as detailed in https://golang.org/pkg/net/url/#URL
-		// for < 1.5 server side workaround
-		// http://localhost/path/here?v=1 -> /path/here
-		path := req.RequestURI
-		path = strings.TrimPrefix(path, req.URL.Scheme+`://`)
-		path = strings.TrimPrefix(path, req.URL.Host)
-		if i := strings.LastIndex(path, "?"); i > -1 {
-			path = path[:i]
-		}
-		if i := strings.LastIndex(path, "#"); i > -1 {
-			path = path[:i]
-		}
-		return path
-	}
-	return req.URL.Path
-}
-
 // cleanPath returns the canonical path for p, eliminating . and .. elements.
 // Borrowed from the net/http package.
 func cleanPath(p string) string {
@@ -458,7 +514,7 @@ func mapFromPairsToString(pairs ...string) (map[string]string, error) {
 	return m, nil
 }

-// mapFromPairsToRegex converts variadic string paramers to a
+// mapFromPairsToRegex converts variadic string parameters to a
 // string to regex map.
 func mapFromPairsToRegex(pairs ...string) (map[string]*regexp.Regexp, error) {
 	length, err := checkPairs(pairs...)
@@ -540,3 +596,12 @@ func matchMapWithRegex(toCheck map[string]*regexp.Regexp, toMatch map[string][]s
 	}
 	return true
 }

+// methodNotAllowed replies to the request with an HTTP status code 405.
+func methodNotAllowed(w http.ResponseWriter, r *http.Request) {
+	w.WriteHeader(http.StatusMethodNotAllowed)
+}

+// methodNotAllowedHandler returns a simple request handler
+// that replies to each request with a status code 405.
+func methodNotAllowedHandler() http.Handler { return http.HandlerFunc(methodNotAllowed) }

106  vendor/github.com/gorilla/mux/regexp.go  (generated, vendored)
@@ -14,6 +14,20 @@ import (
 	"strings"
 )

+type routeRegexpOptions struct {
+	strictSlash    bool
+	useEncodedPath bool
+}

+type regexpType int

+const (
+	regexpTypePath   regexpType = 0
+	regexpTypeHost   regexpType = 1
+	regexpTypePrefix regexpType = 2
+	regexpTypeQuery  regexpType = 3
+)

 // newRouteRegexp parses a route template and returns a routeRegexp,
 // used to match a host, a path or a query string.
 //
@@ -24,7 +38,7 @@ import (
 // Previously we accepted only Python-like identifiers for variable
 // names ([a-zA-Z_][a-zA-Z0-9_]*), but currently the only restriction is that
 // name and pattern can't be empty, and names can't contain a colon.
-func newRouteRegexp(tpl string, matchHost, matchPrefix, matchQuery, strictSlash, useEncodedPath bool) (*routeRegexp, error) {
+func newRouteRegexp(tpl string, typ regexpType, options routeRegexpOptions) (*routeRegexp, error) {
 	// Check if it is well-formed.
 	idxs, errBraces := braceIndices(tpl)
 	if errBraces != nil {
@@ -34,19 +48,18 @@ func newRouteRegexp(tpl string, matchHost, matchPrefix, matchQuery, strictSlash,
 	template := tpl
 	// Now let's parse it.
 	defaultPattern := "[^/]+"
-	if matchQuery {
-		defaultPattern = "[^?&]*"
-	} else if matchHost {
+	if typ == regexpTypeQuery {
+		defaultPattern = ".*"
+	} else if typ == regexpTypeHost {
 		defaultPattern = "[^.]+"
-		matchPrefix = false
 	}
 	// Only match strict slash if not matching
-	if matchPrefix || matchHost || matchQuery {
-		strictSlash = false
+	if typ != regexpTypePath {
+		options.strictSlash = false
 	}
 	// Set a flag for strictSlash.
 	endSlash := false
-	if strictSlash && strings.HasSuffix(tpl, "/") {
+	if options.strictSlash && strings.HasSuffix(tpl, "/") {
 		tpl = tpl[:len(tpl)-1]
 		endSlash = true
 	}
@@ -88,18 +101,25 @@ func newRouteRegexp(tpl string, matchHost, matchPrefix, matchQuery, strictSlash,
 	// Add the remaining.
 	raw := tpl[end:]
 	pattern.WriteString(regexp.QuoteMeta(raw))
-	if strictSlash {
+	if options.strictSlash {
 		pattern.WriteString("[/]?")
 	}
-	if matchQuery {
+	if typ == regexpTypeQuery {
 		// Add the default pattern if the query value is empty
 		if queryVal := strings.SplitN(template, "=", 2)[1]; queryVal == "" {
 			pattern.WriteString(defaultPattern)
 		}
 	}
-	if !matchPrefix {
+	if typ != regexpTypePrefix {
 		pattern.WriteByte('$')
 	}

+	var wildcardHostPort bool
+	if typ == regexpTypeHost {
+		if !strings.Contains(pattern.String(), ":") {
+			wildcardHostPort = true
+		}
+	}
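For context, a hedged sketch of what the new wildcard host-port handling means in practice: a host template without an explicit `:port` now matches requests regardless of the port in the Host header. Hostnames and port below are assumptions.

```go
package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"

	"github.com/gorilla/mux"
)

func main() {
	r := mux.NewRouter()
	// No ":port" in the template, so the port of the incoming Host is ignored.
	r.Host("{sub}.example.com").Path("/ping").
		HandlerFunc(func(http.ResponseWriter, *http.Request) {})

	req := httptest.NewRequest("GET", "http://api.example.com:8080/ping", nil)
	var m mux.RouteMatch
	fmt.Println(r.Match(req, &m)) // true
}
```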
 	reverse.WriteString(raw)
 	if endSlash {
 		reverse.WriteByte('/')
@@ -119,14 +139,13 @@ func newRouteRegexp(tpl string, matchHost, matchPrefix, matchQuery, strictSlash,
 	// Done!
 	return &routeRegexp{
 		template: template,
-		matchHost:      matchHost,
-		matchQuery:     matchQuery,
-		strictSlash:    strictSlash,
-		useEncodedPath: useEncodedPath,
+		regexpType: typ,
+		options:    options,
 		regexp:  reg,
 		reverse: reverse.String(),
 		varsN:   varsN,
 		varsR:   varsR,
+		wildcardHostPort: wildcardHostPort,
 	}, nil
 }

@@ -135,15 +154,10 @@ func newRouteRegexp(tpl string, matchHost, matchPrefix, matchQuery, strictSlash,
 type routeRegexp struct {
 	// The unmodified template.
 	template string
-	// True for host match, false for path or query string match.
-	matchHost bool
-	// True for query string match, false for path and host match.
-	matchQuery bool
-	// The strictSlash value defined on the route, but disabled if PathPrefix was used.
-	strictSlash bool
-	// Determines whether to use encoded path from getPath function or unencoded
-	// req.URL.Path for path matching
-	useEncodedPath bool
+	// The type of match
+	regexpType regexpType
+	// Options for matching
+	options routeRegexpOptions
 	// Expanded regexp.
 	regexp *regexp.Regexp
 	// Reverse template.
@@ -152,22 +166,31 @@ type routeRegexp struct {
 	varsN []string
 	// Variable regexps (validators).
 	varsR []*regexp.Regexp
+	// Wildcard host-port (no strict port match in hostname)
+	wildcardHostPort bool
 }

 // Match matches the regexp against the URL host or path.
 func (r *routeRegexp) Match(req *http.Request, match *RouteMatch) bool {
-	if !r.matchHost {
-		if r.matchQuery {
+	if r.regexpType == regexpTypeHost {
+		host := getHost(req)
+		if r.wildcardHostPort {
+			// Don't be strict on the port match
+			if i := strings.Index(host, ":"); i != -1 {
+				host = host[:i]
+			}
+		}
+		return r.regexp.MatchString(host)
+	} else {
+		if r.regexpType == regexpTypeQuery {
 			return r.matchQueryString(req)
 		}
 		path := req.URL.Path
-		if r.useEncodedPath {
-			path = getPath(req)
+		if r.options.useEncodedPath {
+			path = req.URL.EscapedPath()
 		}
 		return r.regexp.MatchString(path)
 	}

-	return r.regexp.MatchString(getHost(req))
 }

 // url builds a URL part using the given values.
@@ -178,6 +201,9 @@ func (r *routeRegexp) url(values map[string]string) (string, error) {
 		if !ok {
 			return "", fmt.Errorf("mux: missing route variable %q", v)
 		}
+		if r.regexpType == regexpTypeQuery {
+			value = url.QueryEscape(value)
+		}
 		urlValues[k] = value
 	}
 	rv := fmt.Sprintf(r.reverse, urlValues...)
@@ -200,7 +226,7 @@ func (r *routeRegexp) url(values map[string]string) (string, error) {
 // For a URL with foo=bar&baz=ding, we return only the relevant key
 // value pair for the routeRegexp.
 func (r *routeRegexp) getURLQuery(req *http.Request) string {
-	if !r.matchQuery {
+	if r.regexpType != regexpTypeQuery {
 		return ""
 	}
 	templateKey := strings.SplitN(r.template, "=", 2)[0]
@@ -258,7 +284,7 @@ type routeRegexpGroup struct {
 }

 // setMatch extracts the variables from the URL once a route matches.
-func (v *routeRegexpGroup) setMatch(req *http.Request, m *RouteMatch, r *Route) {
+func (v routeRegexpGroup) setMatch(req *http.Request, m *RouteMatch, r *Route) {
 	// Store host variables.
 	if v.host != nil {
 		host := getHost(req)
@@ -269,7 +295,7 @@ func (v *routeRegexpGroup) setMatch(req *http.Request, m *RouteMatch, r *Route)
 	}
 	path := req.URL.Path
 	if r.useEncodedPath {
-		path = getPath(req)
+		path = req.URL.EscapedPath()
 	}
 	// Store path variables.
 	if v.path != nil {
@@ -277,7 +303,7 @@ func (v *routeRegexpGroup) setMatch(req *http.Request, m *RouteMatch, r *Route)
 		if len(matches) > 0 {
 			extractVars(path, matches, v.path.varsN, m.Vars)
 			// Check if we should redirect.
-			if v.path.strictSlash {
+			if v.path.options.strictSlash {
 				p1 := strings.HasSuffix(path, "/")
 				p2 := strings.HasSuffix(v.path.template, "/")
 				if p1 != p2 {
@@ -287,7 +313,7 @@ func (v *routeRegexpGroup) setMatch(req *http.Request, m *RouteMatch, r *Route)
 					} else {
 						u.Path += "/"
 					}
-					m.Handler = http.RedirectHandler(u.String(), 301)
+					m.Handler = http.RedirectHandler(u.String(), http.StatusMovedPermanently)
 				}
 			}
 		}
@@ -303,17 +329,13 @@ func (v *routeRegexpGroup) setMatch(req *http.Request, m *RouteMatch, r *Route)
 }

 // getHost tries its best to return the request host.
+// According to section 14.23 of RFC 2616 the Host header
+// can include the port number if the default value of 80 is not used.
 func getHost(r *http.Request) string {
 	if r.URL.IsAbs() {
 		return r.URL.Host
 	}
-	host := r.Host
-	// Slice off any port information.
-	if i := strings.Index(host, ":"); i != -1 {
-		host = host[:i]
-	}
-	return host
+	return r.Host
 }

 func extractVars(input string, matches []int, names []string, output map[string]string) {
250  vendor/github.com/gorilla/mux/route.go  (generated, vendored)

@@ -15,22 +15,8 @@ import (

 // Route stores information to match a request and build URLs.
 type Route struct {
-	// Parent where the route was registered (a Router).
-	parent parentRoute
 	// Request handler for the route.
 	handler http.Handler
-	// List of matchers.
-	matchers []matcher
-	// Manager for the variables from host and path.
-	regexp *routeRegexpGroup
-	// If true, when the path pattern is "/path/", accessing "/path" will
-	// redirect to the former and vice versa.
-	strictSlash bool
-	// If true, when the path pattern is "/path//to", accessing "/path//to"
-	// will not redirect
-	skipClean bool
-	// If true, "/path/foo%2Fbar/to" will match the path "/path/{var}/to"
-	useEncodedPath bool
 	// If true, this route never matches: it is only used to build URLs.
 	buildOnly bool
 	// The name used to build URLs.
@@ -38,9 +24,15 @@ type Route struct {
 	// Error resulted from building a route.
 	err error

-	buildVarsFunc BuildVarsFunc
+	// "global" reference to all named routes
+	namedRoutes map[string]*Route

+	// config possibly passed in from `Router`
+	routeConf
 }

+// SkipClean reports whether path cleaning is enabled for this route via
+// Router.SkipClean.
 func (r *Route) SkipClean() bool {
 	return r.skipClean
 }
@@ -50,12 +42,45 @@ func (r *Route) Match(req *http.Request, match *RouteMatch) bool {
 	if r.buildOnly || r.err != nil {
 		return false
 	}

+	var matchErr error

 	// Match everything.
 	for _, m := range r.matchers {
 		if matched := m.Match(req, match); !matched {
+			if _, ok := m.(methodMatcher); ok {
+				matchErr = ErrMethodMismatch
+				continue
+			}

+			// Ignore ErrNotFound errors. These errors arise from match call
+			// to Subrouters.
+			//
+			// This prevents subsequent matching subrouters from failing to
+			// run middleware. If not ignored, the middleware would see a
+			// non-nil MatchErr and be skipped, even when there was a
+			// matching route.
+			if match.MatchErr == ErrNotFound {
+				match.MatchErr = nil
+			}

+			matchErr = nil
 			return false
 		}
 	}

+	if matchErr != nil {
+		match.MatchErr = matchErr
+		return false
+	}

+	if match.MatchErr == ErrMethodMismatch {
+		// We found a route which matches request method, clear MatchErr
+		match.MatchErr = nil
+		// Then override the mis-matched handler
+		match.Handler = r.handler
+	}

 	// Yay, we have a match. Let's collect some info about it.
 	if match.Route == nil {
 		match.Route = r
@@ -66,10 +91,9 @@ func (r *Route) Match(req *http.Request, match *RouteMatch) bool {
 	if match.Vars == nil {
 		match.Vars = make(map[string]string)
 	}

 	// Set variables.
-	if r.regexp != nil {
 	r.regexp.setMatch(req, match, r)
-	}
 	return true
 }

@@ -111,7 +135,7 @@ func (r *Route) GetHandler() http.Handler {
 // Name -----------------------------------------------------------------------

 // Name sets the name for the route, used to build URLs.
-// If the name was registered already it will be overwritten.
+// It is an error to call Name more than once on a route.
 func (r *Route) Name(name string) *Route {
 	if r.name != "" {
 		r.err = fmt.Errorf("mux: route already has name %q, can't set %q",
@@ -119,7 +143,7 @@ func (r *Route) Name(name string) *Route {
 	}
 	if r.err == nil {
 		r.name = name
-		r.getNamedRoutes()[name] = r
+		r.namedRoutes[name] = r
 	}
 	return r
 }
@@ -147,12 +171,11 @@ func (r *Route) addMatcher(m matcher) *Route {
 }

 // addRegexpMatcher adds a host or path matcher and builder to a route.
-func (r *Route) addRegexpMatcher(tpl string, matchHost, matchPrefix, matchQuery bool) error {
+func (r *Route) addRegexpMatcher(tpl string, typ regexpType) error {
 	if r.err != nil {
 		return r.err
 	}
-	r.regexp = r.getRegexpGroup()
-	if !matchHost && !matchQuery {
+	if typ == regexpTypePath || typ == regexpTypePrefix {
 		if len(tpl) > 0 && tpl[0] != '/' {
 			return fmt.Errorf("mux: path must start with a slash, got %q", tpl)
 		}
@@ -160,7 +183,10 @@ func (r *Route) addRegexpMatcher(tpl string, matchHost, matchPrefix, matchQuery
 			tpl = strings.TrimRight(r.regexp.path.template, "/") + tpl
 		}
 	}
-	rr, err := newRouteRegexp(tpl, matchHost, matchPrefix, matchQuery, r.strictSlash, r.useEncodedPath)
+	rr, err := newRouteRegexp(tpl, typ, routeRegexpOptions{
+		strictSlash:    r.strictSlash,
+		useEncodedPath: r.useEncodedPath,
+	})
 	if err != nil {
 		return err
 	}
@@ -169,7 +195,7 @@ func (r *Route) addRegexpMatcher(tpl string, matchHost, matchPrefix, matchQuery
 			return err
 		}
 	}
-	if matchHost {
+	if typ == regexpTypeHost {
 		if r.regexp.path != nil {
 			if err = uniqueVars(rr.varsN, r.regexp.path.varsN); err != nil {
 				return err
@@ -182,7 +208,7 @@ func (r *Route) addRegexpMatcher(tpl string, matchHost, matchPrefix, matchQuery
 			return err
 		}
 	}
-	if matchQuery {
+	if typ == regexpTypeQuery {
 		r.regexp.queries = append(r.regexp.queries, rr)
 	} else {
 		r.regexp.path = rr
@@ -234,7 +260,8 @@ func (m headerRegexMatcher) Match(r *http.Request, match *RouteMatch) bool {
 //		"X-Requested-With", "XMLHttpRequest")
 //
 // The above route will only match if both the request header matches both regular expressions.
-// It the value is an empty string, it will match any value if the key is set.
+// If the value is an empty string, it will match any value if the key is set.
+// Use the start and end of string anchors (^ and $) to match an exact value.
 func (r *Route) HeadersRegexp(pairs ...string) *Route {
 	if r.err == nil {
 		var headers map[string]*regexp.Regexp
@@ -264,7 +291,7 @@ func (r *Route) HeadersRegexp(pairs ...string) *Route {
 // Variable names must be unique in a given route. They can be retrieved
 // calling mux.Vars(request).
 func (r *Route) Host(tpl string) *Route {
-	r.err = r.addRegexpMatcher(tpl, true, false, false)
+	r.err = r.addRegexpMatcher(tpl, regexpTypeHost)
 	return r
 }

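For context, a hedged sketch of the matcher methods whose internals change above (`Host`, `Path`, `Queries`, `HeadersRegexp` now register regexps by `regexpType` instead of boolean flags); the host, path and header values are illustrative assumptions:

```go
package main

import (
	"net/http"

	"github.com/gorilla/mux"
)

func main() {
	r := mux.NewRouter()
	r.Host("www.example.com").
		Path("/products/{key}").
		Queries("filter", "{filter}").
		HeadersRegexp("Content-Type", "application/(text|json)").
		HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
			w.WriteHeader(http.StatusOK)
		})
	http.ListenAndServe(":8080", r)
}
```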
@@ -324,7 +351,7 @@ func (r *Route) Methods(methods ...string) *Route {
 // Variable names must be unique in a given route. They can be retrieved
 // calling mux.Vars(request).
 func (r *Route) Path(tpl string) *Route {
-	r.err = r.addRegexpMatcher(tpl, false, false, false)
+	r.err = r.addRegexpMatcher(tpl, regexpTypePath)
 	return r
 }

@@ -340,7 +367,7 @@ func (r *Route) Path(tpl string) *Route {
 // Also note that the setting of Router.StrictSlash() has no effect on routes
 // with a PathPrefix matcher.
 func (r *Route) PathPrefix(tpl string) *Route {
-	r.err = r.addRegexpMatcher(tpl, false, true, false)
+	r.err = r.addRegexpMatcher(tpl, regexpTypePrefix)
 	return r
 }

@@ -356,7 +383,7 @@ func (r *Route) PathPrefix(tpl string) *Route {
 // The above route will only match if the URL contains the defined queries
 // values, e.g.: ?foo=bar&id=42.
 //
-// It the value is an empty string, it will match any value if the key is set.
+// If the value is an empty string, it will match any value if the key is set.
 //
 // Variables can define an optional regexp pattern to be matched:
 //
@@ -371,7 +398,7 @@ func (r *Route) Queries(pairs ...string) *Route {
 		return nil
 	}
 	for i := 0; i < length; i += 2 {
-		if r.err = r.addRegexpMatcher(pairs[i]+"="+pairs[i+1], false, false, true); r.err != nil {
+		if r.err = r.addRegexpMatcher(pairs[i]+"="+pairs[i+1], regexpTypeQuery); r.err != nil {
 			return r
 		}
 	}
@@ -394,6 +421,9 @@ func (r *Route) Schemes(schemes ...string) *Route {
 	for k, v := range schemes {
 		schemes[k] = strings.ToLower(v)
 	}
+	if len(schemes) > 0 {
+		r.buildScheme = schemes[0]
+	}
 	return r.addMatcher(schemeMatcher(schemes))
 }

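A short sketch (not part of the diff) of the effect of the new `buildScheme` assignment in `Schemes`: URL building now uses the first registered scheme instead of always defaulting to "http". Host and path are assumptions.

```go
package main

import (
	"fmt"
	"net/http"

	"github.com/gorilla/mux"
)

func main() {
	r := mux.NewRouter()
	route := r.Schemes("https").
		Host("secure.example.com").
		Path("/login").
		HandlerFunc(func(http.ResponseWriter, *http.Request) {})

	u, _ := route.URL()
	fmt.Println(u.String()) // https://secure.example.com/login
}
```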
@@ -406,7 +436,15 @@ type BuildVarsFunc func(map[string]string) map[string]string
 // BuildVarsFunc adds a custom function to be used to modify build variables
 // before a route's URL is built.
 func (r *Route) BuildVarsFunc(f BuildVarsFunc) *Route {
+	if r.buildVarsFunc != nil {
+		// compose the old and new functions
+		old := r.buildVarsFunc
+		r.buildVarsFunc = func(m map[string]string) map[string]string {
+			return f(old(m))
+		}
+	} else {
 		r.buildVarsFunc = f
+	}
 	return r
 }

@@ -425,7 +463,8 @@ func (r *Route) BuildVarsFunc(f BuildVarsFunc) *Route {
 // Here, the routes registered in the subrouter won't be tested if the host
 // doesn't match.
 func (r *Route) Subrouter() *Router {
-	router := &Router{parent: r, strictSlash: r.strictSlash}
+	// initialize a subrouter with a copy of the parent route's configuration
+	router := &Router{routeConf: copyRouteConf(r.routeConf), namedRoutes: r.namedRoutes}
 	r.addMatcher(router)
 	return router
 }
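A hedged sketch of the new `BuildVarsFunc` composition: registering a second function no longer overwrites the first, the two are chained (first registered runs first). The route and transformations below are illustrative only.

```go
package main

import (
	"fmt"
	"strings"

	"github.com/gorilla/mux"
)

func main() {
	r := mux.NewRouter()
	route := r.Path("/users/{name}").Name("user")

	route.BuildVarsFunc(func(v map[string]string) map[string]string {
		v["name"] = strings.ToLower(v["name"])
		return v
	})
	route.BuildVarsFunc(func(v map[string]string) map[string]string {
		v["name"] = strings.TrimSpace(v["name"])
		return v
	})

	u, _ := r.Get("user").URL("name", "  Alice ")
	fmt.Println(u.Path) // /users/alice
}
```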
@@ -469,30 +508,38 @@ func (r *Route) URL(pairs ...string) (*url.URL, error) {
 	if r.err != nil {
 		return nil, r.err
 	}
-	if r.regexp == nil {
-		return nil, errors.New("mux: route doesn't have a host or path")
-	}
 	values, err := r.prepareVars(pairs...)
 	if err != nil {
 		return nil, err
 	}
 	var scheme, host, path string
+	queries := make([]string, 0, len(r.regexp.queries))
 	if r.regexp.host != nil {
-		// Set a default scheme.
-		scheme = "http"
 		if host, err = r.regexp.host.url(values); err != nil {
 			return nil, err
 		}
+		scheme = "http"
+		if r.buildScheme != "" {
+			scheme = r.buildScheme
+		}
 	}
 	if r.regexp.path != nil {
 		if path, err = r.regexp.path.url(values); err != nil {
 			return nil, err
 		}
 	}
+	for _, q := range r.regexp.queries {
+		var query string
+		if query, err = q.url(values); err != nil {
+			return nil, err
+		}
+		queries = append(queries, query)
+	}
 	return &url.URL{
 		Scheme: scheme,
 		Host:   host,
 		Path:   path,
+		RawQuery: strings.Join(queries, "&"),
 	}, nil
 }

@@ -503,7 +550,7 @@ func (r *Route) URLHost(pairs ...string) (*url.URL, error) {
 	if r.err != nil {
 		return nil, r.err
 	}
-	if r.regexp == nil || r.regexp.host == nil {
+	if r.regexp.host == nil {
 		return nil, errors.New("mux: route doesn't have a host")
 	}
 	values, err := r.prepareVars(pairs...)
@@ -514,10 +561,14 @@ func (r *Route) URLHost(pairs ...string) (*url.URL, error) {
 	if err != nil {
 		return nil, err
 	}
-	return &url.URL{
+	u := &url.URL{
 		Scheme: "http",
 		Host:   host,
-	}, nil
+	}
+	if r.buildScheme != "" {
+		u.Scheme = r.buildScheme
+	}
+	return u, nil
 }

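Since `URL()` now fills `RawQuery` from the `Queries` templates (with query escaping via the regexp change earlier in this PR), a small illustrative sketch; the route name and values are assumptions:

```go
package main

import (
	"fmt"
	"net/http"

	"github.com/gorilla/mux"
)

func main() {
	r := mux.NewRouter()
	r.Path("/search").
		Queries("q", "{q}").
		HandlerFunc(func(http.ResponseWriter, *http.Request) {}).
		Name("search")

	u, _ := r.Get("search").URL("q", "go mux")
	fmt.Println(u.String()) // /search?q=go+mux
}
```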
 // URLPath builds the path part of the URL for a route. See Route.URL().
@@ -527,7 +578,7 @@ func (r *Route) URLPath(pairs ...string) (*url.URL, error) {
 	if r.err != nil {
 		return nil, r.err
 	}
-	if r.regexp == nil || r.regexp.path == nil {
+	if r.regexp.path == nil {
 		return nil, errors.New("mux: route doesn't have a path")
 	}
 	values, err := r.prepareVars(pairs...)
@@ -552,12 +603,80 @@ func (r *Route) GetPathTemplate() (string, error) {
 	if r.err != nil {
 		return "", r.err
 	}
-	if r.regexp == nil || r.regexp.path == nil {
+	if r.regexp.path == nil {
 		return "", errors.New("mux: route doesn't have a path")
 	}
 	return r.regexp.path.template, nil
 }

+// GetPathRegexp returns the expanded regular expression used to match route path.
+// This is useful for building simple REST API documentation and for instrumentation
+// against third-party services.
+// An error will be returned if the route does not define a path.
+func (r *Route) GetPathRegexp() (string, error) {
+	if r.err != nil {
+		return "", r.err
+	}
+	if r.regexp.path == nil {
+		return "", errors.New("mux: route does not have a path")
+	}
+	return r.regexp.path.regexp.String(), nil
+}

+// GetQueriesRegexp returns the expanded regular expressions used to match the
+// route queries.
+// This is useful for building simple REST API documentation and for instrumentation
+// against third-party services.
+// An error will be returned if the route does not have queries.
+func (r *Route) GetQueriesRegexp() ([]string, error) {
+	if r.err != nil {
+		return nil, r.err
+	}
+	if r.regexp.queries == nil {
+		return nil, errors.New("mux: route doesn't have queries")
+	}
+	var queries []string
+	for _, query := range r.regexp.queries {
+		queries = append(queries, query.regexp.String())
+	}
+	return queries, nil
+}

+// GetQueriesTemplates returns the templates used to build the
+// query matching.
+// This is useful for building simple REST API documentation and for instrumentation
+// against third-party services.
+// An error will be returned if the route does not define queries.
+func (r *Route) GetQueriesTemplates() ([]string, error) {
+	if r.err != nil {
+		return nil, r.err
+	}
+	if r.regexp.queries == nil {
+		return nil, errors.New("mux: route doesn't have queries")
+	}
+	var queries []string
+	for _, query := range r.regexp.queries {
+		queries = append(queries, query.template)
+	}
+	return queries, nil
+}

+// GetMethods returns the methods the route matches against
+// This is useful for building simple REST API documentation and for instrumentation
+// against third-party services.
+// An error will be returned if route does not have methods.
+func (r *Route) GetMethods() ([]string, error) {
+	if r.err != nil {
+		return nil, r.err
+	}
+	for _, m := range r.matchers {
+		if methods, ok := m.(methodMatcher); ok {
+			return []string(methods), nil
+		}
+	}
+	return nil, errors.New("mux: route doesn't have methods")
+}

 // GetHostTemplate returns the template used to build the
 // route match.
 // This is useful for building simple REST API documentation and for instrumentation
@@ -567,7 +686,7 @@ func (r *Route) GetHostTemplate() (string, error) {
 	if r.err != nil {
 		return "", r.err
 	}
-	if r.regexp == nil || r.regexp.host == nil {
+	if r.regexp.host == nil {
 		return "", errors.New("mux: route doesn't have a host")
 	}
 	return r.regexp.host.template, nil
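For reviewers, a compact sketch (not part of the diff) of the new introspection helpers added above, combined with `Walk`; the registered route is an assumption:

```go
package main

import (
	"fmt"

	"github.com/gorilla/mux"
)

func main() {
	r := mux.NewRouter()
	r.Path("/api/items/{id:[0-9]+}").Queries("page", "{page}").Methods("GET", "HEAD")

	_ = r.Walk(func(route *mux.Route, _ *mux.Router, _ []*mux.Route) error {
		tpl, _ := route.GetPathTemplate()
		re, _ := route.GetPathRegexp()
		qts, _ := route.GetQueriesTemplates()
		methods, _ := route.GetMethods()
		fmt.Println(tpl, re, qts, methods)
		return nil
	})
}
```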
@@ -584,53 +703,8 @@ func (r *Route) prepareVars(pairs ...string) (map[string]string, error) {
 }

 func (r *Route) buildVars(m map[string]string) map[string]string {
-	if r.parent != nil {
-		m = r.parent.buildVars(m)
-	}
 	if r.buildVarsFunc != nil {
 		m = r.buildVarsFunc(m)
 	}
 	return m
 }

-// ----------------------------------------------------------------------------
-// parentRoute
-// ----------------------------------------------------------------------------
-
-// parentRoute allows routes to know about parent host and path definitions.
-type parentRoute interface {
-	getNamedRoutes() map[string]*Route
-	getRegexpGroup() *routeRegexpGroup
-	buildVars(map[string]string) map[string]string
-}
-
-// getNamedRoutes returns the map where named routes are registered.
-func (r *Route) getNamedRoutes() map[string]*Route {
-	if r.parent == nil {
-		// During tests router is not always set.
-		r.parent = NewRouter()
-	}
-	return r.parent.getNamedRoutes()
-}
-
-// getRegexpGroup returns regexp definitions from this route.
-func (r *Route) getRegexpGroup() *routeRegexpGroup {
-	if r.regexp == nil {
-		if r.parent == nil {
-			// During tests router is not always set.
-			r.parent = NewRouter()
-		}
-		regexp := r.parent.getRegexpGroup()
-		if regexp == nil {
-			r.regexp = new(routeRegexpGroup)
-		} else {
-			// Copy.
-			r.regexp = &routeRegexpGroup{
-				host:    regexp.host,
-				path:    regexp.path,
-				queries: regexp.queries,
-			}
-		}
-	}
-	return r.regexp
-}
19  vendor/github.com/gorilla/mux/test_helpers.go  (generated, vendored, new file)

@@ -0,0 +1,19 @@
+// Copyright 2012 The Gorilla Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package mux
+
+import "net/http"
+
+// SetURLVars sets the URL variables for the given request, to be accessed via
+// mux.Vars for testing route behaviour. Arguments are not modified, a shallow
+// copy is returned.
+//
+// This API should only be used for testing purposes; it provides a way to
+// inject variables into the request context. Alternatively, URL variables
+// can be set by making a route that captures the required variables,
+// starting a server and sending the request to that server.
+func SetURLVars(r *http.Request, val map[string]string) *http.Request {
+	return setVars(r, val)
+}
vendor/github.com/konsorten/go-windows-terminal-sequences/LICENSE
generated
vendored
Normal file
9
vendor/github.com/konsorten/go-windows-terminal-sequences/LICENSE
generated
vendored
Normal file
|
@ -0,0 +1,9 @@
|
||||||
|
(The MIT License)
|
||||||
|
|
||||||
|
Copyright (c) 2017 marvin + konsorten GmbH (open-source@konsorten.de)
|
||||||
|
|
||||||
|
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the 'Software'), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
|
||||||
|
|
||||||
|
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
|
||||||
|
|
||||||
|
THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
40  vendor/github.com/konsorten/go-windows-terminal-sequences/README.md  (generated, vendored, new file)

@@ -0,0 +1,40 @@
+# Windows Terminal Sequences
+
+This library allow for enabling Windows terminal color support for Go.
+
+See [Console Virtual Terminal Sequences](https://docs.microsoft.com/en-us/windows/console/console-virtual-terminal-sequences) for details.
+
+## Usage
+
+```go
+import (
+	"syscall"
+
+	sequences "github.com/konsorten/go-windows-terminal-sequences"
+)
+
+func main() {
+	sequences.EnableVirtualTerminalProcessing(syscall.Stdout, true)
+}
+
+```
+
+## Authors
+
+The tool is sponsored by the [marvin + konsorten GmbH](http://www.konsorten.de).
+
+We thank all the authors who provided code to this library:
+
+* Felix Kollmann
+
+## License
+
+(The MIT License)
+
+Copyright (c) 2018 marvin + konsorten GmbH (open-source@konsorten.de)
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the 'Software'), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
1  vendor/github.com/konsorten/go-windows-terminal-sequences/go.mod  (generated, vendored, new file)

@@ -0,0 +1 @@
+module github.com/konsorten/go-windows-terminal-sequences
36  vendor/github.com/konsorten/go-windows-terminal-sequences/sequences.go  (generated, vendored, new file)

@@ -0,0 +1,36 @@
+// +build windows
+
+package sequences
+
+import (
+	"syscall"
+	"unsafe"
+)
+
+var (
+	kernel32Dll    *syscall.LazyDLL  = syscall.NewLazyDLL("Kernel32.dll")
+	setConsoleMode *syscall.LazyProc = kernel32Dll.NewProc("SetConsoleMode")
+)
+
+func EnableVirtualTerminalProcessing(stream syscall.Handle, enable bool) error {
+	const ENABLE_VIRTUAL_TERMINAL_PROCESSING uint32 = 0x4
+
+	var mode uint32
+	err := syscall.GetConsoleMode(syscall.Stdout, &mode)
+	if err != nil {
+		return err
+	}
+
+	if enable {
+		mode |= ENABLE_VIRTUAL_TERMINAL_PROCESSING
+	} else {
+		mode &^= ENABLE_VIRTUAL_TERMINAL_PROCESSING
+	}
+
+	ret, _, err := setConsoleMode.Call(uintptr(unsafe.Pointer(stream)), uintptr(mode))
+	if ret == 0 {
+		return err
+	}
+
+	return nil
+}
3 vendor/github.com/mitchellh/mapstructure/.travis.yml generated vendored
@@ -1,7 +1,8 @@
 language: go
 
 go:
-  - 1.4
+  - "1.11.x"
+  - tip
 
 script:
   - go test
21 vendor/github.com/mitchellh/mapstructure/CHANGELOG.md generated vendored Normal file
@@ -0,0 +1,21 @@
## 1.1.2

* Fix error when decode hook decodes interface implementation into interface
  type. [GH-140]

## 1.1.1

* Fix panic that can happen in `decodePtr`

## 1.1.0

* Added `StringToIPHookFunc` to convert `string` to `net.IP` and `net.IPNet` [GH-133]
* Support struct to struct decoding [GH-137]
* If source map value is nil, then destination map value is nil (instead of empty)
* If source slice value is nil, then destination slice value is nil (instead of empty)
* If source pointer is nil, then destination pointer is set to nil (instead of
  allocated zero value of type)

## 1.0.0

* Initial tagged stable release.
2 vendor/github.com/mitchellh/mapstructure/README.md generated vendored
@@ -1,4 +1,4 @@
-# mapstructure
+# mapstructure [![Godoc](https://godoc.org/github.com/mitchellh/mapstructure?status.svg)](https://godoc.org/github.com/mitchellh/mapstructure)
 
 mapstructure is a Go library for decoding generic map values to structures
 and vice versa, while providing helpful error handling.
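The README describes decoding generic map values into structures; for reference, here is a minimal, self-contained sketch of that flow. The `Person` type and its field names are illustrative and not part of the vendored code.

```go
package main

import (
	"fmt"

	"github.com/mitchellh/mapstructure"
)

// Person is a hypothetical target type; the `mapstructure` tags map the
// incoming map keys onto struct fields.
type Person struct {
	Name string `mapstructure:"name"`
	Age  int    `mapstructure:"age"`
}

func main() {
	input := map[string]interface{}{"name": "alice", "age": 30}

	var p Person
	if err := mapstructure.Decode(input, &p); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", p) // {Name:alice Age:30}
}
```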
84 vendor/github.com/mitchellh/mapstructure/decode_hooks.go generated vendored
@@ -2,6 +2,8 @@ package mapstructure
 
 import (
 	"errors"
+	"fmt"
+	"net"
 	"reflect"
 	"strconv"
 	"strings"
@@ -38,12 +40,6 @@ func DecodeHookExec(
 	raw DecodeHookFunc,
 	from reflect.Type, to reflect.Type,
 	data interface{}) (interface{}, error) {
-	// Build our arguments that reflect expects
-	argVals := make([]reflect.Value, 3)
-	argVals[0] = reflect.ValueOf(from)
-	argVals[1] = reflect.ValueOf(to)
-	argVals[2] = reflect.ValueOf(data)
-
 	switch f := typedDecodeHook(raw).(type) {
 	case DecodeHookFuncType:
 		return f(from, to, data)
@@ -72,7 +68,10 @@ func ComposeDecodeHookFunc(fs ...DecodeHookFunc) DecodeHookFunc {
 			}
 
 			// Modify the from kind to be correct with the new data
-			f = reflect.ValueOf(data).Type()
+			f = nil
+			if val := reflect.ValueOf(data); val.IsValid() {
+				f = val.Type()
+			}
 		}
 
 		return data, nil
@@ -118,6 +117,74 @@ func StringToTimeDurationHookFunc() DecodeHookFunc {
 	}
 }
 
+// StringToIPHookFunc returns a DecodeHookFunc that converts
+// strings to net.IP
+func StringToIPHookFunc() DecodeHookFunc {
+	return func(
+		f reflect.Type,
+		t reflect.Type,
+		data interface{}) (interface{}, error) {
+		if f.Kind() != reflect.String {
+			return data, nil
+		}
+		if t != reflect.TypeOf(net.IP{}) {
+			return data, nil
+		}
+
+		// Convert it by parsing
+		ip := net.ParseIP(data.(string))
+		if ip == nil {
+			return net.IP{}, fmt.Errorf("failed parsing ip %v", data)
+		}
+
+		return ip, nil
+	}
+}
+
+// StringToIPNetHookFunc returns a DecodeHookFunc that converts
+// strings to net.IPNet
+func StringToIPNetHookFunc() DecodeHookFunc {
+	return func(
+		f reflect.Type,
+		t reflect.Type,
+		data interface{}) (interface{}, error) {
+		if f.Kind() != reflect.String {
+			return data, nil
+		}
+		if t != reflect.TypeOf(net.IPNet{}) {
+			return data, nil
+		}
+
+		// Convert it by parsing
+		_, net, err := net.ParseCIDR(data.(string))
+		return net, err
+	}
+}
+
+// StringToTimeHookFunc returns a DecodeHookFunc that converts
+// strings to time.Time.
+func StringToTimeHookFunc(layout string) DecodeHookFunc {
+	return func(
+		f reflect.Type,
+		t reflect.Type,
+		data interface{}) (interface{}, error) {
+		if f.Kind() != reflect.String {
+			return data, nil
+		}
+		if t != reflect.TypeOf(time.Time{}) {
+			return data, nil
+		}
+
+		// Convert it by parsing
+		return time.Parse(layout, data.(string))
+	}
+}
+
+// WeaklyTypedHook is a DecodeHookFunc which adds support for weak typing to
+// the decoder.
+//
+// Note that this is significantly different from the WeaklyTypedInput option
+// of the DecoderConfig.
 func WeaklyTypedHook(
 	f reflect.Kind,
 	t reflect.Kind,
@@ -129,9 +196,8 @@ func WeaklyTypedHook(
 	case reflect.Bool:
 		if dataVal.Bool() {
 			return "1", nil
-		} else {
-			return "0", nil
 		}
+		return "0", nil
 	case reflect.Float32:
 		return strconv.FormatFloat(dataVal.Float(), 'f', -1, 64), nil
 	case reflect.Int:
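The hunks above introduce `StringToIPHookFunc` and related decode hooks; for reference, a minimal sketch of how such a hook is wired into a `DecoderConfig`. The `Endpoint` type is illustrative and not part of the vendored code.

```go
package main

import (
	"fmt"
	"net"

	"github.com/mitchellh/mapstructure"
)

// Endpoint is a hypothetical target type; the string value in the input map
// is converted to net.IP by the decode hook before normal decoding runs.
type Endpoint struct {
	Addr net.IP `mapstructure:"addr"`
}

func main() {
	var ep Endpoint
	dec, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{
		DecodeHook: mapstructure.StringToIPHookFunc(),
		Result:     &ep,
	})
	if err != nil {
		panic(err)
	}
	if err := dec.Decode(map[string]interface{}{"addr": "10.0.0.1"}); err != nil {
		panic(err)
	}
	fmt.Println(ep.Addr) // 10.0.0.1
}
```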
1 vendor/github.com/mitchellh/mapstructure/go.mod generated vendored Normal file
@@ -0,0 +1 @@
module github.com/mitchellh/mapstructure
575
vendor/github.com/mitchellh/mapstructure/mapstructure.go
generated
vendored
575
vendor/github.com/mitchellh/mapstructure/mapstructure.go
generated
vendored
|
@ -1,5 +1,5 @@
|
||||||
// The mapstructure package exposes functionality to convert an
|
// Package mapstructure exposes functionality to convert an arbitrary
|
||||||
// abitrary map[string]interface{} into a native Go structure.
|
// map[string]interface{} into a native Go structure.
|
||||||
//
|
//
|
||||||
// The Go structure can be arbitrarily complex, containing slices,
|
// The Go structure can be arbitrarily complex, containing slices,
|
||||||
// other structs, etc. and the decoder will properly decode nested
|
// other structs, etc. and the decoder will properly decode nested
|
||||||
|
@ -8,6 +8,7 @@
|
||||||
package mapstructure
|
package mapstructure
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"encoding/json"
|
||||||
"errors"
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
"reflect"
|
"reflect"
|
||||||
|
@ -31,7 +32,12 @@ import (
|
||||||
// both.
|
// both.
|
||||||
type DecodeHookFunc interface{}
|
type DecodeHookFunc interface{}
|
||||||
|
|
||||||
|
// DecodeHookFuncType is a DecodeHookFunc which has complete information about
|
||||||
|
// the source and target types.
|
||||||
type DecodeHookFuncType func(reflect.Type, reflect.Type, interface{}) (interface{}, error)
|
type DecodeHookFuncType func(reflect.Type, reflect.Type, interface{}) (interface{}, error)
|
||||||
|
|
||||||
|
// DecodeHookFuncKind is a DecodeHookFunc which knows only the Kinds of the
|
||||||
|
// source and target types.
|
||||||
type DecodeHookFuncKind func(reflect.Kind, reflect.Kind, interface{}) (interface{}, error)
|
type DecodeHookFuncKind func(reflect.Kind, reflect.Kind, interface{}) (interface{}, error)
|
||||||
|
|
||||||
// DecoderConfig is the configuration that is used to create a new decoder
|
// DecoderConfig is the configuration that is used to create a new decoder
|
||||||
|
@ -67,6 +73,10 @@ type DecoderConfig struct {
|
||||||
// FALSE, false, False. Anything else is an error)
|
// FALSE, false, False. Anything else is an error)
|
||||||
// - empty array = empty map and vice versa
|
// - empty array = empty map and vice versa
|
||||||
// - negative numbers to overflowed uint values (base 10)
|
// - negative numbers to overflowed uint values (base 10)
|
||||||
|
// - slice of maps to a merged map
|
||||||
|
// - single values are converted to slices if required. Each
|
||||||
|
// element is weakly decoded. For example: "4" can become []int{4}
|
||||||
|
// if the target type is an int slice.
|
||||||
//
|
//
|
||||||
WeaklyTypedInput bool
|
WeaklyTypedInput bool
|
||||||
|
|
||||||
|
@ -104,12 +114,12 @@ type Metadata struct {
|
||||||
Unused []string
|
Unused []string
|
||||||
}
|
}
|
||||||
|
|
||||||
// Decode takes a map and uses reflection to convert it into the
|
// Decode takes an input structure and uses reflection to translate it to
|
||||||
// given Go native structure. val must be a pointer to a struct.
|
// the output structure. output must be a pointer to a map or struct.
|
||||||
func Decode(m interface{}, rawVal interface{}) error {
|
func Decode(input interface{}, output interface{}) error {
|
||||||
config := &DecoderConfig{
|
config := &DecoderConfig{
|
||||||
Metadata: nil,
|
Metadata: nil,
|
||||||
Result: rawVal,
|
Result: output,
|
||||||
}
|
}
|
||||||
|
|
||||||
decoder, err := NewDecoder(config)
|
decoder, err := NewDecoder(config)
|
||||||
|
@ -117,7 +127,7 @@ func Decode(m interface{}, rawVal interface{}) error {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
return decoder.Decode(m)
|
return decoder.Decode(input)
|
||||||
}
|
}
|
||||||
|
|
||||||
// WeakDecode is the same as Decode but is shorthand to enable
|
// WeakDecode is the same as Decode but is shorthand to enable
|
||||||
|
@ -137,6 +147,40 @@ func WeakDecode(input, output interface{}) error {
|
||||||
return decoder.Decode(input)
|
return decoder.Decode(input)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// DecodeMetadata is the same as Decode, but is shorthand to
|
||||||
|
// enable metadata collection. See DecoderConfig for more info.
|
||||||
|
func DecodeMetadata(input interface{}, output interface{}, metadata *Metadata) error {
|
||||||
|
config := &DecoderConfig{
|
||||||
|
Metadata: metadata,
|
||||||
|
Result: output,
|
||||||
|
}
|
||||||
|
|
||||||
|
decoder, err := NewDecoder(config)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
return decoder.Decode(input)
|
||||||
|
}
|
||||||
|
|
||||||
|
// WeakDecodeMetadata is the same as Decode, but is shorthand to
|
||||||
|
// enable both WeaklyTypedInput and metadata collection. See
|
||||||
|
// DecoderConfig for more info.
|
||||||
|
func WeakDecodeMetadata(input interface{}, output interface{}, metadata *Metadata) error {
|
||||||
|
config := &DecoderConfig{
|
||||||
|
Metadata: metadata,
|
||||||
|
Result: output,
|
||||||
|
WeaklyTypedInput: true,
|
||||||
|
}
|
||||||
|
|
||||||
|
decoder, err := NewDecoder(config)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
return decoder.Decode(input)
|
||||||
|
}
|
||||||
|
|
||||||
// NewDecoder returns a new decoder for the given configuration. Once
|
// NewDecoder returns a new decoder for the given configuration. Once
|
||||||
// a decoder has been returned, the same configuration must not be used
|
// a decoder has been returned, the same configuration must not be used
|
||||||
// again.
|
// again.
|
||||||
|
@ -174,66 +218,91 @@ func NewDecoder(config *DecoderConfig) (*Decoder, error) {
|
||||||
|
|
||||||
// Decode decodes the given raw interface to the target pointer specified
|
// Decode decodes the given raw interface to the target pointer specified
|
||||||
// by the configuration.
|
// by the configuration.
|
||||||
func (d *Decoder) Decode(raw interface{}) error {
|
func (d *Decoder) Decode(input interface{}) error {
|
||||||
return d.decode("", raw, reflect.ValueOf(d.config.Result).Elem())
|
return d.decode("", input, reflect.ValueOf(d.config.Result).Elem())
|
||||||
}
|
}
|
||||||
|
|
||||||
// Decodes an unknown data type into a specific reflection value.
|
// Decodes an unknown data type into a specific reflection value.
|
||||||
func (d *Decoder) decode(name string, data interface{}, val reflect.Value) error {
|
func (d *Decoder) decode(name string, input interface{}, outVal reflect.Value) error {
|
||||||
if data == nil {
|
var inputVal reflect.Value
|
||||||
// If the data is nil, then we don't set anything.
|
if input != nil {
|
||||||
|
inputVal = reflect.ValueOf(input)
|
||||||
|
|
||||||
|
// We need to check here if input is a typed nil. Typed nils won't
|
||||||
|
// match the "input == nil" below so we check that here.
|
||||||
|
if inputVal.Kind() == reflect.Ptr && inputVal.IsNil() {
|
||||||
|
input = nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if input == nil {
|
||||||
|
// If the data is nil, then we don't set anything, unless ZeroFields is set
|
||||||
|
// to true.
|
||||||
|
if d.config.ZeroFields {
|
||||||
|
outVal.Set(reflect.Zero(outVal.Type()))
|
||||||
|
|
||||||
|
if d.config.Metadata != nil && name != "" {
|
||||||
|
d.config.Metadata.Keys = append(d.config.Metadata.Keys, name)
|
||||||
|
}
|
||||||
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
dataVal := reflect.ValueOf(data)
|
if !inputVal.IsValid() {
|
||||||
if !dataVal.IsValid() {
|
// If the input value is invalid, then we just set the value
|
||||||
// If the data value is invalid, then we just set the value
|
|
||||||
// to be the zero value.
|
// to be the zero value.
|
||||||
val.Set(reflect.Zero(val.Type()))
|
outVal.Set(reflect.Zero(outVal.Type()))
|
||||||
|
if d.config.Metadata != nil && name != "" {
|
||||||
|
d.config.Metadata.Keys = append(d.config.Metadata.Keys, name)
|
||||||
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
if d.config.DecodeHook != nil {
|
if d.config.DecodeHook != nil {
|
||||||
// We have a DecodeHook, so let's pre-process the data.
|
// We have a DecodeHook, so let's pre-process the input.
|
||||||
var err error
|
var err error
|
||||||
data, err = DecodeHookExec(
|
input, err = DecodeHookExec(
|
||||||
d.config.DecodeHook,
|
d.config.DecodeHook,
|
||||||
dataVal.Type(), val.Type(), data)
|
inputVal.Type(), outVal.Type(), input)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return fmt.Errorf("error decoding '%s': %s", name, err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
var err error
|
var err error
|
||||||
dataKind := getKind(val)
|
outputKind := getKind(outVal)
|
||||||
switch dataKind {
|
switch outputKind {
|
||||||
case reflect.Bool:
|
case reflect.Bool:
|
||||||
err = d.decodeBool(name, data, val)
|
err = d.decodeBool(name, input, outVal)
|
||||||
case reflect.Interface:
|
case reflect.Interface:
|
||||||
err = d.decodeBasic(name, data, val)
|
err = d.decodeBasic(name, input, outVal)
|
||||||
case reflect.String:
|
case reflect.String:
|
||||||
err = d.decodeString(name, data, val)
|
err = d.decodeString(name, input, outVal)
|
||||||
case reflect.Int:
|
case reflect.Int:
|
||||||
err = d.decodeInt(name, data, val)
|
err = d.decodeInt(name, input, outVal)
|
||||||
case reflect.Uint:
|
case reflect.Uint:
|
||||||
err = d.decodeUint(name, data, val)
|
err = d.decodeUint(name, input, outVal)
|
||||||
case reflect.Float32:
|
case reflect.Float32:
|
||||||
err = d.decodeFloat(name, data, val)
|
err = d.decodeFloat(name, input, outVal)
|
||||||
case reflect.Struct:
|
case reflect.Struct:
|
||||||
err = d.decodeStruct(name, data, val)
|
err = d.decodeStruct(name, input, outVal)
|
||||||
case reflect.Map:
|
case reflect.Map:
|
||||||
err = d.decodeMap(name, data, val)
|
err = d.decodeMap(name, input, outVal)
|
||||||
case reflect.Ptr:
|
case reflect.Ptr:
|
||||||
err = d.decodePtr(name, data, val)
|
err = d.decodePtr(name, input, outVal)
|
||||||
case reflect.Slice:
|
case reflect.Slice:
|
||||||
err = d.decodeSlice(name, data, val)
|
err = d.decodeSlice(name, input, outVal)
|
||||||
|
case reflect.Array:
|
||||||
|
err = d.decodeArray(name, input, outVal)
|
||||||
|
case reflect.Func:
|
||||||
|
err = d.decodeFunc(name, input, outVal)
|
||||||
default:
|
default:
|
||||||
// If we reached this point then we weren't able to decode it
|
// If we reached this point then we weren't able to decode it
|
||||||
return fmt.Errorf("%s: unsupported type: %s", name, dataKind)
|
return fmt.Errorf("%s: unsupported type: %s", name, outputKind)
|
||||||
}
|
}
|
||||||
|
|
||||||
// If we reached here, then we successfully decoded SOMETHING, so
|
// If we reached here, then we successfully decoded SOMETHING, so
|
||||||
// mark the key as used if we're tracking metadata.
|
// mark the key as used if we're tracking metadata.
|
||||||
if d.config.Metadata != nil && name != "" {
|
if d.config.Metadata != nil && name != "" {
|
||||||
d.config.Metadata.Keys = append(d.config.Metadata.Keys, name)
|
d.config.Metadata.Keys = append(d.config.Metadata.Keys, name)
|
||||||
}
|
}
|
||||||
|
@ -244,7 +313,23 @@ func (d *Decoder) decode(name string, data interface{}, val reflect.Value) error
|
||||||
// This decodes a basic type (bool, int, string, etc.) and sets the
|
// This decodes a basic type (bool, int, string, etc.) and sets the
|
||||||
// value to "data" of that type.
|
// value to "data" of that type.
|
||||||
func (d *Decoder) decodeBasic(name string, data interface{}, val reflect.Value) error {
|
func (d *Decoder) decodeBasic(name string, data interface{}, val reflect.Value) error {
|
||||||
|
if val.IsValid() && val.Elem().IsValid() {
|
||||||
|
return d.decode(name, data, val.Elem())
|
||||||
|
}
|
||||||
|
|
||||||
dataVal := reflect.ValueOf(data)
|
dataVal := reflect.ValueOf(data)
|
||||||
|
|
||||||
|
// If the input data is a pointer, and the assigned type is the dereference
|
||||||
|
// of that exact pointer, then indirect it so that we can assign it.
|
||||||
|
// Example: *string to string
|
||||||
|
if dataVal.Kind() == reflect.Ptr && dataVal.Type().Elem() == val.Type() {
|
||||||
|
dataVal = reflect.Indirect(dataVal)
|
||||||
|
}
|
||||||
|
|
||||||
|
if !dataVal.IsValid() {
|
||||||
|
dataVal = reflect.Zero(val.Type())
|
||||||
|
}
|
||||||
|
|
||||||
dataValType := dataVal.Type()
|
dataValType := dataVal.Type()
|
||||||
if !dataValType.AssignableTo(val.Type()) {
|
if !dataValType.AssignableTo(val.Type()) {
|
||||||
return fmt.Errorf(
|
return fmt.Errorf(
|
||||||
|
@ -257,7 +342,7 @@ func (d *Decoder) decodeBasic(name string, data interface{}, val reflect.Value)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (d *Decoder) decodeString(name string, data interface{}, val reflect.Value) error {
|
func (d *Decoder) decodeString(name string, data interface{}, val reflect.Value) error {
|
||||||
dataVal := reflect.ValueOf(data)
|
dataVal := reflect.Indirect(reflect.ValueOf(data))
|
||||||
dataKind := getKind(dataVal)
|
dataKind := getKind(dataVal)
|
||||||
|
|
||||||
converted := true
|
converted := true
|
||||||
|
@ -276,12 +361,22 @@ func (d *Decoder) decodeString(name string, data interface{}, val reflect.Value)
|
||||||
val.SetString(strconv.FormatUint(dataVal.Uint(), 10))
|
val.SetString(strconv.FormatUint(dataVal.Uint(), 10))
|
||||||
case dataKind == reflect.Float32 && d.config.WeaklyTypedInput:
|
case dataKind == reflect.Float32 && d.config.WeaklyTypedInput:
|
||||||
val.SetString(strconv.FormatFloat(dataVal.Float(), 'f', -1, 64))
|
val.SetString(strconv.FormatFloat(dataVal.Float(), 'f', -1, 64))
|
||||||
case dataKind == reflect.Slice && d.config.WeaklyTypedInput:
|
case dataKind == reflect.Slice && d.config.WeaklyTypedInput,
|
||||||
|
dataKind == reflect.Array && d.config.WeaklyTypedInput:
|
||||||
dataType := dataVal.Type()
|
dataType := dataVal.Type()
|
||||||
elemKind := dataType.Elem().Kind()
|
elemKind := dataType.Elem().Kind()
|
||||||
switch {
|
switch elemKind {
|
||||||
case elemKind == reflect.Uint8:
|
case reflect.Uint8:
|
||||||
val.SetString(string(dataVal.Interface().([]uint8)))
|
var uints []uint8
|
||||||
|
if dataKind == reflect.Array {
|
||||||
|
uints = make([]uint8, dataVal.Len(), dataVal.Len())
|
||||||
|
for i := range uints {
|
||||||
|
uints[i] = dataVal.Index(i).Interface().(uint8)
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
uints = dataVal.Interface().([]uint8)
|
||||||
|
}
|
||||||
|
val.SetString(string(uints))
|
||||||
default:
|
default:
|
||||||
converted = false
|
converted = false
|
||||||
}
|
}
|
||||||
|
@ -299,8 +394,9 @@ func (d *Decoder) decodeString(name string, data interface{}, val reflect.Value)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (d *Decoder) decodeInt(name string, data interface{}, val reflect.Value) error {
|
func (d *Decoder) decodeInt(name string, data interface{}, val reflect.Value) error {
|
||||||
dataVal := reflect.ValueOf(data)
|
dataVal := reflect.Indirect(reflect.ValueOf(data))
|
||||||
dataKind := getKind(dataVal)
|
dataKind := getKind(dataVal)
|
||||||
|
dataType := dataVal.Type()
|
||||||
|
|
||||||
switch {
|
switch {
|
||||||
case dataKind == reflect.Int:
|
case dataKind == reflect.Int:
|
||||||
|
@ -322,6 +418,14 @@ func (d *Decoder) decodeInt(name string, data interface{}, val reflect.Value) er
|
||||||
} else {
|
} else {
|
||||||
return fmt.Errorf("cannot parse '%s' as int: %s", name, err)
|
return fmt.Errorf("cannot parse '%s' as int: %s", name, err)
|
||||||
}
|
}
|
||||||
|
case dataType.PkgPath() == "encoding/json" && dataType.Name() == "Number":
|
||||||
|
jn := data.(json.Number)
|
||||||
|
i, err := jn.Int64()
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf(
|
||||||
|
"error decoding json.Number into %s: %s", name, err)
|
||||||
|
}
|
||||||
|
val.SetInt(i)
|
||||||
default:
|
default:
|
||||||
return fmt.Errorf(
|
return fmt.Errorf(
|
||||||
"'%s' expected type '%s', got unconvertible type '%s'",
|
"'%s' expected type '%s', got unconvertible type '%s'",
|
||||||
|
@ -332,7 +436,7 @@ func (d *Decoder) decodeInt(name string, data interface{}, val reflect.Value) er
|
||||||
}
|
}
|
||||||
|
|
||||||
func (d *Decoder) decodeUint(name string, data interface{}, val reflect.Value) error {
|
func (d *Decoder) decodeUint(name string, data interface{}, val reflect.Value) error {
|
||||||
dataVal := reflect.ValueOf(data)
|
dataVal := reflect.Indirect(reflect.ValueOf(data))
|
||||||
dataKind := getKind(dataVal)
|
dataKind := getKind(dataVal)
|
||||||
|
|
||||||
switch {
|
switch {
|
||||||
|
@ -375,7 +479,7 @@ func (d *Decoder) decodeUint(name string, data interface{}, val reflect.Value) e
|
||||||
}
|
}
|
||||||
|
|
||||||
func (d *Decoder) decodeBool(name string, data interface{}, val reflect.Value) error {
|
func (d *Decoder) decodeBool(name string, data interface{}, val reflect.Value) error {
|
||||||
dataVal := reflect.ValueOf(data)
|
dataVal := reflect.Indirect(reflect.ValueOf(data))
|
||||||
dataKind := getKind(dataVal)
|
dataKind := getKind(dataVal)
|
||||||
|
|
||||||
switch {
|
switch {
|
||||||
|
@ -406,8 +510,9 @@ func (d *Decoder) decodeBool(name string, data interface{}, val reflect.Value) e
|
||||||
}
|
}
|
||||||
|
|
||||||
func (d *Decoder) decodeFloat(name string, data interface{}, val reflect.Value) error {
|
func (d *Decoder) decodeFloat(name string, data interface{}, val reflect.Value) error {
|
||||||
dataVal := reflect.ValueOf(data)
|
dataVal := reflect.Indirect(reflect.ValueOf(data))
|
||||||
dataKind := getKind(dataVal)
|
dataKind := getKind(dataVal)
|
||||||
|
dataType := dataVal.Type()
|
||||||
|
|
||||||
switch {
|
switch {
|
||||||
case dataKind == reflect.Int:
|
case dataKind == reflect.Int:
|
||||||
|
@ -415,7 +520,7 @@ func (d *Decoder) decodeFloat(name string, data interface{}, val reflect.Value)
|
||||||
case dataKind == reflect.Uint:
|
case dataKind == reflect.Uint:
|
||||||
val.SetFloat(float64(dataVal.Uint()))
|
val.SetFloat(float64(dataVal.Uint()))
|
||||||
case dataKind == reflect.Float32:
|
case dataKind == reflect.Float32:
|
||||||
val.SetFloat(float64(dataVal.Float()))
|
val.SetFloat(dataVal.Float())
|
||||||
case dataKind == reflect.Bool && d.config.WeaklyTypedInput:
|
case dataKind == reflect.Bool && d.config.WeaklyTypedInput:
|
||||||
if dataVal.Bool() {
|
if dataVal.Bool() {
|
||||||
val.SetFloat(1)
|
val.SetFloat(1)
|
||||||
|
@ -429,6 +534,14 @@ func (d *Decoder) decodeFloat(name string, data interface{}, val reflect.Value)
|
||||||
} else {
|
} else {
|
||||||
return fmt.Errorf("cannot parse '%s' as float: %s", name, err)
|
return fmt.Errorf("cannot parse '%s' as float: %s", name, err)
|
||||||
}
|
}
|
||||||
|
case dataType.PkgPath() == "encoding/json" && dataType.Name() == "Number":
|
||||||
|
jn := data.(json.Number)
|
||||||
|
i, err := jn.Float64()
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf(
|
||||||
|
"error decoding json.Number into %s: %s", name, err)
|
||||||
|
}
|
||||||
|
val.SetFloat(i)
|
||||||
default:
|
default:
|
||||||
return fmt.Errorf(
|
return fmt.Errorf(
|
||||||
"'%s' expected type '%s', got unconvertible type '%s'",
|
"'%s' expected type '%s', got unconvertible type '%s'",
|
||||||
|
@ -453,23 +566,68 @@ func (d *Decoder) decodeMap(name string, data interface{}, val reflect.Value) er
|
||||||
valMap = reflect.MakeMap(mapType)
|
valMap = reflect.MakeMap(mapType)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Check input type
|
// Check input type and based on the input type jump to the proper func
|
||||||
dataVal := reflect.Indirect(reflect.ValueOf(data))
|
dataVal := reflect.Indirect(reflect.ValueOf(data))
|
||||||
if dataVal.Kind() != reflect.Map {
|
switch dataVal.Kind() {
|
||||||
// Accept empty array/slice instead of an empty map in weakly typed mode
|
case reflect.Map:
|
||||||
if d.config.WeaklyTypedInput &&
|
return d.decodeMapFromMap(name, dataVal, val, valMap)
|
||||||
(dataVal.Kind() == reflect.Slice || dataVal.Kind() == reflect.Array) &&
|
|
||||||
dataVal.Len() == 0 {
|
case reflect.Struct:
|
||||||
val.Set(valMap)
|
return d.decodeMapFromStruct(name, dataVal, val, valMap)
|
||||||
return nil
|
|
||||||
} else {
|
case reflect.Array, reflect.Slice:
|
||||||
|
if d.config.WeaklyTypedInput {
|
||||||
|
return d.decodeMapFromSlice(name, dataVal, val, valMap)
|
||||||
|
}
|
||||||
|
|
||||||
|
fallthrough
|
||||||
|
|
||||||
|
default:
|
||||||
return fmt.Errorf("'%s' expected a map, got '%s'", name, dataVal.Kind())
|
return fmt.Errorf("'%s' expected a map, got '%s'", name, dataVal.Kind())
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (d *Decoder) decodeMapFromSlice(name string, dataVal reflect.Value, val reflect.Value, valMap reflect.Value) error {
|
||||||
|
// Special case for BC reasons (covered by tests)
|
||||||
|
if dataVal.Len() == 0 {
|
||||||
|
val.Set(valMap)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
for i := 0; i < dataVal.Len(); i++ {
|
||||||
|
err := d.decode(
|
||||||
|
fmt.Sprintf("%s[%d]", name, i),
|
||||||
|
dataVal.Index(i).Interface(), val)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *Decoder) decodeMapFromMap(name string, dataVal reflect.Value, val reflect.Value, valMap reflect.Value) error {
|
||||||
|
valType := val.Type()
|
||||||
|
valKeyType := valType.Key()
|
||||||
|
valElemType := valType.Elem()
|
||||||
|
|
||||||
// Accumulate errors
|
// Accumulate errors
|
||||||
errors := make([]string, 0)
|
errors := make([]string, 0)
|
||||||
|
|
||||||
|
// If the input data is empty, then we just match what the input data is.
|
||||||
|
if dataVal.Len() == 0 {
|
||||||
|
if dataVal.IsNil() {
|
||||||
|
if !val.IsNil() {
|
||||||
|
val.Set(dataVal)
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
// Set to empty allocated value
|
||||||
|
val.Set(valMap)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
for _, k := range dataVal.MapKeys() {
|
for _, k := range dataVal.MapKeys() {
|
||||||
fieldName := fmt.Sprintf("%s[%s]", name, k)
|
fieldName := fmt.Sprintf("%s[%s]", name, k)
|
||||||
|
|
||||||
|
@ -502,17 +660,141 @@ func (d *Decoder) decodeMap(name string, data interface{}, val reflect.Value) er
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (d *Decoder) decodeMapFromStruct(name string, dataVal reflect.Value, val reflect.Value, valMap reflect.Value) error {
|
||||||
|
typ := dataVal.Type()
|
||||||
|
for i := 0; i < typ.NumField(); i++ {
|
||||||
|
// Get the StructField first since this is a cheap operation. If the
|
||||||
|
// field is unexported, then ignore it.
|
||||||
|
f := typ.Field(i)
|
||||||
|
if f.PkgPath != "" {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
// Next get the actual value of this field and verify it is assignable
|
||||||
|
// to the map value.
|
||||||
|
v := dataVal.Field(i)
|
||||||
|
if !v.Type().AssignableTo(valMap.Type().Elem()) {
|
||||||
|
return fmt.Errorf("cannot assign type '%s' to map value field of type '%s'", v.Type(), valMap.Type().Elem())
|
||||||
|
}
|
||||||
|
|
||||||
|
tagValue := f.Tag.Get(d.config.TagName)
|
||||||
|
tagParts := strings.Split(tagValue, ",")
|
||||||
|
|
||||||
|
// Determine the name of the key in the map
|
||||||
|
keyName := f.Name
|
||||||
|
if tagParts[0] != "" {
|
||||||
|
if tagParts[0] == "-" {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
keyName = tagParts[0]
|
||||||
|
}
|
||||||
|
|
||||||
|
// If "squash" is specified in the tag, we squash the field down.
|
||||||
|
squash := false
|
||||||
|
for _, tag := range tagParts[1:] {
|
||||||
|
if tag == "squash" {
|
||||||
|
squash = true
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if squash && v.Kind() != reflect.Struct {
|
||||||
|
return fmt.Errorf("cannot squash non-struct type '%s'", v.Type())
|
||||||
|
}
|
||||||
|
|
||||||
|
switch v.Kind() {
|
||||||
|
// this is an embedded struct, so handle it differently
|
||||||
|
case reflect.Struct:
|
||||||
|
x := reflect.New(v.Type())
|
||||||
|
x.Elem().Set(v)
|
||||||
|
|
||||||
|
vType := valMap.Type()
|
||||||
|
vKeyType := vType.Key()
|
||||||
|
vElemType := vType.Elem()
|
||||||
|
mType := reflect.MapOf(vKeyType, vElemType)
|
||||||
|
vMap := reflect.MakeMap(mType)
|
||||||
|
|
||||||
|
err := d.decode(keyName, x.Interface(), vMap)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if squash {
|
||||||
|
for _, k := range vMap.MapKeys() {
|
||||||
|
valMap.SetMapIndex(k, vMap.MapIndex(k))
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
valMap.SetMapIndex(reflect.ValueOf(keyName), vMap)
|
||||||
|
}
|
||||||
|
|
||||||
|
default:
|
||||||
|
valMap.SetMapIndex(reflect.ValueOf(keyName), v)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if val.CanAddr() {
|
||||||
|
val.Set(valMap)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
func (d *Decoder) decodePtr(name string, data interface{}, val reflect.Value) error {
|
func (d *Decoder) decodePtr(name string, data interface{}, val reflect.Value) error {
|
||||||
|
// If the input data is nil, then we want to just set the output
|
||||||
|
// pointer to be nil as well.
|
||||||
|
isNil := data == nil
|
||||||
|
if !isNil {
|
||||||
|
switch v := reflect.Indirect(reflect.ValueOf(data)); v.Kind() {
|
||||||
|
case reflect.Chan,
|
||||||
|
reflect.Func,
|
||||||
|
reflect.Interface,
|
||||||
|
reflect.Map,
|
||||||
|
reflect.Ptr,
|
||||||
|
reflect.Slice:
|
||||||
|
isNil = v.IsNil()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if isNil {
|
||||||
|
if !val.IsNil() && val.CanSet() {
|
||||||
|
nilValue := reflect.New(val.Type()).Elem()
|
||||||
|
val.Set(nilValue)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
// Create an element of the concrete (non pointer) type and decode
|
// Create an element of the concrete (non pointer) type and decode
|
||||||
// into that. Then set the value of the pointer to this type.
|
// into that. Then set the value of the pointer to this type.
|
||||||
valType := val.Type()
|
valType := val.Type()
|
||||||
valElemType := valType.Elem()
|
valElemType := valType.Elem()
|
||||||
realVal := reflect.New(valElemType)
|
if val.CanSet() {
|
||||||
|
realVal := val
|
||||||
|
if realVal.IsNil() || d.config.ZeroFields {
|
||||||
|
realVal = reflect.New(valElemType)
|
||||||
|
}
|
||||||
|
|
||||||
if err := d.decode(name, data, reflect.Indirect(realVal)); err != nil {
|
if err := d.decode(name, data, reflect.Indirect(realVal)); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
val.Set(realVal)
|
val.Set(realVal)
|
||||||
|
} else {
|
||||||
|
if err := d.decode(name, data, reflect.Indirect(val)); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *Decoder) decodeFunc(name string, data interface{}, val reflect.Value) error {
|
||||||
|
// Create an element of the concrete (non pointer) type and decode
|
||||||
|
// into that. Then set the value of the pointer to this type.
|
||||||
|
dataVal := reflect.Indirect(reflect.ValueOf(data))
|
||||||
|
if val.Type() != dataVal.Type() {
|
||||||
|
return fmt.Errorf(
|
||||||
|
"'%s' expected type '%s', got unconvertible type '%s'",
|
||||||
|
name, val.Type(), dataVal.Type())
|
||||||
|
}
|
||||||
|
val.Set(dataVal)
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -523,26 +805,58 @@ func (d *Decoder) decodeSlice(name string, data interface{}, val reflect.Value)
|
||||||
valElemType := valType.Elem()
|
valElemType := valType.Elem()
|
||||||
sliceType := reflect.SliceOf(valElemType)
|
sliceType := reflect.SliceOf(valElemType)
|
||||||
|
|
||||||
// Check input type
|
valSlice := val
|
||||||
if dataValKind != reflect.Array && dataValKind != reflect.Slice {
|
if valSlice.IsNil() || d.config.ZeroFields {
|
||||||
// Accept empty map instead of array/slice in weakly typed mode
|
if d.config.WeaklyTypedInput {
|
||||||
if d.config.WeaklyTypedInput && dataVal.Kind() == reflect.Map && dataVal.Len() == 0 {
|
switch {
|
||||||
|
// Slice and array we use the normal logic
|
||||||
|
case dataValKind == reflect.Slice, dataValKind == reflect.Array:
|
||||||
|
break
|
||||||
|
|
||||||
|
// Empty maps turn into empty slices
|
||||||
|
case dataValKind == reflect.Map:
|
||||||
|
if dataVal.Len() == 0 {
|
||||||
val.Set(reflect.MakeSlice(sliceType, 0, 0))
|
val.Set(reflect.MakeSlice(sliceType, 0, 0))
|
||||||
return nil
|
return nil
|
||||||
} else {
|
}
|
||||||
return fmt.Errorf(
|
// Create slice of maps of other sizes
|
||||||
"'%s': source data must be an array or slice, got %s", name, dataValKind)
|
return d.decodeSlice(name, []interface{}{data}, val)
|
||||||
|
|
||||||
|
case dataValKind == reflect.String && valElemType.Kind() == reflect.Uint8:
|
||||||
|
return d.decodeSlice(name, []byte(dataVal.String()), val)
|
||||||
|
|
||||||
|
// All other types we try to convert to the slice type
|
||||||
|
// and "lift" it into it. i.e. a string becomes a string slice.
|
||||||
|
default:
|
||||||
|
// Just re-try this function with data as a slice.
|
||||||
|
return d.decodeSlice(name, []interface{}{data}, val)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Check input type
|
||||||
|
if dataValKind != reflect.Array && dataValKind != reflect.Slice {
|
||||||
|
return fmt.Errorf(
|
||||||
|
"'%s': source data must be an array or slice, got %s", name, dataValKind)
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
// If the input value is empty, then don't allocate since non-nil != nil
|
||||||
|
if dataVal.Len() == 0 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
// Make a new slice to hold our result, same size as the original data.
|
// Make a new slice to hold our result, same size as the original data.
|
||||||
valSlice := reflect.MakeSlice(sliceType, dataVal.Len(), dataVal.Len())
|
valSlice = reflect.MakeSlice(sliceType, dataVal.Len(), dataVal.Len())
|
||||||
|
}
|
||||||
|
|
||||||
// Accumulate any errors
|
// Accumulate any errors
|
||||||
errors := make([]string, 0)
|
errors := make([]string, 0)
|
||||||
|
|
||||||
for i := 0; i < dataVal.Len(); i++ {
|
for i := 0; i < dataVal.Len(); i++ {
|
||||||
currentData := dataVal.Index(i).Interface()
|
currentData := dataVal.Index(i).Interface()
|
||||||
|
for valSlice.Len() <= i {
|
||||||
|
valSlice = reflect.Append(valSlice, reflect.Zero(valElemType))
|
||||||
|
}
|
||||||
currentField := valSlice.Index(i)
|
currentField := valSlice.Index(i)
|
||||||
|
|
||||||
fieldName := fmt.Sprintf("%s[%d]", name, i)
|
fieldName := fmt.Sprintf("%s[%d]", name, i)
|
||||||
|
@ -562,6 +876,73 @@ func (d *Decoder) decodeSlice(name string, data interface{}, val reflect.Value)
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (d *Decoder) decodeArray(name string, data interface{}, val reflect.Value) error {
|
||||||
|
dataVal := reflect.Indirect(reflect.ValueOf(data))
|
||||||
|
dataValKind := dataVal.Kind()
|
||||||
|
valType := val.Type()
|
||||||
|
valElemType := valType.Elem()
|
||||||
|
arrayType := reflect.ArrayOf(valType.Len(), valElemType)
|
||||||
|
|
||||||
|
valArray := val
|
||||||
|
|
||||||
|
if valArray.Interface() == reflect.Zero(valArray.Type()).Interface() || d.config.ZeroFields {
|
||||||
|
// Check input type
|
||||||
|
if dataValKind != reflect.Array && dataValKind != reflect.Slice {
|
||||||
|
if d.config.WeaklyTypedInput {
|
||||||
|
switch {
|
||||||
|
// Empty maps turn into empty arrays
|
||||||
|
case dataValKind == reflect.Map:
|
||||||
|
if dataVal.Len() == 0 {
|
||||||
|
val.Set(reflect.Zero(arrayType))
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// All other types we try to convert to the array type
|
||||||
|
// and "lift" it into it. i.e. a string becomes a string array.
|
||||||
|
default:
|
||||||
|
// Just re-try this function with data as a slice.
|
||||||
|
return d.decodeArray(name, []interface{}{data}, val)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return fmt.Errorf(
|
||||||
|
"'%s': source data must be an array or slice, got %s", name, dataValKind)
|
||||||
|
|
||||||
|
}
|
||||||
|
if dataVal.Len() > arrayType.Len() {
|
||||||
|
return fmt.Errorf(
|
||||||
|
"'%s': expected source data to have length less or equal to %d, got %d", name, arrayType.Len(), dataVal.Len())
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
// Make a new array to hold our result, same size as the original data.
|
||||||
|
valArray = reflect.New(arrayType).Elem()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Accumulate any errors
|
||||||
|
errors := make([]string, 0)
|
||||||
|
|
||||||
|
for i := 0; i < dataVal.Len(); i++ {
|
||||||
|
currentData := dataVal.Index(i).Interface()
|
||||||
|
currentField := valArray.Index(i)
|
||||||
|
|
||||||
|
fieldName := fmt.Sprintf("%s[%d]", name, i)
|
||||||
|
if err := d.decode(fieldName, currentData, currentField); err != nil {
|
||||||
|
errors = appendErrors(errors, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Finally, set the value to the array we built up
|
||||||
|
val.Set(valArray)
|
||||||
|
|
||||||
|
// If there were errors, we return those
|
||||||
|
if len(errors) > 0 {
|
||||||
|
return &Error{errors}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
func (d *Decoder) decodeStruct(name string, data interface{}, val reflect.Value) error {
|
func (d *Decoder) decodeStruct(name string, data interface{}, val reflect.Value) error {
|
||||||
dataVal := reflect.Indirect(reflect.ValueOf(data))
|
dataVal := reflect.Indirect(reflect.ValueOf(data))
|
||||||
|
|
||||||
|
@ -573,10 +954,29 @@ func (d *Decoder) decodeStruct(name string, data interface{}, val reflect.Value)
|
||||||
}
|
}
|
||||||
|
|
||||||
dataValKind := dataVal.Kind()
|
dataValKind := dataVal.Kind()
|
||||||
if dataValKind != reflect.Map {
|
switch dataValKind {
|
||||||
return fmt.Errorf("'%s' expected a map, got '%s'", name, dataValKind)
|
case reflect.Map:
|
||||||
|
return d.decodeStructFromMap(name, dataVal, val)
|
||||||
|
|
||||||
|
case reflect.Struct:
|
||||||
|
// Not the most efficient way to do this but we can optimize later if
|
||||||
|
// we want to. To convert from struct to struct we go to map first
|
||||||
|
// as an intermediary.
|
||||||
|
m := make(map[string]interface{})
|
||||||
|
mval := reflect.Indirect(reflect.ValueOf(&m))
|
||||||
|
if err := d.decodeMapFromStruct(name, dataVal, mval, mval); err != nil {
|
||||||
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
result := d.decodeStructFromMap(name, mval, val)
|
||||||
|
return result
|
||||||
|
|
||||||
|
default:
|
||||||
|
return fmt.Errorf("'%s' expected a map, got '%s'", name, dataVal.Kind())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *Decoder) decodeStructFromMap(name string, dataVal, val reflect.Value) error {
|
||||||
dataValType := dataVal.Type()
|
dataValType := dataVal.Type()
|
||||||
if kind := dataValType.Key().Kind(); kind != reflect.String && kind != reflect.Interface {
|
if kind := dataValType.Key().Kind(); kind != reflect.String && kind != reflect.Interface {
|
||||||
return fmt.Errorf(
|
return fmt.Errorf(
|
||||||
|
@ -601,25 +1001,22 @@ func (d *Decoder) decodeStruct(name string, data interface{}, val reflect.Value)
|
||||||
|
|
||||||
// Compile the list of all the fields that we're going to be decoding
|
// Compile the list of all the fields that we're going to be decoding
|
||||||
// from all the structs.
|
// from all the structs.
|
||||||
fields := make(map[*reflect.StructField]reflect.Value)
|
type field struct {
|
||||||
|
field reflect.StructField
|
||||||
|
val reflect.Value
|
||||||
|
}
|
||||||
|
fields := []field{}
|
||||||
for len(structs) > 0 {
|
for len(structs) > 0 {
|
||||||
structVal := structs[0]
|
structVal := structs[0]
|
||||||
structs = structs[1:]
|
structs = structs[1:]
|
||||||
|
|
||||||
structType := structVal.Type()
|
structType := structVal.Type()
|
||||||
|
|
||||||
for i := 0; i < structType.NumField(); i++ {
|
for i := 0; i < structType.NumField(); i++ {
|
||||||
fieldType := structType.Field(i)
|
fieldType := structType.Field(i)
|
||||||
|
|
||||||
if fieldType.Anonymous {
|
|
||||||
fieldKind := fieldType.Type.Kind()
|
fieldKind := fieldType.Type.Kind()
|
||||||
if fieldKind != reflect.Struct {
|
|
||||||
errors = appendErrors(errors,
|
|
||||||
fmt.Errorf("%s: unsupported type: %s", fieldType.Name, fieldKind))
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
// We have an embedded field. We "squash" the fields down
|
// If "squash" is specified in the tag, we squash the field down.
|
||||||
// if specified in the tag.
|
|
||||||
squash := false
|
squash := false
|
||||||
tagParts := strings.Split(fieldType.Tag.Get(d.config.TagName), ",")
|
tagParts := strings.Split(fieldType.Tag.Get(d.config.TagName), ",")
|
||||||
for _, tag := range tagParts[1:] {
|
for _, tag := range tagParts[1:] {
|
||||||
|
@ -630,20 +1027,26 @@ func (d *Decoder) decodeStruct(name string, data interface{}, val reflect.Value)
|
||||||
}
|
}
|
||||||
|
|
||||||
if squash {
|
if squash {
|
||||||
structs = append(structs, val.FieldByName(fieldType.Name))
|
if fieldKind != reflect.Struct {
|
||||||
continue
|
errors = appendErrors(errors,
|
||||||
|
fmt.Errorf("%s: unsupported type for squash: %s", fieldType.Name, fieldKind))
|
||||||
|
} else {
|
||||||
|
structs = append(structs, structVal.FieldByName(fieldType.Name))
|
||||||
}
|
}
|
||||||
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
// Normal struct field, store it away
|
// Normal struct field, store it away
|
||||||
fields[&fieldType] = structVal.Field(i)
|
fields = append(fields, field{fieldType, structVal.Field(i)})
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
for fieldType, field := range fields {
|
// for fieldType, field := range fields {
|
||||||
fieldName := fieldType.Name
|
for _, f := range fields {
|
||||||
|
field, fieldValue := f.field, f.val
|
||||||
|
fieldName := field.Name
|
||||||
|
|
||||||
tagValue := fieldType.Tag.Get(d.config.TagName)
|
tagValue := field.Tag.Get(d.config.TagName)
|
||||||
tagValue = strings.SplitN(tagValue, ",", 2)[0]
|
tagValue = strings.SplitN(tagValue, ",", 2)[0]
|
||||||
if tagValue != "" {
|
if tagValue != "" {
|
||||||
fieldName = tagValue
|
fieldName = tagValue
|
||||||
|
@ -654,7 +1057,7 @@ func (d *Decoder) decodeStruct(name string, data interface{}, val reflect.Value)
|
||||||
if !rawMapVal.IsValid() {
|
if !rawMapVal.IsValid() {
|
||||||
// Do a slower search by iterating over each key and
|
// Do a slower search by iterating over each key and
|
||||||
// doing case-insensitive search.
|
// doing case-insensitive search.
|
||||||
for dataValKey, _ := range dataValKeys {
|
for dataValKey := range dataValKeys {
|
||||||
mK, ok := dataValKey.Interface().(string)
|
mK, ok := dataValKey.Interface().(string)
|
||||||
if !ok {
|
if !ok {
|
||||||
// Not a string key
|
// Not a string key
|
||||||
|
@ -678,14 +1081,14 @@ func (d *Decoder) decodeStruct(name string, data interface{}, val reflect.Value)
|
||||||
// Delete the key we're using from the unused map so we stop tracking
|
// Delete the key we're using from the unused map so we stop tracking
|
||||||
delete(dataValKeysUnused, rawMapKey.Interface())
|
delete(dataValKeysUnused, rawMapKey.Interface())
|
||||||
|
|
||||||
if !field.IsValid() {
|
if !fieldValue.IsValid() {
|
||||||
// This should never happen
|
// This should never happen
|
||||||
panic("field is not valid")
|
panic("field is not valid")
|
||||||
}
|
}
|
||||||
|
|
||||||
// If we can't set the field, then it is unexported or something,
|
// If we can't set the field, then it is unexported or something,
|
||||||
// and we just continue onwards.
|
// and we just continue onwards.
|
||||||
if !field.CanSet() {
|
if !fieldValue.CanSet() {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -695,14 +1098,14 @@ func (d *Decoder) decodeStruct(name string, data interface{}, val reflect.Value)
|
||||||
fieldName = fmt.Sprintf("%s.%s", name, fieldName)
|
fieldName = fmt.Sprintf("%s.%s", name, fieldName)
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := d.decode(fieldName, rawMapVal.Interface(), field); err != nil {
|
if err := d.decode(fieldName, rawMapVal.Interface(), fieldValue); err != nil {
|
||||||
errors = appendErrors(errors, err)
|
errors = appendErrors(errors, err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if d.config.ErrorUnused && len(dataValKeysUnused) > 0 {
|
if d.config.ErrorUnused && len(dataValKeysUnused) > 0 {
|
||||||
keys := make([]string, 0, len(dataValKeysUnused))
|
keys := make([]string, 0, len(dataValKeysUnused))
|
||||||
for rawKey, _ := range dataValKeysUnused {
|
for rawKey := range dataValKeysUnused {
|
||||||
keys = append(keys, rawKey.(string))
|
keys = append(keys, rawKey.(string))
|
||||||
}
|
}
|
||||||
sort.Strings(keys)
|
sort.Strings(keys)
|
||||||
|
@ -717,7 +1120,7 @@ func (d *Decoder) decodeStruct(name string, data interface{}, val reflect.Value)
|
||||||
|
|
||||||
// Add the unused keys to the list of unused keys if we're tracking metadata
|
// Add the unused keys to the list of unused keys if we're tracking metadata
|
||||||
if d.config.Metadata != nil {
|
if d.config.Metadata != nil {
|
||||||
for rawKey, _ := range dataValKeysUnused {
|
for rawKey := range dataValKeysUnused {
|
||||||
key := rawKey.(string)
|
key := rawKey.(string)
|
||||||
if name != "" {
|
if name != "" {
|
||||||
key = fmt.Sprintf("%s.%s", name, key)
|
key = fmt.Sprintf("%s.%s", name, key)
|
||||||
|
|
10 vendor/github.com/ncw/swift/.travis.yml generated vendored
@@ -2,7 +2,6 @@ language: go
 sudo: false
 
 go:
- - 1.1.x
  - 1.2.x
  - 1.3.x
  - 1.4.x
@@ -13,18 +12,19 @@ go:
  - 1.9.x
  - 1.10.x
  - 1.11.x
+ - 1.12.x
  - master
 
 matrix:
   include:
-    - go: 1.11.x
+    - go: 1.12.x
       env: TEST_REAL_SERVER=rackspace
-    - go: 1.11.x
+    - go: 1.12.x
       env: TEST_REAL_SERVER=memset
   allow_failures:
-    - go: 1.11.x
+    - go: 1.12.x
      env: TEST_REAL_SERVER=rackspace
-    - go: 1.11.x
+    - go: 1.12.x
       env: TEST_REAL_SERVER=memset
 install: go test -i ./...
 script:
21 vendor/github.com/ncw/swift/README.md generated vendored
@@ -127,17 +127,30 @@ Contributors
 - Paul Querna <pquerna@apache.org>
 - Livio Soares <liviobs@gmail.com>
 - thesyncim <thesyncim@gmail.com>
-- lsowen <lsowen@s1network.com>
+- lsowen <lsowen@s1network.com> <logan@s1network.com>
 - Sylvain Baubeau <sbaubeau@redhat.com>
 - Chris Kastorff <encryptio@gmail.com>
 - Dai HaoJun <haojun.dai@hp.com>
 - Hua Wang <wanghua.humble@gmail.com>
-- Fabian Ruff <fabian@progra.de>
+- Fabian Ruff <fabian@progra.de> <fabian.ruff@sap.com>
 - Arturo Reuschenbach Puncernau <reuschenbach@gmail.com>
 - Petr Kotek <petr.kotek@bigcommerce.com>
-- Stefan Majewsky <stefan.majewsky@sap.com>
+- Stefan Majewsky <stefan.majewsky@sap.com> <majewsky@gmx.net>
 - Cezar Sa Espinola <cezarsa@gmail.com>
 - Sam Gunaratne <samgzeit@gmail.com>
 - Richard Scothern <richard.scothern@gmail.com>
-- Michel Couillard <couillard.michel@voxlog.ca>
+- Michel Couillard <!--<couillard.michel@voxlog.ca>--> <michel.couillard@gmail.com>
 - Christopher Waldon <ckwaldon@us.ibm.com>
+- dennis <dai.haojun@gmail.com>
+- hag <hannes.georg@xing.com>
+- Alexander Neumann <alexander@bumpern.de>
+- eclipseo <30413512+eclipseo@users.noreply.github.com>
+- Yuri Per <yuri@acronis.com>
+- Falk Reimann <falk.reimann@sap.com>
+- Arthur Paim Arnold <arthurpaimarnold@gmail.com>
+- Bruno Michel <bmichel@menfin.info>
+- Charles Hsu <charles0126@gmail.com>
+- Omar Ali <omarali@users.noreply.github.com>
+- Andreas Andersen <andreas@softwaredesign.se>
+- kayrus <kay.diam@gmail.com>
+- CodeLingo Bot <bot@codelingo.io>
15 vendor/github.com/ncw/swift/auth.go generated vendored
@@ -6,6 +6,7 @@ import (
 	"net/http"
 	"net/url"
 	"strings"
+	"time"
 )
 
 // Auth defines the operations needed to authenticate with swift
@@ -25,6 +26,11 @@ type Authenticator interface {
 	CdnUrl() string
 }
 
+// Expireser is an optional interface to read the expiration time of the token
+type Expireser interface {
+	Expires() time.Time
+}
+
 type CustomEndpointAuthenticator interface {
 	StorageUrlForEndpoint(endpointType EndpointType) string
 }
@@ -240,6 +246,15 @@ func (auth *v2Auth) Token() string {
 	return auth.Auth.Access.Token.Id
 }
 
+// v2 Authentication - read expires
+func (auth *v2Auth) Expires() time.Time {
+	t, err := time.Parse(time.RFC3339, auth.Auth.Access.Token.Expires)
+	if err != nil {
+		return time.Time{} // return Zero if not parsed
+	}
+	return t
+}
+
 // v2 Authentication - read cdn url
 func (auth *v2Auth) CdnUrl() string {
 	return auth.endpointUrl("rax:object-cdn", EndpointTypePublic)
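The hunks above add the optional `Expireser` interface to the swift package; a hedged sketch of how a caller could probe for it with a type assertion. The `tokenExpiry` helper is hypothetical and not part of the vendored package.

```go
package main

import (
	"time"

	"github.com/ncw/swift"
)

// tokenExpiry reports the token expiry if the given Authenticator exposes it.
// Authenticators are not required to implement Expireser, so probe with a
// type assertion and fall back to the zero time.
func tokenExpiry(auth swift.Authenticator) (time.Time, bool) {
	if ex, ok := auth.(swift.Expireser); ok {
		return ex.Expires(), true
	}
	return time.Time{}, false
}
```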
76 vendor/github.com/ncw/swift/auth_v3.go generated vendored

@ -3,13 +3,16 @@ package swift
 import (
 	"bytes"
 	"encoding/json"
+	"fmt"
 	"net/http"
 	"strings"
+	"time"
 )

 const (
 	v3AuthMethodToken                 = "token"
 	v3AuthMethodPassword              = "password"
+	v3AuthMethodApplicationCredential = "application_credential"
 	v3CatalogTypeObjectStore          = "object-store"
 )

@ -22,6 +25,7 @@ type v3AuthRequest struct {
 			Methods  []string        `json:"methods"`
 			Password *v3AuthPassword `json:"password,omitempty"`
 			Token    *v3AuthToken    `json:"token,omitempty"`
+			ApplicationCredential *v3AuthApplicationCredential `json:"application_credential,omitempty"`
 		} `json:"identity"`
 		Scope *v3Scope `json:"scope,omitempty"`
 	} `json:"auth"`

@ -63,10 +67,18 @@ type v3AuthPassword struct {
 	User v3User `json:"user"`
 }

+type v3AuthApplicationCredential struct {
+	Id     string  `json:"id,omitempty"`
+	Name   string  `json:"name,omitempty"`
+	Secret string  `json:"secret,omitempty"`
+	User   *v3User `json:"user,omitempty"`
+}
+
 // V3 Authentication response
 type v3AuthResponse struct {
 	Token struct {
-		Expires_At, Issued_At string
+		ExpiresAt string `json:"expires_at"`
+		IssuedAt  string `json:"issued_at"`
 		Methods []string
 		Roles   []struct {
 			Id, Name string

@ -117,7 +129,57 @@ func (auth *v3Auth) Request(c *Connection) (*http.Request, error) {
 	v3 := v3AuthRequest{}

-	if c.UserName == "" && c.UserId == "" {
+	if (c.ApplicationCredentialId != "" || c.ApplicationCredentialName != "") && c.ApplicationCredentialSecret != "" {
+		var user *v3User
+
+		if c.ApplicationCredentialId != "" {
+			c.ApplicationCredentialName = ""
+			user = &v3User{}
+		}
+
+		if user == nil && c.UserId != "" {
+			// UserID could be used without the domain information
+			user = &v3User{
+				Id: c.UserId,
+			}
+		}
+
+		if user == nil && c.UserName == "" {
+			// Make sure that Username or UserID are provided
+			return nil, fmt.Errorf("UserID or Name should be provided")
+		}
+
+		if user == nil && c.DomainId != "" {
+			user = &v3User{
+				Name: c.UserName,
+				Domain: &v3Domain{
+					Id: c.DomainId,
+				},
+			}
+		}
+
+		if user == nil && c.Domain != "" {
+			user = &v3User{
+				Name: c.UserName,
+				Domain: &v3Domain{
+					Name: c.Domain,
+				},
+			}
+		}
+
+		// Make sure that DomainID or DomainName are provided among Username
+		if user == nil {
+			return nil, fmt.Errorf("DomainID or Domain should be provided")
+		}
+
+		v3.Auth.Identity.Methods = []string{v3AuthMethodApplicationCredential}
+		v3.Auth.Identity.ApplicationCredential = &v3AuthApplicationCredential{
+			Id:     c.ApplicationCredentialId,
+			Name:   c.ApplicationCredentialName,
+			Secret: c.ApplicationCredentialSecret,
+			User:   user,
+		}
+	} else if c.UserName == "" && c.UserId == "" {
 		v3.Auth.Identity.Methods = []string{v3AuthMethodToken}
 		v3.Auth.Identity.Token = &v3AuthToken{Id: c.ApiKey}
 	} else {

@ -140,6 +202,7 @@ func (auth *v3Auth) Request(c *Connection) (*http.Request, error) {
 			v3.Auth.Identity.Password.User.Domain = domain
 		}
 	}

+	if v3.Auth.Identity.Methods[0] != v3AuthMethodApplicationCredential {
 	if c.TrustId != "" {
 		v3.Auth.Scope = &v3Scope{Trust: &v3Trust{Id: c.TrustId}}
 	} else if c.TenantId != "" || c.Tenant != "" {

@ -164,6 +227,7 @@ func (auth *v3Auth) Request(c *Connection) (*http.Request, error) {
 			}
 		}
 	}
+	}

 	v3i = v3

@ -223,6 +287,14 @@ func (auth *v3Auth) Token() string {
 	return auth.Headers.Get("X-Subject-Token")
 }

+func (auth *v3Auth) Expires() time.Time {
+	t, err := time.Parse(time.RFC3339, auth.Auth.Token.ExpiresAt)
+	if err != nil {
+		return time.Time{} // return Zero if not parsed
+	}
+	return t
+}
+
 func (auth *v3Auth) CdnUrl() string {
 	return ""
 }
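For orientation, the identity block assembled by the new application-credential branch above marshals to JSON roughly as shown below. The snippet is an illustrative, self-contained sketch that reuses the JSON tags from the hunk; the struct is a stand-in rather than the vendored type, and the credential values are invented.

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Minimal stand-ins mirroring the JSON tags added above; not the vendored types.
type appCred struct {
	Id     string `json:"id,omitempty"`
	Secret string `json:"secret,omitempty"`
}

type authRequest struct {
	Auth struct {
		Identity struct {
			Methods               []string `json:"methods"`
			ApplicationCredential *appCred `json:"application_credential,omitempty"`
		} `json:"identity"`
	} `json:"auth"`
}

func main() {
	var req authRequest
	req.Auth.Identity.Methods = []string{"application_credential"}
	req.Auth.Identity.ApplicationCredential = &appCred{Id: "abc123", Secret: "s3cret"} // placeholder values

	out, _ := json.MarshalIndent(req, "", "  ")
	fmt.Println(string(out)) // the identity payload a Keystone v3 endpoint would receive
}
```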
23 vendor/github.com/ncw/swift/compatibility_1_6.go generated vendored Normal file

@ -0,0 +1,23 @@
+// +build go1.6
+
+package swift
+
+import (
+	"net/http"
+	"time"
+)
+
+const IS_AT_LEAST_GO_16 = true
+
+func SetExpectContinueTimeout(tr *http.Transport, t time.Duration) {
+	tr.ExpectContinueTimeout = t
+}
+
+func AddExpectAndTransferEncoding(req *http.Request, hasContentLength bool) {
+	if req.Body != nil {
+		req.Header.Add("Expect", "100-continue")
+	}
+	if !hasContentLength {
+		req.TransferEncoding = []string{"chunked"}
+	}
+}
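A quick illustration of what the new Go 1.6+ helper does to an outgoing request, assuming the exported swift.AddExpectAndTransferEncoding added above; the URL and body are placeholders.

```go
package main

import (
	"fmt"
	"net/http"
	"strings"

	"github.com/ncw/swift"
)

func main() {
	body := strings.NewReader("payload")
	req, err := http.NewRequest("PUT", "https://storage.example.com/v1/AUTH_x/c/o", body) // placeholder URL
	if err != nil {
		panic(err)
	}

	// No Content-Length header was passed in, so the request is marked chunked,
	// and it gets an Expect: 100-continue header because it carries a body.
	swift.AddExpectAndTransferEncoding(req, false)

	fmt.Println(req.Header.Get("Expect")) // 100-continue
	fmt.Println(req.TransferEncoding)     // [chunked]
}
```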
13 vendor/github.com/ncw/swift/compatibility_not_1_6.go generated vendored Normal file

@ -0,0 +1,13 @@
+// +build !go1.6
+
+package swift
+
+import (
+	"net/http"
+	"time"
+)
+
+const IS_AT_LEAST_GO_16 = false
+
+func SetExpectContinueTimeout(tr *http.Transport, t time.Duration) {}
+func AddExpectAndTransferEncoding(req *http.Request, hasContentLength bool) {}
1 vendor/github.com/ncw/swift/go.mod generated vendored Normal file

@ -0,0 +1 @@
+module github.com/ncw/swift
4 vendor/github.com/ncw/swift/meta.go generated vendored

@ -151,7 +151,7 @@ func TimeToFloatString(t time.Time) string {
 	return nsToFloatString(t.UnixNano())
 }

-// Read a modification time (mtime) from a Metadata object
+// GetModTime reads a modification time (mtime) from a Metadata object
 //
 // This is a defacto standard (used in the official python-swiftclient
 // amongst others) for storing the modification time (as read using

@ -163,7 +163,7 @@ func (m Metadata) GetModTime() (t time.Time, err error) {
 	return FloatStringToTime(m["mtime"])
 }

-// Write an modification time (mtime) to a Metadata object
+// SetModTime writes an modification time (mtime) to a Metadata object
 //
 // This is a defacto standard (used in the official python-swiftclient
 // amongst others) for storing the modification time (as read using
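The renamed comments describe the de-facto mtime convention shared with python-swiftclient. A small sketch of the two helpers follows; GetModTime's signature appears in the hunk header above, while SetModTime is assumed to take a time.Time as in upstream ncw/swift.

```go
package main

import (
	"fmt"
	"time"

	"github.com/ncw/swift"
)

func main() {
	// Store a modification time in object metadata under the "mtime" key,
	// then read it back with GetModTime.
	m := swift.Metadata{}
	m.SetModTime(time.Now().Add(-time.Hour)) // assumed signature: SetModTime(time.Time)

	t, err := m.GetModTime()
	if err != nil {
		panic(err)
	}
	fmt.Println("stored mtime:", t)
}
```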
63 vendor/github.com/ncw/swift/swift.go generated vendored

@ -101,6 +101,9 @@ type Connection struct {
 	UserName string // UserName for api
 	UserId   string // User Id
 	ApiKey   string // Key for api access
+	ApplicationCredentialId     string // Application Credential ID
+	ApplicationCredentialName   string // Application Credential Name
+	ApplicationCredentialSecret string // Application Credential Secret
 	AuthUrl   string // Auth URL
 	Retries   int    // Retries on error (default is 3)
 	UserAgent string // Http User agent (default goswift/1.0)

@ -119,6 +122,7 @@ type Connection struct {
 	// These are filled in after Authenticate is called as are the defaults for above
 	StorageUrl string
 	AuthToken  string
+	Expires    time.Time // time the token expires, may be Zero if unknown
 	client     *http.Client
 	Auth       Authenticator `json:"-" xml:"-"` // the current authenticator
 	authLock   sync.Mutex    // lock when R/W StorageUrl, AuthToken, Auth

@ -194,6 +198,9 @@ func setFromEnv(param interface{}, name string) (err error) {
 //	OS_USERNAME - UserName for api
 //	OS_USER_ID - User Id
 //	OS_PASSWORD - Key for api access
+//	OS_APPLICATION_CREDENTIAL_ID - Application Credential ID
+//	OS_APPLICATION_CREDENTIAL_NAME - Application Credential Name
+//	OS_APPLICATION_CREDENTIAL_SECRET - Application Credential Secret
 //	OS_USER_DOMAIN_NAME - User's domain name
 //	OS_USER_DOMAIN_ID - User's domain Id
 //	OS_PROJECT_NAME - Name of the project

@ -227,6 +234,9 @@ func (c *Connection) ApplyEnvironment() (err error) {
 		{&c.UserName, "OS_USERNAME"},
 		{&c.UserId, "OS_USER_ID"},
 		{&c.ApiKey, "OS_PASSWORD"},
+		{&c.ApplicationCredentialId, "OS_APPLICATION_CREDENTIAL_ID"},
+		{&c.ApplicationCredentialName, "OS_APPLICATION_CREDENTIAL_NAME"},
+		{&c.ApplicationCredentialSecret, "OS_APPLICATION_CREDENTIAL_SECRET"},
 		{&c.AuthUrl, "OS_AUTH_URL"},
 		{&c.Retries, "GOSWIFT_RETRIES"},
 		{&c.UserAgent, "GOSWIFT_USER_AGENT"},

@ -298,6 +308,7 @@ var (
 	Forbidden      = newError(403, "Operation forbidden")
 	TooLargeObject = newError(413, "Too Large Object")
 	RateLimit      = newError(498, "Rate Limit")
+	TooManyRequests = newError(429, "TooManyRequests")

 	// Mappings for authentication errors
 	authErrorMap = errorMap{

@ -323,6 +334,7 @@ var (
 		404: ObjectNotFound,
 		413: TooLargeObject,
 		422: ObjectCorrupted,
+		429: TooManyRequests,
 		498: RateLimit,
 	}
 )

@ -423,12 +435,15 @@ func (c *Connection) setDefaults() {
 		c.Timeout = 60 * time.Second
 	}
 	if c.Transport == nil {
-		c.Transport = &http.Transport{
+		t := &http.Transport{
 			//		TLSClientConfig:    &tls.Config{RootCAs: pool},
 			//		DisableCompression: true,
 			Proxy: http.ProxyFromEnvironment,
-			MaxIdleConnsPerHost: 2048,
+			// Half of linux's default open files limit (1024).
+			MaxIdleConnsPerHost: 512,
 		}
+		SetExpectContinueTimeout(t, 5*time.Second)
+		c.Transport = t
 	}
 	if c.client == nil {
 		c.client = &http.Client{

@ -507,6 +522,12 @@ again:
 		c.StorageUrl = c.Auth.StorageUrl(c.Internal)
 	}
 	c.AuthToken = c.Auth.Token()
+	if do, ok := c.Auth.(Expireser); ok {
+		c.Expires = do.Expires()
+	} else {
+		c.Expires = time.Time{}
+	}
 	if !c.authenticated() {
 		err = newError(0, "Response didn't have storage url and auth token")
 		return

@ -568,7 +589,14 @@ func (c *Connection) Authenticated() bool {
 //
 // Call with authLock held
 func (c *Connection) authenticated() bool {
-	return c.StorageUrl != "" && c.AuthToken != ""
+	if c.StorageUrl == "" || c.AuthToken == "" {
+		return false
+	}
+	if c.Expires.IsZero() {
+		return true
+	}
+	timeUntilExpiry := c.Expires.Sub(time.Now())
+	return timeUntilExpiry >= 60*time.Second
 }

 // SwiftInfo contains the JSON object returned by Swift when the /info

@ -708,11 +736,11 @@ func (c *Connection) Call(targetUrl string, p RequestOpts) (resp *http.Response,
 		for k, v := range p.Headers {
 			// Set ContentLength in req if the user passed it in in the headers
 			if k == "Content-Length" {
-				contentLength, err := strconv.ParseInt(v, 10, 64)
+				req.ContentLength, err = strconv.ParseInt(v, 10, 64)
 				if err != nil {
-					return nil, nil, fmt.Errorf("Invalid %q header %q: %v", k, v, err)
+					err = fmt.Errorf("Invalid %q header %q: %v", k, v, err)
+					return
 				}
-				req.ContentLength = contentLength
 			} else {
 				req.Header.Add(k, v)
 			}

@ -720,13 +748,17 @@ func (c *Connection) Call(targetUrl string, p RequestOpts) (resp *http.Response,
 		}
 		req.Header.Add("User-Agent", c.UserAgent)
 		req.Header.Add("X-Auth-Token", authToken)
+
+		_, hasCL := p.Headers["Content-Length"]
+		AddExpectAndTransferEncoding(req, hasCL)
+
 		resp, err = c.doTimeoutRequest(timer, req)
 		if err != nil {
 			if (p.Operation == "HEAD" || p.Operation == "GET") && retries > 0 {
 				retries--
 				continue
 			}
-			return nil, nil, err
+			return
 		}
 		// Check to see if token has expired
 		if resp.StatusCode == 401 && retries > 0 {

@ -738,15 +770,14 @@ func (c *Connection) Call(targetUrl string, p RequestOpts) (resp *http.Response,
 		}
 	}

-	if err = c.parseHeaders(resp, p.ErrorMap); err != nil {
-		return nil, nil, err
-	}
 	headers = readHeaders(resp)
+	if err = c.parseHeaders(resp, p.ErrorMap); err != nil {
+		return
+	}
 	if p.NoResponse {
-		var err error
 		drainAndClose(resp.Body, &err)
 		if err != nil {
-			return nil, nil, err
+			return
 		}
 	} else {
 		// Cancel the request on timeout

@ -1014,6 +1045,7 @@ type Object struct {
 	ServerLastModified string     `json:"last_modified"` // Last modified time, eg '2011-06-30T08:20:47.736680' as a string supplied by the server
 	LastModified       time.Time  // Last modified time converted to a time.Time
 	Hash               string     `json:"hash"` // MD5 hash, eg "d41d8cd98f00b204e9800998ecf8427e"
+	SLOHash            string     `json:"slo_etag"` // MD5 hash of all segments' MD5 hash, eg "d41d8cd98f00b204e9800998ecf8427e"
 	PseudoDirectory    bool       // Set when using delimiter to show that this directory object does not really exist
 	SubDir             string     `json:"subdir"` // returned only when using delimiter to mark "pseudo directories"
 	ObjectType         ObjectType // type of this object

@ -1065,6 +1097,9 @@ func (c *Connection) Objects(container string, opts *ObjectsOpts) ([]Object, err
 				return nil, err
 			}
 		}
+		if object.SLOHash != "" {
+			object.ObjectType = StaticLargeObjectType
+		}
 	}
 	return objects, err
 }

@ -1895,6 +1930,10 @@ func (c *Connection) doBulkDelete(objects []string) (result BulkDeleteResult, er
 // * http://docs.openstack.org/trunk/openstack-object-storage/admin/content/object-storage-bulk-delete.html
 // * http://docs.rackspace.com/files/api/v1/cf-devguide/content/Bulk_Delete-d1e2338.html
 func (c *Connection) BulkDelete(container string, objectNames []string) (result BulkDeleteResult, err error) {
+	if len(objectNames) == 0 {
+		result.Errors = make(map[string]error)
+		return
+	}
 	fullPaths := make([]string, len(objectNames))
 	for i, name := range objectNames {
 		fullPaths[i] = fmt.Sprintf("/%s/%s", container, name)
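Putting the swift.go changes together: a connection can now be configured with an application credential instead of a password, and it exposes the token expiry once authenticated. A minimal sketch follows; the auth URL and credential values are placeholders and error handling is trimmed to the essentials.

```go
package main

import (
	"log"

	"github.com/ncw/swift"
)

func main() {
	c := swift.Connection{
		AuthUrl:     "https://keystone.example.com/v3", // placeholder endpoint
		AuthVersion: 3,
		// With only ApplicationCredentialName set, the v3 request also needs
		// UserName plus Domain or DomainId; with the ID this is sufficient.
		ApplicationCredentialId:     "abc123", // placeholder
		ApplicationCredentialSecret: "s3cret", // placeholder
	}
	if err := c.Authenticate(); err != nil {
		log.Fatal(err)
	}
	// New in this update: Expires is populated when the authenticator
	// implements Expireser, and authenticated() treats tokens with less
	// than 60 seconds of validity left as stale.
	log.Println("storage URL:", c.StorageUrl)
	log.Println("token expires:", c.Expires)
}
```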
1 vendor/github.com/sirupsen/logrus/.gitignore generated vendored

@ -1 +1,2 @@
 logrus
+vendor
29 vendor/github.com/sirupsen/logrus/.travis.yml generated vendored

@ -1,12 +1,25 @@
 language: go
-go:
-  - 1.6.x
-  - 1.7.x
-  - 1.8.x
-  - tip
+go_import_path: github.com/sirupsen/logrus
+git:
+  depth: 1
 env:
-  - GOMAXPROCS=4 GORACE=halt_on_error=1
+  - GO111MODULE=on
+  - GO111MODULE=off
+go: [ 1.11.x, 1.12.x ]
+os: [ linux, osx ]
+matrix:
+  exclude:
+    - go: 1.12.x
+      env: GO111MODULE=off
+    - go: 1.11.x
+      os: osx
 install:
-  - go get github.com/stretchr/testify/assert
+  - ./travis/install.sh
+  - if [[ "$GO111MODULE" == "on" ]]; then go mod download; fi
+  - if [[ "$GO111MODULE" == "off" ]]; then go get github.com/stretchr/testify/assert golang.org/x/sys/unix github.com/konsorten/go-windows-terminal-sequences; fi
 script:
-  - go test -race -v .
+  - ./travis/cross_build.sh
+  - export GOMAXPROCS=4
+  - export GORACE=halt_on_error=1
+  - go test -race -v ./...
+  - if [[ "$TRAVIS_OS_NAME" == "linux" ]]; then go test -race -v -tags appengine ./... ; fi
100 vendor/github.com/sirupsen/logrus/CHANGELOG.md generated vendored

@ -1,3 +1,103 @@
+# 1.4.2
+  * Fixes build break for plan9, nacl, solaris
+# 1.4.1
+This new release introduces:
+  * Enhance TextFormatter to not print caller information when they are empty (#944)
+  * Remove dependency on golang.org/x/crypto (#932, #943)
+
+Fixes:
+  * Fix Entry.WithContext method to return a copy of the initial entry (#941)
+
+# 1.4.0
+This new release introduces:
+  * Add `DeferExitHandler`, similar to `RegisterExitHandler` but prepending the handler to the list of handlers (semantically like `defer`) (#848).
+  * Add `CallerPrettyfier` to `JSONFormatter` and `TextFormatter (#909, #911)
+  * Add `Entry.WithContext()` and `Entry.Context`, to set a context on entries to be used e.g. in hooks (#919).
+
+Fixes:
+  * Fix wrong method calls `Logger.Print` and `Logger.Warningln` (#893).
+  * Update `Entry.Logf` to not do string formatting unless the log level is enabled (#903)
+  * Fix infinite recursion on unknown `Level.String()` (#907)
+  * Fix race condition in `getCaller` (#916).
+
+# 1.3.0
+This new release introduces:
+  * Log, Logf, Logln functions for Logger and Entry that take a Level
+
+Fixes:
+  * Building prometheus node_exporter on AIX (#840)
+  * Race condition in TextFormatter (#468)
+  * Travis CI import path (#868)
+  * Remove coloured output on Windows (#862)
+  * Pointer to func as field in JSONFormatter (#870)
+  * Properly marshal Levels (#873)
+
+# 1.2.0
+This new release introduces:
+  * A new method `SetReportCaller` in the `Logger` to enable the file, line and calling function from which the trace has been issued
+  * A new trace level named `Trace` whose level is below `Debug`
+  * A configurable exit function to be called upon a Fatal trace
+  * The `Level` object now implements `encoding.TextUnmarshaler` interface
+
+# 1.1.1
+This is a bug fix release.
+  * fix the build break on Solaris
+  * don't drop a whole trace in JSONFormatter when a field param is a function pointer which can not be serialized
+
+# 1.1.0
+This new release introduces:
+  * several fixes:
+    * a fix for a race condition on entry formatting
+    * proper cleanup of previously used entries before putting them back in the pool
+    * the extra new line at the end of message in text formatter has been removed
+  * a new global public API to check if a level is activated: IsLevelEnabled
+  * the following methods have been added to the Logger object
+    * IsLevelEnabled
+    * SetFormatter
+    * SetOutput
+    * ReplaceHooks
+  * introduction of go module
+  * an indent configuration for the json formatter
+  * output colour support for windows
+  * the field sort function is now configurable for text formatter
+  * the CLICOLOR and CLICOLOR\_FORCE environment variable support in text formater
+
+# 1.0.6
+
+This new release introduces:
+  * a new api WithTime which allows to easily force the time of the log entry
+    which is mostly useful for logger wrapper
+  * a fix reverting the immutability of the entry given as parameter to the hooks
+    a new configuration field of the json formatter in order to put all the fields
+    in a nested dictionnary
+  * a new SetOutput method in the Logger
+  * a new configuration of the textformatter to configure the name of the default keys
+  * a new configuration of the text formatter to disable the level truncation
+
+# 1.0.5
+
+* Fix hooks race (#707)
+* Fix panic deadlock (#695)
+
+# 1.0.4
+
+* Fix race when adding hooks (#612)
+* Fix terminal check in AppEngine (#635)
+
+# 1.0.3
+
+* Replace example files with testable examples
+
+# 1.0.2
+
+* bug: quote non-string values in text formatter (#583)
+* Make (*Logger) SetLevel a public method
+
+# 1.0.1
+
+* bug: fix escaping in text formatter (#575)
+
 # 1.0.0

 * Officially changed name to lower-case
118 vendor/github.com/sirupsen/logrus/README.md generated vendored

@ -1,18 +1,24 @@
 # Logrus <img src="http://i.imgur.com/hTeVwmJ.png" width="40" height="40" alt=":walrus:" class="emoji" title=":walrus:"/> [![Build Status](https://travis-ci.org/sirupsen/logrus.svg?branch=master)](https://travis-ci.org/sirupsen/logrus) [![GoDoc](https://godoc.org/github.com/sirupsen/logrus?status.svg)](https://godoc.org/github.com/sirupsen/logrus)

 Logrus is a structured logger for Go (golang), completely API compatible with
-the standard library logger. [Godoc][godoc].
+the standard library logger.

-**Seeing weird case-sensitive problems?** Unfortunately, the author failed to
-realize the consequences of renaming to lower-case. Due to the Go package
-environment, this caused issues. Regretfully, there's no turning back now.
+**Seeing weird case-sensitive problems?** It's in the past been possible to
+import Logrus as both upper- and lower-case. Due to the Go package environment,
+this caused issues in the community and we needed a standard. Some environments
+experienced problems with the upper-case variant, so the lower-case was decided.
 Everything using `logrus` will need to use the lower-case:
 `github.com/sirupsen/logrus`. Any package that isn't, should be changed.

-I am terribly sorry for this inconvenience. Logrus strives hard for backwards
-compatibility, and the author failed to realize the cascading consequences of
-such a name-change. To fix Glide, see [these
+To fix Glide, see [these
 comments](https://github.com/sirupsen/logrus/issues/553#issuecomment-306591437).
+For an in-depth explanation of the casing issue, see [this
+comment](https://github.com/sirupsen/logrus/issues/570#issuecomment-313933276).
+
+**Are you interested in assisting in maintaining Logrus?** Currently I have a
+lot of obligations, and I am unable to provide Logrus with the maintainership it
+needs. If you'd like to help, please reach out to me at `simon at author's
+username dot com`.

 Nicely color-coded in development (when a TTY is attached, otherwise just
 plain text):

@ -50,8 +56,39 @@ time="2015-03-26T01:27:38-04:00" level=warning msg="The group's number increased
 time="2015-03-26T01:27:38-04:00" level=debug msg="Temperature changes" temperature=-4
 time="2015-03-26T01:27:38-04:00" level=panic msg="It's over 9000!" animal=orca size=9009
 time="2015-03-26T01:27:38-04:00" level=fatal msg="The ice breaks!" err=&{0x2082280c0 map[animal:orca size:9009] 2015-03-26 01:27:38.441574009 -0400 EDT panic It's over 9000!} number=100 omg=true
-exit status 1
 ```
+
+To ensure this behaviour even if a TTY is attached, set your formatter as follows:
+
+```go
+	log.SetFormatter(&log.TextFormatter{
+		DisableColors: true,
+		FullTimestamp: true,
+	})
+```
+
+#### Logging Method Name
+
+If you wish to add the calling method as a field, instruct the logger via:
+```go
+log.SetReportCaller(true)
+```
+This adds the caller as 'method' like so:
+
+```json
+{"animal":"penguin","level":"fatal","method":"github.com/sirupsen/arcticcreatures.migrate","msg":"a penguin swims by",
+"time":"2014-03-10 19:57:38.562543129 -0400 EDT"}
+```
+
+```text
+time="2015-03-26T01:27:38-04:00" level=fatal method=github.com/sirupsen/arcticcreatures.migrate msg="a penguin swims by" animal=penguin
+```
+Note that this does add measurable overhead - the cost will depend on the version of Go, but is
+between 20 and 40% in recent tests with 1.6 and 1.7. You can validate this in your
+environment via benchmarks:
+```
+go test -bench=.*CallerTracing
+```
+
+
 #### Case-sensitivity

@ -214,7 +251,7 @@ Logrus comes with [built-in hooks](hooks/). Add those, or your custom hook, in
 ```go
 import (
   log "github.com/sirupsen/logrus"
-  "gopkg.in/gemnasium/logrus-airbrake-hook.v2" // the package is named "aibrake"
+  "gopkg.in/gemnasium/logrus-airbrake-hook.v2" // the package is named "airbrake"
   logrus_syslog "github.com/sirupsen/logrus/hooks/syslog"
   "log/syslog"
 )

@ -235,59 +272,15 @@ func init() {
 ```
 Note: Syslog hook also support connecting to local syslog (Ex. "/dev/log" or "/var/run/syslog" or "/var/run/log"). For the detail, please check the [syslog hook README](hooks/syslog/README.md).

-| Hook  | Description |
-| ----- | ----------- |
-| [Airbrake "legacy"](https://github.com/gemnasium/logrus-airbrake-legacy-hook) | Send errors to an exception tracking service compatible with the Airbrake API V2. Uses [`airbrake-go`](https://github.com/tobi/airbrake-go) behind the scenes. |
-| [Airbrake](https://github.com/gemnasium/logrus-airbrake-hook) | Send errors to the Airbrake API V3. Uses the official [`gobrake`](https://github.com/airbrake/gobrake) behind the scenes. |
-| [Amazon Kinesis](https://github.com/evalphobia/logrus_kinesis) | Hook for logging to [Amazon Kinesis](https://aws.amazon.com/kinesis/) |
-| [Amqp-Hook](https://github.com/vladoatanasov/logrus_amqp) | Hook for logging to Amqp broker (Like RabbitMQ) |
-| [Bugsnag](https://github.com/Shopify/logrus-bugsnag/blob/master/bugsnag.go) | Send errors to the Bugsnag exception tracking service. |
-| [DeferPanic](https://github.com/deferpanic/dp-logrus) | Hook for logging to DeferPanic |
-| [Discordrus](https://github.com/kz/discordrus) | Hook for logging to [Discord](https://discordapp.com/) |
-| [ElasticSearch](https://github.com/sohlich/elogrus) | Hook for logging to ElasticSearch|
-| [Firehose](https://github.com/beaubrewer/logrus_firehose) | Hook for logging to [Amazon Firehose](https://aws.amazon.com/kinesis/firehose/)
-| [Fluentd](https://github.com/evalphobia/logrus_fluent) | Hook for logging to fluentd |
-| [Go-Slack](https://github.com/multiplay/go-slack) | Hook for logging to [Slack](https://slack.com) |
-| [Graylog](https://github.com/gemnasium/logrus-graylog-hook) | Hook for logging to [Graylog](http://graylog2.org/) |
-| [Hiprus](https://github.com/nubo/hiprus) | Send errors to a channel in hipchat. |
-| [Honeybadger](https://github.com/agonzalezro/logrus_honeybadger) | Hook for sending exceptions to Honeybadger |
-| [InfluxDB](https://github.com/Abramovic/logrus_influxdb) | Hook for logging to influxdb |
-| [Influxus](http://github.com/vlad-doru/influxus) | Hook for concurrently logging to [InfluxDB](http://influxdata.com/) |
-| [Journalhook](https://github.com/wercker/journalhook) | Hook for logging to `systemd-journald` |
-| [KafkaLogrus](https://github.com/goibibo/KafkaLogrus) | Hook for logging to kafka |
-| [LFShook](https://github.com/rifflock/lfshook) | Hook for logging to the local filesystem |
-| [Logentries](https://github.com/jcftang/logentriesrus) | Hook for logging to [Logentries](https://logentries.com/) |
-| [Logentrus](https://github.com/puddingfactory/logentrus) | Hook for logging to [Logentries](https://logentries.com/) |
-| [Logmatic.io](https://github.com/logmatic/logmatic-go) | Hook for logging to [Logmatic.io](http://logmatic.io/) |
-| [Logrusly](https://github.com/sebest/logrusly) | Send logs to [Loggly](https://www.loggly.com/) |
-| [Logstash](https://github.com/bshuster-repo/logrus-logstash-hook) | Hook for logging to [Logstash](https://www.elastic.co/products/logstash) |
-| [Mail](https://github.com/zbindenren/logrus_mail) | Hook for sending exceptions via mail |
-| [Mongodb](https://github.com/weekface/mgorus) | Hook for logging to mongodb |
-| [NATS-Hook](https://github.com/rybit/nats_logrus_hook) | Hook for logging to [NATS](https://nats.io) |
-| [Octokit](https://github.com/dorajistyle/logrus-octokit-hook) | Hook for logging to github via octokit |
-| [Papertrail](https://github.com/polds/logrus-papertrail-hook) | Send errors to the [Papertrail](https://papertrailapp.com) hosted logging service via UDP. |
-| [PostgreSQL](https://github.com/gemnasium/logrus-postgresql-hook) | Send logs to [PostgreSQL](http://postgresql.org) |
-| [Pushover](https://github.com/toorop/logrus_pushover) | Send error via [Pushover](https://pushover.net) |
-| [Raygun](https://github.com/squirkle/logrus-raygun-hook) | Hook for logging to [Raygun.io](http://raygun.io/) |
-| [Redis-Hook](https://github.com/rogierlommers/logrus-redis-hook) | Hook for logging to a ELK stack (through Redis) |
-| [Rollrus](https://github.com/heroku/rollrus) | Hook for sending errors to rollbar |
-| [Scribe](https://github.com/sagar8192/logrus-scribe-hook) | Hook for logging to [Scribe](https://github.com/facebookarchive/scribe)|
-| [Sentry](https://github.com/evalphobia/logrus_sentry) | Send errors to the Sentry error logging and aggregation service. |
-| [Slackrus](https://github.com/johntdyer/slackrus) | Hook for Slack chat. |
-| [Stackdriver](https://github.com/knq/sdhook) | Hook for logging to [Google Stackdriver](https://cloud.google.com/logging/) |
-| [Sumorus](https://github.com/doublefree/sumorus) | Hook for logging to [SumoLogic](https://www.sumologic.com/)|
-| [Syslog](https://github.com/Sirupsen/logrus/blob/master/hooks/syslog/syslog.go) | Send errors to remote syslog server. Uses standard library `log/syslog` behind the scenes. |
-| [Syslog TLS](https://github.com/shinji62/logrus-syslog-ng) | Send errors to remote syslog server with TLS support. |
-| [TraceView](https://github.com/evalphobia/logrus_appneta) | Hook for logging to [AppNeta TraceView](https://www.appneta.com/products/traceview/) |
-| [Typetalk](https://github.com/dragon3/logrus-typetalk-hook) | Hook for logging to [Typetalk](https://www.typetalk.in/) |
-| [logz.io](https://github.com/ripcurld00d/logrus-logzio-hook) | Hook for logging to [logz.io](https://logz.io), a Log as a Service using Logstash |
-| [SQS-Hook](https://github.com/tsarpaul/logrus_sqs) | Hook for logging to [Amazon Simple Queue Service (SQS)](https://aws.amazon.com/sqs/) |
+A list of currently known of service hook can be found in this wiki [page](https://github.com/sirupsen/logrus/wiki/Hooks)

 #### Level logging

-Logrus has six logging levels: Debug, Info, Warning, Error, Fatal and Panic.
+Logrus has seven logging levels: Trace, Debug, Info, Warning, Error, Fatal and Panic.

 ```go
+log.Trace("Something very low level.")
 log.Debug("Useful debugging information.")
 log.Info("Something noteworthy happened!")
 log.Warn("You should probably take a look at this.")

@ -359,15 +352,20 @@ The built-in logging formatters are:
   field to `true`. To force no colored output even if there is a TTY  set the
   `DisableColors` field to `true`. For Windows, see
   [github.com/mattn/go-colorable](https://github.com/mattn/go-colorable).
+  * When colors are enabled, levels are truncated to 4 characters by default. To disable
+    truncation set the `DisableLevelTruncation` field to `true`.
   * All options are listed in the [generated docs](https://godoc.org/github.com/sirupsen/logrus#TextFormatter).
 * `logrus.JSONFormatter`. Logs fields as JSON.
   * All options are listed in the [generated docs](https://godoc.org/github.com/sirupsen/logrus#JSONFormatter).

 Third party logging formatters:

+* [`FluentdFormatter`](https://github.com/joonix/log). Formats entries that can be parsed by Kubernetes and Google Container Engine.
+* [`GELF`](https://github.com/fabienm/go-logrus-formatters). Formats entries so they comply to Graylog's [GELF 1.1 specification](http://docs.graylog.org/en/2.4/pages/gelf.html).
 * [`logstash`](https://github.com/bshuster-repo/logrus-logstash-hook). Logs fields as [Logstash](http://logstash.net) Events.
 * [`prefixed`](https://github.com/x-cray/logrus-prefixed-formatter). Displays log entry source along with alternative layout.
 * [`zalgo`](https://github.com/aybabtme/logzalgo). Invoking the P͉̫o̳̼̊w̖͈̰͎e̬͔̭͂r͚̼̹̲ ̫͓͉̳͈ō̠͕͖̚f̝͍̠ ͕̲̞͖͑Z̖̫̤̫ͪa͉̬͈̗l͖͎g̳̥o̰̥̅!̣͔̲̻͊̄ ̙̘̦̹̦.
+* [`nested-logrus-formatter`](https://github.com/antonfisher/nested-logrus-formatter). Converts logrus fields to a nested structure.

 You can define your formatter by implementing the `Formatter` interface,
 requiring a `Format` method. `Format` takes an `*Entry`. `entry.Data` is a

@ -445,13 +443,13 @@ Logrus has a built in facility for asserting the presence of log messages. This
 ```go
 import(
   "github.com/sirupsen/logrus"
-  "github.com/sirupsen/logrus/hooks/null"
+  "github.com/sirupsen/logrus/hooks/test"
   "github.com/stretchr/testify/assert"
   "testing"
 )

 func TestSomething(t*testing.T){
-  logger, hook := null.NewNullLogger()
+  logger, hook := test.NewNullLogger()
   logger.Error("Helloerror")

   assert.Equal(t, 1, len(hook.Entries))

@ -481,7 +479,7 @@ logrus.RegisterExitHandler(handler)

 #### Thread safety

-By default Logger is protected by mutex for concurrent writes, this mutex is invoked when calling hooks and writing logs.
+By default, Logger is protected by a mutex for concurrent writes. The mutex is held when calling hooks and writing logs.
 If you are sure such locking is not needed, you can call logger.SetNoLock() to disable the locking.

 Situation when locking is not needed includes:
18 vendor/github.com/sirupsen/logrus/alt_exit.go generated vendored

@ -51,9 +51,9 @@ func Exit(code int) {
 	os.Exit(code)
 }

-// RegisterExitHandler adds a Logrus Exit handler, call logrus.Exit to invoke
-// all handlers. The handlers will also be invoked when any Fatal log entry is
-// made.
+// RegisterExitHandler appends a Logrus Exit handler to the list of handlers,
+// call logrus.Exit to invoke all handlers. The handlers will also be invoked when
+// any Fatal log entry is made.
 //
 // This method is useful when a caller wishes to use logrus to log a fatal
 // message but also needs to gracefully shutdown. An example usecase could be

@ -62,3 +62,15 @@ func Exit(code int) {
 func RegisterExitHandler(handler func()) {
 	handlers = append(handlers, handler)
 }
+
+// DeferExitHandler prepends a Logrus Exit handler to the list of handlers,
+// call logrus.Exit to invoke all handlers. The handlers will also be invoked when
+// any Fatal log entry is made.
+//
+// This method is useful when a caller wishes to use logrus to log a fatal
+// message but also needs to gracefully shutdown. An example usecase could be
+// closing database connections, or sending a alert that the application is
+// closing.
+func DeferExitHandler(handler func()) {
+	handlers = append([]func(){handler}, handlers...)
+}
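RegisterExitHandler appends, while the new DeferExitHandler prepends, so deferred handlers run first when logrus.Exit (or a Fatal log) fires. An illustrative sketch, with invented handler bodies:

```go
package main

import (
	log "github.com/sirupsen/logrus"
)

func main() {
	log.RegisterExitHandler(func() { log.Info("flush buffers") }) // appended to the handler list
	log.DeferExitHandler(func() { log.Info("close database") })   // prepended, defer-style

	// Runs "close database" first, then "flush buffers", then os.Exit(0).
	log.Exit(0)
}
```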
14 vendor/github.com/sirupsen/logrus/appveyor.yml generated vendored Normal file

@ -0,0 +1,14 @@
+version: "{build}"
+platform: x64
+clone_folder: c:\gopath\src\github.com\sirupsen\logrus
+environment:
+  GOPATH: c:\gopath
+branches:
+  only:
+    - master
+install:
+  - set PATH=%GOPATH%\bin;c:\go\bin;%PATH%
+  - go version
+build_script:
+  - go get -t
+  - go test
302 vendor/github.com/sirupsen/logrus/entry.go generated vendored

@ -2,13 +2,33 @@ package logrus
 import (
 	"bytes"
+	"context"
 	"fmt"
 	"os"
+	"reflect"
+	"runtime"
+	"strings"
 	"sync"
 	"time"
 )

-var bufferPool *sync.Pool
+var (
+	bufferPool *sync.Pool
+
+	// qualified package name, cached at first use
+	logrusPackage string
+
+	// Positions in the call stack when tracing to report the calling method
+	minimumCallerDepth int
+
+	// Used for caller information initialisation
+	callerInitOnce sync.Once
+)
+
+const (
+	maximumCallerDepth int = 25
+	knownLogrusFrames  int = 4
+)

 func init() {
 	bufferPool = &sync.Pool{

@ -16,15 +36,18 @@ func init() {
 			return new(bytes.Buffer)
 		},
 	}
+
+	// start at the bottom of the stack before the package-name cache is primed
+	minimumCallerDepth = 1
 }

 // Defines the key when adding errors using WithError.
 var ErrorKey = "error"

 // An entry is the final or intermediate Logrus logging entry. It contains all
-// the fields passed with WithField{,s}. It's finally logged when Debug, Info,
-// Warn, Error, Fatal or Panic is called on it. These objects can be reused and
-// passed around as much as you wish to avoid field duplication.
+// the fields passed with WithField{,s}. It's finally logged when Trace, Debug,
+// Info, Warn, Error, Fatal or Panic is called on it. These objects can be
+// reused and passed around as much as you wish to avoid field duplication.
 type Entry struct {
 	Logger *Logger

@ -34,21 +57,31 @@ type Entry struct {
 	// Time at which the log entry was created
 	Time time.Time

-	// Level the log entry was logged at: Debug, Info, Warn, Error, Fatal or Panic
+	// Level the log entry was logged at: Trace, Debug, Info, Warn, Error, Fatal or Panic
+	// This field will be set on entry firing and the value will be equal to the one in Logger struct field.
 	Level Level

-	// Message passed to Debug, Info, Warn, Error, Fatal or Panic
+	// Calling method, with package name
+	Caller *runtime.Frame
+
+	// Message passed to Trace, Debug, Info, Warn, Error, Fatal or Panic
 	Message string

-	// When formatter is called in entry.log(), an Buffer may be set to entry
+	// When formatter is called in entry.log(), a Buffer may be set to entry
 	Buffer *bytes.Buffer
+
+	// Contains the context set by the user. Useful for hook processing etc.
+	Context context.Context
+
+	// err may contain a field formatting error
+	err string
 }

 func NewEntry(logger *Logger) *Entry {
 	return &Entry{
 		Logger: logger,
-		// Default is three fields, give a little extra room
-		Data: make(Fields, 5),
+		// Default is three fields, plus one optional.  Give a little extra room.
+		Data: make(Fields, 6),
 	}
 }

@ -68,6 +101,11 @@ func (entry *Entry) WithError(err error) *Entry {
 	return entry.WithField(ErrorKey, err)
 }

+// Add a context to the Entry.
+func (entry *Entry) WithContext(ctx context.Context) *Entry {
+	return &Entry{Logger: entry.Logger, Data: entry.Data, Time: entry.Time, err: entry.err, Context: ctx}
+}
+
 // Add a single field to the Entry.
 func (entry *Entry) WithField(key string, value interface{}) *Entry {
 	return entry.WithFields(Fields{key: value})

@ -79,43 +117,120 @@ func (entry *Entry) WithFields(fields Fields) *Entry {
 	for k, v := range entry.Data {
 		data[k] = v
 	}
+	fieldErr := entry.err
 	for k, v := range fields {
-		data[k] = v
+		isErrField := false
+		if t := reflect.TypeOf(v); t != nil {
+			switch t.Kind() {
+			case reflect.Func:
+				isErrField = true
+			case reflect.Ptr:
+				isErrField = t.Elem().Kind() == reflect.Func
+			}
+		}
+		if isErrField {
+			tmp := fmt.Sprintf("can not add field %q", k)
+			if fieldErr != "" {
+				fieldErr = entry.err + ", " + tmp
+			} else {
+				fieldErr = tmp
+			}
+		} else {
+			data[k] = v
+		}
 	}
-	return &Entry{Logger: entry.Logger, Data: data}
+	return &Entry{Logger: entry.Logger, Data: data, Time: entry.Time, err: fieldErr, Context: entry.Context}
+}
+
+// Overrides the time of the Entry.
+func (entry *Entry) WithTime(t time.Time) *Entry {
+	return &Entry{Logger: entry.Logger, Data: entry.Data, Time: t, err: entry.err, Context: entry.Context}
+}
+
+// getPackageName reduces a fully qualified function name to the package name
+// There really ought to be to be a better way...
+func getPackageName(f string) string {
+	for {
+		lastPeriod := strings.LastIndex(f, ".")
+		lastSlash := strings.LastIndex(f, "/")
+		if lastPeriod > lastSlash {
+			f = f[:lastPeriod]
+		} else {
+			break
+		}
+	}
+
+	return f
+}
+
+// getCaller retrieves the name of the first non-logrus calling function
+func getCaller() *runtime.Frame {
+
+	// cache this package's fully-qualified name
+	callerInitOnce.Do(func() {
+		pcs := make([]uintptr, 2)
+		_ = runtime.Callers(0, pcs)
+		logrusPackage = getPackageName(runtime.FuncForPC(pcs[1]).Name())
+
+		// now that we have the cache, we can skip a minimum count of known-logrus functions
+		// XXX this is dubious, the number of frames may vary
+		minimumCallerDepth = knownLogrusFrames
+	})
+
+	// Restrict the lookback frames to avoid runaway lookups
+	pcs := make([]uintptr, maximumCallerDepth)
+	depth := runtime.Callers(minimumCallerDepth, pcs)
+	frames := runtime.CallersFrames(pcs[:depth])
+
+	for f, again := frames.Next(); again; f, again = frames.Next() {
+		pkg := getPackageName(f.Function)
+
+		// If the caller isn't part of this package, we're done
+		if pkg != logrusPackage {
+			return &f
+		}
+	}
+
+	// if we got here, we failed to find the caller's context
+	return nil
+}
+
+func (entry Entry) HasCaller() (has bool) {
+	return entry.Logger != nil &&
+		entry.Logger.ReportCaller &&
+		entry.Caller != nil
 }

 // This function is not declared with a pointer value because otherwise
 // race conditions will occur when using multiple goroutines
 func (entry Entry) log(level Level, msg string) {
 	var buffer *bytes.Buffer
+
+	// Default to now, but allow users to override if they want.
+	//
+	// We don't have to worry about polluting future calls to Entry#log()
+	// with this assignment because this function is declared with a
+	// non-pointer receiver.
+	if entry.Time.IsZero() {
 		entry.Time = time.Now()
+	}
+
 	entry.Level = level
 	entry.Message = msg
-	if err := entry.Logger.Hooks.Fire(level, &entry); err != nil {
-		entry.Logger.mu.Lock()
-		fmt.Fprintf(os.Stderr, "Failed to fire hook: %v\n", err)
-		entry.Logger.mu.Unlock()
+	if entry.Logger.ReportCaller {
+		entry.Caller = getCaller()
 	}

+	entry.fireHooks()
+
 	buffer = bufferPool.Get().(*bytes.Buffer)
 	buffer.Reset()
 	defer bufferPool.Put(buffer)
 	entry.Buffer = buffer
-	serialized, err := entry.Logger.Formatter.Format(&entry)
+
+	entry.write()
+
 	entry.Buffer = nil
-	if err != nil {
-		entry.Logger.mu.Lock()
-		fmt.Fprintf(os.Stderr, "Failed to obtain reader, %v\n", err)
-		entry.Logger.mu.Unlock()
-	} else {
-		entry.Logger.mu.Lock()
-		_, err = entry.Logger.Out.Write(serialized)
-		if err != nil {
-			fmt.Fprintf(os.Stderr, "Failed to write to log, %v\n", err)
-		}
-		entry.Logger.mu.Unlock()
-	}

 	// To avoid Entry#log() returning a value that only would make sense for
 	// panic() to use in Entry#Panic(), we avoid the allocation by checking

@ -125,26 +240,53 @@ func (entry Entry) log(level Level, msg string) {
 	}
 }

-func (entry *Entry) Debug(args ...interface{}) {
-	if entry.Logger.level() >= DebugLevel {
-		entry.log(DebugLevel, fmt.Sprint(args...))
+func (entry *Entry) fireHooks() {
+	entry.Logger.mu.Lock()
+	defer entry.Logger.mu.Unlock()
+	err := entry.Logger.Hooks.Fire(entry.Level, entry)
+	if err != nil {
+		fmt.Fprintf(os.Stderr, "Failed to fire hook: %v\n", err)
 	}
 }

+func (entry *Entry) write() {
+	entry.Logger.mu.Lock()
+	defer entry.Logger.mu.Unlock()
+	serialized, err := entry.Logger.Formatter.Format(entry)
+	if err != nil {
+		fmt.Fprintf(os.Stderr, "Failed to obtain reader, %v\n", err)
+	} else {
+		_, err = entry.Logger.Out.Write(serialized)
+		if err != nil {
+			fmt.Fprintf(os.Stderr, "Failed to write to log, %v\n", err)
+		}
+	}
+}
+
+func (entry *Entry) Log(level Level, args ...interface{}) {
+	if entry.Logger.IsLevelEnabled(level) {
+		entry.log(level, fmt.Sprint(args...))
+	}
+}
+
+func (entry *Entry) Trace(args ...interface{}) {
+	entry.Log(TraceLevel, args...)
+}
+
+func (entry *Entry) Debug(args ...interface{}) {
+	entry.Log(DebugLevel, args...)
+}
+
 func (entry *Entry) Print(args ...interface{}) {
 	entry.Info(args...)
 }

 func (entry *Entry) Info(args ...interface{}) {
-	if entry.Logger.level() >= InfoLevel {
-		entry.log(InfoLevel, fmt.Sprint(args...))
-	}
+	entry.Log(InfoLevel, args...)
 }

 func (entry *Entry) Warn(args ...interface{}) {
-	if entry.Logger.level() >= WarnLevel {
-		entry.log(WarnLevel, fmt.Sprint(args...))
-	}
+	entry.Log(WarnLevel, args...)
 }

 func (entry *Entry) Warning(args ...interface{}) {

@ -152,37 +294,37 @@ func (entry *Entry) Warning(args ...interface{}) {
 }

 func (entry *Entry) Error(args ...interface{}) {
-	if entry.Logger.level() >= ErrorLevel {
-		entry.log(ErrorLevel, fmt.Sprint(args...))
-	}
+	entry.Log(ErrorLevel, args...)
 }

 func (entry *Entry) Fatal(args ...interface{}) {
-	if entry.Logger.level() >= FatalLevel {
-		entry.log(FatalLevel, fmt.Sprint(args...))
-	}
-	Exit(1)
+	entry.Log(FatalLevel, args...)
+	entry.Logger.Exit(1)
 }

 func (entry *Entry) Panic(args ...interface{}) {
-	if entry.Logger.level() >= PanicLevel {
-		entry.log(PanicLevel, fmt.Sprint(args...))
-	}
+	entry.Log(PanicLevel, args...)
 	panic(fmt.Sprint(args...))
 }

 // Entry Printf family functions

-func (entry *Entry) Debugf(format string, args ...interface{}) {
-	if entry.Logger.level() >= DebugLevel {
-		entry.Debug(fmt.Sprintf(format, args...))
+func (entry *Entry) Logf(level Level, format string, args ...interface{}) {
+	if entry.Logger.IsLevelEnabled(level) {
+		entry.Log(level, fmt.Sprintf(format, args...))
 	}
 }

-func (entry *Entry) Infof(format string, args ...interface{}) {
-	if entry.Logger.level() >= InfoLevel {
-		entry.Info(fmt.Sprintf(format, args...))
-	}
+func (entry *Entry) Tracef(format string, args ...interface{}) {
+	entry.Logf(TraceLevel, format, args...)
+}
+
+func (entry *Entry) Debugf(format string, args ...interface{}) {
+	entry.Logf(DebugLevel, format, args...)
+}
+
+func (entry *Entry) Infof(format string, args ...interface{}) {
+	entry.Logf(InfoLevel, format, args...)
 }

 func (entry *Entry) Printf(format string, args ...interface{}) {

@ -190,9 +332,7 @@ func (entry *Entry) Printf(format string, args ...interface{}) {
 }

 func (entry *Entry) Warnf(format string, args ...interface{}) {
-	if entry.Logger.level() >= WarnLevel {
-		entry.Warn(fmt.Sprintf(format, args...))
-	}
+	entry.Logf(WarnLevel, format, args...)
 }

 func (entry *Entry) Warningf(format string, args ...interface{}) {

@ -200,36 +340,36 @@ func (entry *Entry) Warningf(format string, args ...interface{}) {
 }

 func (entry *Entry) Errorf(format string, args ...interface{}) {
|
func (entry *Entry) Errorf(format string, args ...interface{}) {
|
||||||
if entry.Logger.level() >= ErrorLevel {
|
entry.Logf(ErrorLevel, format, args...)
|
||||||
entry.Error(fmt.Sprintf(format, args...))
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (entry *Entry) Fatalf(format string, args ...interface{}) {
|
func (entry *Entry) Fatalf(format string, args ...interface{}) {
|
||||||
if entry.Logger.level() >= FatalLevel {
|
entry.Logf(FatalLevel, format, args...)
|
||||||
entry.Fatal(fmt.Sprintf(format, args...))
|
entry.Logger.Exit(1)
|
||||||
}
|
|
||||||
Exit(1)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (entry *Entry) Panicf(format string, args ...interface{}) {
|
func (entry *Entry) Panicf(format string, args ...interface{}) {
|
||||||
if entry.Logger.level() >= PanicLevel {
|
entry.Logf(PanicLevel, format, args...)
|
||||||
entry.Panic(fmt.Sprintf(format, args...))
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Entry Println family functions
|
// Entry Println family functions
|
||||||
|
|
||||||
func (entry *Entry) Debugln(args ...interface{}) {
|
func (entry *Entry) Logln(level Level, args ...interface{}) {
|
||||||
if entry.Logger.level() >= DebugLevel {
|
if entry.Logger.IsLevelEnabled(level) {
|
||||||
entry.Debug(entry.sprintlnn(args...))
|
entry.Log(level, entry.sprintlnn(args...))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (entry *Entry) Infoln(args ...interface{}) {
|
func (entry *Entry) Traceln(args ...interface{}) {
|
||||||
if entry.Logger.level() >= InfoLevel {
|
entry.Logln(TraceLevel, args...)
|
||||||
entry.Info(entry.sprintlnn(args...))
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (entry *Entry) Debugln(args ...interface{}) {
|
||||||
|
entry.Logln(DebugLevel, args...)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (entry *Entry) Infoln(args ...interface{}) {
|
||||||
|
entry.Logln(InfoLevel, args...)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (entry *Entry) Println(args ...interface{}) {
|
func (entry *Entry) Println(args ...interface{}) {
|
||||||
|
@ -237,9 +377,7 @@ func (entry *Entry) Println(args ...interface{}) {
|
||||||
}
|
}
|
||||||
|
|
||||||
func (entry *Entry) Warnln(args ...interface{}) {
|
func (entry *Entry) Warnln(args ...interface{}) {
|
||||||
if entry.Logger.level() >= WarnLevel {
|
entry.Logln(WarnLevel, args...)
|
||||||
entry.Warn(entry.sprintlnn(args...))
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (entry *Entry) Warningln(args ...interface{}) {
|
func (entry *Entry) Warningln(args ...interface{}) {
|
||||||
|
@ -247,22 +385,16 @@ func (entry *Entry) Warningln(args ...interface{}) {
|
||||||
}
|
}
|
||||||
|
|
||||||
func (entry *Entry) Errorln(args ...interface{}) {
|
func (entry *Entry) Errorln(args ...interface{}) {
|
||||||
if entry.Logger.level() >= ErrorLevel {
|
entry.Logln(ErrorLevel, args...)
|
||||||
entry.Error(entry.sprintlnn(args...))
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (entry *Entry) Fatalln(args ...interface{}) {
|
func (entry *Entry) Fatalln(args ...interface{}) {
|
||||||
if entry.Logger.level() >= FatalLevel {
|
entry.Logln(FatalLevel, args...)
|
||||||
entry.Fatal(entry.sprintlnn(args...))
|
entry.Logger.Exit(1)
|
||||||
}
|
|
||||||
Exit(1)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (entry *Entry) Panicln(args ...interface{}) {
|
func (entry *Entry) Panicln(args ...interface{}) {
|
||||||
if entry.Logger.level() >= PanicLevel {
|
entry.Logln(PanicLevel, args...)
|
||||||
entry.Panic(entry.sprintlnn(args...))
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Sprintlnn => Sprint no newline. This is to get the behavior of how
|
// Sprintlnn => Sprint no newline. This is to get the behavior of how
|
||||||
|
|
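The entry.go hunks above route every per-level helper through the new generic Entry.Log/Logf/Logln methods, add a Trace level, and split hook firing (fireHooks) from output (write), both of which now take the logger mutex. A minimal usage sketch against the vendored logrus v1.4.x API shown in this diff; the field name is illustrative:

package main

import log "github.com/sirupsen/logrus"

func main() {
	log.SetLevel(log.TraceLevel) // TraceLevel is introduced by this bump

	entry := log.WithField("component", "registry") // illustrative field name
	entry.Trace("cache lookup")                     // forwarded to Entry.Log(TraceLevel, ...)
	entry.Logf(log.DebugLevel, "took %dms", 3)      // Logf/Logln are the new generic helpers
}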
68  vendor/github.com/sirupsen/logrus/exported.go  generated vendored
@@ -1,7 +1,9 @@
 package logrus
 
 import (
+	"context"
 	"io"
+	"time"
 )
 
 var (
@@ -15,37 +17,38 @@ func StandardLogger() *Logger {
 
 // SetOutput sets the standard logger output.
 func SetOutput(out io.Writer) {
-	std.mu.Lock()
-	defer std.mu.Unlock()
-	std.Out = out
+	std.SetOutput(out)
 }
 
 // SetFormatter sets the standard logger formatter.
 func SetFormatter(formatter Formatter) {
-	std.mu.Lock()
-	defer std.mu.Unlock()
-	std.Formatter = formatter
+	std.SetFormatter(formatter)
+}
+
+// SetReportCaller sets whether the standard logger will include the calling
+// method as a field.
+func SetReportCaller(include bool) {
+	std.SetReportCaller(include)
 }
 
 // SetLevel sets the standard logger level.
 func SetLevel(level Level) {
-	std.mu.Lock()
-	defer std.mu.Unlock()
-	std.setLevel(level)
+	std.SetLevel(level)
 }
 
 // GetLevel returns the standard logger level.
 func GetLevel() Level {
-	std.mu.Lock()
-	defer std.mu.Unlock()
-	return std.level()
+	return std.GetLevel()
+}
+
+// IsLevelEnabled checks if the log level of the standard logger is greater than the level param
+func IsLevelEnabled(level Level) bool {
+	return std.IsLevelEnabled(level)
 }
 
 // AddHook adds a hook to the standard logger hooks.
 func AddHook(hook Hook) {
-	std.mu.Lock()
-	defer std.mu.Unlock()
-	std.Hooks.Add(hook)
+	std.AddHook(hook)
 }
 
 // WithError creates an entry from the standard logger and adds an error to it, using the value defined in ErrorKey as key.
@@ -53,6 +56,11 @@ func WithError(err error) *Entry {
 	return std.WithField(ErrorKey, err)
 }
 
+// WithContext creates an entry from the standard logger and adds a context to it.
+func WithContext(ctx context.Context) *Entry {
+	return std.WithContext(ctx)
+}
+
 // WithField creates an entry from the standard logger and adds a field to
 // it. If you want multiple fields, use `WithFields`.
 //
@@ -72,6 +80,20 @@ func WithFields(fields Fields) *Entry {
 	return std.WithFields(fields)
 }
 
+// WithTime creats an entry from the standard logger and overrides the time of
+// logs generated with it.
+//
+// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal
+// or Panic on the Entry it returns.
+func WithTime(t time.Time) *Entry {
+	return std.WithTime(t)
+}
+
+// Trace logs a message at level Trace on the standard logger.
+func Trace(args ...interface{}) {
+	std.Trace(args...)
+}
+
 // Debug logs a message at level Debug on the standard logger.
 func Debug(args ...interface{}) {
 	std.Debug(args...)
@@ -107,11 +129,16 @@ func Panic(args ...interface{}) {
 	std.Panic(args...)
 }
 
-// Fatal logs a message at level Fatal on the standard logger.
+// Fatal logs a message at level Fatal on the standard logger then the process will exit with status set to 1.
 func Fatal(args ...interface{}) {
 	std.Fatal(args...)
 }
 
+// Tracef logs a message at level Trace on the standard logger.
+func Tracef(format string, args ...interface{}) {
+	std.Tracef(format, args...)
+}
+
 // Debugf logs a message at level Debug on the standard logger.
 func Debugf(format string, args ...interface{}) {
 	std.Debugf(format, args...)
@@ -147,11 +174,16 @@ func Panicf(format string, args ...interface{}) {
 	std.Panicf(format, args...)
 }
 
-// Fatalf logs a message at level Fatal on the standard logger.
+// Fatalf logs a message at level Fatal on the standard logger then the process will exit with status set to 1.
func Fatalf(format string, args ...interface{}) {
 	std.Fatalf(format, args...)
 }
 
+// Traceln logs a message at level Trace on the standard logger.
+func Traceln(args ...interface{}) {
+	std.Traceln(args...)
+}
+
 // Debugln logs a message at level Debug on the standard logger.
 func Debugln(args ...interface{}) {
 	std.Debugln(args...)
@@ -187,7 +219,7 @@ func Panicln(args ...interface{}) {
 	std.Panicln(args...)
 }
 
-// Fatalln logs a message at level Fatal on the standard logger.
+// Fatalln logs a message at level Fatal on the standard logger then the process will exit with status set to 1.
 func Fatalln(args ...interface{}) {
 	std.Fatalln(args...)
 }
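exported.go now forwards the package-level helpers to the corresponding Logger methods and exposes the new SetReportCaller, IsLevelEnabled, WithContext, WithTime, and Trace* entry points. A short sketch of the standard-logger surface added above, assuming logrus v1.4.x as vendored here:

package main

import (
	"context"
	"time"

	log "github.com/sirupsen/logrus"
)

func main() {
	log.SetReportCaller(true) // adds caller (func/file) fields to each entry

	if log.IsLevelEnabled(log.DebugLevel) {
		log.WithContext(context.Background()).
			WithTime(time.Now().UTC()).
			Debug("debug message with caller info and a fixed timestamp")
	}
}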
51  vendor/github.com/sirupsen/logrus/formatter.go  generated vendored
@@ -2,7 +2,16 @@ package logrus
 
 import "time"
 
-const DefaultTimestampFormat = time.RFC3339
+// Default key names for the default fields
+const (
+	defaultTimestampFormat = time.RFC3339
+	FieldKeyMsg            = "msg"
+	FieldKeyLevel          = "level"
+	FieldKeyTime           = "time"
+	FieldKeyLogrusError    = "logrus_error"
+	FieldKeyFunc           = "func"
+	FieldKeyFile           = "file"
+)
 
 // The Formatter interface is used to implement a custom Formatter. It takes an
 // `Entry`. It exposes all the fields, including the default ones:
@@ -18,7 +27,7 @@ type Formatter interface {
 	Format(*Entry) ([]byte, error)
 }
 
-// This is to not silently overwrite `time`, `msg` and `level` fields when
+// This is to not silently overwrite `time`, `msg`, `func` and `level` fields when
 // dumping it. If this code wasn't there doing:
 //
 //  logrus.WithField("level", 1).Info("hello")
@@ -30,16 +39,40 @@ type Formatter interface {
 //
 // It's not exported because it's still using Data in an opinionated way. It's to
 // avoid code duplication between the two default formatters.
-func prefixFieldClashes(data Fields) {
-	if t, ok := data["time"]; ok {
-		data["fields.time"] = t
+func prefixFieldClashes(data Fields, fieldMap FieldMap, reportCaller bool) {
+	timeKey := fieldMap.resolve(FieldKeyTime)
+	if t, ok := data[timeKey]; ok {
+		data["fields."+timeKey] = t
+		delete(data, timeKey)
 	}
 
-	if m, ok := data["msg"]; ok {
-		data["fields.msg"] = m
+	msgKey := fieldMap.resolve(FieldKeyMsg)
+	if m, ok := data[msgKey]; ok {
+		data["fields."+msgKey] = m
+		delete(data, msgKey)
 	}
 
-	if l, ok := data["level"]; ok {
-		data["fields.level"] = l
+	levelKey := fieldMap.resolve(FieldKeyLevel)
+	if l, ok := data[levelKey]; ok {
+		data["fields."+levelKey] = l
+		delete(data, levelKey)
+	}
+
+	logrusErrKey := fieldMap.resolve(FieldKeyLogrusError)
+	if l, ok := data[logrusErrKey]; ok {
+		data["fields."+logrusErrKey] = l
+		delete(data, logrusErrKey)
+	}
+
+	// If reportCaller is not set, 'func' will not conflict.
+	if reportCaller {
+		funcKey := fieldMap.resolve(FieldKeyFunc)
+		if l, ok := data[funcKey]; ok {
+			data["fields."+funcKey] = l
+		}
+		fileKey := fieldMap.resolve(FieldKeyFile)
+		if l, ok := data[fileKey]; ok {
+			data["fields."+fileKey] = l
+		}
 	}
 }
10  vendor/github.com/sirupsen/logrus/go.mod  generated vendored  Normal file
@@ -0,0 +1,10 @@
+module github.com/sirupsen/logrus
+
+require (
+	github.com/davecgh/go-spew v1.1.1 // indirect
+	github.com/konsorten/go-windows-terminal-sequences v1.0.1
+	github.com/pmezard/go-difflib v1.0.0 // indirect
+	github.com/stretchr/objx v0.1.1 // indirect
+	github.com/stretchr/testify v1.2.2
+	golang.org/x/sys v0.0.0-20190422165155-953cdadca894
+)
16  vendor/github.com/sirupsen/logrus/go.sum  generated vendored  Normal file
@@ -0,0 +1,16 @@
+github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/konsorten/go-windows-terminal-sequences v0.0.0-20180402223658-b729f2633dfe h1:CHRGQ8V7OlCYtwaKPJi3iA7J+YdNKdo8j7nG5IgDhjs=
+github.com/konsorten/go-windows-terminal-sequences v0.0.0-20180402223658-b729f2633dfe/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk=
+github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/stretchr/objx v0.1.1 h1:2vfRuCMp5sSVIDSqO8oNnWJq7mPa6KVP3iPIwFBuy8A=
+github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w=
+github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
+golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33 h1:I6FyU15t786LL7oL/hn43zqTuEGr4PN7F4XJ1p4E3Y8=
+golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190422165155-953cdadca894 h1:Cz4ceDQGXuKRnVBDTS23GTn/pU5OE2C0WrNTOYK1Uuc=
+golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
75  vendor/github.com/sirupsen/logrus/json_formatter.go  generated vendored
@@ -1,18 +1,16 @@
 package logrus
 
 import (
+	"bytes"
 	"encoding/json"
 	"fmt"
+	"runtime"
 )
 
 type fieldKey string
-type FieldMap map[fieldKey]string
 
-const (
-	FieldKeyMsg   = "msg"
-	FieldKeyLevel = "level"
-	FieldKeyTime  = "time"
-)
+// FieldMap allows customization of the key names for default fields.
+type FieldMap map[fieldKey]string
 
 func (f FieldMap) resolve(key fieldKey) string {
 	if k, ok := f[key]; ok {
@@ -22,6 +20,7 @@ func (f FieldMap) resolve(key fieldKey) string {
 	return string(key)
 }
 
+// JSONFormatter formats logs into parsable json
 type JSONFormatter struct {
 	// TimestampFormat sets the format used for marshaling timestamps.
 	TimestampFormat string
@@ -29,20 +28,34 @@ type JSONFormatter struct {
 	// DisableTimestamp allows disabling automatic timestamps in output
 	DisableTimestamp bool
 
-	// FieldMap allows users to customize the names of keys for various fields.
+	// DataKey allows users to put all the log entry parameters into a nested dictionary at a given key.
+	DataKey string
+
+	// FieldMap allows users to customize the names of keys for default fields.
 	// As an example:
 	// formatter := &JSONFormatter{
 	//   	FieldMap: FieldMap{
 	// 		 FieldKeyTime:  "@timestamp",
 	// 		 FieldKeyLevel: "@level",
 	// 		 FieldKeyMsg:   "@message",
+	// 		 FieldKeyFunc:  "@caller",
 	//    },
 	// }
 	FieldMap FieldMap
+
+	// CallerPrettyfier can be set by the user to modify the content
+	// of the function and file keys in the json data when ReportCaller is
+	// activated. If any of the returned value is the empty string the
+	// corresponding key will be removed from json fields.
+	CallerPrettyfier func(*runtime.Frame) (function string, file string)
+
+	// PrettyPrint will indent all json logs
+	PrettyPrint bool
 }
 
+// Format renders a single log entry
 func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) {
-	data := make(Fields, len(entry.Data)+3)
+	data := make(Fields, len(entry.Data)+4)
 	for k, v := range entry.Data {
 		switch v := v.(type) {
 		case error:
@@ -53,22 +66,56 @@ func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) {
 			data[k] = v
 		}
 	}
-	prefixFieldClashes(data)
+
+	if f.DataKey != "" {
+		newData := make(Fields, 4)
+		newData[f.DataKey] = data
+		data = newData
+	}
+
+	prefixFieldClashes(data, f.FieldMap, entry.HasCaller())
+
 	timestampFormat := f.TimestampFormat
 	if timestampFormat == "" {
-		timestampFormat = DefaultTimestampFormat
+		timestampFormat = defaultTimestampFormat
 	}
 
+	if entry.err != "" {
+		data[f.FieldMap.resolve(FieldKeyLogrusError)] = entry.err
+	}
 	if !f.DisableTimestamp {
 		data[f.FieldMap.resolve(FieldKeyTime)] = entry.Time.Format(timestampFormat)
 	}
 	data[f.FieldMap.resolve(FieldKeyMsg)] = entry.Message
 	data[f.FieldMap.resolve(FieldKeyLevel)] = entry.Level.String()
+	if entry.HasCaller() {
+		funcVal := entry.Caller.Function
+		fileVal := fmt.Sprintf("%s:%d", entry.Caller.File, entry.Caller.Line)
+		if f.CallerPrettyfier != nil {
+			funcVal, fileVal = f.CallerPrettyfier(entry.Caller)
+		}
+		if funcVal != "" {
+			data[f.FieldMap.resolve(FieldKeyFunc)] = funcVal
+		}
+		if fileVal != "" {
+			data[f.FieldMap.resolve(FieldKeyFile)] = fileVal
+		}
+	}
 
-	serialized, err := json.Marshal(data)
-	if err != nil {
-		return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err)
+	var b *bytes.Buffer
+	if entry.Buffer != nil {
+		b = entry.Buffer
+	} else {
+		b = &bytes.Buffer{}
 	}
-	return append(serialized, '\n'), nil
+
+	encoder := json.NewEncoder(b)
+	if f.PrettyPrint {
+		encoder.SetIndent("", "  ")
+	}
+	if err := encoder.Encode(data); err != nil {
+		return nil, fmt.Errorf("failed to marshal fields to JSON, %v", err)
+	}
+
+	return b.Bytes(), nil
 }
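The JSONFormatter gains DataKey, PrettyPrint, CallerPrettyfier, and caller fields, and now encodes into the entry's buffer instead of calling json.Marshal. A configuration sketch using only the options introduced above; the DataKey value and field renames are illustrative:

package main

import (
	"runtime"

	log "github.com/sirupsen/logrus"
)

func main() {
	log.SetReportCaller(true) // required for FieldKeyFunc/FieldKeyFile to be emitted
	log.SetFormatter(&log.JSONFormatter{
		PrettyPrint: true,     // indent the JSON output
		DataKey:     "fields", // nest user data under one key (illustrative name)
		FieldMap: log.FieldMap{
			log.FieldKeyTime: "@timestamp",
			log.FieldKeyMsg:  "@message",
		},
		// Trim caller output to just the function name.
		CallerPrettyfier: func(f *runtime.Frame) (string, string) {
			return f.Function, ""
		},
	})
	log.Info("hello")
}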
242  vendor/github.com/sirupsen/logrus/logger.go  generated vendored
@@ -1,16 +1,18 @@
 package logrus
 
 import (
+	"context"
 	"io"
 	"os"
 	"sync"
 	"sync/atomic"
+	"time"
 )
 
 type Logger struct {
 	// The logs are `io.Copy`'d to this in a mutex. It's common to set this to a
 	// file, or leave it default which is `os.Stderr`. You can also set this to
-	// something more adventorous, such as logging to Kafka.
+	// something more adventurous, such as logging to Kafka.
 	Out io.Writer
 	// Hooks for the logger instance. These allow firing events based on logging
 	// levels and log entries. For example, to send errors to an error tracking
@@ -23,16 +25,24 @@ type Logger struct {
 	// own that implements the `Formatter` interface, see the `README` or included
 	// formatters for examples.
 	Formatter Formatter
+
+	// Flag for whether to log caller info (off by default)
+	ReportCaller bool
+
 	// The logging level the logger should log at. This is typically (and defaults
 	// to) `logrus.Info`, which allows Info(), Warn(), Error() and Fatal() to be
-	// logged. `logrus.Debug` is useful in
+	// logged.
 	Level Level
 	// Used to sync writing to the log. Locking is enabled by Default
 	mu MutexWrap
 	// Reusable empty entry
 	entryPool sync.Pool
+	// Function to exit the application, defaults to `os.Exit()`
+	ExitFunc exitFunc
 }
 
+type exitFunc func(int)
+
 type MutexWrap struct {
 	lock     sync.Mutex
 	disabled bool
@@ -72,6 +82,8 @@ func New() *Logger {
 		Formatter: new(TextFormatter),
 		Hooks:     make(LevelHooks),
 		Level:     InfoLevel,
+		ExitFunc:  os.Exit,
+		ReportCaller: false,
 	}
 }
 
@@ -84,11 +96,12 @@ func (logger *Logger) newEntry() *Entry {
 }
 
 func (logger *Logger) releaseEntry(entry *Entry) {
+	entry.Data = map[string]interface{}{}
 	logger.entryPool.Put(entry)
 }
 
 // Adds a field to the log entry, note that it doesn't log until you call
-// Debug, Print, Info, Warn, Fatal or Panic. It only creates a log entry.
+// Debug, Print, Info, Warn, Error, Fatal or Panic. It only creates a log entry.
 // If you want multiple fields, use `WithFields`.
 func (logger *Logger) WithField(key string, value interface{}) *Entry {
 	entry := logger.newEntry()
@@ -112,20 +125,38 @@ func (logger *Logger) WithError(err error) *Entry {
 	return entry.WithError(err)
 }
 
-func (logger *Logger) Debugf(format string, args ...interface{}) {
-	if logger.level() >= DebugLevel {
-		entry := logger.newEntry()
-		entry.Debugf(format, args...)
+// Add a context to the log entry.
+func (logger *Logger) WithContext(ctx context.Context) *Entry {
+	entry := logger.newEntry()
+	defer logger.releaseEntry(entry)
+	return entry.WithContext(ctx)
+}
+
+// Overrides the time of the log entry.
+func (logger *Logger) WithTime(t time.Time) *Entry {
+	entry := logger.newEntry()
+	defer logger.releaseEntry(entry)
+	return entry.WithTime(t)
+}
+
+func (logger *Logger) Logf(level Level, format string, args ...interface{}) {
+	if logger.IsLevelEnabled(level) {
+		entry := logger.newEntry()
+		entry.Logf(level, format, args...)
 		logger.releaseEntry(entry)
 	}
 }
 
-func (logger *Logger) Infof(format string, args ...interface{}) {
-	if logger.level() >= InfoLevel {
-		entry := logger.newEntry()
-		entry.Infof(format, args...)
-		logger.releaseEntry(entry)
-	}
+func (logger *Logger) Tracef(format string, args ...interface{}) {
+	logger.Logf(TraceLevel, format, args...)
+}
+
+func (logger *Logger) Debugf(format string, args ...interface{}) {
+	logger.Logf(DebugLevel, format, args...)
+}
+
+func (logger *Logger) Infof(format string, args ...interface{}) {
+	logger.Logf(InfoLevel, format, args...)
 }
 
 func (logger *Logger) Printf(format string, args ...interface{}) {
@@ -135,123 +166,91 @@ func (logger *Logger) Printf(format string, args ...interface{}) {
 }
 
 func (logger *Logger) Warnf(format string, args ...interface{}) {
-	if logger.level() >= WarnLevel {
-		entry := logger.newEntry()
-		entry.Warnf(format, args...)
-		logger.releaseEntry(entry)
-	}
+	logger.Logf(WarnLevel, format, args...)
 }
 
 func (logger *Logger) Warningf(format string, args ...interface{}) {
-	if logger.level() >= WarnLevel {
-		entry := logger.newEntry()
-		entry.Warnf(format, args...)
-		logger.releaseEntry(entry)
-	}
+	logger.Warnf(format, args...)
 }
 
 func (logger *Logger) Errorf(format string, args ...interface{}) {
-	if logger.level() >= ErrorLevel {
-		entry := logger.newEntry()
-		entry.Errorf(format, args...)
-		logger.releaseEntry(entry)
-	}
+	logger.Logf(ErrorLevel, format, args...)
 }
 
 func (logger *Logger) Fatalf(format string, args ...interface{}) {
-	if logger.level() >= FatalLevel {
-		entry := logger.newEntry()
-		entry.Fatalf(format, args...)
-		logger.releaseEntry(entry)
-	}
-	Exit(1)
+	logger.Logf(FatalLevel, format, args...)
+	logger.Exit(1)
 }
 
 func (logger *Logger) Panicf(format string, args ...interface{}) {
-	if logger.level() >= PanicLevel {
+	logger.Logf(PanicLevel, format, args...)
+}
+
+func (logger *Logger) Log(level Level, args ...interface{}) {
+	if logger.IsLevelEnabled(level) {
 		entry := logger.newEntry()
-		entry.Panicf(format, args...)
+		entry.Log(level, args...)
 		logger.releaseEntry(entry)
 	}
 }
 
+func (logger *Logger) Trace(args ...interface{}) {
+	logger.Log(TraceLevel, args...)
+}
+
 func (logger *Logger) Debug(args ...interface{}) {
-	if logger.level() >= DebugLevel {
-		entry := logger.newEntry()
-		entry.Debug(args...)
-		logger.releaseEntry(entry)
-	}
+	logger.Log(DebugLevel, args...)
 }
 
 func (logger *Logger) Info(args ...interface{}) {
-	if logger.level() >= InfoLevel {
-		entry := logger.newEntry()
-		entry.Info(args...)
-		logger.releaseEntry(entry)
-	}
+	logger.Log(InfoLevel, args...)
 }
 
 func (logger *Logger) Print(args ...interface{}) {
 	entry := logger.newEntry()
-	entry.Info(args...)
+	entry.Print(args...)
 	logger.releaseEntry(entry)
 }
 
 func (logger *Logger) Warn(args ...interface{}) {
-	if logger.level() >= WarnLevel {
-		entry := logger.newEntry()
-		entry.Warn(args...)
-		logger.releaseEntry(entry)
-	}
+	logger.Log(WarnLevel, args...)
 }
 
 func (logger *Logger) Warning(args ...interface{}) {
-	if logger.level() >= WarnLevel {
-		entry := logger.newEntry()
-		entry.Warn(args...)
-		logger.releaseEntry(entry)
-	}
+	logger.Warn(args...)
 }
 
 func (logger *Logger) Error(args ...interface{}) {
-	if logger.level() >= ErrorLevel {
-		entry := logger.newEntry()
-		entry.Error(args...)
-		logger.releaseEntry(entry)
-	}
+	logger.Log(ErrorLevel, args...)
 }
 
 func (logger *Logger) Fatal(args ...interface{}) {
-	if logger.level() >= FatalLevel {
-		entry := logger.newEntry()
-		entry.Fatal(args...)
-		logger.releaseEntry(entry)
-	}
-	Exit(1)
+	logger.Log(FatalLevel, args...)
+	logger.Exit(1)
 }
 
 func (logger *Logger) Panic(args ...interface{}) {
-	if logger.level() >= PanicLevel {
+	logger.Log(PanicLevel, args...)
+}
+
+func (logger *Logger) Logln(level Level, args ...interface{}) {
+	if logger.IsLevelEnabled(level) {
 		entry := logger.newEntry()
-		entry.Panic(args...)
+		entry.Logln(level, args...)
 		logger.releaseEntry(entry)
 	}
 }
 
+func (logger *Logger) Traceln(args ...interface{}) {
+	logger.Logln(TraceLevel, args...)
+}
+
 func (logger *Logger) Debugln(args ...interface{}) {
-	if logger.level() >= DebugLevel {
-		entry := logger.newEntry()
-		entry.Debugln(args...)
-		logger.releaseEntry(entry)
-	}
+	logger.Logln(DebugLevel, args...)
 }
 
 func (logger *Logger) Infoln(args ...interface{}) {
-	if logger.level() >= InfoLevel {
-		entry := logger.newEntry()
-		entry.Infoln(args...)
-		logger.releaseEntry(entry)
-	}
+	logger.Logln(InfoLevel, args...)
 }
 
 func (logger *Logger) Println(args ...interface{}) {
@@ -261,44 +260,32 @@ func (logger *Logger) Println(args ...interface{}) {
 }
 
 func (logger *Logger) Warnln(args ...interface{}) {
-	if logger.level() >= WarnLevel {
-		entry := logger.newEntry()
-		entry.Warnln(args...)
-		logger.releaseEntry(entry)
-	}
+	logger.Logln(WarnLevel, args...)
 }
 
 func (logger *Logger) Warningln(args ...interface{}) {
-	if logger.level() >= WarnLevel {
-		entry := logger.newEntry()
-		entry.Warnln(args...)
-		logger.releaseEntry(entry)
-	}
+	logger.Warnln(args...)
 }
 
 func (logger *Logger) Errorln(args ...interface{}) {
-	if logger.level() >= ErrorLevel {
-		entry := logger.newEntry()
-		entry.Errorln(args...)
-		logger.releaseEntry(entry)
-	}
+	logger.Logln(ErrorLevel, args...)
 }
 
 func (logger *Logger) Fatalln(args ...interface{}) {
-	if logger.level() >= FatalLevel {
-		entry := logger.newEntry()
-		entry.Fatalln(args...)
-		logger.releaseEntry(entry)
-	}
-	Exit(1)
+	logger.Logln(FatalLevel, args...)
+	logger.Exit(1)
 }
 
 func (logger *Logger) Panicln(args ...interface{}) {
-	if logger.level() >= PanicLevel {
-		entry := logger.newEntry()
-		entry.Panicln(args...)
-		logger.releaseEntry(entry)
-	}
+	logger.Logln(PanicLevel, args...)
+}
+
+func (logger *Logger) Exit(code int) {
+	runHandlers()
+	if logger.ExitFunc == nil {
+		logger.ExitFunc = os.Exit
+	}
+	logger.ExitFunc(code)
 }
 
 //When file is opened with appending mode, it's safe to
@@ -312,6 +299,53 @@ func (logger *Logger) level() Level {
 	return Level(atomic.LoadUint32((*uint32)(&logger.Level)))
 }
 
-func (logger *Logger) setLevel(level Level) {
+// SetLevel sets the logger level.
+func (logger *Logger) SetLevel(level Level) {
 	atomic.StoreUint32((*uint32)(&logger.Level), uint32(level))
 }
+
+// GetLevel returns the logger level.
+func (logger *Logger) GetLevel() Level {
+	return logger.level()
+}
+
+// AddHook adds a hook to the logger hooks.
+func (logger *Logger) AddHook(hook Hook) {
+	logger.mu.Lock()
+	defer logger.mu.Unlock()
+	logger.Hooks.Add(hook)
+}
+
+// IsLevelEnabled checks if the log level of the logger is greater than the level param
+func (logger *Logger) IsLevelEnabled(level Level) bool {
+	return logger.level() >= level
+}
+
+// SetFormatter sets the logger formatter.
+func (logger *Logger) SetFormatter(formatter Formatter) {
+	logger.mu.Lock()
+	defer logger.mu.Unlock()
+	logger.Formatter = formatter
+}
+
+// SetOutput sets the logger output.
+func (logger *Logger) SetOutput(output io.Writer) {
+	logger.mu.Lock()
+	defer logger.mu.Unlock()
+	logger.Out = output
+}
+
+func (logger *Logger) SetReportCaller(reportCaller bool) {
+	logger.mu.Lock()
+	defer logger.mu.Unlock()
+	logger.ReportCaller = reportCaller
+}
+
+// ReplaceHooks replaces the logger hooks and returns the old ones
+func (logger *Logger) ReplaceHooks(hooks LevelHooks) LevelHooks {
+	logger.mu.Lock()
+	oldHooks := logger.Hooks
+	logger.Hooks = hooks
+	logger.mu.Unlock()
+	return oldHooks
+}
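logger.go replaces the direct Exit(1) calls with logger.Exit, which runs registered exit handlers and then the overridable ExitFunc. A sketch of how the new field can be used, for example to keep Fatal from terminating a test process; the capture function below is illustrative:

package main

import (
	"fmt"

	log "github.com/sirupsen/logrus"
)

func main() {
	logger := log.New()

	exitCode := 0
	logger.ExitFunc = func(code int) { exitCode = code } // capture instead of os.Exit

	logger.Fatal("simulated fatal") // logs, runs exit handlers, then calls ExitFunc(1)
	fmt.Println("captured exit code:", exitCode)
}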
75  vendor/github.com/sirupsen/logrus/logrus.go  generated vendored
@@ -14,23 +14,12 @@ type Level uint32
 
 // Convert the Level to a string. E.g. PanicLevel becomes "panic".
 func (level Level) String() string {
-	switch level {
-	case DebugLevel:
-		return "debug"
-	case InfoLevel:
-		return "info"
-	case WarnLevel:
-		return "warning"
-	case ErrorLevel:
-		return "error"
-	case FatalLevel:
-		return "fatal"
-	case PanicLevel:
-		return "panic"
+	if b, err := level.MarshalText(); err == nil {
+		return string(b)
+	} else {
+		return "unknown"
 	}
-
-	return "unknown"
 }
 
 // ParseLevel takes a string level and returns the Logrus log level constant.
 func ParseLevel(lvl string) (Level, error) {
@@ -47,12 +36,47 @@ func ParseLevel(lvl string) (Level, error) {
 		return InfoLevel, nil
 	case "debug":
 		return DebugLevel, nil
+	case "trace":
+		return TraceLevel, nil
 	}
 
 	var l Level
 	return l, fmt.Errorf("not a valid logrus Level: %q", lvl)
 }
 
+// UnmarshalText implements encoding.TextUnmarshaler.
+func (level *Level) UnmarshalText(text []byte) error {
+	l, err := ParseLevel(string(text))
+	if err != nil {
+		return err
+	}
+
+	*level = Level(l)
+
+	return nil
+}
+
+func (level Level) MarshalText() ([]byte, error) {
+	switch level {
+	case TraceLevel:
+		return []byte("trace"), nil
+	case DebugLevel:
+		return []byte("debug"), nil
+	case InfoLevel:
+		return []byte("info"), nil
+	case WarnLevel:
+		return []byte("warning"), nil
+	case ErrorLevel:
+		return []byte("error"), nil
+	case FatalLevel:
+		return []byte("fatal"), nil
+	case PanicLevel:
+		return []byte("panic"), nil
+	}
+
+	return nil, fmt.Errorf("not a valid logrus level %d", level)
+}
+
 // A constant exposing all logging levels
 var AllLevels = []Level{
 	PanicLevel,
@@ -61,6 +85,7 @@ var AllLevels = []Level{
 	WarnLevel,
 	InfoLevel,
 	DebugLevel,
+	TraceLevel,
 }
 
 // These are the different logging levels. You can set the logging level to log
@@ -69,7 +94,7 @@ const (
 	// PanicLevel level, highest level of severity. Logs and then calls panic with the
 	// message passed to Debug, Info, ...
 	PanicLevel Level = iota
-	// FatalLevel level. Logs and then calls `os.Exit(1)`. It will exit even if the
+	// FatalLevel level. Logs and then calls `logger.Exit(1)`. It will exit even if the
 	// logging level is set to Panic.
 	FatalLevel
 	// ErrorLevel level. Logs. Used for errors that should definitely be noted.
@@ -82,6 +107,8 @@ const (
 	InfoLevel
 	// DebugLevel level. Usually only enabled when debugging. Very verbose logging.
 	DebugLevel
+	// TraceLevel level. Designates finer-grained informational events than the Debug.
+	TraceLevel
 )
 
 // Won't compile if StdLogger can't be realized by a log.Logger
@@ -140,4 +167,20 @@ type FieldLogger interface {
 	Errorln(args ...interface{})
 	Fatalln(args ...interface{})
 	Panicln(args ...interface{})
+
+	// IsDebugEnabled() bool
+	// IsInfoEnabled() bool
+	// IsWarnEnabled() bool
+	// IsErrorEnabled() bool
+	// IsFatalEnabled() bool
+	// IsPanicEnabled() bool
+}
+
+// Ext1FieldLogger (the first extension to FieldLogger) is superfluous, it is
+// here for consistancy. Do not use. Use Logger or Entry instead.
+type Ext1FieldLogger interface {
+	FieldLogger
+	Tracef(format string, args ...interface{})
+	Trace(args ...interface{})
+	Traceln(args ...interface{})
 }
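logrus.go adds TraceLevel and makes Level implement encoding.TextMarshaler and encoding.TextUnmarshaler, with String delegating to MarshalText. A round-trip sketch against this API:

package main

import (
	"fmt"

	log "github.com/sirupsen/logrus"
)

func main() {
	lvl, err := log.ParseLevel("trace") // "trace" is newly accepted here
	if err != nil {
		panic(err)
	}

	text, _ := lvl.MarshalText() // Level now implements encoding.TextMarshaler
	var back log.Level
	_ = back.UnmarshalText(text) // and encoding.TextUnmarshaler

	fmt.Println(lvl.String(), string(text), back) // trace trace trace
}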
10  vendor/github.com/sirupsen/logrus/terminal_appengine.go  generated vendored
@@ -1,10 +0,0 @@
-// +build appengine
-
-package logrus
-
-import "io"
-
-// IsTerminal returns true if stderr's file descriptor is a terminal.
-func IsTerminal(f io.Writer) bool {
-	return true
-}

10  vendor/github.com/sirupsen/logrus/terminal_bsd.go  generated vendored
@@ -1,10 +0,0 @@
-// +build darwin freebsd openbsd netbsd dragonfly
-// +build !appengine
-
-package logrus
-
-import "syscall"
-
-const ioctlReadTermios = syscall.TIOCGETA
-
-type Termios syscall.Termios
11  vendor/github.com/sirupsen/logrus/terminal_check_appengine.go  generated vendored  Normal file
@@ -0,0 +1,11 @@
+// +build appengine
+
+package logrus
+
+import (
+	"io"
+)
+
+func checkIfTerminal(w io.Writer) bool {
+	return true
+}

13  vendor/github.com/sirupsen/logrus/terminal_check_bsd.go  generated vendored  Normal file
@@ -0,0 +1,13 @@
+// +build darwin dragonfly freebsd netbsd openbsd
+
+package logrus
+
+import "golang.org/x/sys/unix"
+
+const ioctlReadTermios = unix.TIOCGETA
+
+func isTerminal(fd int) bool {
+	_, err := unix.IoctlGetTermios(fd, ioctlReadTermios)
+	return err == nil
+}
+

11  vendor/github.com/sirupsen/logrus/terminal_check_no_terminal.go  generated vendored  Normal file
@@ -0,0 +1,11 @@
+// +build js nacl plan9
+
+package logrus
+
+import (
+	"io"
+)
+
+func checkIfTerminal(w io.Writer) bool {
+	return false
+}

17  vendor/github.com/sirupsen/logrus/terminal_check_notappengine.go  generated vendored  Normal file
@@ -0,0 +1,17 @@
+// +build !appengine,!js,!windows,!nacl,!plan9
+
+package logrus
+
+import (
+	"io"
+	"os"
+)
+
+func checkIfTerminal(w io.Writer) bool {
+	switch v := w.(type) {
+	case *os.File:
+		return isTerminal(int(v.Fd()))
+	default:
+		return false
+	}
+}

11  vendor/github.com/sirupsen/logrus/terminal_check_solaris.go  generated vendored  Normal file
@@ -0,0 +1,11 @@
+package logrus
+
+import (
+	"golang.org/x/sys/unix"
+)
+
+// IsTerminal returns true if the given file descriptor is a terminal.
+func isTerminal(fd int) bool {
+	_, err := unix.IoctlGetTermio(fd, unix.TCGETA)
+	return err == nil
+}

13  vendor/github.com/sirupsen/logrus/terminal_check_unix.go  generated vendored  Normal file
@@ -0,0 +1,13 @@
+// +build linux aix
+
+package logrus
+
+import "golang.org/x/sys/unix"
+
+const ioctlReadTermios = unix.TCGETS
+
+func isTerminal(fd int) bool {
+	_, err := unix.IoctlGetTermios(fd, ioctlReadTermios)
+	return err == nil
+}
+
34  vendor/github.com/sirupsen/logrus/terminal_check_windows.go  generated vendored  Normal file
@@ -0,0 +1,34 @@
+// +build !appengine,!js,windows
+
+package logrus
+
+import (
+	"io"
+	"os"
+	"syscall"
+
+	sequences "github.com/konsorten/go-windows-terminal-sequences"
+)
+
+func initTerminal(w io.Writer) {
+	switch v := w.(type) {
+	case *os.File:
+		sequences.EnableVirtualTerminalProcessing(syscall.Handle(v.Fd()), true)
+	}
+}
+
+func checkIfTerminal(w io.Writer) bool {
+	var ret bool
+	switch v := w.(type) {
+	case *os.File:
+		var mode uint32
+		err := syscall.GetConsoleMode(syscall.Handle(v.Fd()), &mode)
+		ret = (err == nil)
+	default:
+		ret = false
+	}
+	if ret {
+		initTerminal(w)
+	}
+	return ret
+}
14
vendor/github.com/sirupsen/logrus/terminal_linux.go
generated
vendored
14
vendor/github.com/sirupsen/logrus/terminal_linux.go
generated
vendored
|
@ -1,14 +0,0 @@
|
||||||
// Based on ssh/terminal:
|
|
||||||
// Copyright 2013 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
// +build !appengine
|
|
||||||
|
|
||||||
package logrus
|
|
||||||
|
|
||||||
import "syscall"
|
|
||||||
|
|
||||||
const ioctlReadTermios = syscall.TCGETS
|
|
||||||
|
|
||||||
type Termios syscall.Termios
|
|
28
vendor/github.com/sirupsen/logrus/terminal_notwindows.go
generated
vendored
28
vendor/github.com/sirupsen/logrus/terminal_notwindows.go
generated
vendored
|
@ -1,28 +0,0 @@
|
||||||
// Based on ssh/terminal:
|
|
||||||
// Copyright 2011 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
// +build linux darwin freebsd openbsd netbsd dragonfly
|
|
||||||
// +build !appengine
|
|
||||||
|
|
||||||
package logrus
|
|
||||||
|
|
||||||
import (
|
|
||||||
"io"
|
|
||||||
"os"
|
|
||||||
"syscall"
|
|
||||||
"unsafe"
|
|
||||||
)
|
|
||||||
|
|
||||||
// IsTerminal returns true if stderr's file descriptor is a terminal.
|
|
||||||
func IsTerminal(f io.Writer) bool {
|
|
||||||
var termios Termios
|
|
||||||
switch v := f.(type) {
|
|
||||||
case *os.File:
|
|
||||||
_, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(v.Fd()), ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0)
|
|
||||||
return err == 0
|
|
||||||
default:
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
}
|
|
21
vendor/github.com/sirupsen/logrus/terminal_solaris.go
generated
vendored
21
vendor/github.com/sirupsen/logrus/terminal_solaris.go
generated
vendored
|
@ -1,21 +0,0 @@
|
||||||
// +build solaris,!appengine
|
|
||||||
|
|
||||||
package logrus
|
|
||||||
|
|
||||||
import (
|
|
||||||
"io"
|
|
||||||
"os"
|
|
||||||
|
|
||||||
"golang.org/x/sys/unix"
|
|
||||||
)
|
|
||||||
|
|
||||||
// IsTerminal returns true if the given file descriptor is a terminal.
|
|
||||||
func IsTerminal(f io.Writer) bool {
|
|
||||||
switch v := f.(type) {
|
|
||||||
case *os.File:
|
|
||||||
_, err := unix.IoctlGetTermios(int(v.Fd()), unix.TCGETA)
|
|
||||||
return err == nil
|
|
||||||
default:
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
}
|
|
82 vendor/github.com/sirupsen/logrus/terminal_windows.go generated vendored
@@ -1,82 +0,0 @@
-// Based on ssh/terminal:
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build windows,!appengine
-
-package logrus
-
-import (
-	"bytes"
-	"errors"
-	"io"
-	"os"
-	"os/exec"
-	"strconv"
-	"strings"
-	"syscall"
-	"unsafe"
-)
-
-var kernel32 = syscall.NewLazyDLL("kernel32.dll")
-
-var (
-	procGetConsoleMode = kernel32.NewProc("GetConsoleMode")
-	procSetConsoleMode = kernel32.NewProc("SetConsoleMode")
-)
-
-const (
-	enableProcessedOutput           = 0x0001
-	enableWrapAtEolOutput           = 0x0002
-	enableVirtualTerminalProcessing = 0x0004
-)
-
-func getVersion() (float64, error) {
-	stdout, stderr := &bytes.Buffer{}, &bytes.Buffer{}
-	cmd := exec.Command("cmd", "ver")
-	cmd.Stdout = stdout
-	cmd.Stderr = stderr
-	err := cmd.Run()
-	if err != nil {
-		return -1, err
-	}
-
-	// The output should be like "Microsoft Windows [Version XX.X.XXXXXX]"
-	version := strings.Replace(stdout.String(), "\n", "", -1)
-	version = strings.Replace(version, "\r\n", "", -1)
-
-	x1 := strings.Index(version, "[Version")
-
-	if x1 == -1 || strings.Index(version, "]") == -1 {
-		return -1, errors.New("Can't determine Windows version")
-	}
-
-	return strconv.ParseFloat(version[x1+9:x1+13], 64)
-}
-
-func init() {
-	ver, err := getVersion()
-	if err != nil {
-		return
-	}
-
-	// Activate Virtual Processing for Windows CMD
-	// Info: https://msdn.microsoft.com/en-us/library/windows/desktop/ms686033(v=vs.85).aspx
-	if ver >= 10 {
-		handle := syscall.Handle(os.Stderr.Fd())
-		procSetConsoleMode.Call(uintptr(handle), enableProcessedOutput|enableWrapAtEolOutput|enableVirtualTerminalProcessing)
-	}
-}
-
-// IsTerminal returns true if stderr's file descriptor is a terminal.
-func IsTerminal(f io.Writer) bool {
-	switch v := f.(type) {
-	case *os.File:
-		var st uint32
-		r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(v.Fd()), uintptr(unsafe.Pointer(&st)), 0)
-		return r != 0 && e == 0
-	default:
-		return false
-	}
-}
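The per-platform IsTerminal helpers deleted above are replaced in this logrus bump by an internal checkIfTerminal helper (see the text_formatter.go hunks below). As a rough, hedged sketch of the same idea, assuming the golang.org/x/crypto/ssh/terminal package rather than whatever the vendored code actually calls, a cross-platform check can look like this:

```go
package main

import (
	"fmt"
	"io"
	"os"

	"golang.org/x/crypto/ssh/terminal"
)

// isTerminal reports whether w is backed by a real terminal. This mirrors the
// intent of the deleted helpers; it is an illustrative sketch, not the code
// vendored by this PR.
func isTerminal(w io.Writer) bool {
	if f, ok := w.(*os.File); ok {
		return terminal.IsTerminal(int(f.Fd()))
	}
	return false
}

func main() {
	fmt.Println("stderr is a terminal:", isTerminal(os.Stderr))
}
```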
228 vendor/github.com/sirupsen/logrus/text_formatter.go generated vendored
@@ -3,6 +3,8 @@ package logrus
 import (
 	"bytes"
 	"fmt"
+	"os"
+	"runtime"
 	"sort"
 	"strings"
 	"sync"
@@ -10,22 +12,19 @@ import (
 )
 
 const (
-	nocolor = 0
 	red    = 31
-	green  = 32
 	yellow = 33
-	blue   = 34
+	blue   = 36
 	gray   = 37
 )
 
-var (
-	baseTimestamp time.Time
-)
+var baseTimestamp time.Time
 
 func init() {
 	baseTimestamp = time.Now()
 }
 
+// TextFormatter formats logs into text
 type TextFormatter struct {
 	// Set to true to bypass checking for a TTY before outputting colors.
 	ForceColors bool
@@ -33,6 +32,9 @@ type TextFormatter struct {
 	// Force disabling colors.
 	DisableColors bool
 
+	// Override coloring based on CLICOLOR and CLICOLOR_FORCE. - https://bixense.com/clicolors/
+	EnvironmentOverrideColors bool
+
 	// Disable timestamp logging. useful when output is redirected to logging
 	// system that already adds timestamps.
 	DisableTimestamp bool
@@ -49,66 +51,151 @@ type TextFormatter struct {
 	// be desired.
 	DisableSorting bool
 
+	// The keys sorting function, when uninitialized it uses sort.Strings.
+	SortingFunc func([]string)
+
+	// Disables the truncation of the level text to 4 characters.
+	DisableLevelTruncation bool
+
 	// QuoteEmptyFields will wrap empty fields in quotes if true
 	QuoteEmptyFields bool
 
-	// QuoteCharacter can be set to the override the default quoting character "
-	// with something else. For example: ', or `.
-	QuoteCharacter string
-
 	// Whether the logger's out is to a terminal
 	isTerminal bool
 
-	sync.Once
+	// FieldMap allows users to customize the names of keys for default fields.
+	// As an example:
+	// formatter := &TextFormatter{
+	//     FieldMap: FieldMap{
+	//         FieldKeyTime:  "@timestamp",
+	//         FieldKeyLevel: "@level",
+	//         FieldKeyMsg:   "@message"}}
+	FieldMap FieldMap
+
+	// CallerPrettyfier can be set by the user to modify the content
+	// of the function and file keys in the data when ReportCaller is
+	// activated. If any of the returned value is the empty string the
+	// corresponding key will be removed from fields.
+	CallerPrettyfier func(*runtime.Frame) (function string, file string)
+
+	terminalInitOnce sync.Once
 }
 
 func (f *TextFormatter) init(entry *Entry) {
-	if len(f.QuoteCharacter) == 0 {
-		f.QuoteCharacter = "\""
-	}
 	if entry.Logger != nil {
-		f.isTerminal = IsTerminal(entry.Logger.Out)
+		f.isTerminal = checkIfTerminal(entry.Logger.Out)
 	}
 }
 
+func (f *TextFormatter) isColored() bool {
+	isColored := f.ForceColors || (f.isTerminal && (runtime.GOOS != "windows"))
+
+	if f.EnvironmentOverrideColors {
+		if force, ok := os.LookupEnv("CLICOLOR_FORCE"); ok && force != "0" {
+			isColored = true
+		} else if ok && force == "0" {
+			isColored = false
+		} else if os.Getenv("CLICOLOR") == "0" {
+			isColored = false
+		}
+	}
+
+	return isColored && !f.DisableColors
+}
+
+// Format renders a single log entry
 func (f *TextFormatter) Format(entry *Entry) ([]byte, error) {
-	var b *bytes.Buffer
-	keys := make([]string, 0, len(entry.Data))
-	for k := range entry.Data {
+	data := make(Fields)
+	for k, v := range entry.Data {
+		data[k] = v
+	}
+	prefixFieldClashes(data, f.FieldMap, entry.HasCaller())
+	keys := make([]string, 0, len(data))
+	for k := range data {
 		keys = append(keys, k)
 	}
 
-	if !f.DisableSorting {
-		sort.Strings(keys)
+	var funcVal, fileVal string
+
+	fixedKeys := make([]string, 0, 4+len(data))
+	if !f.DisableTimestamp {
+		fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyTime))
 	}
+	fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyLevel))
+	if entry.Message != "" {
+		fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyMsg))
+	}
+	if entry.err != "" {
+		fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyLogrusError))
+	}
+	if entry.HasCaller() {
+		if f.CallerPrettyfier != nil {
+			funcVal, fileVal = f.CallerPrettyfier(entry.Caller)
+		} else {
+			funcVal = entry.Caller.Function
+			fileVal = fmt.Sprintf("%s:%d", entry.Caller.File, entry.Caller.Line)
+		}
+
+		if funcVal != "" {
+			fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyFunc))
+		}
+		if fileVal != "" {
+			fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyFile))
+		}
+	}
+
+	if !f.DisableSorting {
+		if f.SortingFunc == nil {
+			sort.Strings(keys)
+			fixedKeys = append(fixedKeys, keys...)
+		} else {
+			if !f.isColored() {
+				fixedKeys = append(fixedKeys, keys...)
+				f.SortingFunc(fixedKeys)
+			} else {
+				f.SortingFunc(keys)
+			}
+		}
+	} else {
+		fixedKeys = append(fixedKeys, keys...)
+	}
 
+	var b *bytes.Buffer
 	if entry.Buffer != nil {
 		b = entry.Buffer
 	} else {
 		b = &bytes.Buffer{}
 	}
 
-	prefixFieldClashes(entry.Data)
-
-	f.Do(func() { f.init(entry) })
-
-	isColored := (f.ForceColors || f.isTerminal) && !f.DisableColors
+	f.terminalInitOnce.Do(func() { f.init(entry) })
 
 	timestampFormat := f.TimestampFormat
 	if timestampFormat == "" {
-		timestampFormat = DefaultTimestampFormat
+		timestampFormat = defaultTimestampFormat
 	}
-	if isColored {
-		f.printColored(b, entry, keys, timestampFormat)
+	if f.isColored() {
+		f.printColored(b, entry, keys, data, timestampFormat)
 	} else {
-		if !f.DisableTimestamp {
-			f.appendKeyValue(b, "time", entry.Time.Format(timestampFormat))
-		}
-		f.appendKeyValue(b, "level", entry.Level.String())
-		if entry.Message != "" {
-			f.appendKeyValue(b, "msg", entry.Message)
-		}
-		for _, key := range keys {
-			f.appendKeyValue(b, key, entry.Data[key])
+		for _, key := range fixedKeys {
+			var value interface{}
+			switch {
+			case key == f.FieldMap.resolve(FieldKeyTime):
+				value = entry.Time.Format(timestampFormat)
+			case key == f.FieldMap.resolve(FieldKeyLevel):
+				value = entry.Level.String()
+			case key == f.FieldMap.resolve(FieldKeyMsg):
+				value = entry.Message
+			case key == f.FieldMap.resolve(FieldKeyLogrusError):
+				value = entry.err
+			case key == f.FieldMap.resolve(FieldKeyFunc) && entry.HasCaller():
+				value = funcVal
+			case key == f.FieldMap.resolve(FieldKeyFile) && entry.HasCaller():
+				value = fileVal
+			default:
+				value = data[key]
+			}
+			f.appendKeyValue(b, key, value)
 		}
 	}
@@ -116,10 +203,10 @@ func (f *TextFormatter) Format(entry *Entry) ([]byte, error) {
 	return b.Bytes(), nil
 }
 
-func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []string, timestampFormat string) {
+func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []string, data Fields, timestampFormat string) {
 	var levelColor int
 	switch entry.Level {
-	case DebugLevel:
+	case DebugLevel, TraceLevel:
 		levelColor = gray
 	case WarnLevel:
 		levelColor = yellow
@@ -129,17 +216,42 @@ func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []strin
 		levelColor = blue
 	}
 
-	levelText := strings.ToUpper(entry.Level.String())[0:4]
+	levelText := strings.ToUpper(entry.Level.String())
+	if !f.DisableLevelTruncation {
+		levelText = levelText[0:4]
+	}
+
+	// Remove a single newline if it already exists in the message to keep
+	// the behavior of logrus text_formatter the same as the stdlib log package
+	entry.Message = strings.TrimSuffix(entry.Message, "\n")
+
+	caller := ""
+	if entry.HasCaller() {
+		funcVal := fmt.Sprintf("%s()", entry.Caller.Function)
+		fileVal := fmt.Sprintf("%s:%d", entry.Caller.File, entry.Caller.Line)
+
+		if f.CallerPrettyfier != nil {
+			funcVal, fileVal = f.CallerPrettyfier(entry.Caller)
+		}
+
+		if fileVal == "" {
+			caller = funcVal
+		} else if funcVal == "" {
+			caller = fileVal
+		} else {
+			caller = fileVal + " " + funcVal
+		}
+	}
 
 	if f.DisableTimestamp {
-		fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m %-44s ", levelColor, levelText, entry.Message)
+		fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m%s %-44s ", levelColor, levelText, caller, entry.Message)
 	} else if !f.FullTimestamp {
-		fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%04d] %-44s ", levelColor, levelText, int(entry.Time.Sub(baseTimestamp)/time.Second), entry.Message)
+		fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%04d]%s %-44s ", levelColor, levelText, int(entry.Time.Sub(baseTimestamp)/time.Second), caller, entry.Message)
 	} else {
-		fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%s] %-44s ", levelColor, levelText, entry.Time.Format(timestampFormat), entry.Message)
+		fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%s]%s %-44s ", levelColor, levelText, entry.Time.Format(timestampFormat), caller, entry.Message)
 	}
 	for _, k := range keys {
-		v := entry.Data[k]
+		v := data[k]
 		fmt.Fprintf(b, " \x1b[%dm%s\x1b[0m=", levelColor, k)
 		f.appendValue(b, v)
 	}
@@ -153,7 +265,7 @@ func (f *TextFormatter) needsQuoting(text string) bool {
 		if !((ch >= 'a' && ch <= 'z') ||
 			(ch >= 'A' && ch <= 'Z') ||
 			(ch >= '0' && ch <= '9') ||
-			ch == '-' || ch == '.') {
+			ch == '-' || ch == '.' || ch == '_' || ch == '/' || ch == '@' || ch == '^' || ch == '+') {
 			return true
 		}
 	}
@@ -161,29 +273,23 @@ func (f *TextFormatter) needsQuoting(text string) bool {
 }
 
 func (f *TextFormatter) appendKeyValue(b *bytes.Buffer, key string, value interface{}) {
+	if b.Len() > 0 {
+		b.WriteByte(' ')
+	}
 	b.WriteString(key)
 	b.WriteByte('=')
 	f.appendValue(b, value)
-	b.WriteByte(' ')
 }
 
 func (f *TextFormatter) appendValue(b *bytes.Buffer, value interface{}) {
-	switch value := value.(type) {
-	case string:
-		if !f.needsQuoting(value) {
-			b.WriteString(value)
-		} else {
-			fmt.Fprintf(b, "%s%v%s", f.QuoteCharacter, value, f.QuoteCharacter)
-		}
-	case error:
-		errmsg := value.Error()
-		if !f.needsQuoting(errmsg) {
-			b.WriteString(errmsg)
-		} else {
-			fmt.Fprintf(b, "%s%v%s", f.QuoteCharacter, errmsg, f.QuoteCharacter)
-		}
-	default:
-		fmt.Fprint(b, value)
+	stringVal, ok := value.(string)
+	if !ok {
+		stringVal = fmt.Sprint(value)
+	}
+
+	if !f.needsQuoting(stringVal) {
+		b.WriteString(stringVal)
+	} else {
+		b.WriteString(fmt.Sprintf("%q", stringVal))
 	}
 }
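The hunks above pull in several newer TextFormatter options (EnvironmentOverrideColors, DisableLevelTruncation, SortingFunc, FieldMap, CallerPrettyfier). A minimal sketch of how a consumer of the bumped dependency might opt into them; the specific field values below are illustrative, not part of this PR:

```go
package main

import (
	"sort"

	"github.com/sirupsen/logrus"
)

func main() {
	logrus.SetFormatter(&logrus.TextFormatter{
		EnvironmentOverrideColors: true,         // honor CLICOLOR / CLICOLOR_FORCE
		DisableLevelTruncation:    true,         // keep the full level name in colored output
		SortingFunc:               sort.Strings, // custom ordering of field keys
		FieldMap: logrus.FieldMap{
			logrus.FieldKeyMsg: "@message", // rename the default "msg" key
		},
	})
	logrus.WithField("component", "registry").Info("formatter configured")
}
```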
2 vendor/github.com/sirupsen/logrus/writer.go generated vendored
@@ -24,6 +24,8 @@ func (entry *Entry) WriterLevel(level Level) *io.PipeWriter {
 	var printFunc func(args ...interface{})
 
 	switch level {
+	case TraceLevel:
+		printFunc = entry.Trace
 	case DebugLevel:
 		printFunc = entry.Debug
 	case InfoLevel:
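writer.go only gains a TraceLevel case, but that is what lets the new trace level flow through the io.Writer adapter. A small, hedged usage sketch against the bumped logrus API:

```go
package main

import (
	"fmt"

	"github.com/sirupsen/logrus"
)

func main() {
	logrus.SetLevel(logrus.TraceLevel)

	// WriterLevel returns an *io.PipeWriter whose writes are logged at the
	// given level; with this bump, TraceLevel is a valid choice.
	w := logrus.StandardLogger().WriterLevel(logrus.TraceLevel)
	defer w.Close()

	fmt.Fprintln(w, "this line is emitted through the trace-level writer")
}
```

In practice such a writer is usually handed to another library that expects an io.Writer, for example wrapped in a *log.Logger for http.Server's ErrorLog, rather than written to directly.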
12 vendor/github.com/spf13/cobra/.gitignore generated vendored
@@ -19,6 +19,18 @@ _cgo_export.*
 
 _testmain.go
 
+# Vim files https://github.com/github/gitignore/blob/master/Global/Vim.gitignore
+# swap
+[._]*.s[a-w][a-z]
+[._]s[a-w][a-z]
+# session
+Session.vim
+# temporary
+.netrwhist
+*~
+# auto-generated tag files
+tags
+
 *.exe
 
 cobra.test
3 vendor/github.com/spf13/cobra/.mailmap generated vendored Normal file
@@ -0,0 +1,3 @@
+Steve Francia <steve.francia@gmail.com>
+Bjørn Erik Pedersen <bjorn.erik.pedersen@gmail.com>
+Fabiano Franz <ffranz@redhat.com> <contact@fabianofranz.com>
23 vendor/github.com/spf13/cobra/.travis.yml generated vendored
@@ -1,8 +1,21 @@
 language: go
-go:
-  - 1.3
-  - 1.4.2
-  - tip
+
+matrix:
+  include:
+    - go: 1.9.4
+    - go: 1.10.0
+    - go: tip
+  allow_failures:
+    - go: tip
+
+before_install:
+  - mkdir -p bin
+  - curl -Lso bin/shellcheck https://github.com/caarlos0/shellcheck-docker/releases/download/v0.4.3/shellcheck
+  - chmod +x bin/shellcheck
+
 script:
-  - go test ./...
+  - PATH=$PATH:$PWD/bin go test -v ./...
   - go build
+  - diff -u <(echo -n) <(gofmt -d -s .)
+  - if [ -z $NOVET ]; then
+      diff -u <(echo -n) <(go tool vet . 2>&1 | grep -vE 'ExampleCommand|bash_completions.*Fprint');
+    fi
695 vendor/github.com/spf13/cobra/README.md generated vendored
@@ -1,95 +1,188 @@
||||||
# Cobra
|
![cobra logo](https://cloud.githubusercontent.com/assets/173412/10886352/ad566232-814f-11e5-9cd0-aa101788c117.png)
|
||||||
|
|
||||||
A Commander for modern go CLI interactions
|
Cobra is both a library for creating powerful modern CLI applications as well as a program to generate applications and command files.
|
||||||
|
|
||||||
[![Build Status](https://travis-ci.org/spf13/cobra.svg)](https://travis-ci.org/spf13/cobra)
|
Many of the most widely used Go projects are built using Cobra including:
|
||||||
|
|
||||||
## Overview
|
* [Kubernetes](http://kubernetes.io/)
|
||||||
|
* [Hugo](http://gohugo.io)
|
||||||
|
* [rkt](https://github.com/coreos/rkt)
|
||||||
|
* [etcd](https://github.com/coreos/etcd)
|
||||||
|
* [Moby (former Docker)](https://github.com/moby/moby)
|
||||||
|
* [Docker (distribution)](https://github.com/docker/distribution)
|
||||||
|
* [OpenShift](https://www.openshift.com/)
|
||||||
|
* [Delve](https://github.com/derekparker/delve)
|
||||||
|
* [GopherJS](http://www.gopherjs.org/)
|
||||||
|
* [CockroachDB](http://www.cockroachlabs.com/)
|
||||||
|
* [Bleve](http://www.blevesearch.com/)
|
||||||
|
* [ProjectAtomic (enterprise)](http://www.projectatomic.io/)
|
||||||
|
* [GiantSwarm's swarm](https://github.com/giantswarm/cli)
|
||||||
|
* [Nanobox](https://github.com/nanobox-io/nanobox)/[Nanopack](https://github.com/nanopack)
|
||||||
|
* [rclone](http://rclone.org/)
|
||||||
|
* [nehm](https://github.com/bogem/nehm)
|
||||||
|
* [Pouch](https://github.com/alibaba/pouch)
|
||||||
|
|
||||||
Cobra is a commander providing a simple interface to create powerful modern CLI
|
[![Build Status](https://travis-ci.org/spf13/cobra.svg "Travis CI status")](https://travis-ci.org/spf13/cobra)
|
||||||
interfaces similar to git & go tools. In addition to providing an interface, Cobra
|
[![CircleCI status](https://circleci.com/gh/spf13/cobra.png?circle-token=:circle-token "CircleCI status")](https://circleci.com/gh/spf13/cobra)
|
||||||
simultaneously provides a controller to organize your application code.
|
[![GoDoc](https://godoc.org/github.com/spf13/cobra?status.svg)](https://godoc.org/github.com/spf13/cobra)
|
||||||
|
|
||||||
Inspired by go, go-Commander, gh and subcommand, Cobra improves on these by
|
# Table of Contents
|
||||||
providing **fully posix compliant flags** (including short & long versions),
|
|
||||||
**nesting commands**, and the ability to **define your own help and usage** for any or
|
|
||||||
all commands.
|
|
||||||
|
|
||||||
Cobra has an exceptionally clean interface and simple design without needless
|
- [Overview](#overview)
|
||||||
constructors or initialization methods.
|
- [Concepts](#concepts)
|
||||||
|
* [Commands](#commands)
|
||||||
|
* [Flags](#flags)
|
||||||
|
- [Installing](#installing)
|
||||||
|
- [Getting Started](#getting-started)
|
||||||
|
* [Using the Cobra Generator](#using-the-cobra-generator)
|
||||||
|
* [Using the Cobra Library](#using-the-cobra-library)
|
||||||
|
* [Working with Flags](#working-with-flags)
|
||||||
|
* [Positional and Custom Arguments](#positional-and-custom-arguments)
|
||||||
|
* [Example](#example)
|
||||||
|
* [Help Command](#help-command)
|
||||||
|
* [Usage Message](#usage-message)
|
||||||
|
* [PreRun and PostRun Hooks](#prerun-and-postrun-hooks)
|
||||||
|
* [Suggestions when "unknown command" happens](#suggestions-when-unknown-command-happens)
|
||||||
|
* [Generating documentation for your command](#generating-documentation-for-your-command)
|
||||||
|
* [Generating bash completions](#generating-bash-completions)
|
||||||
|
- [Contributing](#contributing)
|
||||||
|
- [License](#license)
|
||||||
|
|
||||||
Applications built with Cobra commands are designed to be as user friendly as
|
# Overview
|
||||||
possible. Flags can be placed before or after the command (as long as a
|
|
||||||
confusing space isn’t provided). Both short and long flags can be used. A
|
|
||||||
command need not even be fully typed. The shortest unambiguous string will
|
|
||||||
suffice. Help is automatically generated and available for the application or
|
|
||||||
for a specific command using either the help command or the --help flag.
|
|
||||||
|
|
||||||
## Concepts
|
Cobra is a library providing a simple interface to create powerful modern CLI
|
||||||
|
interfaces similar to git & go tools.
|
||||||
|
|
||||||
Cobra is built on a structure of commands & flags.
|
Cobra is also an application that will generate your application scaffolding to rapidly
|
||||||
|
develop a Cobra-based application.
|
||||||
|
|
||||||
**Commands** represent actions and **Flags** are modifiers for those actions.
|
Cobra provides:
|
||||||
|
* Easy subcommand-based CLIs: `app server`, `app fetch`, etc.
|
||||||
|
* Fully POSIX-compliant flags (including short & long versions)
|
||||||
|
* Nested subcommands
|
||||||
|
* Global, local and cascading flags
|
||||||
|
* Easy generation of applications & commands with `cobra init appname` & `cobra add cmdname`
|
||||||
|
* Intelligent suggestions (`app srver`... did you mean `app server`?)
|
||||||
|
* Automatic help generation for commands and flags
|
||||||
|
* Automatic help flag recognition of `-h`, `--help`, etc.
|
||||||
|
* Automatically generated bash autocomplete for your application
|
||||||
|
* Automatically generated man pages for your application
|
||||||
|
* Command aliases so you can change things without breaking them
|
||||||
|
* The flexibility to define your own help, usage, etc.
|
||||||
|
* Optional tight integration with [viper](http://github.com/spf13/viper) for 12-factor apps
|
||||||
|
|
||||||
In the following example 'server' is a command and 'port' is a flag.
|
# Concepts
|
||||||
|
|
||||||
|
Cobra is built on a structure of commands, arguments & flags.
|
||||||
|
|
||||||
|
**Commands** represent actions, **Args** are things and **Flags** are modifiers for those actions.
|
||||||
|
|
||||||
|
The best applications will read like sentences when used. Users will know how
|
||||||
|
to use the application because they will natively understand how to use it.
|
||||||
|
|
||||||
|
The pattern to follow is
|
||||||
|
`APPNAME VERB NOUN --ADJECTIVE.`
|
||||||
|
or
|
||||||
|
`APPNAME COMMAND ARG --FLAG`
|
||||||
|
|
||||||
|
A few good real world examples may better illustrate this point.
|
||||||
|
|
||||||
|
In the following example, 'server' is a command, and 'port' is a flag:
|
||||||
|
|
||||||
hugo server --port=1313
|
hugo server --port=1313
|
||||||
|
|
||||||
### Commands
|
In this command we are telling Git to clone the url bare.
|
||||||
|
|
||||||
|
git clone URL --bare
|
||||||
|
|
||||||
|
## Commands
|
||||||
|
|
||||||
Command is the central point of the application. Each interaction that
|
Command is the central point of the application. Each interaction that
|
||||||
the application supports will be contained in a Command. A command can
|
the application supports will be contained in a Command. A command can
|
||||||
have children commands and optionally run an action.
|
have children commands and optionally run an action.
|
||||||
|
|
||||||
In the example above 'server' is the command
|
In the example above, 'server' is the command.
|
||||||
|
|
||||||
A Command has the following structure:
|
[More about cobra.Command](https://godoc.org/github.com/spf13/cobra#Command)
|
||||||
|
|
||||||
type Command struct {
|
## Flags
|
||||||
Use string // The one-line usage message.
|
|
||||||
Short string // The short description shown in the 'help' output.
|
|
||||||
Long string // The long message shown in the 'help <this-command>' output.
|
|
||||||
Run func(cmd *Command, args []string) // Run runs the command.
|
|
||||||
}
|
|
||||||
|
|
||||||
### Flags
|
A flag is a way to modify the behavior of a command. Cobra supports
|
||||||
|
fully POSIX-compliant flags as well as the Go [flag package](https://golang.org/pkg/flag/).
|
||||||
A Flag is a way to modify the behavior of an command. Cobra supports
|
|
||||||
fully posix compliant flags as well as the go flag package.
|
|
||||||
A Cobra command can define flags that persist through to children commands
|
A Cobra command can define flags that persist through to children commands
|
||||||
and flags that are only available to that command.
|
and flags that are only available to that command.
|
||||||
|
|
||||||
In the example above 'port' is the flag.
|
In the example above, 'port' is the flag.
|
||||||
|
|
||||||
Flag functionality is provided by the [pflag
|
Flag functionality is provided by the [pflag
|
||||||
library](https://github.com/ogier/pflag), a fork of the flag standard library
|
library](https://github.com/spf13/pflag), a fork of the flag standard library
|
||||||
which maintains the same interface while adding posix compliance.
|
which maintains the same interface while adding POSIX compliance.
|
||||||
|
|
||||||
## Usage
|
# Installing
|
||||||
|
Using Cobra is easy. First, use `go get` to install the latest version
|
||||||
|
of the library. This command will install the `cobra` generator executable
|
||||||
|
along with the library and its dependencies:
|
||||||
|
|
||||||
Cobra works by creating a set of commands and then organizing them into a tree.
|
go get -u github.com/spf13/cobra/cobra
|
||||||
The tree defines the structure of the application.
|
|
||||||
|
|
||||||
Once each command is defined with it's corresponding flags, then the
|
Next, include Cobra in your application:
|
||||||
tree is assigned to the commander which is finally executed.
|
|
||||||
|
|
||||||
### Installing
|
|
||||||
Using Cobra is easy. First use go get to install the latest version
|
|
||||||
of the library.
|
|
||||||
|
|
||||||
$ go get github.com/spf13/cobra
|
|
||||||
|
|
||||||
Next include cobra in your application.
|
|
||||||
|
|
||||||
|
```go
|
||||||
import "github.com/spf13/cobra"
|
import "github.com/spf13/cobra"
|
||||||
|
```
|
||||||
|
|
||||||
### Create the root command
|
# Getting Started
|
||||||
|
|
||||||
The root command represents your binary itself.
|
While you are welcome to provide your own organization, typically a Cobra-based
|
||||||
|
application will follow the following organizational structure:
|
||||||
|
|
||||||
|
```
|
||||||
|
▾ appName/
|
||||||
|
▾ cmd/
|
||||||
|
add.go
|
||||||
|
your.go
|
||||||
|
commands.go
|
||||||
|
here.go
|
||||||
|
main.go
|
||||||
|
```
|
||||||
|
|
||||||
|
In a Cobra app, typically the main.go file is very bare. It serves one purpose: initializing Cobra.
|
||||||
|
|
||||||
|
```go
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"os"
|
||||||
|
|
||||||
|
"{pathToYourApp}/cmd"
|
||||||
|
)
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
cmd.Execute()
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
## Using the Cobra Generator
|
||||||
|
|
||||||
|
Cobra provides its own program that will create your application and add any
|
||||||
|
commands you want. It's the easiest way to incorporate Cobra into your application.
|
||||||
|
|
||||||
|
[Here](https://github.com/spf13/cobra/blob/master/cobra/README.md) you can find more information about it.
|
||||||
|
|
||||||
|
## Using the Cobra Library
|
||||||
|
|
||||||
|
To manually implement Cobra you need to create a bare main.go file and a rootCmd file.
|
||||||
|
You will optionally provide additional commands as you see fit.
|
||||||
|
|
||||||
|
### Create rootCmd
|
||||||
|
|
||||||
Cobra doesn't require any special constructors. Simply create your commands.
|
Cobra doesn't require any special constructors. Simply create your commands.
|
||||||
|
|
||||||
var HugoCmd = &cobra.Command{
|
Ideally you place this in app/cmd/root.go:
|
||||||
|
|
||||||
|
```go
|
||||||
|
var rootCmd = &cobra.Command{
|
||||||
Use: "hugo",
|
Use: "hugo",
|
||||||
Short: "Hugo is a very fast static site generator",
|
Short: "Hugo is a very fast static site generator",
|
||||||
Long: `A Fast and Flexible Static Site Generator built with
|
Long: `A Fast and Flexible Static Site Generator built with
|
||||||
|
@ -100,9 +193,109 @@ Cobra doesn't require any special constructors. Simply create your commands.
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func Execute() {
|
||||||
|
if err := rootCmd.Execute(); err != nil {
|
||||||
|
fmt.Println(err)
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
You will additionally define flags and handle configuration in your init() function.
|
||||||
|
|
||||||
|
For example cmd/root.go:
|
||||||
|
|
||||||
|
```go
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"os"
|
||||||
|
|
||||||
|
homedir "github.com/mitchellh/go-homedir"
|
||||||
|
"github.com/spf13/cobra"
|
||||||
|
"github.com/spf13/viper"
|
||||||
|
)
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
cobra.OnInitialize(initConfig)
|
||||||
|
rootCmd.PersistentFlags().StringVar(&cfgFile, "config", "", "config file (default is $HOME/.cobra.yaml)")
|
||||||
|
rootCmd.PersistentFlags().StringVarP(&projectBase, "projectbase", "b", "", "base project directory eg. github.com/spf13/")
|
||||||
|
rootCmd.PersistentFlags().StringP("author", "a", "YOUR NAME", "Author name for copyright attribution")
|
||||||
|
rootCmd.PersistentFlags().StringVarP(&userLicense, "license", "l", "", "Name of license for the project (can provide `licensetext` in config)")
|
||||||
|
rootCmd.PersistentFlags().Bool("viper", true, "Use Viper for configuration")
|
||||||
|
viper.BindPFlag("author", rootCmd.PersistentFlags().Lookup("author"))
|
||||||
|
viper.BindPFlag("projectbase", rootCmd.PersistentFlags().Lookup("projectbase"))
|
||||||
|
viper.BindPFlag("useViper", rootCmd.PersistentFlags().Lookup("viper"))
|
||||||
|
viper.SetDefault("author", "NAME HERE <EMAIL ADDRESS>")
|
||||||
|
viper.SetDefault("license", "apache")
|
||||||
|
}
|
||||||
|
|
||||||
|
func initConfig() {
|
||||||
|
// Don't forget to read config either from cfgFile or from home directory!
|
||||||
|
if cfgFile != "" {
|
||||||
|
// Use config file from the flag.
|
||||||
|
viper.SetConfigFile(cfgFile)
|
||||||
|
} else {
|
||||||
|
// Find home directory.
|
||||||
|
home, err := homedir.Dir()
|
||||||
|
if err != nil {
|
||||||
|
fmt.Println(err)
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Search config in home directory with name ".cobra" (without extension).
|
||||||
|
viper.AddConfigPath(home)
|
||||||
|
viper.SetConfigName(".cobra")
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := viper.ReadInConfig(); err != nil {
|
||||||
|
fmt.Println("Can't read config:", err)
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Create your main.go
|
||||||
|
|
||||||
|
With the root command you need to have your main function execute it.
|
||||||
|
Execute should be run on the root for clarity, though it can be called on any command.
|
||||||
|
|
||||||
|
In a Cobra app, typically the main.go file is very bare. It serves, one purpose, to initialize Cobra.
|
||||||
|
|
||||||
|
```go
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"os"
|
||||||
|
|
||||||
|
"{pathToYourApp}/cmd"
|
||||||
|
)
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
cmd.Execute()
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
### Create additional commands
|
### Create additional commands
|
||||||
|
|
||||||
Additional commands can be defined.
|
Additional commands can be defined and typically are each given their own file
|
||||||
|
inside of the cmd/ directory.
|
||||||
|
|
||||||
|
If you wanted to create a version command you would create cmd/version.go and
|
||||||
|
populate it with the following:
|
||||||
|
|
||||||
|
```go
|
||||||
|
package cmd
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
|
||||||
|
"github.com/spf13/cobra"
|
||||||
|
)
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
rootCmd.AddCommand(versionCmd)
|
||||||
|
}
|
||||||
|
|
||||||
var versionCmd = &cobra.Command{
|
var versionCmd = &cobra.Command{
|
||||||
Use: "version",
|
Use: "version",
|
||||||
|
@ -112,11 +305,11 @@ Additional commands can be defined.
|
||||||
fmt.Println("Hugo Static Site Generator v0.9 -- HEAD")
|
fmt.Println("Hugo Static Site Generator v0.9 -- HEAD")
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
```
|
||||||
|
|
||||||
### Attach command to its parent
|
## Working with Flags
|
||||||
In this example we are attaching it to the root, but commands can be attached at any level.
|
|
||||||
|
|
||||||
HugoCmd.AddCommand(versionCmd)
|
Flags provide modifiers to control how the action command operates.
|
||||||
|
|
||||||
### Assign flags to a command
|
### Assign flags to a command
|
||||||
|
|
||||||
|
@ -124,43 +317,109 @@ Since the flags are defined and used in different locations, we need to
|
||||||
define a variable outside with the correct scope to assign the flag to
|
define a variable outside with the correct scope to assign the flag to
|
||||||
work with.
|
work with.
|
||||||
|
|
||||||
|
```go
|
||||||
var Verbose bool
|
var Verbose bool
|
||||||
var Source string
|
var Source string
|
||||||
|
```
|
||||||
|
|
||||||
There are two different approaches to assign a flag.
|
There are two different approaches to assign a flag.
|
||||||
|
|
||||||
#### Persistent Flags
|
### Persistent Flags
|
||||||
|
|
||||||
A flag can be 'persistent' meaning that this flag will be available to the
|
A flag can be 'persistent' meaning that this flag will be available to the
|
||||||
command it's assigned to as well as every command under that command. For
|
command it's assigned to as well as every command under that command. For
|
||||||
global flags assign a flag as a persistent flag on the root.
|
global flags, assign a flag as a persistent flag on the root.
|
||||||
|
|
||||||
HugoCmd.PersistentFlags().BoolVarP(&Verbose, "verbose", "v", false, "verbose output")
|
```go
|
||||||
|
rootCmd.PersistentFlags().BoolVarP(&Verbose, "verbose", "v", false, "verbose output")
|
||||||
|
```
|
||||||
|
|
||||||
#### Local Flags
|
### Local Flags
|
||||||
|
|
||||||
A flag can also be assigned locally which will only apply to that specific command.
|
A flag can also be assigned locally which will only apply to that specific command.
|
||||||
|
|
||||||
HugoCmd.Flags().StringVarP(&Source, "source", "s", "", "Source directory to read from")
|
```go
|
||||||
|
rootCmd.Flags().StringVarP(&Source, "source", "s", "", "Source directory to read from")
|
||||||
|
```
|
||||||
|
|
||||||
### Remove a command from its parent
|
### Local Flag on Parent Commands
|
||||||
|
|
||||||
Removing a command is not a common action in simple programs but it allows 3rd parties to customize an existing command tree.
|
By default Cobra only parses local flags on the target command, any local flags on
|
||||||
|
parent commands are ignored. By enabling `Command.TraverseChildren` Cobra will
|
||||||
|
parse local flags on each command before executing the target command.
|
||||||
|
|
||||||
In this example, we remove the existing `VersionCmd` command of an existing root command, and we replace it by our own version.
|
```go
|
||||||
|
command := cobra.Command{
|
||||||
|
Use: "print [OPTIONS] [COMMANDS]",
|
||||||
|
TraverseChildren: true,
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
mainlib.RootCmd.RemoveCommand(mainlib.VersionCmd)
|
### Bind Flags with Config
|
||||||
mainlib.RootCmd.AddCommand(versionCmd)
|
|
||||||
|
|
||||||
### Once all commands and flags are defined, Execute the commands
|
You can also bind your flags with [viper](https://github.com/spf13/viper):
|
||||||
|
```go
|
||||||
|
var author string
|
||||||
|
|
||||||
Execute should be run on the root for clarity, though it can be called on any command.
|
func init() {
|
||||||
|
rootCmd.PersistentFlags().StringVar(&author, "author", "YOUR NAME", "Author name for copyright attribution")
|
||||||
|
viper.BindPFlag("author", rootCmd.PersistentFlags().Lookup("author"))
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
HugoCmd.Execute()
|
In this example the persistent flag `author` is bound with `viper`.
|
||||||
|
**Note**, that the variable `author` will not be set to the value from config,
|
||||||
|
when the `--author` flag is not provided by user.
|
||||||
|
|
||||||
|
More in [viper documentation](https://github.com/spf13/viper#working-with-flags).
|
||||||
|
|
||||||
|
### Required flags
|
||||||
|
|
||||||
|
Flags are optional by default. If instead you wish your command to report an error
|
||||||
|
when a flag has not been set, mark it as required:
|
||||||
|
```go
|
||||||
|
rootCmd.Flags().StringVarP(&Region, "region", "r", "", "AWS region (required)")
|
||||||
|
rootCmd.MarkFlagRequired("region")
|
||||||
|
```
|
||||||
|
|
||||||
|
## Positional and Custom Arguments
|
||||||
|
|
||||||
|
Validation of positional arguments can be specified using the `Args` field
|
||||||
|
of `Command`.
|
||||||
|
|
||||||
|
The following validators are built in:
|
||||||
|
|
||||||
|
- `NoArgs` - the command will report an error if there are any positional args.
|
||||||
|
- `ArbitraryArgs` - the command will accept any args.
|
||||||
|
- `OnlyValidArgs` - the command will report an error if there are any positional args that are not in the `ValidArgs` field of `Command`.
|
||||||
|
- `MinimumNArgs(int)` - the command will report an error if there are not at least N positional args.
|
||||||
|
- `MaximumNArgs(int)` - the command will report an error if there are more than N positional args.
|
||||||
|
- `ExactArgs(int)` - the command will report an error if there are not exactly N positional args.
|
||||||
|
- `RangeArgs(min, max)` - the command will report an error if the number of args is not between the minimum and maximum number of expected args.
|
||||||
|
|
||||||
|
An example of setting the custom validator:
|
||||||
|
|
||||||
|
```go
|
||||||
|
var cmd = &cobra.Command{
|
||||||
|
Short: "hello",
|
||||||
|
Args: func(cmd *cobra.Command, args []string) error {
|
||||||
|
if len(args) < 1 {
|
||||||
|
return errors.New("requires at least one arg")
|
||||||
|
}
|
||||||
|
if myapp.IsValidColor(args[0]) {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return fmt.Errorf("invalid color specified: %s", args[0])
|
||||||
|
},
|
||||||
|
Run: func(cmd *cobra.Command, args []string) {
|
||||||
|
fmt.Println("Hello, World!")
|
||||||
|
},
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
## Example
|
## Example
|
||||||
|
|
||||||
In the example below we have defined three commands. Two are at the top level
|
In the example below, we have defined three commands. Two are at the top level
|
||||||
and one (cmdTimes) is a child of one of the top commands. In this case the root
|
and one (cmdTimes) is a child of one of the top commands. In this case the root
|
||||||
is not executable meaning that a subcommand is required. This is accomplished
|
is not executable meaning that a subcommand is required. This is accomplished
|
||||||
by not providing a 'Run' for the 'rootCmd'.
|
by not providing a 'Run' for the 'rootCmd'.
|
||||||
|
@ -169,22 +428,25 @@ We have only defined one flag for a single command.
|
||||||
|
|
||||||
More documentation about flags is available at https://github.com/spf13/pflag
|
More documentation about flags is available at https://github.com/spf13/pflag
|
||||||
|
|
||||||
|
```go
|
||||||
|
package main
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"github.com/spf13/cobra"
|
|
||||||
"fmt"
|
"fmt"
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
|
"github.com/spf13/cobra"
|
||||||
)
|
)
|
||||||
|
|
||||||
func main() {
|
func main() {
|
||||||
|
|
||||||
var echoTimes int
|
var echoTimes int
|
||||||
|
|
||||||
var cmdPrint = &cobra.Command{
|
var cmdPrint = &cobra.Command{
|
||||||
Use: "print [string to print]",
|
Use: "print [string to print]",
|
||||||
Short: "Print anything to the screen",
|
Short: "Print anything to the screen",
|
||||||
Long: `print is for printing anything back to the screen.
|
Long: `print is for printing anything back to the screen.
|
||||||
For many years people have printed back to the screen.
|
For many years people have printed back to the screen.`,
|
||||||
`,
|
Args: cobra.MinimumNArgs(1),
|
||||||
Run: func(cmd *cobra.Command, args []string) {
|
Run: func(cmd *cobra.Command, args []string) {
|
||||||
fmt.Println("Print: " + strings.Join(args, " "))
|
fmt.Println("Print: " + strings.Join(args, " "))
|
||||||
},
|
},
|
||||||
|
@ -194,8 +456,8 @@ More documentation about flags is available at https://github.com/spf13/pflag
|
||||||
Use: "echo [string to echo]",
|
Use: "echo [string to echo]",
|
||||||
Short: "Echo anything to the screen",
|
Short: "Echo anything to the screen",
|
||||||
Long: `echo is for echoing anything back.
|
Long: `echo is for echoing anything back.
|
||||||
Echo works a lot like print, except it has a child command.
|
Echo works a lot like print, except it has a child command.`,
|
||||||
`,
|
Args: cobra.MinimumNArgs(1),
|
||||||
Run: func(cmd *cobra.Command, args []string) {
|
Run: func(cmd *cobra.Command, args []string) {
|
||||||
fmt.Println("Print: " + strings.Join(args, " "))
|
fmt.Println("Print: " + strings.Join(args, " "))
|
||||||
},
|
},
|
||||||
|
@ -206,6 +468,7 @@ More documentation about flags is available at https://github.com/spf13/pflag
|
||||||
Short: "Echo anything to the screen more times",
|
Short: "Echo anything to the screen more times",
|
||||||
Long: `echo things multiple times back to the user by providing
|
Long: `echo things multiple times back to the user by providing
|
||||||
a count and a string.`,
|
a count and a string.`,
|
||||||
|
Args: cobra.MinimumNArgs(1),
|
||||||
Run: func(cmd *cobra.Command, args []string) {
|
Run: func(cmd *cobra.Command, args []string) {
|
||||||
for i := 0; i < echoTimes; i++ {
|
for i := 0; i < echoTimes; i++ {
|
||||||
fmt.Println("Echo: " + strings.Join(args, " "))
|
fmt.Println("Echo: " + strings.Join(args, " "))
|
||||||
|
@ -220,145 +483,118 @@ More documentation about flags is available at https://github.com/spf13/pflag
|
||||||
cmdEcho.AddCommand(cmdTimes)
|
cmdEcho.AddCommand(cmdTimes)
|
||||||
rootCmd.Execute()
|
rootCmd.Execute()
|
||||||
}
|
}
|
||||||
|
```
|
||||||
|
|
||||||
For a more complete example of a larger application, please checkout [Hugo](http://hugo.spf13.com)
|
For a more complete example of a larger application, please checkout [Hugo](http://gohugo.io/).
|
||||||
|
|
||||||
## The Help Command
|
## Help Command
|
||||||
|
|
||||||
Cobra automatically adds a help command to your application when you have subcommands.
|
Cobra automatically adds a help command to your application when you have subcommands.
|
||||||
This will be called when a user runs 'app help'. Additionally help will also
|
This will be called when a user runs 'app help'. Additionally, help will also
|
||||||
support all other commands as input. Say for instance you have a command called
|
support all other commands as input. Say, for instance, you have a command called
|
||||||
'create' without any additional configuration cobra will work when 'app help
|
'create' without any additional configuration; Cobra will work when 'app help
|
||||||
create' is called. Every command will automatically have the '--help' flag added.
|
create' is called. Every command will automatically have the '--help' flag added.
|
||||||
|
|
||||||
### Example
|
### Example
|
||||||
|
|
||||||
The following output is automatically generated by cobra. Nothing beyond the
|
The following output is automatically generated by Cobra. Nothing beyond the
|
||||||
command and flag definitions are needed.
|
command and flag definitions are needed.
|
||||||
|
|
||||||
> hugo help
|
$ cobra help
|
||||||
|
|
||||||
A Fast and Flexible Static Site Generator built with
|
Cobra is a CLI library for Go that empowers applications.
|
||||||
love by spf13 and friends in Go.
|
This application is a tool to generate the needed files
|
||||||
|
to quickly create a Cobra application.
|
||||||
Complete documentation is available at http://hugo.spf13.com
|
|
||||||
|
|
||||||
Usage:
|
Usage:
|
||||||
hugo [flags]
|
cobra [command]
|
||||||
hugo [command]
|
|
||||||
|
|
||||||
Available Commands:
|
Available Commands:
|
||||||
server :: Hugo runs it's own a webserver to render the files
|
add Add a command to a Cobra Application
|
||||||
version :: Print the version number of Hugo
|
help Help about any command
|
||||||
check :: Check content in the source directory
|
init Initialize a Cobra Application
|
||||||
benchmark :: Benchmark hugo by building a site a number of times
|
|
||||||
help [command] :: Help about any command
|
|
||||||
|
|
||||||
Available Flags:
|
Flags:
|
||||||
-b, --base-url="": hostname (and path) to the root eg. http://spf13.com/
|
-a, --author string author name for copyright attribution (default "YOUR NAME")
|
||||||
-D, --build-drafts=false: include content marked as draft
|
--config string config file (default is $HOME/.cobra.yaml)
|
||||||
--config="": config file (default is path/config.yaml|json|toml)
|
-h, --help help for cobra
|
||||||
-d, --destination="": filesystem path to write files to
|
-l, --license string name of license for the project
|
||||||
-s, --source="": filesystem path to read files relative from
|
--viper use Viper for configuration (default true)
|
||||||
--stepAnalysis=false: display memory and timing of different steps of the program
|
|
||||||
--uglyurls=false: if true, use /filename.html instead of /filename/
|
|
||||||
-v, --verbose=false: verbose output
|
|
||||||
-w, --watch=false: watch filesystem for changes and recreate as needed
|
|
||||||
|
|
||||||
Use "hugo help [command]" for more information about that command.
|
|
||||||
|
|
||||||
|
Use "cobra [command] --help" for more information about a command.
|
||||||
|
|
||||||
|
|
||||||
Help is just a command like any other. There is no special logic or behavior
|
Help is just a command like any other. There is no special logic or behavior
|
||||||
around it. In fact you can provide your own if you want.
|
around it. In fact, you can provide your own if you want.
|
||||||
|
|
||||||
### Defining your own help
|
### Defining your own help
|
||||||
|
|
||||||
You can provide your own Help command or you own template for the default command to use.
|
You can provide your own Help command or your own template for the default command to use
|
||||||
|
with following functions:
|
||||||
|
|
||||||
The default help command is
|
```go
|
||||||
|
cmd.SetHelpCommand(cmd *Command)
|
||||||
func (c *Command) initHelp() {
|
cmd.SetHelpFunc(f func(*Command, []string))
|
||||||
if c.helpCommand == nil {
|
cmd.SetHelpTemplate(s string)
|
||||||
c.helpCommand = &Command{
|
```
|
||||||
Use: "help [command]",
|
|
||||||
Short: "Help about any command",
|
|
||||||
Long: `Help provides help for any command in the application.
|
|
||||||
Simply type ` + c.Name() + ` help [path to command] for full details.`,
|
|
||||||
Run: c.HelpFunc(),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
c.AddCommand(c.helpCommand)
|
|
||||||
}
|
|
||||||
|
|
||||||
You can provide your own command, function or template through the following methods.
|
|
||||||
|
|
||||||
command.SetHelpCommand(cmd *Command)
|
|
||||||
|
|
||||||
command.SetHelpFunc(f func(*Command, []string))
|
|
||||||
|
|
||||||
command.SetHelpTemplate(s string)
|
|
||||||
|
|
||||||
The latter two will also apply to any children commands.
|
The latter two will also apply to any children commands.
|
||||||
|
|
||||||
## Usage
|
## Usage Message
|
||||||
|
|
||||||
When the user provides an invalid flag or invalid command Cobra responds by
|
When the user provides an invalid flag or invalid command, Cobra responds by
|
||||||
showing the user the 'usage'
|
showing the user the 'usage'.
|
||||||
|
|
||||||
### Example
|
### Example
|
||||||
You may recognize this from the help above. That's because the default help
|
You may recognize this from the help above. That's because the default help
|
||||||
embeds the usage as part of it's output.
|
embeds the usage as part of its output.
|
||||||
|
|
||||||
|
$ cobra --invalid
|
||||||
|
Error: unknown flag: --invalid
|
||||||
Usage:
|
Usage:
|
||||||
hugo [flags]
|
cobra [command]
|
||||||
hugo [command]
|
|
||||||
|
|
||||||
Available Commands:
|
Available Commands:
|
||||||
server Hugo runs it's own a webserver to render the files
|
add Add a command to a Cobra Application
|
||||||
version Print the version number of Hugo
|
help Help about any command
|
||||||
check Check content in the source directory
|
init Initialize a Cobra Application
|
||||||
benchmark Benchmark hugo by building a site a number of times
|
|
||||||
help [command] Help about any command
|
|
||||||
|
|
||||||
Available Flags:
|
Flags:
|
||||||
-b, --base-url="": hostname (and path) to the root eg. http://spf13.com/
|
-a, --author string author name for copyright attribution (default "YOUR NAME")
|
||||||
-D, --build-drafts=false: include content marked as draft
|
--config string config file (default is $HOME/.cobra.yaml)
|
||||||
--config="": config file (default is path/config.yaml|json|toml)
|
-h, --help help for cobra
|
||||||
-d, --destination="": filesystem path to write files to
|
-l, --license string name of license for the project
|
||||||
-s, --source="": filesystem path to read files relative from
|
--viper use Viper for configuration (default true)
|
||||||
--stepAnalysis=false: display memory and timing of different steps of the program
|
|
||||||
--uglyurls=false: if true, use /filename.html instead of /filename/
|
Use "cobra [command] --help" for more information about a command.
|
||||||
-v, --verbose=false: verbose output
|
|
||||||
-w, --watch=false: watch filesystem for changes and recreate as needed
|
|
||||||
|
|
||||||
### Defining your own usage
|
### Defining your own usage
|
||||||
You can provide your own usage function or template for cobra to use.
|
You can provide your own usage function or template for Cobra to use.
|
||||||
|
Like help, the function and template are overridable through public methods:
|
||||||
|
|
||||||
The default usage function is
|
```go
|
||||||
|
cmd.SetUsageFunc(f func(*Command) error)
|
||||||
|
cmd.SetUsageTemplate(s string)
|
||||||
|
```
|
||||||
|
|
||||||
return func(c *Command) error {
|
## Version Flag
|
||||||
err := tmpl(c.Out(), c.UsageTemplate(), c)
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
Like help the function and template are over ridable through public methods.
|
Cobra adds a top-level '--version' flag if the Version field is set on the root command.
|
||||||
|
Running an application with the '--version' flag will print the version to stdout using
|
||||||
|
the version template. The template can be customized using the
|
||||||
|
`cmd.SetVersionTemplate(s string)` function.
|
||||||
|
|
||||||
command.SetUsageFunc(f func(*Command) error)
|
## PreRun and PostRun Hooks
|
||||||
|
|
||||||
command.SetUsageTemplate(s string)
|
It is possible to run functions before or after the main `Run` function of your command. The `PersistentPreRun` and `PreRun` functions will be executed before `Run`. `PersistentPostRun` and `PostRun` will be executed after `Run`. The `Persistent*Run` functions will be inherited by children if they do not declare their own. These functions are run in the following order:
|
||||||
|
|
||||||
## PreRun or PostRun Hooks
|
|
||||||
|
|
||||||
It is possible to run functions before or after the main `Run` function of your command. The `PersistentPreRun` and `PreRun` functions will be executed before `Run`. `PersistendPostRun` and `PostRun` will be executed after `Run`. The `Persistent*Run` functions will be inherrited by children if they do not declare their own. These function are run in the following order:
|
|
||||||
|
|
||||||
- `PersistentPreRun`
|
- `PersistentPreRun`
|
||||||
- `PreRun`
|
- `PreRun`
|
||||||
- `Run`
|
- `Run`
|
||||||
- `PostRun`
|
- `PostRun`
|
||||||
- `PersistenPostRun`
|
- `PersistentPostRun`
|
||||||
|
|
||||||
And example of two commands which use all of these features is below. When the subcommand in executed it will run the root command's `PersistentPreRun` but not the root command's `PersistentPostRun`
|
An example of two commands which use all of these features is below. When the subcommand is executed, it will run the root command's `PersistentPreRun` but not the root command's `PersistentPostRun`:
|
||||||
|
|
||||||
```go
|
```go
|
||||||
package main
|
package main
|
||||||
|
@ -411,75 +647,90 @@ func main() {
|
||||||
rootCmd.AddCommand(subCmd)
|
rootCmd.AddCommand(subCmd)
|
||||||
|
|
||||||
rootCmd.SetArgs([]string{""})
|
rootCmd.SetArgs([]string{""})
|
||||||
_ = rootCmd.Execute()
|
rootCmd.Execute()
|
||||||
fmt.Print("\n")
|
fmt.Println()
|
||||||
rootCmd.SetArgs([]string{"sub", "arg1", "arg2"})
|
rootCmd.SetArgs([]string{"sub", "arg1", "arg2"})
|
||||||
_ = rootCmd.Execute()
|
rootCmd.Execute()
|
||||||
}
|
}
|
||||||
```
|
```
|
||||||
|
|
||||||
## Generating markdown formatted documentation for your command
|
Output:
|
||||||
|
```
|
||||||
|
Inside rootCmd PersistentPreRun with args: []
|
||||||
|
Inside rootCmd PreRun with args: []
|
||||||
|
Inside rootCmd Run with args: []
|
||||||
|
Inside rootCmd PostRun with args: []
|
||||||
|
Inside rootCmd PersistentPostRun with args: []
|
||||||
|
|
||||||
Cobra can generate a markdown formatted document based on the subcommands, flags, etc. A simple example of how to do this for your command can be found in [Markdown Docs](md_docs.md)
|
Inside rootCmd PersistentPreRun with args: [arg1 arg2]
|
||||||
|
Inside subCmd PreRun with args: [arg1 arg2]
|
||||||
|
Inside subCmd Run with args: [arg1 arg2]
|
||||||
|
Inside subCmd PostRun with args: [arg1 arg2]
|
||||||
|
Inside subCmd PersistentPostRun with args: [arg1 arg2]
|
||||||
|
```
|
||||||
|
|
||||||
## Generating bash completions for your command
|
## Suggestions when "unknown command" happens
|
||||||
|
|
||||||
Cobra can generate a bash completions file. If you add more information to your command these completions can be amazingly powerful and flexible. Read more about [Bash Completions](bash_completions.md)
|
Cobra will print automatic suggestions when "unknown command" errors happen. This allows Cobra to behave similarly to the `git` command when a typo happens. For example:
|
||||||
|
|
||||||
## Debugging
|
```
|
||||||
|
$ hugo srever
|
||||||
|
Error: unknown command "srever" for "hugo"
|
||||||
|
|
||||||
Cobra provides a ‘DebugFlags’ method on a command which when called will print
|
Did you mean this?
|
||||||
out everything Cobra knows about the flags for each command
|
server
|
||||||
|
|
||||||
### Example
|
Run 'hugo --help' for usage.
|
||||||
|
```
|
||||||
|
|
||||||
command.DebugFlags()
|
Suggestions are generated automatically from every registered subcommand, using an implementation of [Levenshtein distance](http://en.wikipedia.org/wiki/Levenshtein_distance). Every registered command within a minimum distance of 2 (ignoring case) will be displayed as a suggestion.
|
||||||
|
|
||||||
## Release Notes
|
If you need to disable suggestions or tweak the string distance in your command, use:
|
||||||
* **0.9.0** June 17, 2014
|
|
||||||
* flags can appears anywhere in the args (provided they are unambiguous)
|
|
||||||
* --help prints usage screen for app or command
|
|
||||||
* Prefix matching for commands
|
|
||||||
* Cleaner looking help and usage output
|
|
||||||
* Extensive test suite
|
|
||||||
* **0.8.0** Nov 5, 2013
|
|
||||||
* Reworked interface to remove commander completely
|
|
||||||
* Command now primary structure
|
|
||||||
* No initialization needed
|
|
||||||
* Usage & Help templates & functions definable at any level
|
|
||||||
* Updated Readme
|
|
||||||
* **0.7.0** Sept 24, 2013
|
|
||||||
* Needs more eyes
|
|
||||||
* Test suite
|
|
||||||
* Support for automatic error messages
|
|
||||||
* Support for help command
|
|
||||||
* Support for printing to any io.Writer instead of os.Stderr
|
|
||||||
* Support for persistent flags which cascade down tree
|
|
||||||
* Ready for integration into Hugo
|
|
||||||
* **0.1.0** Sept 3, 2013
|
|
||||||
* Implement first draft
|
|
||||||
|
|
||||||
## ToDo
|
```go
|
||||||
* Launch proper documentation site
|
command.DisableSuggestions = true
|
||||||
|
```
|
||||||
|
|
||||||
## Contributing
|
or
|
||||||
|
|
||||||
|
```go
|
||||||
|
command.SuggestionsMinimumDistance = 1
|
||||||
|
```
|
||||||
|
|
||||||
|
You can also explicitly set names for which a given command will be suggested using the `SuggestFor` attribute. This allows suggestions for strings that are not close in terms of string distance but that make sense in your set of commands, and for commands for which you don't want aliases. Example:
|
||||||
|
|
||||||
|
```
|
||||||
|
$ kubectl remove
|
||||||
|
Error: unknown command "remove" for "kubectl"
|
||||||
|
|
||||||
|
Did you mean this?
|
||||||
|
delete
|
||||||
|
|
||||||
|
Run 'kubectl help' for usage.
|
||||||
|
```
|
||||||
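A hedged sketch of wiring `SuggestFor` into a command; the command names below mirror the example output above and are purely illustrative:

```go
package main

import "github.com/spf13/cobra"

func main() {
	rootCmd := &cobra.Command{Use: "kubectl"} // illustrative root command

	// "remove" is far from "delete" in edit distance, so it is registered
	// explicitly via SuggestFor instead of relying on Levenshtein matching.
	deleteCmd := &cobra.Command{
		Use:        "delete",
		SuggestFor: []string{"remove", "rm"},
		Run:        func(cmd *cobra.Command, args []string) {},
	}

	rootCmd.AddCommand(deleteCmd)
	rootCmd.Execute()
}
```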
|
|
||||||
|
## Generating documentation for your command
|
||||||
|
|
||||||
|
Cobra can generate documentation based on subcommands, flags, etc. in the following formats:
|
||||||
|
|
||||||
|
- [Markdown](doc/md_docs.md)
|
||||||
|
- [ReStructured Text](doc/rest_docs.md)
|
||||||
|
- [Man Page](doc/man_docs.md)
|
||||||
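For the Markdown variant, a minimal sketch might look like the following; the `github.com/spf13/cobra/doc` subpackage import and the `./docs` output directory are assumptions, not part of this diff:

```go
package main

import (
	"log"

	"github.com/spf13/cobra"
	"github.com/spf13/cobra/doc"
)

func main() {
	rootCmd := &cobra.Command{Use: "app"} // illustrative command tree

	// Write one Markdown page per command into ./docs.
	if err := doc.GenMarkdownTree(rootCmd, "./docs"); err != nil {
		log.Fatal(err)
	}
}
```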
|
|
||||||
|
## Generating bash completions
|
||||||
|
|
||||||
|
Cobra can generate a bash-completion file. If you add more information to your command, these completions can be amazingly powerful and flexible. Read more about it in [Bash Completions](bash_completions.md).
|
||||||
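For instance, a small generator program could write the completion script to disk using `GenBashCompletionFile` (shown later in this diff); the output filename below is an arbitrary assumption:

```go
package main

import (
	"log"

	"github.com/spf13/cobra"
)

func main() {
	rootCmd := &cobra.Command{Use: "app"} // illustrative command

	// Emit the bash completion script next to the binary.
	if err := rootCmd.GenBashCompletionFile("app_completion.sh"); err != nil {
		log.Fatal(err)
	}
}
```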
|
|
||||||
|
# Contributing
|
||||||
|
|
||||||
1. Fork it
|
1. Fork it
|
||||||
2. Create your feature branch (`git checkout -b my-new-feature`)
|
2. Download your fork to your PC (`git clone https://github.com/your_username/cobra && cd cobra`)
|
||||||
3. Commit your changes (`git commit -am 'Add some feature'`)
|
3. Create your feature branch (`git checkout -b my-new-feature`)
|
||||||
4. Push to the branch (`git push origin my-new-feature`)
|
4. Make changes and add them (`git add .`)
|
||||||
5. Create new Pull Request
|
5. Commit your changes (`git commit -m 'Add some feature'`)
|
||||||
|
6. Push to the branch (`git push origin my-new-feature`)
|
||||||
|
7. Create new pull request
|
||||||
|
|
||||||
## Contributors
|
# License
|
||||||
|
|
||||||
Names in no particular order:
|
|
||||||
|
|
||||||
* [spf13](https://github.com/spf13)
|
|
||||||
|
|
||||||
## License
|
|
||||||
|
|
||||||
Cobra is released under the Apache 2.0 license. See [LICENSE.txt](https://github.com/spf13/cobra/blob/master/LICENSE.txt)
|
Cobra is released under the Apache 2.0 license. See [LICENSE.txt](https://github.com/spf13/cobra/blob/master/LICENSE.txt)
|
||||||
|
|
||||||
|
|
||||||
[![Bitdeli Badge](https://d2weczhvl823v0.cloudfront.net/spf13/cobra/trend.png)](https://bitdeli.com/free "Bitdeli Badge")
|
|
||||||
|
|
||||||
|
|
89
vendor/github.com/spf13/cobra/args.go
generated
vendored
Normal file
89
vendor/github.com/spf13/cobra/args.go
generated
vendored
Normal file
|
@ -0,0 +1,89 @@
|
||||||
|
package cobra
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
)
|
||||||
|
|
||||||
|
type PositionalArgs func(cmd *Command, args []string) error
|
||||||
|
|
||||||
|
// Legacy arg validation has the following behaviour:
|
||||||
|
// - root commands with no subcommands can take arbitrary arguments
|
||||||
|
// - root commands with subcommands will do subcommand validity checking
|
||||||
|
// - subcommands will always accept arbitrary arguments
|
||||||
|
func legacyArgs(cmd *Command, args []string) error {
|
||||||
|
// no subcommand, always take args
|
||||||
|
if !cmd.HasSubCommands() {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// root command with subcommands, do subcommand checking.
|
||||||
|
if !cmd.HasParent() && len(args) > 0 {
|
||||||
|
return fmt.Errorf("unknown command %q for %q%s", args[0], cmd.CommandPath(), cmd.findSuggestions(args[0]))
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// NoArgs returns an error if any args are included.
|
||||||
|
func NoArgs(cmd *Command, args []string) error {
|
||||||
|
if len(args) > 0 {
|
||||||
|
return fmt.Errorf("unknown command %q for %q", args[0], cmd.CommandPath())
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// OnlyValidArgs returns an error if any args are not in the list of ValidArgs.
|
||||||
|
func OnlyValidArgs(cmd *Command, args []string) error {
|
||||||
|
if len(cmd.ValidArgs) > 0 {
|
||||||
|
for _, v := range args {
|
||||||
|
if !stringInSlice(v, cmd.ValidArgs) {
|
||||||
|
return fmt.Errorf("invalid argument %q for %q%s", v, cmd.CommandPath(), cmd.findSuggestions(args[0]))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// ArbitraryArgs never returns an error.
|
||||||
|
func ArbitraryArgs(cmd *Command, args []string) error {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// MinimumNArgs returns an error if there is not at least N args.
|
||||||
|
func MinimumNArgs(n int) PositionalArgs {
|
||||||
|
return func(cmd *Command, args []string) error {
|
||||||
|
if len(args) < n {
|
||||||
|
return fmt.Errorf("requires at least %d arg(s), only received %d", n, len(args))
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// MaximumNArgs returns an error if there are more than N args.
|
||||||
|
func MaximumNArgs(n int) PositionalArgs {
|
||||||
|
return func(cmd *Command, args []string) error {
|
||||||
|
if len(args) > n {
|
||||||
|
return fmt.Errorf("accepts at most %d arg(s), received %d", n, len(args))
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// ExactArgs returns an error if there are not exactly n args.
|
||||||
|
func ExactArgs(n int) PositionalArgs {
|
||||||
|
return func(cmd *Command, args []string) error {
|
||||||
|
if len(args) != n {
|
||||||
|
return fmt.Errorf("accepts %d arg(s), received %d", n, len(args))
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// RangeArgs returns an error if the number of args is not within the expected range.
|
||||||
|
func RangeArgs(min int, max int) PositionalArgs {
|
||||||
|
return func(cmd *Command, args []string) error {
|
||||||
|
if len(args) < min || len(args) > max {
|
||||||
|
return fmt.Errorf("accepts between %d and %d arg(s), received %d", min, max, len(args))
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
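A minimal sketch of using one of the validators above, assuming the `Args` field on `Command` that ships alongside this file in the same Cobra release; the command itself is an illustrative assumption:

```go
package main

import (
	"fmt"

	"github.com/spf13/cobra"
)

func main() {
	cmd := &cobra.Command{
		Use:  "greet <name>",
		Args: cobra.ExactArgs(1), // exactly one positional argument is accepted
		Run: func(cmd *cobra.Command, args []string) {
			fmt.Println("hello,", args[0])
		},
	}

	cmd.Execute()
}
```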
465
vendor/github.com/spf13/cobra/bash_completions.go
generated
vendored
465
vendor/github.com/spf13/cobra/bash_completions.go
generated
vendored
|
@ -3,6 +3,7 @@ package cobra
|
||||||
import (
|
import (
|
||||||
"bytes"
|
"bytes"
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"io"
|
||||||
"os"
|
"os"
|
||||||
"sort"
|
"sort"
|
||||||
"strings"
|
"strings"
|
||||||
|
@ -10,23 +11,33 @@ import (
|
||||||
"github.com/spf13/pflag"
|
"github.com/spf13/pflag"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
// Annotations for Bash completion.
|
||||||
const (
|
const (
|
||||||
BashCompFilenameExt = "cobra_annotation_bash_completion_filename_extentions"
|
BashCompFilenameExt = "cobra_annotation_bash_completion_filename_extensions"
|
||||||
|
BashCompCustom = "cobra_annotation_bash_completion_custom"
|
||||||
BashCompOneRequiredFlag = "cobra_annotation_bash_completion_one_required_flag"
|
BashCompOneRequiredFlag = "cobra_annotation_bash_completion_one_required_flag"
|
||||||
|
BashCompSubdirsInDir = "cobra_annotation_bash_completion_subdirs_in_dir"
|
||||||
)
|
)
|
||||||
|
|
||||||
func preamble(out *bytes.Buffer) {
|
func writePreamble(buf *bytes.Buffer, name string) {
|
||||||
fmt.Fprintf(out, `#!/bin/bash
|
buf.WriteString(fmt.Sprintf("# bash completion for %-36s -*- shell-script -*-\n", name))
|
||||||
|
buf.WriteString(fmt.Sprintf(`
|
||||||
|
__%[1]s_debug()
|
||||||
__debug()
|
|
||||||
{
|
{
|
||||||
if [[ -n ${BASH_COMP_DEBUG_FILE} ]]; then
|
if [[ -n ${BASH_COMP_DEBUG_FILE} ]]; then
|
||||||
echo "$*" >> "${BASH_COMP_DEBUG_FILE}"
|
echo "$*" >> "${BASH_COMP_DEBUG_FILE}"
|
||||||
fi
|
fi
|
||||||
}
|
}
|
||||||
|
|
||||||
__index_of_word()
|
# Homebrew on Macs have version 1.3 of bash-completion which doesn't include
|
||||||
|
# _init_completion. This is a very minimal version of that function.
|
||||||
|
__%[1]s_init_completion()
|
||||||
|
{
|
||||||
|
COMPREPLY=()
|
||||||
|
_get_comp_words_by_ref "$@" cur prev words cword
|
||||||
|
}
|
||||||
|
|
||||||
|
__%[1]s_index_of_word()
|
||||||
{
|
{
|
||||||
local w word=$1
|
local w word=$1
|
||||||
shift
|
shift
|
||||||
|
@ -38,7 +49,7 @@ __index_of_word()
|
||||||
index=-1
|
index=-1
|
||||||
}
|
}
|
||||||
|
|
||||||
__contains_word()
|
__%[1]s_contains_word()
|
||||||
{
|
{
|
||||||
local w word=$1; shift
|
local w word=$1; shift
|
||||||
for w in "$@"; do
|
for w in "$@"; do
|
||||||
|
@ -47,12 +58,14 @@ __contains_word()
|
||||||
return 1
|
return 1
|
||||||
}
|
}
|
||||||
|
|
||||||
__handle_reply()
|
__%[1]s_handle_reply()
|
||||||
{
|
{
|
||||||
__debug "${FUNCNAME}"
|
__%[1]s_debug "${FUNCNAME[0]}"
|
||||||
case $cur in
|
case $cur in
|
||||||
-*)
|
-*)
|
||||||
|
if [[ $(type -t compopt) = "builtin" ]]; then
|
||||||
compopt -o nospace
|
compopt -o nospace
|
||||||
|
fi
|
||||||
local allflags
|
local allflags
|
||||||
if [ ${#must_have_one_flag[@]} -ne 0 ]; then
|
if [ ${#must_have_one_flag[@]} -ne 0 ]; then
|
||||||
allflags=("${must_have_one_flag[@]}")
|
allflags=("${must_have_one_flag[@]}")
|
||||||
|
@ -60,14 +73,37 @@ __handle_reply()
|
||||||
allflags=("${flags[*]} ${two_word_flags[*]}")
|
allflags=("${flags[*]} ${two_word_flags[*]}")
|
||||||
fi
|
fi
|
||||||
COMPREPLY=( $(compgen -W "${allflags[*]}" -- "$cur") )
|
COMPREPLY=( $(compgen -W "${allflags[*]}" -- "$cur") )
|
||||||
[[ $COMPREPLY == *= ]] || compopt +o nospace
|
if [[ $(type -t compopt) = "builtin" ]]; then
|
||||||
|
[[ "${COMPREPLY[0]}" == *= ]] || compopt +o nospace
|
||||||
|
fi
|
||||||
|
|
||||||
|
# complete after --flag=abc
|
||||||
|
if [[ $cur == *=* ]]; then
|
||||||
|
if [[ $(type -t compopt) = "builtin" ]]; then
|
||||||
|
compopt +o nospace
|
||||||
|
fi
|
||||||
|
|
||||||
|
local index flag
|
||||||
|
flag="${cur%%=*}"
|
||||||
|
__%[1]s_index_of_word "${flag}" "${flags_with_completion[@]}"
|
||||||
|
COMPREPLY=()
|
||||||
|
if [[ ${index} -ge 0 ]]; then
|
||||||
|
PREFIX=""
|
||||||
|
cur="${cur#*=}"
|
||||||
|
${flags_completion[${index}]}
|
||||||
|
if [ -n "${ZSH_VERSION}" ]; then
|
||||||
|
# zsh completion needs --flag= prefix
|
||||||
|
eval "COMPREPLY=( \"\${COMPREPLY[@]/#/${flag}=}\" )"
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
fi
|
||||||
return 0;
|
return 0;
|
||||||
;;
|
;;
|
||||||
esac
|
esac
|
||||||
|
|
||||||
# check if we are handling a flag with special work handling
|
# check if we are handling a flag with special work handling
|
||||||
local index
|
local index
|
||||||
__index_of_word "${prev}" "${flags_with_completion[@]}"
|
__%[1]s_index_of_word "${prev}" "${flags_with_completion[@]}"
|
||||||
if [[ ${index} -ge 0 ]]; then
|
if [[ ${index} -ge 0 ]]; then
|
||||||
${flags_completion[${index}]}
|
${flags_completion[${index}]}
|
||||||
return
|
return
|
||||||
|
@ -79,45 +115,85 @@ __handle_reply()
|
||||||
fi
|
fi
|
||||||
|
|
||||||
local completions
|
local completions
|
||||||
if [[ ${#must_have_one_flag[@]} -ne 0 ]]; then
|
|
||||||
completions=("${must_have_one_flag[@]}")
|
|
||||||
elif [[ ${#must_have_one_noun[@]} -ne 0 ]]; then
|
|
||||||
completions=("${must_have_one_noun[@]}")
|
|
||||||
else
|
|
||||||
completions=("${commands[@]}")
|
completions=("${commands[@]}")
|
||||||
|
if [[ ${#must_have_one_noun[@]} -ne 0 ]]; then
|
||||||
|
completions=("${must_have_one_noun[@]}")
|
||||||
|
fi
|
||||||
|
if [[ ${#must_have_one_flag[@]} -ne 0 ]]; then
|
||||||
|
completions+=("${must_have_one_flag[@]}")
|
||||||
fi
|
fi
|
||||||
COMPREPLY=( $(compgen -W "${completions[*]}" -- "$cur") )
|
COMPREPLY=( $(compgen -W "${completions[*]}" -- "$cur") )
|
||||||
|
|
||||||
|
if [[ ${#COMPREPLY[@]} -eq 0 && ${#noun_aliases[@]} -gt 0 && ${#must_have_one_noun[@]} -ne 0 ]]; then
|
||||||
|
COMPREPLY=( $(compgen -W "${noun_aliases[*]}" -- "$cur") )
|
||||||
|
fi
|
||||||
|
|
||||||
if [[ ${#COMPREPLY[@]} -eq 0 ]]; then
|
if [[ ${#COMPREPLY[@]} -eq 0 ]]; then
|
||||||
declare -F __custom_func >/dev/null && __custom_func
|
declare -F __custom_func >/dev/null && __custom_func
|
||||||
fi
|
fi
|
||||||
|
|
||||||
|
# available in bash-completion >= 2, not always present on macOS
|
||||||
|
if declare -F __ltrim_colon_completions >/dev/null; then
|
||||||
|
__ltrim_colon_completions "$cur"
|
||||||
|
fi
|
||||||
|
|
||||||
|
# If there is only 1 completion and it is a flag with an = it will be completed
|
||||||
|
# but we don't want a space after the =
|
||||||
|
if [[ "${#COMPREPLY[@]}" -eq "1" ]] && [[ $(type -t compopt) = "builtin" ]] && [[ "${COMPREPLY[0]}" == --*= ]]; then
|
||||||
|
compopt -o nospace
|
||||||
|
fi
|
||||||
}
|
}
|
||||||
|
|
||||||
# The arguments should be in the form "ext1|ext2|extn"
|
# The arguments should be in the form "ext1|ext2|extn"
|
||||||
__handle_filename_extension_flag()
|
__%[1]s_handle_filename_extension_flag()
|
||||||
{
|
{
|
||||||
local ext="$1"
|
local ext="$1"
|
||||||
_filedir "@(${ext})"
|
_filedir "@(${ext})"
|
||||||
}
|
}
|
||||||
|
|
||||||
__handle_flag()
|
__%[1]s_handle_subdirs_in_dir_flag()
|
||||||
{
|
{
|
||||||
__debug "${FUNCNAME}: c is $c words[c] is ${words[c]}"
|
local dir="$1"
|
||||||
|
pushd "${dir}" >/dev/null 2>&1 && _filedir -d && popd >/dev/null 2>&1
|
||||||
|
}
|
||||||
|
|
||||||
|
__%[1]s_handle_flag()
|
||||||
|
{
|
||||||
|
__%[1]s_debug "${FUNCNAME[0]}: c is $c words[c] is ${words[c]}"
|
||||||
|
|
||||||
# if a command required a flag, and we found it, unset must_have_one_flag()
|
# if a command required a flag, and we found it, unset must_have_one_flag()
|
||||||
local flagname=${words[c]}
|
local flagname=${words[c]}
|
||||||
|
local flagvalue
|
||||||
# if the word contained an =
|
# if the word contained an =
|
||||||
if [[ ${words[c]} == *"="* ]]; then
|
if [[ ${words[c]} == *"="* ]]; then
|
||||||
|
flagvalue=${flagname#*=} # take in as flagvalue after the =
|
||||||
flagname=${flagname%%=*} # strip everything after the =
|
flagname=${flagname%%=*} # strip everything after the =
|
||||||
flagname="${flagname}=" # but put the = back
|
flagname="${flagname}=" # but put the = back
|
||||||
fi
|
fi
|
||||||
__debug "${FUNCNAME}: looking for ${flagname}"
|
__%[1]s_debug "${FUNCNAME[0]}: looking for ${flagname}"
|
||||||
if __contains_word "${flagname}" "${must_have_one_flag[@]}"; then
|
if __%[1]s_contains_word "${flagname}" "${must_have_one_flag[@]}"; then
|
||||||
must_have_one_flag=()
|
must_have_one_flag=()
|
||||||
fi
|
fi
|
||||||
|
|
||||||
|
# if you set a flag which only applies to this command, don't show subcommands
|
||||||
|
if __%[1]s_contains_word "${flagname}" "${local_nonpersistent_flags[@]}"; then
|
||||||
|
commands=()
|
||||||
|
fi
|
||||||
|
|
||||||
|
# keep flag value with flagname as flaghash
|
||||||
|
# flaghash variable is an associative array which is only supported in bash > 3.
|
||||||
|
if [[ -z "${BASH_VERSION}" || "${BASH_VERSINFO[0]}" -gt 3 ]]; then
|
||||||
|
if [ -n "${flagvalue}" ] ; then
|
||||||
|
flaghash[${flagname}]=${flagvalue}
|
||||||
|
elif [ -n "${words[ $((c+1)) ]}" ] ; then
|
||||||
|
flaghash[${flagname}]=${words[ $((c+1)) ]}
|
||||||
|
else
|
||||||
|
flaghash[${flagname}]="true" # pad "true" for bool flag
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
|
||||||
# skip the argument to a two word flag
|
# skip the argument to a two word flag
|
||||||
if __contains_word "${words[c]}" "${two_word_flags[@]}"; then
|
if __%[1]s_contains_word "${words[c]}" "${two_word_flags[@]}"; then
|
||||||
c=$((c+1))
|
c=$((c+1))
|
||||||
# if we are looking for a flags value, don't show commands
|
# if we are looking for a flags value, don't show commands
|
||||||
if [[ $c -eq $cword ]]; then
|
if [[ $c -eq $cword ]]; then
|
||||||
|
@ -125,16 +201,17 @@ __handle_flag()
|
||||||
fi
|
fi
|
||||||
fi
|
fi
|
||||||
|
|
||||||
# skip the flag itself
|
|
||||||
c=$((c+1))
|
c=$((c+1))
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
__handle_noun()
|
__%[1]s_handle_noun()
|
||||||
{
|
{
|
||||||
__debug "${FUNCNAME}: c is $c words[c] is ${words[c]}"
|
__%[1]s_debug "${FUNCNAME[0]}: c is $c words[c] is ${words[c]}"
|
||||||
|
|
||||||
if __contains_word "${words[c]}" "${must_have_one_noun[@]}"; then
|
if __%[1]s_contains_word "${words[c]}" "${must_have_one_noun[@]}"; then
|
||||||
|
must_have_one_noun=()
|
||||||
|
elif __%[1]s_contains_word "${words[c]}" "${noun_aliases[@]}"; then
|
||||||
must_have_one_noun=()
|
must_have_one_noun=()
|
||||||
fi
|
fi
|
||||||
|
|
||||||
|
@ -142,216 +219,366 @@ __handle_noun()
|
||||||
c=$((c+1))
|
c=$((c+1))
|
||||||
}
|
}
|
||||||
|
|
||||||
__handle_command()
|
__%[1]s_handle_command()
|
||||||
{
|
{
|
||||||
__debug "${FUNCNAME}: c is $c words[c] is ${words[c]}"
|
__%[1]s_debug "${FUNCNAME[0]}: c is $c words[c] is ${words[c]}"
|
||||||
|
|
||||||
local next_command
|
local next_command
|
||||||
if [[ -n ${last_command} ]]; then
|
if [[ -n ${last_command} ]]; then
|
||||||
next_command="_${last_command}_${words[c]}"
|
next_command="_${last_command}_${words[c]//:/__}"
|
||||||
else
|
else
|
||||||
next_command="_${words[c]}"
|
if [[ $c -eq 0 ]]; then
|
||||||
|
next_command="_%[1]s_root_command"
|
||||||
|
else
|
||||||
|
next_command="_${words[c]//:/__}"
|
||||||
|
fi
|
||||||
fi
|
fi
|
||||||
c=$((c+1))
|
c=$((c+1))
|
||||||
__debug "${FUNCNAME}: looking for ${next_command}"
|
__%[1]s_debug "${FUNCNAME[0]}: looking for ${next_command}"
|
||||||
declare -F $next_command >/dev/null && $next_command
|
declare -F "$next_command" >/dev/null && $next_command
|
||||||
}
|
}
|
||||||
|
|
||||||
__handle_word()
|
__%[1]s_handle_word()
|
||||||
{
|
{
|
||||||
if [[ $c -ge $cword ]]; then
|
if [[ $c -ge $cword ]]; then
|
||||||
__handle_reply
|
__%[1]s_handle_reply
|
||||||
return
|
return
|
||||||
fi
|
fi
|
||||||
__debug "${FUNCNAME}: c is $c words[c] is ${words[c]}"
|
__%[1]s_debug "${FUNCNAME[0]}: c is $c words[c] is ${words[c]}"
|
||||||
if [[ "${words[c]}" == -* ]]; then
|
if [[ "${words[c]}" == -* ]]; then
|
||||||
__handle_flag
|
__%[1]s_handle_flag
|
||||||
elif __contains_word "${words[c]}" "${commands[@]}"; then
|
elif __%[1]s_contains_word "${words[c]}" "${commands[@]}"; then
|
||||||
__handle_command
|
__%[1]s_handle_command
|
||||||
|
elif [[ $c -eq 0 ]]; then
|
||||||
|
__%[1]s_handle_command
|
||||||
|
elif __%[1]s_contains_word "${words[c]}" "${command_aliases[@]}"; then
|
||||||
|
# aliashash variable is an associative array which is only supported in bash > 3.
|
||||||
|
if [[ -z "${BASH_VERSION}" || "${BASH_VERSINFO[0]}" -gt 3 ]]; then
|
||||||
|
words[c]=${aliashash[${words[c]}]}
|
||||||
|
__%[1]s_handle_command
|
||||||
else
|
else
|
||||||
__handle_noun
|
__%[1]s_handle_noun
|
||||||
fi
|
fi
|
||||||
__handle_word
|
else
|
||||||
|
__%[1]s_handle_noun
|
||||||
|
fi
|
||||||
|
__%[1]s_handle_word
|
||||||
}
|
}
|
||||||
|
|
||||||
`)
|
`, name))
|
||||||
}
|
}
|
||||||
|
|
||||||
func postscript(out *bytes.Buffer, name string) {
|
func writePostscript(buf *bytes.Buffer, name string) {
|
||||||
fmt.Fprintf(out, "__start_%s()\n", name)
|
name = strings.Replace(name, ":", "__", -1)
|
||||||
fmt.Fprintf(out, `{
|
buf.WriteString(fmt.Sprintf("__start_%s()\n", name))
|
||||||
|
buf.WriteString(fmt.Sprintf(`{
|
||||||
local cur prev words cword
|
local cur prev words cword
|
||||||
|
declare -A flaghash 2>/dev/null || :
|
||||||
|
declare -A aliashash 2>/dev/null || :
|
||||||
|
if declare -F _init_completion >/dev/null 2>&1; then
|
||||||
_init_completion -s || return
|
_init_completion -s || return
|
||||||
|
else
|
||||||
|
__%[1]s_init_completion -n "=" || return
|
||||||
|
fi
|
||||||
|
|
||||||
local c=0
|
local c=0
|
||||||
local flags=()
|
local flags=()
|
||||||
local two_word_flags=()
|
local two_word_flags=()
|
||||||
|
local local_nonpersistent_flags=()
|
||||||
local flags_with_completion=()
|
local flags_with_completion=()
|
||||||
local flags_completion=()
|
local flags_completion=()
|
||||||
local commands=("%s")
|
local commands=("%[1]s")
|
||||||
local must_have_one_flag=()
|
local must_have_one_flag=()
|
||||||
local must_have_one_noun=()
|
local must_have_one_noun=()
|
||||||
local last_command
|
local last_command
|
||||||
local nouns=()
|
local nouns=()
|
||||||
|
|
||||||
__handle_word
|
__%[1]s_handle_word
|
||||||
}
|
}
|
||||||
|
|
||||||
`, name)
|
`, name))
|
||||||
fmt.Fprintf(out, "complete -F __start_%s %s\n", name, name)
|
buf.WriteString(fmt.Sprintf(`if [[ $(type -t compopt) = "builtin" ]]; then
|
||||||
fmt.Fprintf(out, "# ex: ts=4 sw=4 et filetype=sh\n")
|
complete -o default -F __start_%s %s
|
||||||
|
else
|
||||||
|
complete -o default -o nospace -F __start_%s %s
|
||||||
|
fi
|
||||||
|
|
||||||
|
`, name, name, name, name))
|
||||||
|
buf.WriteString("# ex: ts=4 sw=4 et filetype=sh\n")
|
||||||
}
|
}
|
||||||
|
|
||||||
func writeCommands(cmd *Command, out *bytes.Buffer) {
|
func writeCommands(buf *bytes.Buffer, cmd *Command) {
|
||||||
fmt.Fprintf(out, " commands=()\n")
|
buf.WriteString(" commands=()\n")
|
||||||
for _, c := range cmd.Commands() {
|
for _, c := range cmd.Commands() {
|
||||||
if len(c.Deprecated) > 0 {
|
if !c.IsAvailableCommand() || c == cmd.helpCommand {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
fmt.Fprintf(out, " commands+=(%q)\n", c.Name())
|
buf.WriteString(fmt.Sprintf(" commands+=(%q)\n", c.Name()))
|
||||||
|
writeCmdAliases(buf, c)
|
||||||
}
|
}
|
||||||
fmt.Fprintf(out, "\n")
|
buf.WriteString("\n")
|
||||||
}
|
}
|
||||||
|
|
||||||
func writeFlagHandler(name string, annotations map[string][]string, out *bytes.Buffer) {
|
func writeFlagHandler(buf *bytes.Buffer, name string, annotations map[string][]string, cmd *Command) {
|
||||||
for key, value := range annotations {
|
for key, value := range annotations {
|
||||||
switch key {
|
switch key {
|
||||||
case BashCompFilenameExt:
|
case BashCompFilenameExt:
|
||||||
fmt.Fprintf(out, " flags_with_completion+=(%q)\n", name)
|
buf.WriteString(fmt.Sprintf(" flags_with_completion+=(%q)\n", name))
|
||||||
|
|
||||||
ext := strings.Join(value, "|")
|
var ext string
|
||||||
ext = "__handle_filename_extension_flag " + ext
|
if len(value) > 0 {
|
||||||
fmt.Fprintf(out, " flags_completion+=(%q)\n", ext)
|
ext = fmt.Sprintf("__%s_handle_filename_extension_flag ", cmd.Root().Name()) + strings.Join(value, "|")
|
||||||
|
} else {
|
||||||
|
ext = "_filedir"
|
||||||
|
}
|
||||||
|
buf.WriteString(fmt.Sprintf(" flags_completion+=(%q)\n", ext))
|
||||||
|
case BashCompCustom:
|
||||||
|
buf.WriteString(fmt.Sprintf(" flags_with_completion+=(%q)\n", name))
|
||||||
|
if len(value) > 0 {
|
||||||
|
handlers := strings.Join(value, "; ")
|
||||||
|
buf.WriteString(fmt.Sprintf(" flags_completion+=(%q)\n", handlers))
|
||||||
|
} else {
|
||||||
|
buf.WriteString(" flags_completion+=(:)\n")
|
||||||
|
}
|
||||||
|
case BashCompSubdirsInDir:
|
||||||
|
buf.WriteString(fmt.Sprintf(" flags_with_completion+=(%q)\n", name))
|
||||||
|
|
||||||
|
var ext string
|
||||||
|
if len(value) == 1 {
|
||||||
|
ext = fmt.Sprintf("__%s_handle_subdirs_in_dir_flag ", cmd.Root().Name()) + value[0]
|
||||||
|
} else {
|
||||||
|
ext = "_filedir -d"
|
||||||
|
}
|
||||||
|
buf.WriteString(fmt.Sprintf(" flags_completion+=(%q)\n", ext))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func writeShortFlag(flag *pflag.Flag, out *bytes.Buffer) {
|
func writeShortFlag(buf *bytes.Buffer, flag *pflag.Flag, cmd *Command) {
|
||||||
b := (flag.Value.Type() == "bool")
|
|
||||||
name := flag.Shorthand
|
name := flag.Shorthand
|
||||||
format := " "
|
format := " "
|
||||||
if !b {
|
if len(flag.NoOptDefVal) == 0 {
|
||||||
format += "two_word_"
|
format += "two_word_"
|
||||||
}
|
}
|
||||||
format += "flags+=(\"-%s\")\n"
|
format += "flags+=(\"-%s\")\n"
|
||||||
fmt.Fprintf(out, format, name)
|
buf.WriteString(fmt.Sprintf(format, name))
|
||||||
writeFlagHandler("-"+name, flag.Annotations, out)
|
writeFlagHandler(buf, "-"+name, flag.Annotations, cmd)
|
||||||
}
|
}
|
||||||
|
|
||||||
func writeFlag(flag *pflag.Flag, out *bytes.Buffer) {
|
func writeFlag(buf *bytes.Buffer, flag *pflag.Flag, cmd *Command) {
|
||||||
b := (flag.Value.Type() == "bool")
|
|
||||||
name := flag.Name
|
name := flag.Name
|
||||||
format := " flags+=(\"--%s"
|
format := " flags+=(\"--%s"
|
||||||
if !b {
|
if len(flag.NoOptDefVal) == 0 {
|
||||||
format += "="
|
format += "="
|
||||||
}
|
}
|
||||||
format += "\")\n"
|
format += "\")\n"
|
||||||
fmt.Fprintf(out, format, name)
|
buf.WriteString(fmt.Sprintf(format, name))
|
||||||
writeFlagHandler("--"+name, flag.Annotations, out)
|
writeFlagHandler(buf, "--"+name, flag.Annotations, cmd)
|
||||||
}
|
}
|
||||||
|
|
||||||
func writeFlags(cmd *Command, out *bytes.Buffer) {
|
func writeLocalNonPersistentFlag(buf *bytes.Buffer, flag *pflag.Flag) {
|
||||||
fmt.Fprintf(out, ` flags=()
|
name := flag.Name
|
||||||
|
format := " local_nonpersistent_flags+=(\"--%s"
|
||||||
|
if len(flag.NoOptDefVal) == 0 {
|
||||||
|
format += "="
|
||||||
|
}
|
||||||
|
format += "\")\n"
|
||||||
|
buf.WriteString(fmt.Sprintf(format, name))
|
||||||
|
}
|
||||||
|
|
||||||
|
func writeFlags(buf *bytes.Buffer, cmd *Command) {
|
||||||
|
buf.WriteString(` flags=()
|
||||||
two_word_flags=()
|
two_word_flags=()
|
||||||
|
local_nonpersistent_flags=()
|
||||||
flags_with_completion=()
|
flags_with_completion=()
|
||||||
flags_completion=()
|
flags_completion=()
|
||||||
|
|
||||||
`)
|
`)
|
||||||
|
localNonPersistentFlags := cmd.LocalNonPersistentFlags()
|
||||||
cmd.NonInheritedFlags().VisitAll(func(flag *pflag.Flag) {
|
cmd.NonInheritedFlags().VisitAll(func(flag *pflag.Flag) {
|
||||||
writeFlag(flag, out)
|
if nonCompletableFlag(flag) {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
writeFlag(buf, flag, cmd)
|
||||||
if len(flag.Shorthand) > 0 {
|
if len(flag.Shorthand) > 0 {
|
||||||
writeShortFlag(flag, out)
|
writeShortFlag(buf, flag, cmd)
|
||||||
|
}
|
||||||
|
if localNonPersistentFlags.Lookup(flag.Name) != nil {
|
||||||
|
writeLocalNonPersistentFlag(buf, flag)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
cmd.InheritedFlags().VisitAll(func(flag *pflag.Flag) {
|
||||||
|
if nonCompletableFlag(flag) {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
writeFlag(buf, flag, cmd)
|
||||||
|
if len(flag.Shorthand) > 0 {
|
||||||
|
writeShortFlag(buf, flag, cmd)
|
||||||
}
|
}
|
||||||
})
|
})
|
||||||
|
|
||||||
fmt.Fprintf(out, "\n")
|
buf.WriteString("\n")
|
||||||
}
|
}
|
||||||
|
|
||||||
func writeRequiredFlag(cmd *Command, out *bytes.Buffer) {
|
func writeRequiredFlag(buf *bytes.Buffer, cmd *Command) {
|
||||||
fmt.Fprintf(out, " must_have_one_flag=()\n")
|
buf.WriteString(" must_have_one_flag=()\n")
|
||||||
flags := cmd.NonInheritedFlags()
|
flags := cmd.NonInheritedFlags()
|
||||||
flags.VisitAll(func(flag *pflag.Flag) {
|
flags.VisitAll(func(flag *pflag.Flag) {
|
||||||
for key, _ := range flag.Annotations {
|
if nonCompletableFlag(flag) {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
for key := range flag.Annotations {
|
||||||
switch key {
|
switch key {
|
||||||
case BashCompOneRequiredFlag:
|
case BashCompOneRequiredFlag:
|
||||||
format := " must_have_one_flag+=(\"--%s"
|
format := " must_have_one_flag+=(\"--%s"
|
||||||
b := (flag.Value.Type() == "bool")
|
if flag.Value.Type() != "bool" {
|
||||||
if !b {
|
|
||||||
format += "="
|
format += "="
|
||||||
}
|
}
|
||||||
format += "\")\n"
|
format += "\")\n"
|
||||||
fmt.Fprintf(out, format, flag.Name)
|
buf.WriteString(fmt.Sprintf(format, flag.Name))
|
||||||
|
|
||||||
if len(flag.Shorthand) > 0 {
|
if len(flag.Shorthand) > 0 {
|
||||||
fmt.Fprintf(out, " must_have_one_flag+=(\"-%s\")\n", flag.Shorthand)
|
buf.WriteString(fmt.Sprintf(" must_have_one_flag+=(\"-%s\")\n", flag.Shorthand))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
func writeRequiredNoun(cmd *Command, out *bytes.Buffer) {
|
func writeRequiredNouns(buf *bytes.Buffer, cmd *Command) {
|
||||||
fmt.Fprintf(out, " must_have_one_noun=()\n")
|
buf.WriteString(" must_have_one_noun=()\n")
|
||||||
sort.Sort(sort.StringSlice(cmd.ValidArgs))
|
sort.Sort(sort.StringSlice(cmd.ValidArgs))
|
||||||
for _, value := range cmd.ValidArgs {
|
for _, value := range cmd.ValidArgs {
|
||||||
fmt.Fprintf(out, " must_have_one_noun+=(%q)\n", value)
|
buf.WriteString(fmt.Sprintf(" must_have_one_noun+=(%q)\n", value))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func gen(cmd *Command, out *bytes.Buffer) {
|
func writeCmdAliases(buf *bytes.Buffer, cmd *Command) {
|
||||||
|
if len(cmd.Aliases) == 0 {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
sort.Sort(sort.StringSlice(cmd.Aliases))
|
||||||
|
|
||||||
|
buf.WriteString(fmt.Sprint(` if [[ -z "${BASH_VERSION}" || "${BASH_VERSINFO[0]}" -gt 3 ]]; then`, "\n"))
|
||||||
|
for _, value := range cmd.Aliases {
|
||||||
|
buf.WriteString(fmt.Sprintf(" command_aliases+=(%q)\n", value))
|
||||||
|
buf.WriteString(fmt.Sprintf(" aliashash[%q]=%q\n", value, cmd.Name()))
|
||||||
|
}
|
||||||
|
buf.WriteString(` fi`)
|
||||||
|
buf.WriteString("\n")
|
||||||
|
}
|
||||||
|
func writeArgAliases(buf *bytes.Buffer, cmd *Command) {
|
||||||
|
buf.WriteString(" noun_aliases=()\n")
|
||||||
|
sort.Sort(sort.StringSlice(cmd.ArgAliases))
|
||||||
|
for _, value := range cmd.ArgAliases {
|
||||||
|
buf.WriteString(fmt.Sprintf(" noun_aliases+=(%q)\n", value))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func gen(buf *bytes.Buffer, cmd *Command) {
|
||||||
for _, c := range cmd.Commands() {
|
for _, c := range cmd.Commands() {
|
||||||
if len(c.Deprecated) > 0 {
|
if !c.IsAvailableCommand() || c == cmd.helpCommand {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
gen(c, out)
|
gen(buf, c)
|
||||||
}
|
}
|
||||||
commandName := cmd.CommandPath()
|
commandName := cmd.CommandPath()
|
||||||
commandName = strings.Replace(commandName, " ", "_", -1)
|
commandName = strings.Replace(commandName, " ", "_", -1)
|
||||||
fmt.Fprintf(out, "_%s()\n{\n", commandName)
|
commandName = strings.Replace(commandName, ":", "__", -1)
|
||||||
fmt.Fprintf(out, " last_command=%q\n", commandName)
|
|
||||||
writeCommands(cmd, out)
|
if cmd.Root() == cmd {
|
||||||
writeFlags(cmd, out)
|
buf.WriteString(fmt.Sprintf("_%s_root_command()\n{\n", commandName))
|
||||||
writeRequiredFlag(cmd, out)
|
} else {
|
||||||
writeRequiredNoun(cmd, out)
|
buf.WriteString(fmt.Sprintf("_%s()\n{\n", commandName))
|
||||||
fmt.Fprintf(out, "}\n\n")
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (cmd *Command) GenBashCompletion(out *bytes.Buffer) {
|
buf.WriteString(fmt.Sprintf(" last_command=%q\n", commandName))
|
||||||
preamble(out)
|
buf.WriteString("\n")
|
||||||
if len(cmd.BashCompletionFunction) > 0 {
|
buf.WriteString(" command_aliases=()\n")
|
||||||
fmt.Fprintf(out, "%s\n", cmd.BashCompletionFunction)
|
buf.WriteString("\n")
|
||||||
}
|
|
||||||
gen(cmd, out)
|
writeCommands(buf, cmd)
|
||||||
postscript(out, cmd.Name())
|
writeFlags(buf, cmd)
|
||||||
|
writeRequiredFlag(buf, cmd)
|
||||||
|
writeRequiredNouns(buf, cmd)
|
||||||
|
writeArgAliases(buf, cmd)
|
||||||
|
buf.WriteString("}\n\n")
|
||||||
}
|
}
|
||||||
|
|
||||||
func (cmd *Command) GenBashCompletionFile(filename string) error {
|
// GenBashCompletion generates bash completion file and writes to the passed writer.
|
||||||
out := new(bytes.Buffer)
|
func (c *Command) GenBashCompletion(w io.Writer) error {
|
||||||
|
buf := new(bytes.Buffer)
|
||||||
|
writePreamble(buf, c.Name())
|
||||||
|
if len(c.BashCompletionFunction) > 0 {
|
||||||
|
buf.WriteString(c.BashCompletionFunction + "\n")
|
||||||
|
}
|
||||||
|
gen(buf, c)
|
||||||
|
writePostscript(buf, c.Name())
|
||||||
|
|
||||||
cmd.GenBashCompletion(out)
|
_, err := buf.WriteTo(w)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
func nonCompletableFlag(flag *pflag.Flag) bool {
|
||||||
|
return flag.Hidden || len(flag.Deprecated) > 0
|
||||||
|
}
|
||||||
|
|
||||||
|
// GenBashCompletionFile generates bash completion file.
|
||||||
|
func (c *Command) GenBashCompletionFile(filename string) error {
|
||||||
outFile, err := os.Create(filename)
|
outFile, err := os.Create(filename)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
defer outFile.Close()
|
defer outFile.Close()
|
||||||
|
|
||||||
_, err = outFile.Write(out.Bytes())
|
return c.GenBashCompletion(outFile)
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (cmd *Command) MarkFlagRequired(name string) {
|
// MarkFlagRequired adds the BashCompOneRequiredFlag annotation to the named flag if it exists,
|
||||||
flag := cmd.Flags().Lookup(name)
|
// and causes your command to report an error if invoked without the flag.
|
||||||
if flag == nil {
|
func (c *Command) MarkFlagRequired(name string) error {
|
||||||
return
|
return MarkFlagRequired(c.Flags(), name)
|
||||||
}
|
}
|
||||||
if flag.Annotations == nil {
|
|
||||||
flag.Annotations = make(map[string][]string)
|
// MarkPersistentFlagRequired adds the BashCompOneRequiredFlag annotation to the named persistent flag if it exists,
|
||||||
|
// and causes your command to report an error if invoked without the flag.
|
||||||
|
func (c *Command) MarkPersistentFlagRequired(name string) error {
|
||||||
|
return MarkFlagRequired(c.PersistentFlags(), name)
|
||||||
}
|
}
|
||||||
annotation := make([]string, 1)
|
|
||||||
annotation[0] = "true"
|
// MarkFlagRequired adds the BashCompOneRequiredFlag annotation to the named flag if it exists,
|
||||||
flag.Annotations[BashCompOneRequiredFlag] = annotation
|
// and causes your command to report an error if invoked without the flag.
|
||||||
|
func MarkFlagRequired(flags *pflag.FlagSet, name string) error {
|
||||||
|
return flags.SetAnnotation(name, BashCompOneRequiredFlag, []string{"true"})
|
||||||
|
}
|
||||||
|
|
||||||
|
// MarkFlagFilename adds the BashCompFilenameExt annotation to the named flag, if it exists.
|
||||||
|
// Generated bash autocompletion will select filenames for the flag, limiting to named extensions if provided.
|
||||||
|
func (c *Command) MarkFlagFilename(name string, extensions ...string) error {
|
||||||
|
return MarkFlagFilename(c.Flags(), name, extensions...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// MarkFlagCustom adds the BashCompCustom annotation to the named flag, if it exists.
|
||||||
|
// Generated bash autocompletion will call the bash function f for the flag.
|
||||||
|
func (c *Command) MarkFlagCustom(name string, f string) error {
|
||||||
|
return MarkFlagCustom(c.Flags(), name, f)
|
||||||
|
}
|
||||||
|
|
||||||
|
// MarkPersistentFlagFilename adds the BashCompFilenameExt annotation to the named persistent flag, if it exists.
|
||||||
|
// Generated bash autocompletion will select filenames for the flag, limiting to named extensions if provided.
|
||||||
|
func (c *Command) MarkPersistentFlagFilename(name string, extensions ...string) error {
|
||||||
|
return MarkFlagFilename(c.PersistentFlags(), name, extensions...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// MarkFlagFilename adds the BashCompFilenameExt annotation to the named flag in the flag set, if it exists.
|
||||||
|
// Generated bash autocompletion will select filenames for the flag, limiting to named extensions if provided.
|
||||||
|
func MarkFlagFilename(flags *pflag.FlagSet, name string, extensions ...string) error {
|
||||||
|
return flags.SetAnnotation(name, BashCompFilenameExt, extensions)
|
||||||
|
}
|
||||||
|
|
||||||
|
// MarkFlagCustom adds the BashCompCustom annotation to the named flag in the flag set, if it exists.
|
||||||
|
// Generated bash autocompletion will call the bash function f for the flag.
|
||||||
|
func MarkFlagCustom(flags *pflag.FlagSet, name string, f string) error {
|
||||||
|
return flags.SetAnnotation(name, BashCompCustom, []string{f})
|
||||||
}
|
}
|
||||||
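A brief sketch of how the `MarkFlag*` helpers above are typically called when defining flags; the flag name and extensions are illustrative assumptions:

```go
package main

import "github.com/spf13/cobra"

func main() {
	cmd := &cobra.Command{
		Use: "apply",
		Run: func(cmd *cobra.Command, args []string) {},
	}

	cmd.Flags().StringP("filename", "f", "", "manifest to apply")

	// Bash completion will require this flag and complete filenames for it,
	// limited to the listed extensions.
	cmd.MarkFlagRequired("filename")
	cmd.MarkFlagFilename("filename", "yaml", "yml", "json")

	cmd.Execute()
}
```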
|
|