Merge pull request #780 from runcom/move-master-kube-1.7

*: update kube vendor to v1.7.4
Mrunal Patel 2017-08-22 14:44:03 -07:00 committed by GitHub
commit 94469cc788
1041 changed files with 274066 additions and 40149 deletions


@ -4,20 +4,31 @@ set -o errexit
set -o nounset
set -o pipefail
for d in $(find . -type d -not -iwholename '*.git*' -a -not -iname '.tool' -a -not -iwholename '*vendor*' -a -not -iwholename '*.artifacts*' -a -not -iwholename '*contrib*' -a -not -iwholename '*test*' -a -not -iwholename '*logo*' -a -not -iwholename '*conmon*' -a -not -iwholename '*completions*' -a -not -iwholename '*docs*' -a -not -iwholename '*pause*'); do
${GOPATH}/bin/gometalinter \
--exclude='error return value not checked.*(Close|Log|Print|RemoveAll).*\(errcheck\)$' \
--exclude='declaration of.*err.*shadows declaration.*\(vetshadow\)$' \
--exclude='.*_test\.go:.*error return value not checked.*\(errcheck\)$' \
--exclude='duplicate of.*_test.go.*\(dupl\)$' \
--exclude='cmd\/client\/.*\.go.*\(dupl\)$' \
--exclude='vendor\/.*' \
--exclude='server\/seccomp\/.*\.go.*$' \
--disable=aligncheck \
--disable=gotype \
--disable=gas \
--cyclo-over=80 \
--dupl-threshold=100 \
--tests \
--deadline=600s "${d}"
done
PKGS=$(find . -type d -not -path . -a -not -iwholename '*.git*' -a -not -iname '.tool' -a -not -iwholename '*vendor*' -a -not -iname 'hack' -a -not -iwholename '*.artifacts*' -a -not -iwholename '*contrib*' -a -not -iwholename '*test*' -a -not -iwholename '*logo*' -a -not -iwholename '*conmon*' -a -not -iwholename '*completions*' -a -not -iwholename '*docs*' -a -not -iwholename '*pause*')
${GOPATH}/bin/gometalinter \
--concurrency=4\
--enable-gc\
--vendored-linters\
--deadline=600s --disable-all\
--enable=deadcode\
--enable=errcheck\
--enable=goconst\
--enable=gofmt\
--enable=golint\
--enable=ineffassign\
--enable=interfacer\
--enable=megacheck\
--enable=misspell\
--enable=structcheck\
--enable=varcheck\
--enable=vet\
--enable=vetshadow\
--exclude='error return value not checked.*\(errcheck\)$'\
--exclude='declaration of.*err.*shadows declaration.*\(vetshadow\)$'\
--exclude='.*_test\.go:.*error return value not checked.*\(errcheck\)$'\
--exclude='duplicate of.*_test.go.*\(dupl\)$'\
--exclude='cmd\/client\/.*\.go.*\(dupl\)$'\
--exclude='vendor\/.*'\
--exclude='server\/seccomp\/.*\.go.*$'\
${PKGS[@]}


@ -1,4 +1,4 @@
FROM golang:1.7
FROM golang:1.8
# libseccomp in jessie is not _quite_ new enough -- need backports version
RUN echo 'deb http://httpredir.debian.org/debian jessie-backports main' > /etc/apt/sources.list.d/backports.list
@ -76,6 +76,17 @@ RUN set -x \
&& cp bin/* /opt/cni/bin/ \
&& rm -rf "$GOPATH"
# Install crictl
ENV CRICTL_COMMIT a2d0e8f7bc7e80111a7d79052ab9aca3469609aa
RUN set -x \
&& export GOPATH="$(mktemp -d)" \
&& git clone https://github.com/kubernetes-incubator/cri-tools.git "$GOPATH/src/github.com/kubernetes-incubator/cri-tools" \
&& cd "$GOPATH/src/github.com/kubernetes-incubator/cri-tools" \
&& git checkout -q "$CRICTL_COMMIT" \
&& go install github.com/kubernetes-incubator/cri-tools/cmd/crictl \
&& cp "$GOPATH"/bin/crictl /usr/bin/ \
&& rm -rf "$GOPATH"
COPY test/plugin_test_args.bash /opt/cni/bin/plugin_test_args.bash
# Make sure we have some policy for pulling images


@ -18,7 +18,7 @@ import (
"github.com/urfave/cli"
"golang.org/x/sys/unix"
"google.golang.org/grpc"
"k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
"k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
)
const crioConfigPath = "/etc/crio/crio.conf"
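
For illustration only, not part of this commit: a minimal Go sketch of a client that uses the relocated runtime package (pkg/kubelet/apis/cri/v1alpha1/runtime) to call Version over the CRI-O socket. The socket path and the unix-socket dialer are assumptions for the example.

package main

import (
    "fmt"
    "net"
    "time"

    "golang.org/x/net/context"
    "google.golang.org/grpc"
    pb "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
)

func main() {
    // Assumed socket path; adjust to the configured CRI-O endpoint.
    conn, err := grpc.Dial("/var/run/crio.sock", grpc.WithInsecure(),
        grpc.WithDialer(func(addr string, timeout time.Duration) (net.Conn, error) {
            return net.DialTimeout("unix", addr, timeout) // CRI-O listens on a unix socket
        }))
    if err != nil {
        panic(err) // sketch only
    }
    defer conn.Close()

    client := pb.NewRuntimeServiceClient(conn)
    resp, err := client.Version(context.Background(), &pb.VersionRequest{})
    if err != nil {
        panic(err)
    }
    fmt.Println(resp.RuntimeName, resp.RuntimeVersion)
}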


@ -10,10 +10,10 @@ import (
"github.com/urfave/cli"
"golang.org/x/net/context"
remocommandconsts "k8s.io/apimachinery/pkg/util/remotecommand"
restclient "k8s.io/client-go/rest"
"k8s.io/kubernetes/pkg/client/unversioned/remotecommand"
pb "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
remotecommandserver "k8s.io/kubernetes/pkg/kubelet/server/remotecommand"
"k8s.io/client-go/tools/remotecommand"
pb "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
)
var containerCommand = cli.Command{
@ -533,7 +533,7 @@ func Exec(client pb.RuntimeServiceClient, ID string, tty bool, stdin bool, urlOn
}
options := remotecommand.StreamOptions{
SupportedProtocols: remotecommandserver.SupportedStreamingProtocols,
SupportedProtocols: remocommandconsts.SupportedStreamingProtocols,
Stdout: os.Stdout,
Stderr: os.Stderr,
Tty: tty,


@ -5,7 +5,7 @@ import (
"github.com/urfave/cli"
"golang.org/x/net/context"
pb "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
pb "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
)
var imageCommand = cli.Command{


@ -10,7 +10,7 @@ import (
"github.com/sirupsen/logrus"
"github.com/urfave/cli"
"google.golang.org/grpc"
pb "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
pb "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
)
func getClientConnection(context *cli.Context) (*grpc.ClientConn, error) {


@ -9,7 +9,7 @@ import (
"github.com/urfave/cli"
"golang.org/x/net/context"
pb "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
pb "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
)
var podSandboxCommand = cli.Command{
@ -289,7 +289,6 @@ func PodSandboxStatus(client pb.RuntimeServiceClient, ID string) error {
fmt.Printf("Status: %s\n", r.Status.State)
ctm := time.Unix(0, r.Status.CreatedAt)
fmt.Printf("Created: %v\n", ctm)
fmt.Printf("Network namespace: %s\n", r.Status.Linux.Namespaces.Network)
if r.Status.Network != nil {
fmt.Printf("IP Address: %v\n", r.Status.Network.Ip)
}


@ -5,7 +5,7 @@ import (
"github.com/urfave/cli"
"golang.org/x/net/context"
pb "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
pb "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
)
var runtimeVersionCommand = cli.Command{


@ -30,7 +30,7 @@ var basicFunctions = template.FuncMap{
// HeaderFunctions are used to created headers of a table.
// This is a replacement of basicFunctions for header generation
// because we want the header to remain intact.
// Some functions like `split` are irrevelant so not added.
// Some functions like `split` are irrelevant so not added.
var headerFunctions = template.FuncMap{
"json": func(v string) string {
return v


@ -85,14 +85,14 @@ func inspectCmd(c *cli.Context) error {
return errors.Wrapf(err, "error parsing container data")
}
case inspectTypeImage:
data, err = libkpodimage.GetImageData(server.Store(), name)
data, err = libkpodimage.GetData(server.Store(), name)
if err != nil {
return errors.Wrapf(err, "error parsing image data")
}
case inspectAll:
ctrData, err := server.GetContainerData(name, size)
if err != nil {
imgData, err := libkpodimage.GetImageData(server.Store(), name)
imgData, err := libkpodimage.GetData(server.Store(), name)
if err != nil {
return errors.Wrapf(err, "error parsing container or image data")
}


@ -8,9 +8,6 @@ import (
"github.com/urfave/cli"
)
//Version of kpod
const Version string = "0.0.1"
func main() {
if reexec.Init() {
return
@ -19,7 +16,7 @@ func main() {
app := cli.NewApp()
app.Name = "kpod"
app.Usage = "manage pods and images"
app.Version = Version
app.Version = "0.0.1"
app.Commands = []cli.Command{
diffCommand,


@ -17,7 +17,7 @@ var (
// versionCmd gets and prints version info for version command
func versionCmd(c *cli.Context) error {
fmt.Println("Version: ", Version)
fmt.Println("Version: ", c.App.Version)
fmt.Println("Go Version: ", runtime.Version())
if gitCommit != "" {
fmt.Println("Git Commit: ", gitCommit)


@ -127,6 +127,12 @@
async: 600
poll: 10
when: xunit
- name: git clone crictl repo
git:
repo: https://github.com/kubernetes-incubator/cri-tools
dest: /root/src/github.com/kubernetes-incubator/cri-tools
async: 600
poll: 10
- name: git clone runc repo
git:
repo: https://github.com/opencontainers/runc
@ -193,6 +199,12 @@
chdir: /root/src/github.com/opencontainers/runc
async: 600
poll: 10
- name: make crictl
shell: |
go install github.com/kubernetes-incubator/cri-tools/cmd/crictl && \
cp $GOPATH/bin/crictl /usr/bin/crictl
args:
chdir: /root/src/github.com/kubernetes-incubator/cri-o/
- name: make runc
make:
params: BUILDTAGS="seccomp selinux"
@ -216,6 +228,20 @@
shell: "cp test/plugin_test_args.bash /opt/cni/bin/"
args:
chdir: /root/src/github.com/kubernetes-incubator/cri-o/
# k8s builds with go1.8.x, rhel, fedora don't have it yet
- name: install Golang upstream in Fedora/RHEL
shell: |
curl -fsSL "https://golang.org/dl/go1.8.3.linux-amd64.tar.gz" \
| tar -xzC /usr/local
when: ansible_distribution == 'Fedora' or ansible_distribution == 'RedHat'
- name: Set custom Golang path for Fedora/RHEL
lineinfile:
dest: /root/.bashrc
line: 'export PATH=/usr/local/go/bin:$PATH'
insertafter: 'EOF'
regexp: 'export PATH=/usr/local/go/bin:$PATH'
state: present
when: ansible_distribution == 'Fedora' or ansible_distribution == 'RedHat'
- name: run integration tests RHEL
shell: 'CGROUP_MANAGER=cgroupfs STORAGE_OPTS="--storage-driver=overlay2 --storage-opt overlay2.override_kernel_check=1" make localintegration 2>&1 > testout.txt'
args:
@ -258,14 +284,12 @@
shell: 'mv /root/src/github.com/kubernetes-incubator/cri-o/test/TestReport-bats*.xml /root/src/github.com/kubernetes-incubator/cri-o/reports/'
when: xunit
# XXX: kube tests from now on
# TODO: remove custom repo origin and put kube master back after PR #510
# we need this because of node-e2e tests failing on RHEL otherwise
# For more info see issue #529
- name: git clone k8s repo
git:
repo: https://github.com/runcom/kubernetes
dest: /root/src/k8s.io/kubernetes
version: custom-RHEL-node-e2e
# based on kube upstream v1.7.4
version: cri-o-node-e2e-patched
force: yes
async: 600
poll: 10


@ -5,7 +5,7 @@ import (
"os"
"k8s.io/apimachinery/pkg/fields"
pb "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
pb "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
"github.com/kubernetes-incubator/cri-o/libkpod/driver"
libkpodimage "github.com/kubernetes-incubator/cri-o/libkpod/image"
@ -77,7 +77,7 @@ func (c *ContainerServer) GetContainerData(name string, size bool) (*ContainerDa
if container.ImageID == "" {
return nil, errors.Errorf("error reading container image data: container is not based on an image")
}
imageData, err := libkpodimage.GetImageData(c.store, container.ImageID)
imageData, err := libkpodimage.GetData(c.store, container.ImageID)
if err != nil {
return nil, errors.Wrapf(err, "error reading container image data")
}


@ -22,7 +22,7 @@ import (
"github.com/opencontainers/selinux/go-selinux/label"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
pb "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
pb "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
)
// ContainerServer implements the ImageServer


@ -122,19 +122,13 @@ func matchesLabel(info *types.ImageInspectInfo, store storage.Store, label strin
// Returns true if the image was created since the filter image. Returns
// false otherwise
func matchesBeforeImage(info *types.ImageInspectInfo, name string, params *FilterParams) bool {
if info.Created.Before(params.beforeImage) {
return true
}
return false
return info.Created.Before(params.beforeImage)
}
// Returns true if the image was created since the filter image. Returns
// false otherwise
func matchesSinceImage(info *types.ImageInspectInfo, name string, params *FilterParams) bool {
if info.Created.After(params.sinceImage) {
return true
}
return false
return info.Created.After(params.sinceImage)
}
// MatchesID returns true if argID is a full or partial match for id


@ -13,9 +13,9 @@ import (
"github.com/pkg/errors"
)
// ImageData handles the data used when inspecting a container
// Data handles the data used when inspecting a container
// nolint
type ImageData struct {
type Data struct {
ID string
Tags []string
Digests []string
@ -75,8 +75,8 @@ func annotations(manifest []byte, manifestType string) map[string]string {
return annotations
}
// GetImageData gets the ImageData for a container with the given name in the given store.
func GetImageData(store storage.Store, name string) (*ImageData, error) {
// GetData gets the Data for a container with the given name in the given store.
func GetData(store storage.Store, name string) (*Data, error) {
img, err := FindImage(store, name)
if err != nil {
return nil, errors.Wrapf(err, "error reading image %q", name)
@ -140,7 +140,7 @@ func GetImageData(store storage.Store, name string) (*ImageData, error) {
historyCreatedBy = config.History[len(config.History)-1].CreatedBy
}
return &ImageData{
return &Data{
ID: img.ID,
Tags: tags,
Digests: digests,
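
A sketch, not from this commit, of what a call site looks like after the GetImageData/ImageData rename; it assumes an already-initialized containers/storage Store.

package example

import (
    "github.com/containers/storage"
    libkpodimage "github.com/kubernetes-incubator/cri-o/libkpod/image"
)

// inspectImage shows the renamed call: GetImageData -> GetData, *ImageData -> *Data.
// `store` is assumed to be an already-initialized containers/storage Store.
func inspectImage(store storage.Store, name string) (*libkpodimage.Data, error) {
    return libkpodimage.GetData(store, name)
}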


@ -42,7 +42,7 @@ func (c *ContainerServer) GetLogs(container string, logChan chan string, opts Lo
t, err := tail.TailFile(logsFile, tail.Config{Follow: false, ReOpen: false, Location: seekInfo})
for line := range t.Lines {
if since, err := logSinceTime(opts.SinceTime, line.Text); err != nil || since == false {
if since, err := logSinceTime(opts.SinceTime, line.Text); err != nil || !since {
continue
}
logMessage := line.Text[secondSpaceIndex(line.Text):]


@ -4,7 +4,7 @@ import (
"encoding/json"
"path/filepath"
"k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
"k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
"github.com/docker/docker/pkg/ioutils"
"github.com/kubernetes-incubator/cri-o/oci"


@ -15,7 +15,7 @@ import (
"github.com/sirupsen/logrus"
"golang.org/x/sys/unix"
"k8s.io/apimachinery/pkg/fields"
pb "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
pb "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
"k8s.io/kubernetes/pkg/kubelet/network/hostport"
)


@ -13,7 +13,7 @@ import (
"github.com/docker/docker/pkg/signal"
specs "github.com/opencontainers/runtime-spec/specs-go"
"k8s.io/apimachinery/pkg/fields"
pb "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
pb "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
)
const (


@ -269,7 +269,7 @@ func (svc *imageService) isSecureIndex(indexName string) bool {
for _, addr := range addrs {
for _, ipnet := range svc.insecureRegistryCIDRs {
// check if the addr falls in the subnet
if (*net.IPNet)(ipnet).Contains(addr) {
if ipnet.Contains(addr) {
return false
}
}
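
Standard-library illustration, not part of this commit, of the check this hunk performs once the explicit conversion is dropped: testing whether an address falls inside any insecure-registry CIDR held as *net.IPNet values.

package main

import (
    "fmt"
    "net"
)

func main() {
    _, cidr, _ := net.ParseCIDR("10.0.0.0/8") // illustrative insecure-registry range
    insecureCIDRs := []*net.IPNet{cidr}

    addr := net.ParseIP("10.1.2.3")
    for _, ipnet := range insecureCIDRs {
        if ipnet.Contains(addr) { // no conversion needed on a *net.IPNet
            fmt.Println("address is on an insecure registry network")
        }
    }
}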


@ -12,9 +12,9 @@ import (
"github.com/sirupsen/logrus"
"golang.org/x/net/context"
"golang.org/x/sys/unix"
pb "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
"k8s.io/client-go/tools/remotecommand"
pb "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/util/term"
)
/* Sync with stdpipe_t in conmon.c */
@ -37,7 +37,7 @@ func (s *Server) Attach(ctx context.Context, req *pb.AttachRequest) (*pb.AttachR
}
// Attach endpoint for streaming.Runtime
func (ss streamService) Attach(containerID string, inputStream io.Reader, outputStream, errorStream io.WriteCloser, tty bool, resize <-chan term.Size) error {
func (ss streamService) Attach(containerID string, inputStream io.Reader, outputStream, errorStream io.WriteCloser, tty bool, resize <-chan remotecommand.TerminalSize) error {
c := ss.runtimeServer.GetContainer(containerID)
if c == nil {
@ -59,7 +59,7 @@ func (ss streamService) Attach(containerID string, inputStream io.Reader, output
return fmt.Errorf("failed to open container ctl file: %v", err)
}
kubecontainer.HandleResizing(resize, func(size term.Size) {
kubecontainer.HandleResizing(resize, func(size remotecommand.TerminalSize) {
logrus.Infof("Got a resize event: %+v", size)
_, err := fmt.Fprintf(controlFile, "%d %d %d\n", 1, size.Height, size.Width)
if err != nil {
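
Illustrative sketch, not from this commit: feeding a resize event through a channel of the new client-go TerminalSize type into the kubelet HandleResizing helper used above. It assumes HandleResizing consumes events from the channel on its own goroutine, and the sleep is only so the sketch prints before exiting.

package main

import (
    "fmt"
    "time"

    "k8s.io/client-go/tools/remotecommand"
    kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
)

func main() {
    resize := make(chan remotecommand.TerminalSize, 1)
    resize <- remotecommand.TerminalSize{Width: 80, Height: 24} // Width/Height are uint16

    // Assumed: the helper ranges over the channel and invokes the callback per event.
    kubecontainer.HandleResizing(resize, func(size remotecommand.TerminalSize) {
        fmt.Printf("resize to %dx%d\n", size.Width, size.Height)
    })
    time.Sleep(50 * time.Millisecond) // crude wait; sketch only
}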


@ -31,7 +31,7 @@ import (
"github.com/sirupsen/logrus"
"golang.org/x/net/context"
"golang.org/x/sys/unix"
pb "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
pb "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
)
const (


@ -10,7 +10,8 @@ import (
"github.com/kubernetes-incubator/cri-o/oci"
"github.com/sirupsen/logrus"
"golang.org/x/net/context"
pb "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
"k8s.io/client-go/tools/remotecommand"
pb "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
utilexec "k8s.io/kubernetes/pkg/util/exec"
"k8s.io/kubernetes/pkg/util/term"
@ -29,7 +30,7 @@ func (s *Server) Exec(ctx context.Context, req *pb.ExecRequest) (*pb.ExecRespons
}
// Exec endpoint for streaming.Runtime
func (ss streamService) Exec(containerID string, cmd []string, stdin io.Reader, stdout, stderr io.WriteCloser, tty bool, resize <-chan term.Size) error {
func (ss streamService) Exec(containerID string, cmd []string, stdin io.Reader, stdout, stderr io.WriteCloser, tty bool, resize <-chan remotecommand.TerminalSize) error {
c := ss.runtimeServer.GetContainer(containerID)
if c == nil {
@ -63,7 +64,7 @@ func (ss streamService) Exec(containerID string, cmd []string, stdin io.Reader,
// make sure to close the stdout stream
defer stdout.Close()
kubecontainer.HandleResizing(resize, func(size term.Size) {
kubecontainer.HandleResizing(resize, func(size remotecommand.TerminalSize) {
term.SetSize(p.Fd(), size)
})


@ -6,7 +6,7 @@ import (
"github.com/kubernetes-incubator/cri-o/oci"
"github.com/sirupsen/logrus"
"golang.org/x/net/context"
pb "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
pb "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
)
// ExecSync runs a command in a container synchronously.


@ -5,7 +5,7 @@ import (
"github.com/sirupsen/logrus"
"golang.org/x/net/context"
"k8s.io/apimachinery/pkg/fields"
pb "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
pb "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
)
// filterContainer returns whether passed container matches filtering criteria


@ -11,7 +11,7 @@ import (
"github.com/kubernetes-incubator/cri-o/oci"
"github.com/sirupsen/logrus"
"golang.org/x/net/context"
pb "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
pb "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
)
// PortForward prepares a streaming endpoint to forward ports from a PodSandbox.


@ -8,7 +8,7 @@ import (
"github.com/kubernetes-incubator/cri-o/oci"
"github.com/sirupsen/logrus"
"golang.org/x/net/context"
pb "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
pb "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
)
// RemoveContainer removes the container. If the container is running, the container


@ -6,7 +6,7 @@ import (
"github.com/kubernetes-incubator/cri-o/oci"
"github.com/sirupsen/logrus"
"golang.org/x/net/context"
pb "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
pb "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
)
// StartContainer starts the container.

server/container_stats.go (new file, 14 lines)

@ -0,0 +1,14 @@
package server
import (
"fmt"
"golang.org/x/net/context"
pb "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
)
// ContainerStats returns stats of the container. If the container does not
// exist, the call returns an error.
func (s *Server) ContainerStats(ctx context.Context, req *pb.ContainerStatsRequest) (*pb.ContainerStatsResponse, error) {
return nil, fmt.Errorf("not implemented")
}
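
Client-side sketch, not in this commit, of the new ContainerStats RPC; it assumes an established *grpc.ClientConn to the CRI-O socket and a container ID. With the stub above, the call currently comes back with a "not implemented" error.

package example

import (
    "golang.org/x/net/context"
    "google.golang.org/grpc"
    pb "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
)

// containerStats assumes `conn` is an established connection to the CRI-O socket.
func containerStats(conn *grpc.ClientConn, id string) (*pb.ContainerStats, error) {
    client := pb.NewRuntimeServiceClient(conn)
    resp, err := client.ContainerStats(context.Background(),
        &pb.ContainerStatsRequest{ContainerId: id})
    if err != nil {
        // With the stub above, the server currently answers "not implemented".
        return nil, err
    }
    return resp.Stats, nil
}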


@ -0,0 +1,13 @@
package server
import (
"fmt"
"golang.org/x/net/context"
pb "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
)
// ListContainerStats returns stats of all running containers.
func (s *Server) ListContainerStats(ctx context.Context, req *pb.ListContainerStatsRequest) (*pb.ListContainerStatsResponse, error) {
return nil, fmt.Errorf("not implemented")
}


@ -4,7 +4,7 @@ import (
"github.com/kubernetes-incubator/cri-o/oci"
"github.com/sirupsen/logrus"
"golang.org/x/net/context"
pb "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
pb "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
)
const (


@ -6,7 +6,7 @@ import (
"github.com/kubernetes-incubator/cri-o/oci"
"github.com/sirupsen/logrus"
"golang.org/x/net/context"
pb "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
pb "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
)
// StopContainer stops a running container with a grace period (i.e., timeout).


@ -2,7 +2,7 @@ package server
import (
"golang.org/x/net/context"
pb "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
pb "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
)
// UpdateRuntimeConfig updates the configuration of a running container.

server/image_fs_info.go (new file, 13 lines)

@ -0,0 +1,13 @@
package server
import (
"fmt"
"golang.org/x/net/context"
pb "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
)
// ImageFsInfo returns information of the filesystem that is used to store images.
func (s *Server) ImageFsInfo(ctx context.Context, req *pb.ImageFsInfoRequest) (*pb.ImageFsInfoResponse, error) {
return nil, fmt.Errorf("not implemented")
}


@ -3,7 +3,7 @@ package server
import (
"github.com/sirupsen/logrus"
"golang.org/x/net/context"
pb "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
pb "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
)
// ListImages lists existing images.


@ -8,7 +8,7 @@ import (
"github.com/containers/image/types"
"github.com/sirupsen/logrus"
"golang.org/x/net/context"
pb "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
pb "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
)
// PullImage pulls a image with authentication config.


@ -6,7 +6,7 @@ import (
"github.com/sirupsen/logrus"
"golang.org/x/net/context"
pb "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
pb "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
)
// RemoveImage removes the image.


@ -8,7 +8,7 @@ import (
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"golang.org/x/net/context"
pb "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
pb "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
)
// ImageStatus returns the status of the image.


@ -5,7 +5,7 @@ import (
"strings"
"github.com/docker/docker/pkg/stringid"
pb "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
pb "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
)
const (


@ -2,7 +2,7 @@ package server
import (
"golang.org/x/net/context"
pb "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
pb "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
)
// Status returns the status of the runtime


@ -6,7 +6,7 @@ import (
"github.com/sirupsen/logrus"
"golang.org/x/net/context"
"k8s.io/apimachinery/pkg/fields"
pb "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
pb "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
)
// filterSandbox returns whether passed container matches filtering criteria


@ -10,7 +10,7 @@ import (
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"golang.org/x/net/context"
pb "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
pb "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
)
// RemovePodSandbox deletes the sandbox. If there are any running containers in the


@ -23,7 +23,7 @@ import (
"golang.org/x/net/context"
"golang.org/x/sys/unix"
"k8s.io/kubernetes/pkg/api/v1"
pb "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
pb "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
"k8s.io/kubernetes/pkg/kubelet/network/hostport"
)


@ -4,7 +4,7 @@ import (
"github.com/kubernetes-incubator/cri-o/oci"
"github.com/sirupsen/logrus"
"golang.org/x/net/context"
pb "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
pb "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
)
// PodSandboxStatus returns the Status of the PodSandbox.
@ -36,13 +36,8 @@ func (s *Server) PodSandboxStatus(ctx context.Context, req *pb.PodSandboxStatusR
sandboxID := sb.ID()
resp := &pb.PodSandboxStatusResponse{
Status: &pb.PodSandboxStatus{
Id: sandboxID,
CreatedAt: podInfraContainer.CreatedAt().UnixNano(),
Linux: &pb.LinuxPodSandboxStatus{
Namespaces: &pb.Namespace{
Network: netNsPath,
},
},
Id: sandboxID,
CreatedAt: podInfraContainer.CreatedAt().UnixNano(),
Network: &pb.PodSandboxNetworkStatus{Ip: ip},
State: rStatus,
Labels: sb.Labels(),
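
Sketch of how a caller reads the trimmed status, mirroring the nil check added to crioctl earlier in this commit; the client and sandbox ID are assumed inputs.

package example

import (
    "fmt"

    "golang.org/x/net/context"
    pb "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
)

// printSandboxStatus assumes `client` was obtained from pb.NewRuntimeServiceClient.
func printSandboxStatus(client pb.RuntimeServiceClient, id string) error {
    resp, err := client.PodSandboxStatus(context.Background(),
        &pb.PodSandboxStatusRequest{PodSandboxId: id})
    if err != nil {
        return err
    }
    fmt.Println("Status:", resp.Status.State)
    // The Linux namespace path is gone from the response; the IP comes from Network.
    if resp.Status.Network != nil {
        fmt.Println("IP Address:", resp.Status.Network.Ip)
    }
    return nil
}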


@ -14,7 +14,7 @@ import (
"github.com/sirupsen/logrus"
"golang.org/x/net/context"
"golang.org/x/sys/unix"
pb "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
pb "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
"k8s.io/kubernetes/pkg/kubelet/network/hostport"
)


@ -22,7 +22,7 @@ import (
"github.com/prometheus/client_golang/prometheus"
"github.com/sirupsen/logrus"
knet "k8s.io/apimachinery/pkg/util/net"
pb "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
pb "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
"k8s.io/kubernetes/pkg/kubelet/network/hostport"
"k8s.io/kubernetes/pkg/kubelet/server/streaming"
iptablesproxy "k8s.io/kubernetes/pkg/proxy/iptables"


@ -2,7 +2,7 @@ package server
import (
"golang.org/x/net/context"
pb "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
pb "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
)
// Version returns the runtime name, runtime version and runtime API version


@ -11,8 +11,9 @@ CRIO_ROOT=${CRIO_ROOT:-$(cd "$INTEGRATION_ROOT/../.."; pwd -P)}
# Path of the crio binary.
CRIO_BINARY=${CRIO_BINARY:-${CRIO_ROOT}/cri-o/crio}
# Path of the crioctl binary.
OCIC_BINARY=${OCIC_BINARY:-${CRIO_ROOT}/cri-o/crioctl}
# Path of the crictl binary.
CRICTL_PATH=$(command -v crictl || true)
CRICTL_BINARY=${CRICTL_PATH:-/usr/bin/crictl}
# Path to kpod binary.
KPOD_BINARY=${KPOD_BINARY:-${CRIO_ROOT}/cri-o/kpod}
# Path of the conmon binary.
@ -151,11 +152,18 @@ function crio() {
"$CRIO_BINARY" --listen "$CRIO_SOCKET" "$@"
}
# DEPRECATED
OCIC_BINARY=${OCIC_BINARY:-${CRIO_ROOT}/cri-o/crioctl}
# Run crioctl using the binary specified by $OCIC_BINARY.
function crioctl() {
"$OCIC_BINARY" --connect "$CRIO_SOCKET" "$@"
}
# Run crictl using the binary specified by $CRICTL_BINARY.
function crictl() {
"$CRICTL_BINARY" -r "$CRIO_SOCKET" -i "$CRIO_SOCKET" "$@"
}
# Communicate with Docker on the host machine.
# Should rarely use this.
function docker_host() {
@ -184,7 +192,7 @@ function retry() {
# Waits until the given crio becomes reachable.
function wait_until_reachable() {
retry 15 1 crioctl runtimeversion
retry 15 1 crictl status
}
# Start crio.
@ -228,14 +236,14 @@ function start_crio() {
"$CRIO_BINARY" --debug --config "$CRIO_CONFIG" & CRIO_PID=$!
wait_until_reachable
run crioctl image status --id=redis:alpine
run crictl image status redis:alpine
if [ "$status" -ne 0 ] ; then
crioctl image pull redis:alpine
crictl image pull redis:alpine
fi
REDIS_IMAGEID=$(crioctl image status --id=redis:alpine | head -1 | sed -e "s/ID: //g")
run crioctl image status --id=mrunalp/oom
REDIS_IMAGEID=$(crictl image status redis:alpine | head -1 | sed -e "s/ID: //g")
run crictl image status mrunalp/oom
if [ "$status" -ne 0 ] ; then
crioctl image pull mrunalp/oom
crictl image pull mrunalp/oom
fi
#
#
@ -248,63 +256,63 @@ function start_crio() {
#
#
REDIS_IMAGEID_DIGESTED="redis@sha256:03789f402b2ecfb98184bf128d180f398f81c63364948ff1454583b02442f73b"
run crioctl image status --id $REDIS_IMAGEID_DIGESTED
run crictl image status $REDIS_IMAGEID_DIGESTED
if [ "$status" -ne 0 ]; then
crioctl image pull $REDIS_IMAGEID_DIGESTED
crictl image pull $REDIS_IMAGEID_DIGESTED
fi
#
#
#
run crioctl image status --id=runcom/stderr-test
run crictl image status runcom/stderr-test
if [ "$status" -ne 0 ] ; then
crioctl image pull runcom/stderr-test:latest
crictl image pull runcom/stderr-test:latest
fi
STDERR_IMAGEID=$(crioctl image status --id=runcom/stderr-test | head -1 | sed -e "s/ID: //g")
run crioctl image status --id=busybox
STDERR_IMAGEID=$(crictl image status runcom/stderr-test | head -1 | sed -e "s/ID: //g")
run crictl image status busybox
if [ "$status" -ne 0 ] ; then
crioctl image pull busybox:latest
crictl image pull busybox:latest
fi
BUSYBOX_IMAGEID=$(crioctl image status --id=busybox | head -1 | sed -e "s/ID: //g")
run crioctl image status --id=mrunalp/image-volume-test
BUSYBOX_IMAGEID=$(crictl image status busybox | head -1 | sed -e "s/ID: //g")
run crictl image status mrunalp/image-volume-test
if [ "$status" -ne 0 ] ; then
crioctl image pull mrunalp/image-volume-test:latest
crictl image pull mrunalp/image-volume-test:latest
fi
VOLUME_IMAGEID=$(crioctl image status --id=mrunalp/image-volume-test | head -1 | sed -e "s/ID: //g")
VOLUME_IMAGEID=$(crictl image status mrunalp/image-volume-test | head -1 | sed -e "s/ID: //g")
}
function cleanup_ctrs() {
run crioctl ctr list --quiet
run crictl ctr ls --quiet
if [ "$status" -eq 0 ]; then
if [ "$output" != "" ]; then
printf '%s\n' "$output" | while IFS= read -r line
do
crioctl ctr stop --id "$line"
crioctl ctr remove --id "$line"
crictl ctr stop "$line"
crictl ctr rm "$line"
done
fi
fi
}
function cleanup_images() {
run crioctl image list --quiet
run crictl image ls --quiet
if [ "$status" -eq 0 ]; then
if [ "$output" != "" ]; then
printf '%s\n' "$output" | while IFS= read -r line
do
crioctl image remove --id "$line"
crictl image rm "$line"
done
fi
fi
}
function cleanup_pods() {
run crioctl pod list --quiet
run crictl sandbox ls --quiet
if [ "$status" -eq 0 ]; then
if [ "$output" != "" ]; then
printf '%s\n' "$output" | while IFS= read -r line
do
crioctl pod stop --id "$line"
crioctl pod remove --id "$line"
crictl sandbox stop "$line"
crictl sandbox rm "$line"
done
fi
fi
@ -408,10 +416,7 @@ EOF
}
function check_pod_cidr() {
fullnetns=`crioctl pod status --id $1 | grep namespace | cut -d ' ' -f 3`
netns=`basename $fullnetns`
run ip netns exec $netns ip addr show dev eth0 scope global 2>&1
run crioctl ctr execsync --id $1 ip addr show dev eth0 scope global 2>&1
echo "$output"
[ "$status" -eq 0 ]
[[ "$output" =~ $POD_CIDR_MASK ]]
@ -435,8 +440,7 @@ function get_host_ip() {
}
function ping_pod() {
netns=`crioctl pod status --id $1 | grep namespace | cut -d ' ' -f 3`
inet=`ip netns exec \`basename $netns\` ip addr show dev eth0 scope global | grep inet`
inet=`crioctl ctr execsync --id $1 ip addr show dev eth0 scope global 2>&1 | grep inet`
IFS=" "
ip=`parse_pod_ip $inet`
@ -447,12 +451,14 @@ function ping_pod() {
}
function ping_pod_from_pod() {
pod_ip=`crioctl pod status --id $1 | grep "IP Address" | cut -d ' ' -f 3`
netns=`crioctl pod status --id $2 | grep namespace | cut -d ' ' -f 3`
inet=`crioctl ctr execsync --id $1 ip addr show dev eth0 scope global 2>&1 | grep inet`
ip netns exec `basename $netns` ping -W 1 -c 2 $pod_ip
IFS=" "
ip=`parse_pod_ip $inet`
echo $?
run crioctl ctr execsync --id $2 ping -W 1 -c 2 $ip
echo "$output"
[ "$status" -eq 0 ]
}


@ -9,8 +9,14 @@ load helpers
[ "$status" -eq 0 ]
pod_id="$output"
check_pod_cidr $pod_id
run crioctl ctr create --config "$TESTDATA"/container_redis.json --pod "$pod_id"
echo "$output"
[ "$status" -eq 0 ]
ctr_id="$output"
check_pod_cidr $ctr_id
cleanup_ctrs
cleanup_pods
stop_crio
}
@ -22,8 +28,14 @@ load helpers
[ "$status" -eq 0 ]
pod_id="$output"
ping_pod $pod_id
run crioctl ctr create --config "$TESTDATA"/container_redis.json --pod "$pod_id"
echo "$output"
[ "$status" -eq 0 ]
ctr_id="$output"
ping_pod $ctr_id
cleanup_ctrs
cleanup_pods
stop_crio
}
@ -34,6 +46,10 @@ load helpers
echo "$output"
[ "$status" -eq 0 ]
pod1_id="$output"
run crioctl ctr create --config "$TESTDATA"/container_redis.json --pod "$pod1_id"
echo "$output"
[ "$status" -eq 0 ]
ctr1_id="$output"
temp_sandbox_conf cni_test
@ -41,13 +57,18 @@ load helpers
echo "$output"
[ "$status" -eq 0 ]
pod2_id="$output"
run crioctl ctr create --config "$TESTDATA"/container_redis.json --pod "$pod2_id"
echo "$output"
[ "$status" -eq 0 ]
ctr2_id="$output"
ping_pod_from_pod $pod1_id $pod2_id
ping_pod_from_pod $ctr1_id $ctr2_id
[ "$status" -eq 0 ]
ping_pod_from_pod $pod2_id $pod1_id
ping_pod_from_pod $ctr2_id $ctr1_id
[ "$status" -eq 0 ]
cleanup_ctrs
cleanup_pods
stop_crio
}


@ -1,15 +1,15 @@
k8s.io/kubernetes v1.6.5 https://github.com/kubernetes/kubernetes
k8s.io/kubernetes v1.7.4 https://github.com/kubernetes/kubernetes
# https://github.com/kubernetes/client-go#compatibility-matrix
k8s.io/client-go v3.0.0-beta.0 https://github.com/kubernetes/client-go
k8s.io/apimachinery release-1.6 https://github.com/kubernetes/apimachinery
k8s.io/apiserver release-1.6 https://github.com/kubernetes/apiserver
k8s.io/client-go v4.0.0 https://github.com/kubernetes/client-go
k8s.io/apimachinery release-1.7 https://github.com/kubernetes/apimachinery
k8s.io/apiserver release-1.7 https://github.com/kubernetes/apiserver
#
github.com/sirupsen/logrus v1.0.0
github.com/containers/image 74e359348c7ce9e0caf4fa75aa8de3809cf41c46
github.com/ostreedev/ostree-go master
github.com/containers/storage f8cff0727cf0802f0752ca58d2c05ec5270a47d5
github.com/containernetworking/cni v0.4.0
google.golang.org/grpc v1.0.1-GA https://github.com/grpc/grpc-go
google.golang.org/grpc v1.0.4 https://github.com/grpc/grpc-go
github.com/opencontainers/selinux v1.0.0-rc1
github.com/opencontainers/go-digest v1.0.0-rc0
github.com/opencontainers/runtime-tools 6bcd3b417fd6962ea04dafdbc2c07444e750572d
@ -19,7 +19,7 @@ github.com/vishvananda/netlink master
github.com/vishvananda/netns master
github.com/opencontainers/image-spec v1.0.0
github.com/opencontainers/runtime-spec v1.0.0
github.com/juju/ratelimit acf38b000a03e4ab89e40f20f1e548f4e6ac7f72
github.com/juju/ratelimit 5b9ff866471762aa2ab2dced63c9fb6f53921342
github.com/tchap/go-patricia v2.2.6
gopkg.in/cheggaaa/pb.v1 v1.0.7
gopkg.in/inf.v0 v0.9.0
@ -44,10 +44,10 @@ github.com/BurntSushi/toml v0.2.0
github.com/mitchellh/go-wordwrap ad45545899c7b13c020ea92b2072220eefad42b8
github.com/golang/glog 23def4e6c14b4da8ac2ed8007337bc5eb5007998
github.com/davecgh/go-spew v1.1.0
github.com/go-openapi/spec 02fb9cd3430ed0581e0ceb4804d5d4b3cc702694
github.com/go-openapi/spec 6aced65f8501fe1217321abf0749d354824ba2ff
github.com/go-openapi/jsonpointer 779f45308c19820f1a69e9a4cd965f496e0da10f
github.com/go-openapi/jsonreference 36d33bfe519efae5632669801b180bf1a245da3b
github.com/go-openapi/swag d5f8ebc3b1c55a4cf6489eeae7354f338cfe299e
github.com/go-openapi/swag 1d0bd113de87027671077d3c71eb3ac5d7dbba72
github.com/google/gofuzz 44d81051d367757e1c7c6a5a86423ece9afcf63c
github.com/mailru/easyjson 99e922cf9de1bc0ab38310c277cff32c2147e747
github.com/PuerkitoBio/purell v1.1.0
@ -65,23 +65,34 @@ github.com/coreos/go-systemd v14
github.com/coreos/pkg v3
github.com/golang/groupcache b710c8433bd175204919eb38776e944233235d03
github.com/fsnotify/fsnotify 7d7316ed6e1ed2de075aab8dfc76de5d158d66e1
github.com/emicklei/go-restful 09691a3b6378b740595c1002f40c34dd5f218a22
github.com/Azure/go-ansiterm 19f72df4d05d31cbe1c56bfc8045c96babff6c7e
github.com/Microsoft/go-winio 78439966b38d69bf38227fbf57ac8a6fee70f69a
github.com/Microsoft/hcsshim 43f9725307998e09f2e3816c2c0c36dc98f0c982
github.com/emicklei/go-restful ff4f55a206334ef123e4f79bbf348980da81ca46
github.com/emicklei/go-restful-swagger12 1.0.1
github.com/pkg/errors v0.8.0
github.com/godbus/dbus v4.0.0
github.com/urfave/cli v1.19.1
github.com/vbatts/tar-split v0.10.1
github.com/renstrom/dedent v1.0.0
github.com/prometheus/client_golang v0.8.0
github.com/prometheus/procfs e645f4e5aaa8506fc71d6edbc5c4ff02c04c46f2
github.com/prometheus/common 61f87aac8082fa8c3c5655c7608d7478d46ac2ad
github.com/prometheus/client_model 6f3806018612930941127f2a7c6c453ba2c527d2
github.com/matttproud/golang_protobuf_extensions v1.0.0
github.com/beorn7/perks 4c0e84591b9aa9e6dcfdf3e020114cd81f89d5f9
github.com/hpcloud/tail v1.0.0
gopkg.in/fsnotify.v1 v1.4.2
gopkg.in/tomb.v1 v1
github.com/fatih/camelcase f6a740d52f961c60348ebb109adde9f4635d7540
github.com/buger/goterm 2f8dfbc7dbbff5dd1d391ed91482c24df243b2d3
github.com/dgrijalva/jwt-go v3.0.0
github.com/exponent-io/jsonpath d6023ce2651d8eafb5c75bb0c7167536102ec9f5
github.com/hashicorp/golang-lru 0a025b7e63adc15a622f29b0b2c4c3848243bbf6
github.com/go-openapi/loads 18441dfa706d924a39a030ee2c3b1d8d81917b38
github.com/go-openapi/analysis b44dc874b601d9e4e2f6e19140e794ba24bead3b
github.com/go-openapi/strfmt 93a31ef21ac23f317792fff78f9539219dd74619
github.com/asaskevich/govalidator v6
github.com/go-openapi/errors d24ebc2075bad502fac3a8ae27aa6dd58e1952dc
github.com/mitchellh/mapstructure d0303fe809921458f417bcf828397a65db30a7e4
gopkg.in/mgo.v2 v2
github.com/prometheus/client_golang e7e903064f5e9eb5da98208bae10b475d4db0f8c
github.com/prometheus/client_model fa8ad6fec33561be4280a8f0514318c79d7f6cb6
github.com/prometheus/common 13ba4ddd0caa9c28ca7b7bffe1dfa9ed8d5ef207
github.com/prometheus/procfs 65c1f6f8f0fc1e2185eb9863a3bc751496404259
github.com/matttproud/golang_protobuf_extensions fc2b8d3a73c4867e51861bbdd5ae3c1f0869dd6a
github.com/beorn7/perks 3ac7bf7a47d159a033b107610db8a1b6575507a4


@ -133,7 +133,7 @@ func (s *Stream) Query(q float64) float64 {
if l == 0 {
return 0
}
i := int(math.Ceil(float64(l) * q))
i := int(float64(l) * q)
if i > 0 {
i -= 1
}
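
Purely illustrative arithmetic, not part of the vendored package: for a sample length and quantile, the old ceiling-based index and the new truncating index can land one element apart.

package main

import (
    "fmt"
    "math"
)

func main() {
    l, q := 10, 0.55
    ceilIdx := int(math.Ceil(float64(l) * q)) // old formula: 6
    truncIdx := int(float64(l) * q)           // new formula: 5
    if ceilIdx > 0 {
        ceilIdx--
    }
    if truncIdx > 0 {
        truncIdx--
    }
    fmt.Println(ceilIdx, truncIdx) // prints: 5 4
}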

vendor/github.com/dgrijalva/jwt-go/LICENSE (generated, vendored, new file, 7 lines)

@ -0,0 +1,7 @@
Copyright (c) 2012 Dave Grijalva
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

vendor/github.com/dgrijalva/jwt-go/README.md (generated, vendored, new file, 85 lines)

@ -0,0 +1,85 @@
A [go](http://www.golang.org) (or 'golang' for search engine friendliness) implementation of [JSON Web Tokens](http://self-issued.info/docs/draft-ietf-oauth-json-web-token.html)
[![Build Status](https://travis-ci.org/dgrijalva/jwt-go.svg?branch=master)](https://travis-ci.org/dgrijalva/jwt-go)
**BREAKING CHANGES:*** Version 3.0.0 is here. It includes _a lot_ of changes including a few that break the API. We've tried to break as few things as possible, so there should just be a few type signature changes. A full list of breaking changes is available in `VERSION_HISTORY.md`. See `MIGRATION_GUIDE.md` for more information on updating your code.
**NOTICE:** A vulnerability in JWT was [recently published](https://auth0.com/blog/2015/03/31/critical-vulnerabilities-in-json-web-token-libraries/). As this library doesn't force users to validate the `alg` is what they expected, it's possible your usage is effected. There will be an update soon to remedy this, and it will likey require backwards-incompatible changes to the API. In the short term, please make sure your implementation verifies the `alg` is what you expect.
## What the heck is a JWT?
JWT.io has [a great introduction](https://jwt.io/introduction) to JSON Web Tokens.
In short, it's a signed JSON object that does something useful (for example, authentication). It's commonly used for `Bearer` tokens in Oauth 2. A token is made of three parts, separated by `.`'s. The first two parts are JSON objects, that have been [base64url](http://tools.ietf.org/html/rfc4648) encoded. The last part is the signature, encoded the same way.
The first part is called the header. It contains the necessary information for verifying the last part, the signature. For example, which encryption method was used for signing and what key was used.
The part in the middle is the interesting bit. It's called the Claims and contains the actual stuff you care about. Refer to [the RFC](http://self-issued.info/docs/draft-jones-json-web-token.html) for information about reserved keys and the proper way to add your own.
## What's in the box?
This library supports the parsing and verification as well as the generation and signing of JWTs. Current supported signing algorithms are HMAC SHA, RSA, RSA-PSS, and ECDSA, though hooks are present for adding your own.
## Examples
See [the project documentation](https://godoc.org/github.com/dgrijalva/jwt-go) for examples of usage:
* [Simple example of parsing and validating a token](https://godoc.org/github.com/dgrijalva/jwt-go#example_Parse_hmac)
* [Simple example of building and signing a token](https://godoc.org/github.com/dgrijalva/jwt-go#example_New_hmac)
* [Directory of Examples](https://godoc.org/github.com/dgrijalva/jwt-go#pkg-examples)
## Extensions
This library publishes all the necessary components for adding your own signing methods. Simply implement the `SigningMethod` interface and register a factory method using `RegisterSigningMethod`.
Here's an example of an extension that integrates with the Google App Engine signing tools: https://github.com/someone1/gcp-jwt-go
## Compliance
This library was last reviewed to comply with [RTF 7519](http://www.rfc-editor.org/info/rfc7519) dated May 2015 with a few notable differences:
* In order to protect against accidental use of [Unsecured JWTs](http://self-issued.info/docs/draft-ietf-oauth-json-web-token.html#UnsecuredJWT), tokens using `alg=none` will only be accepted if the constant `jwt.UnsafeAllowNoneSignatureType` is provided as the key.
## Project Status & Versioning
This library is considered production ready. Feedback and feature requests are appreciated. The API should be considered stable. There should be very few backwards-incompatible changes outside of major version updates (and only with good reason).
This project uses [Semantic Versioning 2.0.0](http://semver.org). Accepted pull requests will land on `master`. Periodically, versions will be tagged from `master`. You can find all the releases on [the project releases page](https://github.com/dgrijalva/jwt-go/releases).
While we try to make it obvious when we make breaking changes, there isn't a great mechanism for pushing announcements out to users. You may want to use this alternative package include: `gopkg.in/dgrijalva/jwt-go.v2`. It will do the right thing WRT semantic versioning.
## Usage Tips
### Signing vs Encryption
A token is simply a JSON object that is signed by its author. this tells you exactly two things about the data:
* The author of the token was in the possession of the signing secret
* The data has not been modified since it was signed
It's important to know that JWT does not provide encryption, which means anyone who has access to the token can read its contents. If you need to protect (encrypt) the data, there is a companion spec, `JWE`, that provides this functionality. JWE is currently outside the scope of this library.
### Choosing a Signing Method
There are several signing methods available, and you should probably take the time to learn about the various options before choosing one. The principal design decision is most likely going to be symmetric vs asymmetric.
Symmetric signing methods, such as HSA, use only a single secret. This is probably the simplest signing method to use since any `[]byte` can be used as a valid secret. They are also slightly computationally faster to use, though this rarely is enough to matter. Symmetric signing methods work the best when both producers and consumers of tokens are trusted, or even the same system. Since the same secret is used to both sign and validate tokens, you can't easily distribute the key for validation.
Asymmetric signing methods, such as RSA, use different keys for signing and verifying tokens. This makes it possible to produce tokens with a private key, and allow any consumer to access the public key for verification.
### JWT and OAuth
It's worth mentioning that OAuth and JWT are not the same thing. A JWT token is simply a signed JSON object. It can be used anywhere such a thing is useful. There is some confusion, though, as JWT is the most common type of bearer token used in OAuth2 authentication.
Without going too far down the rabbit hole, here's a description of the interaction of these technologies:
* OAuth is a protocol for allowing an identity provider to be separate from the service a user is logging in to. For example, whenever you use Facebook to log into a different service (Yelp, Spotify, etc), you are using OAuth.
* OAuth defines several options for passing around authentication data. One popular method is called a "bearer token". A bearer token is simply a string that _should_ only be held by an authenticated user. Thus, simply presenting this token proves your identity. You can probably derive from here why a JWT might make a good bearer token.
* Because bearer tokens are used for authentication, it's important they're kept secret. This is why transactions that use bearer tokens typically happen over SSL.
## More
Documentation can be found [on godoc.org](http://godoc.org/github.com/dgrijalva/jwt-go).
The command line utility included in this project (cmd/jwt) provides a straightforward example of token creation and parsing as well as a useful tool for debugging your own integration. You'll also find several implementation examples in to documentation.
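
A minimal sketch of the sign-and-parse flow the README describes, using the HS256 method and MapClaims from the files vendored here; it assumes the package's NewWithClaims, SignedString and Parse helpers, whose files are not shown in this diff. The secret is illustrative only.

package main

import (
    "fmt"

    jwt "github.com/dgrijalva/jwt-go"
)

func main() {
    secret := []byte("example-secret") // illustrative key only

    // Build and sign a token with the HS256 method and map-backed claims.
    token := jwt.NewWithClaims(jwt.SigningMethodHS256, jwt.MapClaims{
        "sub": "crio-demo",
        "exp": int64(4102444800), // far-future expiry
    })
    signed, err := token.SignedString(secret)
    if err != nil {
        panic(err) // sketch only
    }

    // Parse it back, returning the key only after checking the alg,
    // as the README's security notice recommends.
    parsed, err := jwt.Parse(signed, func(t *jwt.Token) (interface{}, error) {
        if _, ok := t.Method.(*jwt.SigningMethodHMAC); !ok {
            return nil, fmt.Errorf("unexpected signing method %v", t.Header["alg"])
        }
        return secret, nil
    })
    if err != nil {
        panic(err)
    }
    fmt.Println("valid:", parsed.Valid)
}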

vendor/github.com/dgrijalva/jwt-go/claims.go (generated, vendored, new file, 134 lines)

@ -0,0 +1,134 @@
package jwt
import (
"crypto/subtle"
"fmt"
"time"
)
// For a type to be a Claims object, it must just have a Valid method that determines
// if the token is invalid for any supported reason
type Claims interface {
Valid() error
}
// Structured version of Claims Section, as referenced at
// https://tools.ietf.org/html/rfc7519#section-4.1
// See examples for how to use this with your own claim types
type StandardClaims struct {
Audience string `json:"aud,omitempty"`
ExpiresAt int64 `json:"exp,omitempty"`
Id string `json:"jti,omitempty"`
IssuedAt int64 `json:"iat,omitempty"`
Issuer string `json:"iss,omitempty"`
NotBefore int64 `json:"nbf,omitempty"`
Subject string `json:"sub,omitempty"`
}
// Validates time based claims "exp, iat, nbf".
// There is no accounting for clock skew.
// As well, if any of the above claims are not in the token, it will still
// be considered a valid claim.
func (c StandardClaims) Valid() error {
vErr := new(ValidationError)
now := TimeFunc().Unix()
// The claims below are optional, by default, so if they are set to the
// default value in Go, let's not fail the verification for them.
if c.VerifyExpiresAt(now, false) == false {
delta := time.Unix(now, 0).Sub(time.Unix(c.ExpiresAt, 0))
vErr.Inner = fmt.Errorf("token is expired by %v", delta)
vErr.Errors |= ValidationErrorExpired
}
if c.VerifyIssuedAt(now, false) == false {
vErr.Inner = fmt.Errorf("Token used before issued")
vErr.Errors |= ValidationErrorIssuedAt
}
if c.VerifyNotBefore(now, false) == false {
vErr.Inner = fmt.Errorf("token is not valid yet")
vErr.Errors |= ValidationErrorNotValidYet
}
if vErr.valid() {
return nil
}
return vErr
}
// Compares the aud claim against cmp.
// If required is false, this method will return true if the value matches or is unset
func (c *StandardClaims) VerifyAudience(cmp string, req bool) bool {
return verifyAud(c.Audience, cmp, req)
}
// Compares the exp claim against cmp.
// If required is false, this method will return true if the value matches or is unset
func (c *StandardClaims) VerifyExpiresAt(cmp int64, req bool) bool {
return verifyExp(c.ExpiresAt, cmp, req)
}
// Compares the iat claim against cmp.
// If required is false, this method will return true if the value matches or is unset
func (c *StandardClaims) VerifyIssuedAt(cmp int64, req bool) bool {
return verifyIat(c.IssuedAt, cmp, req)
}
// Compares the iss claim against cmp.
// If required is false, this method will return true if the value matches or is unset
func (c *StandardClaims) VerifyIssuer(cmp string, req bool) bool {
return verifyIss(c.Issuer, cmp, req)
}
// Compares the nbf claim against cmp.
// If required is false, this method will return true if the value matches or is unset
func (c *StandardClaims) VerifyNotBefore(cmp int64, req bool) bool {
return verifyNbf(c.NotBefore, cmp, req)
}
// ----- helpers
func verifyAud(aud string, cmp string, required bool) bool {
if aud == "" {
return !required
}
if subtle.ConstantTimeCompare([]byte(aud), []byte(cmp)) != 0 {
return true
} else {
return false
}
}
func verifyExp(exp int64, now int64, required bool) bool {
if exp == 0 {
return !required
}
return now <= exp
}
func verifyIat(iat int64, now int64, required bool) bool {
if iat == 0 {
return !required
}
return now >= iat
}
func verifyIss(iss string, cmp string, required bool) bool {
if iss == "" {
return !required
}
if subtle.ConstantTimeCompare([]byte(iss), []byte(cmp)) != 0 {
return true
} else {
return false
}
}
func verifyNbf(nbf int64, now int64, required bool) bool {
if nbf == 0 {
return !required
}
return now >= nbf
}
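
Small sketch exercising StandardClaims.Valid from the file above; an already-expired ExpiresAt makes Valid return a ValidationError whose message reports how long ago the token expired.

package main

import (
    "fmt"
    "time"

    jwt "github.com/dgrijalva/jwt-go"
)

func main() {
    claims := jwt.StandardClaims{
        ExpiresAt: time.Now().Add(-time.Hour).Unix(), // already expired
        Issuer:    "cri-o-example",
    }
    if err := claims.Valid(); err != nil {
        fmt.Println("claims rejected:", err) // "token is expired by ..."
    }
}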

vendor/github.com/dgrijalva/jwt-go/doc.go (generated, vendored, new file, 4 lines)

@ -0,0 +1,4 @@
// Package jwt is a Go implementation of JSON Web Tokens: http://self-issued.info/docs/draft-jones-json-web-token.html
//
// See README.md for more info.
package jwt

vendor/github.com/dgrijalva/jwt-go/ecdsa.go (generated, vendored, new file, 147 lines)

@ -0,0 +1,147 @@
package jwt
import (
"crypto"
"crypto/ecdsa"
"crypto/rand"
"errors"
"math/big"
)
var (
// Sadly this is missing from crypto/ecdsa compared to crypto/rsa
ErrECDSAVerification = errors.New("crypto/ecdsa: verification error")
)
// Implements the ECDSA family of signing methods signing methods
type SigningMethodECDSA struct {
Name string
Hash crypto.Hash
KeySize int
CurveBits int
}
// Specific instances for EC256 and company
var (
SigningMethodES256 *SigningMethodECDSA
SigningMethodES384 *SigningMethodECDSA
SigningMethodES512 *SigningMethodECDSA
)
func init() {
// ES256
SigningMethodES256 = &SigningMethodECDSA{"ES256", crypto.SHA256, 32, 256}
RegisterSigningMethod(SigningMethodES256.Alg(), func() SigningMethod {
return SigningMethodES256
})
// ES384
SigningMethodES384 = &SigningMethodECDSA{"ES384", crypto.SHA384, 48, 384}
RegisterSigningMethod(SigningMethodES384.Alg(), func() SigningMethod {
return SigningMethodES384
})
// ES512
SigningMethodES512 = &SigningMethodECDSA{"ES512", crypto.SHA512, 66, 521}
RegisterSigningMethod(SigningMethodES512.Alg(), func() SigningMethod {
return SigningMethodES512
})
}
func (m *SigningMethodECDSA) Alg() string {
return m.Name
}
// Implements the Verify method from SigningMethod
// For this verify method, key must be an ecdsa.PublicKey struct
func (m *SigningMethodECDSA) Verify(signingString, signature string, key interface{}) error {
var err error
// Decode the signature
var sig []byte
if sig, err = DecodeSegment(signature); err != nil {
return err
}
// Get the key
var ecdsaKey *ecdsa.PublicKey
switch k := key.(type) {
case *ecdsa.PublicKey:
ecdsaKey = k
default:
return ErrInvalidKeyType
}
if len(sig) != 2*m.KeySize {
return ErrECDSAVerification
}
r := big.NewInt(0).SetBytes(sig[:m.KeySize])
s := big.NewInt(0).SetBytes(sig[m.KeySize:])
// Create hasher
if !m.Hash.Available() {
return ErrHashUnavailable
}
hasher := m.Hash.New()
hasher.Write([]byte(signingString))
// Verify the signature
if verifystatus := ecdsa.Verify(ecdsaKey, hasher.Sum(nil), r, s); verifystatus == true {
return nil
} else {
return ErrECDSAVerification
}
}
// Implements the Sign method from SigningMethod
// For this signing method, key must be an ecdsa.PrivateKey struct
func (m *SigningMethodECDSA) Sign(signingString string, key interface{}) (string, error) {
// Get the key
var ecdsaKey *ecdsa.PrivateKey
switch k := key.(type) {
case *ecdsa.PrivateKey:
ecdsaKey = k
default:
return "", ErrInvalidKeyType
}
// Create the hasher
if !m.Hash.Available() {
return "", ErrHashUnavailable
}
hasher := m.Hash.New()
hasher.Write([]byte(signingString))
// Sign the string and return r, s
if r, s, err := ecdsa.Sign(rand.Reader, ecdsaKey, hasher.Sum(nil)); err == nil {
curveBits := ecdsaKey.Curve.Params().BitSize
if m.CurveBits != curveBits {
return "", ErrInvalidKey
}
keyBytes := curveBits / 8
if curveBits%8 > 0 {
keyBytes += 1
}
// We serialize the outpus (r and s) into big-endian byte arrays and pad
// them with zeros on the left to make sure the sizes work out. Both arrays
// must be keyBytes long, and the output must be 2*keyBytes long.
rBytes := r.Bytes()
rBytesPadded := make([]byte, keyBytes)
copy(rBytesPadded[keyBytes-len(rBytes):], rBytes)
sBytes := s.Bytes()
sBytesPadded := make([]byte, keyBytes)
copy(sBytesPadded[keyBytes-len(sBytes):], sBytes)
out := append(rBytesPadded, sBytesPadded...)
return EncodeSegment(out), nil
} else {
return "", err
}
}

vendor/github.com/dgrijalva/jwt-go/ecdsa_utils.go (generated, vendored, new file, 67 lines)

@ -0,0 +1,67 @@
package jwt
import (
"crypto/ecdsa"
"crypto/x509"
"encoding/pem"
"errors"
)
var (
ErrNotECPublicKey = errors.New("Key is not a valid ECDSA public key")
ErrNotECPrivateKey = errors.New("Key is not a valid ECDSA private key")
)
// Parse PEM encoded Elliptic Curve Private Key Structure
func ParseECPrivateKeyFromPEM(key []byte) (*ecdsa.PrivateKey, error) {
var err error
// Parse PEM block
var block *pem.Block
if block, _ = pem.Decode(key); block == nil {
return nil, ErrKeyMustBePEMEncoded
}
// Parse the key
var parsedKey interface{}
if parsedKey, err = x509.ParseECPrivateKey(block.Bytes); err != nil {
return nil, err
}
var pkey *ecdsa.PrivateKey
var ok bool
if pkey, ok = parsedKey.(*ecdsa.PrivateKey); !ok {
return nil, ErrNotECPrivateKey
}
return pkey, nil
}
// Parse PEM encoded PKCS1 or PKCS8 public key
func ParseECPublicKeyFromPEM(key []byte) (*ecdsa.PublicKey, error) {
var err error
// Parse PEM block
var block *pem.Block
if block, _ = pem.Decode(key); block == nil {
return nil, ErrKeyMustBePEMEncoded
}
// Parse the key
var parsedKey interface{}
if parsedKey, err = x509.ParsePKIXPublicKey(block.Bytes); err != nil {
if cert, err := x509.ParseCertificate(block.Bytes); err == nil {
parsedKey = cert.PublicKey
} else {
return nil, err
}
}
var pkey *ecdsa.PublicKey
var ok bool
if pkey, ok = parsedKey.(*ecdsa.PublicKey); !ok {
return nil, ErrNotECPublicKey
}
return pkey, nil
}

vendor/github.com/dgrijalva/jwt-go/errors.go (generated, vendored, new file, 63 lines)

@ -0,0 +1,63 @@
package jwt
import (
"errors"
)
// Error constants
var (
ErrInvalidKey = errors.New("key is invalid")
ErrInvalidKeyType = errors.New("key is of invalid type")
ErrHashUnavailable = errors.New("the requested hash function is unavailable")
)
// The errors that might occur when parsing and validating a token
const (
ValidationErrorMalformed uint32 = 1 << iota // Token is malformed
ValidationErrorUnverifiable // Token could not be verified because of signing problems
ValidationErrorSignatureInvalid // Signature validation failed
// Standard Claim validation errors
ValidationErrorAudience // AUD validation failed
ValidationErrorExpired // EXP validation failed
ValidationErrorIssuedAt // IAT validation failed
ValidationErrorIssuer // ISS validation failed
ValidationErrorNotValidYet // NBF validation failed
ValidationErrorId // JTI validation failed
ValidationErrorClaimsInvalid // Generic claims validation error
)
// Helper for constructing a ValidationError with a string error message
func NewValidationError(errorText string, errorFlags uint32) *ValidationError {
return &ValidationError{
text: errorText,
Errors: errorFlags,
}
}
// The error from Parse if token is not valid
type ValidationError struct {
Inner error // stores the error returned by external dependencies, i.e.: KeyFunc
Errors uint32 // bitfield. see ValidationError... constants
text string // errors that do not have a valid error just have text
}
// Validation error is an error type
func (e ValidationError) Error() string {
if e.Inner != nil {
return e.Inner.Error()
} else if e.text != "" {
return e.text
} else {
return "token is invalid"
}
return e.Inner.Error()
}
// No errors
func (e *ValidationError) valid() bool {
if e.Errors > 0 {
return false
}
return true
}
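
Sketch of how a caller might inspect the Errors bitfield; it assumes the failed Parse call returns a *ValidationError, as the parser in this package does, and the token string and key are assumed inputs.

package example

import (
    "fmt"

    jwt "github.com/dgrijalva/jwt-go"
)

// classifyParseError distinguishes failure modes via the Errors bitfield.
func classifyParseError(tokenString string, key []byte) {
    _, err := jwt.Parse(tokenString, func(t *jwt.Token) (interface{}, error) { return key, nil })
    ve, ok := err.(*jwt.ValidationError)
    if !ok {
        return // nil or some non-validation error
    }
    switch {
    case ve.Errors&jwt.ValidationErrorMalformed != 0:
        fmt.Println("not a well-formed token")
    case ve.Errors&jwt.ValidationErrorExpired != 0:
        fmt.Println("token expired")
    default:
        fmt.Println("token invalid:", ve)
    }
}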

vendor/github.com/dgrijalva/jwt-go/hmac.go (generated, vendored, new file, 94 lines)

@ -0,0 +1,94 @@
package jwt
import (
"crypto"
"crypto/hmac"
"errors"
)
// Implements the HMAC-SHA family of signing methods signing methods
type SigningMethodHMAC struct {
Name string
Hash crypto.Hash
}
// Specific instances for HS256 and company
var (
SigningMethodHS256 *SigningMethodHMAC
SigningMethodHS384 *SigningMethodHMAC
SigningMethodHS512 *SigningMethodHMAC
ErrSignatureInvalid = errors.New("signature is invalid")
)
func init() {
// HS256
SigningMethodHS256 = &SigningMethodHMAC{"HS256", crypto.SHA256}
RegisterSigningMethod(SigningMethodHS256.Alg(), func() SigningMethod {
return SigningMethodHS256
})
// HS384
SigningMethodHS384 = &SigningMethodHMAC{"HS384", crypto.SHA384}
RegisterSigningMethod(SigningMethodHS384.Alg(), func() SigningMethod {
return SigningMethodHS384
})
// HS512
SigningMethodHS512 = &SigningMethodHMAC{"HS512", crypto.SHA512}
RegisterSigningMethod(SigningMethodHS512.Alg(), func() SigningMethod {
return SigningMethodHS512
})
}
func (m *SigningMethodHMAC) Alg() string {
return m.Name
}
// Verify the signature of HSXXX tokens. Returns nil if the signature is valid.
func (m *SigningMethodHMAC) Verify(signingString, signature string, key interface{}) error {
// Verify the key is the right type
keyBytes, ok := key.([]byte)
if !ok {
return ErrInvalidKeyType
}
// Decode signature, for comparison
sig, err := DecodeSegment(signature)
if err != nil {
return err
}
// Can we use the specified hashing method?
if !m.Hash.Available() {
return ErrHashUnavailable
}
// This signing method is symmetric, so we validate the signature
// by reproducing the signature from the signing string and key, then
// comparing that against the provided signature.
hasher := hmac.New(m.Hash.New, keyBytes)
hasher.Write([]byte(signingString))
if !hmac.Equal(sig, hasher.Sum(nil)) {
return ErrSignatureInvalid
}
// No validation errors. Signature is good.
return nil
}
// Implements the Sign method from SigningMethod for this signing method.
// Key must be []byte
func (m *SigningMethodHMAC) Sign(signingString string, key interface{}) (string, error) {
if keyBytes, ok := key.([]byte); ok {
if !m.Hash.Available() {
return "", ErrHashUnavailable
}
hasher := hmac.New(m.Hash.New, keyBytes)
hasher.Write([]byte(signingString))
return EncodeSegment(hasher.Sum(nil)), nil
}
return "", ErrInvalidKey
}
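A round-trip sketch of the HS256 method registered above: sign a MapClaims payload and verify it with the same shared secret (the secret and claim values are illustrative only):

```go
package main

import (
	"fmt"

	jwt "github.com/dgrijalva/jwt-go"
)

func main() {
	secret := []byte("illustrative-shared-secret")

	// Sign: HS256 requires a []byte key, per SigningMethodHMAC.Sign above.
	token := jwt.NewWithClaims(jwt.SigningMethodHS256, jwt.MapClaims{"sub": "cri-o"})
	signed, err := token.SignedString(secret)
	if err != nil {
		panic(err)
	}

	// Verify: the Keyfunc hands the same secret back to SigningMethodHMAC.Verify.
	parsed, err := jwt.Parse(signed, func(t *jwt.Token) (interface{}, error) {
		return secret, nil
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(parsed.Valid, parsed.Claims.(jwt.MapClaims)["sub"]) // true cri-o
}
```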

94
vendor/github.com/dgrijalva/jwt-go/map_claims.go generated vendored Normal file
View file

@ -0,0 +1,94 @@
package jwt
import (
"encoding/json"
"errors"
// "fmt"
)
// MapClaims is a Claims type that uses map[string]interface{} for JSON decoding.
// This is the default claims type if you don't supply one.
type MapClaims map[string]interface{}
// Compares the aud claim against cmp.
// If required is false, this method will return true if the value matches or is unset
func (m MapClaims) VerifyAudience(cmp string, req bool) bool {
aud, _ := m["aud"].(string)
return verifyAud(aud, cmp, req)
}
// Compares the exp claim against cmp.
// If required is false, this method will return true if the value matches or is unset
func (m MapClaims) VerifyExpiresAt(cmp int64, req bool) bool {
switch exp := m["exp"].(type) {
case float64:
return verifyExp(int64(exp), cmp, req)
case json.Number:
v, _ := exp.Int64()
return verifyExp(v, cmp, req)
}
return req == false
}
// Compares the iat claim against cmp.
// If required is false, this method will return true if the value matches or is unset
func (m MapClaims) VerifyIssuedAt(cmp int64, req bool) bool {
switch iat := m["iat"].(type) {
case float64:
return verifyIat(int64(iat), cmp, req)
case json.Number:
v, _ := iat.Int64()
return verifyIat(v, cmp, req)
}
return req == false
}
// Compares the iss claim against cmp.
// If required is false, this method will return true if the value matches or is unset
func (m MapClaims) VerifyIssuer(cmp string, req bool) bool {
iss, _ := m["iss"].(string)
return verifyIss(iss, cmp, req)
}
// Compares the nbf claim against cmp.
// If required is false, this method will return true if the value matches or is unset
func (m MapClaims) VerifyNotBefore(cmp int64, req bool) bool {
switch nbf := m["nbf"].(type) {
case float64:
return verifyNbf(int64(nbf), cmp, req)
case json.Number:
v, _ := nbf.Int64()
return verifyNbf(v, cmp, req)
}
return req == false
}
// Validates time based claims "exp, iat, nbf".
// There is no accounting for clock skew.
// If any of the above claims are not present in the token, the claims are
// still considered valid.
func (m MapClaims) Valid() error {
vErr := new(ValidationError)
now := TimeFunc().Unix()
if m.VerifyExpiresAt(now, false) == false {
vErr.Inner = errors.New("Token is expired")
vErr.Errors |= ValidationErrorExpired
}
if m.VerifyIssuedAt(now, false) == false {
vErr.Inner = errors.New("Token used before issued")
vErr.Errors |= ValidationErrorIssuedAt
}
if m.VerifyNotBefore(now, false) == false {
vErr.Inner = errors.New("Token is not valid yet")
vErr.Errors |= ValidationErrorNotValidYet
}
if vErr.valid() {
return nil
}
return vErr
}
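One detail worth showing: the exp/iat/nbf switches above only recognize float64 and json.Number, so directly constructed claims should use float64 (a JSON round trip produces float64 automatically). A small sketch with an already-expired claim:

```go
package main

import (
	"fmt"
	"time"

	jwt "github.com/dgrijalva/jwt-go"
)

func main() {
	claims := jwt.MapClaims{
		"iss": "example",
		// float64 so that VerifyExpiresAt's type switch matches; one hour in the past.
		"exp": float64(time.Now().Add(-time.Hour).Unix()),
	}

	// Valid() checks exp/iat/nbf against TimeFunc() with no clock-skew allowance.
	if err := claims.Valid(); err != nil {
		ve := err.(*jwt.ValidationError)
		fmt.Println(ve.Errors&jwt.ValidationErrorExpired != 0) // true
	}
}
```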

52
vendor/github.com/dgrijalva/jwt-go/none.go generated vendored Normal file
View file

@ -0,0 +1,52 @@
package jwt
// Implements the none signing method. This is required by the spec
// but you probably should never use it.
var SigningMethodNone *signingMethodNone
const UnsafeAllowNoneSignatureType unsafeNoneMagicConstant = "none signing method allowed"
var NoneSignatureTypeDisallowedError error
type signingMethodNone struct{}
type unsafeNoneMagicConstant string
func init() {
SigningMethodNone = &signingMethodNone{}
NoneSignatureTypeDisallowedError = NewValidationError("'none' signature type is not allowed", ValidationErrorSignatureInvalid)
RegisterSigningMethod(SigningMethodNone.Alg(), func() SigningMethod {
return SigningMethodNone
})
}
func (m *signingMethodNone) Alg() string {
return "none"
}
// Only allow 'none' alg type if UnsafeAllowNoneSignatureType is specified as the key
func (m *signingMethodNone) Verify(signingString, signature string, key interface{}) (err error) {
// Key must be UnsafeAllowNoneSignatureType to prevent accidentally
// accepting 'none' signing method
if _, ok := key.(unsafeNoneMagicConstant); !ok {
return NoneSignatureTypeDisallowedError
}
// If signing method is none, signature must be an empty string
if signature != "" {
return NewValidationError(
"'none' signing method with non-empty signature",
ValidationErrorSignatureInvalid,
)
}
// Accept 'none' signing method.
return nil
}
// Only allow 'none' signing if UnsafeAllowNoneSignatureType is specified as the key
func (m *signingMethodNone) Sign(signingString string, key interface{}) (string, error) {
if _, ok := key.(unsafeNoneMagicConstant); ok {
return "", nil
}
return "", NoneSignatureTypeDisallowedError
}
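A sketch of the guard above: an unsigned token parses only when the Keyfunc returns UnsafeAllowNoneSignatureType; the claim content is arbitrary:

```go
package main

import (
	"fmt"

	jwt "github.com/dgrijalva/jwt-go"
)

func main() {
	// An unsigned token: the third (signature) segment is empty.
	token := jwt.NewWithClaims(jwt.SigningMethodNone, jwt.MapClaims{"sub": "demo"})
	unsigned, err := token.SignedString(jwt.UnsafeAllowNoneSignatureType)
	if err != nil {
		panic(err)
	}

	// Returning any other key from the Keyfunc would surface
	// NoneSignatureTypeDisallowedError instead of a valid token.
	parsed, err := jwt.Parse(unsigned, func(t *jwt.Token) (interface{}, error) {
		return jwt.UnsafeAllowNoneSignatureType, nil
	})
	fmt.Println(parsed.Valid, err) // true <nil>
}
```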

128
vendor/github.com/dgrijalva/jwt-go/parser.go generated vendored Normal file
View file

@ -0,0 +1,128 @@
package jwt
import (
"bytes"
"encoding/json"
"fmt"
"strings"
)
type Parser struct {
ValidMethods []string // If populated, only these methods will be considered valid
UseJSONNumber bool // Use JSON Number format in JSON decoder
}
// Parse, validate, and return a token.
// keyFunc will receive the parsed token and should return the key for validating.
// If everything is kosher, err will be nil
func (p *Parser) Parse(tokenString string, keyFunc Keyfunc) (*Token, error) {
return p.ParseWithClaims(tokenString, MapClaims{}, keyFunc)
}
func (p *Parser) ParseWithClaims(tokenString string, claims Claims, keyFunc Keyfunc) (*Token, error) {
parts := strings.Split(tokenString, ".")
if len(parts) != 3 {
return nil, NewValidationError("token contains an invalid number of segments", ValidationErrorMalformed)
}
var err error
token := &Token{Raw: tokenString}
// parse Header
var headerBytes []byte
if headerBytes, err = DecodeSegment(parts[0]); err != nil {
if strings.HasPrefix(strings.ToLower(tokenString), "bearer ") {
return token, NewValidationError("tokenstring should not contain 'bearer '", ValidationErrorMalformed)
}
return token, &ValidationError{Inner: err, Errors: ValidationErrorMalformed}
}
if err = json.Unmarshal(headerBytes, &token.Header); err != nil {
return token, &ValidationError{Inner: err, Errors: ValidationErrorMalformed}
}
// parse Claims
var claimBytes []byte
token.Claims = claims
if claimBytes, err = DecodeSegment(parts[1]); err != nil {
return token, &ValidationError{Inner: err, Errors: ValidationErrorMalformed}
}
dec := json.NewDecoder(bytes.NewBuffer(claimBytes))
if p.UseJSONNumber {
dec.UseNumber()
}
// JSON Decode. Special case for map type to avoid weird pointer behavior
if c, ok := token.Claims.(MapClaims); ok {
err = dec.Decode(&c)
} else {
err = dec.Decode(&claims)
}
// Handle decode error
if err != nil {
return token, &ValidationError{Inner: err, Errors: ValidationErrorMalformed}
}
// Lookup signature method
if method, ok := token.Header["alg"].(string); ok {
if token.Method = GetSigningMethod(method); token.Method == nil {
return token, NewValidationError("signing method (alg) is unavailable.", ValidationErrorUnverifiable)
}
} else {
return token, NewValidationError("signing method (alg) is unspecified.", ValidationErrorUnverifiable)
}
// Verify signing method is in the required set
if p.ValidMethods != nil {
var signingMethodValid = false
var alg = token.Method.Alg()
for _, m := range p.ValidMethods {
if m == alg {
signingMethodValid = true
break
}
}
if !signingMethodValid {
// signing method is not in the listed set
return token, NewValidationError(fmt.Sprintf("signing method %v is invalid", alg), ValidationErrorSignatureInvalid)
}
}
// Lookup key
var key interface{}
if keyFunc == nil {
// keyFunc was not provided. short circuiting validation
return token, NewValidationError("no Keyfunc was provided.", ValidationErrorUnverifiable)
}
if key, err = keyFunc(token); err != nil {
// keyFunc returned an error
return token, &ValidationError{Inner: err, Errors: ValidationErrorUnverifiable}
}
vErr := &ValidationError{}
// Validate Claims
if err := token.Claims.Valid(); err != nil {
// If the Claims Valid returned an error, check if it is a validation error,
// If it was another error type, create a ValidationError with a generic ClaimsInvalid flag set
if e, ok := err.(*ValidationError); !ok {
vErr = &ValidationError{Inner: err, Errors: ValidationErrorClaimsInvalid}
} else {
vErr = e
}
}
// Perform validation
token.Signature = parts[2]
if err = token.Method.Verify(strings.Join(parts[0:2], "."), token.Signature, key); err != nil {
vErr.Inner = err
vErr.Errors |= ValidationErrorSignatureInvalid
}
if vErr.valid() {
token.Valid = true
return token, nil
}
return token, vErr
}
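A sketch of using Parser directly to pin the accepted algorithm via ValidMethods, which is the usual defense against alg-switching; the secret is a placeholder:

```go
package main

import (
	"fmt"

	jwt "github.com/dgrijalva/jwt-go"
)

func main() {
	secret := []byte("placeholder-secret")
	signed, err := jwt.NewWithClaims(jwt.SigningMethodHS256, jwt.MapClaims{"sub": "demo"}).SignedString(secret)
	if err != nil {
		panic(err)
	}

	// Only HS256 is accepted; any other alg fails before the key is even used.
	parser := &jwt.Parser{ValidMethods: []string{"HS256"}, UseJSONNumber: true}
	token, err := parser.ParseWithClaims(signed, jwt.MapClaims{}, func(t *jwt.Token) (interface{}, error) {
		return secret, nil
	})
	fmt.Println(token.Valid, err) // true <nil>
}
```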

100
vendor/github.com/dgrijalva/jwt-go/rsa.go generated vendored Normal file
View file

@ -0,0 +1,100 @@
package jwt
import (
"crypto"
"crypto/rand"
"crypto/rsa"
)
// SigningMethodRSA implements the RSA family of signing methods
type SigningMethodRSA struct {
Name string
Hash crypto.Hash
}
// Specific instances for RS256 and company
var (
SigningMethodRS256 *SigningMethodRSA
SigningMethodRS384 *SigningMethodRSA
SigningMethodRS512 *SigningMethodRSA
)
func init() {
// RS256
SigningMethodRS256 = &SigningMethodRSA{"RS256", crypto.SHA256}
RegisterSigningMethod(SigningMethodRS256.Alg(), func() SigningMethod {
return SigningMethodRS256
})
// RS384
SigningMethodRS384 = &SigningMethodRSA{"RS384", crypto.SHA384}
RegisterSigningMethod(SigningMethodRS384.Alg(), func() SigningMethod {
return SigningMethodRS384
})
// RS512
SigningMethodRS512 = &SigningMethodRSA{"RS512", crypto.SHA512}
RegisterSigningMethod(SigningMethodRS512.Alg(), func() SigningMethod {
return SigningMethodRS512
})
}
func (m *SigningMethodRSA) Alg() string {
return m.Name
}
// Implements the Verify method from SigningMethod
// For this signing method, key must be an *rsa.PublicKey structure.
func (m *SigningMethodRSA) Verify(signingString, signature string, key interface{}) error {
var err error
// Decode the signature
var sig []byte
if sig, err = DecodeSegment(signature); err != nil {
return err
}
var rsaKey *rsa.PublicKey
var ok bool
if rsaKey, ok = key.(*rsa.PublicKey); !ok {
return ErrInvalidKeyType
}
// Create hasher
if !m.Hash.Available() {
return ErrHashUnavailable
}
hasher := m.Hash.New()
hasher.Write([]byte(signingString))
// Verify the signature
return rsa.VerifyPKCS1v15(rsaKey, m.Hash, hasher.Sum(nil), sig)
}
// Implements the Sign method from SigningMethod
// For this signing method, key must be an *rsa.PrivateKey structure.
func (m *SigningMethodRSA) Sign(signingString string, key interface{}) (string, error) {
var rsaKey *rsa.PrivateKey
var ok bool
// Validate type of key
if rsaKey, ok = key.(*rsa.PrivateKey); !ok {
return "", ErrInvalidKey
}
// Create the hasher
if !m.Hash.Available() {
return "", ErrHashUnavailable
}
hasher := m.Hash.New()
hasher.Write([]byte(signingString))
// Sign the string and return the encoded bytes
if sigBytes, err := rsa.SignPKCS1v15(rand.Reader, rsaKey, m.Hash, hasher.Sum(nil)); err == nil {
return EncodeSegment(sigBytes), nil
} else {
return "", err
}
}
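A sketch pairing SigningMethodRS256 with a freshly generated key; real deployments would load the key from PEM instead (see rsa_utils.go below):

```go
package main

import (
	"crypto/rand"
	"crypto/rsa"
	"fmt"

	jwt "github.com/dgrijalva/jwt-go"
)

func main() {
	// Throwaway 2048-bit key, for illustration only.
	key, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		panic(err)
	}

	// Sign requires *rsa.PrivateKey; Verify requires *rsa.PublicKey.
	signed, err := jwt.NewWithClaims(jwt.SigningMethodRS256, jwt.MapClaims{"sub": "demo"}).SignedString(key)
	if err != nil {
		panic(err)
	}
	parsed, err := jwt.Parse(signed, func(t *jwt.Token) (interface{}, error) {
		return &key.PublicKey, nil
	})
	fmt.Println(parsed.Valid, err) // true <nil>
}
```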

126
vendor/github.com/dgrijalva/jwt-go/rsa_pss.go generated vendored Normal file
View file

@ -0,0 +1,126 @@
// +build go1.4
package jwt
import (
"crypto"
"crypto/rand"
"crypto/rsa"
)
// SigningMethodRSAPSS implements the RSA-PSS family of signing methods
type SigningMethodRSAPSS struct {
*SigningMethodRSA
Options *rsa.PSSOptions
}
// Specific instances for RS/PS and company
var (
SigningMethodPS256 *SigningMethodRSAPSS
SigningMethodPS384 *SigningMethodRSAPSS
SigningMethodPS512 *SigningMethodRSAPSS
)
func init() {
// PS256
SigningMethodPS256 = &SigningMethodRSAPSS{
&SigningMethodRSA{
Name: "PS256",
Hash: crypto.SHA256,
},
&rsa.PSSOptions{
SaltLength: rsa.PSSSaltLengthAuto,
Hash: crypto.SHA256,
},
}
RegisterSigningMethod(SigningMethodPS256.Alg(), func() SigningMethod {
return SigningMethodPS256
})
// PS384
SigningMethodPS384 = &SigningMethodRSAPSS{
&SigningMethodRSA{
Name: "PS384",
Hash: crypto.SHA384,
},
&rsa.PSSOptions{
SaltLength: rsa.PSSSaltLengthAuto,
Hash: crypto.SHA384,
},
}
RegisterSigningMethod(SigningMethodPS384.Alg(), func() SigningMethod {
return SigningMethodPS384
})
// PS512
SigningMethodPS512 = &SigningMethodRSAPSS{
&SigningMethodRSA{
Name: "PS512",
Hash: crypto.SHA512,
},
&rsa.PSSOptions{
SaltLength: rsa.PSSSaltLengthAuto,
Hash: crypto.SHA512,
},
}
RegisterSigningMethod(SigningMethodPS512.Alg(), func() SigningMethod {
return SigningMethodPS512
})
}
// Implements the Verify method from SigningMethod
// For this verify method, key must be an rsa.PublicKey struct
func (m *SigningMethodRSAPSS) Verify(signingString, signature string, key interface{}) error {
var err error
// Decode the signature
var sig []byte
if sig, err = DecodeSegment(signature); err != nil {
return err
}
var rsaKey *rsa.PublicKey
switch k := key.(type) {
case *rsa.PublicKey:
rsaKey = k
default:
return ErrInvalidKey
}
// Create hasher
if !m.Hash.Available() {
return ErrHashUnavailable
}
hasher := m.Hash.New()
hasher.Write([]byte(signingString))
return rsa.VerifyPSS(rsaKey, m.Hash, hasher.Sum(nil), sig, m.Options)
}
// Implements the Sign method from SigningMethod
// For this signing method, key must be an rsa.PrivateKey struct
func (m *SigningMethodRSAPSS) Sign(signingString string, key interface{}) (string, error) {
var rsaKey *rsa.PrivateKey
switch k := key.(type) {
case *rsa.PrivateKey:
rsaKey = k
default:
return "", ErrInvalidKeyType
}
// Create the hasher
if !m.Hash.Available() {
return "", ErrHashUnavailable
}
hasher := m.Hash.New()
hasher.Write([]byte(signingString))
// Sign the string and return the encoded bytes
if sigBytes, err := rsa.SignPSS(rand.Reader, rsaKey, m.Hash, hasher.Sum(nil), m.Options); err == nil {
return EncodeSegment(sigBytes), nil
} else {
return "", err
}
}
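The PSS variants take the same key types as the RS methods; only the padding differs. A compressed sketch, again with a throwaway key:

```go
package main

import (
	"crypto/rand"
	"crypto/rsa"
	"fmt"

	jwt "github.com/dgrijalva/jwt-go"
)

func main() {
	key, err := rsa.GenerateKey(rand.Reader, 2048) // illustration only
	if err != nil {
		panic(err)
	}
	signed, err := jwt.NewWithClaims(jwt.SigningMethodPS256, jwt.MapClaims{"sub": "demo"}).SignedString(key)
	if err != nil {
		panic(err)
	}
	parsed, err := jwt.Parse(signed, func(t *jwt.Token) (interface{}, error) {
		return &key.PublicKey, nil
	})
	fmt.Println(parsed.Valid, err) // true <nil>
}
```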

69
vendor/github.com/dgrijalva/jwt-go/rsa_utils.go generated vendored Normal file
View file

@ -0,0 +1,69 @@
package jwt
import (
"crypto/rsa"
"crypto/x509"
"encoding/pem"
"errors"
)
var (
ErrKeyMustBePEMEncoded = errors.New("Invalid Key: Key must be PEM encoded PKCS1 or PKCS8 private key")
ErrNotRSAPrivateKey = errors.New("Key is not a valid RSA private key")
ErrNotRSAPublicKey = errors.New("Key is not a valid RSA public key")
)
// Parse PEM encoded PKCS1 or PKCS8 private key
func ParseRSAPrivateKeyFromPEM(key []byte) (*rsa.PrivateKey, error) {
var err error
// Parse PEM block
var block *pem.Block
if block, _ = pem.Decode(key); block == nil {
return nil, ErrKeyMustBePEMEncoded
}
var parsedKey interface{}
if parsedKey, err = x509.ParsePKCS1PrivateKey(block.Bytes); err != nil {
if parsedKey, err = x509.ParsePKCS8PrivateKey(block.Bytes); err != nil {
return nil, err
}
}
var pkey *rsa.PrivateKey
var ok bool
if pkey, ok = parsedKey.(*rsa.PrivateKey); !ok {
return nil, ErrNotRSAPrivateKey
}
return pkey, nil
}
// Parse PEM encoded PKCS1 or PKCS8 public key
func ParseRSAPublicKeyFromPEM(key []byte) (*rsa.PublicKey, error) {
var err error
// Parse PEM block
var block *pem.Block
if block, _ = pem.Decode(key); block == nil {
return nil, ErrKeyMustBePEMEncoded
}
// Parse the key
var parsedKey interface{}
if parsedKey, err = x509.ParsePKIXPublicKey(block.Bytes); err != nil {
if cert, err := x509.ParseCertificate(block.Bytes); err == nil {
parsedKey = cert.PublicKey
} else {
return nil, err
}
}
var pkey *rsa.PublicKey
var ok bool
if pkey, ok = parsedKey.(*rsa.PublicKey); !ok {
return nil, ErrNotRSAPublicKey
}
return pkey, nil
}
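A sketch wiring the PEM helpers above into signing and verification; the file paths are hypothetical, and any PKCS1/PKCS8 private key plus PKIX public key (or certificate) PEM would do:

```go
package main

import (
	"fmt"
	"io/ioutil"

	jwt "github.com/dgrijalva/jwt-go"
)

func main() {
	// Hypothetical paths.
	privPEM, err := ioutil.ReadFile("/etc/example/jwt.key")
	if err != nil {
		panic(err)
	}
	pubPEM, err := ioutil.ReadFile("/etc/example/jwt.pub")
	if err != nil {
		panic(err)
	}

	priv, err := jwt.ParseRSAPrivateKeyFromPEM(privPEM)
	if err != nil {
		panic(err)
	}
	pub, err := jwt.ParseRSAPublicKeyFromPEM(pubPEM)
	if err != nil {
		panic(err)
	}

	signed, err := jwt.NewWithClaims(jwt.SigningMethodRS256, jwt.MapClaims{"sub": "demo"}).SignedString(priv)
	if err != nil {
		panic(err)
	}
	_, err = jwt.Parse(signed, func(t *jwt.Token) (interface{}, error) { return pub, nil })
	fmt.Println(err) // <nil> when the key pair matches
}
```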

35
vendor/github.com/dgrijalva/jwt-go/signing_method.go generated vendored Normal file
View file

@ -0,0 +1,35 @@
package jwt
import (
"sync"
)
var signingMethods = map[string]func() SigningMethod{}
var signingMethodLock = new(sync.RWMutex)
// Implement SigningMethod to add new methods for signing or verifying tokens.
type SigningMethod interface {
Verify(signingString, signature string, key interface{}) error // Returns nil if signature is valid
Sign(signingString string, key interface{}) (string, error) // Returns encoded signature or error
Alg() string // returns the alg identifier for this method (example: 'HS256')
}
// Register the "alg" name and a factory function for signing method.
// This is typically done during init() in the method's implementation
func RegisterSigningMethod(alg string, f func() SigningMethod) {
signingMethodLock.Lock()
defer signingMethodLock.Unlock()
signingMethods[alg] = f
}
// Get a signing method from an "alg" string
func GetSigningMethod(alg string) (method SigningMethod) {
signingMethodLock.RLock()
defer signingMethodLock.RUnlock()
if methodF, ok := signingMethods[alg]; ok {
method = methodF()
}
return
}
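A sketch of the registration flow with a deliberately toy method; it performs no real cryptography and exists only to show how RegisterSigningMethod and GetSigningMethod fit together:

```go
package main

import (
	"errors"
	"fmt"

	jwt "github.com/dgrijalva/jwt-go"
)

// demoMethod is NOT secure; it emits a constant "signature" purely for illustration.
type demoMethod struct{}

func (demoMethod) Alg() string { return "DEMO" }

func (demoMethod) Sign(signingString string, key interface{}) (string, error) {
	return jwt.EncodeSegment([]byte("demo")), nil
}

func (demoMethod) Verify(signingString, signature string, key interface{}) error {
	if signature != jwt.EncodeSegment([]byte("demo")) {
		return errors.New("demo signature mismatch")
	}
	return nil
}

func main() {
	jwt.RegisterSigningMethod("DEMO", func() jwt.SigningMethod { return demoMethod{} })
	fmt.Println(jwt.GetSigningMethod("DEMO").Alg()) // DEMO
}
```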

108
vendor/github.com/dgrijalva/jwt-go/token.go generated vendored Normal file
View file

@ -0,0 +1,108 @@
package jwt
import (
"encoding/base64"
"encoding/json"
"strings"
"time"
)
// TimeFunc provides the current time when parsing token to validate "exp" claim (expiration time).
// You can override it to use another time value. This is useful for testing or if your
// server uses a different time zone than your tokens.
var TimeFunc = time.Now
// Parse methods use this callback function to supply
// the key for verification. The function receives the parsed,
// but unverified Token. This allows you to use properties in the
// Header of the token (such as `kid`) to identify which key to use.
type Keyfunc func(*Token) (interface{}, error)
// A JWT Token. Different fields will be used depending on whether you're
// creating or parsing/verifying a token.
type Token struct {
Raw string // The raw token. Populated when you Parse a token
Method SigningMethod // The signing method used or to be used
Header map[string]interface{} // The first segment of the token
Claims Claims // The second segment of the token
Signature string // The third segment of the token. Populated when you Parse a token
Valid bool // Is the token valid? Populated when you Parse/Verify a token
}
// Create a new Token. Takes a signing method
func New(method SigningMethod) *Token {
return NewWithClaims(method, MapClaims{})
}
func NewWithClaims(method SigningMethod, claims Claims) *Token {
return &Token{
Header: map[string]interface{}{
"typ": "JWT",
"alg": method.Alg(),
},
Claims: claims,
Method: method,
}
}
// Get the complete, signed token
func (t *Token) SignedString(key interface{}) (string, error) {
var sig, sstr string
var err error
if sstr, err = t.SigningString(); err != nil {
return "", err
}
if sig, err = t.Method.Sign(sstr, key); err != nil {
return "", err
}
return strings.Join([]string{sstr, sig}, "."), nil
}
// Generate the signing string. This is the
// most expensive part of the whole deal. Unless you
// need this for something special, just go straight for
// the SignedString.
func (t *Token) SigningString() (string, error) {
var err error
parts := make([]string, 2)
for i := range parts {
var jsonValue []byte
if i == 0 {
if jsonValue, err = json.Marshal(t.Header); err != nil {
return "", err
}
} else {
if jsonValue, err = json.Marshal(t.Claims); err != nil {
return "", err
}
}
parts[i] = EncodeSegment(jsonValue)
}
return strings.Join(parts, "."), nil
}
// Parse, validate, and return a token.
// keyFunc will receive the parsed token and should return the key for validating.
// If everything is kosher, err will be nil
func Parse(tokenString string, keyFunc Keyfunc) (*Token, error) {
return new(Parser).Parse(tokenString, keyFunc)
}
func ParseWithClaims(tokenString string, claims Claims, keyFunc Keyfunc) (*Token, error) {
return new(Parser).ParseWithClaims(tokenString, claims, keyFunc)
}
// Encode JWT specific base64url encoding with padding stripped
func EncodeSegment(seg []byte) string {
return strings.TrimRight(base64.URLEncoding.EncodeToString(seg), "=")
}
// Decode JWT specific base64url encoding with padding stripped
func DecodeSegment(seg string) ([]byte, error) {
if l := len(seg) % 4; l > 0 {
seg += strings.Repeat("=", 4-l)
}
return base64.URLEncoding.DecodeString(seg)
}
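A sketch using DecodeSegment to peek at the header and claims of a compact token without verifying it; handy for debugging, never for trust decisions:

```go
package main

import (
	"fmt"
	"strings"

	jwt "github.com/dgrijalva/jwt-go"
)

func main() {
	secret := []byte("debug-only-secret")
	signed, err := jwt.NewWithClaims(jwt.SigningMethodHS256, jwt.MapClaims{"sub": "demo"}).SignedString(secret)
	if err != nil {
		panic(err)
	}

	// A compact JWT is header.claims.signature; each segment is unpadded base64url.
	parts := strings.Split(signed, ".")
	header, _ := jwt.DecodeSegment(parts[0])
	claims, _ := jwt.DecodeSegment(parts[1])
	fmt.Printf("header: %s\nclaims: %s\n", header, claims)
}
```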

View file

@ -0,0 +1,22 @@
Copyright (c) 2017 Ernest Micklei
MIT License
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

View file

@ -1,10 +1,15 @@
# go-restful-swagger12
[![Build Status](https://travis-ci.org/emicklei/go-restful-swagger12.png)](https://travis-ci.org/emicklei/go-restful-swagger12)
[![GoDoc](https://godoc.org/github.com/emicklei/go-restful-swagger12?status.svg)](https://godoc.org/github.com/emicklei/go-restful-swagger12)
How to use Swagger UI with go-restful
=
Get the Swagger UI sources (version 1.2 only)
git clone https://github.com/wordnik/swagger-ui.git
The project contains a "dist" folder.
Its contents has all the Swagger UI files you need.
@ -18,9 +23,9 @@ Now, you can install the Swagger WebService for serving the Swagger specificatio
ApiPath: "/apidocs.json",
SwaggerPath: "/apidocs/",
SwaggerFilePath: "/Users/emicklei/Projects/swagger-ui/dist"}
swagger.InstallSwaggerService(config)
swagger.InstallSwaggerService(config)
Documenting Structs
--
@ -72,5 +77,7 @@ This example will generate a JSON like this
Notes
--
- The Nickname of an Operation is automatically set by finding the name of the function. You can override it using RouteBuilder.Operation(..)
- The Nickname of an Operation is automatically set by finding the name of the function. You can override it using RouteBuilder.Operation(..)
- The WebServices field of swagger.Config can be used to control which service you want to expose and document ; you can have multiple configs and therefore multiple endpoints.
© 2017, ernestmicklei.com. MIT License. Contributions welcome.

View file

@ -226,6 +226,9 @@ func (sws SwaggerService) composeDeclaration(ws *restful.WebService, pathPrefix
pathToRoutes := newOrderedRouteMap()
for _, other := range ws.Routes() {
if strings.HasPrefix(other.Path, pathPrefix) {
if len(pathPrefix) > 1 && len(other.Path) > len(pathPrefix) && other.Path[len(pathPrefix)] != '/' {
continue
}
pathToRoutes.Add(other.Path, other)
}
}
@ -290,13 +293,12 @@ func composeResponseMessages(route restful.Route, decl *ApiDeclaration, config *
if each.Model != nil {
st := reflect.TypeOf(each.Model)
isCollection, st := detectCollectionType(st)
modelName := modelBuilder{}.keyFrom(st)
if isCollection {
modelName = "array[" + modelName + "]"
// collection cannot be in responsemodel
if !isCollection {
modelName := modelBuilder{}.keyFrom(st)
modelBuilder{Models: &decl.Models, Config: config}.addModel(st, "")
message.ResponseModel = modelName
}
modelBuilder{Models: &decl.Models, Config: config}.addModel(st, "")
// reference the model
message.ResponseModel = modelName
}
messages = append(messages, message)
}
@ -331,12 +333,13 @@ func detectCollectionType(st reflect.Type) (bool, reflect.Type) {
// addModelFromSample creates and adds (or overwrites) a Model from a sample resource
func (sws SwaggerService) addModelFromSampleTo(operation *Operation, isResponse bool, sample interface{}, models *ModelList) {
mb := modelBuilder{Models: models, Config: &sws.config}
if isResponse {
type_, items := asDataType(sample, &sws.config)
operation.Type = type_
sampleType, items := asDataType(sample, &sws.config)
operation.Type = sampleType
operation.Items = items
}
modelBuilder{Models: models, Config: &sws.config}.addModelFrom(sample)
mb.addModelFrom(sample)
}
func asSwaggerParameter(param restful.ParameterData) Parameter {

View file

@ -1,8 +1,13 @@
go-restful
==========
package for building REST-style Web Services using Google Go
[![Build Status](https://travis-ci.org/emicklei/go-restful.png)](https://travis-ci.org/emicklei/go-restful)
[![Go Report Card](https://goreportcard.com/badge/github.com/emicklei/go-restful)](https://goreportcard.com/report/github.com/emicklei/go-restful)
[![GoDoc](https://godoc.org/github.com/emicklei/go-restful?status.svg)](https://godoc.org/github.com/emicklei/go-restful)
- [Code examples](https://github.com/emicklei/go-restful/tree/master/examples)
REST asks developers to use HTTP methods explicitly and in a way that's consistent with the protocol definition. This basic REST design principle establishes a one-to-one mapping between create, read, update, and delete (CRUD) operations and HTTP methods. According to this mapping:
- GET = Retrieve a representation of a resource
@ -40,35 +45,30 @@ func (u UserResource) findUser(request *restful.Request, response *restful.Respo
- Routes for request &#8594; function mapping with path parameter (e.g. {id}) support
- Configurable router:
- Routing algorithm after [JSR311](http://jsr311.java.net/nonav/releases/1.1/spec/spec.html) that is implemented using (but does **not** accept) regular expressions (See RouterJSR311 which is used by default)
- Fast routing algorithm that allows static elements, regular expressions and dynamic parameters in the URL path (e.g. /meetings/{id} or /static/{subpath:*}, See CurlyRouter)
- (default) Fast routing algorithm that allows static elements, regular expressions and dynamic parameters in the URL path (e.g. /meetings/{id} or /static/{subpath:*})
- Routing algorithm after [JSR311](http://jsr311.java.net/nonav/releases/1.1/spec/spec.html) that is implemented using (but does **not** accept) regular expressions
- Request API for reading structs from JSON/XML and accessing parameters (path, query, header)
- Response API for writing structs to JSON/XML and setting headers
- Customizable encoding using EntityReaderWriter registration
- Filters for intercepting the request &#8594; response flow on Service or Route level
- Request-scoped variables using attributes
- Containers for WebServices on different HTTP endpoints
- Content encoding (gzip,deflate) of request and response payloads
- Automatic responses on OPTIONS (using a filter)
- Automatic CORS request handling (using a filter)
- API declaration for Swagger UI (see swagger package)
- API declaration for Swagger UI (see [go-restful-swagger12](https://github.com/emicklei/go-restful-swagger12),[go-restful-openapi](https://github.com/emicklei/go-restful-openapi))
- Panic recovery to produce HTTP 500, customizable using RecoverHandler(...)
- Route errors produce HTTP 404/405/406/415 errors, customizable using ServiceErrorHandler(...)
- Configurable (trace) logging
- Customizable encoding using EntityReaderWriter registration
- Customizable gzip/deflate readers and writers using CompressorProvider registration
### Resources
- [Documentation on godoc.org](http://godoc.org/github.com/emicklei/go-restful)
- [Code examples](https://github.com/emicklei/go-restful/tree/master/examples)
- [Example posted on blog](http://ernestmicklei.com/2012/11/go-restful-first-working-example/)
- [Design explained on blog](http://ernestmicklei.com/2012/11/go-restful-api-design/)
- [sourcegraph](https://sourcegraph.com/github.com/emicklei/go-restful)
- [gopkg.in](https://gopkg.in/emicklei/go-restful.v1)
- [showcase: Mora - MongoDB REST Api server](https://github.com/emicklei/mora)
[![Build Status](https://drone.io/github.com/emicklei/go-restful/status.png)](https://drone.io/github.com/emicklei/go-restful/latest)
(c) 2012 - 2015, http://ernestmicklei.com. MIT License
Type ```git shortlog -s``` for a full list of contributors.
© 2012 - 2017, http://ernestmicklei.com. MIT License. Contributions are welcome.
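A minimal sketch of the route-building API summarized above; the /users path and handler body are invented, and the server wiring assumes go-restful's default container registered on http.DefaultServeMux:

```go
package main

import (
	"net/http"

	"github.com/emicklei/go-restful"
)

func findUser(req *restful.Request, resp *restful.Response) {
	// Path parameters come from the {user-id} template below.
	id := req.PathParameter("user-id")
	resp.WriteEntity(map[string]string{"id": id})
}

func main() {
	ws := new(restful.WebService)
	ws.Path("/users").
		Consumes(restful.MIME_JSON).
		Produces(restful.MIME_JSON)
	ws.Route(ws.GET("/{user-id}").To(findUser))

	restful.Add(ws) // register on the default container
	http.ListenAndServe(":8080", nil)
}
```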

View file

@ -9,6 +9,7 @@ import (
"compress/zlib"
)
// CompressorProvider describes a component that can provide compressors for the std methods.
type CompressorProvider interface {
// Returns a *gzip.Writer which needs to be released later.
// Before using it, call Reset().

View file

@ -32,7 +32,7 @@ type Container struct {
contentEncodingEnabled bool // default is false
}
// NewContainer creates a new Container using a new ServeMux and default router (RouterJSR311)
// NewContainer creates a new Container using a new ServeMux and default router (CurlyRouter)
func NewContainer() *Container {
return &Container{
webServices: []*WebService{},
@ -74,7 +74,7 @@ func (c *Container) DoNotRecover(doNot bool) {
c.doNotRecover = doNot
}
// Router changes the default Router (currently RouterJSR311)
// Router changes the default Router (currently CurlyRouter)
func (c *Container) Router(aRouter RouteSelector) {
c.router = aRouter
}
@ -188,6 +188,17 @@ func writeServiceError(err ServiceError, req *Request, resp *Response) {
resp.WriteErrorString(err.Code, err.Message)
}
// Dispatch the incoming Http Request to a matching WebService.
func (c *Container) Dispatch(httpWriter http.ResponseWriter, httpRequest *http.Request) {
if httpWriter == nil {
panic("httpWriter cannot be nil")
}
if httpRequest == nil {
panic("httpRequest cannot be nil")
}
c.dispatch(httpWriter, httpRequest)
}
// Dispatch the incoming Http Request to a matching WebService.
func (c *Container) dispatch(httpWriter http.ResponseWriter, httpRequest *http.Request) {
writer := httpWriter
@ -208,12 +219,6 @@ func (c *Container) dispatch(httpWriter http.ResponseWriter, httpRequest *http.R
}
}()
}
// Install closing the request body (if any)
defer func() {
if nil != httpRequest.Body {
httpRequest.Body.Close()
}
}()
// Detect if compression is needed
// assume without compression, test for override

View file

@ -1,5 +1,5 @@
/*
Package restful, a lean package for creating REST-style WebServices without magic.
Package restful , a lean package for creating REST-style WebServices without magic.
WebServices and Routes

View file

@ -5,7 +5,7 @@ import (
"os"
)
// Logger corresponds to a minimal subset of the interface satisfied by stdlib log.Logger
// StdLogger corresponds to a minimal subset of the interface satisfied by stdlib log.Logger
type StdLogger interface {
Print(v ...interface{})
Printf(format string, v ...interface{})
@ -18,14 +18,17 @@ func init() {
SetLogger(stdlog.New(os.Stderr, "[restful] ", stdlog.LstdFlags|stdlog.Lshortfile))
}
// SetLogger sets the logger for this package
func SetLogger(customLogger StdLogger) {
Logger = customLogger
}
// Print delegates to the Logger
func Print(v ...interface{}) {
Logger.Print(v...)
}
// Printf delegates to the Logger
func Printf(format string, v ...interface{}) {
Logger.Printf(format, v...)
}

View file

@ -5,20 +5,15 @@ package restful
// that can be found in the LICENSE file.
import (
"bytes"
"compress/zlib"
"io/ioutil"
"net/http"
)
var defaultRequestContentType string
var doCacheReadEntityBytes = false
// Request is a wrapper for an http.Request that provides convenience methods
type Request struct {
Request *http.Request
bodyContent *[]byte // to cache the request body for multiple reads of ReadEntity
pathParameters map[string]string
attributes map[string]interface{} // for storing request-scoped values
selectedRoutePath string // root path + route path that matched the request, e.g. /meetings/{id}/attendees
@ -41,12 +36,6 @@ func DefaultRequestContentType(mime string) {
defaultRequestContentType = mime
}
// SetCacheReadEntity controls whether the response data ([]byte) is cached such that ReadEntity is repeatable.
// Default is true (due to backwardcompatibility). For better performance, you should set it to false if you don't need it.
func SetCacheReadEntity(doCache bool) {
doCacheReadEntityBytes = doCache
}
// PathParameter accesses the Path parameter value by its name
func (r *Request) PathParameter(name string) string {
return r.pathParameters[name]
@ -81,18 +70,6 @@ func (r *Request) ReadEntity(entityPointer interface{}) (err error) {
contentType := r.Request.Header.Get(HEADER_ContentType)
contentEncoding := r.Request.Header.Get(HEADER_ContentEncoding)
// OLD feature, cache the body for reads
if doCacheReadEntityBytes {
if r.bodyContent == nil {
data, err := ioutil.ReadAll(r.Request.Body)
if err != nil {
return err
}
r.bodyContent = &data
}
r.Request.Body = ioutil.NopCloser(bytes.NewReader(*r.bodyContent))
}
// check if the request body needs decompression
if ENCODING_GZIP == contentEncoding {
gzipReader := currentCompressorProvider.AcquireGzipReader()

View file

@ -9,7 +9,7 @@ import (
"net/http"
)
// DEPRECATED, use DefaultResponseContentType(mime)
// DefaultResponseMimeType is DEPRECATED, use DefaultResponseContentType(mime)
var DefaultResponseMimeType string
//PrettyPrintResponses controls the indentation feature of XML and JSON serialization
@ -27,11 +27,12 @@ type Response struct {
err error // err property is kept when WriteError is called
}
// Creates a new response based on a http ResponseWriter.
// NewResponse creates a new response based on a http ResponseWriter.
func NewResponse(httpWriter http.ResponseWriter) *Response {
return &Response{httpWriter, "", []string{}, http.StatusOK, 0, PrettyPrintResponses, nil} // empty content-types
}
// DefaultResponseContentType set a default.
// If Accept header matching fails, fall back to this type.
// Valid values are restful.MIME_JSON and restful.MIME_XML
// Example:

View file

@ -34,6 +34,9 @@ type Route struct {
ParameterDocs []*Parameter
ResponseErrors map[int]ResponseError
ReadSample, WriteSample interface{} // structs that model an example request or response payload
// Extra information used to store custom information about the route.
Metadata map[string]interface{}
}
// Initialize for Route

View file

@ -5,10 +5,12 @@ package restful
// that can be found in the LICENSE file.
import (
"fmt"
"os"
"reflect"
"runtime"
"strings"
"sync/atomic"
"github.com/emicklei/go-restful/log"
)
@ -22,6 +24,9 @@ type RouteBuilder struct {
httpMethod string // required
function RouteFunction // required
filters []FilterFunction
typeNameHandleFunc TypeNameHandleFunction // required
// documentation
doc string
notes string
@ -29,6 +34,7 @@ type RouteBuilder struct {
readSample, writeSample interface{}
parameters []*Parameter
errorMap map[int]ResponseError
metadata map[string]interface{}
}
// Do evaluates each argument with the RouteBuilder itself.
@ -92,8 +98,13 @@ func (b *RouteBuilder) Notes(notes string) *RouteBuilder {
// Reads tells what resource type will be read from the request payload. Optional.
// A parameter of type "body" is added, required is set to true, and the dataType is set to the qualified name of the sample's type.
func (b *RouteBuilder) Reads(sample interface{}) *RouteBuilder {
fn := b.typeNameHandleFunc
if fn == nil {
fn = reflectTypeName
}
typeAsName := fn(sample)
b.readSample = sample
typeAsName := reflect.TypeOf(sample).String()
bodyParameter := &Parameter{&ParameterData{Name: "body"}}
bodyParameter.beBody()
bodyParameter.Required(true)
@ -145,9 +156,10 @@ func (b *RouteBuilder) ReturnsError(code int, message string, model interface{})
// The model parameter is optional ; either pass a struct instance or use nil if not applicable.
func (b *RouteBuilder) Returns(code int, message string, model interface{}) *RouteBuilder {
err := ResponseError{
Code: code,
Message: message,
Model: model,
Code: code,
Message: message,
Model: model,
IsDefault: false,
}
// lazy init because there is no NewRouteBuilder (yet)
if b.errorMap == nil {
@ -157,10 +169,36 @@ func (b *RouteBuilder) Returns(code int, message string, model interface{}) *Rou
return b
}
// DefaultReturns is a special Returns call that sets the default of the response ; the code is zero.
func (b *RouteBuilder) DefaultReturns(message string, model interface{}) *RouteBuilder {
b.Returns(0, message, model)
// Modify the ResponseError just added/updated
re := b.errorMap[0]
// errorMap is initialized
b.errorMap[0] = ResponseError{
Code: re.Code,
Message: re.Message,
Model: re.Model,
IsDefault: true,
}
return b
}
// Metadata adds or updates a key=value pair to the metadata map.
func (b *RouteBuilder) Metadata(key string, value interface{}) *RouteBuilder {
if b.metadata == nil {
b.metadata = map[string]interface{}{}
}
b.metadata[key] = value
return b
}
// ResponseError represents a response; not necessarily an error.
type ResponseError struct {
Code int
Message string
Model interface{}
Code int
Message string
Model interface{}
IsDefault bool
}
func (b *RouteBuilder) servicePath(path string) *RouteBuilder {
@ -186,6 +224,13 @@ func (b *RouteBuilder) copyDefaults(rootProduces, rootConsumes []string) {
}
}
// typeNameHandler sets the function that will convert types to strings in the parameter
// and model definitions.
func (b *RouteBuilder) typeNameHandler(handler TypeNameHandleFunction) *RouteBuilder {
b.typeNameHandleFunc = handler
return b
}
// Build creates a new Route using the specification details collected by the RouteBuilder
func (b *RouteBuilder) Build() Route {
pathExpr, err := newPathExpression(b.currentPath)
@ -217,7 +262,8 @@ func (b *RouteBuilder) Build() Route {
ParameterDocs: b.parameters,
ResponseErrors: b.errorMap,
ReadSample: b.readSample,
WriteSample: b.writeSample}
WriteSample: b.writeSample,
Metadata: b.metadata}
route.postBuild()
return route
}
@ -226,6 +272,8 @@ func concatPath(path1, path2 string) string {
return strings.TrimRight(path1, "/") + "/" + strings.TrimLeft(path2, "/")
}
var anonymousFuncCount int32
// nameOfFunction returns the short name of the function f for documentation.
// It uses a runtime feature for debugging ; its value may change for later Go versions.
func nameOfFunction(f interface{}) string {
@ -236,5 +284,10 @@ func nameOfFunction(f interface{}) string {
last = strings.TrimSuffix(last, ")-fm") // Go 1.5
last = strings.TrimSuffix(last, "·fm") // < Go 1.5
last = strings.TrimSuffix(last, "-fm") // Go 1.5
if last == "func1" { // this could mean conflicts in API docs
val := atomic.AddInt32(&anonymousFuncCount, 1)
last = "func" + fmt.Sprintf("%d", val)
atomic.StoreInt32(&anonymousFuncCount, val)
}
return last
}
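A sketch of the Returns/DefaultReturns/Metadata hooks from this file attached to a hypothetical route; the metadata key is arbitrary and only meaningful to whatever later reads Route.Metadata:

```go
package main

import (
	"net/http"

	"github.com/emicklei/go-restful"
)

func listPods(req *restful.Request, resp *restful.Response) {
	resp.WriteErrorString(http.StatusNotImplemented, "not implemented")
}

func main() {
	ws := new(restful.WebService)
	ws.Path("/pods")

	ws.Route(ws.GET("/").To(listPods).
		Returns(http.StatusOK, "OK", []string{}).
		DefaultReturns("unexpected error", nil).
		Metadata("x-audience", "cluster-admins")) // arbitrary key/value, read back via Route.Metadata

	restful.Add(ws) // serving omitted; see the WebService example above
}
```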

View file

@ -3,6 +3,7 @@ package restful
import (
"errors"
"os"
"reflect"
"sync"
"github.com/emicklei/go-restful/log"
@ -24,6 +25,8 @@ type WebService struct {
documentation string
apiVersion string
typeNameHandleFunc TypeNameHandleFunction
dynamicRoutes bool
// protects 'routes' if dynamic routes are enabled
@ -34,6 +37,25 @@ func (w *WebService) SetDynamicRoutes(enable bool) {
w.dynamicRoutes = enable
}
// TypeNameHandleFunction declares functions that can handle translating the name of a sample object
// into the restful documentation for the service.
type TypeNameHandleFunction func(sample interface{}) string
// TypeNameHandler sets the function that will convert types to strings in the parameter
// and model definitions. If not set, the web service will invoke
// reflect.TypeOf(object).String().
func (w *WebService) TypeNameHandler(handler TypeNameHandleFunction) *WebService {
w.typeNameHandleFunc = handler
return w
}
// reflectTypeName is the default TypeNameHandleFunction and for a given object
// returns the name that Go identifies it with (e.g. "string" or "v1.Object") via
// the reflection API.
func reflectTypeName(sample interface{}) string {
return reflect.TypeOf(sample).String()
}
// compilePathExpression ensures that the path is compiled into a RegEx for those routers that need it.
func (w *WebService) compilePathExpression() {
compiled, err := newPathExpression(w.rootPath)
@ -174,7 +196,7 @@ func (w *WebService) RemoveRoute(path, method string) error {
// Method creates a new RouteBuilder and initialize its http method
func (w *WebService) Method(httpMethod string) *RouteBuilder {
return new(RouteBuilder).servicePath(w.rootPath).Method(httpMethod)
return new(RouteBuilder).typeNameHandler(w.typeNameHandleFunc).servicePath(w.rootPath).Method(httpMethod)
}
// Produces specifies that this WebService can produce one or more MIME types.
@ -239,30 +261,30 @@ func (w *WebService) Documentation() string {
// HEAD is a shortcut for .Method("HEAD").Path(subPath)
func (w *WebService) HEAD(subPath string) *RouteBuilder {
return new(RouteBuilder).servicePath(w.rootPath).Method("HEAD").Path(subPath)
return new(RouteBuilder).typeNameHandler(w.typeNameHandleFunc).servicePath(w.rootPath).Method("HEAD").Path(subPath)
}
// GET is a shortcut for .Method("GET").Path(subPath)
func (w *WebService) GET(subPath string) *RouteBuilder {
return new(RouteBuilder).servicePath(w.rootPath).Method("GET").Path(subPath)
return new(RouteBuilder).typeNameHandler(w.typeNameHandleFunc).servicePath(w.rootPath).Method("GET").Path(subPath)
}
// POST is a shortcut for .Method("POST").Path(subPath)
func (w *WebService) POST(subPath string) *RouteBuilder {
return new(RouteBuilder).servicePath(w.rootPath).Method("POST").Path(subPath)
return new(RouteBuilder).typeNameHandler(w.typeNameHandleFunc).servicePath(w.rootPath).Method("POST").Path(subPath)
}
// PUT is a shortcut for .Method("PUT").Path(subPath)
func (w *WebService) PUT(subPath string) *RouteBuilder {
return new(RouteBuilder).servicePath(w.rootPath).Method("PUT").Path(subPath)
return new(RouteBuilder).typeNameHandler(w.typeNameHandleFunc).servicePath(w.rootPath).Method("PUT").Path(subPath)
}
// PATCH is a shortcut for .Method("PATCH").Path(subPath)
func (w *WebService) PATCH(subPath string) *RouteBuilder {
return new(RouteBuilder).servicePath(w.rootPath).Method("PATCH").Path(subPath)
return new(RouteBuilder).typeNameHandler(w.typeNameHandleFunc).servicePath(w.rootPath).Method("PATCH").Path(subPath)
}
// DELETE is a shortcut for .Method("DELETE").Path(subPath)
func (w *WebService) DELETE(subPath string) *RouteBuilder {
return new(RouteBuilder).servicePath(w.rootPath).Method("DELETE").Path(subPath)
return new(RouteBuilder).typeNameHandler(w.typeNameHandleFunc).servicePath(w.rootPath).Method("DELETE").Path(subPath)
}
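A sketch of the new TypeNameHandler hook: a hypothetical handler that strips the package qualifier from the default reflect-based name, so a documented body type reads "PodList" instead of "main.PodList":

```go
package main

import (
	"reflect"
	"strings"

	"github.com/emicklei/go-restful"
)

type PodList struct{}

func createPod(req *restful.Request, resp *restful.Response) {}

func main() {
	ws := new(restful.WebService)
	ws.Path("/pods")

	// Hypothetical handler; the default is reflect.TypeOf(sample).String().
	ws.TypeNameHandler(func(sample interface{}) string {
		name := reflect.TypeOf(sample).String()
		if i := strings.LastIndex(name, "."); i >= 0 {
			return name[i+1:]
		}
		return name
	})

	// Reads() consults the handler when documenting the body parameter.
	ws.Route(ws.POST("/").To(createPod).Reads(PodList{}))
	restful.Add(ws)
}
```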

21
vendor/github.com/exponent-io/jsonpath/LICENSE generated vendored Normal file
View file

@ -0,0 +1,21 @@
The MIT License (MIT)
Copyright (c) 2015 Exponent Labs LLC
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

66
vendor/github.com/exponent-io/jsonpath/README.md generated vendored Normal file
View file

@ -0,0 +1,66 @@
[![GoDoc](https://godoc.org/github.com/exponent-io/jsonpath?status.svg)](https://godoc.org/github.com/exponent-io/jsonpath)
[![Build Status](https://travis-ci.org/exponent-io/jsonpath.svg?branch=master)](https://travis-ci.org/exponent-io/jsonpath)
# jsonpath
This package extends the [json.Decoder](https://golang.org/pkg/encoding/json/#Decoder) to support navigating a stream of JSON tokens. You should be able to use this extended Decoder in places where a json.Decoder would have been used.
This Decoder has the following enhancements...
* The [Scan](https://godoc.org/github.com/exponent-io/jsonpath/#Decoder.Scan) method supports scanning a JSON stream while extracting particular values along the way using [PathActions](https://godoc.org/github.com/exponent-io/jsonpath#PathActions).
* The [SeekTo](https://godoc.org/github.com/exponent-io/jsonpath#Decoder.SeekTo) method supports seeking forward in a JSON token stream to a particular path.
* The [Path](https://godoc.org/github.com/exponent-io/jsonpath#Decoder.Path) method returns the path of the most recently parsed token.
* The [Token](https://godoc.org/github.com/exponent-io/jsonpath#Decoder.Token) method has been modified to distinguish between strings that are object keys and strings that are values. Object key strings are returned as the [KeyString](https://godoc.org/github.com/exponent-io/jsonpath#KeyString) type rather than a native string.
## Installation
go get -u github.com/exponent-io/jsonpath
## Example Usage
#### SeekTo
```go
import "github.com/exponent-io/jsonpath"
var j = []byte(`[
{"Space": "YCbCr", "Point": {"Y": 255, "Cb": 0, "Cr": -10}},
{"Space": "RGB", "Point": {"R": 98, "G": 218, "B": 255}}
]`)
w := json.NewDecoder(bytes.NewReader(j))
var v interface{}
w.SeekTo(1, "Point", "G")
w.Decode(&v) // v is 218
```
#### Scan with PathActions
```go
var j = []byte(`{"colors":[
{"Space": "YCbCr", "Point": {"Y": 255, "Cb": 0, "Cr": -10, "A": 58}},
{"Space": "RGB", "Point": {"R": 98, "G": 218, "B": 255, "A": 231}}
]}`)
var actions PathActions
// Extract the value at Point.A
actions.Add(func(d *Decoder) error {
var alpha int
err := d.Decode(&alpha)
fmt.Printf("Alpha: %v\n", alpha)
return err
}, "Point", "A")
w := NewDecoder(bytes.NewReader(j))
w.SeekTo("colors", 0)
var ok = true
var err error
for ok {
ok, err = w.Scan(&actions)
if err != nil && err != io.EOF {
panic(err)
}
}
```

210
vendor/github.com/exponent-io/jsonpath/decoder.go generated vendored Normal file
View file

@ -0,0 +1,210 @@
package jsonpath
import (
"encoding/json"
"io"
)
// KeyString is returned from Decoder.Token to represent each key in a JSON object value.
type KeyString string
// Decoder extends the Go runtime's encoding/json.Decoder to support navigating in a stream of JSON tokens.
type Decoder struct {
json.Decoder
path JsonPath
context jsonContext
}
// NewDecoder creates a new instance of the extended JSON Decoder.
func NewDecoder(r io.Reader) *Decoder {
return &Decoder{Decoder: *json.NewDecoder(r)}
}
// SeekTo causes the Decoder to move forward to a given path in the JSON structure.
//
// The path argument must consist of strings or integers. Each string specifies a JSON object key, and
// each integer specifies an index into a JSON array.
//
// Consider the JSON structure
//
// { "a": [0,"s",12e4,{"b":0,"v":35} ] }
//
// SeekTo("a",3,"v") will move to the value referenced by the "a" key in the current object,
// followed by a move to the 4th value (index 3) in the array, followed by a move to the value at key "v".
// In this example, a subsequent call to the decoder's Decode() would unmarshal the value 35.
//
// SeekTo returns a boolean value indicating whether a match was found.
//
// Decoder is intended to be used with a stream of tokens. As a result it navigates forward only.
func (d *Decoder) SeekTo(path ...interface{}) (bool, error) {
if len(path) == 0 {
return len(d.path) == 0, nil
}
last := len(path) - 1
if i, ok := path[last].(int); ok {
path[last] = i - 1
}
for {
if d.path.Equal(path) {
return true, nil
}
_, err := d.Token()
if err == io.EOF {
return false, nil
} else if err != nil {
return false, err
}
}
}
// Decode reads the next JSON-encoded value from its input and stores it in the value pointed to by v. This is
// equivalent to encoding/json.Decode().
func (d *Decoder) Decode(v interface{}) error {
switch d.context {
case objValue:
d.context = objKey
break
case arrValue:
d.path.incTop()
break
}
return d.Decoder.Decode(v)
}
// Path returns a slice of string and/or int values representing the path from the root of the JSON object to the
// position of the most-recently parsed token.
func (d *Decoder) Path() JsonPath {
p := make(JsonPath, len(d.path))
copy(p, d.path)
return p
}
// Token is equivalent to the Token() method on json.Decoder. The primary difference is that it distinguishes
// between strings that are keys and strings that are values. String tokens that are object keys are returned as a
// KeyString rather than as a native string.
func (d *Decoder) Token() (json.Token, error) {
t, err := d.Decoder.Token()
if err != nil {
return t, err
}
if t == nil {
switch d.context {
case objValue:
d.context = objKey
break
case arrValue:
d.path.incTop()
break
}
return t, err
}
switch t := t.(type) {
case json.Delim:
switch t {
case json.Delim('{'):
if d.context == arrValue {
d.path.incTop()
}
d.path.push("")
d.context = objKey
break
case json.Delim('}'):
d.path.pop()
d.context = d.path.inferContext()
break
case json.Delim('['):
if d.context == arrValue {
d.path.incTop()
}
d.path.push(-1)
d.context = arrValue
break
case json.Delim(']'):
d.path.pop()
d.context = d.path.inferContext()
break
}
case float64, json.Number, bool:
switch d.context {
case objValue:
d.context = objKey
break
case arrValue:
d.path.incTop()
break
}
break
case string:
switch d.context {
case objKey:
d.path.nameTop(t)
d.context = objValue
return KeyString(t), err
case objValue:
d.context = objKey
case arrValue:
d.path.incTop()
}
break
}
return t, err
}
// Scan moves forward over the JSON stream consuming all the tokens at the current level (current object, current array)
// invoking each matching PathAction along the way.
//
// Scan returns true if there are more contiguous values to scan (for example in an array).
func (d *Decoder) Scan(ext *PathActions) (bool, error) {
rootPath := d.Path()
// If this is an array path, increment the root path in our local copy.
if rootPath.inferContext() == arrValue {
rootPath.incTop()
}
for {
// advance the token position
_, err := d.Token()
if err != nil {
return false, err
}
match:
var relPath JsonPath
// capture the new JSON path
path := d.Path()
if len(path) > len(rootPath) {
// capture the path relative to where the scan started
relPath = path[len(rootPath):]
} else {
// if the path is not longer than the root, then we are done with this scan
// return boolean flag indicating if there are more items to scan at the same level
return d.Decoder.More(), nil
}
// match the relative path against the path actions
if node := ext.node.match(relPath); node != nil {
if node.action != nil {
// we have a match so execute the action
err = node.action(d)
if err != nil {
return d.Decoder.More(), err
}
// The action may have advanced the decoder. If we are in an array, advancing it further would
// skip tokens. So, if we are scanning an array, jump to the top without advancing the token.
if d.path.inferContext() == arrValue && d.Decoder.More() {
goto match
}
}
}
}
}
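A sketch of walking raw tokens with the extended Token method; KeyString is what lets the caller tell object keys apart from string values:

```go
package main

import (
	"bytes"
	"fmt"
	"io"

	"github.com/exponent-io/jsonpath"
)

func main() {
	doc := []byte(`{"Space": "RGB", "Point": {"R": 98}}`)
	d := jsonpath.NewDecoder(bytes.NewReader(doc))

	for {
		tok, err := d.Token()
		if err == io.EOF {
			break
		}
		if err != nil {
			panic(err)
		}
		// Object keys arrive as jsonpath.KeyString; plain string values stay string.
		if key, ok := tok.(jsonpath.KeyString); ok {
			fmt.Println("key:", key, "at", d.Path())
		}
	}
}
```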

67
vendor/github.com/exponent-io/jsonpath/path.go generated vendored Normal file
View file

@ -0,0 +1,67 @@
// Package jsonpath extends the Go runtime's json.Decoder, enabling navigation of a stream of JSON tokens.
package jsonpath
import "fmt"
type jsonContext int
const (
none jsonContext = iota
objKey
objValue
arrValue
)
// AnyIndex can be used in a pattern to match any array index.
const AnyIndex = -2
// JsonPath is a slice of strings and/or integers. Each string specifies a JSON object key, and
// each integer specifies an index into a JSON array.
type JsonPath []interface{}
func (p *JsonPath) push(n interface{}) { *p = append(*p, n) }
func (p *JsonPath) pop() { *p = (*p)[:len(*p)-1] }
// increment the index at the top of the stack (must be an array index)
func (p *JsonPath) incTop() { (*p)[len(*p)-1] = (*p)[len(*p)-1].(int) + 1 }
// name the key at the top of the stack (must be an object key)
func (p *JsonPath) nameTop(n string) { (*p)[len(*p)-1] = n }
// infer the context from the item at the top of the stack
func (p *JsonPath) inferContext() jsonContext {
if len(*p) == 0 {
return none
}
t := (*p)[len(*p)-1]
switch t.(type) {
case string:
return objKey
case int:
return arrValue
default:
panic(fmt.Sprintf("Invalid stack type %T", t))
}
}
// Equal tests for equality between two JsonPath types.
func (p *JsonPath) Equal(o JsonPath) bool {
if len(*p) != len(o) {
return false
}
for i, v := range *p {
if v != o[i] {
return false
}
}
return true
}
func (p *JsonPath) HasPrefix(o JsonPath) bool {
for i, v := range o {
if v != (*p)[i] {
return false
}
}
return true
}

61
vendor/github.com/exponent-io/jsonpath/pathaction.go generated vendored Normal file
View file

@ -0,0 +1,61 @@
package jsonpath
// pathNode is used to construct a trie of paths to be matched
type pathNode struct {
matchOn interface{} // string, or integer
childNodes []pathNode
action DecodeAction
}
// match climbs the trie to find a node that matches the given JSON path.
func (n *pathNode) match(path JsonPath) *pathNode {
var node *pathNode = n
for _, ps := range path {
found := false
for i, n := range node.childNodes {
if n.matchOn == ps {
node = &node.childNodes[i]
found = true
break
} else if _, ok := ps.(int); ok && n.matchOn == AnyIndex {
node = &node.childNodes[i]
found = true
break
}
}
if !found {
return nil
}
}
return node
}
// PathActions represents a collection of DecodeAction functions that should be called at certain path positions
// when scanning the JSON stream. PathActions can be created once and used many times in one or more JSON streams.
type PathActions struct {
node pathNode
}
// DecodeAction handlers are called by the Decoder when scanning objects. See PathActions.Add for more detail.
type DecodeAction func(d *Decoder) error
// Add specifies an action to call on the Decoder when the specified path is encountered.
func (je *PathActions) Add(action DecodeAction, path ...interface{}) {
var node *pathNode = &je.node
for _, ps := range path {
found := false
for i, n := range node.childNodes {
if n.matchOn == ps {
node = &node.childNodes[i]
found = true
break
}
}
if !found {
node.childNodes = append(node.childNodes, pathNode{matchOn: ps})
node = &node.childNodes[len(node.childNodes)-1]
}
}
node.action = action
}
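A sketch combining PathActions with AnyIndex to collect one field from every element of an array; the sample document mirrors the README above:

```go
package main

import (
	"bytes"
	"fmt"
	"io"

	"github.com/exponent-io/jsonpath"
)

func main() {
	doc := []byte(`{"colors":[
		{"Space": "YCbCr", "Point": {"Y": 255}},
		{"Space": "RGB", "Point": {"R": 98}}
	]}`)

	var actions jsonpath.PathActions
	// AnyIndex matches every array index, so the action fires once per color.
	actions.Add(func(d *jsonpath.Decoder) error {
		var space string
		err := d.Decode(&space)
		fmt.Println("Space:", space)
		return err
	}, "colors", jsonpath.AnyIndex, "Space")

	d := jsonpath.NewDecoder(bytes.NewReader(doc))
	var err error
	for more := true; more; {
		more, err = d.Scan(&actions)
		if err != nil && err != io.EOF {
			panic(err)
		}
	}
}
```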

202
vendor/github.com/go-openapi/analysis/LICENSE generated vendored Normal file
View file

@ -0,0 +1,202 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

6
vendor/github.com/go-openapi/analysis/README.md generated vendored Normal file

@ -0,0 +1,6 @@
# OpenAPI initiative analysis [![Build Status](https://ci.vmware.run/api/badges/go-openapi/analysis/status.svg)](https://ci.vmware.run/go-openapi/analysis) [![Coverage](https://coverage.vmware.run/badges/go-openapi/analysis/coverage.svg)](https://coverage.vmware.run/go-openapi/analysis) [![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io)
[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/analysis/master/LICENSE) [![GoDoc](https://godoc.org/github.com/go-openapi/analysis?status.svg)](http://godoc.org/github.com/go-openapi/analysis)
A foundational library to analyze an OAI specification document for easier reasoning about the content.

614
vendor/github.com/go-openapi/analysis/analyzer.go generated vendored Normal file

@ -0,0 +1,614 @@
// Copyright 2015 go-swagger maintainers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package analysis
import (
"fmt"
slashpath "path"
"strconv"
"strings"
"github.com/go-openapi/jsonpointer"
"github.com/go-openapi/spec"
"github.com/go-openapi/swag"
)
type referenceAnalysis struct {
schemas map[string]spec.Ref
responses map[string]spec.Ref
parameters map[string]spec.Ref
items map[string]spec.Ref
allRefs map[string]spec.Ref
referenced struct {
schemas map[string]SchemaRef
responses map[string]*spec.Response
parameters map[string]*spec.Parameter
}
}
func (r *referenceAnalysis) addRef(key string, ref spec.Ref) {
r.allRefs["#"+key] = ref
}
func (r *referenceAnalysis) addItemsRef(key string, items *spec.Items) {
r.items["#"+key] = items.Ref
r.addRef(key, items.Ref)
}
func (r *referenceAnalysis) addSchemaRef(key string, ref SchemaRef) {
r.schemas["#"+key] = ref.Schema.Ref
r.addRef(key, ref.Schema.Ref)
}
func (r *referenceAnalysis) addResponseRef(key string, resp *spec.Response) {
r.responses["#"+key] = resp.Ref
r.addRef(key, resp.Ref)
}
func (r *referenceAnalysis) addParamRef(key string, param *spec.Parameter) {
r.parameters["#"+key] = param.Ref
r.addRef(key, param.Ref)
}
// New takes a swagger spec object and returns an analyzed spec document.
// The analyzed document contains a number of indices that make it easier to
// reason about semantics of a swagger specification for use in code generation
// or validation etc.
func New(doc *spec.Swagger) *Spec {
a := &Spec{
spec: doc,
consumes: make(map[string]struct{}, 150),
produces: make(map[string]struct{}, 150),
authSchemes: make(map[string]struct{}, 150),
operations: make(map[string]map[string]*spec.Operation, 150),
allSchemas: make(map[string]SchemaRef, 150),
allOfs: make(map[string]SchemaRef, 150),
references: referenceAnalysis{
schemas: make(map[string]spec.Ref, 150),
responses: make(map[string]spec.Ref, 150),
parameters: make(map[string]spec.Ref, 150),
items: make(map[string]spec.Ref, 150),
allRefs: make(map[string]spec.Ref, 150),
},
}
a.references.referenced.schemas = make(map[string]SchemaRef, 150)
a.references.referenced.responses = make(map[string]*spec.Response, 150)
a.references.referenced.parameters = make(map[string]*spec.Parameter, 150)
a.initialize()
return a
}
// Spec is an analyzed swagger spec document; it acts as a registry
// with a number of utility methods to query the information in the spec
type Spec struct {
spec *spec.Swagger
consumes map[string]struct{}
produces map[string]struct{}
authSchemes map[string]struct{}
operations map[string]map[string]*spec.Operation
references referenceAnalysis
allSchemas map[string]SchemaRef
allOfs map[string]SchemaRef
}
func (s *Spec) initialize() {
for _, c := range s.spec.Consumes {
s.consumes[c] = struct{}{}
}
for _, c := range s.spec.Produces {
s.produces[c] = struct{}{}
}
for _, ss := range s.spec.Security {
for k := range ss {
s.authSchemes[k] = struct{}{}
}
}
for path, pathItem := range s.AllPaths() {
s.analyzeOperations(path, &pathItem)
}
for name, parameter := range s.spec.Parameters {
refPref := slashpath.Join("/parameters", jsonpointer.Escape(name))
if parameter.Items != nil {
s.analyzeItems("items", parameter.Items, refPref)
}
if parameter.In == "body" && parameter.Schema != nil {
s.analyzeSchema("schema", *parameter.Schema, refPref)
}
}
for name, response := range s.spec.Responses {
refPref := slashpath.Join("/responses", jsonpointer.Escape(name))
for _, v := range response.Headers {
if v.Items != nil {
s.analyzeItems("items", v.Items, refPref)
}
}
if response.Schema != nil {
s.analyzeSchema("schema", *response.Schema, refPref)
}
}
for name, schema := range s.spec.Definitions {
s.analyzeSchema(name, schema, "/definitions")
}
// TODO: after analyzing all things and flattening schemas etc
// resolve all the collected references to their final representations
// best put in a separate method because this could get expensive
}
func (s *Spec) analyzeOperations(path string, pi *spec.PathItem) {
// TODO: resolve refs here?
op := pi
s.analyzeOperation("GET", path, op.Get)
s.analyzeOperation("PUT", path, op.Put)
s.analyzeOperation("POST", path, op.Post)
s.analyzeOperation("PATCH", path, op.Patch)
s.analyzeOperation("DELETE", path, op.Delete)
s.analyzeOperation("HEAD", path, op.Head)
s.analyzeOperation("OPTIONS", path, op.Options)
for i, param := range op.Parameters {
refPref := slashpath.Join("/paths", jsonpointer.Escape(path), "parameters", strconv.Itoa(i))
if param.Ref.String() != "" {
s.references.addParamRef(refPref, &param)
}
if param.Items != nil {
s.analyzeItems("items", param.Items, refPref)
}
if param.Schema != nil {
s.analyzeSchema("schema", *param.Schema, refPref)
}
}
}
func (s *Spec) analyzeItems(name string, items *spec.Items, prefix string) {
if items == nil {
return
}
refPref := slashpath.Join(prefix, name)
s.analyzeItems(name, items.Items, refPref)
if items.Ref.String() != "" {
s.references.addItemsRef(refPref, items)
}
}
func (s *Spec) analyzeOperation(method, path string, op *spec.Operation) {
if op == nil {
return
}
for _, c := range op.Consumes {
s.consumes[c] = struct{}{}
}
for _, c := range op.Produces {
s.produces[c] = struct{}{}
}
for _, ss := range op.Security {
for k := range ss {
s.authSchemes[k] = struct{}{}
}
}
if _, ok := s.operations[method]; !ok {
s.operations[method] = make(map[string]*spec.Operation)
}
s.operations[method][path] = op
prefix := slashpath.Join("/paths", jsonpointer.Escape(path), strings.ToLower(method))
for i, param := range op.Parameters {
refPref := slashpath.Join(prefix, "parameters", strconv.Itoa(i))
if param.Ref.String() != "" {
s.references.addParamRef(refPref, &param)
}
s.analyzeItems("items", param.Items, refPref)
if param.In == "body" && param.Schema != nil {
s.analyzeSchema("schema", *param.Schema, refPref)
}
}
if op.Responses != nil {
if op.Responses.Default != nil {
refPref := slashpath.Join(prefix, "responses", "default")
if op.Responses.Default.Ref.String() != "" {
s.references.addResponseRef(refPref, op.Responses.Default)
}
for _, v := range op.Responses.Default.Headers {
s.analyzeItems("items", v.Items, refPref)
}
if op.Responses.Default.Schema != nil {
s.analyzeSchema("schema", *op.Responses.Default.Schema, refPref)
}
}
for k, res := range op.Responses.StatusCodeResponses {
refPref := slashpath.Join(prefix, "responses", strconv.Itoa(k))
if res.Ref.String() != "" {
s.references.addResponseRef(refPref, &res)
}
for _, v := range res.Headers {
s.analyzeItems("items", v.Items, refPref)
}
if res.Schema != nil {
s.analyzeSchema("schema", *res.Schema, refPref)
}
}
}
}
func (s *Spec) analyzeSchema(name string, schema spec.Schema, prefix string) {
refURI := slashpath.Join(prefix, jsonpointer.Escape(name))
schRef := SchemaRef{
Name: name,
Schema: &schema,
Ref: spec.MustCreateRef("#" + refURI),
}
s.allSchemas["#"+refURI] = schRef
if schema.Ref.String() != "" {
s.references.addSchemaRef(refURI, schRef)
}
for k, v := range schema.Definitions {
s.analyzeSchema(k, v, slashpath.Join(refURI, "definitions"))
}
for k, v := range schema.Properties {
s.analyzeSchema(k, v, slashpath.Join(refURI, "properties"))
}
for k, v := range schema.PatternProperties {
s.analyzeSchema(k, v, slashpath.Join(refURI, "patternProperties"))
}
for i, v := range schema.AllOf {
s.analyzeSchema(strconv.Itoa(i), v, slashpath.Join(refURI, "allOf"))
}
if len(schema.AllOf) > 0 {
s.allOfs["#"+refURI] = SchemaRef{Name: name, Schema: &schema, Ref: spec.MustCreateRef("#" + refURI)}
}
for i, v := range schema.AnyOf {
s.analyzeSchema(strconv.Itoa(i), v, slashpath.Join(refURI, "anyOf"))
}
for i, v := range schema.OneOf {
s.analyzeSchema(strconv.Itoa(i), v, slashpath.Join(refURI, "oneOf"))
}
if schema.Not != nil {
s.analyzeSchema("not", *schema.Not, refURI)
}
if schema.AdditionalProperties != nil && schema.AdditionalProperties.Schema != nil {
s.analyzeSchema("additionalProperties", *schema.AdditionalProperties.Schema, refURI)
}
if schema.AdditionalItems != nil && schema.AdditionalItems.Schema != nil {
s.analyzeSchema("additionalItems", *schema.AdditionalItems.Schema, refURI)
}
if schema.Items != nil {
if schema.Items.Schema != nil {
s.analyzeSchema("items", *schema.Items.Schema, refURI)
}
for i, sch := range schema.Items.Schemas {
s.analyzeSchema(strconv.Itoa(i), sch, slashpath.Join(refURI, "items"))
}
}
}
// SecurityRequirement is a representation of a security requirement for an operation
type SecurityRequirement struct {
Name string
Scopes []string
}
// SecurityRequirementsFor gets the security requirements for the operation
func (s *Spec) SecurityRequirementsFor(operation *spec.Operation) []SecurityRequirement {
if s.spec.Security == nil && operation.Security == nil {
return nil
}
schemes := s.spec.Security
if operation.Security != nil {
schemes = operation.Security
}
unique := make(map[string]SecurityRequirement)
for _, scheme := range schemes {
for k, v := range scheme {
if _, ok := unique[k]; !ok {
unique[k] = SecurityRequirement{Name: k, Scopes: v}
}
}
}
var result []SecurityRequirement
for _, v := range unique {
result = append(result, v)
}
return result
}
// SecurityDefinitionsFor gets the matching security definitions for a set of requirements
func (s *Spec) SecurityDefinitionsFor(operation *spec.Operation) map[string]spec.SecurityScheme {
requirements := s.SecurityRequirementsFor(operation)
if len(requirements) == 0 {
return nil
}
result := make(map[string]spec.SecurityScheme)
for _, v := range requirements {
if definition, ok := s.spec.SecurityDefinitions[v.Name]; ok {
if definition != nil {
result[v.Name] = *definition
}
}
}
return result
}
// ConsumesFor gets the mediatypes for the operation
func (s *Spec) ConsumesFor(operation *spec.Operation) []string {
if len(operation.Consumes) == 0 {
cons := make(map[string]struct{}, len(s.spec.Consumes))
for _, k := range s.spec.Consumes {
cons[k] = struct{}{}
}
return s.structMapKeys(cons)
}
cons := make(map[string]struct{}, len(operation.Consumes))
for _, c := range operation.Consumes {
cons[c] = struct{}{}
}
return s.structMapKeys(cons)
}
// ProducesFor gets the mediatypes for the operation
func (s *Spec) ProducesFor(operation *spec.Operation) []string {
if len(operation.Produces) == 0 {
prod := make(map[string]struct{}, len(s.spec.Produces))
for _, k := range s.spec.Produces {
prod[k] = struct{}{}
}
return s.structMapKeys(prod)
}
prod := make(map[string]struct{}, len(operation.Produces))
for _, c := range operation.Produces {
prod[c] = struct{}{}
}
return s.structMapKeys(prod)
}
func mapKeyFromParam(param *spec.Parameter) string {
return fmt.Sprintf("%s#%s", param.In, fieldNameFromParam(param))
}
func fieldNameFromParam(param *spec.Parameter) string {
if nm, ok := param.Extensions.GetString("go-name"); ok {
return nm
}
return swag.ToGoName(param.Name)
}
func (s *Spec) paramsAsMap(parameters []spec.Parameter, res map[string]spec.Parameter) {
for _, param := range parameters {
pr := param
if pr.Ref.String() != "" {
obj, _, err := pr.Ref.GetPointer().Get(s.spec)
if err != nil {
panic(err)
}
pr = obj.(spec.Parameter)
}
res[mapKeyFromParam(&pr)] = pr
}
}
// ParametersFor the specified operation id
func (s *Spec) ParametersFor(operationID string) []spec.Parameter {
gatherParams := func(pi *spec.PathItem, op *spec.Operation) []spec.Parameter {
bag := make(map[string]spec.Parameter)
s.paramsAsMap(pi.Parameters, bag)
s.paramsAsMap(op.Parameters, bag)
var res []spec.Parameter
for _, v := range bag {
res = append(res, v)
}
return res
}
for _, pi := range s.spec.Paths.Paths {
if pi.Get != nil && pi.Get.ID == operationID {
return gatherParams(&pi, pi.Get)
}
if pi.Head != nil && pi.Head.ID == operationID {
return gatherParams(&pi, pi.Head)
}
if pi.Options != nil && pi.Options.ID == operationID {
return gatherParams(&pi, pi.Options)
}
if pi.Post != nil && pi.Post.ID == operationID {
return gatherParams(&pi, pi.Post)
}
if pi.Patch != nil && pi.Patch.ID == operationID {
return gatherParams(&pi, pi.Patch)
}
if pi.Put != nil && pi.Put.ID == operationID {
return gatherParams(&pi, pi.Put)
}
if pi.Delete != nil && pi.Delete.ID == operationID {
return gatherParams(&pi, pi.Delete)
}
}
return nil
}
// ParamsFor the specified method and path. Aggregates them with the defaults etc, so it's all the params that
// apply for the method and path.
func (s *Spec) ParamsFor(method, path string) map[string]spec.Parameter {
res := make(map[string]spec.Parameter)
if pi, ok := s.spec.Paths.Paths[path]; ok {
s.paramsAsMap(pi.Parameters, res)
s.paramsAsMap(s.operations[strings.ToUpper(method)][path].Parameters, res)
}
return res
}
// OperationForName gets the operation for the given id
func (s *Spec) OperationForName(operationID string) (string, string, *spec.Operation, bool) {
for method, pathItem := range s.operations {
for path, op := range pathItem {
if operationID == op.ID {
return method, path, op, true
}
}
}
return "", "", nil, false
}
// OperationFor the given method and path
func (s *Spec) OperationFor(method, path string) (*spec.Operation, bool) {
if mp, ok := s.operations[strings.ToUpper(method)]; ok {
op, fn := mp[path]
return op, fn
}
return nil, false
}
// Operations gathers all the operations specified in the spec document
func (s *Spec) Operations() map[string]map[string]*spec.Operation {
return s.operations
}
func (s *Spec) structMapKeys(mp map[string]struct{}) []string {
if len(mp) == 0 {
return nil
}
result := make([]string, 0, len(mp))
for k := range mp {
result = append(result, k)
}
return result
}
// AllPaths returns all the paths in the swagger spec
func (s *Spec) AllPaths() map[string]spec.PathItem {
if s.spec == nil || s.spec.Paths == nil {
return nil
}
return s.spec.Paths.Paths
}
// OperationIDs gets all the operation ids based on method and path
func (s *Spec) OperationIDs() []string {
if len(s.operations) == 0 {
return nil
}
result := make([]string, 0, len(s.operations))
for method, v := range s.operations {
for p, o := range v {
if o.ID != "" {
result = append(result, o.ID)
} else {
result = append(result, fmt.Sprintf("%s %s", strings.ToUpper(method), p))
}
}
}
return result
}
// RequiredConsumes gets all the distinct consumes that are specified in the specification document
func (s *Spec) RequiredConsumes() []string {
return s.structMapKeys(s.consumes)
}
// RequiredProduces gets all the distinct produces that are specified in the specification document
func (s *Spec) RequiredProduces() []string {
return s.structMapKeys(s.produces)
}
// RequiredSecuritySchemes gets all the distinct security schemes that are specified in the swagger spec
func (s *Spec) RequiredSecuritySchemes() []string {
return s.structMapKeys(s.authSchemes)
}
// SchemaRef is a reference to a schema
type SchemaRef struct {
Name string
Ref spec.Ref
Schema *spec.Schema
}
// SchemasWithAllOf returns schema references to all schemas that are defined
// with an allOf key
func (s *Spec) SchemasWithAllOf() (result []SchemaRef) {
for _, v := range s.allOfs {
result = append(result, v)
}
return
}
// AllDefinitions returns schema references for all the definitions that were discovered
func (s *Spec) AllDefinitions() (result []SchemaRef) {
for _, v := range s.allSchemas {
result = append(result, v)
}
return
}
// AllDefinitionReferences returns json refs for all the discovered schemas
func (s *Spec) AllDefinitionReferences() (result []string) {
for _, v := range s.references.schemas {
result = append(result, v.String())
}
return
}
// AllParameterReferences returns json refs for all the discovered parameters
func (s *Spec) AllParameterReferences() (result []string) {
for _, v := range s.references.parameters {
result = append(result, v.String())
}
return
}
// AllResponseReferences returns json refs for all the discovered responses
func (s *Spec) AllResponseReferences() (result []string) {
for _, v := range s.references.responses {
result = append(result, v.String())
}
return
}
// AllItemsReferences returns the references for all the items
func (s *Spec) AllItemsReferences() (result []string) {
for _, v := range s.references.items {
result = append(result, v.String())
}
return
}
// AllReferences returns all the references found in the document
func (s *Spec) AllReferences() (result []string) {
for _, v := range s.references.allRefs {
result = append(result, v.String())
}
return
}
// AllRefs returns all the unique references found in the document
func (s *Spec) AllRefs() (result []spec.Ref) {
set := make(map[string]struct{})
for _, v := range s.references.allRefs {
a := v.String()
if a == "" {
continue
}
if _, ok := set[a]; !ok {
set[a] = struct{}{}
result = append(result, v)
}
}
return
}
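
Editor's sketch (not part of the vendored diff): a minimal example that only calls entry points defined in analyzer.go above (New, OperationIDs, OperationForName, ConsumesFor, ProducesFor, ParamsFor, AllPaths). The empty spec.Swagger value and the "/pets/{id}" route are placeholders; a real document would typically come from a loader such as go-openapi/loads, whose license follows below.

package main

import (
	"fmt"

	"github.com/go-openapi/analysis"
	"github.com/go-openapi/spec"
)

// summarize prints a few of the indices that analysis.New builds for a document.
func summarize(doc *spec.Swagger) {
	an := analysis.New(doc)

	// Walk every operation the analyzer indexed by method and path.
	for _, id := range an.OperationIDs() {
		if method, path, op, ok := an.OperationForName(id); ok {
			fmt.Printf("%s %s consumes=%v produces=%v\n",
				method, path, an.ConsumesFor(op), an.ProducesFor(op))
		}
	}

	// Aggregated parameters (path-item defaults merged with operation-level
	// ones) for a hypothetical route; guarded so a missing path is a no-op.
	if _, ok := an.AllPaths()["/pets/{id}"]; ok {
		for _, p := range an.ParamsFor("GET", "/pets/{id}") {
			fmt.Println(p.Name, p.In)
		}
	}
}

func main() {
	summarize(&spec.Swagger{}) // placeholder document
}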

202
vendor/github.com/go-openapi/loads/LICENSE generated vendored Normal file

@ -0,0 +1,202 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

Some files were not shown because too many files have changed in this diff.