*: update kube vendor to v1.7.4

Signed-off-by: Antonio Murdaca <runcom@redhat.com>

parent c67859731f
commit d56bf090ce

1032 changed files with 273965 additions and 40081 deletions
@@ -1,4 +1,4 @@
-FROM golang:1.7
+FROM golang:1.8
 
 # libseccomp in jessie is not _quite_ new enough -- need backports version
 RUN echo 'deb http://httpredir.debian.org/debian jessie-backports main' > /etc/apt/sources.list.d/backports.list
@@ -18,7 +18,7 @@ import (
 "github.com/urfave/cli"
 "golang.org/x/sys/unix"
 "google.golang.org/grpc"
-"k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
+"k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
 )
 
 const crioConfigPath = "/etc/crio/crio.conf"
@@ -10,10 +10,10 @@ import (
 
 "github.com/urfave/cli"
 "golang.org/x/net/context"
+remocommandconsts "k8s.io/apimachinery/pkg/util/remotecommand"
 restclient "k8s.io/client-go/rest"
-"k8s.io/kubernetes/pkg/client/unversioned/remotecommand"
-pb "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
-remotecommandserver "k8s.io/kubernetes/pkg/kubelet/server/remotecommand"
+"k8s.io/client-go/tools/remotecommand"
+pb "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
 )
 
 var containerCommand = cli.Command{
@@ -533,7 +533,7 @@ func Exec(client pb.RuntimeServiceClient, ID string, tty bool, stdin bool, urlOn
 }
 
 options := remotecommand.StreamOptions{
-SupportedProtocols: remotecommandserver.SupportedStreamingProtocols,
+SupportedProtocols: remocommandconsts.SupportedStreamingProtocols,
 Stdout: os.Stdout,
 Stderr: os.Stderr,
 Tty: tty,
@@ -5,7 +5,7 @@ import (
 
 "github.com/urfave/cli"
 "golang.org/x/net/context"
-pb "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
+pb "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
 )
 
 var imageCommand = cli.Command{
@@ -10,7 +10,7 @@ import (
 "github.com/sirupsen/logrus"
 "github.com/urfave/cli"
 "google.golang.org/grpc"
-pb "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
+pb "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
 )
 
 func getClientConnection(context *cli.Context) (*grpc.ClientConn, error) {
@@ -9,7 +9,7 @@ import (
 
 "github.com/urfave/cli"
 "golang.org/x/net/context"
-pb "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
+pb "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
 )
 
 var podSandboxCommand = cli.Command{
@@ -289,7 +289,6 @@ func PodSandboxStatus(client pb.RuntimeServiceClient, ID string) error {
 fmt.Printf("Status: %s\n", r.Status.State)
 ctm := time.Unix(0, r.Status.CreatedAt)
 fmt.Printf("Created: %v\n", ctm)
-fmt.Printf("Network namespace: %s\n", r.Status.Linux.Namespaces.Network)
 if r.Status.Network != nil {
 fmt.Printf("IP Address: %v\n", r.Status.Network.Ip)
 }
@@ -5,7 +5,7 @@ import (
 
 "github.com/urfave/cli"
 "golang.org/x/net/context"
-pb "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
+pb "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
 )
 
 var runtimeVersionCommand = cli.Command{
@@ -216,6 +216,20 @@
 shell: "cp test/plugin_test_args.bash /opt/cni/bin/"
 args:
 chdir: /root/src/github.com/kubernetes-incubator/cri-o/
+# k8s builds with go1.8.x, rhel, fedora don't have it yet
+- name: install Golang upstream in Fedora/RHEL
+shell: |
+curl -fsSL "https://golang.org/dl/go1.8.3.linux-amd64.tar.gz" \
+| tar -xzC /usr/local
+when: ansible_distribution == 'Fedora' or ansible_distribution == 'RedHat'
+- name: Set custom Golang path for Fedora/RHEL
+lineinfile:
+dest: /root/.bashrc
+line: 'export PATH=/usr/local/go/bin:$PATH'
+insertafter: 'EOF'
+regexp: 'export PATH=/usr/local/go/bin:$PATH'
+state: present
+when: ansible_distribution == 'Fedora' or ansible_distribution == 'RedHat'
 - name: run integration tests RHEL
 shell: 'CGROUP_MANAGER=cgroupfs STORAGE_OPTS="--storage-driver=overlay2 --storage-opt overlay2.override_kernel_check=1" make localintegration 2>&1 > testout.txt'
 args:
@@ -258,14 +272,12 @@
 shell: 'mv /root/src/github.com/kubernetes-incubator/cri-o/test/TestReport-bats*.xml /root/src/github.com/kubernetes-incubator/cri-o/reports/'
 when: xunit
 # XXX: kube tests from now on
-# TODO: remove custom repo origin and put kube master back after PR #510
-# we need this because of node-e2e tests failing on RHEL otherwise
-# For more info see issue #529
 - name: git clone k8s repo
 git:
 repo: https://github.com/runcom/kubernetes
 dest: /root/src/k8s.io/kubernetes
-version: custom-RHEL-node-e2e
+# based on kube upstream v1.7.4
+version: cri-o-node-e2e-patched
 force: yes
 async: 600
 poll: 10
@@ -5,7 +5,7 @@ import (
 "os"
 
 "k8s.io/apimachinery/pkg/fields"
-pb "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
+pb "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
 
 "github.com/kubernetes-incubator/cri-o/libkpod/driver"
 libkpodimage "github.com/kubernetes-incubator/cri-o/libkpod/image"
@@ -22,7 +22,7 @@ import (
 "github.com/opencontainers/selinux/go-selinux/label"
 "github.com/pkg/errors"
 "github.com/sirupsen/logrus"
-pb "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
+pb "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
 )
 
 // ContainerServer implements the ImageServer
@@ -4,7 +4,7 @@ import (
 "encoding/json"
 "path/filepath"
 
-"k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
+"k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
 
 "github.com/docker/docker/pkg/ioutils"
 "github.com/kubernetes-incubator/cri-o/oci"
@@ -15,7 +15,7 @@ import (
 "github.com/sirupsen/logrus"
 "golang.org/x/sys/unix"
 "k8s.io/apimachinery/pkg/fields"
-pb "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
+pb "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
 "k8s.io/kubernetes/pkg/kubelet/network/hostport"
 )
 
@@ -13,7 +13,7 @@ import (
 "github.com/docker/docker/pkg/signal"
 specs "github.com/opencontainers/runtime-spec/specs-go"
 "k8s.io/apimachinery/pkg/fields"
-pb "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
+pb "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
 )
 
 const (
@@ -12,9 +12,9 @@ import (
 "github.com/sirupsen/logrus"
 "golang.org/x/net/context"
 "golang.org/x/sys/unix"
-pb "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
+"k8s.io/client-go/tools/remotecommand"
+pb "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
 kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
 "k8s.io/kubernetes/pkg/util/term"
 )
 
 /* Sync with stdpipe_t in conmon.c */
@@ -37,7 +37,7 @@ func (s *Server) Attach(ctx context.Context, req *pb.AttachRequest) (*pb.AttachR
 }
 
 // Attach endpoint for streaming.Runtime
-func (ss streamService) Attach(containerID string, inputStream io.Reader, outputStream, errorStream io.WriteCloser, tty bool, resize <-chan term.Size) error {
+func (ss streamService) Attach(containerID string, inputStream io.Reader, outputStream, errorStream io.WriteCloser, tty bool, resize <-chan remotecommand.TerminalSize) error {
 c := ss.runtimeServer.GetContainer(containerID)
 
 if c == nil {
@@ -59,7 +59,7 @@ func (ss streamService) Attach(containerID string, inputStream io.Reader, output
 return fmt.Errorf("failed to open container ctl file: %v", err)
 }
 
-kubecontainer.HandleResizing(resize, func(size term.Size) {
+kubecontainer.HandleResizing(resize, func(size remotecommand.TerminalSize) {
 logrus.Infof("Got a resize event: %+v", size)
 _, err := fmt.Fprintf(controlFile, "%d %d %d\n", 1, size.Height, size.Width)
 if err != nil {
@@ -31,7 +31,7 @@ import (
 "github.com/sirupsen/logrus"
 "golang.org/x/net/context"
 "golang.org/x/sys/unix"
-pb "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
+pb "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
 )
 
 const (
@@ -10,7 +10,8 @@ import (
 "github.com/kubernetes-incubator/cri-o/oci"
 "github.com/sirupsen/logrus"
 "golang.org/x/net/context"
-pb "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
+"k8s.io/client-go/tools/remotecommand"
+pb "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
 kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
 utilexec "k8s.io/kubernetes/pkg/util/exec"
 "k8s.io/kubernetes/pkg/util/term"
@@ -29,7 +30,7 @@ func (s *Server) Exec(ctx context.Context, req *pb.ExecRequest) (*pb.ExecRespons
 }
 
 // Exec endpoint for streaming.Runtime
-func (ss streamService) Exec(containerID string, cmd []string, stdin io.Reader, stdout, stderr io.WriteCloser, tty bool, resize <-chan term.Size) error {
+func (ss streamService) Exec(containerID string, cmd []string, stdin io.Reader, stdout, stderr io.WriteCloser, tty bool, resize <-chan remotecommand.TerminalSize) error {
 c := ss.runtimeServer.GetContainer(containerID)
 
 if c == nil {
@@ -63,7 +64,7 @@ func (ss streamService) Exec(containerID string, cmd []string, stdin io.Reader,
 // make sure to close the stdout stream
 defer stdout.Close()
 
-kubecontainer.HandleResizing(resize, func(size term.Size) {
+kubecontainer.HandleResizing(resize, func(size remotecommand.TerminalSize) {
 term.SetSize(p.Fd(), size)
 })
 
@@ -6,7 +6,7 @@ import (
 "github.com/kubernetes-incubator/cri-o/oci"
 "github.com/sirupsen/logrus"
 "golang.org/x/net/context"
-pb "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
+pb "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
 )
 
 // ExecSync runs a command in a container synchronously.
@@ -5,7 +5,7 @@ import (
 "github.com/sirupsen/logrus"
 "golang.org/x/net/context"
 "k8s.io/apimachinery/pkg/fields"
-pb "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
+pb "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
 )
 
 // filterContainer returns whether passed container matches filtering criteria
@@ -11,7 +11,7 @@ import (
 "github.com/kubernetes-incubator/cri-o/oci"
 "github.com/sirupsen/logrus"
 "golang.org/x/net/context"
-pb "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
+pb "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
 )
 
 // PortForward prepares a streaming endpoint to forward ports from a PodSandbox.
@@ -8,7 +8,7 @@ import (
 "github.com/kubernetes-incubator/cri-o/oci"
 "github.com/sirupsen/logrus"
 "golang.org/x/net/context"
-pb "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
+pb "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
 )
 
 // RemoveContainer removes the container. If the container is running, the container
@@ -6,7 +6,7 @@ import (
 "github.com/kubernetes-incubator/cri-o/oci"
 "github.com/sirupsen/logrus"
 "golang.org/x/net/context"
-pb "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
+pb "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
 )
 
 // StartContainer starts the container.
server/container_stats.go (new file)

@@ -0,0 +1,14 @@
package server

import (
"fmt"

"golang.org/x/net/context"
pb "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
)

// ContainerStats returns stats of the container. If the container does not
// exist, the call returns an error.
func (s *Server) ContainerStats(ctx context.Context, req *pb.ContainerStatsRequest) (*pb.ContainerStatsResponse, error) {
return nil, fmt.Errorf("not implemented")
}
server/container_stats_list.go (new file)

@@ -0,0 +1,13 @@
package server

import (
"fmt"

"golang.org/x/net/context"
pb "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
)

// ListContainerStats returns stats of all running containers.
func (s *Server) ListContainerStats(ctx context.Context, req *pb.ListContainerStatsRequest) (*pb.ListContainerStatsResponse, error) {
return nil, fmt.Errorf("not implemented")
}
@@ -4,7 +4,7 @@ import (
 "github.com/kubernetes-incubator/cri-o/oci"
 "github.com/sirupsen/logrus"
 "golang.org/x/net/context"
-pb "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
+pb "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
 )
 
 const (
@@ -6,7 +6,7 @@ import (
 "github.com/kubernetes-incubator/cri-o/oci"
 "github.com/sirupsen/logrus"
 "golang.org/x/net/context"
-pb "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
+pb "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
 )
 
 // StopContainer stops a running container with a grace period (i.e., timeout).
@@ -2,7 +2,7 @@ package server
 
 import (
 "golang.org/x/net/context"
-pb "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
+pb "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
 )
 
 // UpdateRuntimeConfig updates the configuration of a running container.
server/image_fs_info.go (new file)

@@ -0,0 +1,13 @@
package server

import (
"fmt"

"golang.org/x/net/context"
pb "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
)

// ImageFsInfo returns information of the filesystem that is used to store images.
func (s *Server) ImageFsInfo(ctx context.Context, req *pb.ImageFsInfoRequest) (*pb.ImageFsInfoResponse, error) {
return nil, fmt.Errorf("not implemented")
}
@@ -3,7 +3,7 @@ package server
 import (
 "github.com/sirupsen/logrus"
 "golang.org/x/net/context"
-pb "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
+pb "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
 )
 
 // ListImages lists existing images.
@@ -8,7 +8,7 @@ import (
 "github.com/containers/image/types"
 "github.com/sirupsen/logrus"
 "golang.org/x/net/context"
-pb "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
+pb "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
 )
 
 // PullImage pulls a image with authentication config.
@@ -6,7 +6,7 @@ import (
 
 "github.com/sirupsen/logrus"
 "golang.org/x/net/context"
-pb "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
+pb "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
 )
 
 // RemoveImage removes the image.
@@ -8,7 +8,7 @@ import (
 "github.com/pkg/errors"
 "github.com/sirupsen/logrus"
 "golang.org/x/net/context"
-pb "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
+pb "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
 )
 
 // ImageStatus returns the status of the image.
@@ -5,7 +5,7 @@ import (
 "strings"
 
 "github.com/docker/docker/pkg/stringid"
-pb "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
+pb "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
 )
 
 const (
@@ -2,7 +2,7 @@ package server
 
 import (
 "golang.org/x/net/context"
-pb "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
+pb "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
 )
 
 // Status returns the status of the runtime
@@ -6,7 +6,7 @@ import (
 "github.com/sirupsen/logrus"
 "golang.org/x/net/context"
 "k8s.io/apimachinery/pkg/fields"
-pb "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
+pb "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
 )
 
 // filterSandbox returns whether passed container matches filtering criteria
@@ -10,7 +10,7 @@ import (
 "github.com/pkg/errors"
 "github.com/sirupsen/logrus"
 "golang.org/x/net/context"
-pb "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
+pb "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
 )
 
 // RemovePodSandbox deletes the sandbox. If there are any running containers in the
@@ -23,7 +23,7 @@ import (
 "golang.org/x/net/context"
 "golang.org/x/sys/unix"
 "k8s.io/kubernetes/pkg/api/v1"
-pb "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
+pb "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
 "k8s.io/kubernetes/pkg/kubelet/network/hostport"
 )
 
@@ -4,7 +4,7 @@ import (
 "github.com/kubernetes-incubator/cri-o/oci"
 "github.com/sirupsen/logrus"
 "golang.org/x/net/context"
-pb "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
+pb "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
 )
 
 // PodSandboxStatus returns the Status of the PodSandbox.
@@ -36,13 +36,8 @@ func (s *Server) PodSandboxStatus(ctx context.Context, req *pb.PodSandboxStatusR
 sandboxID := sb.ID()
 resp := &pb.PodSandboxStatusResponse{
 Status: &pb.PodSandboxStatus{
-Id: sandboxID,
-CreatedAt: podInfraContainer.CreatedAt().UnixNano(),
-Linux: &pb.LinuxPodSandboxStatus{
-Namespaces: &pb.Namespace{
-Network: netNsPath,
-},
-},
+Id: sandboxID,
+CreatedAt: podInfraContainer.CreatedAt().UnixNano(),
+Network: &pb.PodSandboxNetworkStatus{Ip: ip},
 State: rStatus,
 Labels: sb.Labels(),
@@ -14,7 +14,7 @@ import (
 "github.com/sirupsen/logrus"
 "golang.org/x/net/context"
 "golang.org/x/sys/unix"
-pb "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
+pb "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
 "k8s.io/kubernetes/pkg/kubelet/network/hostport"
 )
 
@@ -22,7 +22,7 @@ import (
 "github.com/prometheus/client_golang/prometheus"
 "github.com/sirupsen/logrus"
 knet "k8s.io/apimachinery/pkg/util/net"
-pb "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
+pb "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
 "k8s.io/kubernetes/pkg/kubelet/network/hostport"
 "k8s.io/kubernetes/pkg/kubelet/server/streaming"
 iptablesproxy "k8s.io/kubernetes/pkg/proxy/iptables"
@@ -2,7 +2,7 @@ package server
 
 import (
 "golang.org/x/net/context"
-pb "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/runtime"
+pb "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
 )
 
 // Version returns the runtime name, runtime version and runtime API version
@@ -408,10 +408,7 @@ EOF
 }
 
 function check_pod_cidr() {
-fullnetns=`crioctl pod status --id $1 | grep namespace | cut -d ' ' -f 3`
-netns=`basename $fullnetns`
-
-run ip netns exec $netns ip addr show dev eth0 scope global 2>&1
+run crioctl ctr execsync --id $1 ip addr show dev eth0 scope global 2>&1
 echo "$output"
 [ "$status" -eq 0 ]
 [[ "$output" =~ $POD_CIDR_MASK ]]
@@ -435,8 +432,7 @@ function get_host_ip() {
 }
 
 function ping_pod() {
-netns=`crioctl pod status --id $1 | grep namespace | cut -d ' ' -f 3`
-inet=`ip netns exec \`basename $netns\` ip addr show dev eth0 scope global | grep inet`
+inet=`crioctl ctr execsync --id $1 ip addr show dev eth0 scope global 2>&1 | grep inet`
 
 IFS=" "
 ip=`parse_pod_ip $inet`
@@ -447,12 +443,14 @@ function ping_pod() {
 }
 
 function ping_pod_from_pod() {
-pod_ip=`crioctl pod status --id $1 | grep "IP Address" | cut -d ' ' -f 3`
-netns=`crioctl pod status --id $2 | grep namespace | cut -d ' ' -f 3`
+inet=`crioctl ctr execsync --id $1 ip addr show dev eth0 scope global 2>&1 | grep inet`
 
-ip netns exec `basename $netns` ping -W 1 -c 2 $pod_ip
+IFS=" "
+ip=`parse_pod_ip $inet`
 
-echo $?
+run crioctl ctr execsync --id $2 ping -W 1 -c 2 $ip
+echo "$output"
+[ "$status" -eq 0 ]
 }
 
 
@@ -9,8 +9,14 @@ load helpers
 [ "$status" -eq 0 ]
 pod_id="$output"
 
-check_pod_cidr $pod_id
+run crioctl ctr create --config "$TESTDATA"/container_redis.json --pod "$pod_id"
+echo "$output"
+[ "$status" -eq 0 ]
+ctr_id="$output"
+
+check_pod_cidr $ctr_id
 
 cleanup_ctrs
 cleanup_pods
 stop_crio
 }
@@ -22,8 +28,14 @@ load helpers
 [ "$status" -eq 0 ]
 pod_id="$output"
 
-ping_pod $pod_id
+run crioctl ctr create --config "$TESTDATA"/container_redis.json --pod "$pod_id"
+echo "$output"
+[ "$status" -eq 0 ]
+ctr_id="$output"
+
+ping_pod $ctr_id
 
 cleanup_ctrs
 cleanup_pods
 stop_crio
 }
@@ -34,6 +46,10 @@ load helpers
 echo "$output"
 [ "$status" -eq 0 ]
 pod1_id="$output"
+run crioctl ctr create --config "$TESTDATA"/container_redis.json --pod "$pod1_id"
+echo "$output"
+[ "$status" -eq 0 ]
+ctr1_id="$output"
 
 temp_sandbox_conf cni_test
 
@@ -41,13 +57,18 @@ load helpers
 echo "$output"
 [ "$status" -eq 0 ]
 pod2_id="$output"
+run crioctl ctr create --config "$TESTDATA"/container_redis.json --pod "$pod2_id"
+echo "$output"
+[ "$status" -eq 0 ]
+ctr2_id="$output"
 
-ping_pod_from_pod $pod1_id $pod2_id
+ping_pod_from_pod $ctr1_id $ctr2_id
 [ "$status" -eq 0 ]
 
-ping_pod_from_pod $pod2_id $pod1_id
+ping_pod_from_pod $ctr2_id $ctr1_id
 [ "$status" -eq 0 ]
 
 cleanup_ctrs
 cleanup_pods
 stop_crio
 }
vendor.conf

@@ -1,15 +1,15 @@
-k8s.io/kubernetes v1.6.5 https://github.com/kubernetes/kubernetes
+k8s.io/kubernetes v1.7.4 https://github.com/kubernetes/kubernetes
 # https://github.com/kubernetes/client-go#compatibility-matrix
-k8s.io/client-go v3.0.0-beta.0 https://github.com/kubernetes/client-go
-k8s.io/apimachinery release-1.6 https://github.com/kubernetes/apimachinery
-k8s.io/apiserver release-1.6 https://github.com/kubernetes/apiserver
+k8s.io/client-go v4.0.0 https://github.com/kubernetes/client-go
+k8s.io/apimachinery release-1.7 https://github.com/kubernetes/apimachinery
+k8s.io/apiserver release-1.7 https://github.com/kubernetes/apiserver
 #
 github.com/sirupsen/logrus v1.0.0
 github.com/containers/image 74e359348c7ce9e0caf4fa75aa8de3809cf41c46
 github.com/ostreedev/ostree-go master
 github.com/containers/storage f8cff0727cf0802f0752ca58d2c05ec5270a47d5
 github.com/containernetworking/cni v0.4.0
-google.golang.org/grpc v1.0.1-GA https://github.com/grpc/grpc-go
+google.golang.org/grpc v1.0.4 https://github.com/grpc/grpc-go
 github.com/opencontainers/selinux v1.0.0-rc1
 github.com/opencontainers/go-digest v1.0.0-rc0
 github.com/opencontainers/runtime-tools 6bcd3b417fd6962ea04dafdbc2c07444e750572d
@@ -19,7 +19,7 @@ github.com/vishvananda/netlink master
 github.com/vishvananda/netns master
 github.com/opencontainers/image-spec v1.0.0
 github.com/opencontainers/runtime-spec v1.0.0
-github.com/juju/ratelimit acf38b000a03e4ab89e40f20f1e548f4e6ac7f72
+github.com/juju/ratelimit 5b9ff866471762aa2ab2dced63c9fb6f53921342
 github.com/tchap/go-patricia v2.2.6
 gopkg.in/cheggaaa/pb.v1 v1.0.7
 gopkg.in/inf.v0 v0.9.0
@@ -44,10 +44,10 @@ github.com/BurntSushi/toml v0.2.0
 github.com/mitchellh/go-wordwrap ad45545899c7b13c020ea92b2072220eefad42b8
 github.com/golang/glog 23def4e6c14b4da8ac2ed8007337bc5eb5007998
 github.com/davecgh/go-spew v1.1.0
-github.com/go-openapi/spec 02fb9cd3430ed0581e0ceb4804d5d4b3cc702694
+github.com/go-openapi/spec 6aced65f8501fe1217321abf0749d354824ba2ff
 github.com/go-openapi/jsonpointer 779f45308c19820f1a69e9a4cd965f496e0da10f
 github.com/go-openapi/jsonreference 36d33bfe519efae5632669801b180bf1a245da3b
-github.com/go-openapi/swag d5f8ebc3b1c55a4cf6489eeae7354f338cfe299e
+github.com/go-openapi/swag 1d0bd113de87027671077d3c71eb3ac5d7dbba72
 github.com/google/gofuzz 44d81051d367757e1c7c6a5a86423ece9afcf63c
 github.com/mailru/easyjson 99e922cf9de1bc0ab38310c277cff32c2147e747
 github.com/PuerkitoBio/purell v1.1.0
@@ -65,23 +65,34 @@ github.com/coreos/go-systemd v14
 github.com/coreos/pkg v3
 github.com/golang/groupcache b710c8433bd175204919eb38776e944233235d03
 github.com/fsnotify/fsnotify 7d7316ed6e1ed2de075aab8dfc76de5d158d66e1
-github.com/emicklei/go-restful 09691a3b6378b740595c1002f40c34dd5f218a22
+github.com/Azure/go-ansiterm 19f72df4d05d31cbe1c56bfc8045c96babff6c7e
+github.com/Microsoft/go-winio 78439966b38d69bf38227fbf57ac8a6fee70f69a
+github.com/Microsoft/hcsshim 43f9725307998e09f2e3816c2c0c36dc98f0c982
+github.com/emicklei/go-restful ff4f55a206334ef123e4f79bbf348980da81ca46
+github.com/emicklei/go-restful-swagger12 1.0.1
 github.com/pkg/errors v0.8.0
 github.com/godbus/dbus v4.0.0
 github.com/urfave/cli v1.19.1
 github.com/vbatts/tar-split v0.10.1
 github.com/renstrom/dedent v1.0.0
-github.com/prometheus/client_golang v0.8.0
-github.com/prometheus/procfs e645f4e5aaa8506fc71d6edbc5c4ff02c04c46f2
-github.com/prometheus/common 61f87aac8082fa8c3c5655c7608d7478d46ac2ad
-github.com/prometheus/client_model 6f3806018612930941127f2a7c6c453ba2c527d2
-github.com/matttproud/golang_protobuf_extensions v1.0.0
-github.com/beorn7/perks 4c0e84591b9aa9e6dcfdf3e020114cd81f89d5f9
 github.com/hpcloud/tail v1.0.0
 gopkg.in/fsnotify.v1 v1.4.2
 gopkg.in/tomb.v1 v1
 github.com/fatih/camelcase f6a740d52f961c60348ebb109adde9f4635d7540
 github.com/buger/goterm 2f8dfbc7dbbff5dd1d391ed91482c24df243b2d3
+github.com/dgrijalva/jwt-go v3.0.0
+github.com/exponent-io/jsonpath d6023ce2651d8eafb5c75bb0c7167536102ec9f5
+github.com/hashicorp/golang-lru 0a025b7e63adc15a622f29b0b2c4c3848243bbf6
+github.com/go-openapi/loads 18441dfa706d924a39a030ee2c3b1d8d81917b38
+github.com/go-openapi/analysis b44dc874b601d9e4e2f6e19140e794ba24bead3b
+github.com/go-openapi/strfmt 93a31ef21ac23f317792fff78f9539219dd74619
+github.com/asaskevich/govalidator v6
+github.com/go-openapi/errors d24ebc2075bad502fac3a8ae27aa6dd58e1952dc
+github.com/mitchellh/mapstructure d0303fe809921458f417bcf828397a65db30a7e4
+gopkg.in/mgo.v2 v2
+github.com/prometheus/client_golang e7e903064f5e9eb5da98208bae10b475d4db0f8c
+github.com/prometheus/client_model fa8ad6fec33561be4280a8f0514318c79d7f6cb6
+github.com/prometheus/common 13ba4ddd0caa9c28ca7b7bffe1dfa9ed8d5ef207
+github.com/prometheus/procfs 65c1f6f8f0fc1e2185eb9863a3bc751496404259
+github.com/matttproud/golang_protobuf_extensions fc2b8d3a73c4867e51861bbdd5ae3c1f0869dd6a
+github.com/beorn7/perks 3ac7bf7a47d159a033b107610db8a1b6575507a4
vendor/github.com/beorn7/perks/quantile/stream.go (generated, vendored)

@@ -133,7 +133,7 @@ func (s *Stream) Query(q float64) float64 {
 if l == 0 {
 return 0
 }
-i := int(math.Ceil(float64(l) * q))
+i := int(float64(l) * q)
 if i > 0 {
 i -= 1
 }
vendor/github.com/dgrijalva/jwt-go/LICENSE (generated, vendored, new file)

@@ -0,0 +1,7 @@
Copyright (c) 2012 Dave Grijalva

Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
vendor/github.com/dgrijalva/jwt-go/README.md (generated, vendored, new file)

@@ -0,0 +1,85 @@
A [go](http://www.golang.org) (or 'golang' for search engine friendliness) implementation of [JSON Web Tokens](http://self-issued.info/docs/draft-ietf-oauth-json-web-token.html)

[![Build Status](https://travis-ci.org/dgrijalva/jwt-go.svg?branch=master)](https://travis-ci.org/dgrijalva/jwt-go)

**BREAKING CHANGES:*** Version 3.0.0 is here. It includes _a lot_ of changes including a few that break the API. We've tried to break as few things as possible, so there should just be a few type signature changes. A full list of breaking changes is available in `VERSION_HISTORY.md`. See `MIGRATION_GUIDE.md` for more information on updating your code.

**NOTICE:** A vulnerability in JWT was [recently published](https://auth0.com/blog/2015/03/31/critical-vulnerabilities-in-json-web-token-libraries/). As this library doesn't force users to validate the `alg` is what they expected, it's possible your usage is effected. There will be an update soon to remedy this, and it will likey require backwards-incompatible changes to the API. In the short term, please make sure your implementation verifies the `alg` is what you expect.


## What the heck is a JWT?

JWT.io has [a great introduction](https://jwt.io/introduction) to JSON Web Tokens.

In short, it's a signed JSON object that does something useful (for example, authentication). It's commonly used for `Bearer` tokens in Oauth 2. A token is made of three parts, separated by `.`'s. The first two parts are JSON objects, that have been [base64url](http://tools.ietf.org/html/rfc4648) encoded. The last part is the signature, encoded the same way.

The first part is called the header. It contains the necessary information for verifying the last part, the signature. For example, which encryption method was used for signing and what key was used.

The part in the middle is the interesting bit. It's called the Claims and contains the actual stuff you care about. Refer to [the RFC](http://self-issued.info/docs/draft-jones-json-web-token.html) for information about reserved keys and the proper way to add your own.

## What's in the box?

This library supports the parsing and verification as well as the generation and signing of JWTs. Current supported signing algorithms are HMAC SHA, RSA, RSA-PSS, and ECDSA, though hooks are present for adding your own.

## Examples

See [the project documentation](https://godoc.org/github.com/dgrijalva/jwt-go) for examples of usage:

* [Simple example of parsing and validating a token](https://godoc.org/github.com/dgrijalva/jwt-go#example_Parse_hmac)
* [Simple example of building and signing a token](https://godoc.org/github.com/dgrijalva/jwt-go#example_New_hmac)
* [Directory of Examples](https://godoc.org/github.com/dgrijalva/jwt-go#pkg-examples)

## Extensions

This library publishes all the necessary components for adding your own signing methods. Simply implement the `SigningMethod` interface and register a factory method using `RegisterSigningMethod`.

Here's an example of an extension that integrates with the Google App Engine signing tools: https://github.com/someone1/gcp-jwt-go

## Compliance

This library was last reviewed to comply with [RTF 7519](http://www.rfc-editor.org/info/rfc7519) dated May 2015 with a few notable differences:

* In order to protect against accidental use of [Unsecured JWTs](http://self-issued.info/docs/draft-ietf-oauth-json-web-token.html#UnsecuredJWT), tokens using `alg=none` will only be accepted if the constant `jwt.UnsafeAllowNoneSignatureType` is provided as the key.

## Project Status & Versioning

This library is considered production ready. Feedback and feature requests are appreciated. The API should be considered stable. There should be very few backwards-incompatible changes outside of major version updates (and only with good reason).

This project uses [Semantic Versioning 2.0.0](http://semver.org). Accepted pull requests will land on `master`. Periodically, versions will be tagged from `master`. You can find all the releases on [the project releases page](https://github.com/dgrijalva/jwt-go/releases).

While we try to make it obvious when we make breaking changes, there isn't a great mechanism for pushing announcements out to users. You may want to use this alternative package include: `gopkg.in/dgrijalva/jwt-go.v2`. It will do the right thing WRT semantic versioning.

## Usage Tips

### Signing vs Encryption

A token is simply a JSON object that is signed by its author. this tells you exactly two things about the data:

* The author of the token was in the possession of the signing secret
* The data has not been modified since it was signed

It's important to know that JWT does not provide encryption, which means anyone who has access to the token can read its contents. If you need to protect (encrypt) the data, there is a companion spec, `JWE`, that provides this functionality. JWE is currently outside the scope of this library.

### Choosing a Signing Method

There are several signing methods available, and you should probably take the time to learn about the various options before choosing one. The principal design decision is most likely going to be symmetric vs asymmetric.

Symmetric signing methods, such as HSA, use only a single secret. This is probably the simplest signing method to use since any `[]byte` can be used as a valid secret. They are also slightly computationally faster to use, though this rarely is enough to matter. Symmetric signing methods work the best when both producers and consumers of tokens are trusted, or even the same system. Since the same secret is used to both sign and validate tokens, you can't easily distribute the key for validation.

Asymmetric signing methods, such as RSA, use different keys for signing and verifying tokens. This makes it possible to produce tokens with a private key, and allow any consumer to access the public key for verification.

### JWT and OAuth

It's worth mentioning that OAuth and JWT are not the same thing. A JWT token is simply a signed JSON object. It can be used anywhere such a thing is useful. There is some confusion, though, as JWT is the most common type of bearer token used in OAuth2 authentication.

Without going too far down the rabbit hole, here's a description of the interaction of these technologies:

* OAuth is a protocol for allowing an identity provider to be separate from the service a user is logging in to. For example, whenever you use Facebook to log into a different service (Yelp, Spotify, etc), you are using OAuth.
* OAuth defines several options for passing around authentication data. One popular method is called a "bearer token". A bearer token is simply a string that _should_ only be held by an authenticated user. Thus, simply presenting this token proves your identity. You can probably derive from here why a JWT might make a good bearer token.
* Because bearer tokens are used for authentication, it's important they're kept secret. This is why transactions that use bearer tokens typically happen over SSL.

## More

Documentation can be found [on godoc.org](http://godoc.org/github.com/dgrijalva/jwt-go).

The command line utility included in this project (cmd/jwt) provides a straightforward example of token creation and parsing as well as a useful tool for debugging your own integration. You'll also find several implementation examples in to documentation.
vendor/github.com/dgrijalva/jwt-go/claims.go (generated, vendored, new file)

@@ -0,0 +1,134 @@
package jwt

import (
"crypto/subtle"
"fmt"
"time"
)

// For a type to be a Claims object, it must just have a Valid method that determines
// if the token is invalid for any supported reason
type Claims interface {
Valid() error
}

// Structured version of Claims Section, as referenced at
// https://tools.ietf.org/html/rfc7519#section-4.1
// See examples for how to use this with your own claim types
type StandardClaims struct {
Audience string `json:"aud,omitempty"`
ExpiresAt int64 `json:"exp,omitempty"`
Id string `json:"jti,omitempty"`
IssuedAt int64 `json:"iat,omitempty"`
Issuer string `json:"iss,omitempty"`
NotBefore int64 `json:"nbf,omitempty"`
Subject string `json:"sub,omitempty"`
}

// Validates time based claims "exp, iat, nbf".
// There is no accounting for clock skew.
// As well, if any of the above claims are not in the token, it will still
// be considered a valid claim.
func (c StandardClaims) Valid() error {
vErr := new(ValidationError)
now := TimeFunc().Unix()

// The claims below are optional, by default, so if they are set to the
// default value in Go, let's not fail the verification for them.
if c.VerifyExpiresAt(now, false) == false {
delta := time.Unix(now, 0).Sub(time.Unix(c.ExpiresAt, 0))
vErr.Inner = fmt.Errorf("token is expired by %v", delta)
vErr.Errors |= ValidationErrorExpired
}

if c.VerifyIssuedAt(now, false) == false {
vErr.Inner = fmt.Errorf("Token used before issued")
vErr.Errors |= ValidationErrorIssuedAt
}

if c.VerifyNotBefore(now, false) == false {
vErr.Inner = fmt.Errorf("token is not valid yet")
vErr.Errors |= ValidationErrorNotValidYet
}

if vErr.valid() {
return nil
}

return vErr
}

// Compares the aud claim against cmp.
// If required is false, this method will return true if the value matches or is unset
func (c *StandardClaims) VerifyAudience(cmp string, req bool) bool {
return verifyAud(c.Audience, cmp, req)
}

// Compares the exp claim against cmp.
// If required is false, this method will return true if the value matches or is unset
func (c *StandardClaims) VerifyExpiresAt(cmp int64, req bool) bool {
return verifyExp(c.ExpiresAt, cmp, req)
}

// Compares the iat claim against cmp.
// If required is false, this method will return true if the value matches or is unset
func (c *StandardClaims) VerifyIssuedAt(cmp int64, req bool) bool {
return verifyIat(c.IssuedAt, cmp, req)
}

// Compares the iss claim against cmp.
// If required is false, this method will return true if the value matches or is unset
func (c *StandardClaims) VerifyIssuer(cmp string, req bool) bool {
return verifyIss(c.Issuer, cmp, req)
}

// Compares the nbf claim against cmp.
// If required is false, this method will return true if the value matches or is unset
func (c *StandardClaims) VerifyNotBefore(cmp int64, req bool) bool {
return verifyNbf(c.NotBefore, cmp, req)
}

// ----- helpers

func verifyAud(aud string, cmp string, required bool) bool {
if aud == "" {
return !required
}
if subtle.ConstantTimeCompare([]byte(aud), []byte(cmp)) != 0 {
return true
} else {
return false
}
}

func verifyExp(exp int64, now int64, required bool) bool {
if exp == 0 {
return !required
}
return now <= exp
}

func verifyIat(iat int64, now int64, required bool) bool {
if iat == 0 {
return !required
}
return now >= iat
}

func verifyIss(iss string, cmp string, required bool) bool {
if iss == "" {
return !required
}
if subtle.ConstantTimeCompare([]byte(iss), []byte(cmp)) != 0 {
return true
} else {
return false
}
}

func verifyNbf(nbf int64, now int64, required bool) bool {
if nbf == 0 {
return !required
}
return now >= nbf
}
vendor/github.com/dgrijalva/jwt-go/doc.go (generated, vendored, new file)

@@ -0,0 +1,4 @@
// Package jwt is a Go implementation of JSON Web Tokens: http://self-issued.info/docs/draft-jones-json-web-token.html
//
// See README.md for more info.
package jwt
vendor/github.com/dgrijalva/jwt-go/ecdsa.go (generated, vendored, new file)

@@ -0,0 +1,147 @@
package jwt

import (
"crypto"
"crypto/ecdsa"
"crypto/rand"
"errors"
"math/big"
)

var (
// Sadly this is missing from crypto/ecdsa compared to crypto/rsa
ErrECDSAVerification = errors.New("crypto/ecdsa: verification error")
)

// Implements the ECDSA family of signing methods signing methods
type SigningMethodECDSA struct {
Name string
Hash crypto.Hash
KeySize int
CurveBits int
}

// Specific instances for EC256 and company
var (
SigningMethodES256 *SigningMethodECDSA
SigningMethodES384 *SigningMethodECDSA
SigningMethodES512 *SigningMethodECDSA
)

func init() {
// ES256
SigningMethodES256 = &SigningMethodECDSA{"ES256", crypto.SHA256, 32, 256}
RegisterSigningMethod(SigningMethodES256.Alg(), func() SigningMethod {
return SigningMethodES256
})

// ES384
SigningMethodES384 = &SigningMethodECDSA{"ES384", crypto.SHA384, 48, 384}
RegisterSigningMethod(SigningMethodES384.Alg(), func() SigningMethod {
return SigningMethodES384
})

// ES512
SigningMethodES512 = &SigningMethodECDSA{"ES512", crypto.SHA512, 66, 521}
RegisterSigningMethod(SigningMethodES512.Alg(), func() SigningMethod {
return SigningMethodES512
})
}

func (m *SigningMethodECDSA) Alg() string {
return m.Name
}

// Implements the Verify method from SigningMethod
// For this verify method, key must be an ecdsa.PublicKey struct
func (m *SigningMethodECDSA) Verify(signingString, signature string, key interface{}) error {
var err error

// Decode the signature
var sig []byte
if sig, err = DecodeSegment(signature); err != nil {
return err
}

// Get the key
var ecdsaKey *ecdsa.PublicKey
switch k := key.(type) {
case *ecdsa.PublicKey:
ecdsaKey = k
default:
return ErrInvalidKeyType
}

if len(sig) != 2*m.KeySize {
return ErrECDSAVerification
}

r := big.NewInt(0).SetBytes(sig[:m.KeySize])
s := big.NewInt(0).SetBytes(sig[m.KeySize:])

// Create hasher
if !m.Hash.Available() {
return ErrHashUnavailable
}
hasher := m.Hash.New()
hasher.Write([]byte(signingString))

// Verify the signature
if verifystatus := ecdsa.Verify(ecdsaKey, hasher.Sum(nil), r, s); verifystatus == true {
return nil
} else {
return ErrECDSAVerification
}
}

// Implements the Sign method from SigningMethod
// For this signing method, key must be an ecdsa.PrivateKey struct
func (m *SigningMethodECDSA) Sign(signingString string, key interface{}) (string, error) {
// Get the key
var ecdsaKey *ecdsa.PrivateKey
switch k := key.(type) {
case *ecdsa.PrivateKey:
ecdsaKey = k
default:
return "", ErrInvalidKeyType
}

// Create the hasher
if !m.Hash.Available() {
return "", ErrHashUnavailable
}

hasher := m.Hash.New()
hasher.Write([]byte(signingString))

// Sign the string and return r, s
if r, s, err := ecdsa.Sign(rand.Reader, ecdsaKey, hasher.Sum(nil)); err == nil {
curveBits := ecdsaKey.Curve.Params().BitSize

if m.CurveBits != curveBits {
return "", ErrInvalidKey
}

keyBytes := curveBits / 8
if curveBits%8 > 0 {
keyBytes += 1
}

// We serialize the outpus (r and s) into big-endian byte arrays and pad
// them with zeros on the left to make sure the sizes work out. Both arrays
// must be keyBytes long, and the output must be 2*keyBytes long.
rBytes := r.Bytes()
rBytesPadded := make([]byte, keyBytes)
copy(rBytesPadded[keyBytes-len(rBytes):], rBytes)

sBytes := s.Bytes()
sBytesPadded := make([]byte, keyBytes)
copy(sBytesPadded[keyBytes-len(sBytes):], sBytes)

out := append(rBytesPadded, sBytesPadded...)

return EncodeSegment(out), nil
} else {
return "", err
}
}
vendor/github.com/dgrijalva/jwt-go/ecdsa_utils.go (generated, vendored, new file)

@@ -0,0 +1,67 @@
package jwt

import (
"crypto/ecdsa"
"crypto/x509"
"encoding/pem"
"errors"
)

var (
ErrNotECPublicKey = errors.New("Key is not a valid ECDSA public key")
ErrNotECPrivateKey = errors.New("Key is not a valid ECDSA private key")
)

// Parse PEM encoded Elliptic Curve Private Key Structure
func ParseECPrivateKeyFromPEM(key []byte) (*ecdsa.PrivateKey, error) {
var err error

// Parse PEM block
var block *pem.Block
if block, _ = pem.Decode(key); block == nil {
return nil, ErrKeyMustBePEMEncoded
}

// Parse the key
var parsedKey interface{}
if parsedKey, err = x509.ParseECPrivateKey(block.Bytes); err != nil {
return nil, err
}

var pkey *ecdsa.PrivateKey
var ok bool
if pkey, ok = parsedKey.(*ecdsa.PrivateKey); !ok {
return nil, ErrNotECPrivateKey
}

return pkey, nil
}

// Parse PEM encoded PKCS1 or PKCS8 public key
func ParseECPublicKeyFromPEM(key []byte) (*ecdsa.PublicKey, error) {
var err error

// Parse PEM block
var block *pem.Block
if block, _ = pem.Decode(key); block == nil {
return nil, ErrKeyMustBePEMEncoded
}

// Parse the key
var parsedKey interface{}
if parsedKey, err = x509.ParsePKIXPublicKey(block.Bytes); err != nil {
if cert, err := x509.ParseCertificate(block.Bytes); err == nil {
parsedKey = cert.PublicKey
} else {
return nil, err
}
}

var pkey *ecdsa.PublicKey
var ok bool
if pkey, ok = parsedKey.(*ecdsa.PublicKey); !ok {
return nil, ErrNotECPublicKey
}

return pkey, nil
}
vendor/github.com/dgrijalva/jwt-go/errors.go (generated, vendored, new file)

@@ -0,0 +1,63 @@
package jwt

import (
"errors"
)

// Error constants
var (
ErrInvalidKey = errors.New("key is invalid")
ErrInvalidKeyType = errors.New("key is of invalid type")
ErrHashUnavailable = errors.New("the requested hash function is unavailable")
)

// The errors that might occur when parsing and validating a token
const (
ValidationErrorMalformed uint32 = 1 << iota // Token is malformed
ValidationErrorUnverifiable // Token could not be verified because of signing problems
ValidationErrorSignatureInvalid // Signature validation failed

// Standard Claim validation errors
ValidationErrorAudience // AUD validation failed
ValidationErrorExpired // EXP validation failed
ValidationErrorIssuedAt // IAT validation failed
ValidationErrorIssuer // ISS validation failed
ValidationErrorNotValidYet // NBF validation failed
ValidationErrorId // JTI validation failed
ValidationErrorClaimsInvalid // Generic claims validation error
)

// Helper for constructing a ValidationError with a string error message
func NewValidationError(errorText string, errorFlags uint32) *ValidationError {
return &ValidationError{
text: errorText,
Errors: errorFlags,
}
}

// The error from Parse if token is not valid
type ValidationError struct {
Inner error // stores the error returned by external dependencies, i.e.: KeyFunc
Errors uint32 // bitfield. see ValidationError... constants
text string // errors that do not have a valid error just have text
}

// Validation error is an error type
func (e ValidationError) Error() string {
if e.Inner != nil {
return e.Inner.Error()
} else if e.text != "" {
return e.text
} else {
return "token is invalid"
}
return e.Inner.Error()
}

// No errors
func (e *ValidationError) valid() bool {
if e.Errors > 0 {
return false
}
return true
}
vendor/github.com/dgrijalva/jwt-go/hmac.go (generated, vendored, new file)

@@ -0,0 +1,94 @@
package jwt

import (
"crypto"
"crypto/hmac"
"errors"
)

// Implements the HMAC-SHA family of signing methods signing methods
type SigningMethodHMAC struct {
Name string
Hash crypto.Hash
}

// Specific instances for HS256 and company
var (
SigningMethodHS256 *SigningMethodHMAC
SigningMethodHS384 *SigningMethodHMAC
SigningMethodHS512 *SigningMethodHMAC
ErrSignatureInvalid = errors.New("signature is invalid")
)

func init() {
// HS256
SigningMethodHS256 = &SigningMethodHMAC{"HS256", crypto.SHA256}
RegisterSigningMethod(SigningMethodHS256.Alg(), func() SigningMethod {
return SigningMethodHS256
})

// HS384
SigningMethodHS384 = &SigningMethodHMAC{"HS384", crypto.SHA384}
RegisterSigningMethod(SigningMethodHS384.Alg(), func() SigningMethod {
return SigningMethodHS384
})

// HS512
SigningMethodHS512 = &SigningMethodHMAC{"HS512", crypto.SHA512}
RegisterSigningMethod(SigningMethodHS512.Alg(), func() SigningMethod {
return SigningMethodHS512
})
}

func (m *SigningMethodHMAC) Alg() string {
return m.Name
}

// Verify the signature of HSXXX tokens. Returns nil if the signature is valid.
func (m *SigningMethodHMAC) Verify(signingString, signature string, key interface{}) error {
// Verify the key is the right type
keyBytes, ok := key.([]byte)
if !ok {
return ErrInvalidKeyType
}

// Decode signature, for comparison
sig, err := DecodeSegment(signature)
if err != nil {
return err
}

// Can we use the specified hashing method?
if !m.Hash.Available() {
return ErrHashUnavailable
}

// This signing method is symmetric, so we validate the signature
// by reproducing the signature from the signing string and key, then
// comparing that against the provided signature.
hasher := hmac.New(m.Hash.New, keyBytes)
hasher.Write([]byte(signingString))
if !hmac.Equal(sig, hasher.Sum(nil)) {
return ErrSignatureInvalid
}

// No validation errors. Signature is good.
return nil
}

// Implements the Sign method from SigningMethod for this signing method.
// Key must be []byte
func (m *SigningMethodHMAC) Sign(signingString string, key interface{}) (string, error) {
if keyBytes, ok := key.([]byte); ok {
if !m.Hash.Available() {
return "", ErrHashUnavailable
}

hasher := hmac.New(m.Hash.New, keyBytes)
hasher.Write([]byte(signingString))

return EncodeSegment(hasher.Sum(nil)), nil
}

return "", ErrInvalidKey
}
94
vendor/github.com/dgrijalva/jwt-go/map_claims.go
generated
vendored
Normal file
94
vendor/github.com/dgrijalva/jwt-go/map_claims.go
generated
vendored
Normal file
|
@ -0,0 +1,94 @@
|
|||
package jwt
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
// "fmt"
|
||||
)
|
||||
|
||||
// Claims type that uses the map[string]interface{} for JSON decoding
|
||||
// This is the default claims type if you don't supply one
|
||||
type MapClaims map[string]interface{}
|
||||
|
||||
// Compares the aud claim against cmp.
|
||||
// If required is false, this method will return true if the value matches or is unset
|
||||
func (m MapClaims) VerifyAudience(cmp string, req bool) bool {
|
||||
aud, _ := m["aud"].(string)
|
||||
return verifyAud(aud, cmp, req)
|
||||
}
|
||||
|
||||
// Compares the exp claim against cmp.
|
||||
// If required is false, this method will return true if the value matches or is unset
|
||||
func (m MapClaims) VerifyExpiresAt(cmp int64, req bool) bool {
|
||||
switch exp := m["exp"].(type) {
|
||||
case float64:
|
||||
return verifyExp(int64(exp), cmp, req)
|
||||
case json.Number:
|
||||
v, _ := exp.Int64()
|
||||
return verifyExp(v, cmp, req)
|
||||
}
|
||||
return req == false
|
||||
}
|
||||
|
||||
// Compares the iat claim against cmp.
|
||||
// If required is false, this method will return true if the value matches or is unset
|
||||
func (m MapClaims) VerifyIssuedAt(cmp int64, req bool) bool {
|
||||
switch iat := m["iat"].(type) {
|
||||
case float64:
|
||||
return verifyIat(int64(iat), cmp, req)
|
||||
case json.Number:
|
||||
v, _ := iat.Int64()
|
||||
return verifyIat(v, cmp, req)
|
||||
}
|
||||
return req == false
|
||||
}
|
||||
|
||||
// Compares the iss claim against cmp.
|
||||
// If required is false, this method will return true if the value matches or is unset
|
||||
func (m MapClaims) VerifyIssuer(cmp string, req bool) bool {
|
||||
iss, _ := m["iss"].(string)
|
||||
return verifyIss(iss, cmp, req)
|
||||
}
|
||||
|
||||
// Compares the nbf claim against cmp.
|
||||
// If required is false, this method will return true if the value matches or is unset
|
||||
func (m MapClaims) VerifyNotBefore(cmp int64, req bool) bool {
|
||||
switch nbf := m["nbf"].(type) {
|
||||
case float64:
|
||||
return verifyNbf(int64(nbf), cmp, req)
|
||||
case json.Number:
|
||||
v, _ := nbf.Int64()
|
||||
return verifyNbf(v, cmp, req)
|
||||
}
|
||||
return req == false
|
||||
}
|
||||
|
||||
// Validates time based claims "exp, iat, nbf".
|
||||
// There is no accounting for clock skew.
|
||||
// As well, if any of the above claims are not in the token, it will still
|
||||
// be considered a valid claim.
|
||||
func (m MapClaims) Valid() error {
|
||||
vErr := new(ValidationError)
|
||||
now := TimeFunc().Unix()
|
||||
|
||||
if m.VerifyExpiresAt(now, false) == false {
|
||||
vErr.Inner = errors.New("Token is expired")
|
||||
vErr.Errors |= ValidationErrorExpired
|
||||
}
|
||||
|
||||
if m.VerifyIssuedAt(now, false) == false {
|
||||
vErr.Inner = errors.New("Token used before issued")
|
||||
vErr.Errors |= ValidationErrorIssuedAt
|
||||
}
|
||||
|
||||
if m.VerifyNotBefore(now, false) == false {
|
||||
vErr.Inner = errors.New("Token is not valid yet")
|
||||
vErr.Errors |= ValidationErrorNotValidYet
|
||||
}
|
||||
|
||||
if vErr.valid() {
|
||||
return nil
|
||||
}
|
||||
|
||||
return vErr
|
||||
}
|
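A short sketch of how MapClaims validation behaves, assuming the package is imported from its vendored path; the issuer value and expiry are illustrative.

```go
package main

import (
	"fmt"
	"time"

	jwt "github.com/dgrijalva/jwt-go"
)

func main() {
	claims := jwt.MapClaims{
		"iss": "example-issuer",                          // hypothetical issuer
		"exp": float64(time.Now().Add(time.Hour).Unix()), // numeric JSON claims decode to float64 by default
	}

	// Valid checks exp/iat/nbf against jwt.TimeFunc; absent claims count as valid.
	if err := claims.Valid(); err != nil {
		fmt.Println("claims rejected:", err)
		return
	}
	fmt.Println("issuer matches:", claims.VerifyIssuer("example-issuer", true))
}
```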
52
vendor/github.com/dgrijalva/jwt-go/none.go
generated
vendored
Normal file
52
vendor/github.com/dgrijalva/jwt-go/none.go
generated
vendored
Normal file
|
@ -0,0 +1,52 @@
|
|||
package jwt
|
||||
|
||||
// Implements the none signing method. This is required by the spec
|
||||
// but you probably should never use it.
|
||||
var SigningMethodNone *signingMethodNone
|
||||
|
||||
const UnsafeAllowNoneSignatureType unsafeNoneMagicConstant = "none signing method allowed"
|
||||
|
||||
var NoneSignatureTypeDisallowedError error
|
||||
|
||||
type signingMethodNone struct{}
|
||||
type unsafeNoneMagicConstant string
|
||||
|
||||
func init() {
|
||||
SigningMethodNone = &signingMethodNone{}
|
||||
NoneSignatureTypeDisallowedError = NewValidationError("'none' signature type is not allowed", ValidationErrorSignatureInvalid)
|
||||
|
||||
RegisterSigningMethod(SigningMethodNone.Alg(), func() SigningMethod {
|
||||
return SigningMethodNone
|
||||
})
|
||||
}
|
||||
|
||||
func (m *signingMethodNone) Alg() string {
|
||||
return "none"
|
||||
}
|
||||
|
||||
// Only allow 'none' alg type if UnsafeAllowNoneSignatureType is specified as the key
|
||||
func (m *signingMethodNone) Verify(signingString, signature string, key interface{}) (err error) {
|
||||
// Key must be UnsafeAllowNoneSignatureType to prevent accidentally
|
||||
// accepting 'none' signing method
|
||||
if _, ok := key.(unsafeNoneMagicConstant); !ok {
|
||||
return NoneSignatureTypeDisallowedError
|
||||
}
|
||||
// If signing method is none, signature must be an empty string
|
||||
if signature != "" {
|
||||
return NewValidationError(
|
||||
"'none' signing method with non-empty signature",
|
||||
ValidationErrorSignatureInvalid,
|
||||
)
|
||||
}
|
||||
|
||||
// Accept 'none' signing method.
|
||||
return nil
|
||||
}
|
||||
|
||||
// Only allow 'none' signing if UnsafeAllowNoneSignatureType is specified as the key
|
||||
func (m *signingMethodNone) Sign(signingString string, key interface{}) (string, error) {
|
||||
if _, ok := key.(unsafeNoneMagicConstant); ok {
|
||||
return "", nil
|
||||
}
|
||||
return "", NoneSignatureTypeDisallowedError
|
||||
}
|
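A sketch of the opt-in behavior: parsing an unsigned token fails unless the Keyfunc returns the UnsafeAllowNoneSignatureType constant. The claims here are empty and purely illustrative.

```go
package main

import (
	"fmt"

	jwt "github.com/dgrijalva/jwt-go"
)

func main() {
	// Build an unsigned token: alg "none", empty signature segment.
	unsigned, _ := jwt.New(jwt.SigningMethodNone).SignedString(jwt.UnsafeAllowNoneSignatureType)

	// With an ordinary key the 'none' method is rejected outright.
	_, err := jwt.Parse(unsigned, func(t *jwt.Token) (interface{}, error) {
		return []byte("any-key"), nil
	})
	fmt.Println("rejected without opt-in:", err != nil)

	// Accepting 'none' requires passing the magic constant as the key.
	tok, err := jwt.Parse(unsigned, func(t *jwt.Token) (interface{}, error) {
		return jwt.UnsafeAllowNoneSignatureType, nil
	})
	fmt.Println("accepted with opt-in:", err == nil && tok.Valid)
}
```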
128
vendor/github.com/dgrijalva/jwt-go/parser.go
generated
vendored
Normal file
128
vendor/github.com/dgrijalva/jwt-go/parser.go
generated
vendored
Normal file
|
@ -0,0 +1,128 @@
|
|||
package jwt
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"strings"
|
||||
)
|
||||
|
||||
type Parser struct {
|
||||
ValidMethods []string // If populated, only these methods will be considered valid
|
||||
UseJSONNumber bool // Use JSON Number format in JSON decoder
|
||||
}
|
||||
|
||||
// Parse, validate, and return a token.
|
||||
// keyFunc will receive the parsed token and should return the key for validating.
|
||||
// If everything is kosher, err will be nil
|
||||
func (p *Parser) Parse(tokenString string, keyFunc Keyfunc) (*Token, error) {
|
||||
return p.ParseWithClaims(tokenString, MapClaims{}, keyFunc)
|
||||
}
|
||||
|
||||
func (p *Parser) ParseWithClaims(tokenString string, claims Claims, keyFunc Keyfunc) (*Token, error) {
|
||||
parts := strings.Split(tokenString, ".")
|
||||
if len(parts) != 3 {
|
||||
return nil, NewValidationError("token contains an invalid number of segments", ValidationErrorMalformed)
|
||||
}
|
||||
|
||||
var err error
|
||||
token := &Token{Raw: tokenString}
|
||||
|
||||
// parse Header
|
||||
var headerBytes []byte
|
||||
if headerBytes, err = DecodeSegment(parts[0]); err != nil {
|
||||
if strings.HasPrefix(strings.ToLower(tokenString), "bearer ") {
|
||||
return token, NewValidationError("tokenstring should not contain 'bearer '", ValidationErrorMalformed)
|
||||
}
|
||||
return token, &ValidationError{Inner: err, Errors: ValidationErrorMalformed}
|
||||
}
|
||||
if err = json.Unmarshal(headerBytes, &token.Header); err != nil {
|
||||
return token, &ValidationError{Inner: err, Errors: ValidationErrorMalformed}
|
||||
}
|
||||
|
||||
// parse Claims
|
||||
var claimBytes []byte
|
||||
token.Claims = claims
|
||||
|
||||
if claimBytes, err = DecodeSegment(parts[1]); err != nil {
|
||||
return token, &ValidationError{Inner: err, Errors: ValidationErrorMalformed}
|
||||
}
|
||||
dec := json.NewDecoder(bytes.NewBuffer(claimBytes))
|
||||
if p.UseJSONNumber {
|
||||
dec.UseNumber()
|
||||
}
|
||||
// JSON Decode. Special case for map type to avoid weird pointer behavior
|
||||
if c, ok := token.Claims.(MapClaims); ok {
|
||||
err = dec.Decode(&c)
|
||||
} else {
|
||||
err = dec.Decode(&claims)
|
||||
}
|
||||
// Handle decode error
|
||||
if err != nil {
|
||||
return token, &ValidationError{Inner: err, Errors: ValidationErrorMalformed}
|
||||
}
|
||||
|
||||
// Lookup signature method
|
||||
if method, ok := token.Header["alg"].(string); ok {
|
||||
if token.Method = GetSigningMethod(method); token.Method == nil {
|
||||
return token, NewValidationError("signing method (alg) is unavailable.", ValidationErrorUnverifiable)
|
||||
}
|
||||
} else {
|
||||
return token, NewValidationError("signing method (alg) is unspecified.", ValidationErrorUnverifiable)
|
||||
}
|
||||
|
||||
// Verify signing method is in the required set
|
||||
if p.ValidMethods != nil {
|
||||
var signingMethodValid = false
|
||||
var alg = token.Method.Alg()
|
||||
for _, m := range p.ValidMethods {
|
||||
if m == alg {
|
||||
signingMethodValid = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !signingMethodValid {
|
||||
// signing method is not in the listed set
|
||||
return token, NewValidationError(fmt.Sprintf("signing method %v is invalid", alg), ValidationErrorSignatureInvalid)
|
||||
}
|
||||
}
|
||||
|
||||
// Lookup key
|
||||
var key interface{}
|
||||
if keyFunc == nil {
|
||||
// keyFunc was not provided. Short-circuit validation.
|
||||
return token, NewValidationError("no Keyfunc was provided.", ValidationErrorUnverifiable)
|
||||
}
|
||||
if key, err = keyFunc(token); err != nil {
|
||||
// keyFunc returned an error
|
||||
return token, &ValidationError{Inner: err, Errors: ValidationErrorUnverifiable}
|
||||
}
|
||||
|
||||
vErr := &ValidationError{}
|
||||
|
||||
// Validate Claims
|
||||
if err := token.Claims.Valid(); err != nil {
|
||||
|
||||
// If the Claims Valid returned an error, check if it is a validation error,
|
||||
// If it was another error type, create a ValidationError with a generic ClaimsInvalid flag set
|
||||
if e, ok := err.(*ValidationError); !ok {
|
||||
vErr = &ValidationError{Inner: err, Errors: ValidationErrorClaimsInvalid}
|
||||
} else {
|
||||
vErr = e
|
||||
}
|
||||
}
|
||||
|
||||
// Perform validation
|
||||
token.Signature = parts[2]
|
||||
if err = token.Method.Verify(strings.Join(parts[0:2], "."), token.Signature, key); err != nil {
|
||||
vErr.Inner = err
|
||||
vErr.Errors |= ValidationErrorSignatureInvalid
|
||||
}
|
||||
|
||||
if vErr.valid() {
|
||||
token.Valid = true
|
||||
return token, nil
|
||||
}
|
||||
|
||||
return token, vErr
|
||||
}
|
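A sketch of using the Parser options above: ValidMethods pins the accepted algorithm and UseJSONNumber keeps numeric claims as json.Number. The secret is a placeholder.

```go
package main

import (
	"fmt"

	jwt "github.com/dgrijalva/jwt-go"
)

func main() {
	key := []byte("shared-secret") // hypothetical key

	signed, err := jwt.NewWithClaims(jwt.SigningMethodHS256, jwt.MapClaims{"count": 42}).SignedString(key)
	if err != nil {
		panic(err)
	}

	parser := &jwt.Parser{
		ValidMethods:  []string{"HS256"}, // anything else fails with ValidationErrorSignatureInvalid
		UseJSONNumber: true,              // "count" comes back as json.Number instead of float64
	}
	tok, err := parser.Parse(signed, func(t *jwt.Token) (interface{}, error) {
		return key, nil
	})
	if err != nil {
		panic(err)
	}
	fmt.Println("valid:", tok.Valid, "count:", tok.Claims.(jwt.MapClaims)["count"])
}
```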
100
vendor/github.com/dgrijalva/jwt-go/rsa.go
generated
vendored
Normal file
100
vendor/github.com/dgrijalva/jwt-go/rsa.go
generated
vendored
Normal file
|
@ -0,0 +1,100 @@
|
|||
package jwt
|
||||
|
||||
import (
|
||||
"crypto"
|
||||
"crypto/rand"
|
||||
"crypto/rsa"
|
||||
)
|
||||
|
||||
// Implements the RSA family of signing methods
|
||||
type SigningMethodRSA struct {
|
||||
Name string
|
||||
Hash crypto.Hash
|
||||
}
|
||||
|
||||
// Specific instances for RS256 and company
|
||||
var (
|
||||
SigningMethodRS256 *SigningMethodRSA
|
||||
SigningMethodRS384 *SigningMethodRSA
|
||||
SigningMethodRS512 *SigningMethodRSA
|
||||
)
|
||||
|
||||
func init() {
|
||||
// RS256
|
||||
SigningMethodRS256 = &SigningMethodRSA{"RS256", crypto.SHA256}
|
||||
RegisterSigningMethod(SigningMethodRS256.Alg(), func() SigningMethod {
|
||||
return SigningMethodRS256
|
||||
})
|
||||
|
||||
// RS384
|
||||
SigningMethodRS384 = &SigningMethodRSA{"RS384", crypto.SHA384}
|
||||
RegisterSigningMethod(SigningMethodRS384.Alg(), func() SigningMethod {
|
||||
return SigningMethodRS384
|
||||
})
|
||||
|
||||
// RS512
|
||||
SigningMethodRS512 = &SigningMethodRSA{"RS512", crypto.SHA512}
|
||||
RegisterSigningMethod(SigningMethodRS512.Alg(), func() SigningMethod {
|
||||
return SigningMethodRS512
|
||||
})
|
||||
}
|
||||
|
||||
func (m *SigningMethodRSA) Alg() string {
|
||||
return m.Name
|
||||
}
|
||||
|
||||
// Implements the Verify method from SigningMethod
|
||||
// For this signing method, key must be an rsa.PublicKey structure.
|
||||
func (m *SigningMethodRSA) Verify(signingString, signature string, key interface{}) error {
|
||||
var err error
|
||||
|
||||
// Decode the signature
|
||||
var sig []byte
|
||||
if sig, err = DecodeSegment(signature); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var rsaKey *rsa.PublicKey
|
||||
var ok bool
|
||||
|
||||
if rsaKey, ok = key.(*rsa.PublicKey); !ok {
|
||||
return ErrInvalidKeyType
|
||||
}
|
||||
|
||||
// Create hasher
|
||||
if !m.Hash.Available() {
|
||||
return ErrHashUnavailable
|
||||
}
|
||||
hasher := m.Hash.New()
|
||||
hasher.Write([]byte(signingString))
|
||||
|
||||
// Verify the signature
|
||||
return rsa.VerifyPKCS1v15(rsaKey, m.Hash, hasher.Sum(nil), sig)
|
||||
}
|
||||
|
||||
// Implements the Sign method from SigningMethod
|
||||
// For this signing method, key must be an rsa.PrivateKey structure.
|
||||
func (m *SigningMethodRSA) Sign(signingString string, key interface{}) (string, error) {
|
||||
var rsaKey *rsa.PrivateKey
|
||||
var ok bool
|
||||
|
||||
// Validate type of key
|
||||
if rsaKey, ok = key.(*rsa.PrivateKey); !ok {
|
||||
return "", ErrInvalidKey
|
||||
}
|
||||
|
||||
// Create the hasher
|
||||
if !m.Hash.Available() {
|
||||
return "", ErrHashUnavailable
|
||||
}
|
||||
|
||||
hasher := m.Hash.New()
|
||||
hasher.Write([]byte(signingString))
|
||||
|
||||
// Sign the string and return the encoded bytes
|
||||
if sigBytes, err := rsa.SignPKCS1v15(rand.Reader, rsaKey, m.Hash, hasher.Sum(nil)); err == nil {
|
||||
return EncodeSegment(sigBytes), nil
|
||||
} else {
|
||||
return "", err
|
||||
}
|
||||
}
|
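A sketch of the asymmetric flow: signing needs the *rsa.PrivateKey, verification only the *rsa.PublicKey. The key pair is generated in memory for illustration; real callers would load one via the PEM helpers further below.

```go
package main

import (
	"crypto/rand"
	"crypto/rsa"
	"fmt"

	jwt "github.com/dgrijalva/jwt-go"
)

func main() {
	priv, err := rsa.GenerateKey(rand.Reader, 2048) // throwaway key pair for the example
	if err != nil {
		panic(err)
	}

	signed, err := jwt.New(jwt.SigningMethodRS256).SignedString(priv)
	if err != nil {
		panic(err)
	}

	// Verification only needs the public half.
	tok, err := jwt.Parse(signed, func(t *jwt.Token) (interface{}, error) {
		return &priv.PublicKey, nil
	})
	fmt.Println("RS256 verified:", err == nil && tok.Valid)
}
```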
126
vendor/github.com/dgrijalva/jwt-go/rsa_pss.go
generated
vendored
Normal file
126
vendor/github.com/dgrijalva/jwt-go/rsa_pss.go
generated
vendored
Normal file
|
@ -0,0 +1,126 @@
|
|||
// +build go1.4
|
||||
|
||||
package jwt
|
||||
|
||||
import (
|
||||
"crypto"
|
||||
"crypto/rand"
|
||||
"crypto/rsa"
|
||||
)
|
||||
|
||||
// Implements the RSAPSS family of signing methods
|
||||
type SigningMethodRSAPSS struct {
|
||||
*SigningMethodRSA
|
||||
Options *rsa.PSSOptions
|
||||
}
|
||||
|
||||
// Specific instances for RS/PS and company
|
||||
var (
|
||||
SigningMethodPS256 *SigningMethodRSAPSS
|
||||
SigningMethodPS384 *SigningMethodRSAPSS
|
||||
SigningMethodPS512 *SigningMethodRSAPSS
|
||||
)
|
||||
|
||||
func init() {
|
||||
// PS256
|
||||
SigningMethodPS256 = &SigningMethodRSAPSS{
|
||||
&SigningMethodRSA{
|
||||
Name: "PS256",
|
||||
Hash: crypto.SHA256,
|
||||
},
|
||||
&rsa.PSSOptions{
|
||||
SaltLength: rsa.PSSSaltLengthAuto,
|
||||
Hash: crypto.SHA256,
|
||||
},
|
||||
}
|
||||
RegisterSigningMethod(SigningMethodPS256.Alg(), func() SigningMethod {
|
||||
return SigningMethodPS256
|
||||
})
|
||||
|
||||
// PS384
|
||||
SigningMethodPS384 = &SigningMethodRSAPSS{
|
||||
&SigningMethodRSA{
|
||||
Name: "PS384",
|
||||
Hash: crypto.SHA384,
|
||||
},
|
||||
&rsa.PSSOptions{
|
||||
SaltLength: rsa.PSSSaltLengthAuto,
|
||||
Hash: crypto.SHA384,
|
||||
},
|
||||
}
|
||||
RegisterSigningMethod(SigningMethodPS384.Alg(), func() SigningMethod {
|
||||
return SigningMethodPS384
|
||||
})
|
||||
|
||||
// PS512
|
||||
SigningMethodPS512 = &SigningMethodRSAPSS{
|
||||
&SigningMethodRSA{
|
||||
Name: "PS512",
|
||||
Hash: crypto.SHA512,
|
||||
},
|
||||
&rsa.PSSOptions{
|
||||
SaltLength: rsa.PSSSaltLengthAuto,
|
||||
Hash: crypto.SHA512,
|
||||
},
|
||||
}
|
||||
RegisterSigningMethod(SigningMethodPS512.Alg(), func() SigningMethod {
|
||||
return SigningMethodPS512
|
||||
})
|
||||
}
|
||||
|
||||
// Implements the Verify method from SigningMethod
|
||||
// For this verify method, key must be an rsa.PublicKey struct
|
||||
func (m *SigningMethodRSAPSS) Verify(signingString, signature string, key interface{}) error {
|
||||
var err error
|
||||
|
||||
// Decode the signature
|
||||
var sig []byte
|
||||
if sig, err = DecodeSegment(signature); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var rsaKey *rsa.PublicKey
|
||||
switch k := key.(type) {
|
||||
case *rsa.PublicKey:
|
||||
rsaKey = k
|
||||
default:
|
||||
return ErrInvalidKey
|
||||
}
|
||||
|
||||
// Create hasher
|
||||
if !m.Hash.Available() {
|
||||
return ErrHashUnavailable
|
||||
}
|
||||
hasher := m.Hash.New()
|
||||
hasher.Write([]byte(signingString))
|
||||
|
||||
return rsa.VerifyPSS(rsaKey, m.Hash, hasher.Sum(nil), sig, m.Options)
|
||||
}
|
||||
|
||||
// Implements the Sign method from SigningMethod
|
||||
// For this signing method, key must be an rsa.PrivateKey struct
|
||||
func (m *SigningMethodRSAPSS) Sign(signingString string, key interface{}) (string, error) {
|
||||
var rsaKey *rsa.PrivateKey
|
||||
|
||||
switch k := key.(type) {
|
||||
case *rsa.PrivateKey:
|
||||
rsaKey = k
|
||||
default:
|
||||
return "", ErrInvalidKeyType
|
||||
}
|
||||
|
||||
// Create the hasher
|
||||
if !m.Hash.Available() {
|
||||
return "", ErrHashUnavailable
|
||||
}
|
||||
|
||||
hasher := m.Hash.New()
|
||||
hasher.Write([]byte(signingString))
|
||||
|
||||
// Sign the string and return the encoded bytes
|
||||
if sigBytes, err := rsa.SignPSS(rand.Reader, rsaKey, m.Hash, hasher.Sum(nil), m.Options); err == nil {
|
||||
return EncodeSegment(sigBytes), nil
|
||||
} else {
|
||||
return "", err
|
||||
}
|
||||
}
|
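PS256 reuses the same RSA key material as RS256; only the PSS padding options differ. A brief sketch with an in-memory key, purely for illustration:

```go
package main

import (
	"crypto/rand"
	"crypto/rsa"
	"fmt"

	jwt "github.com/dgrijalva/jwt-go"
)

func main() {
	priv, err := rsa.GenerateKey(rand.Reader, 2048) // illustrative key pair
	if err != nil {
		panic(err)
	}

	signed, err := jwt.New(jwt.SigningMethodPS256).SignedString(priv)
	if err != nil {
		panic(err)
	}

	tok, err := jwt.Parse(signed, func(t *jwt.Token) (interface{}, error) {
		return &priv.PublicKey, nil
	})
	fmt.Println("PS256 verified:", err == nil && tok.Valid)
}
```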
69
vendor/github.com/dgrijalva/jwt-go/rsa_utils.go
generated
vendored
Normal file
69
vendor/github.com/dgrijalva/jwt-go/rsa_utils.go
generated
vendored
Normal file
|
@ -0,0 +1,69 @@
|
|||
package jwt
|
||||
|
||||
import (
|
||||
"crypto/rsa"
|
||||
"crypto/x509"
|
||||
"encoding/pem"
|
||||
"errors"
|
||||
)
|
||||
|
||||
var (
|
||||
ErrKeyMustBePEMEncoded = errors.New("Invalid Key: Key must be PEM encoded PKCS1 or PKCS8 private key")
|
||||
ErrNotRSAPrivateKey = errors.New("Key is not a valid RSA private key")
|
||||
ErrNotRSAPublicKey = errors.New("Key is not a valid RSA public key")
|
||||
)
|
||||
|
||||
// Parse PEM encoded PKCS1 or PKCS8 private key
|
||||
func ParseRSAPrivateKeyFromPEM(key []byte) (*rsa.PrivateKey, error) {
|
||||
var err error
|
||||
|
||||
// Parse PEM block
|
||||
var block *pem.Block
|
||||
if block, _ = pem.Decode(key); block == nil {
|
||||
return nil, ErrKeyMustBePEMEncoded
|
||||
}
|
||||
|
||||
var parsedKey interface{}
|
||||
if parsedKey, err = x509.ParsePKCS1PrivateKey(block.Bytes); err != nil {
|
||||
if parsedKey, err = x509.ParsePKCS8PrivateKey(block.Bytes); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
var pkey *rsa.PrivateKey
|
||||
var ok bool
|
||||
if pkey, ok = parsedKey.(*rsa.PrivateKey); !ok {
|
||||
return nil, ErrNotRSAPrivateKey
|
||||
}
|
||||
|
||||
return pkey, nil
|
||||
}
|
||||
|
||||
// Parse a PEM encoded public key (PKIX block or certificate)
|
||||
func ParseRSAPublicKeyFromPEM(key []byte) (*rsa.PublicKey, error) {
|
||||
var err error
|
||||
|
||||
// Parse PEM block
|
||||
var block *pem.Block
|
||||
if block, _ = pem.Decode(key); block == nil {
|
||||
return nil, ErrKeyMustBePEMEncoded
|
||||
}
|
||||
|
||||
// Parse the key
|
||||
var parsedKey interface{}
|
||||
if parsedKey, err = x509.ParsePKIXPublicKey(block.Bytes); err != nil {
|
||||
if cert, err := x509.ParseCertificate(block.Bytes); err == nil {
|
||||
parsedKey = cert.PublicKey
|
||||
} else {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
var pkey *rsa.PublicKey
|
||||
var ok bool
|
||||
if pkey, ok = parsedKey.(*rsa.PublicKey); !ok {
|
||||
return nil, ErrNotRSAPublicKey
|
||||
}
|
||||
|
||||
return pkey, nil
|
||||
}
|
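A sketch of the PEM helpers: the key is generated and PEM-encoded in memory so the example stays self-contained; production code would read the bytes from a file.

```go
package main

import (
	"crypto/rand"
	"crypto/rsa"
	"crypto/x509"
	"encoding/pem"
	"fmt"

	jwt "github.com/dgrijalva/jwt-go"
)

func main() {
	priv, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		panic(err)
	}

	// Wrap the key in a PKCS#1 PEM block, as a key file on disk would be.
	pemBytes := pem.EncodeToMemory(&pem.Block{
		Type:  "RSA PRIVATE KEY",
		Bytes: x509.MarshalPKCS1PrivateKey(priv),
	})

	parsed, err := jwt.ParseRSAPrivateKeyFromPEM(pemBytes)
	if err != nil {
		panic(err)
	}
	fmt.Println("parsed RSA private key, bits:", parsed.N.BitLen())
}
```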
35
vendor/github.com/dgrijalva/jwt-go/signing_method.go
generated
vendored
Normal file
35
vendor/github.com/dgrijalva/jwt-go/signing_method.go
generated
vendored
Normal file
|
@ -0,0 +1,35 @@
|
|||
package jwt
|
||||
|
||||
import (
|
||||
"sync"
|
||||
)
|
||||
|
||||
var signingMethods = map[string]func() SigningMethod{}
|
||||
var signingMethodLock = new(sync.RWMutex)
|
||||
|
||||
// Implement SigningMethod to add new methods for signing or verifying tokens.
|
||||
type SigningMethod interface {
|
||||
Verify(signingString, signature string, key interface{}) error // Returns nil if signature is valid
|
||||
Sign(signingString string, key interface{}) (string, error) // Returns encoded signature or error
|
||||
Alg() string // returns the alg identifier for this method (example: 'HS256')
|
||||
}
|
||||
|
||||
// Register the "alg" name and a factory function for signing method.
|
||||
// This is typically done during init() in the method's implementation
|
||||
func RegisterSigningMethod(alg string, f func() SigningMethod) {
|
||||
signingMethodLock.Lock()
|
||||
defer signingMethodLock.Unlock()
|
||||
|
||||
signingMethods[alg] = f
|
||||
}
|
||||
|
||||
// Get a signing method from an "alg" string
|
||||
func GetSigningMethod(alg string) (method SigningMethod) {
|
||||
signingMethodLock.RLock()
|
||||
defer signingMethodLock.RUnlock()
|
||||
|
||||
if methodF, ok := signingMethods[alg]; ok {
|
||||
method = methodF()
|
||||
}
|
||||
return
|
||||
}
|
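A sketch of how the registry is meant to be used from the outside: register a factory under an "alg" name during init-time setup, then resolve it the way the parser does. The no-op method below is purely illustrative and performs no real cryptography.

```go
package main

import (
	"fmt"

	jwt "github.com/dgrijalva/jwt-go"
)

// noopMethod is a hypothetical SigningMethod used only to show registration;
// it must never be used for real tokens.
type noopMethod struct{}

func (noopMethod) Alg() string { return "NOOP" }

func (noopMethod) Sign(signingString string, key interface{}) (string, error) {
	return jwt.EncodeSegment([]byte("noop")), nil // not a real signature
}

func (noopMethod) Verify(signingString, signature string, key interface{}) error {
	return nil // accepts everything; illustration only
}

func main() {
	jwt.RegisterSigningMethod("NOOP", func() jwt.SigningMethod { return noopMethod{} })

	// GetSigningMethod is what the parser uses to resolve the token's "alg" header.
	fmt.Println("resolved:", jwt.GetSigningMethod("NOOP").Alg())
}
```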
108
vendor/github.com/dgrijalva/jwt-go/token.go
generated
vendored
Normal file
108
vendor/github.com/dgrijalva/jwt-go/token.go
generated
vendored
Normal file
|
@ -0,0 +1,108 @@
|
|||
package jwt
|
||||
|
||||
import (
|
||||
"encoding/base64"
|
||||
"encoding/json"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// TimeFunc provides the current time when parsing token to validate "exp" claim (expiration time).
|
||||
// You can override it to use another time value. This is useful for testing or if your
|
||||
// server uses a different time zone than your tokens.
|
||||
var TimeFunc = time.Now
|
||||
|
||||
// Parse methods use this callback function to supply
|
||||
// the key for verification. The function receives the parsed,
|
||||
// but unverified Token. This allows you to use properties in the
|
||||
// Header of the token (such as `kid`) to identify which key to use.
|
||||
type Keyfunc func(*Token) (interface{}, error)
|
||||
|
||||
// A JWT Token. Different fields will be used depending on whether you're
|
||||
// creating or parsing/verifying a token.
|
||||
type Token struct {
|
||||
Raw string // The raw token. Populated when you Parse a token
|
||||
Method SigningMethod // The signing method used or to be used
|
||||
Header map[string]interface{} // The first segment of the token
|
||||
Claims Claims // The second segment of the token
|
||||
Signature string // The third segment of the token. Populated when you Parse a token
|
||||
Valid bool // Is the token valid? Populated when you Parse/Verify a token
|
||||
}
|
||||
|
||||
// Create a new Token. Takes a signing method
|
||||
func New(method SigningMethod) *Token {
|
||||
return NewWithClaims(method, MapClaims{})
|
||||
}
|
||||
|
||||
func NewWithClaims(method SigningMethod, claims Claims) *Token {
|
||||
return &Token{
|
||||
Header: map[string]interface{}{
|
||||
"typ": "JWT",
|
||||
"alg": method.Alg(),
|
||||
},
|
||||
Claims: claims,
|
||||
Method: method,
|
||||
}
|
||||
}
|
||||
|
||||
// Get the complete, signed token
|
||||
func (t *Token) SignedString(key interface{}) (string, error) {
|
||||
var sig, sstr string
|
||||
var err error
|
||||
if sstr, err = t.SigningString(); err != nil {
|
||||
return "", err
|
||||
}
|
||||
if sig, err = t.Method.Sign(sstr, key); err != nil {
|
||||
return "", err
|
||||
}
|
||||
return strings.Join([]string{sstr, sig}, "."), nil
|
||||
}
|
||||
|
||||
// Generate the signing string. This is the
|
||||
// most expensive part of the whole deal. Unless you
|
||||
// need this for something special, just go straight for
|
||||
// the SignedString.
|
||||
func (t *Token) SigningString() (string, error) {
|
||||
var err error
|
||||
parts := make([]string, 2)
|
||||
for i, _ := range parts {
|
||||
var jsonValue []byte
|
||||
if i == 0 {
|
||||
if jsonValue, err = json.Marshal(t.Header); err != nil {
|
||||
return "", err
|
||||
}
|
||||
} else {
|
||||
if jsonValue, err = json.Marshal(t.Claims); err != nil {
|
||||
return "", err
|
||||
}
|
||||
}
|
||||
|
||||
parts[i] = EncodeSegment(jsonValue)
|
||||
}
|
||||
return strings.Join(parts, "."), nil
|
||||
}
|
||||
|
||||
// Parse, validate, and return a token.
|
||||
// keyFunc will receive the parsed token and should return the key for validating.
|
||||
// If everything is kosher, err will be nil
|
||||
func Parse(tokenString string, keyFunc Keyfunc) (*Token, error) {
|
||||
return new(Parser).Parse(tokenString, keyFunc)
|
||||
}
|
||||
|
||||
func ParseWithClaims(tokenString string, claims Claims, keyFunc Keyfunc) (*Token, error) {
|
||||
return new(Parser).ParseWithClaims(tokenString, claims, keyFunc)
|
||||
}
|
||||
|
||||
// Encode JWT specific base64url encoding with padding stripped
|
||||
func EncodeSegment(seg []byte) string {
|
||||
return strings.TrimRight(base64.URLEncoding.EncodeToString(seg), "=")
|
||||
}
|
||||
|
||||
// Decode JWT specific base64url encoding with padding stripped
|
||||
func DecodeSegment(seg string) ([]byte, error) {
|
||||
if l := len(seg) % 4; l > 0 {
|
||||
seg += strings.Repeat("=", 4-l)
|
||||
}
|
||||
|
||||
return base64.URLEncoding.DecodeString(seg)
|
||||
}
|
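Putting the pieces together, a sketch of the full round trip: NewWithClaims builds the header and claims, SignedString encodes and signs them, and Parse hands the unverified token to a Keyfunc that returns the verification key. Issuer and secret are placeholders.

```go
package main

import (
	"fmt"
	"time"

	jwt "github.com/dgrijalva/jwt-go"
)

func main() {
	key := []byte("shared-secret") // hypothetical key

	token := jwt.NewWithClaims(jwt.SigningMethodHS256, jwt.MapClaims{
		"iss": "example-issuer",
		"exp": time.Now().Add(time.Hour).Unix(),
	})
	signed, err := token.SignedString(key)
	if err != nil {
		panic(err)
	}

	parsed, err := jwt.Parse(signed, func(t *jwt.Token) (interface{}, error) {
		// The token is parsed but not yet verified; check the alg before handing out a key.
		if t.Method.Alg() != "HS256" {
			return nil, fmt.Errorf("unexpected alg %q", t.Method.Alg())
		}
		return key, nil
	})
	fmt.Println("valid:", err == nil && parsed.Valid)
}
```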
22
vendor/github.com/emicklei/go-restful-swagger12/LICENSE
generated
vendored
Normal file
22
vendor/github.com/emicklei/go-restful-swagger12/LICENSE
generated
vendored
Normal file
|
@ -0,0 +1,22 @@
|
|||
Copyright (c) 2017 Ernest Micklei
|
||||
|
||||
MIT License
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining
|
||||
a copy of this software and associated documentation files (the
|
||||
"Software"), to deal in the Software without restriction, including
|
||||
without limitation the rights to use, copy, modify, merge, publish,
|
||||
distribute, sublicense, and/or sell copies of the Software, and to
|
||||
permit persons to whom the Software is furnished to do so, subject to
|
||||
the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be
|
||||
included in all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
||||
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
|
||||
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
|
||||
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
|
||||
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
|
@ -1,10 +1,15 @@
|
|||
# go-restful-swagger12
|
||||
|
||||
[![Build Status](https://travis-ci.org/emicklei/go-restful-swagger12.png)](https://travis-ci.org/emicklei/go-restful-swagger12)
|
||||
[![GoDoc](https://godoc.org/github.com/emicklei/go-restful-swagger12?status.svg)](https://godoc.org/github.com/emicklei/go-restful-swagger12)
|
||||
|
||||
How to use Swagger UI with go-restful
|
||||
=
|
||||
|
||||
Get the Swagger UI sources (version 1.2 only)
|
||||
|
||||
git clone https://github.com/wordnik/swagger-ui.git
|
||||
|
||||
|
||||
The project contains a "dist" folder.
|
||||
Its contents have all the Swagger UI files you need.
|
||||
|
||||
|
@ -18,9 +23,9 @@ Now, you can install the Swagger WebService for serving the Swagger specificatio
|
|||
ApiPath: "/apidocs.json",
|
||||
SwaggerPath: "/apidocs/",
|
||||
SwaggerFilePath: "/Users/emicklei/Projects/swagger-ui/dist"}
|
||||
swagger.InstallSwaggerService(config)
|
||||
|
||||
|
||||
swagger.InstallSwaggerService(config)
|
||||
|
||||
|
||||
Documenting Structs
|
||||
--
|
||||
|
||||
|
@ -72,5 +77,7 @@ This example will generate a JSON like this
|
|||
|
||||
Notes
|
||||
--
|
||||
- The Nickname of an Operation is automatically set by finding the name of the function. You can override it using RouteBuilder.Operation(..)
|
||||
- The Nickname of an Operation is automatically set by finding the name of the function. You can override it using RouteBuilder.Operation(..)
|
||||
- The WebServices field of swagger.Config can be used to control which service you want to expose and document ; you can have multiple configs and therefore multiple endpoints.
|
||||
|
||||
© 2017, ernestmicklei.com. MIT License. Contributions welcome.
|
|
@ -226,6 +226,9 @@ func (sws SwaggerService) composeDeclaration(ws *restful.WebService, pathPrefix
|
|||
pathToRoutes := newOrderedRouteMap()
|
||||
for _, other := range ws.Routes() {
|
||||
if strings.HasPrefix(other.Path, pathPrefix) {
|
||||
if len(pathPrefix) > 1 && len(other.Path) > len(pathPrefix) && other.Path[len(pathPrefix)] != '/' {
|
||||
continue
|
||||
}
|
||||
pathToRoutes.Add(other.Path, other)
|
||||
}
|
||||
}
|
||||
|
@ -290,13 +293,12 @@ func composeResponseMessages(route restful.Route, decl *ApiDeclaration, config *
|
|||
if each.Model != nil {
|
||||
st := reflect.TypeOf(each.Model)
|
||||
isCollection, st := detectCollectionType(st)
|
||||
modelName := modelBuilder{}.keyFrom(st)
|
||||
if isCollection {
|
||||
modelName = "array[" + modelName + "]"
|
||||
// collection cannot be in responsemodel
|
||||
if !isCollection {
|
||||
modelName := modelBuilder{}.keyFrom(st)
|
||||
modelBuilder{Models: &decl.Models, Config: config}.addModel(st, "")
|
||||
message.ResponseModel = modelName
|
||||
}
|
||||
modelBuilder{Models: &decl.Models, Config: config}.addModel(st, "")
|
||||
// reference the model
|
||||
message.ResponseModel = modelName
|
||||
}
|
||||
messages = append(messages, message)
|
||||
}
|
||||
|
@ -331,12 +333,13 @@ func detectCollectionType(st reflect.Type) (bool, reflect.Type) {
|
|||
|
||||
// addModelFromSample creates and adds (or overwrites) a Model from a sample resource
|
||||
func (sws SwaggerService) addModelFromSampleTo(operation *Operation, isResponse bool, sample interface{}, models *ModelList) {
|
||||
mb := modelBuilder{Models: models, Config: &sws.config}
|
||||
if isResponse {
|
||||
type_, items := asDataType(sample, &sws.config)
|
||||
operation.Type = type_
|
||||
sampleType, items := asDataType(sample, &sws.config)
|
||||
operation.Type = sampleType
|
||||
operation.Items = items
|
||||
}
|
||||
modelBuilder{Models: models, Config: &sws.config}.addModelFrom(sample)
|
||||
mb.addModelFrom(sample)
|
||||
}
|
||||
|
||||
func asSwaggerParameter(param restful.ParameterData) Parameter {
|
24
vendor/github.com/emicklei/go-restful/README.md
generated
vendored
24
vendor/github.com/emicklei/go-restful/README.md
generated
vendored
|
@ -1,8 +1,13 @@
|
|||
go-restful
|
||||
==========
|
||||
|
||||
package for building REST-style Web Services using Google Go
|
||||
|
||||
[![Build Status](https://travis-ci.org/emicklei/go-restful.png)](https://travis-ci.org/emicklei/go-restful)
|
||||
[![Go Report Card](https://goreportcard.com/badge/github.com/emicklei/go-restful)](https://goreportcard.com/report/github.com/emicklei/go-restful)
|
||||
[![GoDoc](https://godoc.org/github.com/emicklei/go-restful?status.svg)](https://godoc.org/github.com/emicklei/go-restful)
|
||||
|
||||
- [Code examples](https://github.com/emicklei/go-restful/tree/master/examples)
|
||||
|
||||
REST asks developers to use HTTP methods explicitly and in a way that's consistent with the protocol definition. This basic REST design principle establishes a one-to-one mapping between create, read, update, and delete (CRUD) operations and HTTP methods. According to this mapping:
|
||||
|
||||
- GET = Retrieve a representation of a resource
|
||||
|
@ -40,35 +45,30 @@ func (u UserResource) findUser(request *restful.Request, response *restful.Respo
|
|||
|
||||
- Routes for request → function mapping with path parameter (e.g. {id}) support
|
||||
- Configurable router:
|
||||
- Routing algorithm after [JSR311](http://jsr311.java.net/nonav/releases/1.1/spec/spec.html) that is implemented using (but does **not** accept) regular expressions (See RouterJSR311 which is used by default)
|
||||
- Fast routing algorithm that allows static elements, regular expressions and dynamic parameters in the URL path (e.g. /meetings/{id} or /static/{subpath:*}, See CurlyRouter)
|
||||
- (default) Fast routing algorithm that allows static elements, regular expressions and dynamic parameters in the URL path (e.g. /meetings/{id} or /static/{subpath:*}
|
||||
- Routing algorithm after [JSR311](http://jsr311.java.net/nonav/releases/1.1/spec/spec.html) that is implemented using (but does **not** accept) regular expressions
|
||||
- Request API for reading structs from JSON/XML and accessing parameters (path,query,header)
|
||||
- Response API for writing structs to JSON/XML and setting headers
|
||||
- Customizable encoding using EntityReaderWriter registration
|
||||
- Filters for intercepting the request → response flow on Service or Route level
|
||||
- Request-scoped variables using attributes
|
||||
- Containers for WebServices on different HTTP endpoints
|
||||
- Content encoding (gzip,deflate) of request and response payloads
|
||||
- Automatic responses on OPTIONS (using a filter)
|
||||
- Automatic CORS request handling (using a filter)
|
||||
- API declaration for Swagger UI (see swagger package)
|
||||
- API declaration for Swagger UI (see [go-restful-swagger12](https://github.com/emicklei/go-restful-swagger12),[go-restful-openapi](https://github.com/emicklei/go-restful-openapi))
|
||||
- Panic recovery to produce HTTP 500, customizable using RecoverHandler(...)
|
||||
- Route errors produce HTTP 404/405/406/415 errors, customizable using ServiceErrorHandler(...)
|
||||
- Configurable (trace) logging
|
||||
- Customizable encoding using EntityReaderWriter registration
|
||||
- Customizable gzip/deflate readers and writers using CompressorProvider registration
|
||||
|
||||
### Resources
|
||||
|
||||
- [Documentation on godoc.org](http://godoc.org/github.com/emicklei/go-restful)
|
||||
- [Code examples](https://github.com/emicklei/go-restful/tree/master/examples)
|
||||
- [Example posted on blog](http://ernestmicklei.com/2012/11/go-restful-first-working-example/)
|
||||
- [Design explained on blog](http://ernestmicklei.com/2012/11/go-restful-api-design/)
|
||||
- [sourcegraph](https://sourcegraph.com/github.com/emicklei/go-restful)
|
||||
- [gopkg.in](https://gopkg.in/emicklei/go-restful.v1)
|
||||
- [showcase: Mora - MongoDB REST Api server](https://github.com/emicklei/mora)
|
||||
|
||||
[![Build Status](https://drone.io/github.com/emicklei/go-restful/status.png)](https://drone.io/github.com/emicklei/go-restful/latest)
|
||||
|
||||
(c) 2012 - 2015, http://ernestmicklei.com. MIT License
|
||||
|
||||
Type ```git shortlog -s``` for a full list of contributors.
|
||||
|
||||
© 2012 - 2017, http://ernestmicklei.com. MIT License. Contributions are welcome.
|
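A minimal sketch of the route-to-function mapping the README describes, assuming the package import path above; the /users path, handler, and port are illustrative.

```go
package main

import (
	"net/http"

	restful "github.com/emicklei/go-restful"
)

// findUser illustrates the request → function mapping with a {id} path parameter.
func findUser(req *restful.Request, resp *restful.Response) {
	id := req.PathParameter("id")
	resp.WriteEntity(map[string]string{"id": id})
}

func main() {
	ws := new(restful.WebService)
	ws.Path("/users").
		Consumes(restful.MIME_JSON).
		Produces(restful.MIME_JSON)

	// {id} is a dynamic path element handled by the (now default) CurlyRouter.
	ws.Route(ws.GET("/{id}").To(findUser))

	restful.Add(ws) // register on the default container
	http.ListenAndServe(":8080", nil)
}
```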
1
vendor/github.com/emicklei/go-restful/compressors.go
generated
vendored
1
vendor/github.com/emicklei/go-restful/compressors.go
generated
vendored
|
@ -9,6 +9,7 @@ import (
|
|||
"compress/zlib"
|
||||
)
|
||||
|
||||
// CompressorProvider describes a component that can provide compressors for the std methods.
|
||||
type CompressorProvider interface {
|
||||
// Returns a *gzip.Writer which needs to be released later.
|
||||
// Before using it, call Reset().
|
||||
|
|
21
vendor/github.com/emicklei/go-restful/container.go
generated
vendored
21
vendor/github.com/emicklei/go-restful/container.go
generated
vendored
|
@ -32,7 +32,7 @@ type Container struct {
|
|||
contentEncodingEnabled bool // default is false
|
||||
}
|
||||
|
||||
// NewContainer creates a new Container using a new ServeMux and default router (RouterJSR311)
|
||||
// NewContainer creates a new Container using a new ServeMux and default router (CurlyRouter)
|
||||
func NewContainer() *Container {
|
||||
return &Container{
|
||||
webServices: []*WebService{},
|
||||
|
@ -74,7 +74,7 @@ func (c *Container) DoNotRecover(doNot bool) {
|
|||
c.doNotRecover = doNot
|
||||
}
|
||||
|
||||
// Router changes the default Router (currently RouterJSR311)
|
||||
// Router changes the default Router (currently CurlyRouter)
|
||||
func (c *Container) Router(aRouter RouteSelector) {
|
||||
c.router = aRouter
|
||||
}
|
||||
|
@ -188,6 +188,17 @@ func writeServiceError(err ServiceError, req *Request, resp *Response) {
|
|||
resp.WriteErrorString(err.Code, err.Message)
|
||||
}
|
||||
|
||||
// Dispatch the incoming Http Request to a matching WebService.
|
||||
func (c *Container) Dispatch(httpWriter http.ResponseWriter, httpRequest *http.Request) {
|
||||
if httpWriter == nil {
|
||||
panic("httpWriter cannot be nil")
|
||||
}
|
||||
if httpRequest == nil {
|
||||
panic("httpRequest cannot be nil")
|
||||
}
|
||||
c.dispatch(httpWriter, httpRequest)
|
||||
}
|
||||
|
||||
// Dispatch the incoming Http Request to a matching WebService.
|
||||
func (c *Container) dispatch(httpWriter http.ResponseWriter, httpRequest *http.Request) {
|
||||
writer := httpWriter
|
||||
|
@ -208,12 +219,6 @@ func (c *Container) dispatch(httpWriter http.ResponseWriter, httpRequest *http.R
|
|||
}
|
||||
}()
|
||||
}
|
||||
// Install closing the request body (if any)
|
||||
defer func() {
|
||||
if nil != httpRequest.Body {
|
||||
httpRequest.Body.Close()
|
||||
}
|
||||
}()
|
||||
|
||||
// Detect if compression is needed
|
||||
// assume without compression, test for override
|
||||
|
|
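The newly exported Dispatch makes it possible to hand requests from an existing mux to a Container. A rough sketch, with the route and port purely illustrative:

```go
package main

import (
	"net/http"

	restful "github.com/emicklei/go-restful"
)

func main() {
	ws := new(restful.WebService)
	ws.Route(ws.GET("/healthz").To(func(req *restful.Request, resp *restful.Response) {
		resp.WriteHeader(http.StatusOK)
	}))

	container := restful.NewContainer()
	container.Add(ws)

	// Let a plain ServeMux delegate matching requests to the container.
	mux := http.NewServeMux()
	mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		container.Dispatch(w, r)
	})
	http.ListenAndServe(":8080", mux)
}
```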
2
vendor/github.com/emicklei/go-restful/doc.go
generated
vendored
2
vendor/github.com/emicklei/go-restful/doc.go
generated
vendored
|
@ -1,5 +1,5 @@
|
|||
/*
|
||||
Package restful, a lean package for creating REST-style WebServices without magic.
|
||||
Package restful , a lean package for creating REST-style WebServices without magic.
|
||||
|
||||
WebServices and Routes
|
||||
|
||||
|
|
5
vendor/github.com/emicklei/go-restful/log/log.go
generated
vendored
5
vendor/github.com/emicklei/go-restful/log/log.go
generated
vendored
|
@ -5,7 +5,7 @@ import (
|
|||
"os"
|
||||
)
|
||||
|
||||
// Logger corresponds to a minimal subset of the interface satisfied by stdlib log.Logger
|
||||
// StdLogger corresponds to a minimal subset of the interface satisfied by stdlib log.Logger
|
||||
type StdLogger interface {
|
||||
Print(v ...interface{})
|
||||
Printf(format string, v ...interface{})
|
||||
|
@ -18,14 +18,17 @@ func init() {
|
|||
SetLogger(stdlog.New(os.Stderr, "[restful] ", stdlog.LstdFlags|stdlog.Lshortfile))
|
||||
}
|
||||
|
||||
// SetLogger sets the logger for this package
|
||||
func SetLogger(customLogger StdLogger) {
|
||||
Logger = customLogger
|
||||
}
|
||||
|
||||
// Print delegates to the Logger
|
||||
func Print(v ...interface{}) {
|
||||
Logger.Print(v...)
|
||||
}
|
||||
|
||||
// Printf delegates to the Logger
|
||||
func Printf(format string, v ...interface{}) {
|
||||
Logger.Printf(format, v...)
|
||||
}
|
||||
|
|
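A brief sketch of swapping the package logger via SetLogger; the prefix string is illustrative.

```go
package main

import (
	stdlog "log"
	"os"

	restfullog "github.com/emicklei/go-restful/log"
)

func main() {
	// Route go-restful's trace output through a custom stdlib logger.
	restfullog.SetLogger(stdlog.New(os.Stdout, "[restful-example] ", stdlog.LstdFlags))
	restfullog.Printf("listening on %s", ":8080")
}
```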
23
vendor/github.com/emicklei/go-restful/request.go
generated
vendored
23
vendor/github.com/emicklei/go-restful/request.go
generated
vendored
|
@ -5,20 +5,15 @@ package restful
|
|||
// that can be found in the LICENSE file.
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"compress/zlib"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
)
|
||||
|
||||
var defaultRequestContentType string
|
||||
|
||||
var doCacheReadEntityBytes = false
|
||||
|
||||
// Request is a wrapper for a http Request that provides convenience methods
|
||||
type Request struct {
|
||||
Request *http.Request
|
||||
bodyContent *[]byte // to cache the request body for multiple reads of ReadEntity
|
||||
pathParameters map[string]string
|
||||
attributes map[string]interface{} // for storing request-scoped values
|
||||
selectedRoutePath string // root path + route path that matched the request, e.g. /meetings/{id}/attendees
|
||||
|
@ -41,12 +36,6 @@ func DefaultRequestContentType(mime string) {
|
|||
defaultRequestContentType = mime
|
||||
}
|
||||
|
||||
// SetCacheReadEntity controls whether the response data ([]byte) is cached such that ReadEntity is repeatable.
|
||||
// Default is true (due to backward compatibility). For better performance, you should set it to false if you don't need it.
|
||||
func SetCacheReadEntity(doCache bool) {
|
||||
doCacheReadEntityBytes = doCache
|
||||
}
|
||||
|
||||
// PathParameter accesses the Path parameter value by its name
|
||||
func (r *Request) PathParameter(name string) string {
|
||||
return r.pathParameters[name]
|
||||
|
@ -81,18 +70,6 @@ func (r *Request) ReadEntity(entityPointer interface{}) (err error) {
|
|||
contentType := r.Request.Header.Get(HEADER_ContentType)
|
||||
contentEncoding := r.Request.Header.Get(HEADER_ContentEncoding)
|
||||
|
||||
// OLD feature, cache the body for reads
|
||||
if doCacheReadEntityBytes {
|
||||
if r.bodyContent == nil {
|
||||
data, err := ioutil.ReadAll(r.Request.Body)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
r.bodyContent = &data
|
||||
}
|
||||
r.Request.Body = ioutil.NopCloser(bytes.NewReader(*r.bodyContent))
|
||||
}
|
||||
|
||||
// check if the request body needs decompression
|
||||
if ENCODING_GZIP == contentEncoding {
|
||||
gzipReader := currentCompressorProvider.AcquireGzipReader()
|
||||
|
|
5
vendor/github.com/emicklei/go-restful/response.go
generated
vendored
5
vendor/github.com/emicklei/go-restful/response.go
generated
vendored
|
@ -9,7 +9,7 @@ import (
|
|||
"net/http"
|
||||
)
|
||||
|
||||
// DEPRECATED, use DefaultResponseContentType(mime)
|
||||
// DefaultResponseMimeType is DEPRECATED, use DefaultResponseContentType(mime)
|
||||
var DefaultResponseMimeType string
|
||||
|
||||
//PrettyPrintResponses controls the indentation feature of XML and JSON serialization
|
||||
|
@ -27,11 +27,12 @@ type Response struct {
|
|||
err error // err property is kept when WriteError is called
|
||||
}
|
||||
|
||||
// Creates a new response based on a http ResponseWriter.
|
||||
// NewResponse creates a new response based on a http ResponseWriter.
|
||||
func NewResponse(httpWriter http.ResponseWriter) *Response {
|
||||
return &Response{httpWriter, "", []string{}, http.StatusOK, 0, PrettyPrintResponses, nil} // empty content-types
|
||||
}
|
||||
|
||||
// DefaultResponseContentType set a default.
|
||||
// If Accept header matching fails, fall back to this type.
|
||||
// Valid values are restful.MIME_JSON and restful.MIME_XML
|
||||
// Example:
|
||||
|
|
3
vendor/github.com/emicklei/go-restful/route.go
generated
vendored
3
vendor/github.com/emicklei/go-restful/route.go
generated
vendored
|
@ -34,6 +34,9 @@ type Route struct {
|
|||
ParameterDocs []*Parameter
|
||||
ResponseErrors map[int]ResponseError
|
||||
ReadSample, WriteSample interface{} // structs that model an example request or response payload
|
||||
|
||||
// Extra information used to store custom information about the route.
|
||||
Metadata map[string]interface{}
|
||||
}
|
||||
|
||||
// Initialize for Route
|
||||
|
|
69
vendor/github.com/emicklei/go-restful/route_builder.go
generated
vendored
69
vendor/github.com/emicklei/go-restful/route_builder.go
generated
vendored
|
@ -5,10 +5,12 @@ package restful
|
|||
// that can be found in the LICENSE file.
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"reflect"
|
||||
"runtime"
|
||||
"strings"
|
||||
"sync/atomic"
|
||||
|
||||
"github.com/emicklei/go-restful/log"
|
||||
)
|
||||
|
@ -22,6 +24,9 @@ type RouteBuilder struct {
|
|||
httpMethod string // required
|
||||
function RouteFunction // required
|
||||
filters []FilterFunction
|
||||
|
||||
typeNameHandleFunc TypeNameHandleFunction // required
|
||||
|
||||
// documentation
|
||||
doc string
|
||||
notes string
|
||||
|
@ -29,6 +34,7 @@ type RouteBuilder struct {
|
|||
readSample, writeSample interface{}
|
||||
parameters []*Parameter
|
||||
errorMap map[int]ResponseError
|
||||
metadata map[string]interface{}
|
||||
}
|
||||
|
||||
// Do evaluates each argument with the RouteBuilder itself.
|
||||
|
@ -92,8 +98,13 @@ func (b *RouteBuilder) Notes(notes string) *RouteBuilder {
|
|||
// Reads tells what resource type will be read from the request payload. Optional.
|
||||
// A parameter of type "body" is added, required is set to true and the dataType is set to the qualified name of the sample's type.
|
||||
func (b *RouteBuilder) Reads(sample interface{}) *RouteBuilder {
|
||||
fn := b.typeNameHandleFunc
|
||||
if fn == nil {
|
||||
fn = reflectTypeName
|
||||
}
|
||||
typeAsName := fn(sample)
|
||||
|
||||
b.readSample = sample
|
||||
typeAsName := reflect.TypeOf(sample).String()
|
||||
bodyParameter := &Parameter{&ParameterData{Name: "body"}}
|
||||
bodyParameter.beBody()
|
||||
bodyParameter.Required(true)
|
||||
|
@ -145,9 +156,10 @@ func (b *RouteBuilder) ReturnsError(code int, message string, model interface{})
|
|||
// The model parameter is optional ; either pass a struct instance or use nil if not applicable.
|
||||
func (b *RouteBuilder) Returns(code int, message string, model interface{}) *RouteBuilder {
|
||||
err := ResponseError{
|
||||
Code: code,
|
||||
Message: message,
|
||||
Model: model,
|
||||
Code: code,
|
||||
Message: message,
|
||||
Model: model,
|
||||
IsDefault: false,
|
||||
}
|
||||
// lazy init because there is no NewRouteBuilder (yet)
|
||||
if b.errorMap == nil {
|
||||
|
@ -157,10 +169,36 @@ func (b *RouteBuilder) Returns(code int, message string, model interface{}) *Rou
|
|||
return b
|
||||
}
|
||||
|
||||
// DefaultReturns is a special Returns call that sets the default of the response ; the code is zero.
|
||||
func (b *RouteBuilder) DefaultReturns(message string, model interface{}) *RouteBuilder {
|
||||
b.Returns(0, message, model)
|
||||
// Modify the ResponseError just added/updated
|
||||
re := b.errorMap[0]
|
||||
// errorMap is initialized
|
||||
b.errorMap[0] = ResponseError{
|
||||
Code: re.Code,
|
||||
Message: re.Message,
|
||||
Model: re.Model,
|
||||
IsDefault: true,
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
||||
// Metadata adds or updates a key=value pair to the metadata map.
|
||||
func (b *RouteBuilder) Metadata(key string, value interface{}) *RouteBuilder {
|
||||
if b.metadata == nil {
|
||||
b.metadata = map[string]interface{}{}
|
||||
}
|
||||
b.metadata[key] = value
|
||||
return b
|
||||
}
|
||||
|
||||
// ResponseError represents a response; not necessarily an error.
|
||||
type ResponseError struct {
|
||||
Code int
|
||||
Message string
|
||||
Model interface{}
|
||||
Code int
|
||||
Message string
|
||||
Model interface{}
|
||||
IsDefault bool
|
||||
}
|
||||
|
||||
func (b *RouteBuilder) servicePath(path string) *RouteBuilder {
|
||||
|
@ -186,6 +224,13 @@ func (b *RouteBuilder) copyDefaults(rootProduces, rootConsumes []string) {
|
|||
}
|
||||
}
|
||||
|
||||
// typeNameHandler sets the function that will convert types to strings in the parameter
|
||||
// and model definitions.
|
||||
func (b *RouteBuilder) typeNameHandler(handler TypeNameHandleFunction) *RouteBuilder {
|
||||
b.typeNameHandleFunc = handler
|
||||
return b
|
||||
}
|
||||
|
||||
// Build creates a new Route using the specification details collected by the RouteBuilder
|
||||
func (b *RouteBuilder) Build() Route {
|
||||
pathExpr, err := newPathExpression(b.currentPath)
|
||||
|
@ -217,7 +262,8 @@ func (b *RouteBuilder) Build() Route {
|
|||
ParameterDocs: b.parameters,
|
||||
ResponseErrors: b.errorMap,
|
||||
ReadSample: b.readSample,
|
||||
WriteSample: b.writeSample}
|
||||
WriteSample: b.writeSample,
|
||||
Metadata: b.metadata}
|
||||
route.postBuild()
|
||||
return route
|
||||
}
|
||||
|
@ -226,6 +272,8 @@ func concatPath(path1, path2 string) string {
|
|||
return strings.TrimRight(path1, "/") + "/" + strings.TrimLeft(path2, "/")
|
||||
}
|
||||
|
||||
var anonymousFuncCount int32
|
||||
|
||||
// nameOfFunction returns the short name of the function f for documentation.
|
||||
// It uses a runtime feature for debugging ; its value may change for later Go versions.
|
||||
func nameOfFunction(f interface{}) string {
|
||||
|
@ -236,5 +284,10 @@ func nameOfFunction(f interface{}) string {
|
|||
last = strings.TrimSuffix(last, ")-fm") // Go 1.5
|
||||
last = strings.TrimSuffix(last, "·fm") // < Go 1.5
|
||||
last = strings.TrimSuffix(last, "-fm") // Go 1.5
|
||||
if last == "func1" { // this could mean conflicts in API docs
|
||||
val := atomic.AddInt32(&anonymousFuncCount, 1)
|
||||
last = "func" + fmt.Sprintf("%d", val)
|
||||
atomic.StoreInt32(&anonymousFuncCount, val)
|
||||
}
|
||||
return last
|
||||
}
|
||||
|
|
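A sketch of the new RouteBuilder features in use: Returns and DefaultReturns document response codes, and Metadata attaches key/value pairs that end up in Route.Metadata for documentation generators. The User type, messages, and port are illustrative.

```go
package main

import (
	"net/http"

	restful "github.com/emicklei/go-restful"
)

// User is an illustrative payload type.
type User struct {
	ID   string `json:"id"`
	Name string `json:"name"`
}

func getUser(req *restful.Request, resp *restful.Response) {
	resp.WriteEntity(User{ID: req.PathParameter("id"), Name: "example"})
}

func main() {
	ws := new(restful.WebService)
	ws.Path("/users").Produces(restful.MIME_JSON)

	ws.Route(ws.GET("/{id}").To(getUser).
		Doc("get a user by id").
		Returns(http.StatusOK, "OK", User{}).
		Returns(http.StatusNotFound, "user not found", nil).
		DefaultReturns("unexpected error", nil).
		Metadata("audience", "internal")) // stored in Route.Metadata

	restful.Add(ws)
	http.ListenAndServe(":8080", nil)
}
```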
36
vendor/github.com/emicklei/go-restful/web_service.go
generated
vendored
36
vendor/github.com/emicklei/go-restful/web_service.go
generated
vendored
|
@ -3,6 +3,7 @@ package restful
|
|||
import (
|
||||
"errors"
|
||||
"os"
|
||||
"reflect"
|
||||
"sync"
|
||||
|
||||
"github.com/emicklei/go-restful/log"
|
||||
|
@ -24,6 +25,8 @@ type WebService struct {
|
|||
documentation string
|
||||
apiVersion string
|
||||
|
||||
typeNameHandleFunc TypeNameHandleFunction
|
||||
|
||||
dynamicRoutes bool
|
||||
|
||||
// protects 'routes' if dynamic routes are enabled
|
||||
|
@ -34,6 +37,25 @@ func (w *WebService) SetDynamicRoutes(enable bool) {
|
|||
w.dynamicRoutes = enable
|
||||
}
|
||||
|
||||
// TypeNameHandleFunction declares functions that can handle translating the name of a sample object
|
||||
// into the restful documentation for the service.
|
||||
type TypeNameHandleFunction func(sample interface{}) string
|
||||
|
||||
// TypeNameHandler sets the function that will convert types to strings in the parameter
|
||||
// and model definitions. If not set, the web service will invoke
|
||||
// reflect.TypeOf(object).String().
|
||||
func (w *WebService) TypeNameHandler(handler TypeNameHandleFunction) *WebService {
|
||||
w.typeNameHandleFunc = handler
|
||||
return w
|
||||
}
|
||||
|
||||
// reflectTypeName is the default TypeNameHandleFunction and for a given object
|
||||
// returns the name that Go identifies it with (e.g. "string" or "v1.Object") via
|
||||
// the reflection API.
|
||||
func reflectTypeName(sample interface{}) string {
|
||||
return reflect.TypeOf(sample).String()
|
||||
}
|
||||
|
||||
// compilePathExpression ensures that the path is compiled into a RegEx for those routers that need it.
|
||||
func (w *WebService) compilePathExpression() {
|
||||
compiled, err := newPathExpression(w.rootPath)
|
||||
|
@ -174,7 +196,7 @@ func (w *WebService) RemoveRoute(path, method string) error {
|
|||
|
||||
// Method creates a new RouteBuilder and initialize its http method
|
||||
func (w *WebService) Method(httpMethod string) *RouteBuilder {
|
||||
return new(RouteBuilder).servicePath(w.rootPath).Method(httpMethod)
|
||||
return new(RouteBuilder).typeNameHandler(w.typeNameHandleFunc).servicePath(w.rootPath).Method(httpMethod)
|
||||
}
|
||||
|
||||
// Produces specifies that this WebService can produce one or more MIME types.
|
||||
|
@ -239,30 +261,30 @@ func (w *WebService) Documentation() string {
|
|||
|
||||
// HEAD is a shortcut for .Method("HEAD").Path(subPath)
|
||||
func (w *WebService) HEAD(subPath string) *RouteBuilder {
|
||||
return new(RouteBuilder).servicePath(w.rootPath).Method("HEAD").Path(subPath)
|
||||
return new(RouteBuilder).typeNameHandler(w.typeNameHandleFunc).servicePath(w.rootPath).Method("HEAD").Path(subPath)
|
||||
}
|
||||
|
||||
// GET is a shortcut for .Method("GET").Path(subPath)
|
||||
func (w *WebService) GET(subPath string) *RouteBuilder {
|
||||
return new(RouteBuilder).servicePath(w.rootPath).Method("GET").Path(subPath)
|
||||
return new(RouteBuilder).typeNameHandler(w.typeNameHandleFunc).servicePath(w.rootPath).Method("GET").Path(subPath)
|
||||
}
|
||||
|
||||
// POST is a shortcut for .Method("POST").Path(subPath)
|
||||
func (w *WebService) POST(subPath string) *RouteBuilder {
|
||||
return new(RouteBuilder).servicePath(w.rootPath).Method("POST").Path(subPath)
|
||||
return new(RouteBuilder).typeNameHandler(w.typeNameHandleFunc).servicePath(w.rootPath).Method("POST").Path(subPath)
|
||||
}
|
||||
|
||||
// PUT is a shortcut for .Method("PUT").Path(subPath)
|
||||
func (w *WebService) PUT(subPath string) *RouteBuilder {
|
||||
return new(RouteBuilder).servicePath(w.rootPath).Method("PUT").Path(subPath)
|
||||
return new(RouteBuilder).typeNameHandler(w.typeNameHandleFunc).servicePath(w.rootPath).Method("PUT").Path(subPath)
|
||||
}
|
||||
|
||||
// PATCH is a shortcut for .Method("PATCH").Path(subPath)
|
||||
func (w *WebService) PATCH(subPath string) *RouteBuilder {
|
||||
return new(RouteBuilder).servicePath(w.rootPath).Method("PATCH").Path(subPath)
|
||||
return new(RouteBuilder).typeNameHandler(w.typeNameHandleFunc).servicePath(w.rootPath).Method("PATCH").Path(subPath)
|
||||
}
|
||||
|
||||
// DELETE is a shortcut for .Method("DELETE").Path(subPath)
|
||||
func (w *WebService) DELETE(subPath string) *RouteBuilder {
|
||||
return new(RouteBuilder).servicePath(w.rootPath).Method("DELETE").Path(subPath)
|
||||
return new(RouteBuilder).typeNameHandler(w.typeNameHandleFunc).servicePath(w.rootPath).Method("DELETE").Path(subPath)
|
||||
}
|
||||
|
|
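A sketch of overriding the default type naming described above; the Pod type and the trimming logic are illustrative.

```go
package main

import (
	"fmt"
	"reflect"
	"strings"

	restful "github.com/emicklei/go-restful"
)

// Pod is an illustrative sample type.
type Pod struct{ Name string }

// shortName drops the package qualifier that reflect.TypeOf(sample).String() keeps.
func shortName(sample interface{}) string {
	full := reflect.TypeOf(sample).String() // e.g. "main.Pod"
	return full[strings.LastIndex(full, ".")+1:]
}

func main() {
	ws := new(restful.WebService)
	ws.TypeNameHandler(shortName) // RouteBuilders created via ws now document types as "Pod"

	fmt.Println(shortName(Pod{})) // prints "Pod"
}
```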
21
vendor/github.com/exponent-io/jsonpath/LICENSE
generated
vendored
Normal file
21
vendor/github.com/exponent-io/jsonpath/LICENSE
generated
vendored
Normal file
|
@ -0,0 +1,21 @@
|
|||
The MIT License (MIT)
|
||||
|
||||
Copyright (c) 2015 Exponent Labs LLC
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
66
vendor/github.com/exponent-io/jsonpath/README.md
generated
vendored
Normal file
66
vendor/github.com/exponent-io/jsonpath/README.md
generated
vendored
Normal file
|
@ -0,0 +1,66 @@
|
|||
[![GoDoc](https://godoc.org/github.com/exponent-io/jsonpath?status.svg)](https://godoc.org/github.com/exponent-io/jsonpath)
|
||||
[![Build Status](https://travis-ci.org/exponent-io/jsonpath.svg?branch=master)](https://travis-ci.org/exponent-io/jsonpath)
|
||||
|
||||
# jsonpath
|
||||
|
||||
This package extends the [json.Decoder](https://golang.org/pkg/encoding/json/#Decoder) to support navigating a stream of JSON tokens. You should be able to use this extended Decoder in places where a json.Decoder would have been used.
|
||||
|
||||
This Decoder has the following enhancements...
|
||||
* The [Scan](https://godoc.org/github.com/exponent-io/jsonpath/#Decoder.Scan) method supports scanning a JSON stream while extracting particular values along the way using [PathActions](https://godoc.org/github.com/exponent-io/jsonpath#PathActions).
|
||||
* The [SeekTo](https://godoc.org/github.com/exponent-io/jsonpath#Decoder.SeekTo) method supports seeking forward in a JSON token stream to a particular path.
|
||||
* The [Path](https://godoc.org/github.com/exponent-io/jsonpath#Decoder.Path) method returns the path of the most recently parsed token.
|
||||
* The [Token](https://godoc.org/github.com/exponent-io/jsonpath#Decoder.Token) method has been modified to distinguish between strings that are object keys and strings that are values. Object key strings are returned as the [KeyString](https://godoc.org/github.com/exponent-io/jsonpath#KeyString) type rather than a native string.
|
||||
|
||||
## Installation
|
||||
|
||||
go get -u github.com/exponent-io/jsonpath
|
||||
|
||||
## Example Usage
|
||||
|
||||
#### SeekTo
|
||||
|
||||
```go
|
||||
import "github.com/exponent-io/jsonpath"
|
||||
|
||||
var j = []byte(`[
|
||||
{"Space": "YCbCr", "Point": {"Y": 255, "Cb": 0, "Cr": -10}},
|
||||
{"Space": "RGB", "Point": {"R": 98, "G": 218, "B": 255}}
|
||||
]`)
|
||||
|
||||
w := json.NewDecoder(bytes.NewReader(j))
|
||||
var v interface{}
|
||||
|
||||
w.SeekTo(1, "Point", "G")
|
||||
w.Decode(&v) // v is 218
|
||||
```
|
||||
|
||||
#### Scan with PathActions
|
||||
|
||||
```go
|
||||
var j = []byte(`{"colors":[
|
||||
{"Space": "YCbCr", "Point": {"Y": 255, "Cb": 0, "Cr": -10, "A": 58}},
|
||||
{"Space": "RGB", "Point": {"R": 98, "G": 218, "B": 255, "A": 231}}
|
||||
]}`)
|
||||
|
||||
var actions PathActions
|
||||
|
||||
// Extract the value at Point.A
|
||||
actions.Add(func(d *Decoder) error {
|
||||
var alpha int
|
||||
err := d.Decode(&alpha)
|
||||
fmt.Printf("Alpha: %v\n", alpha)
|
||||
return err
|
||||
}, "Point", "A")
|
||||
|
||||
w := NewDecoder(bytes.NewReader(j))
|
||||
w.SeekTo("colors", 0)
|
||||
|
||||
var ok = true
|
||||
var err error
|
||||
for ok {
|
||||
ok, err = w.Scan(&actions)
|
||||
if err != nil && err != io.EOF {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
```
|
210
vendor/github.com/exponent-io/jsonpath/decoder.go
generated
vendored
Normal file
210
vendor/github.com/exponent-io/jsonpath/decoder.go
generated
vendored
Normal file
|
@ -0,0 +1,210 @@
|
|||
package jsonpath
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"io"
|
||||
)
|
||||
|
||||
// KeyString is returned from Decoder.Token to represent each key in a JSON object value.
|
||||
type KeyString string
|
||||
|
||||
// Decoder extends the Go runtime's encoding/json.Decoder to support navigating in a stream of JSON tokens.
|
||||
type Decoder struct {
|
||||
json.Decoder
|
||||
|
||||
path JsonPath
|
||||
context jsonContext
|
||||
}
|
||||
|
||||
// NewDecoder creates a new instance of the extended JSON Decoder.
|
||||
func NewDecoder(r io.Reader) *Decoder {
|
||||
return &Decoder{Decoder: *json.NewDecoder(r)}
|
||||
}
|
||||
|
||||
// SeekTo causes the Decoder to move forward to a given path in the JSON structure.
|
||||
//
|
||||
// The path argument must consist of strings or integers. Each string specifies a JSON object key, and
|
||||
// each integer specifies an index into a JSON array.
|
||||
//
|
||||
// Consider the JSON structure
|
||||
//
|
||||
// { "a": [0,"s",12e4,{"b":0,"v":35} ] }
|
||||
//
|
||||
// SeekTo("a",3,"v") will move to the value referenced by the "a" key in the current object,
|
||||
// followed by a move to the 4th value (index 3) in the array, followed by a move to the value at key "v".
|
||||
// In this example, a subsequent call to the decoder's Decode() would unmarshal the value 35.
|
||||
//
|
||||
// SeekTo returns a boolean value indicating whether a match was found.
|
||||
//
|
||||
// Decoder is intended to be used with a stream of tokens. As a result it navigates forward only.
|
||||
func (d *Decoder) SeekTo(path ...interface{}) (bool, error) {
|
||||
|
||||
if len(path) == 0 {
|
||||
return len(d.path) == 0, nil
|
||||
}
|
||||
last := len(path) - 1
|
||||
if i, ok := path[last].(int); ok {
|
||||
path[last] = i - 1
|
||||
}
|
||||
|
||||
for {
|
||||
if d.path.Equal(path) {
|
||||
return true, nil
|
||||
}
|
||||
_, err := d.Token()
|
||||
if err == io.EOF {
|
||||
return false, nil
|
||||
} else if err != nil {
|
||||
return false, err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Decode reads the next JSON-encoded value from its input and stores it in the value pointed to by v. This is
|
||||
// equivalent to encoding/json.Decode().
|
||||
func (d *Decoder) Decode(v interface{}) error {
|
||||
switch d.context {
|
||||
case objValue:
|
||||
d.context = objKey
|
||||
break
|
||||
case arrValue:
|
||||
d.path.incTop()
|
||||
break
|
||||
}
|
||||
return d.Decoder.Decode(v)
|
||||
}
|
||||
|
||||
// Path returns a slice of string and/or int values representing the path from the root of the JSON object to the
|
||||
// position of the most-recently parsed token.
|
||||
func (d *Decoder) Path() JsonPath {
|
||||
p := make(JsonPath, len(d.path))
|
||||
copy(p, d.path)
|
||||
return p
|
||||
}
|
||||
|
||||
// Token is equivalent to the Token() method on json.Decoder. The primary difference is that it distinguishes
|
||||
// between strings that are keys and strings that are values. String tokens that are object keys are returned as a
|
||||
// KeyString rather than as a native string.
|
||||
func (d *Decoder) Token() (json.Token, error) {
|
||||
t, err := d.Decoder.Token()
|
||||
if err != nil {
|
||||
return t, err
|
||||
}
|
||||
|
||||
if t == nil {
|
||||
switch d.context {
|
||||
case objValue:
|
||||
d.context = objKey
|
||||
break
|
||||
case arrValue:
|
||||
d.path.incTop()
|
||||
break
|
||||
}
|
||||
return t, err
|
||||
}
|
||||
|
||||
switch t := t.(type) {
|
||||
case json.Delim:
|
||||
switch t {
|
||||
case json.Delim('{'):
|
||||
if d.context == arrValue {
|
||||
d.path.incTop()
|
||||
}
|
||||
d.path.push("")
|
||||
d.context = objKey
|
||||
break
|
||||
case json.Delim('}'):
|
||||
d.path.pop()
|
||||
d.context = d.path.inferContext()
|
||||
break
|
||||
case json.Delim('['):
|
||||
if d.context == arrValue {
|
||||
d.path.incTop()
|
||||
}
|
||||
d.path.push(-1)
|
||||
d.context = arrValue
|
||||
break
|
||||
case json.Delim(']'):
|
||||
d.path.pop()
|
||||
d.context = d.path.inferContext()
|
||||
break
|
||||
}
|
||||
case float64, json.Number, bool:
|
||||
switch d.context {
|
||||
case objValue:
|
||||
d.context = objKey
|
||||
break
|
||||
case arrValue:
|
||||
d.path.incTop()
|
||||
break
|
||||
}
|
||||
break
|
||||
case string:
|
||||
switch d.context {
|
||||
case objKey:
|
||||
d.path.nameTop(t)
|
||||
d.context = objValue
|
||||
return KeyString(t), err
|
||||
case objValue:
|
||||
d.context = objKey
|
||||
case arrValue:
|
||||
d.path.incTop()
|
||||
}
|
||||
break
|
||||
}
|
||||
|
||||
return t, err
|
||||
}
|
||||
|
||||
// Scan moves forward over the JSON stream consuming all the tokens at the current level (current object, current array)
|
||||
// invoking each matching PathAction along the way.
|
||||
//
|
||||
// Scan returns true if there are more contiguous values to scan (for example in an array).
|
||||
func (d *Decoder) Scan(ext *PathActions) (bool, error) {
|
||||
|
||||
rootPath := d.Path()
|
||||
|
||||
// If this is an array path, increment the root path in our local copy.
|
||||
if rootPath.inferContext() == arrValue {
|
||||
rootPath.incTop()
|
||||
}
|
||||
|
||||
for {
|
||||
// advance the token position
|
||||
_, err := d.Token()
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
match:
|
||||
var relPath JsonPath
|
||||
|
||||
// capture the new JSON path
|
||||
path := d.Path()
|
||||
|
||||
if len(path) > len(rootPath) {
|
||||
// capture the path relative to where the scan started
|
||||
relPath = path[len(rootPath):]
|
||||
} else {
|
||||
// if the path is not longer than the root, then we are done with this scan
|
||||
// return boolean flag indicating if there are more items to scan at the same level
|
||||
return d.Decoder.More(), nil
|
||||
}
|
||||
|
||||
// match the relative path against the path actions
|
||||
if node := ext.node.match(relPath); node != nil {
|
||||
if node.action != nil {
|
||||
// we have a match so execute the action
|
||||
err = node.action(d)
|
||||
if err != nil {
|
||||
return d.Decoder.More(), err
|
||||
}
|
||||
// The action may have advanced the decoder. If we are in an array, advancing it further would
|
||||
// skip tokens. So, if we are scanning an array, jump to the top without advancing the token.
|
||||
if d.path.inferContext() == arrValue && d.Decoder.More() {
|
||||
goto match
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
67
vendor/github.com/exponent-io/jsonpath/path.go
generated
vendored
Normal file
|
@ -0,0 +1,67 @@
|
|||
// Extends the Go runtime's json.Decoder enabling navigation of a stream of json tokens.
|
||||
package jsonpath
|
||||
|
||||
import "fmt"
|
||||
|
||||
type jsonContext int
|
||||
|
||||
const (
|
||||
none jsonContext = iota
|
||||
objKey
|
||||
objValue
|
||||
arrValue
|
||||
)
|
||||
|
||||
// AnyIndex can be used in a pattern to match any array index.
|
||||
const AnyIndex = -2
|
||||
|
||||
// JsonPath is a slice of strings and/or integers. Each string specifies a JSON object key, and
|
||||
// each integer specifies an index into a JSON array.
|
||||
type JsonPath []interface{}
|
||||
|
||||
func (p *JsonPath) push(n interface{}) { *p = append(*p, n) }
|
||||
func (p *JsonPath) pop() { *p = (*p)[:len(*p)-1] }
|
||||
|
||||
// increment the index at the top of the stack (must be an array index)
|
||||
func (p *JsonPath) incTop() { (*p)[len(*p)-1] = (*p)[len(*p)-1].(int) + 1 }
|
||||
|
||||
// name the key at the top of the stack (must be an object key)
|
||||
func (p *JsonPath) nameTop(n string) { (*p)[len(*p)-1] = n }
|
||||
|
||||
// infer the context from the item at the top of the stack
|
||||
func (p *JsonPath) inferContext() jsonContext {
|
||||
if len(*p) == 0 {
|
||||
return none
|
||||
}
|
||||
t := (*p)[len(*p)-1]
|
||||
switch t.(type) {
|
||||
case string:
|
||||
return objKey
|
||||
case int:
|
||||
return arrValue
|
||||
default:
|
||||
panic(fmt.Sprintf("Invalid stack type %T", t))
|
||||
}
|
||||
}
|
||||
|
||||
// Equal tests for equality between two JsonPath types.
|
||||
func (p *JsonPath) Equal(o JsonPath) bool {
|
||||
if len(*p) != len(o) {
|
||||
return false
|
||||
}
|
||||
for i, v := range *p {
|
||||
if v != o[i] {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func (p *JsonPath) HasPrefix(o JsonPath) bool {
|
||||
for i, v := range o {
|
||||
if v != (*p)[i] {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
61
vendor/github.com/exponent-io/jsonpath/pathaction.go
generated
vendored
Normal file
|
@ -0,0 +1,61 @@
|
|||
package jsonpath
|
||||
|
||||
// pathNode is used to construct a trie of paths to be matched
|
||||
type pathNode struct {
|
||||
matchOn interface{} // string, or integer
|
||||
childNodes []pathNode
|
||||
action DecodeAction
|
||||
}
|
||||
|
||||
// match climbs the trie to find a node that matches the given JSON path.
|
||||
func (n *pathNode) match(path JsonPath) *pathNode {
|
||||
var node *pathNode = n
|
||||
for _, ps := range path {
|
||||
found := false
|
||||
for i, n := range node.childNodes {
|
||||
if n.matchOn == ps {
|
||||
node = &node.childNodes[i]
|
||||
found = true
|
||||
break
|
||||
} else if _, ok := ps.(int); ok && n.matchOn == AnyIndex {
|
||||
node = &node.childNodes[i]
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
return node
|
||||
}
|
||||
|
||||
// PathActions represents a collection of DecodeAction functions that should be called at certain path positions
|
||||
// when scanning the JSON stream. PathActions can be created once and used many times in one or more JSON streams.
|
||||
type PathActions struct {
|
||||
node pathNode
|
||||
}
|
||||
|
||||
// DecodeAction handlers are called by the Decoder when scanning objects. See PathActions.Add for more detail.
|
||||
type DecodeAction func(d *Decoder) error
|
||||
|
||||
// Add specifies an action to call on the Decoder when the specified path is encountered.
|
||||
func (je *PathActions) Add(action DecodeAction, path ...interface{}) {
|
||||
|
||||
var node *pathNode = &je.node
|
||||
for _, ps := range path {
|
||||
found := false
|
||||
for i, n := range node.childNodes {
|
||||
if n.matchOn == ps {
|
||||
node = &node.childNodes[i]
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
node.childNodes = append(node.childNodes, pathNode{matchOn: ps})
|
||||
node = &node.childNodes[len(node.childNodes)-1]
|
||||
}
|
||||
}
|
||||
node.action = action
|
||||
}
|
202
vendor/github.com/go-openapi/analysis/LICENSE
generated
vendored
Normal file
|
@ -0,0 +1,202 @@
|
|||
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright [yyyy] [name of copyright owner]
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
6
vendor/github.com/go-openapi/analysis/README.md
generated
vendored
Normal file
|
@ -0,0 +1,6 @@
|
|||
# OpenAPI initiative analysis [![Build Status](https://ci.vmware.run/api/badges/go-openapi/analysis/status.svg)](https://ci.vmware.run/go-openapi/analysis) [![Coverage](https://coverage.vmware.run/badges/go-openapi/analysis/coverage.svg)](https://coverage.vmware.run/go-openapi/analysis) [![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io)
|
||||
|
||||
[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/analysis/master/LICENSE) [![GoDoc](https://godoc.org/github.com/go-openapi/analysis?status.svg)](http://godoc.org/github.com/go-openapi/analysis)
|
||||
|
||||
|
||||
A foundational library to analyze an OAI specification document for easier reasoning about the content.
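
A minimal usage sketch, not part of the upstream README: it uses an empty `spec.Swagger` document as a stand-in for a real specification, which would normally come from a loader such as go-openapi/loads.

```go
package main

import (
	"fmt"

	"github.com/go-openapi/analysis"
	"github.com/go-openapi/spec"
)

func main() {
	// A real *spec.Swagger would be unmarshalled from a specification
	// document; the empty value just keeps the sketch self-contained.
	doc := &spec.Swagger{}

	// analysis.New builds the index of operations, consumes/produces,
	// security schemes and schema references exposed by the Spec type.
	an := analysis.New(doc)

	fmt.Println("operation ids:", an.OperationIDs())          // nil for an empty spec
	fmt.Println("required consumes:", an.RequiredConsumes())  // nil for an empty spec
	for path := range an.AllPaths() {
		fmt.Println("path:", path)
	}
}
```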
|
614
vendor/github.com/go-openapi/analysis/analyzer.go
generated
vendored
Normal file
|
@ -0,0 +1,614 @@
|
|||
// Copyright 2015 go-swagger maintainers
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package analysis
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
slashpath "path"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/go-openapi/jsonpointer"
|
||||
"github.com/go-openapi/spec"
|
||||
"github.com/go-openapi/swag"
|
||||
)
|
||||
|
||||
type referenceAnalysis struct {
|
||||
schemas map[string]spec.Ref
|
||||
responses map[string]spec.Ref
|
||||
parameters map[string]spec.Ref
|
||||
items map[string]spec.Ref
|
||||
allRefs map[string]spec.Ref
|
||||
referenced struct {
|
||||
schemas map[string]SchemaRef
|
||||
responses map[string]*spec.Response
|
||||
parameters map[string]*spec.Parameter
|
||||
}
|
||||
}
|
||||
|
||||
func (r *referenceAnalysis) addRef(key string, ref spec.Ref) {
|
||||
r.allRefs["#"+key] = ref
|
||||
}
|
||||
|
||||
func (r *referenceAnalysis) addItemsRef(key string, items *spec.Items) {
|
||||
r.items["#"+key] = items.Ref
|
||||
r.addRef(key, items.Ref)
|
||||
}
|
||||
|
||||
func (r *referenceAnalysis) addSchemaRef(key string, ref SchemaRef) {
|
||||
r.schemas["#"+key] = ref.Schema.Ref
|
||||
r.addRef(key, ref.Schema.Ref)
|
||||
}
|
||||
|
||||
func (r *referenceAnalysis) addResponseRef(key string, resp *spec.Response) {
|
||||
r.responses["#"+key] = resp.Ref
|
||||
r.addRef(key, resp.Ref)
|
||||
}
|
||||
|
||||
func (r *referenceAnalysis) addParamRef(key string, param *spec.Parameter) {
|
||||
r.parameters["#"+key] = param.Ref
|
||||
r.addRef(key, param.Ref)
|
||||
}
|
||||
|
||||
// New takes a swagger spec object and returns an analyzed spec document.
|
||||
// The analyzed document contains a number of indices that make it easier to
|
||||
// reason about semantics of a swagger specification for use in code generation
|
||||
// or validation etc.
|
||||
func New(doc *spec.Swagger) *Spec {
|
||||
a := &Spec{
|
||||
spec: doc,
|
||||
consumes: make(map[string]struct{}, 150),
|
||||
produces: make(map[string]struct{}, 150),
|
||||
authSchemes: make(map[string]struct{}, 150),
|
||||
operations: make(map[string]map[string]*spec.Operation, 150),
|
||||
allSchemas: make(map[string]SchemaRef, 150),
|
||||
allOfs: make(map[string]SchemaRef, 150),
|
||||
references: referenceAnalysis{
|
||||
schemas: make(map[string]spec.Ref, 150),
|
||||
responses: make(map[string]spec.Ref, 150),
|
||||
parameters: make(map[string]spec.Ref, 150),
|
||||
items: make(map[string]spec.Ref, 150),
|
||||
allRefs: make(map[string]spec.Ref, 150),
|
||||
},
|
||||
}
|
||||
a.references.referenced.schemas = make(map[string]SchemaRef, 150)
|
||||
a.references.referenced.responses = make(map[string]*spec.Response, 150)
|
||||
a.references.referenced.parameters = make(map[string]*spec.Parameter, 150)
|
||||
a.initialize()
|
||||
return a
|
||||
}
|
||||
|
||||
// Spec takes a swagger spec object and turns it into a registry
|
||||
// with a bunch of utility methods to act on the information in the spec
|
||||
type Spec struct {
|
||||
spec *spec.Swagger
|
||||
consumes map[string]struct{}
|
||||
produces map[string]struct{}
|
||||
authSchemes map[string]struct{}
|
||||
operations map[string]map[string]*spec.Operation
|
||||
references referenceAnalysis
|
||||
allSchemas map[string]SchemaRef
|
||||
allOfs map[string]SchemaRef
|
||||
}
|
||||
|
||||
func (s *Spec) initialize() {
|
||||
for _, c := range s.spec.Consumes {
|
||||
s.consumes[c] = struct{}{}
|
||||
}
|
||||
for _, c := range s.spec.Produces {
|
||||
s.produces[c] = struct{}{}
|
||||
}
|
||||
for _, ss := range s.spec.Security {
|
||||
for k := range ss {
|
||||
s.authSchemes[k] = struct{}{}
|
||||
}
|
||||
}
|
||||
for path, pathItem := range s.AllPaths() {
|
||||
s.analyzeOperations(path, &pathItem)
|
||||
}
|
||||
|
||||
for name, parameter := range s.spec.Parameters {
|
||||
refPref := slashpath.Join("/parameters", jsonpointer.Escape(name))
|
||||
if parameter.Items != nil {
|
||||
s.analyzeItems("items", parameter.Items, refPref)
|
||||
}
|
||||
if parameter.In == "body" && parameter.Schema != nil {
|
||||
s.analyzeSchema("schema", *parameter.Schema, refPref)
|
||||
}
|
||||
}
|
||||
|
||||
for name, response := range s.spec.Responses {
|
||||
refPref := slashpath.Join("/responses", jsonpointer.Escape(name))
|
||||
for _, v := range response.Headers {
|
||||
if v.Items != nil {
|
||||
s.analyzeItems("items", v.Items, refPref)
|
||||
}
|
||||
}
|
||||
if response.Schema != nil {
|
||||
s.analyzeSchema("schema", *response.Schema, refPref)
|
||||
}
|
||||
}
|
||||
|
||||
for name, schema := range s.spec.Definitions {
|
||||
s.analyzeSchema(name, schema, "/definitions")
|
||||
}
|
||||
// TODO: after analyzing all things and flattening schemas etc
|
||||
// resolve all the collected references to their final representations
|
||||
// best put in a separate method because this could get expensive
|
||||
}
|
||||
|
||||
func (s *Spec) analyzeOperations(path string, pi *spec.PathItem) {
|
||||
// TODO: resolve refs here?
|
||||
op := pi
|
||||
s.analyzeOperation("GET", path, op.Get)
|
||||
s.analyzeOperation("PUT", path, op.Put)
|
||||
s.analyzeOperation("POST", path, op.Post)
|
||||
s.analyzeOperation("PATCH", path, op.Patch)
|
||||
s.analyzeOperation("DELETE", path, op.Delete)
|
||||
s.analyzeOperation("HEAD", path, op.Head)
|
||||
s.analyzeOperation("OPTIONS", path, op.Options)
|
||||
for i, param := range op.Parameters {
|
||||
refPref := slashpath.Join("/paths", jsonpointer.Escape(path), "parameters", strconv.Itoa(i))
|
||||
if param.Ref.String() != "" {
|
||||
s.references.addParamRef(refPref, ¶m)
|
||||
}
|
||||
if param.Items != nil {
|
||||
s.analyzeItems("items", param.Items, refPref)
|
||||
}
|
||||
if param.Schema != nil {
|
||||
s.analyzeSchema("schema", *param.Schema, refPref)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Spec) analyzeItems(name string, items *spec.Items, prefix string) {
|
||||
if items == nil {
|
||||
return
|
||||
}
|
||||
refPref := slashpath.Join(prefix, name)
|
||||
s.analyzeItems(name, items.Items, refPref)
|
||||
if items.Ref.String() != "" {
|
||||
s.references.addItemsRef(refPref, items)
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Spec) analyzeOperation(method, path string, op *spec.Operation) {
|
||||
if op == nil {
|
||||
return
|
||||
}
|
||||
|
||||
for _, c := range op.Consumes {
|
||||
s.consumes[c] = struct{}{}
|
||||
}
|
||||
for _, c := range op.Produces {
|
||||
s.produces[c] = struct{}{}
|
||||
}
|
||||
for _, ss := range op.Security {
|
||||
for k := range ss {
|
||||
s.authSchemes[k] = struct{}{}
|
||||
}
|
||||
}
|
||||
if _, ok := s.operations[method]; !ok {
|
||||
s.operations[method] = make(map[string]*spec.Operation)
|
||||
}
|
||||
s.operations[method][path] = op
|
||||
prefix := slashpath.Join("/paths", jsonpointer.Escape(path), strings.ToLower(method))
|
||||
for i, param := range op.Parameters {
|
||||
refPref := slashpath.Join(prefix, "parameters", strconv.Itoa(i))
|
||||
if param.Ref.String() != "" {
|
||||
s.references.addParamRef(refPref, ¶m)
|
||||
}
|
||||
s.analyzeItems("items", param.Items, refPref)
|
||||
if param.In == "body" && param.Schema != nil {
|
||||
s.analyzeSchema("schema", *param.Schema, refPref)
|
||||
}
|
||||
}
|
||||
if op.Responses != nil {
|
||||
if op.Responses.Default != nil {
|
||||
refPref := slashpath.Join(prefix, "responses", "default")
|
||||
if op.Responses.Default.Ref.String() != "" {
|
||||
s.references.addResponseRef(refPref, op.Responses.Default)
|
||||
}
|
||||
for _, v := range op.Responses.Default.Headers {
|
||||
s.analyzeItems("items", v.Items, refPref)
|
||||
}
|
||||
if op.Responses.Default.Schema != nil {
|
||||
s.analyzeSchema("schema", *op.Responses.Default.Schema, refPref)
|
||||
}
|
||||
}
|
||||
for k, res := range op.Responses.StatusCodeResponses {
|
||||
refPref := slashpath.Join(prefix, "responses", strconv.Itoa(k))
|
||||
if res.Ref.String() != "" {
|
||||
s.references.addResponseRef(refPref, &res)
|
||||
}
|
||||
for _, v := range res.Headers {
|
||||
s.analyzeItems("items", v.Items, refPref)
|
||||
}
|
||||
if res.Schema != nil {
|
||||
s.analyzeSchema("schema", *res.Schema, refPref)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Spec) analyzeSchema(name string, schema spec.Schema, prefix string) {
|
||||
refURI := slashpath.Join(prefix, jsonpointer.Escape(name))
|
||||
schRef := SchemaRef{
|
||||
Name: name,
|
||||
Schema: &schema,
|
||||
Ref: spec.MustCreateRef("#" + refURI),
|
||||
}
|
||||
s.allSchemas["#"+refURI] = schRef
|
||||
if schema.Ref.String() != "" {
|
||||
s.references.addSchemaRef(refURI, schRef)
|
||||
}
|
||||
for k, v := range schema.Definitions {
|
||||
s.analyzeSchema(k, v, slashpath.Join(refURI, "definitions"))
|
||||
}
|
||||
for k, v := range schema.Properties {
|
||||
s.analyzeSchema(k, v, slashpath.Join(refURI, "properties"))
|
||||
}
|
||||
for k, v := range schema.PatternProperties {
|
||||
s.analyzeSchema(k, v, slashpath.Join(refURI, "patternProperties"))
|
||||
}
|
||||
for i, v := range schema.AllOf {
|
||||
s.analyzeSchema(strconv.Itoa(i), v, slashpath.Join(refURI, "allOf"))
|
||||
}
|
||||
if len(schema.AllOf) > 0 {
|
||||
s.allOfs["#"+refURI] = SchemaRef{Name: name, Schema: &schema, Ref: spec.MustCreateRef("#" + refURI)}
|
||||
}
|
||||
for i, v := range schema.AnyOf {
|
||||
s.analyzeSchema(strconv.Itoa(i), v, slashpath.Join(refURI, "anyOf"))
|
||||
}
|
||||
for i, v := range schema.OneOf {
|
||||
s.analyzeSchema(strconv.Itoa(i), v, slashpath.Join(refURI, "oneOf"))
|
||||
}
|
||||
if schema.Not != nil {
|
||||
s.analyzeSchema("not", *schema.Not, refURI)
|
||||
}
|
||||
if schema.AdditionalProperties != nil && schema.AdditionalProperties.Schema != nil {
|
||||
s.analyzeSchema("additionalProperties", *schema.AdditionalProperties.Schema, refURI)
|
||||
}
|
||||
if schema.AdditionalItems != nil && schema.AdditionalItems.Schema != nil {
|
||||
s.analyzeSchema("additionalItems", *schema.AdditionalItems.Schema, refURI)
|
||||
}
|
||||
if schema.Items != nil {
|
||||
if schema.Items.Schema != nil {
|
||||
s.analyzeSchema("items", *schema.Items.Schema, refURI)
|
||||
}
|
||||
for i, sch := range schema.Items.Schemas {
|
||||
s.analyzeSchema(strconv.Itoa(i), sch, slashpath.Join(refURI, "items"))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// SecurityRequirement is a representation of a security requirement for an operation
|
||||
type SecurityRequirement struct {
|
||||
Name string
|
||||
Scopes []string
|
||||
}
|
||||
|
||||
// SecurityRequirementsFor gets the security requirements for the operation
|
||||
func (s *Spec) SecurityRequirementsFor(operation *spec.Operation) []SecurityRequirement {
|
||||
if s.spec.Security == nil && operation.Security == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
schemes := s.spec.Security
|
||||
if operation.Security != nil {
|
||||
schemes = operation.Security
|
||||
}
|
||||
|
||||
unique := make(map[string]SecurityRequirement)
|
||||
for _, scheme := range schemes {
|
||||
for k, v := range scheme {
|
||||
if _, ok := unique[k]; !ok {
|
||||
unique[k] = SecurityRequirement{Name: k, Scopes: v}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var result []SecurityRequirement
|
||||
for _, v := range unique {
|
||||
result = append(result, v)
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
// SecurityDefinitionsFor gets the matching security definitions for a set of requirements
|
||||
func (s *Spec) SecurityDefinitionsFor(operation *spec.Operation) map[string]spec.SecurityScheme {
|
||||
requirements := s.SecurityRequirementsFor(operation)
|
||||
if len(requirements) == 0 {
|
||||
return nil
|
||||
}
|
||||
result := make(map[string]spec.SecurityScheme)
|
||||
for _, v := range requirements {
|
||||
if definition, ok := s.spec.SecurityDefinitions[v.Name]; ok {
|
||||
if definition != nil {
|
||||
result[v.Name] = *definition
|
||||
}
|
||||
}
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
// ConsumesFor gets the mediatypes for the operation
|
||||
func (s *Spec) ConsumesFor(operation *spec.Operation) []string {
|
||||
|
||||
if len(operation.Consumes) == 0 {
|
||||
cons := make(map[string]struct{}, len(s.spec.Consumes))
|
||||
for _, k := range s.spec.Consumes {
|
||||
cons[k] = struct{}{}
|
||||
}
|
||||
return s.structMapKeys(cons)
|
||||
}
|
||||
|
||||
cons := make(map[string]struct{}, len(operation.Consumes))
|
||||
for _, c := range operation.Consumes {
|
||||
cons[c] = struct{}{}
|
||||
}
|
||||
return s.structMapKeys(cons)
|
||||
}
|
||||
|
||||
// ProducesFor gets the mediatypes for the operation
|
||||
func (s *Spec) ProducesFor(operation *spec.Operation) []string {
|
||||
if len(operation.Produces) == 0 {
|
||||
prod := make(map[string]struct{}, len(s.spec.Produces))
|
||||
for _, k := range s.spec.Produces {
|
||||
prod[k] = struct{}{}
|
||||
}
|
||||
return s.structMapKeys(prod)
|
||||
}
|
||||
|
||||
prod := make(map[string]struct{}, len(operation.Produces))
|
||||
for _, c := range operation.Produces {
|
||||
prod[c] = struct{}{}
|
||||
}
|
||||
return s.structMapKeys(prod)
|
||||
}
|
||||
|
||||
func mapKeyFromParam(param *spec.Parameter) string {
|
||||
return fmt.Sprintf("%s#%s", param.In, fieldNameFromParam(param))
|
||||
}
|
||||
|
||||
func fieldNameFromParam(param *spec.Parameter) string {
|
||||
if nm, ok := param.Extensions.GetString("go-name"); ok {
|
||||
return nm
|
||||
}
|
||||
return swag.ToGoName(param.Name)
|
||||
}
|
||||
|
||||
func (s *Spec) paramsAsMap(parameters []spec.Parameter, res map[string]spec.Parameter) {
|
||||
for _, param := range parameters {
|
||||
pr := param
|
||||
if pr.Ref.String() != "" {
|
||||
obj, _, err := pr.Ref.GetPointer().Get(s.spec)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
pr = obj.(spec.Parameter)
|
||||
}
|
||||
res[mapKeyFromParam(&pr)] = pr
|
||||
}
|
||||
}
|
||||
|
||||
// ParametersFor the specified operation id
|
||||
func (s *Spec) ParametersFor(operationID string) []spec.Parameter {
|
||||
gatherParams := func(pi *spec.PathItem, op *spec.Operation) []spec.Parameter {
|
||||
bag := make(map[string]spec.Parameter)
|
||||
s.paramsAsMap(pi.Parameters, bag)
|
||||
s.paramsAsMap(op.Parameters, bag)
|
||||
|
||||
var res []spec.Parameter
|
||||
for _, v := range bag {
|
||||
res = append(res, v)
|
||||
}
|
||||
return res
|
||||
}
|
||||
for _, pi := range s.spec.Paths.Paths {
|
||||
if pi.Get != nil && pi.Get.ID == operationID {
|
||||
return gatherParams(&pi, pi.Get)
|
||||
}
|
||||
if pi.Head != nil && pi.Head.ID == operationID {
|
||||
return gatherParams(&pi, pi.Head)
|
||||
}
|
||||
if pi.Options != nil && pi.Options.ID == operationID {
|
||||
return gatherParams(&pi, pi.Options)
|
||||
}
|
||||
if pi.Post != nil && pi.Post.ID == operationID {
|
||||
return gatherParams(&pi, pi.Post)
|
||||
}
|
||||
if pi.Patch != nil && pi.Patch.ID == operationID {
|
||||
return gatherParams(&pi, pi.Patch)
|
||||
}
|
||||
if pi.Put != nil && pi.Put.ID == operationID {
|
||||
return gatherParams(&pi, pi.Put)
|
||||
}
|
||||
if pi.Delete != nil && pi.Delete.ID == operationID {
|
||||
return gatherParams(&pi, pi.Delete)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ParamsFor the specified method and path. Aggregates them with the defaults etc, so it's all the params that
|
||||
// apply for the method and path.
|
||||
func (s *Spec) ParamsFor(method, path string) map[string]spec.Parameter {
|
||||
res := make(map[string]spec.Parameter)
|
||||
if pi, ok := s.spec.Paths.Paths[path]; ok {
|
||||
s.paramsAsMap(pi.Parameters, res)
|
||||
s.paramsAsMap(s.operations[strings.ToUpper(method)][path].Parameters, res)
|
||||
}
|
||||
return res
|
||||
}
|
||||
|
||||
// OperationForName gets the operation for the given id
|
||||
func (s *Spec) OperationForName(operationID string) (string, string, *spec.Operation, bool) {
|
||||
for method, pathItem := range s.operations {
|
||||
for path, op := range pathItem {
|
||||
if operationID == op.ID {
|
||||
return method, path, op, true
|
||||
}
|
||||
}
|
||||
}
|
||||
return "", "", nil, false
|
||||
}
|
||||
|
||||
// OperationFor the given method and path
|
||||
func (s *Spec) OperationFor(method, path string) (*spec.Operation, bool) {
|
||||
if mp, ok := s.operations[strings.ToUpper(method)]; ok {
|
||||
op, fn := mp[path]
|
||||
return op, fn
|
||||
}
|
||||
return nil, false
|
||||
}
|
||||
|
||||
// Operations gathers all the operations specified in the spec document
|
||||
func (s *Spec) Operations() map[string]map[string]*spec.Operation {
|
||||
return s.operations
|
||||
}
|
||||
|
||||
func (s *Spec) structMapKeys(mp map[string]struct{}) []string {
|
||||
if len(mp) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
result := make([]string, 0, len(mp))
|
||||
for k := range mp {
|
||||
result = append(result, k)
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
// AllPaths returns all the paths in the swagger spec
|
||||
func (s *Spec) AllPaths() map[string]spec.PathItem {
|
||||
if s.spec == nil || s.spec.Paths == nil {
|
||||
return nil
|
||||
}
|
||||
return s.spec.Paths.Paths
|
||||
}
|
||||
|
||||
// OperationIDs gets all the operation ids based on method and path
|
||||
func (s *Spec) OperationIDs() []string {
|
||||
if len(s.operations) == 0 {
|
||||
return nil
|
||||
}
|
||||
result := make([]string, 0, len(s.operations))
|
||||
for method, v := range s.operations {
|
||||
for p, o := range v {
|
||||
if o.ID != "" {
|
||||
result = append(result, o.ID)
|
||||
} else {
|
||||
result = append(result, fmt.Sprintf("%s %s", strings.ToUpper(method), p))
|
||||
}
|
||||
}
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
// RequiredConsumes gets all the distinct consumes that are specified in the specification document
|
||||
func (s *Spec) RequiredConsumes() []string {
|
||||
return s.structMapKeys(s.consumes)
|
||||
}
|
||||
|
||||
// RequiredProduces gets all the distinct produces that are specified in the specification document
|
||||
func (s *Spec) RequiredProduces() []string {
|
||||
return s.structMapKeys(s.produces)
|
||||
}
|
||||
|
||||
// RequiredSecuritySchemes gets all the distinct security schemes that are specified in the swagger spec
|
||||
func (s *Spec) RequiredSecuritySchemes() []string {
|
||||
return s.structMapKeys(s.authSchemes)
|
||||
}
|
||||
|
||||
// SchemaRef is a reference to a schema
|
||||
type SchemaRef struct {
|
||||
Name string
|
||||
Ref spec.Ref
|
||||
Schema *spec.Schema
|
||||
}
|
||||
|
||||
// SchemasWithAllOf returns schema references to all schemas that are defined
|
||||
// with an allOf key
|
||||
func (s *Spec) SchemasWithAllOf() (result []SchemaRef) {
|
||||
for _, v := range s.allOfs {
|
||||
result = append(result, v)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// AllDefinitions returns schema references for all the definitions that were discovered
|
||||
func (s *Spec) AllDefinitions() (result []SchemaRef) {
|
||||
for _, v := range s.allSchemas {
|
||||
result = append(result, v)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// AllDefinitionReferences returns json refs for all the discovered schemas
|
||||
func (s *Spec) AllDefinitionReferences() (result []string) {
|
||||
for _, v := range s.references.schemas {
|
||||
result = append(result, v.String())
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// AllParameterReferences returns json refs for all the discovered parameters
|
||||
func (s *Spec) AllParameterReferences() (result []string) {
|
||||
for _, v := range s.references.parameters {
|
||||
result = append(result, v.String())
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// AllResponseReferences returns json refs for all the discovered responses
|
||||
func (s *Spec) AllResponseReferences() (result []string) {
|
||||
for _, v := range s.references.responses {
|
||||
result = append(result, v.String())
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// AllItemsReferences returns the references for all the items
|
||||
func (s *Spec) AllItemsReferences() (result []string) {
|
||||
for _, v := range s.references.items {
|
||||
result = append(result, v.String())
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// AllReferences returns all the references found in the document
|
||||
func (s *Spec) AllReferences() (result []string) {
|
||||
for _, v := range s.references.allRefs {
|
||||
result = append(result, v.String())
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// AllRefs returns all the unique references found in the document
|
||||
func (s *Spec) AllRefs() (result []spec.Ref) {
|
||||
set := make(map[string]struct{})
|
||||
for _, v := range s.references.allRefs {
|
||||
a := v.String()
|
||||
if a == "" {
|
||||
continue
|
||||
}
|
||||
if _, ok := set[a]; !ok {
|
||||
set[a] = struct{}{}
|
||||
result = append(result, v)
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
202
vendor/github.com/go-openapi/loads/LICENSE
generated
vendored
Normal file
|
@ -0,0 +1,202 @@
|
|||
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright [yyyy] [name of copyright owner]
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
5
vendor/github.com/go-openapi/loads/README.md
generated
vendored
Normal file
|
@ -0,0 +1,5 @@
|
|||
# Loads OAI specs [![Build Status](https://ci.vmware.run/api/badges/go-openapi/loads/status.svg)](https://ci.vmware.run/go-openapi/loads) [![Coverage](https://coverage.vmware.run/badges/go-openapi/loads/coverage.svg)](https://coverage.vmware.run/go-openapi/loads) [![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io)
|
||||
|
||||
[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/loads/master/LICENSE) [![GoDoc](https://godoc.org/github.com/go-openapi/loads?status.svg)](http://godoc.org/github.com/go-openapi/loads)
|
||||
|
||||
Loading of OAI specification documents from local or remote locations.
|
203
vendor/github.com/go-openapi/loads/spec.go
generated
vendored
Normal file
|
@@ -0,0 +1,203 @@
|
|||
// Copyright 2015 go-swagger maintainers
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package loads
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net/url"
|
||||
|
||||
"github.com/go-openapi/analysis"
|
||||
"github.com/go-openapi/spec"
|
||||
"github.com/go-openapi/swag"
|
||||
)
|
||||
|
||||
// JSONDoc loads a json document from either a file or a remote url
|
||||
func JSONDoc(path string) (json.RawMessage, error) {
|
||||
data, err := swag.LoadFromFileOrHTTP(path)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return json.RawMessage(data), nil
|
||||
}
|
||||
|
||||
// DocLoader represents a doc loader type
|
||||
type DocLoader func(string) (json.RawMessage, error)
|
||||
|
||||
// DocMatcher represents a predicate to check if a loader matches
|
||||
type DocMatcher func(string) bool
|
||||
|
||||
var loaders = &loader{Match: func(_ string) bool { return true }, Fn: JSONDoc}
|
||||
|
||||
// AddLoader for a document
|
||||
func AddLoader(predicate DocMatcher, load DocLoader) {
|
||||
prev := loaders
|
||||
loaders = &loader{
|
||||
Match: predicate,
|
||||
Fn: load,
|
||||
Next: prev,
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
type loader struct {
|
||||
Fn DocLoader
|
||||
Match DocMatcher
|
||||
Next *loader
|
||||
}
|
||||
|
||||
// JSONSpec loads a spec from a json document
|
||||
func JSONSpec(path string) (*Document, error) {
|
||||
data, err := JSONDoc(path)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// convert to json
|
||||
return Analyzed(json.RawMessage(data), "")
|
||||
}
|
||||
|
||||
// Document represents a swagger spec document
|
||||
type Document struct {
|
||||
// specAnalyzer
|
||||
Analyzer *analysis.Spec
|
||||
spec *spec.Swagger
|
||||
origSpec *spec.Swagger
|
||||
schema *spec.Schema
|
||||
raw json.RawMessage
|
||||
}
|
||||
|
||||
// Spec loads a new spec document
|
||||
func Spec(path string) (*Document, error) {
|
||||
specURL, err := url.Parse(path)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
for l := loaders.Next; l != nil; l = l.Next {
|
||||
if loaders.Match(specURL.Path) {
|
||||
b, err2 := loaders.Fn(path)
|
||||
if err2 != nil {
|
||||
return nil, err2
|
||||
}
|
||||
return Analyzed(b, "")
|
||||
}
|
||||
}
|
||||
b, err := loaders.Fn(path)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return Analyzed(b, "")
|
||||
}
|
||||
|
||||
var swag20Schema = spec.MustLoadSwagger20Schema()
|
||||
|
||||
// Analyzed creates a new analyzed spec document
|
||||
func Analyzed(data json.RawMessage, version string) (*Document, error) {
|
||||
if version == "" {
|
||||
version = "2.0"
|
||||
}
|
||||
if version != "2.0" {
|
||||
return nil, fmt.Errorf("spec version %q is not supported", version)
|
||||
}
|
||||
|
||||
swspec := new(spec.Swagger)
|
||||
if err := json.Unmarshal(data, swspec); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
origsqspec := new(spec.Swagger)
|
||||
if err := json.Unmarshal(data, origsqspec); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
d := &Document{
|
||||
Analyzer: analysis.New(swspec),
|
||||
schema: swag20Schema,
|
||||
spec: swspec,
|
||||
raw: data,
|
||||
origSpec: origsqspec,
|
||||
}
|
||||
return d, nil
|
||||
}
|
||||
|
||||
// Expanded expands the ref fields in the spec document and returns a new spec document
|
||||
func (d *Document) Expanded() (*Document, error) {
|
||||
swspec := new(spec.Swagger)
|
||||
if err := json.Unmarshal(d.raw, swspec); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := spec.ExpandSpec(swspec); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
dd := &Document{
|
||||
Analyzer: analysis.New(swspec),
|
||||
spec: swspec,
|
||||
schema: swag20Schema,
|
||||
raw: d.raw,
|
||||
origSpec: d.origSpec,
|
||||
}
|
||||
return dd, nil
|
||||
}
|
||||
|
||||
// BasePath the base path for this spec
|
||||
func (d *Document) BasePath() string {
|
||||
return d.spec.BasePath
|
||||
}
|
||||
|
||||
// Version returns the version of this spec
|
||||
func (d *Document) Version() string {
|
||||
return d.spec.Swagger
|
||||
}
|
||||
|
||||
// Schema returns the swagger 2.0 schema
|
||||
func (d *Document) Schema() *spec.Schema {
|
||||
return d.schema
|
||||
}
|
||||
|
||||
// Spec returns the swagger spec object model
|
||||
func (d *Document) Spec() *spec.Swagger {
|
||||
return d.spec
|
||||
}
|
||||
|
||||
// Host returns the host for the API
|
||||
func (d *Document) Host() string {
|
||||
return d.spec.Host
|
||||
}
|
||||
|
||||
// Raw returns the raw swagger spec as json bytes
|
||||
func (d *Document) Raw() json.RawMessage {
|
||||
return d.raw
|
||||
}
|
||||
|
||||
func (d *Document) OrigSpec() *spec.Swagger {
|
||||
return d.origSpec
|
||||
}
|
||||
|
||||
// ResetDefinitions gives a shallow copy with the models reset
|
||||
func (d *Document) ResetDefinitions() *Document {
|
||||
defs := make(map[string]spec.Schema, len(d.origSpec.Definitions))
|
||||
for k, v := range d.origSpec.Definitions {
|
||||
defs[k] = v
|
||||
}
|
||||
|
||||
d.spec.Definitions = defs
|
||||
return d
|
||||
}
|
||||
|
||||
// Pristine creates a new pristine document instance based on the input data
|
||||
func (d *Document) Pristine() *Document {
|
||||
dd, _ := Analyzed(d.Raw(), d.Version())
|
||||
return dd
|
||||
}
|
4
vendor/github.com/go-openapi/spec/README.md
generated
vendored
|
@@ -1,5 +1,5 @@
|
|||
# OAI object model [![Build Status](https://travis-ci.org/go-openapi/spec.svg?branch=master)](https://travis-ci.org/go-openapi/spec) [![codecov](https://codecov.io/gh/go-openapi/spec/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/spec) [![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io)
|
||||
# OAI object model [![Build Status](https://ci.vmware.run/api/badges/go-openapi/spec/status.svg)](https://ci.vmware.run/go-openapi/spec) [![Coverage](https://coverage.vmware.run/badges/go-openapi/spec/coverage.svg)](https://coverage.vmware.run/go-openapi/spec) [![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io)
|
||||
|
||||
[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/spec/master/LICENSE) [![GoDoc](https://godoc.org/github.com/go-openapi/spec?status.svg)](http://godoc.org/github.com/go-openapi/spec)
|
||||
|
||||
The object model for OpenAPI specification documents
|
||||
The object model for OpenAPI specification documents
|
26
vendor/github.com/go-openapi/spec/bindata.go
generated
vendored
File diff suppressed because one or more lines are too long
461
vendor/github.com/go-openapi/spec/expander.go
generated
vendored
|
@@ -17,11 +17,7 @@ package spec
|
|||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"log"
|
||||
"net/url"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"reflect"
|
||||
"strings"
|
||||
"sync"
|
||||
|
@@ -30,17 +26,6 @@ import (
|
|||
"github.com/go-openapi/swag"
|
||||
)
|
||||
|
||||
var (
|
||||
// Debug enables logging when SWAGGER_DEBUG env var is not empty
|
||||
Debug = os.Getenv("SWAGGER_DEBUG") != ""
|
||||
)
|
||||
|
||||
// ExpandOptions provides options for expand.
|
||||
type ExpandOptions struct {
|
||||
RelativeBase string
|
||||
SkipSchemas bool
|
||||
}
|
||||
|
||||
// ResolutionCache a cache for resolving urls
|
||||
type ResolutionCache interface {
|
||||
Get(string) (interface{}, bool)
|
||||
|
@@ -52,11 +37,7 @@ type simpleCache struct {
|
|||
store map[string]interface{}
|
||||
}
|
||||
|
||||
var resCache ResolutionCache
|
||||
|
||||
func init() {
|
||||
resCache = initResolutionCache()
|
||||
}
|
||||
var resCache = initResolutionCache()
|
||||
|
||||
func initResolutionCache() ResolutionCache {
|
||||
return &simpleCache{store: map[string]interface{}{
|
||||
|
@@ -66,11 +47,8 @@ func initResolutionCache() ResolutionCache {
|
|||
}
|
||||
|
||||
func (s *simpleCache) Get(uri string) (interface{}, bool) {
|
||||
debugLog("getting %q from resolution cache", uri)
|
||||
s.lock.Lock()
|
||||
v, ok := s.store[uri]
|
||||
debugLog("got %q from resolution cache: %t", uri, ok)
|
||||
|
||||
s.lock.Unlock()
|
||||
return v, ok
|
||||
}
|
||||
|
@@ -81,9 +59,9 @@ func (s *simpleCache) Set(uri string, data interface{}) {
|
|||
s.lock.Unlock()
|
||||
}
|
||||
|
||||
// ResolveRefWithBase resolves a reference against a context root with preservation of base path
|
||||
func ResolveRefWithBase(root interface{}, ref *Ref, opts *ExpandOptions) (*Schema, error) {
|
||||
resolver, err := defaultSchemaLoader(root, nil, opts, nil)
|
||||
// ResolveRef resolves a reference against a context root
|
||||
func ResolveRef(root interface{}, ref *Ref) (*Schema, error) {
|
||||
resolver, err := defaultSchemaLoader(root, nil, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@@ -95,19 +73,9 @@ func ResolveRefWithBase(root interface{}, ref *Ref, opts *ExpandOptions) (*Schem
|
|||
return result, nil
|
||||
}
|
||||
|
||||
// ResolveRef resolves a reference against a context root
|
||||
func ResolveRef(root interface{}, ref *Ref) (*Schema, error) {
|
||||
return ResolveRefWithBase(root, ref, nil)
|
||||
}
|
||||
|
||||
// ResolveParameter resolves a paramter reference against a context root
|
||||
func ResolveParameter(root interface{}, ref Ref) (*Parameter, error) {
|
||||
return ResolveParameterWithBase(root, ref, nil)
|
||||
}
|
||||
|
||||
// ResolveParameterWithBase resolves a paramter reference against a context root and base path
|
||||
func ResolveParameterWithBase(root interface{}, ref Ref, opts *ExpandOptions) (*Parameter, error) {
|
||||
resolver, err := defaultSchemaLoader(root, nil, opts, nil)
|
||||
resolver, err := defaultSchemaLoader(root, nil, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@@ -121,12 +89,7 @@ func ResolveParameterWithBase(root interface{}, ref Ref, opts *ExpandOptions) (*
|
|||
|
||||
// ResolveResponse resolves response a reference against a context root
|
||||
func ResolveResponse(root interface{}, ref Ref) (*Response, error) {
|
||||
return ResolveResponseWithBase(root, ref, nil)
|
||||
}
|
||||
|
||||
// ResolveResponseWithBase resolves response a reference against a context root and base path
|
||||
func ResolveResponseWithBase(root interface{}, ref Ref, opts *ExpandOptions) (*Response, error) {
|
||||
resolver, err := defaultSchemaLoader(root, nil, opts, nil)
|
||||
resolver, err := defaultSchemaLoader(root, nil, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@@ -138,72 +101,23 @@ func ResolveResponseWithBase(root interface{}, ref Ref, opts *ExpandOptions) (*R
|
|||
return result, nil
|
||||
}
|
||||
|
||||
// ResolveItems resolves header and parameter items reference against a context root and base path
|
||||
func ResolveItems(root interface{}, ref Ref, opts *ExpandOptions) (*Items, error) {
|
||||
resolver, err := defaultSchemaLoader(root, nil, opts, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
result := new(Items)
|
||||
if err := resolver.Resolve(&ref, result); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// ResolvePathItem resolves response a path item against a context root and base path
|
||||
func ResolvePathItem(root interface{}, ref Ref, opts *ExpandOptions) (*PathItem, error) {
|
||||
resolver, err := defaultSchemaLoader(root, nil, opts, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
result := new(PathItem)
|
||||
if err := resolver.Resolve(&ref, result); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return result, nil
|
||||
}
|
||||
|
||||
type schemaLoader struct {
|
||||
loadingRef *Ref
|
||||
startingRef *Ref
|
||||
currentRef *Ref
|
||||
root interface{}
|
||||
options *ExpandOptions
|
||||
cache ResolutionCache
|
||||
loadDoc func(string) (json.RawMessage, error)
|
||||
}
|
||||
|
||||
var idPtr, _ = jsonpointer.New("/id")
|
||||
var schemaPtr, _ = jsonpointer.New("/$schema")
|
||||
var refPtr, _ = jsonpointer.New("/$ref")
|
||||
|
||||
// PathLoader function to use when loading remote refs
|
||||
var PathLoader func(string) (json.RawMessage, error)
|
||||
|
||||
func init() {
|
||||
PathLoader = func(path string) (json.RawMessage, error) {
|
||||
data, err := swag.LoadFromFileOrHTTP(path)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return json.RawMessage(data), nil
|
||||
}
|
||||
}
|
||||
|
||||
func defaultSchemaLoader(
|
||||
root interface{},
|
||||
ref *Ref,
|
||||
expandOptions *ExpandOptions,
|
||||
cache ResolutionCache) (*schemaLoader, error) {
|
||||
|
||||
func defaultSchemaLoader(root interface{}, ref *Ref, cache ResolutionCache) (*schemaLoader, error) {
|
||||
if cache == nil {
|
||||
cache = resCache
|
||||
}
|
||||
if expandOptions == nil {
|
||||
expandOptions = &ExpandOptions{}
|
||||
}
|
||||
|
||||
var ptr *jsonpointer.Pointer
|
||||
if ref != nil {
|
||||
|
@@ -213,16 +127,18 @@ func defaultSchemaLoader(
|
|||
currentRef := nextRef(root, ref, ptr)
|
||||
|
||||
return &schemaLoader{
|
||||
root: root,
|
||||
loadingRef: ref,
|
||||
startingRef: ref,
|
||||
currentRef: currentRef,
|
||||
root: root,
|
||||
options: expandOptions,
|
||||
cache: cache,
|
||||
loadDoc: func(path string) (json.RawMessage, error) {
|
||||
debugLog("fetching document at %q", path)
|
||||
return PathLoader(path)
|
||||
data, err := swag.LoadFromFileOrHTTP(path)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return json.RawMessage(data), nil
|
||||
},
|
||||
currentRef: currentRef,
|
||||
}, nil
|
||||
}
|
||||
|
||||
|
@@ -243,7 +159,6 @@ func nextRef(startingNode interface{}, startingRef *Ref, ptr *jsonpointer.Pointe
|
|||
if startingRef == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
if ptr == nil {
|
||||
return startingRef
|
||||
}
|
||||
|
@@ -269,107 +184,32 @@ func nextRef(startingNode interface{}, startingRef *Ref, ptr *jsonpointer.Pointe
|
|||
|
||||
refRef, _, _ := refPtr.Get(node)
|
||||
if refRef != nil {
|
||||
var rf Ref
|
||||
switch value := refRef.(type) {
|
||||
case string:
|
||||
rf, _ = NewRef(value)
|
||||
}
|
||||
rf, _ := NewRef(refRef.(string))
|
||||
nw, err := ret.Inherits(rf)
|
||||
if err != nil {
|
||||
break
|
||||
}
|
||||
nwURL := nw.GetURL()
|
||||
if nwURL.Scheme == "file" || (nwURL.Scheme == "" && nwURL.Host == "") {
|
||||
nwpt := filepath.ToSlash(nwURL.Path)
|
||||
if filepath.IsAbs(nwpt) {
|
||||
_, err := os.Stat(nwpt)
|
||||
if err != nil {
|
||||
nwURL.Path = filepath.Join(".", nwpt)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
ret = nw
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
return ret
|
||||
}
|
||||
|
||||
func debugLog(msg string, args ...interface{}) {
|
||||
if Debug {
|
||||
log.Printf(msg, args...)
|
||||
}
|
||||
}
|
||||
|
||||
func normalizeFileRef(ref *Ref, relativeBase string) *Ref {
|
||||
refURL := ref.GetURL()
|
||||
debugLog("normalizing %s against %s (%s)", ref.String(), relativeBase, refURL.String())
|
||||
if strings.HasPrefix(refURL.String(), "#") {
|
||||
return ref
|
||||
}
|
||||
|
||||
if refURL.Scheme == "file" || (refURL.Scheme == "" && refURL.Host == "") {
|
||||
filePath := refURL.Path
|
||||
debugLog("normalizing file path: %s", filePath)
|
||||
|
||||
if !filepath.IsAbs(filepath.FromSlash(filePath)) && len(relativeBase) != 0 {
|
||||
debugLog("joining %s with %s", relativeBase, filePath)
|
||||
if fi, err := os.Stat(filepath.FromSlash(relativeBase)); err == nil {
|
||||
if !fi.IsDir() {
|
||||
relativeBase = path.Dir(relativeBase)
|
||||
}
|
||||
}
|
||||
filePath = filepath.Join(filepath.FromSlash(relativeBase), filepath.FromSlash(filePath))
|
||||
}
|
||||
if !filepath.IsAbs(filepath.FromSlash(filePath)) {
|
||||
pwd, err := os.Getwd()
|
||||
if err == nil {
|
||||
debugLog("joining cwd %s with %s", pwd, filePath)
|
||||
filePath = filepath.Join(pwd, filePath)
|
||||
}
|
||||
}
|
||||
|
||||
debugLog("cleaning %s", filePath)
|
||||
filePath = filepath.Clean(filePath)
|
||||
_, err := os.Stat(filepath.FromSlash(filePath))
|
||||
if err == nil {
|
||||
debugLog("rewriting url to scheme \"\" path %s", filePath)
|
||||
refURL.Scheme = ""
|
||||
refURL.Path = filepath.ToSlash(filePath)
|
||||
debugLog("new url with joined filepath: %s", refURL.String())
|
||||
*ref = MustCreateRef(refURL.String())
|
||||
}
|
||||
}
|
||||
|
||||
debugLog("refurl: %s", ref.GetURL().String())
|
||||
return ref
|
||||
}
|
||||
|
||||
func (r *schemaLoader) resolveRef(currentRef, ref *Ref, node, target interface{}) error {
|
||||
|
||||
tgt := reflect.ValueOf(target)
|
||||
if tgt.Kind() != reflect.Ptr {
|
||||
return fmt.Errorf("resolve ref: target needs to be a pointer")
|
||||
}
|
||||
|
||||
oldRef := currentRef
|
||||
|
||||
if currentRef != nil {
|
||||
debugLog("resolve ref current %s new %s", currentRef.String(), ref.String())
|
||||
nextRef := nextRef(node, ref, currentRef.GetPointer())
|
||||
if nextRef == nil || nextRef.GetURL() == nil {
|
||||
return nil
|
||||
}
|
||||
var err error
|
||||
currentRef, err = currentRef.Inherits(*nextRef)
|
||||
debugLog("resolved ref current %s", currentRef.String())
|
||||
currentRef, err = currentRef.Inherits(*nextRef(node, ref, currentRef.GetPointer()))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if currentRef == nil {
|
||||
currentRef = ref
|
||||
}
|
||||
|
@@ -405,71 +245,42 @@ func (r *schemaLoader) resolveRef(currentRef, ref *Ref, node, target interface{}
|
|||
return nil
|
||||
}
|
||||
|
||||
relativeBase := ""
|
||||
if r.options != nil && r.options.RelativeBase != "" {
|
||||
relativeBase = r.options.RelativeBase
|
||||
}
|
||||
normalizeFileRef(currentRef, relativeBase)
|
||||
debugLog("current ref normalized file: %s", currentRef.String())
|
||||
normalizeFileRef(ref, relativeBase)
|
||||
debugLog("ref normalized file: %s", currentRef.String())
|
||||
|
||||
data, _, _, err := r.load(currentRef.GetURL())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if ((oldRef == nil && currentRef != nil) ||
|
||||
(oldRef != nil && currentRef == nil) ||
|
||||
oldRef.String() != currentRef.String()) &&
|
||||
((oldRef == nil && ref != nil) ||
|
||||
(oldRef != nil && ref == nil) ||
|
||||
(oldRef.String() != ref.String())) {
|
||||
|
||||
return r.resolveRef(currentRef, ref, data, target)
|
||||
}
|
||||
|
||||
var res interface{}
|
||||
if currentRef.String() != "" {
|
||||
res, _, err = currentRef.GetPointer().Get(data)
|
||||
if refURL.Scheme != "" && refURL.Host != "" {
|
||||
// most definitely take the red pill
|
||||
data, _, _, err := r.load(refURL)
|
||||
if err != nil {
|
||||
if strings.HasPrefix(ref.String(), "#") {
|
||||
if r.loadingRef != nil {
|
||||
rr, er := r.loadingRef.Inherits(*ref)
|
||||
if er != nil {
|
||||
return er
|
||||
}
|
||||
refURL = rr.GetURL()
|
||||
return err
|
||||
}
|
||||
|
||||
data, _, _, err = r.load(refURL)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
data = r.root
|
||||
}
|
||||
}
|
||||
if ((oldRef == nil && currentRef != nil) ||
|
||||
(oldRef != nil && currentRef == nil) ||
|
||||
oldRef.String() != currentRef.String()) &&
|
||||
((oldRef == nil && ref != nil) ||
|
||||
(oldRef != nil && ref == nil) ||
|
||||
(oldRef.String() != ref.String())) {
|
||||
|
||||
res, _, err = ref.GetPointer().Get(data)
|
||||
return r.resolveRef(currentRef, ref, data, target)
|
||||
}
|
||||
|
||||
var res interface{}
|
||||
if currentRef.String() != "" {
|
||||
res, _, err = currentRef.GetPointer().Get(data)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
res = data
|
||||
}
|
||||
} else {
|
||||
res = data
|
||||
|
||||
if err := swag.DynamicJSONToStruct(res, target); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
if err := swag.DynamicJSONToStruct(res, target); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
r.currentRef = currentRef
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *schemaLoader) load(refURL *url.URL) (interface{}, url.URL, bool, error) {
|
||||
debugLog("loading schema from url: %s", refURL)
|
||||
toFetch := *refURL
|
||||
toFetch.Fragment = ""
|
||||
|
||||
|
@@ -488,39 +299,33 @@ func (r *schemaLoader) load(refURL *url.URL) (interface{}, url.URL, bool, error)
|
|||
|
||||
return data, toFetch, fromCache, nil
|
||||
}
|
||||
|
||||
func (r *schemaLoader) Resolve(ref *Ref, target interface{}) error {
|
||||
return r.resolveRef(r.currentRef, ref, r.root, target)
|
||||
}
|
||||
|
||||
func (r *schemaLoader) reset() {
|
||||
ref := r.startingRef
|
||||
|
||||
var ptr *jsonpointer.Pointer
|
||||
if ref != nil {
|
||||
ptr = ref.GetPointer()
|
||||
if err := r.resolveRef(r.currentRef, ref, r.root, target); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
r.currentRef = nextRef(r.root, ref, ptr)
|
||||
return nil
|
||||
}
|
||||
|
||||
type specExpander struct {
|
||||
spec *Swagger
|
||||
resolver *schemaLoader
|
||||
}
|
||||
|
||||
// ExpandSpec expands the references in a swagger spec
|
||||
func ExpandSpec(spec *Swagger, options *ExpandOptions) error {
|
||||
resolver, err := defaultSchemaLoader(spec, nil, options, nil)
|
||||
func ExpandSpec(spec *Swagger) error {
|
||||
resolver, err := defaultSchemaLoader(spec, nil, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if options == nil || !options.SkipSchemas {
|
||||
for key, definition := range spec.Definitions {
|
||||
var def *Schema
|
||||
var err error
|
||||
if def, err = expandSchema(definition, []string{"#/definitions/" + key}, resolver); err != nil {
|
||||
return err
|
||||
}
|
||||
resolver.reset()
|
||||
spec.Definitions[key] = *def
|
||||
for key, defintition := range spec.Definitions {
|
||||
var def *Schema
|
||||
var err error
|
||||
if def, err = expandSchema(defintition, []string{"#/definitions/" + key}, resolver); err != nil {
|
||||
return err
|
||||
}
|
||||
spec.Definitions[key] = *def
|
||||
}
|
||||
|
||||
for key, parameter := range spec.Parameters {
|
||||
|
@@ -551,11 +356,7 @@ func ExpandSpec(spec *Swagger, options *ExpandOptions) error {
|
|||
|
||||
// ExpandSchema expands the refs in the schema object
|
||||
func ExpandSchema(schema *Schema, root interface{}, cache ResolutionCache) error {
|
||||
return ExpandSchemaWithBasePath(schema, root, cache, nil)
|
||||
}
|
||||
|
||||
// ExpandSchemaWithBasePath expands the refs in the schema object, base path configured through expand options
|
||||
func ExpandSchemaWithBasePath(schema *Schema, root interface{}, cache ResolutionCache, opts *ExpandOptions) error {
|
||||
if schema == nil {
|
||||
return nil
|
||||
}
|
||||
|
@@ -566,17 +367,18 @@ func ExpandSchemaWithBasePath(schema *Schema, root interface{}, cache Resolution
|
|||
nrr, _ := NewRef(schema.ID)
|
||||
var rrr *Ref
|
||||
if nrr.String() != "" {
|
||||
switch rt := root.(type) {
|
||||
switch root.(type) {
|
||||
case *Schema:
|
||||
rid, _ := NewRef(rt.ID)
|
||||
rid, _ := NewRef(root.(*Schema).ID)
|
||||
rrr, _ = rid.Inherits(nrr)
|
||||
case *Swagger:
|
||||
rid, _ := NewRef(rt.ID)
|
||||
rid, _ := NewRef(root.(*Swagger).ID)
|
||||
rrr, _ = rid.Inherits(nrr)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
resolver, err := defaultSchemaLoader(root, rrr, opts, cache)
|
||||
resolver, err := defaultSchemaLoader(root, rrr, cache)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@@ -587,7 +389,7 @@ func ExpandSchemaWithBasePath(schema *Schema, root interface{}, cache Resolution
|
|||
}
|
||||
var s *Schema
|
||||
if s, err = expandSchema(*schema, refs, resolver); err != nil {
|
||||
return err
|
||||
return nil
|
||||
}
|
||||
*schema = *s
|
||||
return nil
|
||||
|
@@ -598,15 +400,7 @@ func expandItems(target Schema, parentRefs []string, resolver *schemaLoader) (*S
|
|||
if target.Items.Schema != nil {
|
||||
t, err := expandSchema(*target.Items.Schema, parentRefs, resolver)
|
||||
if err != nil {
|
||||
if target.Items.Schema.ID == "" {
|
||||
target.Items.Schema.ID = target.ID
|
||||
if err != nil {
|
||||
t, err = expandSchema(*target.Items.Schema, parentRefs, resolver)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
*target.Items.Schema = *t
|
||||
}
|
||||
|
@@ -621,112 +415,101 @@ func expandItems(target Schema, parentRefs []string, resolver *schemaLoader) (*S
|
|||
return &target, nil
|
||||
}
|
||||
|
||||
func expandSchema(target Schema, parentRefs []string, resolver *schemaLoader) (*Schema, error) {
|
||||
func expandSchema(target Schema, parentRefs []string, resolver *schemaLoader) (schema *Schema, err error) {
|
||||
defer func() {
|
||||
schema = &target
|
||||
}()
|
||||
if target.Ref.String() == "" && target.Ref.IsRoot() {
|
||||
debugLog("skipping expand schema for no ref and root: %v", resolver.root)
|
||||
|
||||
return resolver.root.(*Schema), nil
|
||||
target = *resolver.root.(*Schema)
|
||||
return
|
||||
}
|
||||
|
||||
// t is the new expanded schema
|
||||
var t *Schema
|
||||
|
||||
for target.Ref.String() != "" {
|
||||
if swag.ContainsStringsCI(parentRefs, target.Ref.String()) {
|
||||
return &target, nil
|
||||
// var newTarget Schema
|
||||
pRefs := strings.Join(parentRefs, ",")
|
||||
pRefs += ","
|
||||
if strings.Contains(pRefs, target.Ref.String()+",") {
|
||||
err = nil
|
||||
return
|
||||
}
|
||||
|
||||
if err := resolver.Resolve(&target.Ref, &t); err != nil {
|
||||
return &target, err
|
||||
}
|
||||
|
||||
if swag.ContainsStringsCI(parentRefs, target.Ref.String()) {
|
||||
debugLog("ref already exists in parent")
|
||||
return &target, nil
|
||||
if err = resolver.Resolve(&target.Ref, &t); err != nil {
|
||||
return
|
||||
}
|
||||
parentRefs = append(parentRefs, target.Ref.String())
|
||||
target = *t
|
||||
}
|
||||
|
||||
t, err := expandItems(target, parentRefs, resolver)
|
||||
if err != nil {
|
||||
return &target, err
|
||||
if t, err = expandItems(target, parentRefs, resolver); err != nil {
|
||||
return
|
||||
}
|
||||
target = *t
|
||||
|
||||
for i := range target.AllOf {
|
||||
t, err := expandSchema(target.AllOf[i], parentRefs, resolver)
|
||||
if err != nil {
|
||||
return &target, err
|
||||
if t, err = expandSchema(target.AllOf[i], parentRefs, resolver); err != nil {
|
||||
return
|
||||
}
|
||||
target.AllOf[i] = *t
|
||||
}
|
||||
for i := range target.AnyOf {
|
||||
t, err := expandSchema(target.AnyOf[i], parentRefs, resolver)
|
||||
if err != nil {
|
||||
return &target, err
|
||||
if t, err = expandSchema(target.AnyOf[i], parentRefs, resolver); err != nil {
|
||||
return
|
||||
}
|
||||
target.AnyOf[i] = *t
|
||||
}
|
||||
for i := range target.OneOf {
|
||||
t, err := expandSchema(target.OneOf[i], parentRefs, resolver)
|
||||
if err != nil {
|
||||
return &target, err
|
||||
if t, err = expandSchema(target.OneOf[i], parentRefs, resolver); err != nil {
|
||||
return
|
||||
}
|
||||
target.OneOf[i] = *t
|
||||
}
|
||||
if target.Not != nil {
|
||||
t, err := expandSchema(*target.Not, parentRefs, resolver)
|
||||
if err != nil {
|
||||
return &target, err
|
||||
if t, err = expandSchema(*target.Not, parentRefs, resolver); err != nil {
|
||||
return
|
||||
}
|
||||
*target.Not = *t
|
||||
}
|
||||
for k := range target.Properties {
|
||||
t, err := expandSchema(target.Properties[k], parentRefs, resolver)
|
||||
if err != nil {
|
||||
return &target, err
|
||||
for k, _ := range target.Properties {
|
||||
if t, err = expandSchema(target.Properties[k], parentRefs, resolver); err != nil {
|
||||
return
|
||||
}
|
||||
target.Properties[k] = *t
|
||||
}
|
||||
if target.AdditionalProperties != nil && target.AdditionalProperties.Schema != nil {
|
||||
t, err := expandSchema(*target.AdditionalProperties.Schema, parentRefs, resolver)
|
||||
if err != nil {
|
||||
return &target, err
|
||||
if t, err = expandSchema(*target.AdditionalProperties.Schema, parentRefs, resolver); err != nil {
|
||||
return
|
||||
}
|
||||
*target.AdditionalProperties.Schema = *t
|
||||
}
|
||||
for k := range target.PatternProperties {
|
||||
t, err := expandSchema(target.PatternProperties[k], parentRefs, resolver)
|
||||
if err != nil {
|
||||
return &target, err
|
||||
for k, _ := range target.PatternProperties {
|
||||
if t, err = expandSchema(target.PatternProperties[k], parentRefs, resolver); err != nil {
|
||||
return
|
||||
}
|
||||
target.PatternProperties[k] = *t
|
||||
}
|
||||
for k := range target.Dependencies {
|
||||
for k, _ := range target.Dependencies {
|
||||
if target.Dependencies[k].Schema != nil {
|
||||
t, err := expandSchema(*target.Dependencies[k].Schema, parentRefs, resolver)
|
||||
if err != nil {
|
||||
return &target, err
|
||||
if t, err = expandSchema(*target.Dependencies[k].Schema, parentRefs, resolver); err != nil {
|
||||
return
|
||||
}
|
||||
*target.Dependencies[k].Schema = *t
|
||||
}
|
||||
}
|
||||
if target.AdditionalItems != nil && target.AdditionalItems.Schema != nil {
|
||||
t, err := expandSchema(*target.AdditionalItems.Schema, parentRefs, resolver)
|
||||
if err != nil {
|
||||
return &target, err
|
||||
if t, err = expandSchema(*target.AdditionalItems.Schema, parentRefs, resolver); err != nil {
|
||||
return
|
||||
}
|
||||
*target.AdditionalItems.Schema = *t
|
||||
}
|
||||
for k := range target.Definitions {
|
||||
t, err := expandSchema(target.Definitions[k], parentRefs, resolver)
|
||||
if err != nil {
|
||||
return &target, err
|
||||
for k, _ := range target.Definitions {
|
||||
if t, err = expandSchema(target.Definitions[k], parentRefs, resolver); err != nil {
|
||||
return
|
||||
}
|
||||
target.Definitions[k] = *t
|
||||
}
|
||||
return &target, nil
|
||||
return
|
||||
}
|
||||
|
||||
func expandPathItem(pathItem *PathItem, resolver *schemaLoader) error {
|
||||
|
@@ -737,8 +520,6 @@ func expandPathItem(pathItem *PathItem, resolver *schemaLoader) error {
|
|||
if err := resolver.Resolve(&pathItem.Ref, &pathItem); err != nil {
|
||||
return err
|
||||
}
|
||||
resolver.reset()
|
||||
pathItem.Ref = Ref{}
|
||||
}
|
||||
|
||||
for idx := range pathItem.Parameters {
|
||||
|
@@ -801,28 +582,22 @@ func expandResponse(response *Response, resolver *schemaLoader) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
var parentRefs []string
|
||||
if response.Ref.String() != "" {
|
||||
parentRefs = append(parentRefs, response.Ref.String())
|
||||
if err := resolver.Resolve(&response.Ref, response); err != nil {
|
||||
return err
|
||||
}
|
||||
resolver.reset()
|
||||
response.Ref = Ref{}
|
||||
}
|
||||
|
||||
if !resolver.options.SkipSchemas && response.Schema != nil {
|
||||
parentRefs = append(parentRefs, response.Schema.Ref.String())
|
||||
debugLog("response ref: %s", response.Schema.Ref)
|
||||
if response.Schema != nil {
|
||||
parentRefs := []string{response.Schema.Ref.String()}
|
||||
if err := resolver.Resolve(&response.Schema.Ref, &response.Schema); err != nil {
|
||||
return err
|
||||
}
|
||||
s, err := expandSchema(*response.Schema, parentRefs, resolver)
|
||||
if err != nil {
|
||||
if s, err := expandSchema(*response.Schema, parentRefs, resolver); err != nil {
|
||||
return err
|
||||
} else {
|
||||
*response.Schema = *s
|
||||
}
|
||||
resolver.reset()
|
||||
*response.Schema = *s
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
@@ -831,27 +606,21 @@ func expandParameter(parameter *Parameter, resolver *schemaLoader) error {
|
|||
if parameter == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
var parentRefs []string
|
||||
if parameter.Ref.String() != "" {
|
||||
parentRefs = append(parentRefs, parameter.Ref.String())
|
||||
if err := resolver.Resolve(¶meter.Ref, parameter); err != nil {
|
||||
return err
|
||||
}
|
||||
resolver.reset()
|
||||
parameter.Ref = Ref{}
|
||||
}
|
||||
if !resolver.options.SkipSchemas && parameter.Schema != nil {
|
||||
parentRefs = append(parentRefs, parameter.Schema.Ref.String())
|
||||
if parameter.Schema != nil {
|
||||
parentRefs := []string{parameter.Schema.Ref.String()}
|
||||
if err := resolver.Resolve(¶meter.Schema.Ref, ¶meter.Schema); err != nil {
|
||||
return err
|
||||
}
|
||||
s, err := expandSchema(*parameter.Schema, parentRefs, resolver)
|
||||
if err != nil {
|
||||
if s, err := expandSchema(*parameter.Schema, parentRefs, resolver); err != nil {
|
||||
return err
|
||||
} else {
|
||||
*parameter.Schema = *s
|
||||
}
|
||||
resolver.reset()
|
||||
*parameter.Schema = *s
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
|
30
vendor/github.com/go-openapi/spec/header.go
generated
vendored
|
@@ -16,9 +16,7 @@ package spec
|
|||
|
||||
import (
|
||||
"encoding/json"
|
||||
"strings"
|
||||
|
||||
"github.com/go-openapi/jsonpointer"
|
||||
"github.com/go-openapi/swag"
|
||||
)
|
||||
|
||||
|
@@ -32,7 +30,6 @@ type HeaderProps struct {
|
|||
type Header struct {
|
||||
CommonValidations
|
||||
SimpleSchema
|
||||
VendorExtensible
|
||||
HeaderProps
|
||||
}
|
||||
|
||||
|
@@ -161,35 +158,8 @@ func (h *Header) UnmarshalJSON(data []byte) error {
|
|||
if err := json.Unmarshal(data, &h.SimpleSchema); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := json.Unmarshal(data, &h.VendorExtensible); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := json.Unmarshal(data, &h.HeaderProps); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// JSONLookup look up a value by the json property name
|
||||
func (p Header) JSONLookup(token string) (interface{}, error) {
|
||||
if ex, ok := p.Extensions[token]; ok {
|
||||
return &ex, nil
|
||||
}
|
||||
|
||||
r, _, err := jsonpointer.GetForToken(p.CommonValidations, token)
|
||||
if err != nil && !strings.HasPrefix(err.Error(), "object has no field") {
|
||||
return nil, err
|
||||
}
|
||||
if r != nil {
|
||||
return r, nil
|
||||
}
|
||||
r, _, err = jsonpointer.GetForToken(p.SimpleSchema, token)
|
||||
if err != nil && !strings.HasPrefix(err.Error(), "object has no field") {
|
||||
return nil, err
|
||||
}
|
||||
if r != nil {
|
||||
return r, nil
|
||||
}
|
||||
r, _, err = jsonpointer.GetForToken(p.HeaderProps, token)
|
||||
return r, err
|
||||
}
|
||||
|
|
22
vendor/github.com/go-openapi/spec/items.go
generated
vendored
|
@@ -16,9 +16,7 @@ package spec
|
|||
|
||||
import (
|
||||
"encoding/json"
|
||||
"strings"
|
||||
|
||||
"github.com/go-openapi/jsonpointer"
|
||||
"github.com/go-openapi/swag"
|
||||
)
|
||||
|
||||
|
@@ -62,12 +60,11 @@ type CommonValidations struct {
|
|||
// Items a limited subset of JSON-Schema's items object.
|
||||
// It is used by parameter definitions that are not located in "body".
|
||||
//
|
||||
// For more information: http://goo.gl/8us55a#items-object
|
||||
// For more information: http://goo.gl/8us55a#items-object-
|
||||
type Items struct {
|
||||
Refable
|
||||
CommonValidations
|
||||
SimpleSchema
|
||||
VendorExtensible
|
||||
}
|
||||
|
||||
// NewItems creates a new instance of items
|
||||
|
@@ -200,20 +197,3 @@ func (i Items) MarshalJSON() ([]byte, error) {
|
|||
}
|
||||
return swag.ConcatJSON(b3, b1, b2), nil
|
||||
}
|
||||
|
||||
// JSONLookup look up a value by the json property name
|
||||
func (p Items) JSONLookup(token string) (interface{}, error) {
|
||||
if token == "$ref" {
|
||||
return &p.Ref, nil
|
||||
}
|
||||
|
||||
r, _, err := jsonpointer.GetForToken(p.CommonValidations, token)
|
||||
if err != nil && !strings.HasPrefix(err.Error(), "object has no field") {
|
||||
return nil, err
|
||||
}
|
||||
if r != nil {
|
||||
return r, nil
|
||||
}
|
||||
r, _, err = jsonpointer.GetForToken(p.SimpleSchema, token)
|
||||
return r, err
|
||||
}
|
||||
|
|
6
vendor/github.com/go-openapi/spec/parameter.go
generated
vendored
|
@@ -16,7 +16,6 @@ package spec
|
|||
|
||||
import (
|
||||
"encoding/json"
|
||||
"strings"
|
||||
|
||||
"github.com/go-openapi/jsonpointer"
|
||||
"github.com/go-openapi/swag"
|
||||
|
@@ -101,16 +100,15 @@ func (p Parameter) JSONLookup(token string) (interface{}, error) {
|
|||
if token == "$ref" {
|
||||
return &p.Ref, nil
|
||||
}
|
||||
|
||||
r, _, err := jsonpointer.GetForToken(p.CommonValidations, token)
|
||||
if err != nil && !strings.HasPrefix(err.Error(), "object has no field") {
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if r != nil {
|
||||
return r, nil
|
||||
}
|
||||
r, _, err = jsonpointer.GetForToken(p.SimpleSchema, token)
|
||||
if err != nil && !strings.HasPrefix(err.Error(), "object has no field") {
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if r != nil {
|
||||
|
|
10
vendor/github.com/go-openapi/spec/ref.go
generated
vendored
|
@@ -55,7 +55,7 @@ func (r *Ref) RemoteURI() string {
|
|||
}
|
||||
|
||||
// IsValidURI returns true when the url the ref points to can be found
|
||||
func (r *Ref) IsValidURI(basepaths ...string) bool {
|
||||
func (r *Ref) IsValidURI() bool {
|
||||
if r.String() == "" {
|
||||
return true
|
||||
}
|
||||
|
@@ -81,18 +81,14 @@ func (r *Ref) IsValidURI(basepaths ...string) bool {
|
|||
// check for local file
|
||||
pth := v
|
||||
if r.HasURLPathOnly {
|
||||
base := "."
|
||||
if len(basepaths) > 0 {
|
||||
base = filepath.Dir(filepath.Join(basepaths...))
|
||||
}
|
||||
p, e := filepath.Abs(filepath.ToSlash(filepath.Join(base, pth)))
|
||||
p, e := filepath.Abs(pth)
|
||||
if e != nil {
|
||||
return false
|
||||
}
|
||||
pth = p
|
||||
}
|
||||
|
||||
fi, err := os.Stat(filepath.ToSlash(pth))
|
||||
fi, err := os.Stat(pth)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
|
|
Some files were not shown because too many files have changed in this diff.