Switch to github.com/golang/dep for vendoring
Signed-off-by: Mrunal Patel <mrunalp@gmail.com>
parent d6ab91be27
commit 8e5b17cf13

15431 changed files with 3971413 additions and 8881 deletions
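For context, here is a minimal sketch of the dep workflow that produces a vendor tree like the one in this commit (an illustration, assuming dep is installed and the repository sits on the GOPATH; the exact invocation used for this commit is not recorded here):

```sh
go get -u github.com/golang/dep/cmd/dep
dep init      # writes Gopkg.toml and Gopkg.lock and populates vendor/
dep ensure    # syncs vendor/ against the manifest and lock file
```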
vendor/k8s.io/kubernetes/examples/BUILD (generated, vendored, new file: 79 lines)
@@ -0,0 +1,79 @@
package(default_visibility = ["//visibility:public"])

licenses(["notice"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_library",
    "go_test",
)

filegroup(
    name = "config",
    srcs = glob([
        "**/*.yaml",
        "**/*.yml",
        "**/*.json",
    ]) + [
        "pod",
    ],
)

filegroup(
    name = "sources",
    srcs = glob([
        "**/*",
    ]),
)

go_library(
    name = "go_default_library",
    srcs = ["doc.go"],
    tags = ["automanaged"],
)

go_test(
    name = "go_default_xtest",
    srcs = ["examples_test.go"],
    tags = ["automanaged"],
    deps = [
        "//pkg/api:go_default_library",
        "//pkg/api/testapi:go_default_library",
        "//pkg/api/validation:go_default_library",
        "//pkg/apis/apps:go_default_library",
        "//pkg/apis/apps/validation:go_default_library",
        "//pkg/apis/batch:go_default_library",
        "//pkg/apis/extensions:go_default_library",
        "//pkg/apis/extensions/validation:go_default_library",
        "//pkg/capabilities:go_default_library",
        "//pkg/registry/batch/job:go_default_library",
        "//plugin/pkg/scheduler/api:go_default_library",
        "//plugin/pkg/scheduler/api/latest:go_default_library",
        "//vendor:github.com/golang/glog",
        "//vendor:k8s.io/apimachinery/pkg/runtime",
        "//vendor:k8s.io/apimachinery/pkg/types",
        "//vendor:k8s.io/apimachinery/pkg/util/validation/field",
        "//vendor:k8s.io/apimachinery/pkg/util/yaml",
    ],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [
        ":package-srcs",
        "//examples/apiserver:all-srcs",
        "//examples/explorer:all-srcs",
        "//examples/guestbook-go:all-srcs",
        "//examples/https-nginx:all-srcs",
        "//examples/k8petstore/web-server/src:all-srcs",
        "//examples/sharing-clusters:all-srcs",
    ],
    tags = ["automanaged"],
)
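As an aside, the generated targets above would be driven with ordinary Bazel commands (a sketch, assuming a configured Bazel workspace at the repository root; this is not part of the commit):

```sh
bazel build //examples:go_default_library   # build the package
bazel test //examples:go_default_xtest      # run examples_test.go
```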
vendor/k8s.io/kubernetes/examples/OWNERS (generated, vendored, new file: 5 lines)
@@ -0,0 +1,5 @@
assignees:
- bgrant0607
- brendandburns
- thockin
- zmerlynn
vendor/k8s.io/kubernetes/examples/README.md (generated, vendored, new file: 29 lines)
@@ -0,0 +1,29 @@
# Kubernetes Examples: releases.k8s.io/HEAD

This directory contains a number of examples of how to run
real applications with Kubernetes.

Demonstrations of how to use specific Kubernetes features can be found in our [documents](../docs/).


### Maintained Examples

Maintained Examples are expected to be updated with every Kubernetes
release, to use the latest and greatest features, current guidelines
and best practices, and to refresh command syntax, output, and changed
prerequisites, as needed.

Name | Description | Notable Features Used | Complexity Level
------------- | ------------- | ------------ | ------------
[Guestbook](guestbook/) | PHP app with Redis | Replication Controller, Service | Beginner
[WordPress](mysql-wordpress-pd/) | WordPress with MySQL | Deployment, Persistent Volume with Claim | Beginner
[Cassandra](storage/cassandra/) | Cloud Native Cassandra | Daemon Set | Intermediate

Note: Please add only maintained examples to the list above.

See [Example Guidelines](guidelines.md) for a description of what goes
in this directory, and what examples should contain.

<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
[]()
<!-- END MUNGE: GENERATED_ANALYTICS -->
vendor/k8s.io/kubernetes/examples/apiserver/BUILD (generated, vendored, new file: 47 lines)
@@ -0,0 +1,47 @@
package(default_visibility = ["//visibility:public"])

licenses(["notice"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_library",
)

go_library(
    name = "go_default_library",
    srcs = ["apiserver.go"],
    tags = ["automanaged"],
    deps = [
        "//cmd/libs/go2idl/client-gen/test_apis/testgroup/install:go_default_library",
        "//cmd/libs/go2idl/client-gen/test_apis/testgroup/v1:go_default_library",
        "//examples/apiserver/rest:go_default_library",
        "//pkg/api:go_default_library",
        "//pkg/genericapiserver:go_default_library",
        "//pkg/genericapiserver/api/rest:go_default_library",
        "//pkg/genericapiserver/authorizer:go_default_library",
        "//pkg/genericapiserver/options:go_default_library",
        "//pkg/kubeapiserver/options:go_default_library",
        "//pkg/registry/generic:go_default_library",
        "//pkg/storage/storagebackend:go_default_library",
        "//vendor:github.com/golang/glog",
        "//vendor:k8s.io/apimachinery/pkg/runtime/schema",
        "//vendor:k8s.io/apimachinery/pkg/util/errors",
    ],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [
        ":package-srcs",
        "//examples/apiserver/rest:all-srcs",
        "//examples/apiserver/server:all-srcs",
    ],
    tags = ["automanaged"],
)
vendor/k8s.io/kubernetes/examples/apiserver/README.md (generated, vendored, new file: 22 lines)
@@ -0,0 +1,22 @@
# API Server

This is a work-in-progress example of an API server.
We are working on isolating the generic API server code from Kubernetes-specific
API objects. Some relevant issues:

* https://github.com/kubernetes/kubernetes/issues/17412
* https://github.com/kubernetes/kubernetes/issues/2742
* https://github.com/kubernetes/kubernetes/issues/13541

The code here exemplifies what it takes to write your own API server.

To start this example API server, run:

```
$ go run examples/apiserver/server/main.go
```

<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
[]()
<!-- END MUNGE: GENERATED_ANALYTICS -->
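Once the server is up, a quick smoke test (a sketch, not part of the example; it assumes the default insecure port 8081 configured in apiserver.go below and an etcd reachable at 127.0.0.1:2379) is to query the standard discovery endpoint:

```sh
# List the API groups served by the example server, including the testgroup API.
curl http://localhost:8081/apis
```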
vendor/k8s.io/kubernetes/examples/apiserver/apiserver.go (generated, vendored, new file: 159 lines)
@@ -0,0 +1,159 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package apiserver

import (
    "fmt"

    "k8s.io/apimachinery/pkg/runtime/schema"
    utilerrors "k8s.io/apimachinery/pkg/util/errors"
    "k8s.io/kubernetes/cmd/libs/go2idl/client-gen/test_apis/testgroup/v1"
    testgroupetcd "k8s.io/kubernetes/examples/apiserver/rest"
    "k8s.io/kubernetes/pkg/api"
    "k8s.io/kubernetes/pkg/genericapiserver"
    "k8s.io/kubernetes/pkg/genericapiserver/api/rest"
    "k8s.io/kubernetes/pkg/genericapiserver/authorizer"
    genericoptions "k8s.io/kubernetes/pkg/genericapiserver/options"
    kubeoptions "k8s.io/kubernetes/pkg/kubeapiserver/options"
    "k8s.io/kubernetes/pkg/registry/generic"
    "k8s.io/kubernetes/pkg/storage/storagebackend"

    // Install the testgroup API
    _ "k8s.io/kubernetes/cmd/libs/go2idl/client-gen/test_apis/testgroup/install"

    "github.com/golang/glog"
)

const (
    // Ports on which to run the server.
    // Explicitly setting these to a different value than the default values, to prevent this from clashing with a local cluster.
    InsecurePort = 8081
    SecurePort   = 6444
)

func newStorageFactory() genericapiserver.StorageFactory {
    config := storagebackend.Config{
        Prefix:     genericoptions.DefaultEtcdPathPrefix,
        ServerList: []string{"http://127.0.0.1:2379"},
    }
    storageFactory := genericapiserver.NewDefaultStorageFactory(config, "application/json", api.Codecs, genericapiserver.NewDefaultResourceEncodingConfig(), genericapiserver.NewResourceConfig())

    return storageFactory
}

type ServerRunOptions struct {
    GenericServerRunOptions *genericoptions.ServerRunOptions
    Etcd                    *genericoptions.EtcdOptions
    SecureServing           *genericoptions.SecureServingOptions
    InsecureServing         *genericoptions.ServingOptions
    Authentication          *kubeoptions.BuiltInAuthenticationOptions
    CloudProvider           *kubeoptions.CloudProviderOptions
}

func NewServerRunOptions() *ServerRunOptions {
    s := ServerRunOptions{
        GenericServerRunOptions: genericoptions.NewServerRunOptions(),
        Etcd:            genericoptions.NewEtcdOptions(),
        SecureServing:   genericoptions.NewSecureServingOptions(),
        InsecureServing: genericoptions.NewInsecureServingOptions(),
        Authentication:  kubeoptions.NewBuiltInAuthenticationOptions().WithAll(),
        CloudProvider:   kubeoptions.NewCloudProviderOptions(),
    }
    s.InsecureServing.BindPort = InsecurePort
    s.SecureServing.ServingOptions.BindPort = SecurePort

    return &s
}

func (serverOptions *ServerRunOptions) Run(stopCh <-chan struct{}) error {
    serverOptions.Etcd.StorageConfig.ServerList = []string{"http://127.0.0.1:2379"}

    // set defaults
    if err := serverOptions.CloudProvider.DefaultExternalHost(serverOptions.GenericServerRunOptions); err != nil {
        return err
    }
    if err := serverOptions.SecureServing.MaybeDefaultWithSelfSignedCerts(serverOptions.GenericServerRunOptions.AdvertiseAddress.String()); err != nil {
        glog.Fatalf("Error creating self-signed certificates: %v", err)
    }

    // validate options
    if errs := serverOptions.Etcd.Validate(); len(errs) > 0 {
        return utilerrors.NewAggregate(errs)
    }
    if errs := serverOptions.SecureServing.Validate(); len(errs) > 0 {
        return utilerrors.NewAggregate(errs)
    }
    if errs := serverOptions.InsecureServing.Validate("insecure-port"); len(errs) > 0 {
        return utilerrors.NewAggregate(errs)
    }

    // create config from options
    config := genericapiserver.NewConfig().
        ApplyOptions(serverOptions.GenericServerRunOptions).
        ApplyInsecureServingOptions(serverOptions.InsecureServing)

    if _, err := config.ApplySecureServingOptions(serverOptions.SecureServing); err != nil {
        return fmt.Errorf("failed to configure https: %s", err)
    }
    if err := serverOptions.Authentication.Apply(config); err != nil {
        return fmt.Errorf("failed to configure authentication: %s", err)
    }

    config.Authorizer = authorizer.NewAlwaysAllowAuthorizer()
    config.SwaggerConfig = genericapiserver.DefaultSwaggerConfig()

    s, err := config.Complete().New()
    if err != nil {
        return fmt.Errorf("Error in bringing up the server: %v", err)
    }

    groupVersion := v1.SchemeGroupVersion
    groupName := groupVersion.Group
    groupMeta, err := api.Registry.Group(groupName)
    if err != nil {
        return fmt.Errorf("%v", err)
    }
    storageFactory := newStorageFactory()
    storageConfig, err := storageFactory.NewConfig(schema.GroupResource{Group: groupName, Resource: "testtype"})
    if err != nil {
        return fmt.Errorf("Unable to get storage config: %v", err)
    }

    testTypeOpts := generic.RESTOptions{
        StorageConfig:           storageConfig,
        Decorator:               generic.UndecoratedStorage,
        ResourcePrefix:          "testtypes",
        DeleteCollectionWorkers: 1,
    }

    restStorageMap := map[string]rest.Storage{
        "testtypes": testgroupetcd.NewREST(testTypeOpts),
    }
    apiGroupInfo := genericapiserver.APIGroupInfo{
        GroupMeta: *groupMeta,
        VersionedResourcesStorageMap: map[string]map[string]rest.Storage{
            groupVersion.Version: restStorageMap,
        },
        Scheme:               api.Scheme,
        NegotiatedSerializer: api.Codecs,
    }
    if err := s.InstallAPIGroup(&apiGroupInfo); err != nil {
        return fmt.Errorf("Error in installing API: %v", err)
    }
    s.PrepareRun().Run(stopCh)
    return nil
}
vendor/k8s.io/kubernetes/examples/apiserver/rest/BUILD (generated, vendored, new file: 40 lines)
@@ -0,0 +1,40 @@
package(default_visibility = ["//visibility:public"])

licenses(["notice"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_library",
)

go_library(
    name = "go_default_library",
    srcs = ["reststorage.go"],
    tags = ["automanaged"],
    deps = [
        "//cmd/libs/go2idl/client-gen/test_apis/testgroup:go_default_library",
        "//pkg/api:go_default_library",
        "//pkg/fields:go_default_library",
        "//pkg/registry/generic:go_default_library",
        "//pkg/registry/generic/registry:go_default_library",
        "//pkg/storage:go_default_library",
        "//vendor:k8s.io/apimachinery/pkg/labels",
        "//vendor:k8s.io/apimachinery/pkg/runtime",
        "//vendor:k8s.io/apimachinery/pkg/util/validation/field",
        "//vendor:k8s.io/apiserver/pkg/request",
        "//vendor:k8s.io/apiserver/pkg/storage/names",
    ],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
)
vendor/k8s.io/kubernetes/examples/apiserver/rest/reststorage.go (generated, vendored, new file: 91 lines)
@@ -0,0 +1,91 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package rest

import (
    "fmt"

    "k8s.io/apimachinery/pkg/labels"
    "k8s.io/apimachinery/pkg/runtime"
    "k8s.io/apimachinery/pkg/util/validation/field"
    genericapirequest "k8s.io/apiserver/pkg/request"
    "k8s.io/apiserver/pkg/storage/names"
    "k8s.io/kubernetes/cmd/libs/go2idl/client-gen/test_apis/testgroup"
    "k8s.io/kubernetes/pkg/api"
    "k8s.io/kubernetes/pkg/fields"
    "k8s.io/kubernetes/pkg/registry/generic"
    genericregistry "k8s.io/kubernetes/pkg/registry/generic/registry"
    "k8s.io/kubernetes/pkg/storage"
)

type REST struct {
    *genericregistry.Store
}

// NewREST returns a RESTStorage object that will work with testtype.
func NewREST(optsGetter generic.RESTOptionsGetter) *REST {
    store := &genericregistry.Store{
        NewFunc: func() runtime.Object { return &testgroup.TestType{} },
        // NewListFunc returns an object capable of storing results of an etcd list.
        NewListFunc: func() runtime.Object { return &testgroup.TestTypeList{} },
        // Retrieve the name field of the resource.
        ObjectNameFunc: func(obj runtime.Object) (string, error) {
            return obj.(*testgroup.TestType).Name, nil
        },
        // Used to match objects based on labels/fields for list.
        PredicateFunc: matcher,
        // QualifiedResource should always be plural
        QualifiedResource: api.Resource("testtypes"),

        CreateStrategy: strategy,
    }
    options := &generic.StoreOptions{RESTOptions: optsGetter, AttrFunc: getAttrs}
    if err := store.CompleteWithOptions(options); err != nil {
        panic(err) // TODO: Propagate error up
    }
    return &REST{store}
}

type fakeStrategy struct {
    runtime.ObjectTyper
    names.NameGenerator
}

func (*fakeStrategy) NamespaceScoped() bool                                              { return false }
func (*fakeStrategy) PrepareForCreate(ctx genericapirequest.Context, obj runtime.Object) {}
func (*fakeStrategy) Validate(ctx genericapirequest.Context, obj runtime.Object) field.ErrorList {
    return nil
}
func (*fakeStrategy) Canonicalize(obj runtime.Object) {}

var strategy = &fakeStrategy{api.Scheme, names.SimpleNameGenerator}

func getAttrs(obj runtime.Object) (labels.Set, fields.Set, error) {
    testType, ok := obj.(*testgroup.TestType)
    if !ok {
        return nil, nil, fmt.Errorf("not a TestType")
    }
    return labels.Set(testType.ObjectMeta.Labels), fields.Set{}, nil
}

func matcher(label labels.Selector, field fields.Selector) storage.SelectionPredicate {
    return storage.SelectionPredicate{
        Label:    label,
        Field:    field,
        GetAttrs: getAttrs,
    }
}
vendor/k8s.io/kubernetes/examples/apiserver/server/BUILD (generated, vendored, new file: 41 lines)
@@ -0,0 +1,41 @@
package(default_visibility = ["//visibility:public"])

licenses(["notice"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_binary",
    "go_library",
)

go_binary(
    name = "server",
    library = ":go_default_library",
    tags = ["automanaged"],
)

go_library(
    name = "go_default_library",
    srcs = ["main.go"],
    tags = ["automanaged"],
    deps = [
        "//examples/apiserver:go_default_library",
        "//pkg/util/flag:go_default_library",
        "//vendor:github.com/golang/glog",
        "//vendor:github.com/spf13/pflag",
        "//vendor:k8s.io/apimachinery/pkg/util/wait",
    ],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
)
vendor/k8s.io/kubernetes/examples/apiserver/server/main.go (generated, vendored, new file: 43 lines)
@@ -0,0 +1,43 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package main

import (
    "k8s.io/apimachinery/pkg/util/wait"
    "k8s.io/kubernetes/examples/apiserver"
    "k8s.io/kubernetes/pkg/util/flag"

    "github.com/golang/glog"
    "github.com/spf13/pflag"
)

func main() {
    serverRunOptions := apiserver.NewServerRunOptions()

    // Parse command line flags.
    serverRunOptions.GenericServerRunOptions.AddUniversalFlags(pflag.CommandLine)
    serverRunOptions.Etcd.AddFlags(pflag.CommandLine)
    serverRunOptions.SecureServing.AddFlags(pflag.CommandLine)
    serverRunOptions.SecureServing.AddDeprecatedFlags(pflag.CommandLine)
    serverRunOptions.InsecureServing.AddFlags(pflag.CommandLine)
    serverRunOptions.InsecureServing.AddDeprecatedFlags(pflag.CommandLine)
    flag.InitFlags()

    if err := serverRunOptions.Run(wait.NeverStop); err != nil {
        glog.Fatalf("Error in bringing up the server: %v", err)
    }
}
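For reference, a hypothetical invocation of this binary with explicit flags (a sketch only; the flag names below are the ones these option helpers conventionally register, so verify against --help before relying on them):

```sh
go run examples/apiserver/server/main.go \
  --etcd-servers=http://127.0.0.1:2379 \
  --insecure-port=8081
```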
vendor/k8s.io/kubernetes/examples/cluster-dns/README.md (generated, vendored, new file: 182 lines)
@@ -0,0 +1,182 @@
## Kubernetes DNS example

This is a toy example demonstrating how to use Kubernetes DNS.

### Step Zero: Prerequisites

This example assumes that you have forked the repository and [turned up a Kubernetes cluster](../../docs/getting-started-guides/). Make sure DNS is enabled in your setup; see the [DNS doc](../../build/kube-dns/).

```sh
$ cd kubernetes
$ hack/dev-build-and-up.sh
```

### Step One: Create two namespaces

To see how cluster DNS works across multiple [namespaces](../../docs/user-guide/namespaces.md), we first need to create two namespaces:

```sh
$ kubectl create -f examples/cluster-dns/namespace-dev.yaml
$ kubectl create -f examples/cluster-dns/namespace-prod.yaml
```

Now list all namespaces:

```sh
$ kubectl get namespaces
NAME          LABELS             STATUS
default       <none>             Active
development   name=development   Active
production    name=production    Active
```

For the kubectl client to work with each namespace, we define two contexts:

```sh
$ kubectl config set-context dev --namespace=development --cluster=${CLUSTER_NAME} --user=${USER_NAME}
$ kubectl config set-context prod --namespace=production --cluster=${CLUSTER_NAME} --user=${USER_NAME}
```

You can view your cluster name and user name in the Kubernetes config at `~/.kube/config`.

### Step Two: Create backend replication controller in each namespace

Use the file [`examples/cluster-dns/dns-backend-rc.yaml`](dns-backend-rc.yaml) to create a backend server [replication controller](../../docs/user-guide/replication-controller.md) in each namespace.

```sh
$ kubectl config use-context dev
$ kubectl create -f examples/cluster-dns/dns-backend-rc.yaml
```

Once that's up, you can list the replication controllers in the cluster:

```sh
$ kubectl get rc
CONTROLLER    CONTAINER(S)   IMAGE(S)              SELECTOR           REPLICAS
dns-backend   dns-backend    ddysher/dns-backend   name=dns-backend   1
```

Now repeat the above commands to create a replication controller in the prod namespace:

```sh
$ kubectl config use-context prod
$ kubectl create -f examples/cluster-dns/dns-backend-rc.yaml
$ kubectl get rc
CONTROLLER    CONTAINER(S)   IMAGE(S)              SELECTOR           REPLICAS
dns-backend   dns-backend    ddysher/dns-backend   name=dns-backend   1
```

### Step Three: Create backend service

Use the file [`examples/cluster-dns/dns-backend-service.yaml`](dns-backend-service.yaml) to create
a [service](../../docs/user-guide/services.md) for the backend server.

```sh
$ kubectl config use-context dev
$ kubectl create -f examples/cluster-dns/dns-backend-service.yaml
```

Once that's up, you can list the service in the cluster:

```sh
$ kubectl get service dns-backend
NAME          CLUSTER_IP   EXTERNAL_IP   PORT(S)    SELECTOR           AGE
dns-backend   10.0.2.3     <none>        8000/TCP   name=dns-backend   1d
```

Again, repeat the same process for the prod namespace:

```sh
$ kubectl config use-context prod
$ kubectl create -f examples/cluster-dns/dns-backend-service.yaml
$ kubectl get service dns-backend
NAME          CLUSTER_IP   EXTERNAL_IP   PORT(S)    SELECTOR           AGE
dns-backend   10.0.2.4     <none>        8000/TCP   name=dns-backend   1d
```

### Step Four: Create client pod in one namespace

Use the file [`examples/cluster-dns/dns-frontend-pod.yaml`](dns-frontend-pod.yaml) to create a client [pod](../../docs/user-guide/pods.md) in the dev namespace. The client pod will make a connection to the backend and exit. Specifically, it tries to connect to the address `http://dns-backend.development.cluster.local:8000`.

```sh
$ kubectl config use-context dev
$ kubectl create -f examples/cluster-dns/dns-frontend-pod.yaml
```

Once that's up, you can list the pod in the cluster:

```sh
$ kubectl get pods dns-frontend
NAME           READY     STATUS       RESTARTS   AGE
dns-frontend   0/1       ExitCode:0   0          1m
```

Wait until the pod succeeds, then we can see the output from the client pod:

```sh
$ kubectl logs dns-frontend
2015-05-07T20:13:54.147664936Z 10.0.236.129
2015-05-07T20:13:54.147721290Z Send request to: http://dns-backend.development.cluster.local:8000
2015-05-07T20:13:54.147733438Z <Response [200]>
2015-05-07T20:13:54.147738295Z Hello World!
```

Please refer to the [source code](images/frontend/client.py) for details about the log. The first line prints the IP address associated with the service in the dev namespace; the remaining lines print our request and the server's response.

If we switch to the prod namespace with the same pod config, we'll see the same result, i.e., DNS resolves across namespaces.

```sh
$ kubectl config use-context prod
$ kubectl create -f examples/cluster-dns/dns-frontend-pod.yaml
$ kubectl logs dns-frontend
2015-05-07T20:13:54.147664936Z 10.0.236.129
2015-05-07T20:13:54.147721290Z Send request to: http://dns-backend.development.cluster.local:8000
2015-05-07T20:13:54.147733438Z <Response [200]>
2015-05-07T20:13:54.147738295Z Hello World!
```


#### Note about default namespace

If you prefer not to use namespaces, all your services can be addressed via the `default` namespace, e.g. `http://dns-backend.default.svc.cluster.local:8000`, or the shorthand version `http://dns-backend:8000`.
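A quick way to verify the resolution (a sketch, not part of the example's files; it assumes a pod in the `default` namespace and an image that ships `nslookup`, such as busybox):

```sh
# Both the fully qualified and shorthand names should resolve to the same
# ClusterIP from inside the cluster.
kubectl run -it --rm dns-check --image=busybox --restart=Never -- \
  nslookup dns-backend.default.svc.cluster.local
```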

### tl;dr

For those of you who are impatient, here is a summary of the commands we ran in this tutorial. Remember first to set `$CLUSTER_NAME` and `$USER_NAME` to the values found in `~/.kube/config`.

```sh
# create dev and prod namespaces
kubectl create -f examples/cluster-dns/namespace-dev.yaml
kubectl create -f examples/cluster-dns/namespace-prod.yaml

# create two contexts
kubectl config set-context dev --namespace=development --cluster=${CLUSTER_NAME} --user=${USER_NAME}
kubectl config set-context prod --namespace=production --cluster=${CLUSTER_NAME} --user=${USER_NAME}

# create two backend replication controllers
kubectl config use-context dev
kubectl create -f examples/cluster-dns/dns-backend-rc.yaml
kubectl config use-context prod
kubectl create -f examples/cluster-dns/dns-backend-rc.yaml

# create backend services
kubectl config use-context dev
kubectl create -f examples/cluster-dns/dns-backend-service.yaml
kubectl config use-context prod
kubectl create -f examples/cluster-dns/dns-backend-service.yaml

# create a pod in each namespace and get its output
kubectl config use-context dev
kubectl create -f examples/cluster-dns/dns-frontend-pod.yaml
kubectl logs dns-frontend

kubectl config use-context prod
kubectl create -f examples/cluster-dns/dns-frontend-pod.yaml
kubectl logs dns-frontend
```

<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
[]()
<!-- END MUNGE: GENERATED_ANALYTICS -->
vendor/k8s.io/kubernetes/examples/cluster-dns/dns-backend-rc.yaml (generated, vendored, new file: 21 lines)
@@ -0,0 +1,21 @@
apiVersion: v1
kind: ReplicationController
metadata:
  name: dns-backend
  labels:
    name: dns-backend
spec:
  replicas: 1
  selector:
    name: dns-backend
  template:
    metadata:
      labels:
        name: dns-backend
    spec:
      containers:
      - name: dns-backend
        image: gcr.io/google_containers/example-dns-backend:v1
        ports:
        - name: backend-port
          containerPort: 8000
vendor/k8s.io/kubernetes/examples/cluster-dns/dns-backend-service.yaml (generated, vendored, new file: 9 lines)
@@ -0,0 +1,9 @@
kind: Service
apiVersion: v1
metadata:
  name: dns-backend
spec:
  ports:
  - port: 8000
  selector:
    name: dns-backend
vendor/k8s.io/kubernetes/examples/cluster-dns/dns-frontend-pod.yaml (generated, vendored, new file: 16 lines)
@@ -0,0 +1,16 @@
apiVersion: v1
kind: Pod
metadata:
  name: dns-frontend
  labels:
    name: dns-frontend
spec:
  containers:
  - name: dns-frontend
    image: gcr.io/google_containers/example-dns-frontend:v1
    command:
    - python
    - client.py
    - http://dns-backend.development.cluster.local:8000
    imagePullPolicy: Always
  restartPolicy: Never
vendor/k8s.io/kubernetes/examples/cluster-dns/images/backend/Dockerfile (generated, vendored, new file: 20 lines)
@@ -0,0 +1,20 @@
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

FROM python:2.7-slim

COPY . /dns-backend
WORKDIR /dns-backend

CMD ["python", "server.py"]
vendor/k8s.io/kubernetes/examples/cluster-dns/images/backend/Makefile (generated, vendored, new file: 27 lines)
@@ -0,0 +1,27 @@
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

TAG = v1
PREFIX = gcr.io/google_containers
IMAGE = example-dns-backend

all: push

image:
	docker build --pull -t $(PREFIX)/$(IMAGE):$(TAG) .

push: image
	gcloud docker -- push $(PREFIX)/$(IMAGE)

clean:
vendor/k8s.io/kubernetes/examples/cluster-dns/images/backend/server.py (generated, vendored, new file: 37 lines)
@@ -0,0 +1,37 @@
#!/usr/bin/env python

# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer

PORT_NUMBER = 8000

# This class handles any incoming request.
class HTTPHandler(BaseHTTPRequestHandler):
    # Handler for GET requests
    def do_GET(self):
        self.send_response(200)
        self.send_header('Content-type', 'text/html')
        self.end_headers()
        self.wfile.write("Hello World!")

try:
    # Create a web server and define the handler to manage incoming requests.
    server = HTTPServer(('', PORT_NUMBER), HTTPHandler)
    print 'Started httpserver on port', PORT_NUMBER
    server.serve_forever()
except KeyboardInterrupt:
    print '^C received, shutting down the web server'
    server.socket.close()
vendor/k8s.io/kubernetes/examples/cluster-dns/images/frontend/Dockerfile (generated, vendored, new file: 22 lines)
@@ -0,0 +1,22 @@
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

FROM python:2.7-slim

RUN pip install requests

COPY . /dns-frontend
WORKDIR /dns-frontend

CMD ["python", "client.py"]
vendor/k8s.io/kubernetes/examples/cluster-dns/images/frontend/Makefile (generated, vendored, new file: 27 lines)
@@ -0,0 +1,27 @@
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

TAG = v1
PREFIX = gcr.io/google_containers
IMAGE = example-dns-frontend

all: push

image:
	docker build --pull -t $(PREFIX)/$(IMAGE):$(TAG) .

push: image
	gcloud docker -- push $(PREFIX)/$(IMAGE)

clean:
vendor/k8s.io/kubernetes/examples/cluster-dns/images/frontend/client.py (generated, vendored, new file: 46 lines)
@@ -0,0 +1,46 @@
#!/usr/bin/env python

# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import argparse
import requests
import socket

from urlparse import urlparse


def CheckServiceAddress(address):
    hostname = urlparse(address).hostname
    service_address = socket.gethostbyname(hostname)
    print service_address


def GetServerResponse(address):
    print 'Send request to:', address
    response = requests.get(address)
    print response
    print response.content


def Main():
    parser = argparse.ArgumentParser()
    parser.add_argument('address')
    args = parser.parse_args()
    CheckServiceAddress(args.address)
    GetServerResponse(args.address)


if __name__ == "__main__":
    Main()
vendor/k8s.io/kubernetes/examples/cluster-dns/namespace-dev.yaml (generated, vendored, new file: 6 lines)
@@ -0,0 +1,6 @@
apiVersion: v1
kind: Namespace
metadata:
  name: "development"
  labels:
    name: "development"
vendor/k8s.io/kubernetes/examples/cluster-dns/namespace-prod.yaml (generated, vendored, new file: 6 lines)
@@ -0,0 +1,6 @@
apiVersion: v1
kind: Namespace
metadata:
  name: "production"
  labels:
    name: "production"
vendor/k8s.io/kubernetes/examples/cockroachdb/README.md (generated, vendored, new file: 125 lines)
@@ -0,0 +1,125 @@
# CockroachDB on Kubernetes as a StatefulSet

This example deploys [CockroachDB](https://cockroachlabs.com) on Kubernetes as
a StatefulSet. CockroachDB is a distributed, scalable NewSQL database. Please see
[the homepage](https://cockroachlabs.com) and the
[documentation](https://www.cockroachlabs.com/docs/) for details.

## Limitations

### StatefulSet limitations

Standard StatefulSet limitations apply: node-local storage cannot currently be
used (outside of single-node tests), so there is likely a performance hit from
running CockroachDB on external storage. Note that CockroachDB already
replicates its data, so there is no need to deploy it onto persistent volumes
that replicate internally. For this reason, high-performance use cases on a
private Kubernetes cluster may want to consider a DaemonSet deployment until
StatefulSets support node-local storage (see #7562).

### Recovery after persistent storage failure

A persistent storage failure (e.g. losing the hard drive) is gracefully handled
by CockroachDB as long as enough replicas survive (two out of three by
default). Due to the bootstrapping in this deployment, a storage failure of the
first node is special: the administrator must manually prepopulate the
"new" storage medium by running an instance of CockroachDB with the `--join`
parameter (see the sketch below). If this is not done, the first node will
bootstrap a new cluster, which will lead to a lot of trouble.
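A sketch of that manual step (hypothetical; the flags mirror the container command in the StatefulSet definition that accompanies this example):

```shell
# Repopulate a wiped cockroachdb-0 by forcing it to join the existing cluster
# rather than bootstrapping a new one.
kubectl exec cockroachdb-0 -- /bin/bash -c \
  '/cockroach/cockroach start --logtostderr --insecure --host "$(hostname -f)" --join cockroachdb-public'
```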

### Dynamic volume provisioning

The deployment is written for a use case in which dynamic volume provisioning is
available. When that is not the case, the persistent volume claims need
to be created manually. See [minikube.sh](minikube.sh) for the necessary
steps. If you're on GCE or AWS, where dynamic provisioning is supported, no
manual work is needed to create the persistent volumes.

## Testing locally on minikube

Follow the steps in [minikube.sh](minikube.sh) (or simply run that file).

## Testing in the cloud on GCE or AWS

Once you have a Kubernetes cluster running, just run
`kubectl create -f cockroachdb-statefulset.yaml` to create your CockroachDB cluster.
This works because GCE and AWS support dynamic volume provisioning by default,
so persistent volumes will be created for the CockroachDB pods as needed.

## Accessing the database

Along with our StatefulSet configuration, we expose a standard Kubernetes service
that offers a load-balanced virtual IP through which clients can access the
database. In our example, we've called this service `cockroachdb-public`.

Start up a client pod and open up an interactive, (mostly) Postgres-flavor
SQL shell using:

```console
$ kubectl run -it --rm cockroach-client --image=cockroachdb/cockroach --restart=Never --command -- ./cockroach sql --host cockroachdb-public
```

You can see example SQL statements for inserting and querying data in the
included [demo script](demo.sh), but you can use almost any Postgres-style SQL
command. Some more basic examples can be found within
[CockroachDB's documentation](https://www.cockroachlabs.com/docs/learn-cockroachdb-sql.html).

## Accessing the admin UI

If you want to see information about how the cluster is doing, you can try
pulling up the CockroachDB admin UI by port-forwarding from your local machine
to one of the pods:

```shell
kubectl port-forward cockroachdb-0 8080
```

Once you've done that, you should be able to access the admin UI by visiting
http://localhost:8080/ in your web browser.

## Simulating failures

When all (or enough) nodes are up, simulate a failure like this:

```shell
kubectl exec cockroachdb-0 -- /bin/bash -c "while true; do kill 1; done"
```

You can then reconnect to the database as demonstrated above and verify
that no data was lost. The example runs with three-fold replication, so
it can tolerate one failure of any given node at a time. Note also that
there is a brief period of time immediately after the creation of the
cluster during which the three-fold replication is established, and during
which killing a node may lead to unavailability.

The [demo script](demo.sh) gives an example of killing one instance of the
database and ensuring that the other replicas have all the data that was written.

## Scaling up or down

Scale the StatefulSet by running

```shell
kubectl scale statefulset cockroachdb --replicas=4
```

Note that you may need to create a new persistent volume claim first. If you
ran `minikube.sh`, there's a spare volume so you can immediately scale up by
one. If you're running on GCE or AWS, you can scale up by as many as you want
because new volumes will automatically be created for you. Convince yourself
that the new node immediately serves reads and writes.

## Cleaning up when you're done

Because all of the resources in this example have been tagged with the label `app=cockroachdb`,
we can clean up everything that we created in one quick command using a selector on that label:

```shell
kubectl delete statefulsets,pods,persistentvolumes,persistentvolumeclaims,services -l app=cockroachdb
```

<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
[]()
<!-- END MUNGE: GENERATED_ANALYTICS -->
vendor/k8s.io/kubernetes/examples/cockroachdb/cockroachdb-statefulset.yaml (generated, vendored, new file: 174 lines)
@@ -0,0 +1,174 @@
apiVersion: v1
kind: Service
metadata:
  # This service is meant to be used by clients of the database. It exposes a ClusterIP that will
  # automatically load balance connections to the different database pods.
  name: cockroachdb-public
  labels:
    app: cockroachdb
spec:
  ports:
  # The main port, served by gRPC, serves Postgres-flavor SQL, internode
  # traffic and the cli.
  - port: 26257
    targetPort: 26257
    name: grpc
  # The secondary port serves the UI as well as health and debug endpoints.
  - port: 8080
    targetPort: 8080
    name: http
  selector:
    app: cockroachdb
---
apiVersion: v1
kind: Service
metadata:
  # This service only exists to create DNS entries for each pod in the stateful
  # set such that they can resolve each other's IP addresses. It does not
  # create a load-balanced ClusterIP and should not be used directly by clients
  # in most circumstances.
  name: cockroachdb
  labels:
    app: cockroachdb
  annotations:
    # This is needed to make the peer-finder work properly and to help avoid
    # edge cases where instance 0 comes up after losing its data and needs to
    # decide whether it should create a new cluster or try to join an existing
    # one. If it creates a new cluster when it should have joined an existing
    # one, we'd end up with two separate clusters listening at the same service
    # endpoint, which would be very bad.
    service.alpha.kubernetes.io/tolerate-unready-endpoints: "true"
    # Enable automatic monitoring of all instances when Prometheus is running in the cluster.
    prometheus.io/scrape: "true"
    prometheus.io/path: "_status/vars"
    prometheus.io/port: "8080"
spec:
  ports:
  - port: 26257
    targetPort: 26257
    name: grpc
  - port: 8080
    targetPort: 8080
    name: http
  clusterIP: None
  selector:
    app: cockroachdb
---
apiVersion: apps/v1beta1
kind: StatefulSet
metadata:
  name: cockroachdb
spec:
  serviceName: "cockroachdb"
  replicas: 3
  template:
    metadata:
      labels:
        app: cockroachdb
      annotations:
        # Init containers are run only once in the lifetime of a pod, before
        # it's started up for the first time. It has to exit successfully
        # before the pod's main containers are allowed to start.
        # This particular init container does a DNS lookup for other pods in
        # the set to help determine whether or not a cluster already exists.
        # If any other pods exist, it creates a file in the cockroach-data
        # directory to pass that information along to the primary container that
        # has to decide what command-line flags to use when starting CockroachDB.
        # This only matters when a pod's persistent volume is empty - if it has
        # data from a previous execution, that data will always be used.
        pod.alpha.kubernetes.io/init-containers: '[
            {
                "name": "bootstrap",
                "image": "cockroachdb/cockroach-k8s-init:0.1",
                "args": [
                    "-on-start=/on-start.sh",
                    "-service=cockroachdb"
                ],
                "env": [
                    {
                        "name": "POD_NAMESPACE",
                        "valueFrom": {
                            "fieldRef": {
                                "apiVersion": "v1",
                                "fieldPath": "metadata.namespace"
                            }
                        }
                    }
                ],
                "volumeMounts": [
                    {
                        "name": "datadir",
                        "mountPath": "/cockroach/cockroach-data"
                    }
                ]
            }
        ]'
    spec:
      containers:
      - name: cockroachdb
        # Runs the master branch. Not recommended for production, but since
        # CockroachDB is in Beta, you don't want to run it in production
        # anyway. See
        # https://hub.docker.com/r/cockroachdb/cockroach/tags/
        # if you prefer to run a beta release.
        image: cockroachdb/cockroach
        imagePullPolicy: IfNotPresent
        ports:
        - containerPort: 26257
          name: grpc
        - containerPort: 8080
          name: http
        livenessProbe:
          httpGet:
            path: /_admin/v1/health
            port: http
          initialDelaySeconds: 30
        readinessProbe:
          httpGet:
            path: /_admin/v1/health
            port: http
          initialDelaySeconds: 10
        volumeMounts:
        - name: datadir
          mountPath: /cockroach/cockroach-data
        command:
          - "/bin/bash"
          - "-ecx"
          - |
            # The use of qualified `hostname -f` is crucial:
            # Other nodes aren't able to look up the unqualified hostname.
            CRARGS=("start" "--logtostderr" "--insecure" "--host" "$(hostname -f)" "--http-host" "0.0.0.0")
            # We only want to initialize a new cluster (by omitting the join flag)
            # if we're sure that we're the first node (i.e. index 0) and that
            # there aren't any other nodes running as part of the cluster that
            # this is supposed to be a part of (which indicates that a cluster
            # already exists and we should make sure not to create a new one).
            # It's fine to run without --join on a restart if there aren't any
            # other nodes.
            if [ ! "$(hostname)" == "cockroachdb-0" ] || \
               [ -e "/cockroach/cockroach-data/cluster_exists_marker" ]
            then
              # We don't join cockroachdb in order to avoid a node attempting
              # to join itself, which currently doesn't work
              # (https://github.com/cockroachdb/cockroach/issues/9625).
              CRARGS+=("--join" "cockroachdb-public")
            fi
            exec /cockroach/cockroach ${CRARGS[*]}
      # No pre-stop hook is required, a SIGTERM plus some time is all that's
      # needed for graceful shutdown of a node.
      terminationGracePeriodSeconds: 60
      volumes:
      - name: datadir
        persistentVolumeClaim:
          claimName: datadir
  volumeClaimTemplates:
  - metadata:
      name: datadir
      annotations:
        volume.alpha.kubernetes.io/storage-class: anything
    spec:
      accessModes:
        - "ReadWriteOnce"
      resources:
        requests:
          storage: 1Gi
vendor/k8s.io/kubernetes/examples/cockroachdb/demo.sh (generated, vendored, new executable file: 47 lines)
@@ -0,0 +1,47 @@
#!/usr/bin/env bash

# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

set -euo pipefail

function sql() {
  # TODO(knz): Why does the more idiomatic read from stdin not produce any
  # output?
  kubectl exec "cockroachdb-${1}" -- /cockroach/cockroach sql \
    --host "cockroachdb-${1}.cockroachdb" \
    -e "$(cat /dev/stdin)"
}

function kill() {
  ! kubectl exec -t "cockroachdb-${1}" -- /bin/bash -c "while true; do kill 1; done" &> /dev/null
}

# Create database on second node (idempotently for convenience).
cat <<EOF | sql 1
CREATE DATABASE IF NOT EXISTS foo;
CREATE TABLE IF NOT EXISTS foo.bar (k STRING PRIMARY KEY, v STRING);
UPSERT INTO foo.bar VALUES ('Kuber', 'netes'), ('Cockroach', 'DB');
EOF

# Kill the node we just created the table on.
kill 1

# Read the data from all other nodes (we could also read from the one we just
# killed, but it's awkward to wait for it to respawn).
for i in 0 2 3 4; do
  cat <<EOF | sql "${i}"
SELECT CONCAT(k, v) FROM foo.bar;
EOF
done
72
vendor/k8s.io/kubernetes/examples/cockroachdb/minikube.sh
generated
vendored
Executable file
72
vendor/k8s.io/kubernetes/examples/cockroachdb/minikube.sh
generated
vendored
Executable file
|
@ -0,0 +1,72 @@
|
|||
#!/usr/bin/env bash

# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Run the CockroachDB StatefulSet example on a minikube instance.
#
# For a fresh start, run the following first:
#   minikube delete
#   minikube start
#
# To upgrade minikube & kubectl on OS X, the following should suffice:
#   brew reinstall kubernetes-cli --devel
#   curl -Lo minikube \
#     https://storage.googleapis.com/minikube/releases/v0.4.0/minikube-darwin-amd64 && \
#     chmod +x minikube && sudo mv minikube /usr/local/bin/

set -exuo pipefail

# Clean up anything from a prior run:
kubectl delete statefulsets,pods,persistentvolumes,persistentvolumeclaims,services -l app=cockroachdb

# Make persistent volumes and (correctly named) claims. We must create the
# claims here manually even though that sounds counter-intuitive. For details
# see https://github.com/kubernetes/contrib/pull/1295#issuecomment-230180894.
# Note that we make an extra volume here so you can manually test scale-up.
for i in $(seq 0 3); do
  cat <<EOF | kubectl create -f -
kind: PersistentVolume
apiVersion: v1
metadata:
  name: pv${i}
  labels:
    type: local
    app: cockroachdb
spec:
  capacity:
    storage: 1Gi
  accessModes:
    - ReadWriteOnce
  hostPath:
    path: "/tmp/${i}"
EOF

  cat <<EOF | kubectl create -f -
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: datadir-cockroachdb-${i}
  labels:
    app: cockroachdb
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 1Gi
EOF
done

kubectl create -f cockroachdb-statefulset.yaml
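Binding between the volumes and claims created above happens asynchronously, so before the StatefulSet comes up it can be worth confirming everything reports `Bound` (a sketch; the selector matches the `app=cockroachdb` label applied in the loop):

```
kubectl get persistentvolumes -l app=cockroachdb
kubectl get persistentvolumeclaims -l app=cockroachdb
```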
18
vendor/k8s.io/kubernetes/examples/doc.go
generated
vendored
Normal file
@@ -0,0 +1,18 @@
/*
Copyright 2014 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Examples contains sample applications for trying out the concepts in Kubernetes.
package examples // import "k8s.io/kubernetes/examples"
163
vendor/k8s.io/kubernetes/examples/elasticsearch/README.md
generated
vendored
Normal file
@@ -0,0 +1,163 @@
# Elasticsearch for Kubernetes

Kubernetes makes it easy for anyone to build and scale [Elasticsearch](http://www.elasticsearch.org/) clusters. Here, you'll find out how to do so.
The Elasticsearch version used here is `1.7.1`.

[A more robust example that follows the Elasticsearch best practice of separating node concerns is also available](production_cluster/README.md).

<img src="http://kubernetes.io/kubernetes/img/warning.png" alt="WARNING" width="25" height="25"> The current pod descriptors use an `emptyDir` for storing data in each data node container. This is done for the sake of simplicity and [should be adapted according to your storage needs](../../docs/design/persistent-storage.md).

## Docker image

The [pre-built image](https://github.com/pires/docker-elasticsearch-kubernetes) used in this example will not be supported. Feel free to fork it to fit your own needs, but keep in mind that you will need to change the Kubernetes descriptors accordingly.

## Deploy

Let's kickstart our cluster with 1 instance of Elasticsearch.

```
kubectl create -f examples/elasticsearch/service-account.yaml
kubectl create -f examples/elasticsearch/es-svc.yaml
kubectl create -f examples/elasticsearch/es-rc.yaml
```

Let's see if it worked:

```
$ kubectl get pods
NAME             READY     STATUS    RESTARTS   AGE
es-kfymw         1/1       Running   0          7m
kube-dns-p3v1u   3/3       Running   0          19m
```

```
$ kubectl logs es-kfymw
log4j:WARN No such property [maxBackupIndex] in org.apache.log4j.DailyRollingFileAppender.
log4j:WARN No such property [maxBackupIndex] in org.apache.log4j.DailyRollingFileAppender.
log4j:WARN No such property [maxBackupIndex] in org.apache.log4j.DailyRollingFileAppender.
[2015-08-30 10:01:31,946][INFO ][node ] [Hammerhead] version[1.7.1], pid[7], build[b88f43f/2015-07-29T09:54:16Z]
[2015-08-30 10:01:31,946][INFO ][node ] [Hammerhead] initializing ...
[2015-08-30 10:01:32,110][INFO ][plugins ] [Hammerhead] loaded [cloud-kubernetes], sites []
[2015-08-30 10:01:32,153][INFO ][env ] [Hammerhead] using [1] data paths, mounts [[/data (/dev/sda9)]], net usable_space [14.4gb], net total_space [15.5gb], types [ext4]
[2015-08-30 10:01:37,188][INFO ][node ] [Hammerhead] initialized
[2015-08-30 10:01:37,189][INFO ][node ] [Hammerhead] starting ...
[2015-08-30 10:01:37,499][INFO ][transport ] [Hammerhead] bound_address {inet[/0:0:0:0:0:0:0:0:9300]}, publish_address {inet[/10.244.48.2:9300]}
[2015-08-30 10:01:37,550][INFO ][discovery ] [Hammerhead] myesdb/n2-6uu_UT3W5XNrjyqBPiA
[2015-08-30 10:01:43,966][INFO ][cluster.service ] [Hammerhead] new_master [Hammerhead][n2-6uu_UT3W5XNrjyqBPiA][es-kfymw][inet[/10.244.48.2:9300]]{master=true}, reason: zen-disco-join (elected_as_master)
[2015-08-30 10:01:44,010][INFO ][http ] [Hammerhead] bound_address {inet[/0:0:0:0:0:0:0:0:9200]}, publish_address {inet[/10.244.48.2:9200]}
[2015-08-30 10:01:44,011][INFO ][node ] [Hammerhead] started
[2015-08-30 10:01:44,042][INFO ][gateway ] [Hammerhead] recovered [0] indices into cluster_state
```

So we have a 1-node Elasticsearch cluster ready to handle some work.

## Scale

Scaling is as easy as:

```
kubectl scale --replicas=3 rc es
```

Did it work?

```
$ kubectl get pods
NAME             READY     STATUS    RESTARTS   AGE
es-78e0s         1/1       Running   0          8m
es-kfymw         1/1       Running   0          17m
es-rjmer         1/1       Running   0          8m
kube-dns-p3v1u   3/3       Running   0          30m
```

Let's take a look at the logs:

```
$ kubectl logs es-kfymw
log4j:WARN No such property [maxBackupIndex] in org.apache.log4j.DailyRollingFileAppender.
log4j:WARN No such property [maxBackupIndex] in org.apache.log4j.DailyRollingFileAppender.
log4j:WARN No such property [maxBackupIndex] in org.apache.log4j.DailyRollingFileAppender.
[2015-08-30 10:01:31,946][INFO ][node ] [Hammerhead] version[1.7.1], pid[7], build[b88f43f/2015-07-29T09:54:16Z]
[2015-08-30 10:01:31,946][INFO ][node ] [Hammerhead] initializing ...
[2015-08-30 10:01:32,110][INFO ][plugins ] [Hammerhead] loaded [cloud-kubernetes], sites []
[2015-08-30 10:01:32,153][INFO ][env ] [Hammerhead] using [1] data paths, mounts [[/data (/dev/sda9)]], net usable_space [14.4gb], net total_space [15.5gb], types [ext4]
[2015-08-30 10:01:37,188][INFO ][node ] [Hammerhead] initialized
[2015-08-30 10:01:37,189][INFO ][node ] [Hammerhead] starting ...
[2015-08-30 10:01:37,499][INFO ][transport ] [Hammerhead] bound_address {inet[/0:0:0:0:0:0:0:0:9300]}, publish_address {inet[/10.244.48.2:9300]}
[2015-08-30 10:01:37,550][INFO ][discovery ] [Hammerhead] myesdb/n2-6uu_UT3W5XNrjyqBPiA
[2015-08-30 10:01:43,966][INFO ][cluster.service ] [Hammerhead] new_master [Hammerhead][n2-6uu_UT3W5XNrjyqBPiA][es-kfymw][inet[/10.244.48.2:9300]]{master=true}, reason: zen-disco-join (elected_as_master)
[2015-08-30 10:01:44,010][INFO ][http ] [Hammerhead] bound_address {inet[/0:0:0:0:0:0:0:0:9200]}, publish_address {inet[/10.244.48.2:9200]}
[2015-08-30 10:01:44,011][INFO ][node ] [Hammerhead] started
[2015-08-30 10:01:44,042][INFO ][gateway ] [Hammerhead] recovered [0] indices into cluster_state
[2015-08-30 10:08:02,517][INFO ][cluster.service ] [Hammerhead] added {[Tenpin][2gv5MiwhRiOSsrTOF3DhuA][es-78e0s][inet[/10.244.54.4:9300]]{master=true},}, reason: zen-disco-receive(join from node[[Tenpin][2gv5MiwhRiOSsrTOF3DhuA][es-78e0s][inet[/10.244.54.4:9300]]{master=true}])
[2015-08-30 10:10:10,645][INFO ][cluster.service ] [Hammerhead] added {[Evilhawk][ziTq2PzYRJys43rNL2tbyg][es-rjmer][inet[/10.244.33.3:9300]]{master=true},}, reason: zen-disco-receive(join from node[[Evilhawk][ziTq2PzYRJys43rNL2tbyg][es-rjmer][inet[/10.244.33.3:9300]]{master=true}])
```

So we have a 3-node Elasticsearch cluster ready to handle more work.

## Access the service

*Don't forget* that services in Kubernetes are only accessible from containers in the cluster. For different behavior you should [configure the creation of an external load-balancer](http://kubernetes.io/v1.0/docs/user-guide/services.html#type-loadbalancer). While it's supported within this example service descriptor, its usage is out of scope of this document, for now.

```
$ kubectl get service elasticsearch
NAME            LABELS                    SELECTOR                  IP(S)           PORT(S)
elasticsearch   component=elasticsearch   component=elasticsearch   10.100.108.94   9200/TCP
                                                                                    9300/TCP
```

From any host on your cluster (that's running `kube-proxy`), run:

```
$ curl 10.100.108.94:9200
```

You should see something similar to the following:

```json
{
  "status" : 200,
  "name" : "Hammerhead",
  "cluster_name" : "myesdb",
  "version" : {
    "number" : "1.7.1",
    "build_hash" : "b88f43fc40b0bcd7f173a1f9ee2e97816de80b19",
    "build_timestamp" : "2015-07-29T09:54:16Z",
    "build_snapshot" : false,
    "lucene_version" : "4.10.4"
  },
  "tagline" : "You Know, for Search"
}
```

Or if you want to check cluster information:

```
curl 10.100.108.94:9200/_cluster/health?pretty
```

You should see something similar to the following:

```json
{
  "cluster_name" : "myesdb",
  "status" : "green",
  "timed_out" : false,
  "number_of_nodes" : 3,
  "number_of_data_nodes" : 3,
  "active_primary_shards" : 0,
  "active_shards" : 0,
  "relocating_shards" : 0,
  "initializing_shards" : 0,
  "unassigned_shards" : 0,
  "delayed_unassigned_shards" : 0,
  "number_of_pending_tasks" : 0,
  "number_of_in_flight_fetch" : 0
}
```
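To exercise the cluster end to end rather than only reading its health, you can index a document and search for it (a sketch; `10.100.108.94` is the service IP from the output above, and the `test` index and `item` type names are arbitrary placeholders):

```
curl -XPOST 10.100.108.94:9200/test/item -d '{"message": "hello"}'
sleep 1   # allow the index refresh interval to elapse
curl 10.100.108.94:9200/test/_search?pretty
```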
51
vendor/k8s.io/kubernetes/examples/elasticsearch/es-rc.yaml
generated
vendored
Normal file
@@ -0,0 +1,51 @@
apiVersion: v1
kind: ReplicationController
metadata:
  name: es
  labels:
    component: elasticsearch
spec:
  replicas: 1
  template:
    metadata:
      labels:
        component: elasticsearch
    spec:
      serviceAccount: elasticsearch
      containers:
      - name: es
        securityContext:
          capabilities:
            add:
            - IPC_LOCK
        image: quay.io/pires/docker-elasticsearch-kubernetes:1.7.1-4
        env:
        - name: KUBERNETES_CA_CERTIFICATE_FILE
          value: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
        - name: NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        - name: "CLUSTER_NAME"
          value: "myesdb"
        - name: "DISCOVERY_SERVICE"
          value: "elasticsearch"
        - name: NODE_MASTER
          value: "true"
        - name: NODE_DATA
          value: "true"
        - name: HTTP_ENABLE
          value: "true"
        ports:
        - containerPort: 9200
          name: http
          protocol: TCP
        - containerPort: 9300
          name: transport
          protocol: TCP
        volumeMounts:
        - mountPath: /data
          name: storage
      volumes:
      - name: storage
        emptyDir: {}
17
vendor/k8s.io/kubernetes/examples/elasticsearch/es-svc.yaml
generated
vendored
Normal file
@@ -0,0 +1,17 @@
apiVersion: v1
kind: Service
metadata:
  name: elasticsearch
  labels:
    component: elasticsearch
spec:
  type: LoadBalancer
  selector:
    component: elasticsearch
  ports:
  - name: http
    port: 9200
    protocol: TCP
  - name: transport
    port: 9300
    protocol: TCP
189
vendor/k8s.io/kubernetes/examples/elasticsearch/production_cluster/README.md
generated
vendored
Normal file
@@ -0,0 +1,189 @@
# Elasticsearch for Kubernetes

Kubernetes makes it easy for anyone to build and scale [Elasticsearch](http://www.elasticsearch.org/) clusters. Here, you'll find out how to do so.
The Elasticsearch version used here is `1.7.1`.

Before we start, you should know that Elasticsearch best practices recommend separating nodes into three roles:
* `Master` nodes - intended for clustering management only, no data, no HTTP API
* `Client` nodes - intended for client usage, no data, with HTTP API
* `Data` nodes - intended for storing and indexing your data, no HTTP API

This separation is enforced throughout this document.

<img src="http://kubernetes.io/kubernetes/img/warning.png" alt="WARNING" width="25" height="25"> The current pod descriptors use an `emptyDir` for storing data in each data node container. This is done for the sake of simplicity and [should be adapted according to your storage needs](../../../docs/design/persistent-storage.md).

## Docker image

This example uses [this pre-built image](https://github.com/pires/docker-elasticsearch-kubernetes). Feel free to fork and update it to fit your own needs, but keep in mind that you will need to change the Kubernetes descriptors accordingly.

## Deploy

```
kubectl create -f examples/elasticsearch/production_cluster/service-account.yaml
kubectl create -f examples/elasticsearch/production_cluster/es-discovery-svc.yaml
kubectl create -f examples/elasticsearch/production_cluster/es-svc.yaml
kubectl create -f examples/elasticsearch/production_cluster/es-master-rc.yaml
```

Wait until `es-master` is provisioned, and then:

```
kubectl create -f examples/elasticsearch/production_cluster/es-client-rc.yaml
```

Wait until `es-client` is provisioned, and then:

```
kubectl create -f examples/elasticsearch/production_cluster/es-data-rc.yaml
```

Wait until `es-data` is provisioned.
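Rather than eyeballing pod status between these steps, a small polling helper can wait for a given role to come up (a sketch; the `wait_for_role` name is an assumption, while the label selector matches the `component`/`role` labels used by the replication controllers in this directory):

```
# Block until at least one pod of the given role reports Running.
wait_for_role() {
  until kubectl get pods -l "component=elasticsearch,role=${1}" | grep -q Running; do
    sleep 5
  done
}

# Usage between the create steps above:
#   wait_for_role master
#   wait_for_role client
#   wait_for_role data
```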
Now, I leave it up to you how to validate the cluster, but as a first step, wait for containers to be in the `Running` state and check the Elasticsearch master logs:

```
$ kubectl get pods
NAME              READY     STATUS    RESTARTS   AGE
es-client-2ep9o   1/1       Running   0          2m
es-data-r9tgv     1/1       Running   0          1m
es-master-vxl6c   1/1       Running   0          6m
```

```
$ kubectl logs es-master-vxl6c
log4j:WARN No such property [maxBackupIndex] in org.apache.log4j.DailyRollingFileAppender.
log4j:WARN No such property [maxBackupIndex] in org.apache.log4j.DailyRollingFileAppender.
log4j:WARN No such property [maxBackupIndex] in org.apache.log4j.DailyRollingFileAppender.
[2015-08-21 10:58:51,324][INFO ][node ] [Arc] version[1.7.1], pid[8], build[b88f43f/2015-07-29T09:54:16Z]
[2015-08-21 10:58:51,328][INFO ][node ] [Arc] initializing ...
[2015-08-21 10:58:51,542][INFO ][plugins ] [Arc] loaded [cloud-kubernetes], sites []
[2015-08-21 10:58:51,624][INFO ][env ] [Arc] using [1] data paths, mounts [[/data (/dev/sda9)]], net usable_space [14.4gb], net total_space [15.5gb], types [ext4]
[2015-08-21 10:58:57,439][INFO ][node ] [Arc] initialized
[2015-08-21 10:58:57,439][INFO ][node ] [Arc] starting ...
[2015-08-21 10:58:57,782][INFO ][transport ] [Arc] bound_address {inet[/0:0:0:0:0:0:0:0:9300]}, publish_address {inet[/10.244.15.2:9300]}
[2015-08-21 10:58:57,847][INFO ][discovery ] [Arc] myesdb/-x16XFUzTCC8xYqWoeEOYQ
[2015-08-21 10:59:05,167][INFO ][cluster.service ] [Arc] new_master [Arc][-x16XFUzTCC8xYqWoeEOYQ][es-master-vxl6c][inet[/10.244.15.2:9300]]{data=false, master=true}, reason: zen-disco-join (elected_as_master)
[2015-08-21 10:59:05,202][INFO ][node ] [Arc] started
[2015-08-21 10:59:05,238][INFO ][gateway ] [Arc] recovered [0] indices into cluster_state
[2015-08-21 11:02:28,797][INFO ][cluster.service ] [Arc] added {[Gideon][4EfhWSqaTqikbK4tI7bODA][es-data-r9tgv][inet[/10.244.59.4:9300]]{master=false},}, reason: zen-disco-receive(join from node[[Gideon][4EfhWSqaTqikbK4tI7bODA][es-data-r9tgv][inet[/10.244.59.4:9300]]{master=false}])
[2015-08-21 11:03:16,822][INFO ][cluster.service ] [Arc] added {[Venomm][tFYxwgqGSpOejHLG4umRqg][es-client-2ep9o][inet[/10.244.53.2:9300]]{data=false, master=false},}, reason: zen-disco-receive(join from node[[Venomm][tFYxwgqGSpOejHLG4umRqg][es-client-2ep9o][inet[/10.244.53.2:9300]]{data=false, master=false}])
```

As you can see, the cluster is up and running. Easy, wasn't it?

## Scale

Scaling each type of node to handle your cluster is as easy as:

```
kubectl scale --replicas=3 rc es-master
kubectl scale --replicas=2 rc es-client
kubectl scale --replicas=2 rc es-data
```

Did it work?

```
$ kubectl get pods
NAME              READY     STATUS    RESTARTS   AGE
es-client-2ep9o   1/1       Running   0          4m
es-client-ye5s1   1/1       Running   0          50s
es-data-8az22     1/1       Running   0          47s
es-data-r9tgv     1/1       Running   0          3m
es-master-57h7k   1/1       Running   0          52s
es-master-kuwse   1/1       Running   0          52s
es-master-vxl6c   1/1       Running   0          8m
```

Let's take another look at the Elasticsearch master logs:

```
$ kubectl logs es-master-vxl6c
log4j:WARN No such property [maxBackupIndex] in org.apache.log4j.DailyRollingFileAppender.
log4j:WARN No such property [maxBackupIndex] in org.apache.log4j.DailyRollingFileAppender.
log4j:WARN No such property [maxBackupIndex] in org.apache.log4j.DailyRollingFileAppender.
[2015-08-21 10:58:51,324][INFO ][node ] [Arc] version[1.7.1], pid[8], build[b88f43f/2015-07-29T09:54:16Z]
[2015-08-21 10:58:51,328][INFO ][node ] [Arc] initializing ...
[2015-08-21 10:58:51,542][INFO ][plugins ] [Arc] loaded [cloud-kubernetes], sites []
[2015-08-21 10:58:51,624][INFO ][env ] [Arc] using [1] data paths, mounts [[/data (/dev/sda9)]], net usable_space [14.4gb], net total_space [15.5gb], types [ext4]
[2015-08-21 10:58:57,439][INFO ][node ] [Arc] initialized
[2015-08-21 10:58:57,439][INFO ][node ] [Arc] starting ...
[2015-08-21 10:58:57,782][INFO ][transport ] [Arc] bound_address {inet[/0:0:0:0:0:0:0:0:9300]}, publish_address {inet[/10.244.15.2:9300]}
[2015-08-21 10:58:57,847][INFO ][discovery ] [Arc] myesdb/-x16XFUzTCC8xYqWoeEOYQ
[2015-08-21 10:59:05,167][INFO ][cluster.service ] [Arc] new_master [Arc][-x16XFUzTCC8xYqWoeEOYQ][es-master-vxl6c][inet[/10.244.15.2:9300]]{data=false, master=true}, reason: zen-disco-join (elected_as_master)
[2015-08-21 10:59:05,202][INFO ][node ] [Arc] started
[2015-08-21 10:59:05,238][INFO ][gateway ] [Arc] recovered [0] indices into cluster_state
[2015-08-21 11:02:28,797][INFO ][cluster.service ] [Arc] added {[Gideon][4EfhWSqaTqikbK4tI7bODA][es-data-r9tgv][inet[/10.244.59.4:9300]]{master=false},}, reason: zen-disco-receive(join from node[[Gideon][4EfhWSqaTqikbK4tI7bODA][es-data-r9tgv][inet[/10.244.59.4:9300]]{master=false}])
[2015-08-21 11:03:16,822][INFO ][cluster.service ] [Arc] added {[Venomm][tFYxwgqGSpOejHLG4umRqg][es-client-2ep9o][inet[/10.244.53.2:9300]]{data=false, master=false},}, reason: zen-disco-receive(join from node[[Venomm][tFYxwgqGSpOejHLG4umRqg][es-client-2ep9o][inet[/10.244.53.2:9300]]{data=false, master=false}])
[2015-08-21 11:04:40,781][INFO ][cluster.service ] [Arc] added {[Erik Josten][QUJlahfLTi-MsxzM6_Da0g][es-master-kuwse][inet[/10.244.59.5:9300]]{data=false, master=true},}, reason: zen-disco-receive(join from node[[Erik Josten][QUJlahfLTi-MsxzM6_Da0g][es-master-kuwse][inet[/10.244.59.5:9300]]{data=false, master=true}])
[2015-08-21 11:04:41,076][INFO ][cluster.service ] [Arc] added {[Power Princess][V4qnR-6jQOS5ovXQsPgo7g][es-master-57h7k][inet[/10.244.53.3:9300]]{data=false, master=true},}, reason: zen-disco-receive(join from node[[Power Princess][V4qnR-6jQOS5ovXQsPgo7g][es-master-57h7k][inet[/10.244.53.3:9300]]{data=false, master=true}])
[2015-08-21 11:04:53,966][INFO ][cluster.service ] [Arc] added {[Cagliostro][Wpfx5fkBRiG2qCEWd8laaQ][es-client-ye5s1][inet[/10.244.15.3:9300]]{data=false, master=false},}, reason: zen-disco-receive(join from node[[Cagliostro][Wpfx5fkBRiG2qCEWd8laaQ][es-client-ye5s1][inet[/10.244.15.3:9300]]{data=false, master=false}])
[2015-08-21 11:04:56,803][INFO ][cluster.service ] [Arc] added {[Thog][vkdEtX3ESfWmhXXf-Wi0_Q][es-data-8az22][inet[/10.244.15.4:9300]]{master=false},}, reason: zen-disco-receive(join from node[[Thog][vkdEtX3ESfWmhXXf-Wi0_Q][es-data-8az22][inet[/10.244.15.4:9300]]{master=false}])
```

## Access the service

*Don't forget* that services in Kubernetes are only accessible from containers in the cluster. For different behavior you should [configure the creation of an external load-balancer](http://kubernetes.io/v1.0/docs/user-guide/services.html#type-loadbalancer). While it's supported within this example service descriptor, its usage is out of scope of this document, for now.

```
$ kubectl get service elasticsearch
NAME            LABELS                                SELECTOR                              IP(S)          PORT(S)
elasticsearch   component=elasticsearch,role=client   component=elasticsearch,role=client   10.100.134.2   9200/TCP
```

From any host on your cluster (that's running `kube-proxy`), run:

```
curl http://10.100.134.2:9200
```

You should see something similar to the following:

```json
{
  "status" : 200,
  "name" : "Cagliostro",
  "cluster_name" : "myesdb",
  "version" : {
    "number" : "1.7.1",
    "build_hash" : "b88f43fc40b0bcd7f173a1f9ee2e97816de80b19",
    "build_timestamp" : "2015-07-29T09:54:16Z",
    "build_snapshot" : false,
    "lucene_version" : "4.10.4"
  },
  "tagline" : "You Know, for Search"
}
```

Or if you want to check cluster information:

```
curl http://10.100.134.2:9200/_cluster/health?pretty
```

You should see something similar to the following:

```json
{
  "cluster_name" : "myesdb",
  "status" : "green",
  "timed_out" : false,
  "number_of_nodes" : 7,
  "number_of_data_nodes" : 2,
  "active_primary_shards" : 0,
  "active_shards" : 0,
  "relocating_shards" : 0,
  "initializing_shards" : 0,
  "unassigned_shards" : 0,
  "delayed_unassigned_shards" : 0,
  "number_of_pending_tasks" : 0,
  "number_of_in_flight_fetch" : 0
}
```
51
vendor/k8s.io/kubernetes/examples/elasticsearch/production_cluster/es-client-rc.yaml
generated
vendored
Normal file
@@ -0,0 +1,51 @@
apiVersion: v1
kind: ReplicationController
metadata:
  name: es-client
  labels:
    component: elasticsearch
    role: client
spec:
  replicas: 1
  template:
    metadata:
      labels:
        component: elasticsearch
        role: client
    spec:
      serviceAccount: elasticsearch
      containers:
      - name: es-client
        securityContext:
          capabilities:
            add:
            - IPC_LOCK
        image: quay.io/pires/docker-elasticsearch-kubernetes:1.7.1-4
        env:
        - name: KUBERNETES_CA_CERTIFICATE_FILE
          value: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
        - name: NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        - name: "CLUSTER_NAME"
          value: "myesdb"
        - name: NODE_MASTER
          value: "false"
        - name: NODE_DATA
          value: "false"
        - name: HTTP_ENABLE
          value: "true"
        ports:
        - containerPort: 9200
          name: http
          protocol: TCP
        - containerPort: 9300
          name: transport
          protocol: TCP
        volumeMounts:
        - mountPath: /data
          name: storage
      volumes:
      - name: storage
        emptyDir: {}
46
vendor/k8s.io/kubernetes/examples/elasticsearch/production_cluster/es-data-rc.yaml
generated
vendored
Normal file
@@ -0,0 +1,46 @@
apiVersion: v1
kind: ReplicationController
metadata:
  name: es-data
  labels:
    component: elasticsearch
    role: data
spec:
  replicas: 1
  template:
    metadata:
      labels:
        component: elasticsearch
        role: data
    spec:
      serviceAccount: elasticsearch
      containers:
      - name: es-data
        securityContext:
          capabilities:
            add:
            - IPC_LOCK
        image: quay.io/pires/docker-elasticsearch-kubernetes:1.7.1-4
        env:
        - name: KUBERNETES_CA_CERTIFICATE_FILE
          value: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
        - name: NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        - name: "CLUSTER_NAME"
          value: "myesdb"
        - name: NODE_MASTER
          value: "false"
        - name: HTTP_ENABLE
          value: "false"
        ports:
        - containerPort: 9300
          name: transport
          protocol: TCP
        volumeMounts:
        - mountPath: /data
          name: storage
      volumes:
      - name: storage
        emptyDir: {}
15
vendor/k8s.io/kubernetes/examples/elasticsearch/production_cluster/es-discovery-svc.yaml
generated
vendored
Normal file
@@ -0,0 +1,15 @@
apiVersion: v1
kind: Service
metadata:
  name: elasticsearch-discovery
  labels:
    component: elasticsearch
    role: master
spec:
  selector:
    component: elasticsearch
    role: master
  ports:
  - name: transport
    port: 9300
    protocol: TCP
48
vendor/k8s.io/kubernetes/examples/elasticsearch/production_cluster/es-master-rc.yaml
generated
vendored
Normal file
@@ -0,0 +1,48 @@
apiVersion: v1
kind: ReplicationController
metadata:
  name: es-master
  labels:
    component: elasticsearch
    role: master
spec:
  replicas: 1
  template:
    metadata:
      labels:
        component: elasticsearch
        role: master
    spec:
      serviceAccount: elasticsearch
      containers:
      - name: es-master
        securityContext:
          capabilities:
            add:
            - IPC_LOCK
        image: quay.io/pires/docker-elasticsearch-kubernetes:1.7.1-4
        env:
        - name: KUBERNETES_CA_CERTIFICATE_FILE
          value: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
        - name: NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        - name: "CLUSTER_NAME"
          value: "myesdb"
        - name: NODE_MASTER
          value: "true"
        - name: NODE_DATA
          value: "false"
        - name: HTTP_ENABLE
          value: "false"
        ports:
        - containerPort: 9300
          name: transport
          protocol: TCP
        volumeMounts:
        - mountPath: /data
          name: storage
      volumes:
      - name: storage
        emptyDir: {}
16
vendor/k8s.io/kubernetes/examples/elasticsearch/production_cluster/es-svc.yaml
generated
vendored
Normal file
@@ -0,0 +1,16 @@
apiVersion: v1
kind: Service
metadata:
  name: elasticsearch
  labels:
    component: elasticsearch
    role: client
spec:
  type: LoadBalancer
  selector:
    component: elasticsearch
    role: client
  ports:
  - name: http
    port: 9200
    protocol: TCP
4
vendor/k8s.io/kubernetes/examples/elasticsearch/production_cluster/service-account.yaml
generated
vendored
Normal file
@@ -0,0 +1,4 @@
apiVersion: v1
kind: ServiceAccount
metadata:
  name: elasticsearch
4
vendor/k8s.io/kubernetes/examples/elasticsearch/service-account.yaml
generated
vendored
Normal file
@@ -0,0 +1,4 @@
apiVersion: v1
kind: ServiceAccount
metadata:
  name: elasticsearch
461
vendor/k8s.io/kubernetes/examples/examples_test.go
generated
vendored
Normal file
@@ -0,0 +1,461 @@
/*
Copyright 2014 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package examples_test

import (
	"fmt"
	"io/ioutil"
	"os"
	"path/filepath"
	"regexp"
	"strings"
	"testing"

	"github.com/golang/glog"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/apimachinery/pkg/util/validation/field"
	"k8s.io/apimachinery/pkg/util/yaml"
	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/api/testapi"
	"k8s.io/kubernetes/pkg/api/validation"
	"k8s.io/kubernetes/pkg/apis/apps"
	appsvalidation "k8s.io/kubernetes/pkg/apis/apps/validation"
	"k8s.io/kubernetes/pkg/apis/batch"
	"k8s.io/kubernetes/pkg/apis/extensions"
	expvalidation "k8s.io/kubernetes/pkg/apis/extensions/validation"
	"k8s.io/kubernetes/pkg/capabilities"
	"k8s.io/kubernetes/pkg/registry/batch/job"
	schedulerapi "k8s.io/kubernetes/plugin/pkg/scheduler/api"
	schedulerapilatest "k8s.io/kubernetes/plugin/pkg/scheduler/api/latest"
)
func validateObject(obj runtime.Object) (errors field.ErrorList) {
	switch t := obj.(type) {
	case *api.ReplicationController:
		if t.Namespace == "" {
			t.Namespace = api.NamespaceDefault
		}
		errors = validation.ValidateReplicationController(t)
	case *api.ReplicationControllerList:
		for i := range t.Items {
			errors = append(errors, validateObject(&t.Items[i])...)
		}
	case *api.Service:
		if t.Namespace == "" {
			t.Namespace = api.NamespaceDefault
		}
		errors = validation.ValidateService(t)
	case *api.ServiceList:
		for i := range t.Items {
			errors = append(errors, validateObject(&t.Items[i])...)
		}
	case *api.Pod:
		if t.Namespace == "" {
			t.Namespace = api.NamespaceDefault
		}
		errors = validation.ValidatePod(t)
	case *api.PodList:
		for i := range t.Items {
			errors = append(errors, validateObject(&t.Items[i])...)
		}
	case *api.PersistentVolume:
		errors = validation.ValidatePersistentVolume(t)
	case *api.PersistentVolumeClaim:
		if t.Namespace == "" {
			t.Namespace = api.NamespaceDefault
		}
		errors = validation.ValidatePersistentVolumeClaim(t)
	case *api.PodTemplate:
		if t.Namespace == "" {
			t.Namespace = api.NamespaceDefault
		}
		errors = validation.ValidatePodTemplate(t)
	case *api.Endpoints:
		if t.Namespace == "" {
			t.Namespace = api.NamespaceDefault
		}
		errors = validation.ValidateEndpoints(t)
	case *api.Namespace:
		errors = validation.ValidateNamespace(t)
	case *api.Secret:
		if t.Namespace == "" {
			t.Namespace = api.NamespaceDefault
		}
		errors = validation.ValidateSecret(t)
	case *api.LimitRange:
		if t.Namespace == "" {
			t.Namespace = api.NamespaceDefault
		}
		errors = validation.ValidateLimitRange(t)
	case *api.ResourceQuota:
		if t.Namespace == "" {
			t.Namespace = api.NamespaceDefault
		}
		errors = validation.ValidateResourceQuota(t)
	case *extensions.Deployment:
		if t.Namespace == "" {
			t.Namespace = api.NamespaceDefault
		}
		errors = expvalidation.ValidateDeployment(t)
	case *batch.Job:
		if t.Namespace == "" {
			t.Namespace = api.NamespaceDefault
		}
		// Job needs generateSelector called before validation, and job.Validate does this.
		// See: https://github.com/kubernetes/kubernetes/issues/20951#issuecomment-187787040
		t.ObjectMeta.UID = types.UID("fakeuid")
		errors = job.Strategy.Validate(nil, t)
	case *extensions.Ingress:
		if t.Namespace == "" {
			t.Namespace = api.NamespaceDefault
		}
		errors = expvalidation.ValidateIngress(t)
	case *extensions.DaemonSet:
		if t.Namespace == "" {
			t.Namespace = api.NamespaceDefault
		}
		errors = expvalidation.ValidateDaemonSet(t)
	case *apps.StatefulSet:
		if t.Namespace == "" {
			t.Namespace = api.NamespaceDefault
		}
		errors = appsvalidation.ValidateStatefulSet(t)
	default:
		errors = field.ErrorList{}
		errors = append(errors, field.InternalError(field.NewPath(""), fmt.Errorf("no validation defined for %#v", obj)))
	}
	return errors
}
func walkJSONFiles(inDir string, fn func(name, path string, data []byte)) error {
	return filepath.Walk(inDir, func(path string, info os.FileInfo, err error) error {
		if err != nil {
			return err
		}

		if info.IsDir() && path != inDir {
			return filepath.SkipDir
		}

		file := filepath.Base(path)
		if ext := filepath.Ext(file); ext == ".json" || ext == ".yaml" {
			glog.Infof("Testing %s", path)
			data, err := ioutil.ReadFile(path)
			if err != nil {
				return err
			}
			name := strings.TrimSuffix(file, ext)

			if ext == ".yaml" {
				out, err := yaml.ToJSON(data)
				if err != nil {
					return fmt.Errorf("%s: %v", path, err)
				}
				data = out
			}

			fn(name, path, data)
		}
		return nil
	})
}
func TestExampleObjectSchemas(t *testing.T) {
	cases := map[string]map[string]runtime.Object{
		"../examples/guestbook": {
			"frontend-deployment":     &extensions.Deployment{},
			"redis-slave-deployment":  &extensions.Deployment{},
			"redis-master-deployment": &extensions.Deployment{},
			"frontend-service":        &api.Service{},
			"redis-master-service":    &api.Service{},
			"redis-slave-service":     &api.Service{},
		},
		"../examples/guestbook/legacy": {
			"frontend-controller":     &api.ReplicationController{},
			"redis-slave-controller":  &api.ReplicationController{},
			"redis-master-controller": &api.ReplicationController{},
		},
		"../examples/guestbook-go": {
			"guestbook-controller":    &api.ReplicationController{},
			"redis-slave-controller":  &api.ReplicationController{},
			"redis-master-controller": &api.ReplicationController{},
			"guestbook-service":       &api.Service{},
			"redis-master-service":    &api.Service{},
			"redis-slave-service":     &api.Service{},
		},
		"../examples/volumes/iscsi": {
			"iscsi": &api.Pod{},
		},
		"../examples/volumes/glusterfs": {
			"glusterfs-pod":       &api.Pod{},
			"glusterfs-endpoints": &api.Endpoints{},
			"glusterfs-service":   &api.Service{},
		},
		"../examples": {
			"scheduler-policy-config":               &schedulerapi.Policy{},
			"scheduler-policy-config-with-extender": &schedulerapi.Policy{},
		},
		"../examples/volumes/rbd/secret": {
			"ceph-secret": &api.Secret{},
		},
		"../examples/volumes/rbd": {
			"rbd":             &api.Pod{},
			"rbd-with-secret": &api.Pod{},
		},
		"../examples/storage/cassandra": {
			"cassandra-daemonset":   &extensions.DaemonSet{},
			"cassandra-controller":  &api.ReplicationController{},
			"cassandra-service":     &api.Service{},
			"cassandra-statefulset": &apps.StatefulSet{},
		},
		"../examples/cluster-dns": {
			"dns-backend-rc":      &api.ReplicationController{},
			"dns-backend-service": &api.Service{},
			"dns-frontend-pod":    &api.Pod{},
			"namespace-dev":       &api.Namespace{},
			"namespace-prod":      &api.Namespace{},
		},
		"../examples/elasticsearch": {
			"es-rc":           &api.ReplicationController{},
			"es-svc":          &api.Service{},
			"service-account": nil,
		},
		"../examples/explorer": {
			"pod": &api.Pod{},
		},
		"../examples/storage/hazelcast": {
			"hazelcast-controller": &api.ReplicationController{},
			"hazelcast-service":    &api.Service{},
		},
		"../examples/meteor": {
			"meteor-controller": &api.ReplicationController{},
			"meteor-service":    &api.Service{},
			"mongo-pod":         &api.Pod{},
			"mongo-service":     &api.Service{},
		},
		"../examples/mysql-wordpress-pd": {
			"gce-volumes":          &api.PersistentVolume{},
			"local-volumes":        &api.PersistentVolume{},
			"mysql-deployment":     &api.Service{},
			"wordpress-deployment": &api.Service{},
		},
		"../examples/volumes/nfs": {
			"nfs-busybox-rc":     &api.ReplicationController{},
			"nfs-server-rc":      &api.ReplicationController{},
			"nfs-server-service": &api.Service{},
			"nfs-pv":             &api.PersistentVolume{},
			"nfs-pvc":            &api.PersistentVolumeClaim{},
			"nfs-web-rc":         &api.ReplicationController{},
			"nfs-web-service":    &api.Service{},
		},
		"../examples/openshift-origin": {
			"openshift-origin-namespace": &api.Namespace{},
			"openshift-controller":       &extensions.Deployment{},
			"openshift-service":          &api.Service{},
			"etcd-controller":            &extensions.Deployment{},
			"etcd-service":               &api.Service{},
			"etcd-discovery-controller":  &extensions.Deployment{},
			"etcd-discovery-service":     &api.Service{},
			"secret":                     nil,
		},
		"../examples/phabricator": {
			"phabricator-controller": &api.ReplicationController{},
			"phabricator-service":    &api.Service{},
		},
		"../examples/storage/redis": {
			"redis-controller":          &api.ReplicationController{},
			"redis-master":              &api.Pod{},
			"redis-proxy":               &api.Pod{},
			"redis-sentinel-controller": &api.ReplicationController{},
			"redis-sentinel-service":    &api.Service{},
		},
		"../examples/storage/rethinkdb": {
			"admin-pod":      &api.Pod{},
			"admin-service":  &api.Service{},
			"driver-service": &api.Service{},
			"rc":             &api.ReplicationController{},
		},
		"../examples/spark": {
			"namespace-spark-cluster":   &api.Namespace{},
			"spark-master-controller":   &api.ReplicationController{},
			"spark-master-service":      &api.Service{},
			"spark-ui-proxy-controller": &api.ReplicationController{},
			"spark-ui-proxy-service":    &api.Service{},
			"spark-worker-controller":   &api.ReplicationController{},
			"zeppelin-controller":       &api.ReplicationController{},
			"zeppelin-service":          &api.Service{},
		},
		"../examples/spark/spark-gluster": {
			"spark-master-service":    &api.Service{},
			"spark-master-controller": &api.ReplicationController{},
			"spark-worker-controller": &api.ReplicationController{},
			"glusterfs-endpoints":     &api.Endpoints{},
		},
		"../examples/storm": {
			"storm-nimbus-service":    &api.Service{},
			"storm-nimbus":            &api.Pod{},
			"storm-worker-controller": &api.ReplicationController{},
			"zookeeper-service":       &api.Service{},
			"zookeeper":               &api.Pod{},
		},
		"../examples/volumes/cephfs/": {
			"cephfs":             &api.Pod{},
			"cephfs-with-secret": &api.Pod{},
		},
		"../examples/volumes/fibre_channel": {
			"fc": &api.Pod{},
		},
		"../examples/javaweb-tomcat-sidecar": {
			"javaweb":   &api.Pod{},
			"javaweb-2": &api.Pod{},
		},
		"../examples/volumes/azure_file": {
			"azure": &api.Pod{},
		},
		"../examples/volumes/azure_disk": {
			"azure": &api.Pod{},
		},
	}

	capabilities.SetForTests(capabilities.Capabilities{
		AllowPrivileged: true,
	})

	for path, expected := range cases {
		tested := 0
		err := walkJSONFiles(path, func(name, path string, data []byte) {
			expectedType, found := expected[name]
			if !found {
				t.Errorf("%s: %s does not have a test case defined", path, name)
				return
			}
			tested++
			if expectedType == nil {
				t.Logf("skipping : %s/%s\n", path, name)
				return
			}
			if strings.Contains(name, "scheduler-policy-config") {
				if err := runtime.DecodeInto(schedulerapilatest.Codec, data, expectedType); err != nil {
					t.Errorf("%s did not decode correctly: %v\n%s", path, err, string(data))
					return
				}
				//TODO: Add validate method for &schedulerapi.Policy
			} else {
				codec, err := testapi.GetCodecForObject(expectedType)
				if err != nil {
					t.Errorf("Could not get codec for %s: %s", expectedType, err)
				}
				if err := runtime.DecodeInto(codec, data, expectedType); err != nil {
					t.Errorf("%s did not decode correctly: %v\n%s", path, err, string(data))
					return
				}
				if errors := validateObject(expectedType); len(errors) > 0 {
					t.Errorf("%s did not validate correctly: %v", path, errors)
				}
			}
		})
		if err != nil {
			t.Errorf("Expected no error, Got %v", err)
		}
		if tested != len(expected) {
			t.Errorf("Directory %v: Expected %d examples, Got %d", path, len(expected), tested)
		}
	}
}
// This regex is tricky, but it works. For future me, here is the decode:
//
// Flags: (?ms) = multiline match, allow . to match \n
// 1) Look for a line that starts with ``` (a markdown code block)
// 2) (?: ... ) = non-capturing group
// 3) (P<name>) = capture group as "name"
// 4) Look for #1 followed by either:
//    4a) "yaml" followed by any word-characters followed by a newline (e.g. ```yamlfoo\n)
//    4b) any word-characters followed by a newline (e.g. ```json\n)
// 5) Look for either:
//    5a) #4a followed by one or more characters (non-greedy)
//    5b) #4b followed by { followed by one or more characters (non-greedy) followed by }
// 6) Look for #5 followed by a newline followed by ``` (end of the code block)
//
// This could probably be simplified, but is already too delicate. Before any
// real changes, we should have a test case that just tests this regex.
var sampleRegexp = regexp.MustCompile("(?ms)^```(?:(?P<type>yaml)\\w*\\n(?P<content>.+?)|\\w*\\n(?P<content>\\{.+?\\}))\\n^```")
var subsetRegexp = regexp.MustCompile("(?ms)\\.{3}")
func TestReadme(t *testing.T) {
	paths := []struct {
		file         string
		expectedType []runtime.Object
	}{
		{"../README.md", []runtime.Object{&api.Pod{}}},
		{"../examples/volumes/iscsi/README.md", []runtime.Object{&api.Pod{}}},
	}

	for _, path := range paths {
		data, err := ioutil.ReadFile(path.file)
		if err != nil {
			t.Errorf("Unable to read file %s: %v", path, err)
			continue
		}

		matches := sampleRegexp.FindAllStringSubmatch(string(data), -1)
		if matches == nil {
			continue
		}
		ix := 0
		for _, match := range matches {
			var content, subtype string
			for i, name := range sampleRegexp.SubexpNames() {
				if name == "type" {
					subtype = match[i]
				}
				if name == "content" && match[i] != "" {
					content = match[i]
				}
			}
			if subtype == "yaml" && subsetRegexp.FindString(content) != "" {
				t.Logf("skipping (%s): \n%s", subtype, content)
				continue
			}

			var expectedType runtime.Object
			if len(path.expectedType) == 1 {
				expectedType = path.expectedType[0]
			} else {
				expectedType = path.expectedType[ix]
				ix++
			}
			json, err := yaml.ToJSON([]byte(content))
			if err != nil {
				t.Errorf("%s could not be converted to JSON: %v\n%s", path, err, string(content))
			}
			if err := runtime.DecodeInto(testapi.Default.Codec(), json, expectedType); err != nil {
				t.Errorf("%s did not decode correctly: %v\n%s", path, err, string(content))
				continue
			}
			if errors := validateObject(expectedType); len(errors) > 0 {
				t.Errorf("%s did not validate correctly: %v", path, errors)
			}
			_, err = runtime.Encode(testapi.Default.Codec(), expectedType)
			if err != nil {
				t.Errorf("Could not encode object: %v", err)
				continue
			}
		}
	}
}
35
vendor/k8s.io/kubernetes/examples/explorer/BUILD
generated
vendored
Normal file
@@ -0,0 +1,35 @@
package(default_visibility = ["//visibility:public"])

licenses(["notice"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_binary",
    "go_library",
)

go_binary(
    name = "explorer",
    library = ":go_default_library",
    tags = ["automanaged"],
)

go_library(
    name = "go_default_library",
    srcs = ["explorer.go"],
    tags = ["automanaged"],
    deps = ["//vendor:github.com/davecgh/go-spew/spew"],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
)
19
vendor/k8s.io/kubernetes/examples/explorer/Dockerfile
generated
vendored
Normal file
@@ -0,0 +1,19 @@
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

FROM scratch
ADD explorer explorer
ADD README.md README.md
EXPOSE 8080
ENTRYPOINT ["/explorer"]
30
vendor/k8s.io/kubernetes/examples/explorer/Makefile
generated
vendored
Normal file
@@ -0,0 +1,30 @@
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

all: push

# Keep this one version ahead, so no one accidentally blows away the latest published version.
TAG = 1.1

explorer: explorer.go
	CGO_ENABLED=0 GOOS=linux go build -a -installsuffix cgo -ldflags '-w' ./explorer.go

container: explorer
	docker build --pull -t gcr.io/google_containers/explorer:$(TAG) .

push: container
	gcloud docker -- push gcr.io/google_containers/explorer:$(TAG)

clean:
	rm -f explorer
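Since `TAG` is an ordinary make variable, a different version can be built and pushed without editing the file by overriding it on the command line (a sketch; assumes docker and gcloud are configured for the target registry):

```
make push TAG=1.2
```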
133
vendor/k8s.io/kubernetes/examples/explorer/README.md
generated
vendored
Normal file
@@ -0,0 +1,133 @@
### explorer

Explorer is a little container for examining the runtime environment Kubernetes produces for your pods.

The intended use is to substitute gcr.io/google_containers/explorer for your intended container, and then visit it via the proxy.

Currently, you can look at:
 * The environment variables, to make sure Kubernetes is doing what you expect.
 * The filesystem, to make sure the mounted volumes and files are also what you expect.
 * DNS lookups, to see how DNS works.

`pod.yaml` is supplied as an example. You can control the port it serves on with the `-port` flag.

Example from the command line (the DNS lookup looks better from a web browser):

```console
$ kubectl create -f examples/explorer/pod.yaml
$ kubectl proxy &
Starting to serve on localhost:8001

$ curl localhost:8001/api/v1/proxy/namespaces/default/pods/explorer:8080/vars/
PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
HOSTNAME=explorer
KIBANA_LOGGING_PORT_5601_TCP_PORT=5601
KUBERNETES_SERVICE_HOST=10.0.0.2
MONITORING_GRAFANA_PORT_80_TCP_PROTO=tcp
MONITORING_INFLUXDB_UI_PORT_80_TCP_PROTO=tcp
KIBANA_LOGGING_SERVICE_PORT=5601
MONITORING_HEAPSTER_PORT_80_TCP_PORT=80
MONITORING_INFLUXDB_UI_PORT_80_TCP_PORT=80
KIBANA_LOGGING_SERVICE_HOST=10.0.204.206
KIBANA_LOGGING_PORT_5601_TCP=tcp://10.0.204.206:5601
KUBERNETES_PORT=tcp://10.0.0.2:443
MONITORING_INFLUXDB_PORT=tcp://10.0.2.30:80
MONITORING_INFLUXDB_PORT_80_TCP_PROTO=tcp
MONITORING_INFLUXDB_UI_PORT=tcp://10.0.36.78:80
KUBE_DNS_PORT_53_UDP=udp://10.0.0.10:53
MONITORING_INFLUXDB_SERVICE_HOST=10.0.2.30
ELASTICSEARCH_LOGGING_PORT=tcp://10.0.48.200:9200
ELASTICSEARCH_LOGGING_PORT_9200_TCP_PORT=9200
KUBERNETES_PORT_443_TCP=tcp://10.0.0.2:443
ELASTICSEARCH_LOGGING_PORT_9200_TCP_PROTO=tcp
KIBANA_LOGGING_PORT_5601_TCP_ADDR=10.0.204.206
KUBE_DNS_PORT_53_UDP_ADDR=10.0.0.10
MONITORING_HEAPSTER_PORT_80_TCP_PROTO=tcp
MONITORING_INFLUXDB_PORT_80_TCP_ADDR=10.0.2.30
KIBANA_LOGGING_PORT=tcp://10.0.204.206:5601
MONITORING_GRAFANA_SERVICE_PORT=80
MONITORING_HEAPSTER_SERVICE_PORT=80
MONITORING_HEAPSTER_PORT_80_TCP=tcp://10.0.150.238:80
ELASTICSEARCH_LOGGING_PORT_9200_TCP=tcp://10.0.48.200:9200
ELASTICSEARCH_LOGGING_PORT_9200_TCP_ADDR=10.0.48.200
MONITORING_GRAFANA_PORT_80_TCP_PORT=80
MONITORING_HEAPSTER_PORT=tcp://10.0.150.238:80
MONITORING_INFLUXDB_PORT_80_TCP=tcp://10.0.2.30:80
KUBE_DNS_SERVICE_PORT=53
KUBE_DNS_PORT_53_UDP_PORT=53
MONITORING_GRAFANA_PORT_80_TCP_ADDR=10.0.100.174
MONITORING_INFLUXDB_UI_SERVICE_HOST=10.0.36.78
KIBANA_LOGGING_PORT_5601_TCP_PROTO=tcp
MONITORING_GRAFANA_PORT=tcp://10.0.100.174:80
MONITORING_INFLUXDB_UI_PORT_80_TCP_ADDR=10.0.36.78
KUBE_DNS_SERVICE_HOST=10.0.0.10
KUBERNETES_PORT_443_TCP_PORT=443
MONITORING_HEAPSTER_PORT_80_TCP_ADDR=10.0.150.238
MONITORING_INFLUXDB_UI_SERVICE_PORT=80
KUBE_DNS_PORT=udp://10.0.0.10:53
ELASTICSEARCH_LOGGING_SERVICE_HOST=10.0.48.200
KUBERNETES_SERVICE_PORT=443
MONITORING_HEAPSTER_SERVICE_HOST=10.0.150.238
MONITORING_INFLUXDB_SERVICE_PORT=80
MONITORING_INFLUXDB_PORT_80_TCP_PORT=80
KUBE_DNS_PORT_53_UDP_PROTO=udp
MONITORING_GRAFANA_PORT_80_TCP=tcp://10.0.100.174:80
ELASTICSEARCH_LOGGING_SERVICE_PORT=9200
MONITORING_GRAFANA_SERVICE_HOST=10.0.100.174
MONITORING_INFLUXDB_UI_PORT_80_TCP=tcp://10.0.36.78:80
KUBERNETES_PORT_443_TCP_PROTO=tcp
KUBERNETES_PORT_443_TCP_ADDR=10.0.0.2
HOME=/

$ curl localhost:8001/api/v1/proxy/namespaces/default/pods/explorer:8080/fs/
mount/
var/
.dockerenv
etc/
dev/
proc/
.dockerinit
sys/
README.md
explorer

$ curl localhost:8001/api/v1/proxy/namespaces/default/pods/explorer:8080/dns?q=elasticsearch-logging
<html><head></head><body>
<form action="/api/v1/proxy/namespaces/default/pods/explorer:8080/dns">
<input name="q" type="text" value="elasticsearch-logging"/>
<button type="submit">Lookup</button>
</form>
<br/><br/><pre>LookupNS(elasticsearch-logging):
Result: ([]*net.NS)<nil>
Error: <*>lookup elasticsearch-logging: no such host

LookupTXT(elasticsearch-logging):
Result: ([]string)<nil>
Error: <*>lookup elasticsearch-logging: no such host

LookupSRV("", "", elasticsearch-logging):
cname: elasticsearch-logging.default.svc.cluster.local.
Result: ([]*net.SRV)[<*>{Target:(string)elasticsearch-logging.default.svc.cluster.local. Port:(uint16)9200 Priority:(uint16)10 Weight:(uint16)100}]
Error: <nil>

LookupHost(elasticsearch-logging):
Result: ([]string)[10.0.60.245]
Error: <nil>

LookupIP(elasticsearch-logging):
Result: ([]net.IP)[10.0.60.245]
Error: <nil>

LookupMX(elasticsearch-logging):
Result: ([]*net.MX)<nil>
Error: <*>lookup elasticsearch-logging: no such host

</pre>

</body></html>
```
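The handler list served at `/` also includes `/quit`, which makes the container exit; hitting it through the same proxy is a quick way to observe the pod's restart behavior (a sketch; assumes the pod keeps the default `Always` restart policy):

```console
$ curl localhost:8001/api/v1/proxy/namespaces/default/pods/explorer:8080/quit
$ kubectl get pod explorer   # RESTARTS should increment once the container comes back
```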
122
vendor/k8s.io/kubernetes/examples/explorer/explorer.go
generated
vendored
Normal file
@@ -0,0 +1,122 @@
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// A tiny web server for viewing the environment Kubernetes creates for your
// containers. It exposes the filesystem and environment variables via an
// HTTP server.
package main

import (
	"flag"
	"fmt"
	"log"
	"net"
	"net/http"
	"os"

	"github.com/davecgh/go-spew/spew"
)

var (
	port = flag.Int("port", 8080, "Port number to serve at.")
)

func main() {
	flag.Parse()
	hostname, err := os.Hostname()
	if err != nil {
		log.Fatalf("Error getting hostname: %v", err)
	}

	links := []struct {
		link, desc string
	}{
		{"/fs/", "Complete file system as seen by this container."},
		{"/vars/", "Environment variables as seen by this container."},
		{"/hostname/", "Hostname as seen by this container."},
		{"/dns?q=google.com", "Explore DNS records seen by this container."},
		{"/quit", "Cause this container to exit."},
	}

	http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintf(w, "<b> Kubernetes environment explorer </b><br/><br/>")
		for _, v := range links {
			fmt.Fprintf(w, `<a href="%v">%v: %v</a><br/>`, v.link, v.link, v.desc)
		}
	})

	http.Handle("/fs/", http.StripPrefix("/fs/", http.FileServer(http.Dir("/"))))
	http.HandleFunc("/vars/", func(w http.ResponseWriter, r *http.Request) {
		for _, v := range os.Environ() {
			fmt.Fprintf(w, "%v\n", v)
		}
	})
	http.HandleFunc("/hostname/", func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprint(w, hostname)
	})
	http.HandleFunc("/quit", func(w http.ResponseWriter, r *http.Request) {
		os.Exit(0)
	})
	http.HandleFunc("/dns", dns)

	// ListenAndServe blocks until the server fails; log.Fatal then reports the error.
	log.Fatal(http.ListenAndServe(fmt.Sprintf("0.0.0.0:%d", *port), nil))
}

func dns(w http.ResponseWriter, r *http.Request) {
	q := r.URL.Query().Get("q")
	// Note that the below is NOT safe from input attacks, but that's OK
	// because this is just for debugging.
	fmt.Fprintf(w, `<html><body>
<form action="/dns">
<input name="q" type="text" value="%v"></input>
<button type="submit">Lookup</button>
</form>
<br/><br/><pre>`, q)
	{
		res, err := net.LookupNS(q)
		spew.Fprintf(w, "LookupNS(%v):\nResult: %#v\nError: %v\n\n", q, res, err)
	}
	{
		res, err := net.LookupTXT(q)
		spew.Fprintf(w, "LookupTXT(%v):\nResult: %#v\nError: %v\n\n", q, res, err)
	}
	{
		cname, res, err := net.LookupSRV("", "", q)
		spew.Fprintf(w, `LookupSRV("", "", %v):
cname: %v
Result: %#v
Error: %v

`, q, cname, res, err)
	}
	{
		res, err := net.LookupHost(q)
		spew.Fprintf(w, "LookupHost(%v):\nResult: %#v\nError: %v\n\n", q, res, err)
	}
	{
		res, err := net.LookupIP(q)
		spew.Fprintf(w, "LookupIP(%v):\nResult: %#v\nError: %v\n\n", q, res, err)
	}
	{
		res, err := net.LookupMX(q)
		spew.Fprintf(w, "LookupMX(%v):\nResult: %#v\nError: %v\n\n", q, res, err)
	}
	fmt.Fprintf(w, `</pre>
</body>
</html>`)
}
18 vendor/k8s.io/kubernetes/examples/explorer/pod.yaml generated vendored Normal file
@@ -0,0 +1,18 @@
apiVersion: v1
kind: Pod
metadata:
  name: explorer
spec:
  containers:
    - name: explorer
      image: gcr.io/google_containers/explorer:1.0
      args: ["-port=8080"]
      ports:
        - containerPort: 8080
          protocol: TCP
      volumeMounts:
        - mountPath: "/mount/test-volume"
          name: test-volume
  volumes:
    - name: test-volume
      emptyDir: {}
1 vendor/k8s.io/kubernetes/examples/guestbook-go/.gitignore generated vendored Normal file
@@ -0,0 +1 @@
guestbook_bin
39 vendor/k8s.io/kubernetes/examples/guestbook-go/BUILD generated vendored Normal file
@@ -0,0 +1,39 @@
package(default_visibility = ["//visibility:public"])

licenses(["notice"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_binary",
    "go_library",
)

go_binary(
    name = "guestbook-go",
    library = ":go_default_library",
    tags = ["automanaged"],
)

go_library(
    name = "go_default_library",
    srcs = ["main.go"],
    tags = ["automanaged"],
    deps = [
        "//vendor:github.com/codegangsta/negroni",
        "//vendor:github.com/gorilla/mux",
        "//vendor:github.com/xyproto/simpleredis",
    ],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
)
24 vendor/k8s.io/kubernetes/examples/guestbook-go/Dockerfile generated vendored Normal file
@@ -0,0 +1,24 @@
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

FROM busybox:ubuntu-14.04

ADD ./guestbook_bin /app/guestbook
ADD ./public/index.html /app/public/index.html
ADD ./public/script.js /app/public/script.js
ADD ./public/style.css /app/public/style.css

WORKDIR /app
CMD ["./guestbook"]
EXPOSE 3000
41 vendor/k8s.io/kubernetes/examples/guestbook-go/Makefile generated vendored Normal file
@@ -0,0 +1,41 @@
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Build the guestbook-go example

# Usage:
#   [VERSION=v3] [REGISTRY="gcr.io/google_containers"] make build
VERSION?=v3
REGISTRY?=gcr.io/google_containers

release: clean build push clean

# builds a docker image that builds the app and packages it into a minimal docker image
build:
	@cp ../../bazel-bin/examples/guestbook-go/guestbook-go guestbook_bin
	docker build --pull --rm --force-rm -t ${REGISTRY}/guestbook-builder .
	docker run --rm ${REGISTRY}/guestbook-builder | docker build --pull -t "${REGISTRY}/guestbook:${VERSION}" -

# push the image to a registry
push:
	gcloud docker -- push ${REGISTRY}/guestbook:${VERSION}

# remove previous images and containers
clean:
	rm -f guestbook_bin
	docker rm -f ${REGISTRY}/guestbook-builder 2> /dev/null || true
	docker rmi -f ${REGISTRY}/guestbook-builder || true
	docker rmi -f "${REGISTRY}/guestbook:${VERSION}" || true

.PHONY: release clean build push
271 vendor/k8s.io/kubernetes/examples/guestbook-go/README.md generated vendored Normal file
@@ -0,0 +1,271 @@
## Guestbook Example

This example shows how to build a simple multi-tier web application using Kubernetes and Docker. The application consists of a web front-end, a Redis master for storage, and a replicated set of Redis slaves, for all of which we will create Kubernetes replication controllers, pods, and services.

If you are running a cluster in Google Container Engine (GKE), see the [Guestbook Example for Google Container Engine](https://cloud.google.com/container-engine/docs/tutorials/guestbook) instead.

##### Table of Contents

 * [Step Zero: Prerequisites](#step-zero)
 * [Step One: Create the Redis master pod](#step-one)
 * [Step Two: Create the Redis master service](#step-two)
 * [Step Three: Create the Redis slave pods](#step-three)
 * [Step Four: Create the Redis slave service](#step-four)
 * [Step Five: Create the guestbook pods](#step-five)
 * [Step Six: Create the guestbook service](#step-six)
 * [Step Seven: View the guestbook](#step-seven)
 * [Step Eight: Cleanup](#step-eight)

### Step Zero: Prerequisites <a id="step-zero"></a>

This example assumes that you have a working cluster. See the [Getting Started Guides](../../docs/getting-started-guides/) for details about creating a cluster.

**Tip:** View all the `kubectl` commands, including their options and descriptions, in the [kubectl CLI reference](../../docs/user-guide/kubectl/kubectl.md).

### Step One: Create the Redis master pod <a id="step-one"></a>

Use the `examples/guestbook-go/redis-master-controller.json` file to create a [replication controller](../../docs/user-guide/replication-controller.md) and Redis master [pod](../../docs/user-guide/pods.md). The pod runs a Redis key-value server in a container. Using a replication controller is the preferred way to launch long-running pods, even for a single replica, so that the pod benefits from the self-healing mechanism in Kubernetes (it keeps the pods alive).

1. Use the [redis-master-controller.json](redis-master-controller.json) file to create the Redis master replication controller in your Kubernetes cluster by running the `kubectl create -f` *`filename`* command:

    ```console
    $ kubectl create -f examples/guestbook-go/redis-master-controller.json
    replicationcontrollers/redis-master
    ```

2. To verify that the redis-master controller is up, list the replication controllers you created in the cluster with the `kubectl get rc` command (if you don't specify a `--namespace`, the `default` namespace will be used; the same applies below):

    ```console
    $ kubectl get rc
    CONTROLLER             CONTAINER(S)            IMAGE(S)                    SELECTOR                         REPLICAS
    redis-master           redis-master            gurpartap/redis             app=redis,role=master            1
    ...
    ```

    Result: The replication controller then creates the single Redis master pod.

3. To verify that the redis-master pod is running, list the pods you created in the cluster with the `kubectl get pods` command:

    ```console
    $ kubectl get pods
    NAME                 READY     STATUS    RESTARTS   AGE
    redis-master-xx4uv   1/1       Running   0          1m
    ...
    ```

    Result: You'll see a single Redis master pod and the machine where the pod is running once the pod gets placed (this may take up to thirty seconds).

4. To verify what containers are running in the redis-master pod, you can SSH to that machine with `gcloud compute ssh --zone` *`zone_name`* *`host_name`* and then run `docker ps`:

    ```console
    me@workstation$ gcloud compute ssh --zone us-central1-b kubernetes-node-bz1p

    me@kubernetes-node-3:~$ sudo docker ps
    CONTAINER ID        IMAGE                      COMMAND                 CREATED              STATUS
    d5c458dabe50        redis                      "/entrypoint.sh redis"  5 minutes ago        Up 5 minutes
    ```

    Note: The initial `docker pull` can take a few minutes, depending on network conditions.

### Step Two: Create the Redis master service <a id="step-two"></a>

A Kubernetes [service](../../docs/user-guide/services.md) is a named load balancer that proxies traffic to one or more pods. The services in a Kubernetes cluster are discoverable inside other pods via environment variables or DNS.

Services find the pods to load balance based on pod labels. The pod that you created in Step One has the labels `app=redis` and `role=master`. The selector field of the service determines which pods will receive the traffic sent to the service.

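The selector is an ordinary label query, so you can run the same query yourself to see exactly which pods a service would target. A quick illustrative check, reusing the pod from step one (your pod name suffix will differ):

```console
$ kubectl get pods -l app=redis,role=master
NAME                 READY     STATUS    RESTARTS   AGE
redis-master-xx4uv   1/1       Running   0          1m
```
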
1. Use the [redis-master-service.json](redis-master-service.json) file to create the service in your Kubernetes cluster by running the `kubectl create -f` *`filename`* command:

    ```console
    $ kubectl create -f examples/guestbook-go/redis-master-service.json
    services/redis-master
    ```

2. To verify that the redis-master service is up, list the services you created in the cluster with the `kubectl get services` command:

    ```console
    $ kubectl get services
    NAME              CLUSTER_IP       EXTERNAL_IP       PORT(S)       SELECTOR               AGE
    redis-master      10.0.136.3       <none>            6379/TCP      app=redis,role=master  1h
    ...
    ```

    Result: All new pods will see the `redis-master` service running on the host (`$REDIS_MASTER_SERVICE_HOST` environment variable) at port 6379, or running on `redis-master:6379`. After the service is created, the service proxy on each node is configured to set up a proxy on the specified port (in our example, that's port 6379).

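You can confirm the environment-variable side of this from inside any pod created after the service. A minimal sketch, reusing the pod name from above (the values shown match this guide's example output and will differ in your cluster):

```console
$ kubectl exec redis-master-xx4uv -- env | grep REDIS_MASTER_SERVICE
REDIS_MASTER_SERVICE_HOST=10.0.136.3
REDIS_MASTER_SERVICE_PORT=6379
```
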

### Step Three: Create the Redis slave pods <a id="step-three"></a>

The Redis master we created earlier is a single pod (REPLICAS = 1), while the Redis read slaves we are creating here are 'replicated' pods. In Kubernetes, a replication controller is responsible for managing the multiple instances of a replicated pod.

1. Use the file [redis-slave-controller.json](redis-slave-controller.json) to create the replication controller by running the `kubectl create -f` *`filename`* command:

    ```console
    $ kubectl create -f examples/guestbook-go/redis-slave-controller.json
    replicationcontrollers/redis-slave
    ```

2. To verify that the redis-slave controller is running, run the `kubectl get rc` command:

    ```console
    $ kubectl get rc
    CONTROLLER     CONTAINER(S)   IMAGE(S)                    SELECTOR                REPLICAS
    redis-master   redis-master   redis                       app=redis,role=master   1
    redis-slave    redis-slave    kubernetes/redis-slave:v2   app=redis,role=slave    2
    ...
    ```

    Result: The replication controller creates and configures the Redis slave pods through the redis-master service (name:port pair, in our example that's `redis-master:6379`).

    Example:
    The Redis slaves get started by the replication controller with the following command:

    ```console
    redis-server --slaveof redis-master 6379
    ```

3. To verify that the Redis master and slave pods are running, run the `kubectl get pods` command:

    ```console
    $ kubectl get pods
    NAME                 READY     STATUS    RESTARTS   AGE
    redis-master-xx4uv   1/1       Running   0          18m
    redis-slave-b6wj4    1/1       Running   0          1m
    redis-slave-iai40    1/1       Running   0          1m
    ...
    ```

    Result: You see the single Redis master and two Redis slave pods.

### Step Four: Create the Redis slave service <a id="step-four"></a>

Just like the master, we want to have a service to proxy connections to the read slaves. In this case, in addition to discovery, the Redis slave service provides transparent load balancing to clients.

1. Use the [redis-slave-service.json](redis-slave-service.json) file to create the Redis slave service by running the `kubectl create -f` *`filename`* command:

    ```console
    $ kubectl create -f examples/guestbook-go/redis-slave-service.json
    services/redis-slave
    ```

2. To verify that the redis-slave service is up, list the services you created in the cluster with the `kubectl get services` command:

    ```console
    $ kubectl get services
    NAME           CLUSTER_IP   EXTERNAL_IP   PORT(S)    SELECTOR                AGE
    redis-master   10.0.136.3   <none>        6379/TCP   app=redis,role=master   1h
    redis-slave    10.0.21.92   <none>        6379/TCP   app=redis,role=slave    1h
    ...
    ```

    Result: The service is created with labels `app=redis` and `role=slave` to identify that the pods are running the Redis slaves.

Tip: It is helpful to set labels on your services themselves, as we've done here, to make it easy to locate them later.

### Step Five: Create the guestbook pods <a id="step-five"></a>

This is a simple Go `net/http` ([negroni](https://github.com/codegangsta/negroni) based) server that is configured to talk to either the slave or master services depending on whether the request is a read or a write. The pods we are creating expose a simple JSON interface and serve a jQuery-Ajax-based UI. Like the Redis read slaves, these pods are also managed by a replication controller.

1. Use the [guestbook-controller.json](guestbook-controller.json) file to create the guestbook replication controller by running the `kubectl create -f` *`filename`* command:

    ```console
    $ kubectl create -f examples/guestbook-go/guestbook-controller.json
    replicationcontrollers/guestbook
    ```

    Tip: If you want to modify the guestbook code, open the `_src` of this example and read the README.md and the Makefile. If you have pushed your custom image, be sure to update the `image` accordingly in the guestbook-controller.json.

2. To verify that the guestbook replication controller is running, run the `kubectl get rc` command:

    ```console
    $ kubectl get rc
    CONTROLLER     CONTAINER(S)   IMAGE(S)                                 SELECTOR                REPLICAS
    guestbook      guestbook      gcr.io/google_containers/guestbook:v3    app=guestbook           3
    redis-master   redis-master   redis                                    app=redis,role=master   1
    redis-slave    redis-slave    kubernetes/redis-slave:v2                app=redis,role=slave    2
    ...
    ```

3. To verify that the guestbook pods are running (it might take up to thirty seconds to create the pods), list the pods you created in the cluster with the `kubectl get pods` command:

    ```console
    $ kubectl get pods
    NAME                 READY     STATUS    RESTARTS   AGE
    guestbook-3crgn      1/1       Running   0          2m
    guestbook-gv7i6      1/1       Running   0          2m
    guestbook-x405a      1/1       Running   0          2m
    redis-master-xx4uv   1/1       Running   0          23m
    redis-slave-b6wj4    1/1       Running   0          6m
    redis-slave-iai40    1/1       Running   0          6m
    ...
    ```

    Result: You see a single Redis master, two Redis slaves, and three guestbook pods.

### Step Six: Create the guestbook service <a id="step-six"></a>

Just like the others, we create a service to group the guestbook pods. But this time, to make the guestbook front-end externally visible, we specify `"type": "LoadBalancer"`.

1. Use the [guestbook-service.json](guestbook-service.json) file to create the guestbook service by running the `kubectl create -f` *`filename`* command:

    ```console
    $ kubectl create -f examples/guestbook-go/guestbook-service.json
    ```

2. To verify that the guestbook service is up, list the services you created in the cluster with the `kubectl get services` command:

    ```console
    $ kubectl get services
    NAME           CLUSTER_IP     EXTERNAL_IP    PORT(S)    SELECTOR                AGE
    guestbook      10.0.217.218   146.148.81.8   3000/TCP   app=guestbook           1h
    redis-master   10.0.136.3     <none>         6379/TCP   app=redis,role=master   1h
    redis-slave    10.0.21.92     <none>         6379/TCP   app=redis,role=slave    1h
    ...
    ```

    Result: The service is created with label `app=guestbook`.

### Step Seven: View the guestbook <a id="step-seven"></a>

You can now play with the guestbook that you just created by opening it in a browser (it might take a few moments for the guestbook to come up).

* **Local Host:**
    If you are running Kubernetes locally, to view the guestbook, navigate to `http://localhost:3000` in your browser.

* **Remote Host:**
    1. To view the guestbook on a remote host, locate the external IP of the load balancer in the **IP** column of the `kubectl get services` output. In our example, the internal IP address is `10.0.217.218` and the external IP address is `146.148.81.8` (*Note: you might need to scroll to see the IP column*).

    2. Append port `3000` to the IP address (for example `http://146.148.81.8:3000`), and then navigate to that address in your browser.

    Result: The guestbook displays in your browser:

    ![Guestbook](guestbook-page.png)
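
Because the guestbook server exposes a small JSON API (see [main.go](main.go) in this directory: `/lrange/{key}`, `/rpush/{key}/{value}`, `/info`, and `/env`), you can also drive it from the command line. A sketch using this guide's example external IP; substitute your own address, and expect whatever entries you have submitted:

```console
$ curl http://146.148.81.8:3000/rpush/guestbook/hello
[
  "hello"
]
$ curl http://146.148.81.8:3000/lrange/guestbook
[
  "hello"
]
```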

**Further Reading:**
If you're using Google Compute Engine, see the details about limiting traffic to specific sources at [Google Compute Engine firewall documentation][gce-firewall-docs].

[cloud-console]: https://console.developer.google.com
[gce-firewall-docs]: https://cloud.google.com/compute/docs/networking#firewalls

### Step Eight: Cleanup <a id="step-eight"></a>

After you're done playing with the guestbook, you can clean up by deleting the guestbook service and removing the associated resources that were created, including load balancers, forwarding rules, target pools, and Kubernetes replication controllers and services.

Delete all the resources by running the following `kubectl delete -f` *`filename`* command:

```console
$ kubectl delete -f examples/guestbook-go
guestbook-controller
guestbook
redis-master-controller
redis-master
redis-slave-controller
redis-slave
```

Tip: To turn down your Kubernetes cluster, follow the corresponding instructions in the version of the
[Getting Started Guides](../../docs/getting-started-guides/) that you previously used to create your cluster.


<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
[]()
<!-- END MUNGE: GENERATED_ANALYTICS -->
37 vendor/k8s.io/kubernetes/examples/guestbook-go/guestbook-controller.json generated vendored Normal file
@@ -0,0 +1,37 @@
{
   "kind":"ReplicationController",
   "apiVersion":"v1",
   "metadata":{
      "name":"guestbook",
      "labels":{
         "app":"guestbook"
      }
   },
   "spec":{
      "replicas":3,
      "selector":{
         "app":"guestbook"
      },
      "template":{
         "metadata":{
            "labels":{
               "app":"guestbook"
            }
         },
         "spec":{
            "containers":[
               {
                  "name":"guestbook",
                  "image":"gcr.io/google_containers/guestbook:v3",
                  "ports":[
                     {
                        "name":"http-server",
                        "containerPort":3000
                     }
                  ]
               }
            ]
         }
      }
   }
}
BIN vendor/k8s.io/kubernetes/examples/guestbook-go/guestbook-page.png generated vendored Normal file
Binary file not shown. After Width: | Height: | Size: 39 KiB
22 vendor/k8s.io/kubernetes/examples/guestbook-go/guestbook-service.json generated vendored Normal file
@@ -0,0 +1,22 @@
{
   "kind":"Service",
   "apiVersion":"v1",
   "metadata":{
      "name":"guestbook",
      "labels":{
         "app":"guestbook"
      }
   },
   "spec":{
      "ports": [
         {
            "port":3000,
            "targetPort":"http-server"
         }
      ],
      "selector":{
         "app":"guestbook"
      },
      "type": "LoadBalancer"
   }
}
91 vendor/k8s.io/kubernetes/examples/guestbook-go/main.go generated vendored Normal file
@@ -0,0 +1,91 @@
/*
Copyright 2014 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package main

import (
    "encoding/json"
    "net/http"
    "os"
    "strings"

    "github.com/codegangsta/negroni"
    "github.com/gorilla/mux"
    "github.com/xyproto/simpleredis"
)

var (
    masterPool *simpleredis.ConnectionPool
    slavePool  *simpleredis.ConnectionPool
)

// ListRangeHandler reads the list named by {key} from a Redis slave and
// returns its members as indented JSON.
func ListRangeHandler(rw http.ResponseWriter, req *http.Request) {
    key := mux.Vars(req)["key"]
    list := simpleredis.NewList(slavePool, key)
    members := HandleError(list.GetAll()).([]string)
    membersJSON := HandleError(json.MarshalIndent(members, "", "  ")).([]byte)
    rw.Write(membersJSON)
}

// ListPushHandler appends {value} to the list named by {key} on the Redis
// master, then echoes the updated list back to the client.
func ListPushHandler(rw http.ResponseWriter, req *http.Request) {
    key := mux.Vars(req)["key"]
    value := mux.Vars(req)["value"]
    list := simpleredis.NewList(masterPool, key)
    HandleError(nil, list.Add(value))
    ListRangeHandler(rw, req)
}

// InfoHandler reports the output of the Redis INFO command on the master.
func InfoHandler(rw http.ResponseWriter, req *http.Request) {
    info := HandleError(masterPool.Get(0).Do("INFO")).([]byte)
    rw.Write(info)
}

// EnvHandler dumps the container's environment variables as indented JSON.
func EnvHandler(rw http.ResponseWriter, req *http.Request) {
    environment := make(map[string]string)
    for _, item := range os.Environ() {
        splits := strings.Split(item, "=")
        key := splits[0]
        val := strings.Join(splits[1:], "=")
        environment[key] = val
    }

    envJSON := HandleError(json.MarshalIndent(environment, "", "  ")).([]byte)
    rw.Write(envJSON)
}

// HandleError panics on err and otherwise passes result through.
func HandleError(result interface{}, err error) (r interface{}) {
    if err != nil {
        panic(err)
    }
    return result
}

func main() {
    // The host names below are the Kubernetes service names, resolved via
    // cluster DNS (or service environment variables).
    masterPool = simpleredis.NewConnectionPoolHost("redis-master:6379")
    defer masterPool.Close()
    slavePool = simpleredis.NewConnectionPoolHost("redis-slave:6379")
    defer slavePool.Close()

    r := mux.NewRouter()
    r.Path("/lrange/{key}").Methods("GET").HandlerFunc(ListRangeHandler)
    r.Path("/rpush/{key}/{value}").Methods("GET").HandlerFunc(ListPushHandler)
    r.Path("/info").Methods("GET").HandlerFunc(InfoHandler)
    r.Path("/env").Methods("GET").HandlerFunc(EnvHandler)

    n := negroni.Classic()
    n.UseHandler(r)
    n.Run(":3000")
}
34 vendor/k8s.io/kubernetes/examples/guestbook-go/public/index.html generated vendored Normal file
@@ -0,0 +1,34 @@
<!DOCTYPE html>
<html lang="en">
  <head>
    <meta content="text/html; charset=utf-8" http-equiv="Content-Type">
    <meta charset="utf-8">
    <meta content="width=device-width" name="viewport">
    <link href="style.css" rel="stylesheet">
    <title>Guestbook</title>
  </head>
  <body>
    <div id="header">
      <h1>Guestbook</h1>
    </div>

    <div id="guestbook-entries">
      <p>Waiting for database connection...</p>
    </div>

    <div>
      <form id="guestbook-form">
        <input autocomplete="off" id="guestbook-entry-content" type="text">
        <a href="#" id="guestbook-submit">Submit</a>
      </form>
    </div>

    <div>
      <p><h2 id="guestbook-host-address"></h2></p>
      <p><a href="env">/env</a>
      <a href="info">/info</a></p>
    </div>

    <script src="//ajax.googleapis.com/ajax/libs/jquery/2.1.1/jquery.min.js"></script>
    <script src="script.js"></script>
  </body>
</html>
46 vendor/k8s.io/kubernetes/examples/guestbook-go/public/script.js generated vendored Normal file
@@ -0,0 +1,46 @@
$(document).ready(function() {
  var headerTitleElement = $("#header h1");
  var entriesElement = $("#guestbook-entries");
  var formElement = $("#guestbook-form");
  var submitElement = $("#guestbook-submit");
  var entryContentElement = $("#guestbook-entry-content");
  var hostAddressElement = $("#guestbook-host-address");

  var appendGuestbookEntries = function(data) {
    entriesElement.empty();
    $.each(data, function(key, val) {
      entriesElement.append("<p>" + val + "</p>");
    });
  }

  var handleSubmission = function(e) {
    e.preventDefault();
    var entryValue = entryContentElement.val()
    if (entryValue.length > 0) {
      entriesElement.append("<p>...</p>");
      $.getJSON("rpush/guestbook/" + entryValue, appendGuestbookEntries);
    }
    return false;
  }

  // colors = purple, blue, red, green, yellow
  var colors = ["#549", "#18d", "#d31", "#2a4", "#db1"];
  var randomColor = colors[Math.floor(5 * Math.random())];
  (function setElementsColor(color) {
    headerTitleElement.css("color", color);
    entryContentElement.css("box-shadow", "inset 0 0 0 2px " + color);
    submitElement.css("background-color", color);
  })(randomColor);

  submitElement.click(handleSubmission);
  formElement.submit(handleSubmission);
  hostAddressElement.append(document.URL);

  // Poll every second.
  (function fetchGuestbook() {
    $.getJSON("lrange/guestbook").done(appendGuestbookEntries).always(
      function() {
        setTimeout(fetchGuestbook, 1000);
      });
  })();
});
61 vendor/k8s.io/kubernetes/examples/guestbook-go/public/style.css generated vendored Normal file
@@ -0,0 +1,61 @@
body, input {
  color: #123;
  font-family: "Gill Sans", sans-serif;
}

div {
  overflow: hidden;
  padding: 1em 0;
  position: relative;
  text-align: center;
}

h1, h2, p, input, a {
  font-weight: 300;
  margin: 0;
}

h1 {
  color: #BDB76B;
  font-size: 3.5em;
}

h2 {
  color: #999;
}

form {
  margin: 0 auto;
  max-width: 50em;
  text-align: center;
}

input {
  border: 0;
  border-radius: 1000px;
  box-shadow: inset 0 0 0 2px #BDB76B;
  display: inline;
  font-size: 1.5em;
  margin-bottom: 1em;
  outline: none;
  padding: .5em 5%;
  width: 55%;
}

form a {
  background: #BDB76B;
  border: 0;
  border-radius: 1000px;
  color: #FFF;
  font-size: 1.25em;
  font-weight: 400;
  padding: .75em 2em;
  text-decoration: none;
  text-transform: uppercase;
  white-space: normal;
}

p {
  font-size: 1.5em;
  line-height: 1.5;
}
40 vendor/k8s.io/kubernetes/examples/guestbook-go/redis-master-controller.json generated vendored Normal file
@@ -0,0 +1,40 @@
{
   "kind":"ReplicationController",
   "apiVersion":"v1",
   "metadata":{
      "name":"redis-master",
      "labels":{
         "app":"redis",
         "role":"master"
      }
   },
   "spec":{
      "replicas":1,
      "selector":{
         "app":"redis",
         "role":"master"
      },
      "template":{
         "metadata":{
            "labels":{
               "app":"redis",
               "role":"master"
            }
         },
         "spec":{
            "containers":[
               {
                  "name":"redis-master",
                  "image":"redis:2.8.23",
                  "ports":[
                     {
                        "name":"redis-server",
                        "containerPort":6379
                     }
                  ]
               }
            ]
         }
      }
   }
}
23 vendor/k8s.io/kubernetes/examples/guestbook-go/redis-master-service.json generated vendored Normal file
@@ -0,0 +1,23 @@
{
   "kind":"Service",
   "apiVersion":"v1",
   "metadata":{
      "name":"redis-master",
      "labels":{
         "app":"redis",
         "role":"master"
      }
   },
   "spec":{
      "ports": [
         {
            "port":6379,
            "targetPort":"redis-server"
         }
      ],
      "selector":{
         "app":"redis",
         "role":"master"
      }
   }
}
40 vendor/k8s.io/kubernetes/examples/guestbook-go/redis-slave-controller.json generated vendored Normal file
@@ -0,0 +1,40 @@
{
   "kind":"ReplicationController",
   "apiVersion":"v1",
   "metadata":{
      "name":"redis-slave",
      "labels":{
         "app":"redis",
         "role":"slave"
      }
   },
   "spec":{
      "replicas":2,
      "selector":{
         "app":"redis",
         "role":"slave"
      },
      "template":{
         "metadata":{
            "labels":{
               "app":"redis",
               "role":"slave"
            }
         },
         "spec":{
            "containers":[
               {
                  "name":"redis-slave",
                  "image":"kubernetes/redis-slave:v2",
                  "ports":[
                     {
                        "name":"redis-server",
                        "containerPort":6379
                     }
                  ]
               }
            ]
         }
      }
   }
}
23 vendor/k8s.io/kubernetes/examples/guestbook-go/redis-slave-service.json generated vendored Normal file
@@ -0,0 +1,23 @@
{
   "kind":"Service",
   "apiVersion":"v1",
   "metadata":{
      "name":"redis-slave",
      "labels":{
         "app":"redis",
         "role":"slave"
      }
   },
   "spec":{
      "ports": [
         {
            "port":6379,
            "targetPort":"redis-server"
         }
      ],
      "selector":{
         "app":"redis",
         "role":"slave"
      }
   }
}
702 vendor/k8s.io/kubernetes/examples/guestbook/README.md generated vendored Normal file
@@ -0,0 +1,702 @@
|
||||
## Guestbook Example
|
||||
|
||||
This example shows how to build a simple, multi-tier web application using Kubernetes and [Docker](https://www.docker.com/).
|
||||
|
||||
**Table of Contents**
|
||||
<!-- BEGIN MUNGE: GENERATED_TOC -->
|
||||
|
||||
- [Guestbook Example](#guestbook-example)
|
||||
- [Prerequisites](#prerequisites)
|
||||
- [Quick Start](#quick-start)
|
||||
- [Step One: Start up the redis master](#step-one-start-up-the-redis-master)
|
||||
- [Define a Deployment](#define-a-deployment)
|
||||
- [Define a Service](#define-a-service)
|
||||
- [Create a Service](#create-a-service)
|
||||
- [Finding a Service](#finding-a-service)
|
||||
- [Environment variables](#environment-variables)
|
||||
- [DNS service](#dns-service)
|
||||
- [Create a Deployment](#create-a-deployment)
|
||||
- [Optional Interlude](#optional-interlude)
|
||||
- [Step Two: Start up the redis slave](#step-two-start-up-the-redis-slave)
|
||||
- [Step Three: Start up the guestbook frontend](#step-three-start-up-the-guestbook-frontend)
|
||||
- [Using 'type: LoadBalancer' for the frontend service (cloud-provider-specific)](#using-type-loadbalancer-for-the-frontend-service-cloud-provider-specific)
|
||||
- [Step Four: Cleanup](#step-four-cleanup)
|
||||
- [Troubleshooting](#troubleshooting)
|
||||
- [Appendix: Accessing the guestbook site externally](#appendix-accessing-the-guestbook-site-externally)
|
||||
- [Google Compute Engine External Load Balancer Specifics](#google-compute-engine-external-load-balancer-specifics)
|
||||
|
||||
<!-- END MUNGE: GENERATED_TOC -->
|
||||
|
||||
The example consists of:
|
||||
|
||||
- A web frontend
|
||||
- A [redis](http://redis.io/) master (for storage), and a replicated set of redis 'slaves'.
|
||||
|
||||
The web frontend interacts with the redis master via javascript redis API calls.
|
||||
|
||||
**Note**: If you are running this example on a [Google Container Engine](https://cloud.google.com/container-engine/) installation, see [this Google Container Engine guestbook walkthrough](https://cloud.google.com/container-engine/docs/tutorials/guestbook) instead. The basic concepts are the same, but the walkthrough is tailored to a Container Engine setup.
|
||||
|
||||
### Prerequisites
|
||||
|
||||
This example requires a running Kubernetes cluster. First, check that kubectl is properly configured by getting the cluster state:
|
||||
|
||||
```console
|
||||
$ kubectl cluster-info
|
||||
```
|
||||
|
||||
If you see a url response, you are ready to go. If not, read the [Getting Started guides](http://kubernetes.io/docs/getting-started-guides/) for how to get started, and follow the [prerequisites](http://kubernetes.io/docs/user-guide/prereqs/) to install and configure `kubectl`. As noted above, if you have a Google Container Engine cluster set up, read [this example](https://cloud.google.com/container-engine/docs/tutorials/guestbook) instead.
|
||||
|
||||
All the files referenced in this example can be downloaded in [current folder](./).
|
||||
|
||||
### Quick Start
|
||||
|
||||
This section shows the simplest way to get the example work. If you want to know the details, you should skip this and read [the rest of the example](#step-one-start-up-the-redis-master).
|
||||
|
||||
Start the guestbook with one command:
|
||||
|
||||
```console
|
||||
$ kubectl create -f examples/guestbook/all-in-one/guestbook-all-in-one.yaml
|
||||
service "redis-master" created
|
||||
deployment "redis-master" created
|
||||
service "redis-slave" created
|
||||
deployment "redis-slave" created
|
||||
service "frontend" created
|
||||
deployment "frontend" created
|
||||
```
|
||||
|
||||
Alternatively, you can start the guestbook by running:
|
||||
|
||||
```console
|
||||
$ kubectl create -f examples/guestbook/
|
||||
```
|
||||
|
||||
Then, list all your Services:
|
||||
|
||||
```console
|
||||
$ kubectl get services
|
||||
NAME CLUSTER-IP EXTERNAL-IP PORT(S) AGE
|
||||
frontend 10.0.0.117 <none> 80/TCP 20s
|
||||
redis-master 10.0.0.170 <none> 6379/TCP 20s
|
||||
redis-slave 10.0.0.201 <none> 6379/TCP 20s
|
||||
```
|
||||
|
||||
Now you can access the guestbook on each node with frontend Service's `<Cluster-IP>:<PORT>`, e.g. `10.0.0.117:80` in this guide. `<Cluster-IP>` is a cluster-internal IP. If you want to access the guestbook from outside of the cluster, add `type: NodePort` to the frontend Service `spec` field. Then you can access the guestbook with `<NodeIP>:NodePort` from outside of the cluster. On cloud providers which support external load balancers, adding `type: LoadBalancer` to the frontend Service `spec` field will provision a load balancer for your Service. There are several ways for you to access the guestbook. You may learn from [Accessing services running on the cluster](../../docs/user-guide/accessing-the-cluster.md#accessing-services-running-on-the-cluster).
|
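
For a quick look without editing the Service at all, `kubectl port-forward` also works. A sketch; the frontend pod name here is a placeholder, so substitute one from `kubectl get pods`:

```console
$ kubectl port-forward <FRONTEND-POD-NAME> 8080:80
```

Then browse to `http://localhost:8080` on your workstation.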

Clean up the guestbook:

```console
$ kubectl delete -f examples/guestbook/all-in-one/guestbook-all-in-one.yaml
```

or

```console
$ kubectl delete -f examples/guestbook/
```


### Step One: Start up the redis master

Before continuing to the gory details, we also recommend you read the [Quick walkthrough](../../docs/user-guide/#quick-walkthrough), [Thorough walkthrough](../../docs/user-guide/#thorough-walkthrough) and [Concept guide](../../docs/user-guide/#concept-guide).
**Note**: The redis master in this example is *not* highly available. Making it highly available would be an interesting, but intricate, exercise: redis doesn't actually support multi-master Deployments at this point in time, so high availability would be a somewhat tricky thing to implement, and might involve periodic serialization to disk, and so on.

#### Define a Deployment

To start the redis master, use the file `examples/guestbook/redis-master-deployment.yaml`, which describes a single [pod](../../docs/user-guide/pods.md) running a redis key-value server in a container.

Although we have a single instance of our redis master, we are using a [Deployment](../../docs/user-guide/deployments.md) to enforce that exactly one pod keeps running. E.g., if the node were to go down, the Deployment will ensure that the redis master gets restarted on a healthy node. (In our simplified example, this could result in data loss.)

The file `examples/guestbook/redis-master-deployment.yaml` defines the redis master Deployment:

<!-- BEGIN MUNGE: EXAMPLE redis-master-deployment.yaml -->

```yaml
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: redis-master
  # these labels can be applied automatically
  # from the labels in the pod template if not set
  # labels:
  #   app: redis
  #   role: master
  #   tier: backend
spec:
  # this replicas value is default
  # modify it according to your case
  replicas: 1
  # selector can be applied automatically
  # from the labels in the pod template if not set
  # selector:
  #   matchLabels:
  #     app: guestbook
  #     role: master
  #     tier: backend
  template:
    metadata:
      labels:
        app: redis
        role: master
        tier: backend
    spec:
      containers:
      - name: master
        image: gcr.io/google_containers/redis:e2e  # or just image: redis
        resources:
          requests:
            cpu: 100m
            memory: 100Mi
        ports:
        - containerPort: 6379
```

[Download example](redis-master-deployment.yaml?raw=true)
<!-- END MUNGE: EXAMPLE redis-master-deployment.yaml -->

#### Define a Service

A Kubernetes [Service](../../docs/user-guide/services.md) is a named load balancer that proxies traffic to one or more containers. This is done using the [labels](../../docs/user-guide/labels.md) metadata that we defined in the `redis-master` pod above. As mentioned, we have only one redis master, but we nevertheless want to create a Service for it. Why? Because it gives us a deterministic way to route to the single master using an elastic IP.

Services find the pods to load balance based on the pods' labels.
The selector field of the Service description determines which pods will receive the traffic sent to the Service, and the `port` and `targetPort` information defines what port the Service proxy will run at.

The file `examples/guestbook/redis-master-service.yaml` defines the redis master Service:

<!-- BEGIN MUNGE: EXAMPLE redis-master-service.yaml -->

```yaml
apiVersion: v1
kind: Service
metadata:
  name: redis-master
  labels:
    app: redis
    role: master
    tier: backend
spec:
  ports:
    # the port that this service should serve on
  - port: 6379
    targetPort: 6379
  selector:
    app: redis
    role: master
    tier: backend
```

[Download example](redis-master-service.yaml?raw=true)
<!-- END MUNGE: EXAMPLE redis-master-service.yaml -->

#### Create a Service

According to the [config best practices](../../docs/user-guide/config-best-practices.md), create a Service before the corresponding Deployments so that the scheduler can spread the pods comprising the Service. So we first create the Service by running:

```console
$ kubectl create -f examples/guestbook/redis-master-service.yaml
service "redis-master" created
```

Then check the list of services, which should include the redis-master:

```console
$ kubectl get services
NAME           CLUSTER-IP    EXTERNAL-IP   PORT(S)    AGE
redis-master   10.0.76.248   <none>        6379/TCP   1s
```

This will cause all pods to see the redis master apparently running on `<CLUSTER-IP>:<PORT>`. A Service can map an incoming port to any `targetPort` in the backend pod. Once created, the Service proxy on each node is configured to set up a proxy on the specified port (in this case port `6379`).

`targetPort` will default to `port` if it is omitted in the configuration. `targetPort` is the port the container accepts traffic on, and `port` is the abstracted Service port, which can be any port other pods use to access the Service. For simplicity's sake, we omit it in the following configurations.

The traffic flow from slaves to masters can be described in two steps:

- A *redis slave* will connect to `port` on the *redis master Service*
- Traffic will be forwarded from the Service `port` (on the Service node) to the `targetPort` on the pod that the Service listens to.

For more details, please see [Connecting applications](../../docs/user-guide/connecting-applications.md).
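
One way to see the `port`-to-`targetPort` mapping in action is to look at the Service's endpoints, which list the actual pod-IP:`targetPort` pairs the proxy forwards to. An illustrative check (the address shown matches the example pod IP later in this walkthrough; yours will differ):

```console
$ kubectl get endpoints redis-master
NAME           ENDPOINTS         AGE
redis-master   10.244.2.3:6379   1m
```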

#### Finding a Service

Kubernetes supports two primary modes of finding a Service: environment variables and DNS.

##### Environment variables

The services in a Kubernetes cluster are discoverable inside other containers [via environment variables](../../docs/user-guide/services.md#environment-variables).

##### DNS service

An alternative is to use the [cluster's DNS service](../../docs/user-guide/services.md#dns), if it has been enabled for the cluster. This lets all pods do name resolution of services automatically, based on the Service name.

This example has been configured to use the DNS service by default.

If your cluster does not have the DNS service enabled, then you can use environment variables by setting the
`GET_HOSTS_FROM` env value in both
`examples/guestbook/redis-slave-deployment.yaml` and `examples/guestbook/frontend-deployment.yaml`
from `dns` to `env` before you start up the app.
(However, this is unlikely to be necessary. You can check for the DNS service in the list of the cluster's services by
running `kubectl --namespace=kube-system get rc -l k8s-app=kube-dns`.)
Note that switching to `env` causes creation-order dependencies, since Services need to be created before their clients that require env vars.
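
To spot-check that DNS-based discovery is working, you can resolve a Service name from inside a pod. This sketch assumes the pod's image ships an `nslookup` binary, and `<POD-NAME>` is any running pod from `kubectl get pods`:

```console
$ kubectl exec <POD-NAME> -- nslookup redis-master
```

The lookup should return the Service's cluster IP (`10.0.76.248` in the example above).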
||||
|
||||
#### Create a Deployment
|
||||
|
||||
Second, create the redis master pod in your Kubernetes cluster by running:
|
||||
|
||||
```console
|
||||
$ kubectl create -f examples/guestbook/redis-master-deployment.yaml
|
||||
deployment "redis-master" created
|
||||
```
|
||||
|
||||
You can see the Deployment for your cluster by running:
|
||||
|
||||
```console
|
||||
$ kubectl get deployments
|
||||
NAME DESIRED CURRENT UP-TO-DATE AVAILABLE AGE
|
||||
redis-master 1 1 1 1 27s
|
||||
```
|
||||
|
||||
Then, you can list the pods in the cluster, to verify that the master is running:
|
||||
|
||||
```console
|
||||
$ kubectl get pods
|
||||
```
|
||||
|
||||
You'll see all pods in the cluster, including the redis master pod, and the status of each pod.
|
||||
The name of the redis master will look similar to that in the following list:
|
||||
|
||||
```console
|
||||
NAME READY STATUS RESTARTS AGE
|
||||
redis-master-2353460263-1ecey 1/1 Running 0 1m
|
||||
...
|
||||
```
|
||||
|
||||
(Note that an initial `docker pull` to grab a container image may take a few minutes, depending on network conditions. A pod will be reported as `Pending` while its image is being downloaded.)
|
||||
|
||||
`kubectl get pods` will show only the pods in the default [namespace](../../docs/user-guide/namespaces.md). To see pods in all namespaces, run:
|
||||
|
||||
```
|
||||
kubectl get pods --all-namespaces
|
||||
```
|
||||
|
||||
For more details, please see [Configuring containers](../../docs/user-guide/configuring-containers.md) and [Deploying applications](../../docs/user-guide/deploying-applications.md).
|
||||
|
||||
#### Optional Interlude
|
||||
|
||||
You can get information about a pod, including the machine that it is running on, via `kubectl describe pods/<POD-NAME>`. E.g., for the redis master, you should see something like the following (your pod name will be different):
|
||||
|
||||
```console
|
||||
$ kubectl describe pods redis-master-2353460263-1ecey
|
||||
Name: redis-master-2353460263-1ecey
|
||||
Node: kubernetes-node-m0k7/10.240.0.5
|
||||
...
|
||||
Labels: app=redis,pod-template-hash=2353460263,role=master,tier=backend
|
||||
Status: Running
|
||||
IP: 10.244.2.3
|
||||
Controllers: ReplicaSet/redis-master-2353460263
|
||||
Containers:
|
||||
master:
|
||||
Container ID: docker://76cf8115485966131587958ea3cbe363e2e1dcce129e2e624883f393ce256f6c
|
||||
Image: gcr.io/google_containers/redis:e2e
|
||||
Image ID: docker://e5f6c5a2b5646828f51e8e0d30a2987df7e8183ab2c3ed0ca19eaa03cc5db08c
|
||||
Port: 6379/TCP
|
||||
...
|
||||
```
|
||||
|
||||
The `Node` is the name and IP of the machine, e.g. `kubernetes-node-m0k7` in the example above. You can find more details about this node with `kubectl describe nodes kubernetes-node-m0k7`.
|
||||
|
||||
If you want to view the container logs for a given pod, you can run:
|
||||
|
||||
```console
|
||||
$ kubectl logs <POD-NAME>
|
||||
```
|
||||
|
||||
These logs will usually give you enough information to troubleshoot.
|
||||
|
||||
However, if you should want to SSH to the listed host machine, you can inspect various logs there directly as well. For example, with Google Compute Engine, using `gcloud`, you can SSH like this:
|
||||
|
||||
```console
|
||||
me@workstation$ gcloud compute ssh <NODE-NAME>
|
||||
```
|
||||
|
||||
Then, you can look at the Docker containers on the remote machine. You should see something like this (the specifics of the IDs will be different):
|
||||
|
||||
```console
|
||||
me@kubernetes-node-krxw:~$ sudo docker ps
|
||||
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
|
||||
...
|
||||
0ffef9649265 redis:latest "/entrypoint.sh redi" About a minute ago Up About a minute k8s_master.869d22f3_redis-master-dz33o_default_1449a58a-5ead-11e5-a104-688f84ef8ef6_d74cb2b5
|
||||
```
|
||||
|
||||
If you want to see the logs for a given container, you can run:
|
||||
|
||||
```console
|
||||
$ docker logs <container_id>
|
||||
```
|
||||
|
||||
### Step Two: Start up the redis slave
|
||||
|
||||
Now that the redis master is running, we can start up its 'read slaves'.
|
||||
|
||||
We'll define these as replicated pods as well, though this time — unlike for the redis master — we'll define the number of replicas to be 2.
|
||||
In Kubernetes, a Deployment is responsible for managing multiple instances of a replicated pod. The Deployment will automatically launch new pods if the number of replicas falls below the specified number.
|
||||
(This particular replicated pod is a great one to test this with -- you can try killing the Docker processes for your pods directly, then watch them come back online on a new node shortly thereafter.)
|
||||
|
||||
Just like the master, we want to have a Service to proxy connections to the redis slaves. In this case, in addition to discovery, the slave Service will provide transparent load balancing to web app clients.
|
||||
|
||||
This time we put the Service and Deployment into one [file](../../docs/user-guide/managing-deployments.md#organizing-resource-configurations). Grouping related objects together in a single file is often better than having separate files.
|
||||
The specification for the slaves is in `examples/guestbook/all-in-one/redis-slave.yaml`:
|
||||
|
||||
<!-- BEGIN MUNGE: EXAMPLE all-in-one/redis-slave.yaml -->
|
||||
|
||||
```yaml
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: redis-slave
|
||||
labels:
|
||||
app: redis
|
||||
role: slave
|
||||
tier: backend
|
||||
spec:
|
||||
ports:
|
||||
# the port that this service should serve on
|
||||
- port: 6379
|
||||
selector:
|
||||
app: redis
|
||||
role: slave
|
||||
tier: backend
|
||||
---
|
||||
apiVersion: extensions/v1beta1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: redis-slave
|
||||
# these labels can be applied automatically
|
||||
# from the labels in the pod template if not set
|
||||
# labels:
|
||||
# app: redis
|
||||
# role: slave
|
||||
# tier: backend
|
||||
spec:
|
||||
# this replicas value is default
|
||||
# modify it according to your case
|
||||
replicas: 2
|
||||
# selector can be applied automatically
|
||||
# from the labels in the pod template if not set
|
||||
# selector:
|
||||
# matchLabels:
|
||||
# app: guestbook
|
||||
# role: slave
|
||||
# tier: backend
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: redis
|
||||
role: slave
|
||||
tier: backend
|
||||
spec:
|
||||
containers:
|
||||
- name: slave
|
||||
image: gcr.io/google_samples/gb-redisslave:v1
|
||||
resources:
|
||||
requests:
|
||||
cpu: 100m
|
||||
memory: 100Mi
|
||||
env:
|
||||
- name: GET_HOSTS_FROM
|
||||
value: dns
|
||||
# If your cluster config does not include a dns service, then to
|
||||
# instead access an environment variable to find the master
|
||||
# service's host, comment out the 'value: dns' line above, and
|
||||
# uncomment the line below.
|
||||
# value: env
|
||||
ports:
|
||||
- containerPort: 6379
|
||||
```
|
||||
|
||||
[Download example](all-in-one/redis-slave.yaml?raw=true)
|
||||
<!-- END MUNGE: EXAMPLE all-in-one/redis-slave.yaml -->
|
||||
|
||||
This time the selector for the Service is `app=redis,role=slave,tier=backend`, because that identifies the pods running redis slaves. It is generally helpful to set labels on your Service itself as we've done here to make it easy to locate them with the `kubectl get services -l "app=redis,role=slave,tier=backend"` command. For more information on the usage of labels, see [using-labels-effectively](../../docs/user-guide/managing-deployments.md#using-labels-effectively).
|
||||
Now that you have created the specification, create the Service and Deployment in your cluster by running:

```console
$ kubectl create -f examples/guestbook/all-in-one/redis-slave.yaml
service "redis-slave" created
deployment "redis-slave" created

$ kubectl get services
NAME           CLUSTER-IP     EXTERNAL-IP   PORT(S)    AGE
redis-master   10.0.76.248    <none>        6379/TCP   20m
redis-slave    10.0.112.188   <none>        6379/TCP   16s

$ kubectl get deployments
NAME           DESIRED   CURRENT   UP-TO-DATE   AVAILABLE   AGE
redis-master   1         1         1            1           22m
redis-slave    2         2         2            2           2m
```

Once the Deployment is up, you can list the pods in the cluster to verify that the master and slaves are running. You should see a list that includes something like the following:

```console
$ kubectl get pods
NAME                            READY     STATUS    RESTARTS   AGE
redis-master-2353460263-1ecey   1/1       Running   0          35m
redis-slave-1691881626-dlf5f    1/1       Running   0          15m
redis-slave-1691881626-sfn8t    1/1       Running   0          15m
```

You should see a single redis master pod and two redis slave pods. As mentioned above, you can get more information about any pod with `kubectl describe pods/<POD_NAME>`. You can also view the resources in [kube-ui](../../docs/user-guide/ui.md).
### Step Three: Start up the guestbook frontend

A frontend pod is a simple PHP server that is configured to talk to either the slave or master Service, depending on whether the client request is a read or a write. It exposes a simple AJAX interface, and serves an Angular-based UX.
Again we'll create a set of replicated frontend pods instantiated by a Deployment, this time with three replicas.

As with the other pods, we now want to create a Service to group the frontend pods.
The Deployment and Service are described in the file `frontend.yaml`:

<!-- BEGIN MUNGE: EXAMPLE all-in-one/frontend.yaml -->

```yaml
apiVersion: v1
kind: Service
metadata:
  name: frontend
  labels:
    app: guestbook
    tier: frontend
spec:
  # if your cluster supports it, uncomment the following to automatically create
  # an external load-balanced IP for the frontend service.
  # type: LoadBalancer
  ports:
    # the port that this service should serve on
  - port: 80
  selector:
    app: guestbook
    tier: frontend
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: frontend
  # these labels can be applied automatically
  # from the labels in the pod template if not set
  # labels:
  #   app: guestbook
  #   tier: frontend
spec:
  # this replicas value is default
  # modify it according to your case
  replicas: 3
  # selector can be applied automatically
  # from the labels in the pod template if not set
  # selector:
  #   matchLabels:
  #     app: guestbook
  #     tier: frontend
  template:
    metadata:
      labels:
        app: guestbook
        tier: frontend
    spec:
      containers:
      - name: php-redis
        image: gcr.io/google-samples/gb-frontend:v4
        resources:
          requests:
            cpu: 100m
            memory: 100Mi
        env:
        - name: GET_HOSTS_FROM
          value: dns
          # If your cluster config does not include a dns service, then to
          # instead access environment variables to find service host
          # info, comment out the 'value: dns' line above, and uncomment the
          # line below.
          # value: env
        ports:
        - containerPort: 80
```

[Download example](all-in-one/frontend.yaml?raw=true)
<!-- END MUNGE: EXAMPLE all-in-one/frontend.yaml -->
#### Using 'type: LoadBalancer' for the frontend service (cloud-provider-specific)

For supported cloud providers, such as Google Compute Engine or Google Container Engine, you can specify that the service use an external load balancer in the service `spec`, to expose the service on an external load balancer IP. To do this, uncomment the `type: LoadBalancer` line in the `frontend.yaml` file before you start the service.

[See the appendix below](#appendix-accessing-the-guestbook-site-externally) on accessing the guestbook site externally for more details.
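If you would rather not edit and re-create the file, you can also switch an existing Service over after the fact. A minimal sketch, assuming the `frontend` Service has already been created (the exact confirmation message may differ by client version):

```console
$ kubectl patch service frontend -p '{"spec": {"type": "LoadBalancer"}}'
"frontend" patched
```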
Create the Service and Deployment like this:

```console
$ kubectl create -f examples/guestbook/all-in-one/frontend.yaml
service "frontend" created
deployment "frontend" created
```

Then, list all your services again:

```console
$ kubectl get services
NAME           CLUSTER-IP     EXTERNAL-IP   PORT(S)    AGE
frontend       10.0.63.63     <none>        80/TCP     1m
redis-master   10.0.76.248    <none>        6379/TCP   39m
redis-slave    10.0.112.188   <none>        6379/TCP   19m
```

Also list all your Deployments:

```console
$ kubectl get deployments
NAME           DESIRED   CURRENT   UP-TO-DATE   AVAILABLE   AGE
frontend       3         3         3            3           2m
redis-master   1         1         1            1           39m
redis-slave    2         2         2            2           20m
```

Once it's up (i.e. once the desired replica count matches the current count; again, it may take up to thirty seconds to create the pods), you can list the pods with the specified labels to verify that the master, slaves and frontends are all running. You should see a list of pods with a `tier` label like the following:

```console
$ kubectl get pods -L tier
NAME                            READY     STATUS    RESTARTS   AGE       TIER
frontend-1211764471-4e1j2       1/1       Running   0          4m        frontend
frontend-1211764471-gkbkv       1/1       Running   0          4m        frontend
frontend-1211764471-rk1cf       1/1       Running   0          4m        frontend
redis-master-2353460263-1ecey   1/1       Running   0          42m       backend
redis-slave-1691881626-dlf5f    1/1       Running   0          22m       backend
redis-slave-1691881626-sfn8t    1/1       Running   0          22m       backend
```
You should see a single redis master pod, two redis slaves, and three frontend pods.

The code for the PHP server that the frontends are running is in `examples/guestbook/php-redis/guestbook.php`. It looks like this:
```php
<?php

set_include_path('.:/usr/local/lib/php');

error_reporting(E_ALL);
ini_set('display_errors', 1);

require 'Predis/Autoloader.php';

Predis\Autoloader::register();

if (isset($_GET['cmd']) === true) {
  $host = 'redis-master';
  if (getenv('GET_HOSTS_FROM') == 'env') {
    $host = getenv('REDIS_MASTER_SERVICE_HOST');
  }
  header('Content-Type: application/json');
  if ($_GET['cmd'] == 'set') {
    $client = new Predis\Client([
      'scheme' => 'tcp',
      'host'   => $host,
      'port'   => 6379,
    ]);

    $client->set($_GET['key'], $_GET['value']);
    print('{"message": "Updated"}');
  } else {
    $host = 'redis-slave';
    if (getenv('GET_HOSTS_FROM') == 'env') {
      $host = getenv('REDIS_SLAVE_SERVICE_HOST');
    }
    $client = new Predis\Client([
      'scheme' => 'tcp',
      'host'   => $host,
      'port'   => 6379,
    ]);

    $value = $client->get($_GET['key']);
    print('{"data": "' . $value . '"}');
  }
} else {
  phpinfo();
} ?>
```
Note the use of the `redis-master` and `redis-slave` host names: we're finding those Services via the Kubernetes cluster's DNS service, as discussed above. All the frontend replicas send their writes to the `redis-master` Service and their reads to the load-balancing `redis-slave` Service, which can be highly replicated as well.
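You can check this DNS-based discovery from inside a frontend pod. A quick sketch, assuming the pod name from the earlier listing and that the frontend image includes the standard `getent` utility (output illustrative):

```console
$ kubectl exec frontend-1211764471-4e1j2 -- getent hosts redis-master
10.0.76.248     redis-master.default.svc.cluster.local
```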
### Step Four: Cleanup

If you are in a live Kubernetes cluster, you can just kill the pods by deleting the Deployments and Services. Using labels to select the resources to delete is an easy way to do this in one command:
```console
$ kubectl delete deployments,services -l "app in (redis, guestbook)"
```

To completely tear down a Kubernetes cluster, if you ran this from source, you can use:

```console
$ <kubernetes>/cluster/kube-down.sh
```
### Troubleshooting

If you are having trouble bringing up your guestbook app, double check that your external IP is properly defined for your frontend Service, and that the firewall for your cluster nodes is open to port 80.

Then, see the [troubleshooting documentation](http://kubernetes.io/docs/troubleshooting/) for a further list of common issues and how you can diagnose them.
### Appendix: Accessing the guestbook site externally

You'll want to set up your guestbook Service so that it can be accessed from outside of the internal Kubernetes network. Above, we introduced one way to do that, by setting `type: LoadBalancer` in the Service `spec`.

More generally, Kubernetes supports two ways of exposing a Service on an external IP address: `NodePort`s and `LoadBalancer`s, as described [here](../../docs/user-guide/services.md#publishing-services---service-types).

If the `LoadBalancer` specification is used, it can take a short period for an external IP to show up in `kubectl get services` output, but you should then see it listed as well, e.g. like this:
```console
$ kubectl get services
NAME           CLUSTER-IP     EXTERNAL-IP    PORT(S)    AGE
frontend       10.0.63.63     23.236.59.54   80/TCP     1m
redis-master   10.0.76.248    <none>         6379/TCP   39m
redis-slave    10.0.112.188   <none>         6379/TCP   19m
```
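If the `EXTERNAL-IP` column has not been populated yet, you can watch for the assignment rather than polling by hand; a small sketch (output illustrative):

```console
$ kubectl get services frontend -w
NAME       CLUSTER-IP   EXTERNAL-IP    PORT(S)   AGE
frontend   10.0.63.63   <pending>      80/TCP    1m
frontend   10.0.63.63   23.236.59.54   80/TCP    2m
```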
Once you've exposed the service to an external IP, visit the IP to see your guestbook in action, i.e. `http://<EXTERNAL-IP>:<PORT>`.

You should see a web page that looks something like this (without the messages). Try adding some entries to it!

<img width="50%" src="http://amy-jo.storage.googleapis.com/images/gb_k8s_ex1.png">

If you are more advanced in the ops arena, you can also get the service IP manually from the output of `kubectl get pods,services`, and modify your firewall using standard tools and services (firewalld, iptables, selinux) that you are already familiar with.
#### Google Compute Engine External Load Balancer Specifics

In Google Compute Engine, Kubernetes automatically creates forwarding rules for services with `LoadBalancer`.

You can list the forwarding rules like this (the forwarding rule also indicates the external IP):
```console
$ gcloud compute forwarding-rules list
NAME       REGION        IP_ADDRESS       IP_PROTOCOL   TARGET
frontend   us-central1   130.211.188.51   TCP           us-central1/targetPools/frontend
```
In Google Compute Engine, you also may need to open the firewall for port 80 using the [console][cloud-console] or the `gcloud` tool. The following command will allow traffic from any source to instances tagged `kubernetes-node` (replace with your tags as appropriate):
```console
$ gcloud compute firewall-rules create --allow=tcp:80 --target-tags=kubernetes-node kubernetes-node-80
```

For GCE Kubernetes startup details, see the [Getting started on Google Compute Engine](../../docs/getting-started-guides/gce.md) guide.

For Google Compute Engine details about limiting traffic to specific sources, see the [Google Compute Engine firewall documentation][gce-firewall-docs].

[cloud-console]: https://console.developer.google.com
[gce-firewall-docs]: https://cloud.google.com/compute/docs/networking#firewalls
<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
[]()
<!-- END MUNGE: GENERATED_ANALYTICS -->
60
vendor/k8s.io/kubernetes/examples/guestbook/all-in-one/frontend.yaml
generated
vendored
Normal file
@@ -0,0 +1,60 @@
apiVersion: v1
kind: Service
metadata:
  name: frontend
  labels:
    app: guestbook
    tier: frontend
spec:
  # if your cluster supports it, uncomment the following to automatically create
  # an external load-balanced IP for the frontend service.
  # type: LoadBalancer
  ports:
    # the port that this service should serve on
  - port: 80
  selector:
    app: guestbook
    tier: frontend
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: frontend
  # these labels can be applied automatically
  # from the labels in the pod template if not set
  # labels:
  #   app: guestbook
  #   tier: frontend
spec:
  # this replicas value is default
  # modify it according to your case
  replicas: 3
  # selector can be applied automatically
  # from the labels in the pod template if not set
  # selector:
  #   matchLabels:
  #     app: guestbook
  #     tier: frontend
  template:
    metadata:
      labels:
        app: guestbook
        tier: frontend
    spec:
      containers:
      - name: php-redis
        image: gcr.io/google-samples/gb-frontend:v4
        resources:
          requests:
            cpu: 100m
            memory: 100Mi
        env:
        - name: GET_HOSTS_FROM
          value: dns
          # If your cluster config does not include a dns service, then to
          # instead access environment variables to find service host
          # info, comment out the 'value: dns' line above, and uncomment the
          # line below.
          # value: env
        ports:
        - containerPort: 80
179
vendor/k8s.io/kubernetes/examples/guestbook/all-in-one/guestbook-all-in-one.yaml
generated
vendored
Normal file
@@ -0,0 +1,179 @@
apiVersion: v1
kind: Service
metadata:
  name: redis-master
  labels:
    app: redis
    tier: backend
    role: master
spec:
  ports:
    # the port that this service should serve on
  - port: 6379
    targetPort: 6379
  selector:
    app: redis
    tier: backend
    role: master
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: redis-master
  # these labels can be applied automatically
  # from the labels in the pod template if not set
  # labels:
  #   app: redis
  #   role: master
  #   tier: backend
spec:
  # this replicas value is default
  # modify it according to your case
  replicas: 1
  # selector can be applied automatically
  # from the labels in the pod template if not set
  # selector:
  #   matchLabels:
  #     app: guestbook
  #     role: master
  #     tier: backend
  template:
    metadata:
      labels:
        app: redis
        role: master
        tier: backend
    spec:
      containers:
      - name: master
        image: gcr.io/google_containers/redis:e2e  # or just image: redis
        resources:
          requests:
            cpu: 100m
            memory: 100Mi
        ports:
        - containerPort: 6379
---
apiVersion: v1
kind: Service
metadata:
  name: redis-slave
  labels:
    app: redis
    tier: backend
    role: slave
spec:
  ports:
    # the port that this service should serve on
  - port: 6379
  selector:
    app: redis
    tier: backend
    role: slave
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: redis-slave
  # these labels can be applied automatically
  # from the labels in the pod template if not set
  # labels:
  #   app: redis
  #   role: slave
  #   tier: backend
spec:
  # this replicas value is default
  # modify it according to your case
  replicas: 2
  # selector can be applied automatically
  # from the labels in the pod template if not set
  # selector:
  #   matchLabels:
  #     app: guestbook
  #     role: slave
  #     tier: backend
  template:
    metadata:
      labels:
        app: redis
        role: slave
        tier: backend
    spec:
      containers:
      - name: slave
        image: gcr.io/google_samples/gb-redisslave:v1
        resources:
          requests:
            cpu: 100m
            memory: 100Mi
        env:
        - name: GET_HOSTS_FROM
          value: dns
          # If your cluster config does not include a dns service, then to
          # instead access an environment variable to find the master
          # service's host, comment out the 'value: dns' line above, and
          # uncomment the line below.
          # value: env
        ports:
        - containerPort: 6379
---
apiVersion: v1
kind: Service
metadata:
  name: frontend
  labels:
    app: guestbook
    tier: frontend
spec:
  # if your cluster supports it, uncomment the following to automatically create
  # an external load-balanced IP for the frontend service.
  # type: LoadBalancer
  ports:
    # the port that this service should serve on
  - port: 80
  selector:
    app: guestbook
    tier: frontend
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: frontend
  # these labels can be applied automatically
  # from the labels in the pod template if not set
  # labels:
  #   app: guestbook
  #   tier: frontend
spec:
  # this replicas value is default
  # modify it according to your case
  replicas: 3
  # selector can be applied automatically
  # from the labels in the pod template if not set
  # selector:
  #   matchLabels:
  #     app: guestbook
  #     tier: frontend
  template:
    metadata:
      labels:
        app: guestbook
        tier: frontend
    spec:
      containers:
      - name: php-redis
        image: gcr.io/google-samples/gb-frontend:v4
        resources:
          requests:
            cpu: 100m
            memory: 100Mi
        env:
        - name: GET_HOSTS_FROM
          value: dns
          # If your cluster config does not include a dns service, then to
          # instead access environment variables to find service host
          # info, comment out the 'value: dns' line above, and uncomment the
          # line below.
          # value: env
        ports:
        - containerPort: 80
62
vendor/k8s.io/kubernetes/examples/guestbook/all-in-one/redis-slave.yaml
generated
vendored
Normal file
@@ -0,0 +1,62 @@
apiVersion: v1
kind: Service
metadata:
  name: redis-slave
  labels:
    app: redis
    role: slave
    tier: backend
spec:
  ports:
    # the port that this service should serve on
  - port: 6379
  selector:
    app: redis
    role: slave
    tier: backend
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: redis-slave
  # these labels can be applied automatically
  # from the labels in the pod template if not set
  # labels:
  #   app: redis
  #   role: slave
  #   tier: backend
spec:
  # this replicas value is default
  # modify it according to your case
  replicas: 2
  # selector can be applied automatically
  # from the labels in the pod template if not set
  # selector:
  #   matchLabels:
  #     app: guestbook
  #     role: slave
  #     tier: backend
  template:
    metadata:
      labels:
        app: redis
        role: slave
        tier: backend
    spec:
      containers:
      - name: slave
        image: gcr.io/google_samples/gb-redisslave:v1
        resources:
          requests:
            cpu: 100m
            memory: 100Mi
        env:
        - name: GET_HOSTS_FROM
          value: dns
          # If your cluster config does not include a dns service, then to
          # instead access an environment variable to find the master
          # service's host, comment out the 'value: dns' line above, and
          # uncomment the line below.
          # value: env
        ports:
        - containerPort: 6379
42
vendor/k8s.io/kubernetes/examples/guestbook/frontend-deployment.yaml
generated
vendored
Normal file
@@ -0,0 +1,42 @@
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: frontend
  # these labels can be applied automatically
  # from the labels in the pod template if not set
  # labels:
  #   app: guestbook
  #   tier: frontend
spec:
  # this replicas value is default
  # modify it according to your case
  replicas: 3
  # selector can be applied automatically
  # from the labels in the pod template if not set
  # selector:
  #   matchLabels:
  #     app: guestbook
  #     tier: frontend
  template:
    metadata:
      labels:
        app: guestbook
        tier: frontend
    spec:
      containers:
      - name: php-redis
        image: gcr.io/google-samples/gb-frontend:v4
        resources:
          requests:
            cpu: 100m
            memory: 100Mi
        env:
        - name: GET_HOSTS_FROM
          value: dns
          # If your cluster config does not include a dns service, then to
          # instead access environment variables to find service host
          # info, comment out the 'value: dns' line above, and uncomment the
          # line below.
          # value: env
        ports:
        - containerPort: 80
17
vendor/k8s.io/kubernetes/examples/guestbook/frontend-service.yaml
generated
vendored
Normal file
@@ -0,0 +1,17 @@
apiVersion: v1
kind: Service
metadata:
  name: frontend
  labels:
    app: guestbook
    tier: frontend
spec:
  # if your cluster supports it, uncomment the following to automatically create
  # an external load-balanced IP for the frontend service.
  # type: LoadBalancer
  ports:
    # the port that this service should serve on
  - port: 80
  selector:
    app: guestbook
    tier: frontend
41
vendor/k8s.io/kubernetes/examples/guestbook/legacy/frontend-controller.yaml
generated
vendored
Normal file
@@ -0,0 +1,41 @@
apiVersion: v1
kind: ReplicationController
metadata:
  name: frontend
  # these labels can be applied automatically
  # from the labels in the pod template if not set
  # labels:
  #   app: guestbook
  #   tier: frontend
spec:
  # this replicas value is default
  # modify it according to your case
  replicas: 3
  # selector can be applied automatically
  # from the labels in the pod template if not set
  # selector:
  #   app: guestbook
  #   tier: frontend
  template:
    metadata:
      labels:
        app: guestbook
        tier: frontend
    spec:
      containers:
      - name: php-redis
        image: gcr.io/google_samples/gb-frontend:v4
        resources:
          requests:
            cpu: 100m
            memory: 100Mi
        env:
        - name: GET_HOSTS_FROM
          value: dns
          # If your cluster config does not include a dns service, then to
          # instead access environment variables to find service host
          # info, comment out the 'value: dns' line above, and uncomment the
          # line below.
          # value: env
        ports:
        - containerPort: 80
36
vendor/k8s.io/kubernetes/examples/guestbook/legacy/redis-master-controller.yaml
generated
vendored
Normal file
@@ -0,0 +1,36 @@
apiVersion: v1
kind: ReplicationController
metadata:
  name: redis-master
  # these labels can be applied automatically
  # from the labels in the pod template if not set
  labels:
    app: redis
    role: master
    tier: backend
spec:
  # this replicas value is default
  # modify it according to your case
  replicas: 1
  # selector can be applied automatically
  # from the labels in the pod template if not set
  # selector:
  #   app: guestbook
  #   role: master
  #   tier: backend
  template:
    metadata:
      labels:
        app: redis
        role: master
        tier: backend
    spec:
      containers:
      - name: master
        image: gcr.io/google_containers/redis:e2e  # or just image: redis
        resources:
          requests:
            cpu: 100m
            memory: 100Mi
        ports:
        - containerPort: 6379
44
vendor/k8s.io/kubernetes/examples/guestbook/legacy/redis-slave-controller.yaml
generated
vendored
Normal file
@@ -0,0 +1,44 @@
apiVersion: v1
kind: ReplicationController
metadata:
  name: redis-slave
  # these labels can be applied automatically
  # from the labels in the pod template if not set
  labels:
    app: redis
    role: slave
    tier: backend
spec:
  # this replicas value is default
  # modify it according to your case
  replicas: 2
  # selector can be applied automatically
  # from the labels in the pod template if not set
  # selector:
  #   app: guestbook
  #   role: slave
  #   tier: backend
  template:
    metadata:
      labels:
        app: redis
        role: slave
        tier: backend
    spec:
      containers:
      - name: slave
        image: gcr.io/google_samples/gb-redisslave:v1
        resources:
          requests:
            cpu: 100m
            memory: 100Mi
        env:
        - name: GET_HOSTS_FROM
          value: dns
          # If your cluster config does not include a dns service, then to
          # instead access an environment variable to find the master
          # service's host, comment out the 'value: dns' line above, and
          # uncomment the line below.
          # value: env
        ports:
        - containerPort: 6379
31
vendor/k8s.io/kubernetes/examples/guestbook/php-redis/Dockerfile
generated
vendored
Normal file
@@ -0,0 +1,31 @@
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

FROM php:5-apache

RUN apt-get update
RUN apt-get install -y php-pear
RUN pear channel-discover pear.nrk.io
RUN pear install nrk/Predis

# If the container's stdio is connected to systemd-journald,
# /proc/self/fd/{1,2} are Unix sockets and apache will not be able to open()
# them. Use "cat" to write directly to the already opened fds without opening
# them again.
RUN sed -i 's#ErrorLog /proc/self/fd/2#ErrorLog "|$/bin/cat 1>\&2"#' /etc/apache2/apache2.conf
RUN sed -i 's#CustomLog /proc/self/fd/1 combined#CustomLog "|/bin/cat" combined#' /etc/apache2/apache2.conf

ADD guestbook.php /var/www/html/guestbook.php
ADD controllers.js /var/www/html/controllers.js
ADD index.html /var/www/html/index.html
29
vendor/k8s.io/kubernetes/examples/guestbook/php-redis/controllers.js
generated
vendored
Normal file
@@ -0,0 +1,29 @@
var redisApp = angular.module('redis', ['ui.bootstrap']);

/**
 * Constructor
 */
function RedisController() {}

RedisController.prototype.onRedis = function() {
  this.scope_.messages.push(this.scope_.msg);
  this.scope_.msg = "";
  var value = this.scope_.messages.join();
  this.http_.get("guestbook.php?cmd=set&key=messages&value=" + value)
    .success(angular.bind(this, function(data) {
      this.scope_.redisResponse = "Updated.";
    }));
};

redisApp.controller('RedisCtrl', function ($scope, $http, $location) {
  $scope.controller = new RedisController();
  $scope.controller.scope_ = $scope;
  $scope.controller.location_ = $location;
  $scope.controller.http_ = $http;

  $scope.controller.http_.get("guestbook.php?cmd=get&key=messages")
    .success(function(data) {
      console.log(data);
      $scope.messages = data.data.split(",");
    });
});
41
vendor/k8s.io/kubernetes/examples/guestbook/php-redis/guestbook.php
generated
vendored
Normal file
@@ -0,0 +1,41 @@
<?php

error_reporting(E_ALL);
ini_set('display_errors', 1);

require 'Predis/Autoloader.php';

Predis\Autoloader::register();

if (isset($_GET['cmd']) === true) {
  $host = 'redis-master';
  if (getenv('GET_HOSTS_FROM') == 'env') {
    $host = getenv('REDIS_MASTER_SERVICE_HOST');
  }
  header('Content-Type: application/json');
  if ($_GET['cmd'] == 'set') {
    $client = new Predis\Client([
      'scheme' => 'tcp',
      'host'   => $host,
      'port'   => 6379,
    ]);

    $client->set($_GET['key'], $_GET['value']);
    print('{"message": "Updated"}');
  } else {
    $host = 'redis-slave';
    if (getenv('GET_HOSTS_FROM') == 'env') {
      $host = getenv('REDIS_SLAVE_SERVICE_HOST');
    }
    $client = new Predis\Client([
      'scheme' => 'tcp',
      'host'   => $host,
      'port'   => 6379,
    ]);

    $value = $client->get($_GET['key']);
    print('{"data": "' . $value . '"}');
  }
} else {
  phpinfo();
} ?>
25
vendor/k8s.io/kubernetes/examples/guestbook/php-redis/index.html
generated
vendored
Normal file
@@ -0,0 +1,25 @@
<html ng-app="redis">
  <head>
    <title>Guestbook</title>
    <link rel="stylesheet" href="//netdna.bootstrapcdn.com/bootstrap/3.1.1/css/bootstrap.min.css">
    <script src="https://ajax.googleapis.com/ajax/libs/angularjs/1.2.12/angular.min.js"></script>
    <script src="controllers.js"></script>
    <script src="https://cdnjs.cloudflare.com/ajax/libs/angular-ui-bootstrap/0.13.0/ui-bootstrap-tpls.js"></script>
  </head>
  <body ng-controller="RedisCtrl">
    <div style="width: 50%; margin-left: 20px">
      <h2>Guestbook</h2>
      <form>
        <fieldset>
          <input ng-model="msg" placeholder="Messages" class="form-control" type="text" name="input"><br>
          <button type="button" class="btn btn-primary" ng-click="controller.onRedis()">Submit</button>
        </fieldset>
      </form>
      <div>
        <div ng-repeat="msg in messages track by $index">
          {{msg}}
        </div>
      </div>
    </div>
  </body>
</html>
37
vendor/k8s.io/kubernetes/examples/guestbook/redis-master-deployment.yaml
generated
vendored
Normal file
@@ -0,0 +1,37 @@
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: redis-master
  # these labels can be applied automatically
  # from the labels in the pod template if not set
  # labels:
  #   app: redis
  #   role: master
  #   tier: backend
spec:
  # this replicas value is default
  # modify it according to your case
  replicas: 1
  # selector can be applied automatically
  # from the labels in the pod template if not set
  # selector:
  #   matchLabels:
  #     app: guestbook
  #     role: master
  #     tier: backend
  template:
    metadata:
      labels:
        app: redis
        role: master
        tier: backend
    spec:
      containers:
      - name: master
        image: gcr.io/google_containers/redis:e2e  # or just image: redis
        resources:
          requests:
            cpu: 100m
            memory: 100Mi
        ports:
        - containerPort: 6379
17
vendor/k8s.io/kubernetes/examples/guestbook/redis-master-service.yaml
generated
vendored
Normal file
@@ -0,0 +1,17 @@
apiVersion: v1
kind: Service
metadata:
  name: redis-master
  labels:
    app: redis
    role: master
    tier: backend
spec:
  ports:
    # the port that this service should serve on
  - port: 6379
    targetPort: 6379
  selector:
    app: redis
    role: master
    tier: backend
45
vendor/k8s.io/kubernetes/examples/guestbook/redis-slave-deployment.yaml
generated
vendored
Normal file
@@ -0,0 +1,45 @@
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: redis-slave
  # these labels can be applied automatically
  # from the labels in the pod template if not set
  # labels:
  #   app: redis
  #   role: slave
  #   tier: backend
spec:
  # this replicas value is default
  # modify it according to your case
  replicas: 2
  # selector can be applied automatically
  # from the labels in the pod template if not set
  # selector:
  #   matchLabels:
  #     app: guestbook
  #     role: slave
  #     tier: backend
  template:
    metadata:
      labels:
        app: redis
        role: slave
        tier: backend
    spec:
      containers:
      - name: slave
        image: gcr.io/google_samples/gb-redisslave:v1
        resources:
          requests:
            cpu: 100m
            memory: 100Mi
        env:
        - name: GET_HOSTS_FROM
          value: dns
          # If your cluster config does not include a dns service, then to
          # instead access an environment variable to find the master
          # service's host, comment out the 'value: dns' line above, and
          # uncomment the line below.
          # value: env
        ports:
        - containerPort: 6379
16
vendor/k8s.io/kubernetes/examples/guestbook/redis-slave-service.yaml
generated
vendored
Normal file
@@ -0,0 +1,16 @@
apiVersion: v1
kind: Service
metadata:
  name: redis-slave
  labels:
    app: redis
    role: slave
    tier: backend
spec:
  ports:
    # the port that this service should serve on
  - port: 6379
  selector:
    app: redis
    role: slave
    tier: backend
21
vendor/k8s.io/kubernetes/examples/guestbook/redis-slave/Dockerfile
generated
vendored
Normal file
@@ -0,0 +1,21 @@
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

FROM redis

ADD run.sh /run.sh

RUN chmod a+x /run.sh

CMD /run.sh
21
vendor/k8s.io/kubernetes/examples/guestbook/redis-slave/run.sh
generated
vendored
Executable file
@@ -0,0 +1,21 @@
#!/bin/bash

# Copyright 2014 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

if [[ ${GET_HOSTS_FROM:-dns} == "env" ]]; then
  redis-server --slaveof ${REDIS_MASTER_SERVICE_HOST} 6379
else
  redis-server --slaveof redis-master 6379
fi
90
vendor/k8s.io/kubernetes/examples/guidelines.md
generated
vendored
Normal file
@@ -0,0 +1,90 @@
# Example Guidelines

## An Example Is

An example demonstrates running an application/framework/workload on
Kubernetes in a meaningful way. It is educational and informative.

Examples are not:

* Full app deployments, ready to use, with no explanation. These
  belong either
  [here](https://github.com/kubernetes/application-dm-templates) or in
  something like [Helm](https://github.com/helm/charts).
* Simple toys to show how to use a Kubernetes feature. These belong in
  the [user guide](../docs/user-guide/).
* Demos that follow a script to show a Kubernetes feature in
  action. Example: killing a node to demonstrate controller
  self-healing.
* A tutorial which guides the user through multiple progressively more
  complex deployments to arrive at the final solution. An example
  should just demonstrate how to set up the correct deployment.

## An Example Includes

### Up front

* Has a "this is what you'll learn" section.
* Has a Table of Contents.
* Has a section that brings up the app in the fewest number of
  commands (TL;DR / quickstart), without cloning the repo (kubectl
  apply -f http://...).
* Points to documentation of prerequisites.
  * [Create a cluster](../docs/getting-started-guides/) (e.g., single-node docker).
  * [Setup kubectl](../docs/user-guide/prereqs.md).
  * etc.
* Should specify which release of Kubernetes is required and any other
  prerequisites, such as DNS, a cloudprovider with PV provisioning, a
  cloudprovider with external load balancers, etc.
  * Point to general documentation about alternatives for those
    mechanisms rather than present the alternatives in each example.
* Tries to balance between using new features, and being
  compatible across environments.

### Throughout

* Should point to documentation on first mention:
  [kubectl](../docs/user-guide/kubectl-overview.md),
  [pods](../docs/user-guide/pods.md),
  [services](../docs/user-guide/services.md),
  [deployments](../docs/user-guide/deployments.md),
  [replication controllers](../docs/user-guide/replication-controller.md),
  [jobs](../docs/user-guide/jobs.md),
  [labels](../docs/user-guide/labels.md),
  [persistent volumes](../docs/user-guide/persistent-volumes.md),
  etc.
* Most examples should be cloudprovider-independent (e.g., using PVCs, not PDs).
  * Other examples with cloudprovider-specific bits could be somewhere else.
* Actually show the app working -- console output, and/or screenshots.
  * Ascii animations and screencasts are recommended.
* Follows [config best practices](../docs/user-guide/config-best-practices.md).
* Shouldn't duplicate the [thorough walk-through](../docs/user-guide/#thorough-walkthrough).
* Docker images are pre-built, and source is contained in a subfolder.
  * Source is the Dockerfile and any custom files needed beyond the
    upstream app being packaged.
  * Images are pushed to `gcr.io/google-samples`. Contact @jeffmendoza
    to have an image pushed.
  * Images are tagged with a version (not latest) that is referenced
    in the example config.
* Only use the code highlighting types
  [supported by Rouge](https://github.com/jneen/rouge/wiki/list-of-supported-languages-and-lexers),
  as this is what GitHub Pages uses.
* Commands to be copied use the `shell` syntax highlighting type, and
  do not include any kind of prompt.
* Example output is in a separate block quote to distinguish it from
  the command (which doesn't have a prompt).
* When providing an example command or config for which the user is
  expected to substitute text with something specific to them, use
  angle brackets: `<IDENTIFIER>` for the text to be substituted.
* Use `kubectl` instead of `cluster/kubectl.sh` for example cli
  commands.

### At the end

* Should have a section suggesting what to look at next, both in terms
  of "additional resources" and "what example to look at next".

<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
[]()
<!-- END MUNGE: GENERATED_ANALYTICS -->
39
vendor/k8s.io/kubernetes/examples/https-nginx/BUILD
generated
vendored
Normal file
@@ -0,0 +1,39 @@
package(default_visibility = ["//visibility:public"])

licenses(["notice"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_binary",
    "go_library",
)

go_binary(
    name = "https-nginx",
    library = ":go_default_library",
    tags = ["automanaged"],
)

go_library(
    name = "go_default_library",
    srcs = ["make_secret.go"],
    tags = ["automanaged"],
    deps = [
        "//pkg/api:go_default_library",
        "//pkg/api/install:go_default_library",
        "//vendor:k8s.io/apimachinery/pkg/runtime",
    ],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
)
24
vendor/k8s.io/kubernetes/examples/https-nginx/Dockerfile
generated
vendored
Normal file
@@ -0,0 +1,24 @@
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

FROM nginx


COPY index2.html /usr/share/nginx/html/index2.html
RUN chmod +r /usr/share/nginx/html/index2.html
COPY auto-reload-nginx.sh /home/auto-reload-nginx.sh
RUN chmod +x /home/auto-reload-nginx.sh

# install inotify
RUN apt-get update && apt-get install -y inotify-tools
38
vendor/k8s.io/kubernetes/examples/https-nginx/Makefile
generated
vendored
Normal file
@@ -0,0 +1,38 @@
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

all:

TAG = 1.0
PREFIX = bprashanth/nginxhttps
KEY = /tmp/nginx.key
CERT = /tmp/nginx.crt
SECRET = /tmp/secret.json

keys:
	# The CName used here is specific to the service specified in nginx-app.yaml.
	openssl req -x509 -nodes -days 365 -newkey rsa:2048 -keyout $(KEY) -out $(CERT) -subj "/CN=nginxsvc/O=nginxsvc"

secret:
	go run make_secret.go -crt $(CERT) -key $(KEY) > $(SECRET)

container:
	docker build --pull -t $(PREFIX):$(TAG) .

push: container
	docker push $(PREFIX):$(TAG)

clean:
	rm $(KEY)
	rm $(CERT)
129
vendor/k8s.io/kubernetes/examples/https-nginx/README.md
generated
vendored
Normal file
@@ -0,0 +1,129 @@
# Nginx https service

This example creates a basic nginx https service useful in verifying proof of concept, keys, secrets, configmaps, and end-to-end https service creation in kubernetes.
It uses an [nginx server block](http://wiki.nginx.org/ServerBlockExample) to serve the index page over both http and https. It will detect changes to nginx's configuration file, default.conf, mounted as a configmap volume, and reload nginx automatically.

### Generate certificates

First generate a self-signed rsa key and certificate that the server can use for TLS. This step invokes the make_secret.go script in the same directory, which uses the kubernetes api to generate a secret json config in /tmp/secret.json.

```sh
$ make keys secret KEY=/tmp/nginx.key CERT=/tmp/nginx.crt SECRET=/tmp/secret.json
```

### Create a https nginx application running in a kubernetes cluster

You need a [running kubernetes cluster](../../docs/getting-started-guides/) for this to work.

Create a secret and a configmap.

```sh
$ kubectl create -f /tmp/secret.json
secret "nginxsecret" created

$ kubectl create configmap nginxconfigmap --from-file=examples/https-nginx/default.conf
configmap "nginxconfigmap" created
```

Create a service and a replication controller using the configuration in nginx-app.yaml.

```sh
$ kubectl create -f examples/https-nginx/nginx-app.yaml
You have exposed your service on an external port on all nodes in your
cluster. If you want to expose this service to the external internet, you may
need to set up firewall rules for the service port(s) (tcp:32211,tcp:30028) to serve traffic.
...
service "nginxsvc" created
replicationcontroller "my-nginx" created
```

Then, find the node port that Kubernetes is using for http and https traffic.

```sh
$ kubectl get service nginxsvc -o json
...
{
    "name": "http",
    "protocol": "TCP",
    "port": 80,
    "targetPort": 80,
    "nodePort": 32211
},
{
    "name": "https",
    "protocol": "TCP",
    "port": 443,
    "targetPort": 443,
    "nodePort": 30028
}
...
```

If you are using Kubernetes on a cloud provider, you may need to create cloud firewall rules to serve traffic.
If you are using GCE or GKE, you can use the following commands to add firewall rules.

```sh
$ gcloud compute firewall-rules create allow-nginx-http --allow tcp:32211 --description "Incoming http allowed."
Created [https://www.googleapis.com/compute/v1/projects/hello-world-job/global/firewalls/allow-nginx-http].
NAME               NETWORK   SRC_RANGES   RULES       SRC_TAGS   TARGET_TAGS
allow-nginx-http   default   0.0.0.0/0    tcp:32211

$ gcloud compute firewall-rules create allow-nginx-https --allow tcp:30028 --description "Incoming https allowed."
Created [https://www.googleapis.com/compute/v1/projects/hello-world-job/global/firewalls/allow-nginx-https].
NAME                NETWORK   SRC_RANGES   RULES       SRC_TAGS   TARGET_TAGS
allow-nginx-https   default   0.0.0.0/0    tcp:30028
```

Find your nodes' IPs.

```sh
$ kubectl get nodes -o json | grep ExternalIP -A 2
                "type": "ExternalIP",
                "address": "104.198.1.26"
            }
--
                "type": "ExternalIP",
                "address": "104.198.12.158"
            }
--
                "type": "ExternalIP",
                "address": "104.198.11.137"
            }
```

Now your service is up. You can either use your browser or type the following commands.

```sh
$ curl https://<your-node-ip>:<your-port> -k

$ curl https://104.198.1.26:30028 -k
...
<title>Welcome to nginx!</title>
...
```

Then we will update the configmap by changing `index.html` to `index2.html`.

```sh
kubectl create configmap nginxconfigmap --from-file=examples/https-nginx/default.conf -o yaml --dry-run\
| sed 's/index.html/index2.html/g' | kubectl apply -f -
configmap "nginxconfigmap" configured
```

Wait a few seconds to let the change propagate. Now you should be able to either use your browser or type the following commands to verify that nginx has been reloaded with the new configuration.

```sh
$ curl https://<your-node-ip>:<your-port> -k

$ curl https://104.198.1.26:30028 -k
...
<title>Nginx reloaded!</title>
...
```

For more information on how to run this in a kubernetes cluster, please see the [user-guide](../../docs/user-guide/connecting-applications.md).

<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
[]()
<!-- END MUNGE: GENERATED_ANALYTICS -->
30
vendor/k8s.io/kubernetes/examples/https-nginx/auto-reload-nginx.sh
generated
vendored
Executable file
@@ -0,0 +1,30 @@
#!/bin/sh

# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

nginx "$@"
oldcksum=`cksum /etc/nginx/conf.d/default.conf`

inotifywait -e modify,move,create,delete -mr --timefmt '%d/%m/%y %H:%M' --format '%T' \
/etc/nginx/conf.d/ | while read date time; do

  newcksum=`cksum /etc/nginx/conf.d/default.conf`
  if [ "$newcksum" != "$oldcksum" ]; then
    echo "At ${time} on ${date}, config file update detected."
    oldcksum=$newcksum
    nginx -s reload
  fi

done
17
vendor/k8s.io/kubernetes/examples/https-nginx/default.conf
generated
vendored
Normal file
@@ -0,0 +1,17 @@
server {
    listen 80 default_server;
    listen [::]:80 default_server ipv6only=on;

    listen 443 ssl;

    root /usr/share/nginx/html;
    index index.html;

    server_name localhost;
    ssl_certificate /etc/nginx/ssl/nginx.crt;
    ssl_certificate_key /etc/nginx/ssl/nginx.key;

    location / {
        try_files $uri $uri/ =404;
    }
}
28
vendor/k8s.io/kubernetes/examples/https-nginx/index2.html
generated
vendored
Normal file
@@ -0,0 +1,28 @@
<!DOCTYPE html>
<html>
<head>
<title>Nginx reloaded!</title>
<style>
    body {
        width: 35em;
        margin: 0 auto;
        font-family: Tahoma, Verdana, Arial, sans-serif;
    }
</style>
</head>
<body>
<h1>Nginx has been reloaded!</h1>
<p>If you see this page, the nginx web server has been automatically reloaded, since the config file has been updated using <a href="https://github.com/kubernetes/kubernetes">Kubernetes</a>.</p>


<p>For online documentation and support please refer to
<a href="http://kubernetes.io/">kubernetes.io</a>.<br/></p>

<p>For online documentation and support please refer to
<a href="http://nginx.org/">nginx.org</a>.<br/>
Commercial support is available at
<a href="http://nginx.com/">nginx.com</a>.</p>

<p><em>Thank you for using nginx.</em></p>
</body>
</html>
69
vendor/k8s.io/kubernetes/examples/https-nginx/make_secret.go
generated
vendored
Normal file
69
vendor/k8s.io/kubernetes/examples/https-nginx/make_secret.go
generated
vendored
Normal file
|
@ -0,0 +1,69 @@
|
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// A small script that converts the given OpenSSL public/private keys to
// a secret that it writes to stdout as JSON. The most common use case is to
// create a secret from self-signed certificates used to authenticate with
// a devserver. Usage: go run make_secret.go -crt ca.crt -key priv.key > secret.json
package main

import (
	"flag"
	"fmt"
	"io/ioutil"
	"log"

	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/kubernetes/pkg/api"

	// This installs the legacy v1 API
	_ "k8s.io/kubernetes/pkg/api/install"
)

// TODO:
// Add a -o flag that writes to the specified destination file.
// Teach the script to create crt and key if -crt and -key aren't specified.
var (
	crt = flag.String("crt", "", "path to nginx certificate.")
	key = flag.String("key", "", "path to nginx private key.")
)

func read(file string) []byte {
	b, err := ioutil.ReadFile(file)
	if err != nil {
		log.Fatalf("Cannot read file %v, %v", file, err)
	}
	return b
}

func main() {
	flag.Parse()
	if *crt == "" || *key == "" {
		log.Fatalf("Need to specify -crt and -key")
	}
	nginxCrt := read(*crt)
	nginxKey := read(*key)
	secret := &api.Secret{
		ObjectMeta: api.ObjectMeta{
			Name: "nginxsecret",
		},
		Data: map[string][]byte{
			"nginx.crt": nginxCrt,
			"nginx.key": nginxKey,
		},
	}
	// Use Print rather than Printf: the encoded JSON is not a format string
	// and may contain '%' characters.
	fmt.Print(runtime.EncodeOrDie(api.Codecs.LegacyCodec(api.Registry.EnabledVersions()...), secret))
}
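For context, an end-to-end run of this script might look like the sketch below; the `openssl` flags and file names are illustrative assumptions, while the `-crt`/`-key` flags and the `nginxsecret` name come from the code above.

```sh
# Generate a self-signed certificate/key pair (illustrative subject name).
openssl req -x509 -nodes -days 365 -newkey rsa:2048 \
    -keyout nginx.key -out nginx.crt -subj "/CN=nginxsvc/O=nginxsvc"

# Emit the secret as JSON and create it in the cluster.
go run make_secret.go -crt nginx.crt -key nginx.key > /tmp/secret.json
kubectl create -f /tmp/secret.json
```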
54 vendor/k8s.io/kubernetes/examples/https-nginx/nginx-app.yaml generated vendored Normal file
@@ -0,0 +1,54 @@
apiVersion: v1
kind: Service
metadata:
  name: nginxsvc
  labels:
    app: nginx
spec:
  type: NodePort
  ports:
  - port: 80
    protocol: TCP
    name: http
  - port: 443
    protocol: TCP
    name: https
  selector:
    app: nginx
---
apiVersion: v1
kind: ReplicationController
metadata:
  name: my-nginx
spec:
  replicas: 1
  template:
    metadata:
      labels:
        app: nginx
    spec:
      volumes:
      - name: secret-volume
        secret:
          secretName: nginxsecret
      - name: configmap-volume
        configMap:
          name: nginxconfigmap
      containers:
      - name: nginxhttps
        image: ymqytw/nginxhttps:1.5
        command: ["/home/auto-reload-nginx.sh"]
        ports:
        - containerPort: 443
        - containerPort: 80
        livenessProbe:
          httpGet:
            path: /index.html
            port: 80
          initialDelaySeconds: 30
          timeoutSeconds: 1
        volumeMounts:
        - mountPath: /etc/nginx/ssl
          name: secret-volume
        - mountPath: /etc/nginx/conf.d
          name: configmap-volume
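Note that this manifest references two objects that must exist before the pod can start: the `nginxsecret` secret (see `make_secret.go` above) and the `nginxconfigmap` configmap. A sketch of creating the configmap, assuming a local nginx config file named `default.conf`:

```sh
# The pod mounts this configmap at /etc/nginx/conf.d.
kubectl create configmap nginxconfigmap --from-file=default.conf
```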
134 vendor/k8s.io/kubernetes/examples/javaee/README.md generated vendored Normal file
@@ -0,0 +1,134 @@
## Java EE Application using WildFly and MySQL

The following document describes the deployment of a Java EE application using the [WildFly](http://wildfly.org) application server and a MySQL database server on Kubernetes. The sample application source code is at: https://github.com/javaee-samples/javaee7-simple-sample.

### Prerequisites

https://github.com/kubernetes/kubernetes/blob/master/docs/user-guide/prereqs.md

### Start MySQL Pod

In Kubernetes a [_Pod_](../../docs/user-guide/pods.md) is the smallest deployable unit that can be created, scheduled, and managed. It's a collocated group of containers that share an IP and storage volume.

Here is the config for the MySQL pod: [mysql-pod.yaml](mysql-pod.yaml)

<!-- BEGIN MUNGE: mysql-pod.yaml -->
<!-- END MUNGE: EXAMPLE -->

Create the MySQL pod:

```sh
kubectl create -f examples/javaee/mysql-pod.yaml
```

Check the status of the pod:

```sh
kubectl get -w po
NAME        READY     STATUS    RESTARTS   AGE
mysql-pod   0/1       Pending   0          4s
NAME        READY     STATUS    RESTARTS   AGE
mysql-pod   0/1       Running   0          44s
mysql-pod   1/1       Running   0          44s
```

Wait for the status to change to `1/1` and `Running`.

### Start MySQL Service

We are creating a [_Service_](../../docs/user-guide/services.md) to expose the TCP port of the MySQL server. A Service distributes traffic across a set of Pods. The order of the Service and the targeted Pods does not matter; however, the Service needs to be started before any other Pods consuming it are started.

In this application, we will use a Kubernetes Service to provide a discoverable endpoint for the MySQL server in the cluster. The MySQL service targets pods with the labels `name: mysql-pod` and `context: docker-k8s-lab`.

Here is the definition of the MySQL service: [mysql-service.yaml](mysql-service.yaml)

<!-- BEGIN MUNGE: mysql-service.yaml -->
<!-- END MUNGE: EXAMPLE -->

Create this service:

```sh
kubectl create -f examples/javaee/mysql-service.yaml
```

Get the status of the service:

```sh
kubectl get -w svc
NAME            LABELS                                    SELECTOR                                IP(S)          PORT(S)
kubernetes      component=apiserver,provider=kubernetes   <none>                                  10.247.0.1     443/TCP
mysql-service   context=docker-k8s-lab,name=mysql-pod     context=docker-k8s-lab,name=mysql-pod   10.247.63.43   3306/TCP
```

If multiple services are running, the output can be narrowed down by specifying labels:

```sh
kubectl get -w po -l context=docker-k8s-lab,name=mysql-pod
NAME        READY     STATUS    RESTARTS   AGE
mysql-pod   1/1       Running   0          4m
```

This is also the selector label used by the service to target pods.

When a Service is run on a node, the kubelet adds a set of environment variables for each active Service. It supports both Docker-links-compatible variables and the simpler `{SVCNAME}_SERVICE_HOST` and `{SVCNAME}_SERVICE_PORT` variables, where the Service name is upper-cased and dashes are converted to underscores.

Our service name is "mysql-service", so the `MYSQL_SERVICE_SERVICE_HOST` and `MYSQL_SERVICE_SERVICE_PORT` variables are available to other pods. These host and port variables are then used to create the JDBC resource in WildFly.
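To see these variables from inside a consuming pod, something like the following sketch should work (the pod name is the one used later in this example; the printed values are illustrative):

```sh
kubectl exec wildfly-rc-w2kk5 -- env | grep MYSQL_SERVICE
MYSQL_SERVICE_SERVICE_HOST=10.247.63.43
MYSQL_SERVICE_SERVICE_PORT=3306
```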

### Start WildFly Replication Controller

WildFly is a lightweight Java EE 7 compliant application server. It is wrapped in a Replication Controller and used as the Java EE runtime.

In Kubernetes a [_Replication Controller_](../../docs/user-guide/replication-controller.md) is responsible for replicating sets of identical pods. Like a _Service_, it has a selector query which identifies the members of its set. Unlike a Service, it also has a desired number of replicas, and it will create or delete pods to ensure that the number of pods matches up with its desired state.
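For instance, raising the desired replica count makes the controller create pods until the observed state matches; a quick sketch (the replica count here is arbitrary):

```sh
# Ask the controller for two replicas; it creates a second pod to match.
kubectl scale rc wildfly-rc --replicas=2
kubectl get po -l name=wildfly
```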

Here is the definition of the WildFly replication controller: [wildfly-rc.yaml](wildfly-rc.yaml).

<!-- BEGIN MUNGE: wildfly-rc.yaml -->
<!-- END MUNGE: EXAMPLE -->

Create this controller:

```sh
kubectl create -f examples/javaee/wildfly-rc.yaml
```

Check the status of the pod inside the replication controller:

```sh
kubectl get po
NAME               READY     STATUS    RESTARTS   AGE
mysql-pod          1/1       Running   0          1h
wildfly-rc-w2kk5   1/1       Running   0          6m
```

### Access the application

Get the IP address of the pod:

```sh
kubectl get -o template po wildfly-rc-w2kk5 --template={{.status.podIP}}
10.246.1.23
```

Log in to the node and access the application:

```sh
vagrant ssh node-1
Last login: Thu Jul 16 00:24:36 2015 from 10.0.2.2
[vagrant@kubernetes-node-1 ~]$ curl http://10.246.1.23:8080/employees/resources/employees/
<?xml version="1.0" encoding="UTF-8" standalone="yes"?><collection><employee><id>1</id><name>Penny</name></employee><employee><id>2</id><name>Sheldon</name></employee><employee><id>3</id><name>Amy</name></employee><employee><id>4</id><name>Leonard</name></employee><employee><id>5</id><name>Bernadette</name></employee><employee><id>6</id><name>Raj</name></employee><employee><id>7</id><name>Howard</name></employee><employee><id>8</id><name>Priya</name></employee></collection>
```

### Delete resources

All resources created in this application can be deleted:

```sh
kubectl delete -f examples/javaee/mysql-pod.yaml
kubectl delete -f examples/javaee/mysql-service.yaml
kubectl delete -f examples/javaee/wildfly-rc.yaml
```

<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
[]()
<!-- END MUNGE: GENERATED_ANALYTICS -->
28 vendor/k8s.io/kubernetes/examples/javaee/mysql-pod.yaml generated vendored Normal file
@@ -0,0 +1,28 @@
apiVersion: v1
kind: Pod
metadata:
  name: mysql-pod
  labels:
    name: mysql-pod
    context: docker-k8s-lab
spec:
  containers:
    -
      name: mysql
      image: mysql:latest
      env:
        -
          name: "MYSQL_USER"
          value: "mysql"
        -
          name: "MYSQL_PASSWORD"
          value: "mysql"
        -
          name: "MYSQL_DATABASE"
          value: "sample"
        -
          name: "MYSQL_ROOT_PASSWORD"
          value: "supersecret"
      ports:
        -
          containerPort: 3306
15 vendor/k8s.io/kubernetes/examples/javaee/mysql-service.yaml generated vendored Normal file
@@ -0,0 +1,15 @@
apiVersion: v1
kind: Service
metadata:
  name: mysql-service
  labels:
    name: mysql-pod
    context: docker-k8s-lab
spec:
  ports:
    # the port that this service should serve on
    - port: 3306
  # label keys and values that must match in order to receive traffic for this service
  selector:
    name: mysql-pod
    context: docker-k8s-lab
19 vendor/k8s.io/kubernetes/examples/javaee/wildfly-rc.yaml generated vendored Normal file
@@ -0,0 +1,19 @@
apiVersion: v1
kind: ReplicationController
metadata:
  name: wildfly-rc
  labels:
    name: wildfly
    context: docker-k8s-lab
spec:
  replicas: 1
  template:
    metadata:
      labels:
        name: wildfly
    spec:
      containers:
      - name: wildfly-rc-pod
        image: arungupta/wildfly-mysql-javaee7:k8s
        ports:
        - containerPort: 8080
185 vendor/k8s.io/kubernetes/examples/javaweb-tomcat-sidecar/README.md generated vendored Normal file
@@ -0,0 +1,185 @@
## Java Web Application with Tomcat and Sidecar Container

The following document describes the deployment of a Java Web application using Tomcat. Instead of packaging the `war` file inside the Tomcat image or mounting the `war` as a volume, we use a sidecar container as the `war` file provider.

### Prerequisites

https://github.com/kubernetes/kubernetes/blob/master/docs/user-guide/prereqs.md

### Overview

This sidecar mode brings a new workflow for Java users:

![](workflow.png?raw=true "Workflow")

As you can see, a user can create a `sample:v2` container as a sidecar to "provide" the war file to Tomcat by copying it to the shared `emptyDir` volume. The Pod will make sure the two containers compose an "atomic" scheduling unit, which is perfect for this case. Thus, your application version management will be totally separated from web server management.

For example, if you are going to change the configuration of your Tomcat:

```console
$ docker exec -it <tomcat_container_id> /bin/bash
# make some change, and then commit it to a new image
$ docker commit <tomcat_container_id> mytomcat:7.0-dev
```

Done! The new Tomcat image **will not** mess with your `sample.war` file. You can re-use your Tomcat image with lots of different war container images for lots of different apps without having to build lots of different images.

This also means that rolling out a new Tomcat to patch security issues, or anything else, doesn't require rebuilding N different images.

**Why not put my `sample.war` in a host dir and mount it to the tomcat container?**

You would have to **manage the volumes** in this case; for example, when you restart or scale the pod onto another node, your content is not ready on that host.

Generally, we would have to set up a distributed file system (NFS at least) volume to solve this (if we do not have a GCE PD volume). But this is generally unnecessary.

### How To Set this Up

In Kubernetes a [_Pod_](../../docs/user-guide/pods.md) is the smallest deployable unit that can be created, scheduled, and managed. It's a collocated group of containers that share an IP and storage volume.

Here is the config [javaweb.yaml](javaweb.yaml) for the Java Web pod:

NOTE: you should define the `war` container **first** as it is the "provider".

<!-- BEGIN MUNGE: javaweb.yaml -->

```
apiVersion: v1
kind: Pod
metadata:
  name: javaweb
spec:
  containers:
  - image: resouer/sample:v1
    name: war
    volumeMounts:
    - mountPath: /app
      name: app-volume
  - image: resouer/mytomcat:7.0
    name: tomcat
    command: ["sh","-c","/root/apache-tomcat-7.0.42-v2/bin/start.sh"]
    volumeMounts:
    - mountPath: /root/apache-tomcat-7.0.42-v2/webapps
      name: app-volume
    ports:
    - containerPort: 8080
      hostPort: 8001
  volumes:
  - name: app-volume
    emptyDir: {}
```

<!-- END MUNGE: EXAMPLE -->

The only magic here is the `resouer/sample:v1` image:

```
FROM busybox:latest
ADD sample.war sample.war
CMD "sh" "mv.sh"
```

And the contents of `mv.sh` is:

```sh
cp /sample.war /app
tail -f /dev/null
```

#### Explanation

1. The 'war' container only contains the `war` file of your app
2. The 'war' container's CMD tries to copy `sample.war` to the `emptyDir` volume path
3. The last line, `tail -f`, is just used to hold the container, as a Replication Controller does not support one-off tasks
4. The 'tomcat' container will load the `sample.war` from the volume path

What's more, if you don't want to enclose a built-in `mv.sh` script in the `war` container, you can use a Pod lifecycle handler to do the copy work; here's an example [javaweb-2.yaml](javaweb-2.yaml):

<!-- BEGIN MUNGE: javaweb-2.yaml -->

```
apiVersion: v1
kind: Pod
metadata:
  name: javaweb-2
spec:
  containers:
  - image: resouer/sample:v2
    name: war
    lifecycle:
      postStart:
        exec:
          command:
            - "cp"
            - "/sample.war"
            - "/app"
    volumeMounts:
    - mountPath: /app
      name: app-volume
  - image: resouer/mytomcat:7.0
    name: tomcat
    command: ["sh","-c","/root/apache-tomcat-7.0.42-v2/bin/start.sh"]
    volumeMounts:
    - mountPath: /root/apache-tomcat-7.0.42-v2/webapps
      name: app-volume
    ports:
    - containerPort: 8080
      hostPort: 8001
  volumes:
  - name: app-volume
    emptyDir: {}
```

<!-- END MUNGE: EXAMPLE -->

And the `resouer/sample:v2` Dockerfile is quite simple:

```
FROM busybox:latest
ADD sample.war sample.war
CMD "tail" "-f" "/dev/null"
```

#### Explanation

1. The 'war' container only contains the `war` file of your app
2. The 'war' container's CMD uses `tail -f` to hold the container, nothing more
3. The `postStart` lifecycle handler will do the `cp` after the `war` container is started
4. Again, the 'tomcat' container will load the `sample.war` from the volume path (a quick way to verify this is sketched below)
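A minimal way to verify that the copy landed in the shared volume (pod and container names come from the YAML above; the listing is illustrative):

```sh
# List the shared webapps directory from inside the tomcat container.
kubectl exec javaweb-2 -c tomcat -- ls /root/apache-tomcat-7.0.42-v2/webapps
sample.war
```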

Done! Now your `war` container contains nothing except `sample.war`; clean enough.

### Test It Out

Create the Java web pod:

```console
$ kubectl create -f examples/javaweb-tomcat-sidecar/javaweb-2.yaml
```

Check the status of the pod:

```console
$ kubectl get -w po
NAME        READY     STATUS    RESTARTS   AGE
javaweb-2   2/2       Running   0          7s
```

Wait for the status to change to `2/2` and `Running`. Then you can visit the "Hello, World" page at `http://localhost:8001/sample/index.html`.
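Because the pod publishes `hostPort: 8001`, the page is reachable on the node that runs the pod; a sketch (the response shown is an illustrative assumption):

```sh
curl http://localhost:8001/sample/index.html
Hello, World!
```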

You can also test `javaweb.yaml` in the same way.

### Delete Resources

All resources created in this application can be deleted:

```console
$ kubectl delete -f examples/javaweb-tomcat-sidecar/javaweb-2.yaml
```

<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
[]()
<!-- END MUNGE: GENERATED_ANALYTICS -->
31 vendor/k8s.io/kubernetes/examples/javaweb-tomcat-sidecar/javaweb-2.yaml generated vendored Normal file
@@ -0,0 +1,31 @@
apiVersion: v1
kind: Pod
metadata:
  name: javaweb-2
spec:
  containers:
  - image: resouer/sample:v2
    name: war
    lifecycle:
      postStart:
        exec:
          command:
            - "cp"
            - "/sample.war"
            - "/app"
    volumeMounts:
    - mountPath: /app
      name: app-volume
  - image: resouer/mytomcat:7.0
    name: tomcat
    command: ["sh","-c","/root/apache-tomcat-7.0.42-v2/bin/start.sh"]
    volumeMounts:
    - mountPath: /root/apache-tomcat-7.0.42-v2/webapps
      name: app-volume
    ports:
    - containerPort: 8080
      hostPort: 8001
  volumes:
  - name: app-volume
    emptyDir: {}
24 vendor/k8s.io/kubernetes/examples/javaweb-tomcat-sidecar/javaweb.yaml generated vendored Normal file
@@ -0,0 +1,24 @@
apiVersion: v1
kind: Pod
metadata:
  name: javaweb
spec:
  containers:
  - image: resouer/sample:v1
    name: war
    volumeMounts:
    - mountPath: /app
      name: app-volume
  - image: resouer/mytomcat:7.0
    name: tomcat
    command: ["sh","-c","/root/apache-tomcat-7.0.42-v2/bin/start.sh"]
    volumeMounts:
    - mountPath: /root/apache-tomcat-7.0.42-v2/webapps
      name: app-volume
    ports:
    - containerPort: 8080
      hostPort: 8001
  volumes:
  - name: app-volume
    emptyDir: {}
BIN vendor/k8s.io/kubernetes/examples/javaweb-tomcat-sidecar/workflow.png generated vendored Normal file
Binary file not shown. (New image, 45 KiB.)
7 vendor/k8s.io/kubernetes/examples/job/expansions/README.md generated vendored Normal file
@@ -0,0 +1,7 @@

This file has moved to: http://kubernetes.io/docs/user-guide/jobs/

<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
[]()
<!-- END MUNGE: GENERATED_ANALYTICS -->
7 vendor/k8s.io/kubernetes/examples/job/work-queue-1/README.md generated vendored Normal file
@@ -0,0 +1,7 @@

This file has moved to: http://kubernetes.io/docs/user-guide/jobs/

<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
[]()
<!-- END MUNGE: GENERATED_ANALYTICS -->
Some files were not shown because too many files have changed in this diff.